Merge tag 'sound-4.17-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai...
author    Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 17 May 2018 17:13:44 +0000 (10:13 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 17 May 2018 17:13:44 +0000 (10:13 -0700)
Pull sound fixes from Takashi Iwai:
 "We have a core fix in the compat code for covering a potential race
  (double references), but it's a very minor change.

  The rest are all small device-specific quirks, as well as a correction
  of the new UAC3 support code"

* tag 'sound-4.17-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound:
  ALSA: usb-audio: Use Class Specific EP for UAC3 devices.
  ALSA: hda/realtek - Clevo P950ER ALC1220 Fixup
  ALSA: usb: mixer: volume quirk for CM102-A+/102S+
  ALSA: hda: Add Lenovo C50 All in one to the power_save blacklist
  ALSA: control: fix a redundant-copy issue

961 files changed:
Documentation/admin-guide/pm/intel_pstate.rst
Documentation/admin-guide/pm/sleep-states.rst
Documentation/bpf/bpf_devel_QA.txt
Documentation/device-mapper/thin-provisioning.txt
Documentation/devicetree/bindings/ata/ahci-platform.txt
Documentation/devicetree/bindings/display/panel/panel-common.txt
Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
Documentation/devicetree/bindings/input/atmel,maxtouch.txt
Documentation/devicetree/bindings/net/can/rcar_canfd.txt
Documentation/devicetree/bindings/net/renesas,ravb.txt
Documentation/devicetree/bindings/pinctrl/allwinner,sunxi-pinctrl.txt
Documentation/devicetree/bindings/serial/amlogic,meson-uart.txt
Documentation/devicetree/bindings/serial/mvebu-uart.txt
Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
Documentation/devicetree/bindings/usb/usb-xhci.txt
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/devicetree/overlay-notes.txt
Documentation/doc-guide/parse-headers.rst
Documentation/driver-api/firmware/request_firmware.rst
Documentation/driver-api/infrastructure.rst
Documentation/driver-api/usb/typec.rst
Documentation/i2c/dev-interface
Documentation/ioctl/ioctl-number.txt
Documentation/media/uapi/rc/keytable.c.rst
Documentation/media/uapi/v4l/v4l2grab.c.rst
Documentation/networking/ip-sysctl.txt
Documentation/power/suspend-and-cpuhotplug.txt
Documentation/process/magic-number.rst
Documentation/sphinx/parse-headers.pl
Documentation/trace/ftrace.rst
Documentation/translations/zh_CN/video4linux/v4l2-framework.txt
Documentation/virtual/kvm/api.txt
Documentation/virtual/kvm/arm/psci.txt [new file with mode: 0644]
MAINTAINERS
Makefile
arch/Kconfig
arch/arm/boot/dts/gemini-nas4220b.dts
arch/arm/boot/dts/imx35.dtsi
arch/arm/boot/dts/imx53.dtsi
arch/arm/boot/dts/omap4.dtsi
arch/arm/configs/gemini_defconfig
arch/arm/configs/socfpga_defconfig
arch/arm/include/asm/kvm_host.h
arch/arm/include/uapi/asm/kvm.h
arch/arm/kvm/guest.c
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/pm-asm-offsets.c
arch/arm/mach-omap2/sleep33xx.S
arch/arm/mach-omap2/sleep43xx.S
arch/arm/mach-s3c24xx/mach-jive.c
arch/arm64/Makefile
arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
arch/arm64/boot/dts/amlogic/meson-gxm.dtsi
arch/arm64/boot/dts/arm/juno-motherboard.dtsi
arch/arm64/boot/dts/broadcom/stingray/stingray-sata.dtsi
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/module.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/uapi/asm/kvm.h
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/module-plts.c
arch/arm64/kernel/module.c
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/traps.c
arch/arm64/kvm/guest.c
arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
arch/arm64/kvm/sys_regs.c
arch/arm64/lib/Makefile
arch/arm64/mm/flush.c
arch/arm64/mm/init.c
arch/hexagon/include/asm/io.h
arch/hexagon/lib/checksum.c
arch/parisc/Makefile
arch/parisc/kernel/drivers.c
arch/parisc/kernel/pci.c
arch/parisc/kernel/time.c
arch/parisc/kernel/traps.c
arch/parisc/mm/init.c
arch/powerpc/include/asm/ftrace.h
arch/powerpc/include/asm/paca.h
arch/powerpc/include/asm/powernv.h
arch/powerpc/include/asm/topology.h
arch/powerpc/kernel/mce_power.c
arch/powerpc/kernel/smp.c
arch/powerpc/kvm/booke.c
arch/powerpc/mm/mem.c
arch/powerpc/platforms/powernv/memtrace.c
arch/powerpc/platforms/powernv/npu-dma.c
arch/powerpc/platforms/powernv/opal-rtc.c
arch/riscv/Kconfig
arch/riscv/include/asm/Kbuild
arch/riscv/kernel/vdso/Makefile
arch/s390/configs/debug_defconfig
arch/s390/configs/performance_defconfig
arch/s390/crypto/crc32be-vx.S
arch/s390/crypto/crc32le-vx.S
arch/s390/include/asm/nospec-insn.h [new file with mode: 0644]
arch/s390/include/asm/purgatory.h
arch/s390/include/asm/thread_info.h
arch/s390/kernel/Makefile
arch/s390/kernel/asm-offsets.c
arch/s390/kernel/base.S
arch/s390/kernel/entry.S
arch/s390/kernel/irq.c
arch/s390/kernel/mcount.S
arch/s390/kernel/module.c
arch/s390/kernel/nospec-branch.c
arch/s390/kernel/nospec-sysfs.c [new file with mode: 0644]
arch/s390/kernel/perf_cpum_cf_events.c
arch/s390/kernel/perf_cpum_sf.c
arch/s390/kernel/process.c
arch/s390/kernel/reipl.S
arch/s390/kernel/swsusp.S
arch/s390/kernel/uprobes.c
arch/s390/lib/mem.S
arch/s390/net/bpf_jit.S
arch/s390/net/bpf_jit_comp.c
arch/sh/Kconfig
arch/sh/kernel/cpu/sh2/probe.c
arch/sh/kernel/setup.c
arch/sh/mm/consistent.c
arch/sh/mm/init.c
arch/sh/mm/numa.c
arch/sparc/include/uapi/asm/oradax.h
arch/sparc/kernel/vio.c
arch/x86/Kconfig
arch/x86/entry/entry_64_compat.S
arch/x86/entry/vdso/vdso32/vdso-fakesections.c [deleted file]
arch/x86/events/core.c
arch/x86/events/intel/core.c
arch/x86/events/intel/cstate.c
arch/x86/events/msr.c
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/ftrace.h
arch/x86/include/asm/irq_vectors.h
arch/x86/include/asm/jailhouse_para.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_64_types.h
arch/x86/include/uapi/asm/msgbuf.h
arch/x86/include/uapi/asm/shmbuf.h
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/jailhouse.c
arch/x86/kernel/setup.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/tsc.c
arch/x86/kvm/lapic.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.h
arch/x86/mm/pageattr.c
arch/x86/mm/pti.c
arch/x86/net/bpf_jit_comp.c
arch/x86/xen/enlighten_hvm.c
arch/x86/xen/enlighten_pv.c
arch/x86/xen/mmu.c
arch/x86/xen/mmu_pv.c
block/bfq-iosched.c
block/blk-cgroup.c
block/blk-core.c
block/blk-mq.c
block/blk-mq.h
block/genhd.c
block/partition-generic.c
crypto/api.c
crypto/drbg.c
drivers/acpi/acpi_video.c
drivers/acpi/acpi_watchdog.c
drivers/acpi/button.c
drivers/acpi/scan.c
drivers/acpi/sleep.c
drivers/amba/bus.c
drivers/android/binder.c
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/ahci_mvebu.c
drivers/ata/ahci_qoriq.c
drivers/ata/ahci_xgene.c
drivers/ata/libahci.c
drivers/ata/libahci_platform.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/sata_highbank.c
drivers/ata/sata_sil24.c
drivers/atm/firestream.c
drivers/atm/zatm.c
drivers/base/dma-coherent.c
drivers/base/dma-mapping.c
drivers/base/firmware_loader/fallback.c
drivers/base/firmware_loader/fallback.h
drivers/block/loop.c
drivers/block/loop.h
drivers/block/rbd.c
drivers/block/swim.c
drivers/block/swim3.c
drivers/bluetooth/btusb.c
drivers/bus/Kconfig
drivers/cdrom/cdrom.c
drivers/char/agp/uninorth-agp.c
drivers/char/random.c
drivers/char/virtio_console.c
drivers/clk/clk-cs2000-cp.c
drivers/clk/clk-mux.c
drivers/clk/clk-stm32mp1.c
drivers/clk/clk.c
drivers/clk/meson/clk-regmap.c
drivers/clk/meson/gxbb-aoclk.h
drivers/clk/meson/meson8b.c
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/brcmstb-avs-cpufreq.c
drivers/cpufreq/cppc_cpufreq.c
drivers/cpufreq/powernv-cpufreq.c
drivers/firmware/arm_scmi/clock.c
drivers/fpga/altera-ps-spi.c
drivers/gpio/gpio-aspeed.c
drivers/gpio/gpio-pci-idio-16.c
drivers/gpio/gpio-pcie-idio-24.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdkfd/Kconfig
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
drivers/gpu/drm/amd/display/dc/core/dc_surface.c
drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
drivers/gpu/drm/amd/display/include/dal_asic_id.h
drivers/gpu/drm/amd/display/modules/color/color_gamma.c
drivers/gpu/drm/amd/include/atomfirmware.h
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
drivers/gpu/drm/bridge/Kconfig
drivers/gpu/drm/bridge/dumb-vga-dac.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_file.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/exynos/regs-mixer.h
drivers/gpu/drm/i915/intel_cdclk.c
drivers/gpu/drm/i915/intel_csr.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
drivers/gpu/drm/msm/disp/mdp_format.c
drivers/gpu/drm/msm/disp/mdp_kms.h
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
drivers/gpu/drm/msm/msm_fb.c
drivers/gpu/drm/msm/msm_fbdev.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_kms.h
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_bo.h
drivers/gpu/drm/nouveau/nouveau_ttm.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/omapdrm/dss/dispc.c
drivers/gpu/drm/omapdrm/dss/hdmi4.c
drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
drivers/gpu/drm/omapdrm/dss/hdmi5.c
drivers/gpu/drm/omapdrm/omap_connector.c
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
drivers/gpu/drm/omapdrm/tcm-sita.c
drivers/gpu/drm/qxl/qxl_cmd.c
drivers/gpu/drm/qxl/qxl_drv.h
drivers/gpu/drm/qxl/qxl_ioctl.c
drivers/gpu/drm/qxl/qxl_release.c
drivers/gpu/drm/sun4i/sun4i_lvds.c
drivers/gpu/drm/ttm/ttm_page_alloc.c
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vc4/vc4_dpi.c
drivers/gpu/drm/vc4/vc4_plane.c
drivers/gpu/drm/virtio/virtgpu_vq.c
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/hid/Kconfig
drivers/hid/hid-ids.h
drivers/hid/hid-lenovo.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/intel-ish-hid/ishtp-hid-client.c
drivers/hid/intel-ish-hid/ishtp/bus.c
drivers/hid/wacom_sys.c
drivers/hwmon/k10temp.c
drivers/hwmon/nct6683.c
drivers/hwmon/scmi-hwmon.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-sprd.c
drivers/i2c/i2c-dev.c
drivers/infiniband/Kconfig
drivers/infiniband/core/cache.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/iwpm_util.c
drivers/infiniband/core/mad.c
drivers/infiniband/core/roce_gid_mgmt.c
drivers/infiniband/core/ucma.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_ioctl.c
drivers/infiniband/core/uverbs_std_types_flow_action.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/cxgb4/resource.c
drivers/infiniband/hw/hfi1/affinity.c
drivers/infiniband/hw/hfi1/driver.c
drivers/infiniband/hw/hfi1/hfi.h
drivers/infiniband/hw/hfi1/init.c
drivers/infiniband/hw/hfi1/pcie.c
drivers/infiniband/hw/hfi1/platform.c
drivers/infiniband/hw/hfi1/qsfp.c
drivers/infiniband/hw/hfi1/ruc.c
drivers/infiniband/hw/hfi1/ud.c
drivers/infiniband/hw/hns/hns_roce_hem.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/hns/hns_roce_qp.c
drivers/infiniband/hw/mlx4/mr.c
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mlx5/Kconfig
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/nes/nes_nic.c
drivers/infiniband/sw/rxe/rxe_opcode.c
drivers/infiniband/sw/rxe/rxe_req.c
drivers/infiniband/sw/rxe/rxe_resp.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/srp/Kconfig
drivers/infiniband/ulp/srpt/Kconfig
drivers/input/evdev.c
drivers/input/input-leds.c
drivers/input/mouse/alps.c
drivers/input/rmi4/rmi_spi.c
drivers/input/touchscreen/Kconfig
drivers/input/touchscreen/atmel_mxt_ts.c
drivers/iommu/amd_iommu.c
drivers/iommu/dma-iommu.c
drivers/iommu/dmar.c
drivers/iommu/intel_irq_remapping.c
drivers/iommu/rockchip-iommu.c
drivers/irqchip/qcom-irq-combiner.c
drivers/md/bcache/alloc.c
drivers/md/bcache/bcache.h
drivers/md/bcache/debug.c
drivers/md/bcache/io.c
drivers/md/bcache/request.c
drivers/md/bcache/super.c
drivers/md/bcache/writeback.c
drivers/md/dm-bufio.c
drivers/md/dm-cache-background-tracker.c
drivers/md/dm-integrity.c
drivers/md/dm-raid1.c
drivers/md/dm.c
drivers/media/i2c/saa7115.c
drivers/media/i2c/saa711x_regs.h
drivers/media/i2c/tda7432.c
drivers/media/i2c/tvp5150.c
drivers/media/i2c/tvp5150_reg.h
drivers/media/i2c/tvp7002.c
drivers/media/i2c/tvp7002_reg.h
drivers/media/media-devnode.c
drivers/media/pci/bt8xx/bttv-audio-hook.c
drivers/media/pci/bt8xx/bttv-audio-hook.h
drivers/media/pci/bt8xx/bttv-cards.c
drivers/media/pci/bt8xx/bttv-driver.c
drivers/media/pci/bt8xx/bttv-i2c.c
drivers/media/pci/cx23885/cx23885-input.c
drivers/media/pci/cx88/cx88-alsa.c
drivers/media/pci/cx88/cx88-blackbird.c
drivers/media/pci/cx88/cx88-core.c
drivers/media/pci/cx88/cx88-i2c.c
drivers/media/pci/cx88/cx88-video.c
drivers/media/radio/radio-aimslab.c
drivers/media/radio/radio-aztech.c
drivers/media/radio/radio-gemtek.c
drivers/media/radio/radio-maxiradio.c
drivers/media/radio/radio-rtrack2.c
drivers/media/radio/radio-sf16fmi.c
drivers/media/radio/radio-terratec.c
drivers/media/radio/radio-trust.c
drivers/media/radio/radio-typhoon.c
drivers/media/radio/radio-zoltrix.c
drivers/media/rc/keymaps/rc-avermedia-m135a.c
drivers/media/rc/keymaps/rc-encore-enltv-fm53.c
drivers/media/rc/keymaps/rc-encore-enltv2.c
drivers/media/rc/keymaps/rc-kaiomy.c
drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c
drivers/media/rc/keymaps/rc-pixelview-new.c
drivers/media/tuners/tea5761.c
drivers/media/tuners/tea5767.c
drivers/media/tuners/tuner-xc2028-types.h
drivers/media/tuners/tuner-xc2028.c
drivers/media/tuners/tuner-xc2028.h
drivers/media/usb/em28xx/em28xx-camera.c
drivers/media/usb/em28xx/em28xx-cards.c
drivers/media/usb/em28xx/em28xx-core.c
drivers/media/usb/em28xx/em28xx-dvb.c
drivers/media/usb/em28xx/em28xx-i2c.c
drivers/media/usb/em28xx/em28xx-input.c
drivers/media/usb/em28xx/em28xx-video.c
drivers/media/usb/em28xx/em28xx.h
drivers/media/usb/gspca/zc3xx-reg.h
drivers/media/usb/tm6000/tm6000-cards.c
drivers/media/usb/tm6000/tm6000-core.c
drivers/media/usb/tm6000/tm6000-i2c.c
drivers/media/usb/tm6000/tm6000-regs.h
drivers/media/usb/tm6000/tm6000-usb-isoc.h
drivers/media/usb/tm6000/tm6000-video.c
drivers/media/usb/tm6000/tm6000.h
drivers/media/v4l2-core/v4l2-dev.c
drivers/media/v4l2-core/v4l2-ioctl.c
drivers/media/v4l2-core/videobuf-core.c
drivers/media/v4l2-core/videobuf-dma-contig.c
drivers/media/v4l2-core/videobuf-dma-sg.c
drivers/media/v4l2-core/videobuf-vmalloc.c
drivers/memory/emif-asm-offsets.c
drivers/message/fusion/mptsas.c
drivers/mtd/chips/cfi_cmdset_0001.c
drivers/mtd/chips/cfi_cmdset_0002.c
drivers/mtd/nand/core.c
drivers/mtd/nand/onenand/omap2.c
drivers/mtd/nand/raw/marvell_nand.c
drivers/mtd/nand/raw/nand_base.c
drivers/mtd/nand/raw/tango_nand.c
drivers/mtd/spi-nor/cadence-quadspi.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/can/dev.c
drivers/net/can/flexcan.c
drivers/net/can/spi/hi311x.c
drivers/net/can/usb/kvaser_usb.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/mv88e6xxx/chip.h
drivers/net/dsa/mv88e6xxx/global2.c
drivers/net/ethernet/amd/xgbe/xgbe-common.h
drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
drivers/net/ethernet/amd/xgbe/xgbe-main.c
drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
drivers/net/ethernet/amd/xgbe/xgbe-pci.c
drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
drivers/net/ethernet/amd/xgbe/xgbe.h
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.h
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/freescale/ucc_geth_ethtool.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
drivers/net/ethernet/intel/ice/ice_common.c
drivers/net/ethernet/intel/ice/ice_controlq.c
drivers/net/ethernet/intel/ice/ice_hw_autogen.h
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_sched.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/marvell/mvpp2.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/netronome/nfp/flower/action.c
drivers/net/ethernet/netronome/nfp/flower/cmsg.h
drivers/net/ethernet/netronome/nfp/flower/main.c
drivers/net/ethernet/netronome/nfp/nfp_app_nic.c
drivers/net/ethernet/netronome/nfp/nfp_main.h
drivers/net/ethernet/netronome/nfp/nfp_net_main.c
drivers/net/ethernet/ni/nixge.c
drivers/net/ethernet/qlogic/qed/qed_l2.c
drivers/net/ethernet/qlogic/qed/qed_ll2.c
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_roce.c
drivers/net/ethernet/qlogic/qede/qede_rdma.c
drivers/net/ethernet/realtek/8139too.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/farch.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ieee802154/atusb.c
drivers/net/ieee802154/mcr20a.c
drivers/net/phy/broadcom.c
drivers/net/phy/marvell.c
drivers/net/phy/phy_device.c
drivers/net/phy/sfp-bus.c
drivers/net/ppp/pppoe.c
drivers/net/team/team.c
drivers/net/usb/qmi_wwan.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
drivers/net/wireless/realtek/rtlwifi/wifi.h
drivers/nvme/host/Kconfig
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/target/Kconfig
drivers/nvme/target/loop.c
drivers/of/fdt.c
drivers/of/overlay.c
drivers/parisc/ccio-dma.c
drivers/pci/dwc/pcie-kirin.c
drivers/pci/host/pci-aardvark.c
drivers/pci/pci-driver.c
drivers/pci/pci.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/intel/pinctrl-sunrisepoint.c
drivers/pinctrl/meson/pinctrl-meson-axg.c
drivers/platform/x86/Kconfig
drivers/platform/x86/asus-wireless.c
drivers/remoteproc/qcom_q6v5_pil.c
drivers/remoteproc/remoteproc_core.c
drivers/rpmsg/rpmsg_char.c
drivers/rtc/rtc-opal.c
drivers/s390/block/dasd_alias.c
drivers/s390/cio/chsc.c
drivers/s390/cio/qdio_setup.c
drivers/s390/cio/vfio_ccw_cp.c
drivers/s390/cio/vfio_ccw_fsm.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_mpc.h
drivers/s390/net/qeth_l2_main.c
drivers/sbus/char/oradax.c
drivers/scsi/aacraid/commsup.c
drivers/scsi/fnic/fnic_trace.c
drivers/scsi/isci/port_config.c
drivers/scsi/megaraid/megaraid_sas_fusion.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sd.c
drivers/scsi/sd_zbc.c
drivers/scsi/storvsc_drv.c
drivers/scsi/ufs/ufshcd.c
drivers/scsi/vmw_pvscsi.c
drivers/slimbus/messaging.c
drivers/soc/bcm/raspberrypi-power.c
drivers/staging/media/imx/imx-media-csi.c
drivers/staging/wilc1000/host_interface.c
drivers/target/target_core_iblock.c
drivers/target/target_core_pscsi.c
drivers/thermal/int340x_thermal/int3403_thermal.c
drivers/thermal/samsung/exynos_tmu.c
drivers/tty/n_gsm.c
drivers/tty/serial/earlycon.c
drivers/tty/serial/imx.c
drivers/tty/serial/mvebu-uart.c
drivers/tty/serial/qcom_geni_serial.c
drivers/tty/serial/xilinx_uartps.c
drivers/tty/tty_io.c
drivers/tty/tty_ldisc.c
drivers/uio/uio_hv_generic.c
drivers/usb/Kconfig
drivers/usb/core/config.c
drivers/usb/core/hcd.c
drivers/usb/core/hub.c
drivers/usb/core/phy.c
drivers/usb/core/phy.h
drivers/usb/core/quirks.c
drivers/usb/dwc2/core.h
drivers/usb/dwc2/gadget.c
drivers/usb/dwc2/hcd.c
drivers/usb/dwc2/pci.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/f_phonet.c
drivers/usb/host/ehci-mem.c
drivers/usb/host/ehci-sched.c
drivers/usb/host/xhci-dbgtty.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/musb/musb_dsps.c
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_host.c
drivers/usb/serial/Kconfig
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/option.c
drivers/usb/serial/usb-serial-simple.c
drivers/usb/serial/visor.c
drivers/usb/typec/tcpm.c
drivers/usb/typec/tps6598x.c
drivers/usb/typec/ucsi/Makefile
drivers/usb/typec/ucsi/ucsi.c
drivers/usb/usbip/stub_main.c
drivers/usb/usbip/usbip_common.h
drivers/usb/usbip/usbip_event.c
drivers/usb/usbip/vhci_hcd.c
drivers/virt/vboxguest/vboxguest_core.c
drivers/virt/vboxguest/vboxguest_core.h
drivers/virt/vboxguest/vboxguest_linux.c
drivers/virt/vboxguest/vboxguest_utils.c
fs/afs/addr_list.c
fs/afs/callback.c
fs/afs/cmservice.c
fs/afs/dir.c
fs/afs/file.c
fs/afs/flock.c
fs/afs/fsclient.c
fs/afs/inode.c
fs/afs/internal.h
fs/afs/rotate.c
fs/afs/rxrpc.c
fs/afs/security.c
fs/afs/server.c
fs/afs/server_list.c
fs/afs/super.c
fs/afs/write.c
fs/btrfs/extent-tree.c
fs/btrfs/relocation.c
fs/btrfs/send.c
fs/ceph/file.c
fs/ceph/xattr.c
fs/cifs/Kconfig
fs/cifs/cifsfs.c
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smb2pdu.h
fs/cifs/smbdirect.c
fs/cifs/transport.c
fs/ext4/balloc.c
fs/ext4/extents.c
fs/ext4/super.c
fs/fs-writeback.c
fs/jbd2/transaction.c
fs/ocfs2/refcounttree.c
fs/proc/base.c
fs/proc/kcore.c
fs/xfs/libxfs/xfs_attr.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_inode_buf.c
fs/xfs/xfs_file.c
include/asm-generic/vmlinux.lds.h
include/dt-bindings/clock/stm32mp1-clks.h
include/kvm/arm_psci.h
include/kvm/arm_vgic.h
include/linux/blk-mq.h
include/linux/blkdev.h
include/linux/bpf.h
include/linux/brcmphy.h
include/linux/ceph/osd_client.h
include/linux/clk-provider.h
include/linux/device.h
include/linux/ethtool.h
include/linux/fsnotify_backend.h
include/linux/genhd.h
include/linux/hrtimer.h
include/linux/kthread.h
include/linux/mlx5/driver.h
include/linux/mm.h
include/linux/mtd/flashchip.h
include/linux/oom.h
include/linux/rbtree_augmented.h
include/linux/rbtree_latch.h
include/linux/remoteproc.h
include/linux/sched.h
include/linux/sched/signal.h
include/linux/serial_core.h
include/linux/stringhash.h
include/linux/ti-emif-sram.h
include/linux/timekeeper_internal.h
include/linux/timekeeping.h
include/linux/tty.h
include/linux/usb/composite.h
include/linux/vbox_utils.h
include/linux/virtio.h
include/linux/wait_bit.h
include/media/i2c/tvp7002.h
include/media/videobuf-core.h
include/media/videobuf-dma-sg.h
include/media/videobuf-vmalloc.h
include/net/bonding.h
include/net/flow_dissector.h
include/net/ife.h
include/net/llc_conn.h
include/net/mac80211.h
include/net/tls.h
include/net/xfrm.h
include/scsi/scsi_dbg.h
include/soc/bcm2835/raspberrypi-firmware.h
include/trace/events/afs.h
include/trace/events/initcall.h
include/trace/events/rxrpc.h
include/trace/events/sunrpc.h
include/trace/events/ufs.h
include/trace/events/workqueue.h
include/trace/events/xen.h
include/uapi/linux/if_infiniband.h
include/uapi/linux/kvm.h
include/uapi/linux/nl80211.h
include/uapi/linux/rds.h
include/uapi/linux/sysctl.h
include/uapi/linux/time.h
include/uapi/linux/tls.h
include/uapi/linux/virtio_balloon.h
include/uapi/rdma/cxgb3-abi.h
include/uapi/rdma/cxgb4-abi.h
include/uapi/rdma/hns-abi.h
include/uapi/rdma/ib_user_cm.h
include/uapi/rdma/ib_user_ioctl_verbs.h
include/uapi/rdma/ib_user_mad.h
include/uapi/rdma/ib_user_sa.h
include/uapi/rdma/ib_user_verbs.h
include/uapi/rdma/mlx4-abi.h
include/uapi/rdma/mlx5-abi.h
include/uapi/rdma/mthca-abi.h
include/uapi/rdma/nes-abi.h
include/uapi/rdma/qedr-abi.h
include/uapi/rdma/rdma_user_cm.h
include/uapi/rdma/rdma_user_ioctl.h
include/uapi/rdma/rdma_user_rxe.h
init/main.c
kernel/bpf/arraymap.c
kernel/bpf/core.c
kernel/bpf/sockmap.c
kernel/bpf/syscall.c
kernel/compat.c
kernel/events/ring_buffer.c
kernel/events/uprobes.c
kernel/kprobes.c
kernel/kthread.c
kernel/module.c
kernel/sched/autogroup.c
kernel/sched/core.c
kernel/sched/cpufreq_schedutil.c
kernel/sched/fair.c
kernel/signal.c
kernel/stop_machine.c
kernel/sysctl_binary.c
kernel/time/clocksource.c
kernel/time/hrtimer.c
kernel/time/posix-stubs.c
kernel/time/posix-timers.c
kernel/time/tick-common.c
kernel/time/tick-internal.h
kernel/time/tick-sched.c
kernel/time/timekeeping.c
kernel/time/timekeeping.h
kernel/trace/bpf_trace.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace_entries.h
kernel/trace/trace_events_filter.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_stack.c
kernel/trace/trace_uprobe.c
kernel/tracepoint.c
lib/dma-direct.c
lib/errseq.c
lib/find_bit_benchmark.c
lib/kobject.c
lib/swiotlb.c
lib/vsprintf.c
mm/backing-dev.c
mm/gup.c
mm/migrate.c
mm/mmap.c
mm/oom_kill.c
mm/sparse.c
mm/vmstat.c
mm/z3fold.c
net/9p/trans_common.c
net/9p/trans_fd.c
net/9p/trans_rdma.c
net/9p/trans_virtio.c
net/9p/trans_xen.c
net/atm/lec.c
net/bridge/br_if.c
net/bridge/netfilter/ebtables.c
net/ceph/messenger.c
net/ceph/mon_client.c
net/ceph/osd_client.c
net/compat.c
net/core/ethtool.c
net/core/filter.c
net/dccp/ccids/ccid2.c
net/dccp/timer.c
net/ieee802154/6lowpan/6lowpan_i.h
net/ieee802154/6lowpan/reassembly.c
net/ife/ife.c
net/ipv4/ping.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_bbr.c
net/ipv4/tcp_input.c
net/ipv4/udp.c
net/ipv6/Kconfig
net/ipv6/ip6_vti.c
net/ipv6/netfilter/Kconfig
net/ipv6/route.c
net/ipv6/seg6_iptunnel.c
net/ipv6/udp.c
net/ipv6/xfrm6_tunnel.c
net/key/af_key.c
net/l2tp/l2tp_debugfs.c
net/l2tp/l2tp_ppp.c
net/llc/af_llc.c
net/llc/llc_c_ac.c
net/llc/llc_conn.c
net/mac80211/agg-tx.c
net/mac80211/mlme.c
net/mac80211/tx.c
net/netfilter/Kconfig
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/nf_conntrack_extend.c
net/netfilter/nf_conntrack_sip.c
net/netfilter/nf_tables_api.c
net/netfilter/xt_connmark.c
net/netlink/af_netlink.c
net/nsh/nsh.c
net/openvswitch/flow_netlink.c
net/packet/af_packet.c
net/packet/internal.h
net/rds/ib_cm.c
net/rds/recv.c
net/rfkill/rfkill-gpio.c
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-internal.h
net/rxrpc/conn_event.c
net/rxrpc/input.c
net/rxrpc/local_event.c
net/rxrpc/local_object.c
net/rxrpc/output.c
net/rxrpc/peer_event.c
net/rxrpc/rxkad.c
net/rxrpc/sendmsg.c
net/sched/act_ife.c
net/sched/act_skbedit.c
net/sched/act_skbmod.c
net/sched/cls_api.c
net/sched/sch_fq.c
net/sctp/associola.c
net/sctp/inqueue.c
net/sctp/ipv6.c
net/sctp/sm_make_chunk.c
net/sctp/sm_statefuns.c
net/sctp/stream.c
net/sctp/ulpevent.c
net/smc/af_smc.c
net/smc/smc_core.c
net/smc/smc_core.h
net/strparser/strparser.c
net/sunrpc/xprtrdma/fmr_ops.c
net/sunrpc/xprtrdma/frwr_ops.c
net/sunrpc/xprtrdma/verbs.c
net/sunrpc/xprtrdma/xprt_rdma.h
net/tipc/node.c
net/tipc/socket.c
net/tls/tls_main.c
net/wireless/core.c
net/wireless/nl80211.c
net/wireless/reg.c
net/xfrm/xfrm_state.c
samples/sockmap/Makefile
scripts/Makefile.gcc-plugins
scripts/Makefile.lib
scripts/dtc/checks.c
scripts/extract_xc3028.pl
scripts/faddr2line
scripts/genksyms/Makefile
scripts/mod/sumversion.c
scripts/split-man.pl
security/commoncap.c
security/selinux/hooks.c
tools/arch/arm/include/uapi/asm/kvm.h
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/x86/include/asm/cpufeatures.h
tools/bpf/Makefile
tools/bpf/bpf_dbg.c
tools/include/uapi/linux/kvm.h
tools/perf/Documentation/perf-mem.txt
tools/perf/arch/s390/util/auxtrace.c
tools/perf/arch/s390/util/header.c
tools/perf/bench/numa.c
tools/perf/builtin-stat.c
tools/perf/pmu-events/arch/s390/mapfile.csv
tools/perf/pmu-events/arch/x86/mapfile.csv
tools/perf/tests/attr/test-record-group-sampling
tools/perf/tests/shell/record+probe_libc_inet_pton.sh
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/machine.c
tools/perf/util/pmu.c
tools/power/acpi/Makefile.config
tools/testing/selftests/bpf/.gitignore
tools/testing/selftests/bpf/test_progs.c
tools/testing/selftests/bpf/test_sock.c
tools/testing/selftests/bpf/test_sock_addr.c
tools/testing/selftests/bpf/test_sock_addr.sh
tools/testing/selftests/firmware/Makefile
tools/testing/selftests/firmware/fw_lib.sh
tools/testing/selftests/firmware/fw_run_tests.sh
tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-extended-error-support.tc
tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc [new file with mode: 0644]
tools/testing/selftests/lib.mk
tools/testing/selftests/net/Makefile
tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
tools/testing/selftests/x86/test_syscall_vdso.c
virt/kvm/arm/arm.c
virt/kvm/arm/psci.c
virt/kvm/arm/vgic/vgic-init.c
virt/kvm/arm/vgic/vgic-mmio-v2.c
virt/kvm/arm/vgic/vgic-mmio.c
virt/kvm/arm/vgic/vgic-v2.c
virt/kvm/arm/vgic/vgic-v3.c
virt/kvm/arm/vgic/vgic.c
virt/kvm/arm/vgic/vgic.h

index d2b6fda..ab2fe0e 100644 (file)
@@ -145,7 +145,7 @@ feature enabled.]
 
 In this mode ``intel_pstate`` registers utilization update callbacks with the
 CPU scheduler in order to run a P-state selection algorithm, either
-``powersave`` or ``performance``, depending on the ``scaling_cur_freq`` policy
+``powersave`` or ``performance``, depending on the ``scaling_governor`` policy
 setting in ``sysfs``.  The current CPU frequency information to be made
 available from the ``scaling_cur_freq`` policy attribute in ``sysfs`` is
 periodically updated by those utilization update callbacks too.
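The hunk above corrects the attribute name: the P-state selection algorithm follows the ``scaling_governor`` policy setting, while ``scaling_cur_freq`` only reports the current frequency. As a hedged illustration (assuming nothing beyond the standard cpufreq sysfs layout for cpu0, which is not part of this patch), a userspace program can switch the algorithm and read the frequency back like this:

  #include <stdio.h>

  /*
   * Minimal sketch: select the "powersave" P-state algorithm for cpu0 and
   * read back scaling_cur_freq.  Paths assume the usual cpufreq sysfs
   * layout; error handling is kept to the bare minimum.
   */
  int main(void)
  {
      const char *gov = "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor";
      const char *cur = "/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq";
      char buf[64];
      FILE *f;

      f = fopen(gov, "w");
      if (!f)
          return 1;
      fputs("powersave\n", f);                    /* or "performance" */
      fclose(f);

      f = fopen(cur, "r");
      if (!f)
          return 1;
      if (fgets(buf, sizeof(buf), f))
          printf("current frequency: %s", buf);   /* value is in kHz */
      fclose(f);
      return 0;
  }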
index 1e5c0f0..dbf5acd 100644 (file)
@@ -15,7 +15,7 @@ Sleep States That Can Be Supported
 ==================================
 
 Depending on its configuration and the capabilities of the platform it runs on,
-the Linux kernel can support up to four system sleep states, includig
+the Linux kernel can support up to four system sleep states, including
 hibernation and up to three variants of system suspend.  The sleep states that
 can be supported by the kernel are listed below.
 
index 1a0b704..da57601 100644 (file)
@@ -557,6 +557,14 @@ A: Although LLVM IR generation and optimization try to stay architecture
        pulls in some header files containing file scope host assembly codes.
      - You can add "-fno-jump-tables" to work around the switch table issue.
 
-   Otherwise, you can use bpf target.
+   Otherwise, you can use bpf target. Additionally, you _must_ use bpf target
+   when:
+
+     - Your program uses data structures with pointer or long / unsigned long
+       types that interface with BPF helpers or context data structures. Access
+       into these structures is verified by the BPF verifier and may result
+       in verification failures if the native architecture is not aligned with
+       the BPF architecture, e.g. 64-bit. An example of this is
+       BPF_PROG_TYPE_SK_MSG require '-target bpf'
 
 Happy BPF hacking!
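To make the requirement added above concrete, here is a hedged sketch of a BPF_PROG_TYPE_SK_MSG program; the SEC() helper is defined inline rather than taken from any particular header, and the object is assumed to be built with "clang -O2 -target bpf -c sk_msg_pass.c -o sk_msg_pass.o" so that the pointer fields of struct sk_msg_md match the 64-bit BPF machine:

  #include <linux/bpf.h>

  /* Place the program and license in their own ELF sections for the loader. */
  #define SEC(name) __attribute__((section(name), used))

  /*
   * struct sk_msg_md exposes data/data_end as pointers, which is exactly
   * the case described above: compile with -target bpf.
   */
  SEC("sk_msg")
  int pass_all_msgs(struct sk_msg_md *msg)
  {
      void *data = (void *)(long)msg->data;
      void *data_end = (void *)(long)msg->data_end;

      if (data + 1 > data_end)        /* bounds check for the verifier */
          return SK_DROP;
      return SK_PASS;
  }

  char _license[] SEC("license") = "GPL";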
index 4bcd4b7..3d01948 100644 (file)
@@ -264,7 +264,10 @@ i) Constructor
                           data device, but just remove the mapping.
 
       read_only: Don't allow any changes to be made to the pool
-                metadata.
+                metadata.  This mode is only available after the
+                thin-pool has been created and first used in full
+                read/write mode.  It cannot be specified on initial
+                thin-pool creation.
 
       error_if_no_space: Error IOs, instead of queueing, if no space.
 
index f4006d3..c760ecb 100644 (file)
@@ -30,7 +30,6 @@ compatible:
 Optional properties:
 - dma-coherent      : Present if dma operations are coherent
 - clocks            : a list of phandle + clock specifier pairs
-- resets            : a list of phandle + reset specifier pairs
 - target-supply     : regulator for SATA target power
 - phys              : reference to the SATA PHY node
 - phy-names         : must be "sata-phy"
index 557fa76..5d2519a 100644 (file)
@@ -38,7 +38,7 @@ Display Timings
   require specific display timings. The panel-timing subnode expresses those
   timings as specified in the timing subnode section of the display timing
   bindings defined in
-  Documentation/devicetree/bindings/display/display-timing.txt.
+  Documentation/devicetree/bindings/display/panel/display-timing.txt.
 
 
 Connectivity
index aadfb23..61315ea 100644 (file)
@@ -26,6 +26,7 @@ Required Properties:
                - "renesas,dmac-r8a7794" (R-Car E2)
                - "renesas,dmac-r8a7795" (R-Car H3)
                - "renesas,dmac-r8a7796" (R-Car M3-W)
+               - "renesas,dmac-r8a77965" (R-Car M3-N)
                - "renesas,dmac-r8a77970" (R-Car V3M)
                - "renesas,dmac-r8a77980" (R-Car V3H)
 
index 23e3abc..c889194 100644 (file)
@@ -4,6 +4,13 @@ Required properties:
 - compatible:
     atmel,maxtouch
 
+    The following compatibles have been used in various products but are
+    deprecated:
+       atmel,qt602240_ts
+       atmel,atmel_mxt_ts
+       atmel,atmel_mxt_tp
+       atmel,mXT224
+
 - reg: The I2C address of the device
 
 - interrupts: The sink for the touchpad's IRQ output
index 93c3a6a..ac71daa 100644 (file)
@@ -5,7 +5,9 @@ Required properties:
 - compatible: Must contain one or more of the following:
   - "renesas,rcar-gen3-canfd" for R-Car Gen3 compatible controller.
   - "renesas,r8a7795-canfd" for R8A7795 (R-Car H3) compatible controller.
-  - "renesas,r8a7796-canfd" for R8A7796 (R-Car M3) compatible controller.
+  - "renesas,r8a7796-canfd" for R8A7796 (R-Car M3-W) compatible controller.
+  - "renesas,r8a77970-canfd" for R8A77970 (R-Car V3M) compatible controller.
+  - "renesas,r8a77980-canfd" for R8A77980 (R-Car V3H) compatible controller.
 
   When compatible with the generic version, nodes must list the
   SoC-specific version corresponding to the platform first, followed by the
index c306f55..890526d 100644 (file)
@@ -18,6 +18,7 @@ Required properties:
 
       - "renesas,etheravb-r8a7795" for the R8A7795 SoC.
       - "renesas,etheravb-r8a7796" for the R8A7796 SoC.
+      - "renesas,etheravb-r8a77965" for the R8A77965 SoC.
       - "renesas,etheravb-r8a77970" for the R8A77970 SoC.
       - "renesas,etheravb-r8a77980" for the R8A77980 SoC.
       - "renesas,etheravb-r8a77995" for the R8A77995 SoC.
index ed5eb54..64bc5c2 100644 (file)
@@ -56,9 +56,9 @@ pins it needs, and how they should be configured, with regard to muxer
 configuration, drive strength and pullups. If one of these options is
 not set, its actual value will be unspecified.
 
-This driver supports the generic pin multiplexing and configuration
-bindings. For details on each properties, you can refer to
-./pinctrl-bindings.txt.
+Allwinner A1X Pin Controller supports the generic pin multiplexing and
+configuration bindings. For details on each properties, you can refer to
+ ./pinctrl-bindings.txt.
 
 Required sub-node properties:
   - pins
index 8ff65fa..c06c045 100644 (file)
@@ -21,7 +21,7 @@ Required properties:
 - interrupts : identifier to the device interrupt
 - clocks : a list of phandle + clock-specifier pairs, one for each
           entry in clock names.
-- clocks-names :
+- clock-names :
    * "xtal" for external xtal clock identifier
    * "pclk" for the bus core clock, either the clk81 clock or the gate clock
    * "baud" for the source of the baudrate generator, can be either the xtal
index 2ae2fee..b7e0e32 100644 (file)
@@ -24,7 +24,7 @@ Required properties:
     - Must contain two elements for the extended variant of the IP
       (marvell,armada-3700-uart-ext): "uart-tx" and "uart-rx",
       respectively the UART TX interrupt and the UART RX interrupt. A
-      corresponding interrupts-names property must be defined.
+      corresponding interrupt-names property must be defined.
     - For backward compatibility reasons, a single element interrupts
       property is also supported for the standard variant of the IP,
       containing only the UART sum interrupt. This form is deprecated
index ad962f4..106808b 100644 (file)
@@ -17,6 +17,8 @@ Required properties:
     - "renesas,scifa-r8a7745" for R8A7745 (RZ/G1E) SCIFA compatible UART.
     - "renesas,scifb-r8a7745" for R8A7745 (RZ/G1E) SCIFB compatible UART.
     - "renesas,hscif-r8a7745" for R8A7745 (RZ/G1E) HSCIF compatible UART.
+    - "renesas,scif-r8a77470" for R8A77470 (RZ/G1C) SCIF compatible UART.
+    - "renesas,hscif-r8a77470" for R8A77470 (RZ/G1C) HSCIF compatible UART.
     - "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART.
     - "renesas,scif-r8a7779" for R8A7779 (R-Car H1) SCIF compatible UART.
     - "renesas,scif-r8a7790" for R8A7790 (R-Car H2) SCIF compatible UART.
@@ -41,6 +43,8 @@ Required properties:
     - "renesas,hscif-r8a7795" for R8A7795 (R-Car H3) HSCIF compatible UART.
     - "renesas,scif-r8a7796" for R8A7796 (R-Car M3-W) SCIF compatible UART.
     - "renesas,hscif-r8a7796" for R8A7796 (R-Car M3-W) HSCIF compatible UART.
+    - "renesas,scif-r8a77965" for R8A77965 (R-Car M3-N) SCIF compatible UART.
+    - "renesas,hscif-r8a77965" for R8A77965 (R-Car M3-N) HSCIF compatible UART.
     - "renesas,scif-r8a77970" for R8A77970 (R-Car V3M) SCIF compatible UART.
     - "renesas,hscif-r8a77970" for R8A77970 (R-Car V3M) HSCIF compatible UART.
     - "renesas,scif-r8a77980" for R8A77980 (R-Car V3H) SCIF compatible UART.
index c4c00df..bd1dd31 100644 (file)
@@ -28,7 +28,10 @@ Required properties:
   - interrupts: one XHCI interrupt should be described here.
 
 Optional properties:
-  - clocks: reference to a clock
+  - clocks: reference to the clocks
+  - clock-names: mandatory if there is a second clock, in this case
+    the name must be "core" for the first clock and "reg" for the
+    second one
   - usb2-lpm-disable: indicate if we don't want to enable USB2 HW LPM
   - usb3-lpm-capable: determines if platform is USB3 LPM capable
   - quirk-broken-port-ped: set if the controller has broken port disable mechanism
index b5f978a..a38d8bf 100644 (file)
@@ -182,6 +182,7 @@ karo        Ka-Ro electronics GmbH
 keithkoep      Keith & Koep GmbH
 keymile        Keymile GmbH
 khadas Khadas
+kiebackpeter    Kieback & Peter GmbH
 kinetic Kinetic Technologies
 kingnovel      Kingnovel Technology Co., Ltd.
 kosagi Sutajio Ko-Usagi PTE Ltd.
index a4feb6d..725fb8d 100644 (file)
@@ -98,6 +98,14 @@ Finally, if you need to remove all overlays in one-go, just call
 of_overlay_remove_all() which will remove every single one in the correct
 order.
 
+In addition, there is the option to register notifiers that get called on
+overlay operations. See of_overlay_notifier_register/unregister and
+enum of_overlay_notify_action for details.
+
+Note that a notifier callback is not supposed to store pointers to a device
+tree node or its content beyond OF_OVERLAY_POST_REMOVE corresponding to the
+respective node it received.
+
 Overlay DTS Format
 ------------------
 
index 96a0423..82a3e43 100644 (file)
@@ -177,14 +177,14 @@ BUGS
 ****
 
 
-Report bugs to Mauro Carvalho Chehab <mchehab@s-opensource.com>
+Report bugs to Mauro Carvalho Chehab <mchehab@kernel.org>
 
 
 COPYRIGHT
 *********
 
 
-Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab@s-opensource.com>.
+Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab+samsung@kernel.org>.
 
 License GPLv2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>.
 
index cf4516d..d5ec95a 100644 (file)
@@ -17,17 +17,17 @@ an error is returned.
 
 request_firmware
 ----------------
-.. kernel-doc:: drivers/base/firmware_class.c
+.. kernel-doc:: drivers/base/firmware_loader/main.c
    :functions: request_firmware
 
 request_firmware_direct
 -----------------------
-.. kernel-doc:: drivers/base/firmware_class.c
+.. kernel-doc:: drivers/base/firmware_loader/main.c
    :functions: request_firmware_direct
 
 request_firmware_into_buf
 -------------------------
-.. kernel-doc:: drivers/base/firmware_class.c
+.. kernel-doc:: drivers/base/firmware_loader/main.c
    :functions: request_firmware_into_buf
 
 Asynchronous firmware requests
@@ -41,7 +41,7 @@ in atomic contexts.
 
 request_firmware_nowait
 -----------------------
-.. kernel-doc:: drivers/base/firmware_class.c
+.. kernel-doc:: drivers/base/firmware_loader/main.c
    :functions: request_firmware_nowait
 
 Special optimizations on reboot
@@ -50,12 +50,12 @@ Special optimizations on reboot
 Some devices have an optimization in place to enable the firmware to be
 retained during system reboot. When such optimizations are used the driver
 author must ensure the firmware is still available on resume from suspend,
-this can be done with firmware_request_cache() insted of requesting for the
-firmare to be loaded.
+this can be done with firmware_request_cache() instead of requesting for the
+firmware to be loaded.
 
 firmware_request_cache()
------------------------
-.. kernel-doc:: drivers/base/firmware_class.c
+------------------------
+.. kernel-doc:: drivers/base/firmware_loader/main.c
    :functions: firmware_request_cache
 
 request firmware API expected driver use
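The kernel-doc references above now point at drivers/base/firmware_loader/main.c; the API itself is unchanged. A hedged driver-side sketch of the basic synchronous request (the device pointer and firmware name are placeholders):

  #include <linux/firmware.h>
  #include <linux/device.h>

  /* Sketch: fetch "example-fw.bin" for a device, use it, release it. */
  static int example_load_firmware(struct device *dev)
  {
      const struct firmware *fw;
      int ret;

      ret = request_firmware(&fw, "example-fw.bin", dev);
      if (ret)
          return ret;        /* not found, or the usermode fallback failed */

      /* fw->data and fw->size stay valid until release_firmware(). */
      dev_info(dev, "got %zu bytes of firmware\n", fw->size);
      /* ... download fw->data to the hardware here ... */

      release_firmware(fw);
      return 0;
  }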
index 6d9ff31..bee1b9a 100644 (file)
@@ -28,7 +28,7 @@ Device Drivers Base
 .. kernel-doc:: drivers/base/node.c
    :internal:
 
-.. kernel-doc:: drivers/base/firmware_class.c
+.. kernel-doc:: drivers/base/firmware_loader/main.c
    :export:
 
 .. kernel-doc:: drivers/base/transport_class.c
index feb3194..48ff580 100644 (file)
@@ -210,7 +210,7 @@ If the connector is dual-role capable, there may also be a switch for the data
 role. USB Type-C Connector Class does not supply separate API for them. The
 port drivers can use USB Role Class API with those.
 
-Illustration of the muxes behind a connector that supports an alternate mode:
+Illustration of the muxes behind a connector that supports an alternate mode::
 
                      ------------------------
                      |       Connector      |
index d04e6e4..fbed645 100644 (file)
@@ -9,8 +9,8 @@ i2c adapters present on your system at a given time. i2cdetect is part of
 the i2c-tools package.
 
 I2C device files are character device files with major device number 89
-and a minor device number corresponding to the number assigned as 
-explained above. They should be called "i2c-%d" (i2c-0, i2c-1, ..., 
+and a minor device number corresponding to the number assigned as
+explained above. They should be called "i2c-%d" (i2c-0, i2c-1, ...,
 i2c-10, ...). All 256 minor device numbers are reserved for i2c.
 
 
@@ -23,11 +23,6 @@ First, you need to include these two headers:
   #include <linux/i2c-dev.h>
   #include <i2c/smbus.h>
 
-(Please note that there are two files named "i2c-dev.h" out there. One is
-distributed with the Linux kernel and the other one is included in the
-source tree of i2c-tools. They used to be different in content but since 2012
-they're identical. You should use "linux/i2c-dev.h").
-
 Now, you have to decide which adapter you want to access. You should
 inspect /sys/class/i2c-dev/ or run "i2cdetect -l" to decide this.
 Adapter numbers are assigned somewhat dynamically, so you can not
@@ -38,7 +33,7 @@ Next thing, open the device file, as follows:
   int file;
   int adapter_nr = 2; /* probably dynamically determined */
   char filename[20];
-  
+
   snprintf(filename, 19, "/dev/i2c-%d", adapter_nr);
   file = open(filename, O_RDWR);
   if (file < 0) {
@@ -72,8 +67,10 @@ the device supports them. Both are illustrated below.
     /* res contains the read word */
   }
 
-  /* Using I2C Write, equivalent of 
-     i2c_smbus_write_word_data(file, reg, 0x6543) */
+  /*
+   * Using I2C Write, equivalent of
+   * i2c_smbus_write_word_data(file, reg, 0x6543)
+   */
   buf[0] = reg;
   buf[1] = 0x43;
   buf[2] = 0x65;
@@ -140,14 +137,14 @@ ioctl(file, I2C_RDWR, struct i2c_rdwr_ioctl_data *msgset)
   set in each message, overriding the values set with the above ioctl's.
 
 ioctl(file, I2C_SMBUS, struct i2c_smbus_ioctl_data *args)
-  Not meant to be called  directly; instead, use the access functions
-  below.
+  If possible, use the provided i2c_smbus_* methods described below instead
+  of issuing direct ioctls.
 
 You can do plain i2c transactions by using read(2) and write(2) calls.
 You do not need to pass the address byte; instead, set it through
 ioctl I2C_SLAVE before you try to access the device.
 
-You can do SMBus level transactions (see documentation file smbus-protocol 
+You can do SMBus level transactions (see documentation file smbus-protocol
 for details) through the following functions:
   __s32 i2c_smbus_write_quick(int file, __u8 value);
   __s32 i2c_smbus_read_byte(int file);
@@ -158,7 +155,7 @@ for details) through the following functions:
   __s32 i2c_smbus_write_word_data(int file, __u8 command, __u16 value);
   __s32 i2c_smbus_process_call(int file, __u8 command, __u16 value);
   __s32 i2c_smbus_read_block_data(int file, __u8 command, __u8 *values);
-  __s32 i2c_smbus_write_block_data(int file, __u8 command, __u8 length, 
+  __s32 i2c_smbus_write_block_data(int file, __u8 command, __u8 length,
                                    __u8 *values);
 All these transactions return -1 on failure; you can read errno to see
 what happened. The 'write' transactions return 0 on success; the
@@ -166,10 +163,9 @@ what happened. The 'write' transactions return 0 on success; the
 returns the number of values read. The block buffers need not be longer
 than 32 bytes.
 
-The above functions are all inline functions, that resolve to calls to
-the i2c_smbus_access function, that on its turn calls a specific ioctl
-with the data in a specific format. Read the source code if you
-want to know what happens behind the screens.
+The above functions are made available by linking against the libi2c library,
+which is provided by the i2c-tools project.  See:
+https://git.kernel.org/pub/scm/utils/i2c-tools/i2c-tools.git/.
 
 
 Implementation details
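Tying the fragments above together, a hedged standalone example that reads one SMBus word; bus number 2, slave address 0x40 and register 0x10 are purely illustrative, and the program links against libi2c (-li2c) from i2c-tools for i2c_smbus_read_word_data(), as the updated text describes:

  #include <stdio.h>
  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/i2c-dev.h>
  #include <i2c/smbus.h>

  int main(void)
  {
      int file, adapter_nr = 2;   /* illustrative: check with "i2cdetect -l" */
      int addr = 0x40;            /* illustrative 7-bit slave address */
      char filename[20];
      __s32 res;

      snprintf(filename, sizeof(filename), "/dev/i2c-%d", adapter_nr);
      file = open(filename, O_RDWR);
      if (file < 0)
          return 1;

      /* Bind this file descriptor to the target slave address. */
      if (ioctl(file, I2C_SLAVE, addr) < 0)
          return 1;

      /* Read a 16-bit word from register 0x10. */
      res = i2c_smbus_read_word_data(file, 0x10);
      if (res < 0)
          return 1;               /* transaction failed, see errno */
      printf("register 0x10 = 0x%04x\n", res);

      close(file);
      return 0;
  }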
index 84bb74d..7f7413e 100644 (file)
@@ -217,7 +217,6 @@ Code  Seq#(hex)     Include File            Comments
 'd'    02-40   pcmcia/ds.h             conflict!
 'd'    F0-FF   linux/digi1.h
 'e'    all     linux/digi1.h           conflict!
-'e'    00-1F   drivers/net/irda/irtty-sir.h    conflict!
 'f'    00-1F   linux/ext2_fs.h         conflict!
 'f'    00-1F   linux/ext3_fs.h         conflict!
 'f'    00-0F   fs/jfs/jfs_dinode.h     conflict!
@@ -247,7 +246,6 @@ Code  Seq#(hex)     Include File            Comments
 'm'    all     linux/synclink.h        conflict!
 'm'    00-19   drivers/message/fusion/mptctl.h conflict!
 'm'    00      drivers/scsi/megaraid/megaraid_ioctl.h  conflict!
-'m'    00-1F   net/irda/irmod.h        conflict!
 'n'    00-7F   linux/ncp_fs.h and fs/ncpfs/ioctl.c
 'n'    80-8F   uapi/linux/nilfs2_api.h NILFS2
 'n'    E0-FF   linux/matroxfb.h        matroxfb
index e6ce1e3..217237f 100644 (file)
@@ -7,7 +7,7 @@ file: uapi/v4l/keytable.c
 
     /* keytable.c - This program allows checking/replacing keys at IR
 
-       Copyright (C) 2006-2009 Mauro Carvalho Chehab <mchehab@infradead.org>
+       Copyright (C) 2006-2009 Mauro Carvalho Chehab <mchehab@kernel.org>
 
        This program is free software; you can redistribute it and/or modify
        it under the terms of the GNU General Public License as published by
index 5aabd0b..f0d0ab6 100644 (file)
@@ -6,7 +6,7 @@ file: media/v4l/v4l2grab.c
 .. code-block:: c
 
     /* V4L2 video picture grabber
-       Copyright (C) 2009 Mauro Carvalho Chehab <mchehab@infradead.org>
+       Copyright (C) 2009 Mauro Carvalho Chehab <mchehab@kernel.org>
 
        This program is free software; you can redistribute it and/or modify
        it under the terms of the GNU General Public License as published by
index b583a73..35ffaa2 100644 (file)
@@ -2126,18 +2126,3 @@ max_dgram_qlen - INTEGER
 
        Default: 10
 
-
-UNDOCUMENTED:
-
-/proc/sys/net/irda/*
-       fast_poll_increase FIXME
-       warn_noreply_time FIXME
-       discovery_slots FIXME
-       slot_timeout FIXME
-       max_baud_rate FIXME
-       discovery_timeout FIXME
-       lap_keepalive_time FIXME
-       max_noreply_time FIXME
-       max_tx_data_size FIXME
-       max_tx_window FIXME
-       min_tx_turn_time FIXME
index 31abd04..6f55eb9 100644 (file)
@@ -168,7 +168,7 @@ update on the CPUs, as discussed below:
 
 [Please bear in mind that the kernel requests the microcode images from
 userspace, using the request_firmware() function defined in
-drivers/base/firmware_class.c]
+drivers/base/firmware_loader/main.c]
 
 
 a. When all the CPUs are identical:
index 00cecf1..633be10 100644 (file)
@@ -157,8 +157,5 @@ memory management. See ``include/sound/sndmagic.h`` for complete list of them. M
 OSS sound drivers have their magic numbers constructed from the soundcard PCI
 ID - these are not listed here as well.
 
-IrDA subsystem also uses large number of own magic numbers, see
-``include/net/irda/irda.h`` for a complete list of them.
-
 HFS is another larger user of magic numbers - you can find them in
 ``fs/hfs/hfs.h``.
index a958d8b..d410f47 100755 (executable)
@@ -387,11 +387,11 @@ tree for more details.
 
 =head1 BUGS
 
-Report bugs to Mauro Carvalho Chehab <mchehab@s-opensource.com>
+Report bugs to Mauro Carvalho Chehab <mchehab@kernel.org>
 
 =head1 COPYRIGHT
 
-Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab@s-opensource.com>.
+Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab+samsung@kernel.org>.
 
 License GPLv2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>.
 
index e45f078..67d9c38 100644 (file)
@@ -461,9 +461,17 @@ of ftrace. Here is a list of some of the key files:
                and ticks at the same rate as the hardware clocksource.
 
        boot:
-               Same as mono. Used to be a separate clock which accounted
-               for the time spent in suspend while CLOCK_MONOTONIC did
-               not.
+               This is the boot clock (CLOCK_BOOTTIME) and is based on the
+               fast monotonic clock, but also accounts for time spent in
+               suspend. Since the clock access is designed for use in
+               tracing in the suspend path, some side effects are possible
+               if clock is accessed after the suspend time is accounted before
+               the fast mono clock is updated. In this case, the clock update
+               appears to happen slightly sooner than it normally would have.
+               Also on 32-bit systems, it's possible that the 64-bit boot offset
+               sees a partial update. These effects are rare and post
+               processing should be able to handle them. See comments in the
+               ktime_get_boot_fast_ns() function for more information.
 
        To set a clock, simply echo the clock name into this file::
 
index 698660b..c77c0f0 100644 (file)
@@ -6,7 +6,7 @@ communicating in English you can also ask the Chinese maintainer for
 help.  Contact the Chinese maintainer if this translation is outdated
 or if there is a problem with the translation.
 
-Maintainer: Mauro Carvalho Chehab <mchehab@infradead.org>
+Maintainer: Mauro Carvalho Chehab <mchehab@kernel.org>
 Chinese maintainer: Fu Wei <tekkamanninja@gmail.com>
 ---------------------------------------------------------------------
 Documentation/video4linux/v4l2-framework.txt 的中文翻译
@@ -14,7 +14,7 @@ Documentation/video4linux/v4l2-framework.txt 的中文翻译
 如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文
 交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻
 译存在问题,请联系中文版维护者。
-英文版维护者: Mauro Carvalho Chehab <mchehab@infradead.org>
+英文版维护者: Mauro Carvalho Chehab <mchehab@kernel.org>
 中文版维护者: 傅炜 Fu Wei <tekkamanninja@gmail.com>
 中文版翻译者: 傅炜 Fu Wei <tekkamanninja@gmail.com>
 中文版校译者: 傅炜 Fu Wei <tekkamanninja@gmail.com>
index 1c7958b..758bf40 100644 (file)
@@ -1960,6 +1960,9 @@ ARM 32-bit VFP control registers have the following id bit patterns:
 ARM 64-bit FP registers have the following id bit patterns:
   0x4030 0000 0012 0 <regno:12>
 
+ARM firmware pseudo-registers have the following bit pattern:
+  0x4030 0000 0014 <regno:16>
+
 
 arm64 registers are mapped using the lower 32 bits. The upper 16 of
 that is the register group type, or coprocessor number:
@@ -1976,6 +1979,9 @@ arm64 CCSIDR registers are demultiplexed by CSSELR value:
 arm64 system registers have the following id bit patterns:
   0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3>
 
+arm64 firmware pseudo-registers have the following bit pattern:
+  0x6030 0000 0014 <regno:16>
+
 
 MIPS registers are mapped using the lower 32 bits.  The upper 16 of that is
 the register group type:
@@ -2510,7 +2516,8 @@ Possible features:
          and execute guest code when KVM_RUN is called.
        - KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
          Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
-       - KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU.
+       - KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 (or a future revision
+          backward compatible with v0.2) for the CPU.
          Depends on KVM_CAP_ARM_PSCI_0_2.
        - KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU.
          Depends on KVM_CAP_ARM_PMU_V3.
diff --git a/Documentation/virtual/kvm/arm/psci.txt b/Documentation/virtual/kvm/arm/psci.txt
new file mode 100644 (file)
index 0000000..aafdab8
--- /dev/null
@@ -0,0 +1,30 @@
+KVM implements the PSCI (Power State Coordination Interface)
+specification in order to provide services such as CPU on/off, reset
+and power-off to the guest.
+
+The PSCI specification is regularly updated to provide new features,
+and KVM implements these updates if they make sense from a virtualization
+point of view.
+
+This means that a guest booted on two different versions of KVM can
+observe two different "firmware" revisions. This could cause issues if
+a given guest is tied to a particular PSCI revision (unlikely), or if
+a migration causes a different PSCI version to be exposed out of the
+blue to an unsuspecting guest.
+
+In order to remedy this situation, KVM exposes a set of "firmware
+pseudo-registers" that can be manipulated using the GET/SET_ONE_REG
+interface. These registers can be saved/restored by userspace, and set
+to a convenient value if required.
+
+The following register is defined:
+
+* KVM_REG_ARM_PSCI_VERSION:
+
+  - Only valid if the vcpu has the KVM_ARM_VCPU_PSCI_0_2 feature set
+    (and thus has already been initialized)
+  - Returns the current PSCI version on GET_ONE_REG (defaulting to the
+    highest PSCI version implemented by KVM and compatible with v0.2)
+  - Allows any PSCI version implemented by KVM and compatible with
+    v0.2 to be set with SET_ONE_REG
+  - Affects the whole VM (even if the register view is per-vcpu)
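A hedged userspace sketch of driving this register (it assumes a vcpu file descriptor created with the KVM_ARM_VCPU_PSCI_0_2 feature set, and the KVM_REG_ARM_PSCI_VERSION constant from the arm/arm64 uapi headers touched by this merge):

  #include <stdio.h>
  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>
  #include <asm/kvm.h>        /* KVM_REG_ARM_PSCI_VERSION on arm/arm64 */

  /* Read the PSCI version exposed to the guest, then write it back unchanged. */
  static int dump_psci_version(int vcpu_fd)
  {
      uint64_t version = 0;
      struct kvm_one_reg reg = {
          .id   = KVM_REG_ARM_PSCI_VERSION,
          .addr = (uintptr_t)&version,
      };

      if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
          return -1;

      /* PSCI encodes major in bits [31:16] and minor in bits [15:0]. */
      printf("PSCI version: %u.%u\n",
             (unsigned int)(version >> 16), (unsigned int)(version & 0xffff));

      /* SET_ONE_REG accepts any KVM-implemented version compatible with v0.2. */
      return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
  }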
index 92be777..92e47b5 100644 (file)
@@ -137,9 +137,9 @@ Maintainers List (try to look for most precise areas first)
                -----------------------------------
 
 3C59X NETWORK DRIVER
-M:     Steffen Klassert <klassert@mathematik.tu-chemnitz.de>
+M:     Steffen Klassert <klassert@kernel.org>
 L:     netdev@vger.kernel.org
-S:     Maintained
+S:     Odd Fixes
 F:     Documentation/networking/vortex.txt
 F:     drivers/net/ethernet/3com/3c59x.c
 
@@ -564,8 +564,9 @@ S:  Maintained
 F:     drivers/media/dvb-frontends/af9033*
 
 AFFS FILE SYSTEM
+M:     David Sterba <dsterba@suse.com>
 L:     linux-fsdevel@vger.kernel.org
-S:     Orphan
+S:     Odd Fixes
 F:     Documentation/filesystems/affs.txt
 F:     fs/affs/
 
@@ -905,6 +906,8 @@ ANDROID ION DRIVER
 M:     Laura Abbott <labbott@redhat.com>
 M:     Sumit Semwal <sumit.semwal@linaro.org>
 L:     devel@driverdev.osuosl.org
+L:     dri-devel@lists.freedesktop.org
+L:     linaro-mm-sig@lists.linaro.org (moderated for non-subscribers)
 S:     Supported
 F:     drivers/staging/android/ion
 F:     drivers/staging/android/uapi/ion.h
@@ -1208,7 +1211,6 @@ F:        drivers/*/*alpine*
 ARM/ARTPEC MACHINE SUPPORT
 M:     Jesper Nilsson <jesper.nilsson@axis.com>
 M:     Lars Persson <lars.persson@axis.com>
-M:     Niklas Cassel <niklas.cassel@axis.com>
 S:     Maintained
 L:     linux-arm-kernel@axis.com
 F:     arch/arm/mach-artpec
@@ -2552,7 +2554,6 @@ F:        Documentation/devicetree/bindings/sound/axentia,*
 F:     sound/soc/atmel/tse850-pcm5142.c
 
 AZ6007 DVB DRIVER
-M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
@@ -2617,7 +2618,7 @@ S:        Maintained
 F:     drivers/net/hamradio/baycom*
 
 BCACHE (BLOCK LAYER CACHE)
-M:     Michael Lyle <mlyle@lyle.org>
+M:     Coly Li <colyli@suse.de>
 M:     Kent Overstreet <kent.overstreet@gmail.com>
 L:     linux-bcache@vger.kernel.org
 W:     http://bcache.evilpiepirate.org
@@ -3081,7 +3082,6 @@ F:        include/linux/btrfs*
 F:     include/uapi/linux/btrfs*
 
 BTTV VIDEO4LINUX DRIVER
-M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
@@ -3691,7 +3691,6 @@ F:        drivers/cpufreq/arm_big_little_dt.c
 
 CPU POWER MONITORING SUBSYSTEM
 M:     Thomas Renninger <trenn@suse.com>
-M:     Shuah Khan <shuahkh@osg.samsung.com>
 M:     Shuah Khan <shuah@kernel.org>
 L:     linux-pm@vger.kernel.org
 S:     Maintained
@@ -3810,7 +3809,6 @@ S:        Maintained
 F:     drivers/media/dvb-frontends/cx24120*
 
 CX88 VIDEO4LINUX DRIVER
-M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
@@ -5051,7 +5049,6 @@ F:        drivers/edac/thunderx_edac*
 
 EDAC-CORE
 M:     Borislav Petkov <bp@alien8.de>
-M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-edac@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git for-next
@@ -5080,7 +5077,6 @@ S:        Maintained
 F:     drivers/edac/fsl_ddr_edac.*
 
 EDAC-GHES
-M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-edac@vger.kernel.org
 S:     Maintained
@@ -5097,21 +5093,18 @@ S:      Maintained
 F:     drivers/edac/i5000_edac.c
 
 EDAC-I5400
-M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-edac@vger.kernel.org
 S:     Maintained
 F:     drivers/edac/i5400_edac.c
 
 EDAC-I7300
-M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-edac@vger.kernel.org
 S:     Maintained
 F:     drivers/edac/i7300_edac.c
 
 EDAC-I7CORE
-M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-edac@vger.kernel.org
 S:     Maintained
@@ -5161,7 +5154,6 @@ S:        Maintained
 F:     drivers/edac/r82600_edac.c
 
 EDAC-SBRIDGE
-M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-edac@vger.kernel.org
 S:     Maintained
@@ -5220,7 +5212,6 @@ S:        Maintained
 F:     drivers/net/ethernet/ibm/ehea/
 
 EM28XX VIDEO4LINUX DRIVER
-M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
@@ -7411,16 +7402,6 @@ S:       Obsolete
 F:     include/uapi/linux/ipx.h
 F:     drivers/staging/ipx/
 
-IRDA SUBSYSTEM
-M:     Samuel Ortiz <samuel@sortiz.org>
-L:     irda-users@lists.sourceforge.net (subscribers-only)
-L:     netdev@vger.kernel.org
-W:     http://irda.sourceforge.net/
-S:     Obsolete
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/sameo/irda-2.6.git
-F:     Documentation/networking/irda.txt
-F:     drivers/staging/irda/
-
 IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
 M:     Marc Zyngier <marc.zyngier@arm.com>
 S:     Maintained
@@ -7685,9 +7666,11 @@ L:       linux-kbuild@vger.kernel.org
 S:     Maintained
 F:     Documentation/kbuild/
 F:     Makefile
-F:     scripts/Makefile.*
+F:     scripts/Kbuild*
+F:     scripts/Makefile*
 F:     scripts/basic/
 F:     scripts/mk*
+F:     scripts/mod/
 F:     scripts/package/
 
 KERNEL JANITORS
@@ -7712,7 +7695,6 @@ F:        include/linux/sunrpc/
 F:     include/uapi/linux/sunrpc/
 
 KERNEL SELFTEST FRAMEWORK
-M:     Shuah Khan <shuahkh@osg.samsung.com>
 M:     Shuah Khan <shuah@kernel.org>
 L:     linux-kselftest@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git
@@ -7753,7 +7735,7 @@ F:        arch/x86/include/asm/svm.h
 F:     arch/x86/kvm/svm.c
 
 KERNEL VIRTUAL MACHINE FOR ARM (KVM/arm)
-M:     Christoffer Dall <christoffer.dall@linaro.org>
+M:     Christoffer Dall <christoffer.dall@arm.com>
 M:     Marc Zyngier <marc.zyngier@arm.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     kvmarm@lists.cs.columbia.edu
@@ -7767,7 +7749,7 @@ F:        virt/kvm/arm/
 F:     include/kvm/arm_*
 
 KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
-M:     Christoffer Dall <christoffer.dall@linaro.org>
+M:     Christoffer Dall <christoffer.dall@arm.com>
 M:     Marc Zyngier <marc.zyngier@arm.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     kvmarm@lists.cs.columbia.edu
@@ -8879,7 +8861,6 @@ F:        Documentation/devicetree/bindings/media/nvidia,tegra-vde.txt
 F:     drivers/staging/media/tegra-vde/
 
 MEDIA INPUT INFRASTRUCTURE (V4L/DVB)
-M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 P:     LinuxTV.org Project
 L:     linux-media@vger.kernel.org
@@ -9733,6 +9714,7 @@ W:        https://fedorahosted.org/dropwatch/
 F:     net/core/drop_monitor.c
 
 NETWORKING DRIVERS
+M:     "David S. Miller" <davem@davemloft.net>
 L:     netdev@vger.kernel.org
 W:     http://www.linuxfoundation.org/en/Net
 Q:     http://patchwork.ozlabs.org/project/netdev/list/
@@ -9889,7 +9871,7 @@ F:        include/linux/platform_data/nxp-nci.h
 F:     Documentation/devicetree/bindings/net/nfc/
 
 NFS, SUNRPC, AND LOCKD CLIENTS
-M:     Trond Myklebust <trond.myklebust@primarydata.com>
+M:     Trond Myklebust <trond.myklebust@hammerspace.com>
 M:     Anna Schumaker <anna.schumaker@netapp.com>
 L:     linux-nfs@vger.kernel.org
 W:     http://client.linux-nfs.org
@@ -10909,7 +10891,6 @@ F:      drivers/pci/host/
 F:     drivers/pci/dwc/
 
 PCIE DRIVER FOR AXIS ARTPEC
-M:     Niklas Cassel <niklas.cassel@axis.com>
 M:     Jesper Nilsson <jesper.nilsson@axis.com>
 L:     linux-arm-kernel@axis.com
 L:     linux-pci@vger.kernel.org
@@ -12239,7 +12220,7 @@ F:      Documentation/s390/vfio-ccw.txt
 F:     include/uapi/linux/vfio_ccw.h
 
 S390 ZCRYPT DRIVER
-M:     Harald Freudenberger <freude@de.ibm.com>
+M:     Harald Freudenberger <freude@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 W:     http://www.ibm.com/developerworks/linux/linux390/
 S:     Supported
@@ -12268,7 +12249,6 @@ S:      Odd Fixes
 F:     drivers/media/i2c/saa6588*
 
 SAA7134 VIDEO4LINUX DRIVER
-M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
@@ -12507,6 +12487,7 @@ F:      drivers/scsi/st_*.h
 SCTP PROTOCOL
 M:     Vlad Yasevich <vyasevich@gmail.com>
 M:     Neil Horman <nhorman@tuxdriver.com>
+M:     Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 L:     linux-sctp@vger.kernel.org
 W:     http://lksctp.sourceforge.net
 S:     Maintained
@@ -12772,7 +12753,6 @@ S:      Maintained
 F:     drivers/media/radio/si4713/radio-usb-si4713.c
 
 SIANO DVB DRIVER
-M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
@@ -13763,7 +13743,6 @@ S:      Maintained
 F:     drivers/media/i2c/tda9840*
 
 TEA5761 TUNER DRIVER
-M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
@@ -13772,7 +13751,6 @@ S:      Odd fixes
 F:     drivers/media/tuners/tea5761.*
 
 TEA5767 TUNER DRIVER
-M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
@@ -13862,7 +13840,6 @@ S:      Supported
 F:     drivers/iommu/tegra*
 
 TEGRA KBC DRIVER
-M:     Rakesh Iyer <riyer@nvidia.com>
 M:     Laxman Dewangan <ldewangan@nvidia.com>
 S:     Supported
 F:     drivers/input/keyboard/tegra-kbc.c
@@ -13965,7 +13942,7 @@ THUNDERBOLT DRIVER
 M:     Andreas Noever <andreas.noever@gmail.com>
 M:     Michael Jamet <michael.jamet@intel.com>
 M:     Mika Westerberg <mika.westerberg@linux.intel.com>
-M:     Yehezkel Bernat <yehezkel.bernat@intel.com>
+M:     Yehezkel Bernat <YehezkelShB@gmail.com>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git
 S:     Maintained
 F:     Documentation/admin-guide/thunderbolt.rst
@@ -13975,7 +13952,7 @@ F:      include/linux/thunderbolt.h
 THUNDERBOLT NETWORK DRIVER
 M:     Michael Jamet <michael.jamet@intel.com>
 M:     Mika Westerberg <mika.westerberg@linux.intel.com>
-M:     Yehezkel Bernat <yehezkel.bernat@intel.com>
+M:     Yehezkel Bernat <YehezkelShB@gmail.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/thunderbolt.c
@@ -14189,7 +14166,6 @@ F:      Documentation/networking/tlan.txt
 F:     drivers/net/ethernet/ti/tlan.*
 
 TM6000 VIDEO4LINUX DRIVER
-M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
@@ -14672,7 +14648,6 @@ F:      drivers/usb/common/usb-otg-fsm.c
 
 USB OVER IP DRIVER
 M:     Valentina Manea <valentina.manea.m@gmail.com>
-M:     Shuah Khan <shuahkh@osg.samsung.com>
 M:     Shuah Khan <shuah@kernel.org>
 L:     linux-usb@vger.kernel.org
 S:     Maintained
@@ -15416,7 +15391,6 @@ S:      Maintained
 F:     arch/x86/entry/vdso/
 
 XC2028/3028 TUNER DRIVER
-M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
index 83b6c54..ba3106b 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,8 +2,8 @@
 VERSION = 4
 PATCHLEVEL = 17
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
-NAME = Fearless Coyote
+EXTRAVERSION = -rc5
+NAME = Merciless Moray
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
index 8e0d665..75dd23a 100644 (file)
@@ -464,6 +464,10 @@ config GCC_PLUGIN_LATENT_ENTROPY
 config GCC_PLUGIN_STRUCTLEAK
        bool "Force initialization of variables containing userspace addresses"
        depends on GCC_PLUGINS
+       # Currently STRUCTLEAK inserts initialization outside the live scope
+       # of variables from KASAN's point of view, which leads to KASAN
+       # false-positive reports. Prohibit this combination for now.
+       depends on !KASAN_EXTRA
        help
          This plugin zero-initializes any structures containing a
          __user attribute. This can prevent some classes of information
index 8bbb6f8..4785fbc 100644 (file)
                                                function = "gmii";
                                                groups = "gmii_gmac0_grp";
                                        };
-                                       /* Settings come from OpenWRT */
+                                       /* Settings come from OpenWRT, pins on SL3516 */
                                        conf0 {
-                                               pins = "R8 GMAC0 RXDV", "U11 GMAC1 RXDV";
+                                               pins = "V8 GMAC0 RXDV", "T10 GMAC1 RXDV";
                                                skew-delay = <0>;
                                        };
                                        conf1 {
-                                               pins = "T8 GMAC0 RXC", "T11 GMAC1 RXC";
+                                               pins = "Y7 GMAC0 RXC", "Y11 GMAC1 RXC";
                                                skew-delay = <15>;
                                        };
                                        conf2 {
-                                               pins = "P8 GMAC0 TXEN", "V11 GMAC1 TXEN";
+                                               pins = "T8 GMAC0 TXEN", "W11 GMAC1 TXEN";
                                                skew-delay = <7>;
                                        };
                                        conf3 {
-                                               pins = "V7 GMAC0 TXC";
+                                               pins = "U8 GMAC0 TXC";
                                                skew-delay = <11>;
                                        };
                                        conf4 {
-                                               pins = "P10 GMAC1 TXC";
+                                               pins = "V11 GMAC1 TXC";
                                                skew-delay = <10>;
                                        };
                                        conf5 {
                                                /* The data lines all have default skew */
-                                               pins = "U8 GMAC0 RXD0", "V8 GMAC0 RXD1",
-                                                      "P9 GMAC0 RXD2", "R9 GMAC0 RXD3",
-                                                      "U7 GMAC0 TXD0", "T7 GMAC0 TXD1",
-                                                      "R7 GMAC0 TXD2", "P7 GMAC0 TXD3",
-                                                      "R11 GMAC1 RXD0", "P11 GMAC1 RXD1",
-                                                      "V12 GMAC1 RXD2", "U12 GMAC1 RXD3",
-                                                      "R10 GMAC1 TXD0", "T10 GMAC1 TXD1",
-                                                      "U10 GMAC1 TXD2", "V10 GMAC1 TXD3";
+                                               pins = "W8 GMAC0 RXD0", "V9 GMAC0 RXD1",
+                                                      "Y8 GMAC0 RXD2", "U9 GMAC0 RXD3",
+                                                      "T7 GMAC0 TXD0", "U6 GMAC0 TXD1",
+                                                      "V7 GMAC0 TXD2", "U7 GMAC0 TXD3",
+                                                      "Y12 GMAC1 RXD0", "V12 GMAC1 RXD1",
+                                                      "T11 GMAC1 RXD2", "W12 GMAC1 RXD3",
+                                                      "U10 GMAC1 TXD0", "Y10 GMAC1 TXD1",
+                                                      "W10 GMAC1 TXD2", "T9 GMAC1 TXD3";
                                                skew-delay = <7>;
                                        };
                                        /* Set up drive strength on GMAC0 to 16 mA */
index bf34319..54111ed 100644 (file)
                        };
 
                        can1: can@53fe4000 {
-                               compatible = "fsl,imx35-flexcan";
+                               compatible = "fsl,imx35-flexcan", "fsl,imx25-flexcan";
                                reg = <0x53fe4000 0x1000>;
                                clocks = <&clks 33>, <&clks 33>;
                                clock-names = "ipg", "per";
                        };
 
                        can2: can@53fe8000 {
-                               compatible = "fsl,imx35-flexcan";
+                               compatible = "fsl,imx35-flexcan", "fsl,imx25-flexcan";
                                reg = <0x53fe8000 0x1000>;
                                clocks = <&clks 34>, <&clks 34>;
                                clock-names = "ipg", "per";
index 7d647d0..3d65c01 100644 (file)
                        };
 
                        can1: can@53fc8000 {
-                               compatible = "fsl,imx53-flexcan";
+                               compatible = "fsl,imx53-flexcan", "fsl,imx25-flexcan";
                                reg = <0x53fc8000 0x4000>;
                                interrupts = <82>;
                                clocks = <&clks IMX5_CLK_CAN1_IPG_GATE>,
                        };
 
                        can2: can@53fcc000 {
-                               compatible = "fsl,imx53-flexcan";
+                               compatible = "fsl,imx53-flexcan", "fsl,imx25-flexcan";
                                reg = <0x53fcc000 0x4000>;
                                interrupts = <83>;
                                clocks = <&clks IMX5_CLK_CAN2_IPG_GATE>,
index 4759048..e554b6e 100644 (file)
 
                        cm2: cm2@8000 {
                                compatible = "ti,omap4-cm2", "simple-bus";
-                               reg = <0x8000 0x3000>;
+                               reg = <0x8000 0x2000>;
                                #address-cells = <1>;
                                #size-cells = <1>;
-                               ranges = <0 0x8000 0x3000>;
+                               ranges = <0 0x8000 0x2000>;
 
                                cm2_clocks: clocks {
                                        #address-cells = <1>;
 
                                prm: prm@6000 {
                                        compatible = "ti,omap4-prm";
-                                       reg = <0x6000 0x3000>;
+                                       reg = <0x6000 0x2000>;
                                        interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
                                        #address-cells = <1>;
                                        #size-cells = <1>;
-                                       ranges = <0 0x6000 0x3000>;
+                                       ranges = <0 0x6000 0x2000>;
 
                                        prm_clocks: clocks {
                                                #address-cells = <1>;
index 2a63fa1..553777a 100644 (file)
@@ -1,6 +1,7 @@
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_USER_NS=y
 CONFIG_RELAY=y
@@ -12,15 +13,21 @@ CONFIG_ARCH_GEMINI=y
 CONFIG_PCI=y
 CONFIG_PREEMPT=y
 CONFIG_AEABI=y
+CONFIG_HIGHMEM=y
+CONFIG_CMA=y
 CONFIG_CMDLINE="console=ttyS0,115200n8"
 CONFIG_KEXEC=y
 CONFIG_BINFMT_MISC=y
 CONFIG_PM=y
+CONFIG_NET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
 CONFIG_MTD=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
 CONFIG_MTD_CFI_INTELEXT=y
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_CFI_STAA=y
@@ -33,6 +40,11 @@ CONFIG_BLK_DEV_SD=y
 # CONFIG_SCSI_LOWLEVEL is not set
 CONFIG_ATA=y
 CONFIG_PATA_FTIDE010=y
+CONFIG_NETDEVICES=y
+CONFIG_GEMINI_ETHERNET=y
+CONFIG_MDIO_BITBANG=y
+CONFIG_MDIO_GPIO=y
+CONFIG_REALTEK_PHY=y
 CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_GPIO=y
 # CONFIG_INPUT_MOUSE is not set
@@ -43,9 +55,19 @@ CONFIG_SERIAL_8250_NR_UARTS=1
 CONFIG_SERIAL_8250_RUNTIME_UARTS=1
 CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
+CONFIG_I2C_GPIO=y
+CONFIG_SPI=y
+CONFIG_SPI_GPIO=y
+CONFIG_SENSORS_GPIO_FAN=y
+CONFIG_SENSORS_LM75=y
+CONFIG_THERMAL=y
 CONFIG_WATCHDOG=y
-CONFIG_GEMINI_WATCHDOG=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_DRM=y
+CONFIG_DRM_PANEL_ILITEK_IL9322=y
+CONFIG_DRM_TVE200=y
+CONFIG_LOGO=y
 CONFIG_USB=y
 CONFIG_USB_MON=y
 CONFIG_USB_FOTG210_HCD=y
@@ -54,6 +76,7 @@ CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_GPIO=y
 CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_DISK=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_RTC_CLASS=y
 CONFIG_DMADEVICES=y
index 2620ce7..371fca4 100644 (file)
@@ -57,6 +57,7 @@ CONFIG_MTD_M25P80=y
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_DENALI_DT=y
 CONFIG_MTD_SPI_NOR=y
+# CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set
 CONFIG_SPI_CADENCE_QUADSPI=y
 CONFIG_OF_OVERLAY=y
 CONFIG_OF_CONFIGFS=y
index c6a7495..c7c28c8 100644 (file)
@@ -77,6 +77,9 @@ struct kvm_arch {
        /* Interrupt controller */
        struct vgic_dist        vgic;
        int max_vcpus;
+
+       /* Mandated version of PSCI */
+       u32 psci_version;
 };
 
 #define KVM_NR_MEM_OBJS     40
index 2ba95d6..caae484 100644 (file)
@@ -195,6 +195,12 @@ struct kvm_arch_memory_slot {
 #define KVM_REG_ARM_VFP_FPINST         0x1009
 #define KVM_REG_ARM_VFP_FPINST2                0x100A
 
+/* KVM-as-firmware specific pseudo-registers */
+#define KVM_REG_ARM_FW                 (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_FW_REG(r)          (KVM_REG_ARM | KVM_REG_SIZE_U64 | \
+                                        KVM_REG_ARM_FW | ((r) & 0xffff))
+#define KVM_REG_ARM_PSCI_VERSION       KVM_REG_ARM_FW_REG(0)
+
 /* Device Control API: ARM VGIC */
 #define KVM_DEV_ARM_VGIC_GRP_ADDR      0
 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
index 1e0784e..a18f33e 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
+#include <kvm/arm_psci.h>
 #include <asm/cputype.h>
 #include <linux/uaccess.h>
 #include <asm/kvm.h>
@@ -176,6 +177,7 @@ static unsigned long num_core_regs(void)
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
 {
        return num_core_regs() + kvm_arm_num_coproc_regs(vcpu)
+               + kvm_arm_get_fw_num_regs(vcpu)
                + NUM_TIMER_REGS;
 }
 
@@ -196,6 +198,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
                uindices++;
        }
 
+       ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
+       if (ret)
+               return ret;
+       uindices += kvm_arm_get_fw_num_regs(vcpu);
+
        ret = copy_timer_indices(vcpu, uindices);
        if (ret)
                return ret;
@@ -214,6 +221,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
                return get_core_reg(vcpu, reg);
 
+       if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
+               return kvm_arm_get_fw_reg(vcpu, reg);
+
        if (is_timer_reg(reg->id))
                return get_timer_reg(vcpu, reg);
 
@@ -230,6 +240,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
                return set_core_reg(vcpu, reg);
 
+       if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
+               return kvm_arm_set_fw_reg(vcpu, reg);
+
        if (is_timer_reg(reg->id))
                return set_timer_reg(vcpu, reg);
 
index 4603c30..0d9ce58 100644 (file)
@@ -243,8 +243,4 @@ arch/arm/mach-omap2/pm-asm-offsets.s: arch/arm/mach-omap2/pm-asm-offsets.c
 include/generated/ti-pm-asm-offsets.h: arch/arm/mach-omap2/pm-asm-offsets.s FORCE
        $(call filechk,offsets,__TI_PM_ASM_OFFSETS_H__)
 
-# For rule to generate ti-emif-asm-offsets.h dependency
-include drivers/memory/Makefile.asm-offsets
-
-arch/arm/mach-omap2/sleep33xx.o: include/generated/ti-pm-asm-offsets.h include/generated/ti-emif-asm-offsets.h
-arch/arm/mach-omap2/sleep43xx.o: include/generated/ti-pm-asm-offsets.h include/generated/ti-emif-asm-offsets.h
+$(obj)/sleep33xx.o $(obj)/sleep43xx.o: include/generated/ti-pm-asm-offsets.h
index 6d4392d..b9846b1 100644 (file)
@@ -7,9 +7,12 @@
 
 #include <linux/kbuild.h>
 #include <linux/platform_data/pm33xx.h>
+#include <linux/ti-emif-sram.h>
 
 int main(void)
 {
+       ti_emif_asm_offsets();
+
        DEFINE(AMX3_PM_WFI_FLAGS_OFFSET,
               offsetof(struct am33xx_pm_sram_data, wfi_flags));
        DEFINE(AMX3_PM_L2_AUX_CTRL_VAL_OFFSET,
index 218d799..322b3bb 100644 (file)
@@ -6,7 +6,6 @@
  *     Dave Gerlach, Vaibhav Bedia
  */
 
-#include <generated/ti-emif-asm-offsets.h>
 #include <generated/ti-pm-asm-offsets.h>
 #include <linux/linkage.h>
 #include <linux/ti-emif-sram.h>
index b24be62..8903814 100644 (file)
@@ -6,7 +6,6 @@
  *     Dave Gerlach, Vaibhav Bedia
  */
 
-#include <generated/ti-emif-asm-offsets.h>
 #include <generated/ti-pm-asm-offsets.h>
 #include <linux/linkage.h>
 #include <linux/ti-emif-sram.h>
index 59589a4..885e8f1 100644 (file)
@@ -427,9 +427,9 @@ static struct gpiod_lookup_table jive_wm8750_gpiod_table = {
        .dev_id         = "spi_gpio",
        .table          = {
                GPIO_LOOKUP("GPIOB", 4,
-                           "gpio-sck", GPIO_ACTIVE_HIGH),
+                           "sck", GPIO_ACTIVE_HIGH),
                GPIO_LOOKUP("GPIOB", 9,
-                           "gpio-mosi", GPIO_ACTIVE_HIGH),
+                           "mosi", GPIO_ACTIVE_HIGH),
                GPIO_LOOKUP("GPIOH", 10,
                            "cs", GPIO_ACTIVE_HIGH),
                { },
index 1540286..87f7d2f 100644 (file)
@@ -56,7 +56,11 @@ KBUILD_AFLAGS        += $(lseinstr) $(brokengasinst)
 KBUILD_CFLAGS  += $(call cc-option,-mabi=lp64)
 KBUILD_AFLAGS  += $(call cc-option,-mabi=lp64)
 
+ifeq ($(cc-name),clang)
+KBUILD_CFLAGS  += -DCONFIG_ARCH_SUPPORTS_INT128
+else
 KBUILD_CFLAGS  += $(call cc-ifversion, -ge, 0500, -DCONFIG_ARCH_SUPPORTS_INT128)
+endif
 
 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
 KBUILD_CPPFLAGS        += -mbig-endian
index 4eef36b..88e712e 100644 (file)
        pinctrl-0 = <&uart_ao_a_pins>;
        pinctrl-names = "default";
 };
+
+&usb0 {
+       status = "okay";
+};
index 22bf374..3e3eb31 100644 (file)
        pinctrl-0 = <&uart_ao_a_pins>;
        pinctrl-names = "default";
 };
+
+&usb0 {
+       status = "okay";
+};
+
+&usb2_phy0 {
+       /*
+        * Even though the schematics don't show it, HDMI_5V is
+        * also used as the supply for the USB VBUS.
+        */
+       phy-supply = <&hdmi_5v>;
+};
index 69c721a..6739697 100644 (file)
        pinctrl-0 = <&uart_ao_a_pins>;
        pinctrl-names = "default";
 };
+
+&usb0 {
+       status = "okay";
+};
index 0a0953f..0cfd701 100644 (file)
        pinctrl-0 = <&uart_ao_a_pins>;
        pinctrl-names = "default";
 };
+
+&usb0 {
+       status = "okay";
+};
index e1a39cb..dba365e 100644 (file)
                        no-map;
                };
        };
+
+       soc {
+               usb0: usb@c9000000 {
+                       status = "disabled";
+                       compatible = "amlogic,meson-gxl-dwc3";
+                       #address-cells = <2>;
+                       #size-cells = <2>;
+                       ranges;
+
+                       clocks = <&clkc CLKID_USB>;
+                       clock-names = "usb_general";
+                       resets = <&reset RESET_USB_OTG>;
+                       reset-names = "usb_otg";
+
+                       dwc3: dwc3@c9000000 {
+                               compatible = "snps,dwc3";
+                               reg = <0x0 0xc9000000 0x0 0x100000>;
+                               interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+                               dr_mode = "host";
+                               maximum-speed = "high-speed";
+                               snps,dis_u2_susphy_quirk;
+                               phys = <&usb3_phy>, <&usb2_phy0>, <&usb2_phy1>;
+                       };
+               };
+       };
+};
+
+&apb {
+       usb2_phy0: phy@78000 {
+               compatible = "amlogic,meson-gxl-usb2-phy";
+               #phy-cells = <0>;
+               reg = <0x0 0x78000 0x0 0x20>;
+               clocks = <&clkc CLKID_USB>;
+               clock-names = "phy";
+               resets = <&reset RESET_USB_OTG>;
+               reset-names = "phy";
+               status = "okay";
+       };
+
+       usb2_phy1: phy@78020 {
+               compatible = "amlogic,meson-gxl-usb2-phy";
+               #phy-cells = <0>;
+               reg = <0x0 0x78020 0x0 0x20>;
+               clocks = <&clkc CLKID_USB>;
+               clock-names = "phy";
+               resets = <&reset RESET_USB_OTG>;
+               reset-names = "phy";
+               status = "okay";
+       };
+
+       usb3_phy: phy@78080 {
+               compatible = "amlogic,meson-gxl-usb3-phy";
+               #phy-cells = <0>;
+               reg = <0x0 0x78080 0x0 0x20>;
+               interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&clkc CLKID_USB>, <&clkc_AO CLKID_AO_CEC_32K>;
+               clock-names = "phy", "peripheral";
+               resets = <&reset RESET_USB_OTG>, <&reset RESET_USB_OTG>;
+               reset-names = "phy", "peripheral";
+               status = "okay";
+       };
 };
 
 &ethmac {
index 4fd46c1..0868da4 100644 (file)
        status = "okay";
        vref-supply = <&vddio_ao18>;
 };
+
+&usb0 {
+       status = "okay";
+};
index d076a7c..247888d 100644 (file)
        };
 };
 
+&apb {
+       usb2_phy2: phy@78040 {
+               compatible = "amlogic,meson-gxl-usb2-phy";
+               #phy-cells = <0>;
+               reg = <0x0 0x78040 0x0 0x20>;
+               clocks = <&clkc CLKID_USB>;
+               clock-names = "phy";
+               resets = <&reset RESET_USB_OTG>;
+               reset-names = "phy";
+               status = "okay";
+       };
+};
+
 &clkc_AO {
        compatible = "amlogic,meson-gxm-aoclkc", "amlogic,meson-gx-aoclkc";
 };
 &hdmi_tx {
        compatible = "amlogic,meson-gxm-dw-hdmi", "amlogic,meson-gx-dw-hdmi";
 };
+
+&dwc3 {
+       phys = <&usb3_phy>, <&usb2_phy0>, <&usb2_phy1>, <&usb2_phy2>;
+};
index 2ac4322..69804c5 100644 (file)
@@ -56,8 +56,6 @@
 
                        gpio_keys {
                                compatible = "gpio-keys";
-                               #address-cells = <1>;
-                               #size-cells = <0>;
 
                                power-button {
                                        debounce_interval = <50>;
index 4b5465d..8c68e0c 100644 (file)
                #size-cells = <1>;
                ranges = <0x0 0x0 0x67d00000 0x00800000>;
 
-               sata0: ahci@210000 {
+               sata0: ahci@0 {
                        compatible = "brcm,iproc-ahci", "generic-ahci";
-                       reg = <0x00210000 0x1000>;
+                       reg = <0x00000000 0x1000>;
                        reg-names = "ahci";
-                       interrupts = <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        status = "disabled";
@@ -52,9 +52,9 @@
                        };
                };
 
-               sata_phy0: sata_phy@212100 {
+               sata_phy0: sata_phy@2100 {
                        compatible = "brcm,iproc-sr-sata-phy";
-                       reg = <0x00212100 0x1000>;
+                       reg = <0x00002100 0x1000>;
                        reg-names = "phy";
                        #address-cells = <1>;
                        #size-cells = <0>;
                        };
                };
 
-               sata1: ahci@310000 {
+               sata1: ahci@10000 {
                        compatible = "brcm,iproc-ahci", "generic-ahci";
-                       reg = <0x00310000 0x1000>;
+                       reg = <0x00010000 0x1000>;
                        reg-names = "ahci";
-                       interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        status = "disabled";
@@ -82,9 +82,9 @@
                        };
                };
 
-               sata_phy1: sata_phy@312100 {
+               sata_phy1: sata_phy@12100 {
                        compatible = "brcm,iproc-sr-sata-phy";
-                       reg = <0x00312100 0x1000>;
+                       reg = <0x00012100 0x1000>;
                        reg-names = "phy";
                        #address-cells = <1>;
                        #size-cells = <0>;
                        };
                };
 
-               sata2: ahci@120000 {
+               sata2: ahci@20000 {
                        compatible = "brcm,iproc-ahci", "generic-ahci";
-                       reg = <0x00120000 0x1000>;
+                       reg = <0x00020000 0x1000>;
                        reg-names = "ahci";
-                       interrupts = <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        status = "disabled";
                        };
                };
 
-               sata_phy2: sata_phy@122100 {
+               sata_phy2: sata_phy@22100 {
                        compatible = "brcm,iproc-sr-sata-phy";
-                       reg = <0x00122100 0x1000>;
+                       reg = <0x00022100 0x1000>;
                        reg-names = "phy";
                        #address-cells = <1>;
                        #size-cells = <0>;
                        };
                };
 
-               sata3: ahci@130000 {
+               sata3: ahci@30000 {
                        compatible = "brcm,iproc-ahci", "generic-ahci";
-                       reg = <0x00130000 0x1000>;
+                       reg = <0x00030000 0x1000>;
                        reg-names = "ahci";
-                       interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        status = "disabled";
                        };
                };
 
-               sata_phy3: sata_phy@132100 {
+               sata_phy3: sata_phy@32100 {
                        compatible = "brcm,iproc-sr-sata-phy";
-                       reg = <0x00132100 0x1000>;
+                       reg = <0x00032100 0x1000>;
                        reg-names = "phy";
                        #address-cells = <1>;
                        #size-cells = <0>;
                        };
                };
 
-               sata4: ahci@330000 {
+               sata4: ahci@100000 {
                        compatible = "brcm,iproc-ahci", "generic-ahci";
-                       reg = <0x00330000 0x1000>;
+                       reg = <0x00100000 0x1000>;
                        reg-names = "ahci";
-                       interrupts = <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        status = "disabled";
                        };
                };
 
-               sata_phy4: sata_phy@332100 {
+               sata_phy4: sata_phy@102100 {
                        compatible = "brcm,iproc-sr-sata-phy";
-                       reg = <0x00332100 0x1000>;
+                       reg = <0x00102100 0x1000>;
                        reg-names = "phy";
                        #address-cells = <1>;
                        #size-cells = <0>;
                        };
                };
 
-               sata5: ahci@400000 {
+               sata5: ahci@110000 {
                        compatible = "brcm,iproc-ahci", "generic-ahci";
-                       reg = <0x00400000 0x1000>;
+                       reg = <0x00110000 0x1000>;
                        reg-names = "ahci";
-                       interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        status = "disabled";
                        };
                };
 
-               sata_phy5: sata_phy@402100 {
+               sata_phy5: sata_phy@112100 {
                        compatible = "brcm,iproc-sr-sata-phy";
-                       reg = <0x00402100 0x1000>;
+                       reg = <0x00112100 0x1000>;
                        reg-names = "phy";
                        #address-cells = <1>;
                        #size-cells = <0>;
                        };
                };
 
-               sata6: ahci@410000 {
+               sata6: ahci@120000 {
                        compatible = "brcm,iproc-ahci", "generic-ahci";
-                       reg = <0x00410000 0x1000>;
+                       reg = <0x00120000 0x1000>;
                        reg-names = "ahci";
-                       interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        status = "disabled";
                        };
                };
 
-               sata_phy6: sata_phy@412100 {
+               sata_phy6: sata_phy@122100 {
                        compatible = "brcm,iproc-sr-sata-phy";
-                       reg = <0x00412100 0x1000>;
+                       reg = <0x00122100 0x1000>;
                        reg-names = "phy";
                        #address-cells = <1>;
                        #size-cells = <0>;
                        };
                };
 
-               sata7: ahci@420000 {
+               sata7: ahci@130000 {
                        compatible = "brcm,iproc-ahci", "generic-ahci";
-                       reg = <0x00420000 0x1000>;
+                       reg = <0x00130000 0x1000>;
                        reg-names = "ahci";
-                       interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        status = "disabled";
                        };
                };
 
-               sata_phy7: sata_phy@422100 {
+               sata_phy7: sata_phy@132100 {
                        compatible = "brcm,iproc-sr-sata-phy";
-                       reg = <0x00422100 0x1000>;
+                       reg = <0x00132100 0x1000>;
                        reg-names = "phy";
                        #address-cells = <1>;
                        #size-cells = <0>;
index 30014a9..ea690b3 100644 (file)
@@ -75,6 +75,7 @@
 #define ARM_CPU_IMP_CAVIUM             0x43
 #define ARM_CPU_IMP_BRCM               0x42
 #define ARM_CPU_IMP_QCOM               0x51
+#define ARM_CPU_IMP_NVIDIA             0x4E
 
 #define ARM_CPU_PART_AEM_V8            0xD0F
 #define ARM_CPU_PART_FOUNDATION                0xD00
 #define QCOM_CPU_PART_FALKOR           0xC00
 #define QCOM_CPU_PART_KRYO             0x200
 
+#define NVIDIA_CPU_PART_DENVER         0x003
+#define NVIDIA_CPU_PART_CARMEL         0x004
+
 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
 #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
 #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
 #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
 #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
+#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
+#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
 
 #ifndef __ASSEMBLY__
 
index 23b33e8..1dab3a9 100644 (file)
@@ -333,7 +333,7 @@ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
        } else {
                u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
                sctlr |= (1 << 25);
-               vcpu_write_sys_reg(vcpu, SCTLR_EL1, sctlr);
+               vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
        }
 }
 
index ab46bc7..469de8a 100644 (file)
@@ -75,6 +75,9 @@ struct kvm_arch {
 
        /* Interrupt controller */
        struct vgic_dist        vgic;
+
+       /* Mandated version of PSCI */
+       u32 psci_version;
 };
 
 #define KVM_NR_MEM_OBJS     40
index b6dbbe3..97d0ef1 100644 (file)
@@ -39,7 +39,7 @@ struct mod_arch_specific {
 u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
                          Elf64_Sym *sym);
 
-u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val);
+u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val);
 
 #ifdef CONFIG_RANDOMIZE_BASE
 extern u64 module_alloc_base;
index 7e2c27e..7c4c8f3 100644 (file)
@@ -230,7 +230,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
        }
 }
 
-extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
+extern void __sync_icache_dcache(pte_t pteval);
 
 /*
  * PTE bits configuration in the presence of hardware Dirty Bit Management
@@ -253,7 +253,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
        pte_t old_pte;
 
        if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
-               __sync_icache_dcache(pte, addr);
+               __sync_icache_dcache(pte);
 
        /*
         * If the existing pte is valid, check for potential race with
index 9abbf30..04b3256 100644 (file)
@@ -206,6 +206,12 @@ struct kvm_arch_memory_slot {
 #define KVM_REG_ARM_TIMER_CNT          ARM64_SYS_REG(3, 3, 14, 3, 2)
 #define KVM_REG_ARM_TIMER_CVAL         ARM64_SYS_REG(3, 3, 14, 0, 2)
 
+/* KVM-as-firmware specific pseudo-registers */
+#define KVM_REG_ARM_FW                 (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_FW_REG(r)          (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
+                                        KVM_REG_ARM_FW | ((r) & 0xffff))
+#define KVM_REG_ARM_PSCI_VERSION       KVM_REG_ARM_FW_REG(0)
+
 /* Device Control API: ARM VGIC */
 #define KVM_DEV_ARM_VGIC_GRP_ADDR      0
 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
index a900bef..e4a1182 100644 (file)
@@ -316,6 +316,7 @@ static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
        MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
        MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
        MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
+       MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
        {},
 };
 
index 536d572..9d1b06d 100644 (file)
@@ -868,6 +868,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
        static const struct midr_range kpti_safe_list[] = {
                MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
                MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+               { /* sentinel */ }
        };
        char const *str = "command line option";
 
index fa36372..f0690c2 100644 (file)
@@ -43,7 +43,7 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
 }
 
 #ifdef CONFIG_ARM64_ERRATUM_843419
-u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val)
+u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val)
 {
        struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
                                                          &mod->arch.init;
index 719fde8..155fd91 100644 (file)
@@ -215,7 +215,7 @@ static int reloc_insn_adrp(struct module *mod, __le32 *place, u64 val)
                insn &= ~BIT(31);
        } else {
                /* out of range for ADR -> emit a veneer */
-               val = module_emit_adrp_veneer(mod, place, val & ~0xfff);
+               val = module_emit_veneer_for_adrp(mod, place, val & ~0xfff);
                if (!val)
                        return -ENOEXEC;
                insn = aarch64_insn_gen_branch_imm((u64)place, val,
index 71d99af..7ff81fe 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/sched/signal.h>
 #include <linux/sched/task_stack.h>
 #include <linux/mm.h>
+#include <linux/nospec.h>
 #include <linux/smp.h>
 #include <linux/ptrace.h>
 #include <linux/user.h>
@@ -249,15 +250,20 @@ static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
 
        switch (note_type) {
        case NT_ARM_HW_BREAK:
-               if (idx < ARM_MAX_BRP)
-                       bp = tsk->thread.debug.hbp_break[idx];
+               if (idx >= ARM_MAX_BRP)
+                       goto out;
+               idx = array_index_nospec(idx, ARM_MAX_BRP);
+               bp = tsk->thread.debug.hbp_break[idx];
                break;
        case NT_ARM_HW_WATCH:
-               if (idx < ARM_MAX_WRP)
-                       bp = tsk->thread.debug.hbp_watch[idx];
+               if (idx >= ARM_MAX_WRP)
+                       goto out;
+               idx = array_index_nospec(idx, ARM_MAX_WRP);
+               bp = tsk->thread.debug.hbp_watch[idx];
                break;
        }
 
+out:
        return bp;
 }
 
@@ -1458,9 +1464,7 @@ static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
 {
        int ret;
        u32 kdata;
-       mm_segment_t old_fs = get_fs();
 
-       set_fs(KERNEL_DS);
        /* Watchpoint */
        if (num < 0) {
                ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
@@ -1471,7 +1475,6 @@ static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
        } else {
                ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
        }
-       set_fs(old_fs);
 
        if (!ret)
                ret = put_user(kdata, data);
@@ -1484,7 +1487,6 @@ static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
 {
        int ret;
        u32 kdata = 0;
-       mm_segment_t old_fs = get_fs();
 
        if (num == 0)
                return 0;
@@ -1493,12 +1495,10 @@ static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
        if (ret)
                return ret;
 
-       set_fs(KERNEL_DS);
        if (num < 0)
                ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
        else
                ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
-       set_fs(old_fs);
 
        return ret;
 }
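
The ptrace hunk above replaces a plain bounds check with the reject-then-clamp pattern from <linux/nospec.h>. A generic sketch of that pattern (hypothetical table and size, not code from this patch):

    #include <linux/nospec.h>

    /* Reject an out-of-range index, then clamp it with array_index_nospec()
     * so a mispredicted bounds check cannot be used to read beyond the table
     * under speculation.
     */
    static void *lookup_entry(void **table, unsigned int idx, unsigned int size)
    {
            if (idx >= size)
                    return NULL;

            idx = array_index_nospec(idx, size);
            return table[idx];
    }
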
index 1cb2749..8bbdc17 100644 (file)
@@ -277,7 +277,8 @@ void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
         * If we were single stepping, we want to get the step exception after
         * we return from the trap.
         */
-       user_fastforward_single_step(current);
+       if (user_mode(regs))
+               user_fastforward_single_step(current);
 }
 
 static LIST_HEAD(undef_hook);
index 959e50d..56a0260 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
+#include <kvm/arm_psci.h>
 #include <asm/cputype.h>
 #include <linux/uaccess.h>
 #include <asm/kvm.h>
@@ -205,7 +206,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
 {
        return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
-                + NUM_TIMER_REGS;
+               + kvm_arm_get_fw_num_regs(vcpu) + NUM_TIMER_REGS;
 }
 
 /**
@@ -225,6 +226,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
                uindices++;
        }
 
+       ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
+       if (ret)
+               return ret;
+       uindices += kvm_arm_get_fw_num_regs(vcpu);
+
        ret = copy_timer_indices(vcpu, uindices);
        if (ret)
                return ret;
@@ -243,6 +249,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
                return get_core_reg(vcpu, reg);
 
+       if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
+               return kvm_arm_get_fw_reg(vcpu, reg);
+
        if (is_timer_reg(reg->id))
                return get_timer_reg(vcpu, reg);
 
@@ -259,6 +268,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
                return set_core_reg(vcpu, reg);
 
+       if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
+               return kvm_arm_set_fw_reg(vcpu, reg);
+
        if (is_timer_reg(reg->id))
                return set_timer_reg(vcpu, reg);
 
index 86801b6..39be799 100644 (file)
 #include <linux/compiler.h>
 #include <linux/irqchip/arm-gic.h>
 #include <linux/kvm_host.h>
+#include <linux/swab.h>
 
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 
+static bool __hyp_text __is_be(struct kvm_vcpu *vcpu)
+{
+       if (vcpu_mode_is_32bit(vcpu))
+               return !!(read_sysreg_el2(spsr) & COMPAT_PSR_E_BIT);
+
+       return !!(read_sysreg(SCTLR_EL1) & SCTLR_ELx_EE);
+}
+
 /*
  * __vgic_v2_perform_cpuif_access -- perform a GICV access on behalf of the
  *                                  guest.
@@ -64,14 +73,19 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
        addr += fault_ipa - vgic->vgic_cpu_base;
 
        if (kvm_vcpu_dabt_iswrite(vcpu)) {
-               u32 data = vcpu_data_guest_to_host(vcpu,
-                                                  vcpu_get_reg(vcpu, rd),
-                                                  sizeof(u32));
+               u32 data = vcpu_get_reg(vcpu, rd);
+               if (__is_be(vcpu)) {
+                       /* guest pre-swabbed data, undo this for writel() */
+                       data = swab32(data);
+               }
                writel_relaxed(data, addr);
        } else {
                u32 data = readl_relaxed(addr);
-               vcpu_set_reg(vcpu, rd, vcpu_data_host_to_guest(vcpu, data,
-                                                              sizeof(u32)));
+               if (__is_be(vcpu)) {
+                       /* guest expects swabbed data */
+                       data = swab32(data);
+               }
+               vcpu_set_reg(vcpu, rd, data);
        }
 
        return 1;
index 806b0b1..6e3b969 100644 (file)
@@ -996,14 +996,12 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
 
        if (id == SYS_ID_AA64PFR0_EL1) {
                if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT))
-                       pr_err_once("kvm [%i]: SVE unsupported for guests, suppressing\n",
-                                   task_pid_nr(current));
+                       kvm_debug("SVE unsupported for guests, suppressing\n");
 
                val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
        } else if (id == SYS_ID_AA64MMFR1_EL1) {
                if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))
-                       pr_err_once("kvm [%i]: LORegions unsupported for guests, suppressing\n",
-                                   task_pid_nr(current));
+                       kvm_debug("LORegions unsupported for guests, suppressing\n");
 
                val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT);
        }
index 0ead8a1..137710f 100644 (file)
@@ -19,5 +19,9 @@ CFLAGS_atomic_ll_sc.o := -fcall-used-x0 -ffixed-x1 -ffixed-x2         \
                   -fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15   \
                   -fcall-saved-x18 -fomit-frame-pointer
 CFLAGS_REMOVE_atomic_ll_sc.o := -pg
+GCOV_PROFILE_atomic_ll_sc.o    := n
+KASAN_SANITIZE_atomic_ll_sc.o  := n
+KCOV_INSTRUMENT_atomic_ll_sc.o := n
+UBSAN_SANITIZE_atomic_ll_sc.o  := n
 
 lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o
index e36ed50..1059884 100644 (file)
@@ -58,7 +58,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
        flush_ptrace_access(vma, page, uaddr, dst, len);
 }
 
-void __sync_icache_dcache(pte_t pte, unsigned long addr)
+void __sync_icache_dcache(pte_t pte)
 {
        struct page *page = pte_page(pte);
 
index 9f3c47a..1b18b47 100644 (file)
@@ -646,8 +646,10 @@ static int keep_initrd __initdata;
 
 void __init free_initrd_mem(unsigned long start, unsigned long end)
 {
-       if (!keep_initrd)
+       if (!keep_initrd) {
                free_reserved_area((void *)start, (void *)end, 0, "initrd");
+               memblock_free(__virt_to_phys(start), end - start);
+       }
 }
 
 static int __init keepinitrd_setup(char *__unused)
index 9e8621d..e17262a 100644 (file)
@@ -216,6 +216,12 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
        memcpy((void *) dst, src, count);
 }
 
+static inline void memset_io(volatile void __iomem *addr, int value,
+                            size_t size)
+{
+       memset((void __force *)addr, value, size);
+}
+
 #define PCI_IO_ADDR    (volatile void __iomem *)
 
 /*
index 617506d..7cd0a22 100644 (file)
@@ -199,3 +199,4 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
        memcpy(dst, src, len);
        return csum_partial(dst, len, sum);
 }
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
index e2364ff..34ac503 100644 (file)
@@ -123,6 +123,9 @@ INSTALL_TARGETS = zinstall install
 
 PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
 
+# Default kernel to build
+all: bzImage
+
 zImage: vmlinuz
 Image: vmlinux
 
index 3b8507f..ee5a78a 100644 (file)
@@ -448,7 +448,8 @@ static int match_by_id(struct device * dev, void * data)
  * Checks all the children of @parent for a matching @id.  If none
  * found, it allocates a new device and returns it.
  */
-static struct parisc_device * alloc_tree_node(struct device *parent, char id)
+static struct parisc_device * __init alloc_tree_node(
+                       struct device *parent, char id)
 {
        struct match_id_data d = {
                .id = id,
@@ -825,8 +826,8 @@ static void walk_lower_bus(struct parisc_device *dev)
  * devices which are not physically connected (such as extra serial &
  * keyboard ports).  This problem is not yet solved.
  */
-static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high,
-                            struct device *parent)
+static void __init walk_native_bus(unsigned long io_io_low,
+       unsigned long io_io_high, struct device *parent)
 {
        int i, devices_found = 0;
        unsigned long hpa = io_io_low;
index 13ee356..ae684ac 100644 (file)
@@ -174,7 +174,7 @@ void pcibios_set_master(struct pci_dev *dev)
  * pcibios_init_bridge() initializes cache line and default latency
  * for pci controllers and pci-pci bridges
  */
-void __init pcibios_init_bridge(struct pci_dev *dev)
+void __ref pcibios_init_bridge(struct pci_dev *dev)
 {
        unsigned short bridge_ctl, bridge_ctl_new;
 
index c383040..a1e772f 100644 (file)
@@ -205,7 +205,7 @@ static int __init rtc_init(void)
 device_initcall(rtc_init);
 #endif
 
-void read_persistent_clock(struct timespec *ts)
+void read_persistent_clock64(struct timespec64 *ts)
 {
        static struct pdc_tod tod_data;
        if (pdc_tod_read(&tod_data) == 0) {
index 68e671a..71d3127 100644 (file)
@@ -837,6 +837,17 @@ void __init initialize_ivt(const void *iva)
        if (pdc_instr(&instr) == PDC_OK)
                ivap[0] = instr;
 
+       /*
+        * Rules for the checksum of the HPMC handler:
+        * 1. The IVA does not point to PDC/PDH space (i.e., the OS has
+        *    installed its own IVA).
+        * 2. The word at IVA + 32 is nonzero.
+        * 3. If Length (IVA + 60) is not zero, then Length (IVA + 60) and
+        *    Address (IVA + 56) are word-aligned.
+        * 4. The checksum of the 8 words starting at IVA + 32 plus the sum of
+        *    the Length/4 words starting at Address is zero.
+        */
+
        /* Compute Checksum for HPMC handler */
        length = os_hpmc_size;
        ivap[7] = length;
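
The checksum rules in the comment above translate directly into a small verification routine. The sketch below is hypothetical (names and types are not from this patch) and merely restates rule 4 as code:

    #include <stdbool.h>
    #include <stdint.h>

    /* words: the 8 words starting at IVA + 32 (so words[6] is Address and
     * words[7] is Length); handler: the Length/4 words starting at Address.
     * Returns true when the 32-bit sum wraps to zero, as rule 4 requires.
     */
    static bool hpmc_checksum_ok(const uint32_t words[8], const uint32_t *handler)
    {
            uint32_t sum = 0;
            uint32_t i;

            for (i = 0; i < 8; i++)
                    sum += words[i];
            for (i = 0; i < words[7] / 4; i++)
                    sum += handler[i];

            return sum == 0;
    }
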
index cab32ee..2607d2d 100644 (file)
@@ -516,7 +516,7 @@ static void __init map_pages(unsigned long start_vaddr,
        }
 }
 
-void free_initmem(void)
+void __ref free_initmem(void)
 {
        unsigned long init_begin = (unsigned long)__init_begin;
        unsigned long init_end = (unsigned long)__init_end;
index 9abddde..b2dabd0 100644 (file)
@@ -69,17 +69,30 @@ struct dyn_arch_ftrace {
 #endif
 
 #if defined(CONFIG_FTRACE_SYSCALLS) && !defined(__ASSEMBLY__)
-#ifdef PPC64_ELF_ABI_v1
+/*
+ * Some syscall entry functions on powerpc start with "ppc_" (fork and clone,
+ * for instance) or ppc32_/ppc64_. We should also match the sys_ variant with
+ * those.
+ */
 #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+#ifdef PPC64_ELF_ABI_v1
+static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
+{
+       /* We need to skip past the initial dot, and the __se_sys alias */
+       return !strcmp(sym + 1, name) ||
+               (!strncmp(sym, ".__se_sys", 9) && !strcmp(sym + 6, name)) ||
+               (!strncmp(sym, ".ppc_", 5) && !strcmp(sym + 5, name + 4)) ||
+               (!strncmp(sym, ".ppc32_", 7) && !strcmp(sym + 7, name + 4)) ||
+               (!strncmp(sym, ".ppc64_", 7) && !strcmp(sym + 7, name + 4));
+}
+#else
 static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
 {
-       /*
-        * Compare the symbol name with the system call name. Skip the .sys or .SyS
-        * prefix from the symbol name and the sys prefix from the system call name and
-        * just match the rest. This is only needed on ppc64 since symbol names on
-        * 32bit do not start with a period so the generic function will work.
-        */
-       return !strcmp(sym + 4, name + 3);
+       return !strcmp(sym, name) ||
+               (!strncmp(sym, "__se_sys", 8) && !strcmp(sym + 5, name)) ||
+               (!strncmp(sym, "ppc_", 4) && !strcmp(sym + 4, name + 4)) ||
+               (!strncmp(sym, "ppc32_", 6) && !strcmp(sym + 6, name + 4)) ||
+               (!strncmp(sym, "ppc64_", 6) && !strcmp(sym + 6, name + 4));
 }
 #endif
 #endif /* CONFIG_FTRACE_SYSCALLS && !__ASSEMBLY__ */
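
The relaxed matching rules can be exercised on their own. Below is a userspace copy of the non-ELFv1 matcher from the hunk above, plus a few symbol/name pairs; the pairs are made up for the example, not taken from a real ftrace run.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Userspace copy of the non-ELFv1 variant shown in the hunk above. */
static bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
        return !strcmp(sym, name) ||
                (!strncmp(sym, "__se_sys", 8) && !strcmp(sym + 5, name)) ||
                (!strncmp(sym, "ppc_", 4) && !strcmp(sym + 4, name + 4)) ||
                (!strncmp(sym, "ppc32_", 6) && !strcmp(sym + 6, name + 4)) ||
                (!strncmp(sym, "ppc64_", 6) && !strcmp(sym + 6, name + 4));
}

int main(void)
{
        /* The generic side always carries the sys_ prefix. */
        printf("%d\n", arch_syscall_match_sym_name("ppc_fork", "sys_fork"));                 /* 1 */
        printf("%d\n", arch_syscall_match_sym_name("__se_sys_clone", "sys_clone"));          /* 1 */
        printf("%d\n", arch_syscall_match_sym_name("ppc64_personality", "sys_personality")); /* 1 */
        printf("%d\n", arch_syscall_match_sym_name("ppc_fork", "sys_clone"));                /* 0 */
        return 0;
}
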
index 4185f1c..3f109a3 100644 (file)
@@ -165,7 +165,6 @@ struct paca_struct {
        u64 saved_msr;                  /* MSR saved here by enter_rtas */
        u16 trap_save;                  /* Used when bad stack is encountered */
        u8 irq_soft_mask;               /* mask for irq soft masking */
-       u8 soft_enabled;                /* irq soft-enable flag */
        u8 irq_happened;                /* irq happened while soft-disabled */
        u8 io_sync;                     /* writel() needs spin_unlock sync */
        u8 irq_work_pending;            /* IRQ_WORK interrupt while soft-disable */
index d1c2d2e..2f3ff7a 100644 (file)
@@ -15,7 +15,7 @@
 extern void powernv_set_nmmu_ptcr(unsigned long ptcr);
 extern struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
                        unsigned long flags,
-                       struct npu_context *(*cb)(struct npu_context *, void *),
+                       void (*cb)(struct npu_context *, void *),
                        void *priv);
 extern void pnv_npu2_destroy_context(struct npu_context *context,
                                struct pci_dev *gpdev);
index 9f42164..16b0778 100644 (file)
@@ -91,6 +91,7 @@ extern int start_topology_update(void);
 extern int stop_topology_update(void);
 extern int prrn_is_enabled(void);
 extern int find_and_online_cpu_nid(int cpu);
+extern int timed_topology_update(int nsecs);
 #else
 static inline int start_topology_update(void)
 {
@@ -108,16 +109,12 @@ static inline int find_and_online_cpu_nid(int cpu)
 {
        return 0;
 }
+static inline int timed_topology_update(int nsecs)
+{
+       return 0;
+}
 #endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
 
-#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_NEED_MULTIPLE_NODES)
-#if defined(CONFIG_PPC_SPLPAR)
-extern int timed_topology_update(int nsecs);
-#else
-#define        timed_topology_update(nsecs)
-#endif /* CONFIG_PPC_SPLPAR */
-#endif /* CONFIG_HOTPLUG_CPU || CONFIG_NEED_MULTIPLE_NODES */
-
 #include <asm-generic/topology.h>
 
 #ifdef CONFIG_SMP
index fe6fc63..38c5b47 100644 (file)
@@ -441,7 +441,6 @@ static int mce_handle_ierror(struct pt_regs *regs,
                                        if (pfn != ULONG_MAX) {
                                                *phys_addr =
                                                        (pfn << PAGE_SHIFT);
-                                               handled = 1;
                                        }
                                }
                        }
@@ -532,9 +531,7 @@ static int mce_handle_derror(struct pt_regs *regs,
                         * kernel/exception-64s.h
                         */
                        if (get_paca()->in_mce < MAX_MCE_DEPTH)
-                               if (!mce_find_instr_ea_and_pfn(regs, addr,
-                                                               phys_addr))
-                                       handled = 1;
+                               mce_find_instr_ea_and_pfn(regs, addr, phys_addr);
                }
                found = 1;
        }
@@ -572,7 +569,7 @@ static long mce_handle_error(struct pt_regs *regs,
                const struct mce_ierror_table itable[])
 {
        struct mce_error_info mce_err = { 0 };
-       uint64_t addr, phys_addr;
+       uint64_t addr, phys_addr = ULONG_MAX;
        uint64_t srr1 = regs->msr;
        long handled;
 
index e16ec7b..9ca7148 100644 (file)
@@ -566,10 +566,35 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
 #endif
 
 #ifdef CONFIG_NMI_IPI
-static void stop_this_cpu(struct pt_regs *regs)
-#else
+static void nmi_stop_this_cpu(struct pt_regs *regs)
+{
+       /*
+        * This is a special case because it never returns, so the NMI IPI
+        * handling would never mark it as done, which makes any later
+        * smp_send_nmi_ipi() call spin forever. Mark it done now.
+        *
+        * IRQs are already hard disabled by the smp_handle_nmi_ipi.
+        */
+       nmi_ipi_lock();
+       nmi_ipi_busy_count--;
+       nmi_ipi_unlock();
+
+       /* Remove this CPU */
+       set_cpu_online(smp_processor_id(), false);
+
+       spin_begin();
+       while (1)
+               spin_cpu_relax();
+}
+
+void smp_send_stop(void)
+{
+       smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
+}
+
+#else /* CONFIG_NMI_IPI */
+
 static void stop_this_cpu(void *dummy)
-#endif
 {
        /* Remove this CPU */
        set_cpu_online(smp_processor_id(), false);
@@ -582,12 +607,22 @@ static void stop_this_cpu(void *dummy)
 
 void smp_send_stop(void)
 {
-#ifdef CONFIG_NMI_IPI
-       smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, stop_this_cpu, 1000000);
-#else
+       static bool stopped = false;
+
+       /*
+        * Prevent waiting on csd lock from a previous smp_send_stop.
+        * This is racy, but in general callers try to do the right
+        * thing and only fire off one smp_send_stop (e.g., see
+        * kernel/panic.c)
+        */
+       if (stopped)
+               return;
+
+       stopped = true;
+
        smp_call_function(stop_this_cpu, NULL, 0);
-#endif
 }
+#endif /* CONFIG_NMI_IPI */
 
 struct thread_info *current_set[NR_CPUS];
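
The fallback path above makes a second smp_send_stop() a no-op instead of letting it wait on a csd lock that will never be released. A minimal userspace sketch of that one-shot guard; the function names here are invented for the sketch.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the IPI broadcast; it only logs in this sketch. */
static void broadcast_stop(void)
{
        puts("asking the other CPUs to stop");
}

/* Mirrors the guard added to the non-NMI smp_send_stop() above:
 * only the first caller broadcasts, later callers return at once. */
static void send_stop(void)
{
        static bool stopped;

        if (stopped)
                return;
        stopped = true;
        broadcast_stop();
}

int main(void)
{
        send_stop();    /* broadcasts */
        send_stop();    /* no-op */
        return 0;
}
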
 
index 6038e2e..876d4f2 100644 (file)
@@ -305,6 +305,13 @@ void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
 }
 
+#ifdef CONFIG_ALTIVEC
+void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
+{
+       kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
+}
+#endif
+
 void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
 {
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
index 737f8a4..c3c39b0 100644 (file)
@@ -133,6 +133,7 @@ int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *
                        start, start + size, rc);
                return -EFAULT;
        }
+       flush_inval_dcache_range(start, start + size);
 
        return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
 }
@@ -159,6 +160,7 @@ int __meminit arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap
 
        /* Remove htab bolted mappings for this section of memory */
        start = (unsigned long)__va(start);
+       flush_inval_dcache_range(start, start + size);
        ret = remove_section_mapping(start, start + size);
 
        /* Ensure all vmalloc mappings are flushed in case they also
index de470ca..fc222a0 100644 (file)
@@ -82,19 +82,6 @@ static const struct file_operations memtrace_fops = {
        .open   = simple_open,
 };
 
-static void flush_memory_region(u64 base, u64 size)
-{
-       unsigned long line_size = ppc64_caches.l1d.size;
-       u64 end = base + size;
-       u64 addr;
-
-       base = round_down(base, line_size);
-       end = round_up(end, line_size);
-
-       for (addr = base; addr < end; addr += line_size)
-               asm volatile("dcbf 0,%0" : "=r" (addr) :: "memory");
-}
-
 static int check_memblock_online(struct memory_block *mem, void *arg)
 {
        if (mem->state != MEM_ONLINE)
@@ -132,10 +119,6 @@ static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)
        walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE,
                          change_memblock_state);
 
-       /* RCU grace period? */
-       flush_memory_region((u64)__va(start_pfn << PAGE_SHIFT),
-                           nr_pages << PAGE_SHIFT);
-
        lock_device_hotplug();
        remove_memory(nid, start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
        unlock_device_hotplug();
index 69a4f9e..525e966 100644 (file)
 
 #define npu_to_phb(x) container_of(x, struct pnv_phb, npu)
 
+/*
+ * spinlock to protect initialisation of an npu_context for a particular
+ * mm_struct.
+ */
+static DEFINE_SPINLOCK(npu_context_lock);
+
+/*
+ * When an address shootdown range exceeds this threshold we invalidate the
+ * entire TLB on the GPU for the given PID rather than each specific address in
+ * the range.
+ */
+#define ATSD_THRESHOLD (2*1024*1024)
+
 /*
  * Other types of TCE cache invalidation are not functional in the
  * hardware.
@@ -401,7 +414,7 @@ struct npu_context {
        bool nmmu_flush;
 
        /* Callback to stop translation requests on a given GPU */
-       struct npu_context *(*release_cb)(struct npu_context *, void *);
+       void (*release_cb)(struct npu_context *context, void *priv);
 
        /*
         * Private pointer passed to the above callback for usage by
@@ -671,11 +684,19 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
        struct npu_context *npu_context = mn_to_npu_context(mn);
        unsigned long address;
 
-       for (address = start; address < end; address += PAGE_SIZE)
-               mmio_invalidate(npu_context, 1, address, false);
+       if (end - start > ATSD_THRESHOLD) {
+               /*
+                * Just invalidate the entire PID if the address range is too
+                * large.
+                */
+               mmio_invalidate(npu_context, 0, 0, true);
+       } else {
+               for (address = start; address < end; address += PAGE_SIZE)
+                       mmio_invalidate(npu_context, 1, address, false);
 
-       /* Do the flush only on the final address == end */
-       mmio_invalidate(npu_context, 1, address, true);
+               /* Do the flush only on the final address == end */
+               mmio_invalidate(npu_context, 1, address, true);
+       }
 }
 
 static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
@@ -696,11 +717,12 @@ static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
  * Returns an error if no contexts are currently available, or an
  * npu_context which should be passed to pnv_npu2_handle_fault().
  *
- * mmap_sem must be held in write mode.
+ * mmap_sem must be held in write mode and must not be called from interrupt
+ * context.
  */
 struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
                        unsigned long flags,
-                       struct npu_context *(*cb)(struct npu_context *, void *),
+                       void (*cb)(struct npu_context *, void *),
                        void *priv)
 {
        int rc;
@@ -743,7 +765,9 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
        /*
         * Setup the NPU context table for a particular GPU. These need to be
         * per-GPU as we need the tables to filter ATSDs when there are no
-        * active contexts on a particular GPU.
+        * active contexts on a particular GPU. It is safe for these to be
+        * called concurrently with destroy as the OPAL call takes appropriate
+        * locks and refcounts on init/destroy.
         */
        rc = opal_npu_init_context(nphb->opal_id, mm->context.id, flags,
                                PCI_DEVID(gpdev->bus->number, gpdev->devfn));
@@ -754,8 +778,29 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
         * We store the npu pci device so we can more easily get at the
         * associated npus.
         */
+       spin_lock(&npu_context_lock);
        npu_context = mm->context.npu_context;
+       if (npu_context) {
+               if (npu_context->release_cb != cb ||
+                       npu_context->priv != priv) {
+                       spin_unlock(&npu_context_lock);
+                       opal_npu_destroy_context(nphb->opal_id, mm->context.id,
+                                               PCI_DEVID(gpdev->bus->number,
+                                                       gpdev->devfn));
+                       return ERR_PTR(-EINVAL);
+               }
+
+               WARN_ON(!kref_get_unless_zero(&npu_context->kref));
+       }
+       spin_unlock(&npu_context_lock);
+
        if (!npu_context) {
+               /*
+                * We can set up these fields without holding the
+                * npu_context_lock as the npu_context hasn't been returned to
+                * the caller meaning it can't be destroyed. Parallel allocation
+                * is protected against by mmap_sem.
+                */
                rc = -ENOMEM;
                npu_context = kzalloc(sizeof(struct npu_context), GFP_KERNEL);
                if (npu_context) {
@@ -774,8 +819,6 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
                }
 
                mm->context.npu_context = npu_context;
-       } else {
-               WARN_ON(!kref_get_unless_zero(&npu_context->kref));
        }
 
        npu_context->release_cb = cb;
@@ -814,15 +857,16 @@ static void pnv_npu2_release_context(struct kref *kref)
                mm_context_remove_copro(npu_context->mm);
 
        npu_context->mm->context.npu_context = NULL;
-       mmu_notifier_unregister(&npu_context->mn,
-                               npu_context->mm);
-
-       kfree(npu_context);
 }
 
+/*
+ * Destroy a context on the given GPU. May free the npu_context if it is no
+ * longer active on any GPUs. Must not be called from interrupt context.
+ */
 void pnv_npu2_destroy_context(struct npu_context *npu_context,
                        struct pci_dev *gpdev)
 {
+       int removed;
        struct pnv_phb *nphb;
        struct npu *npu;
        struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
@@ -844,7 +888,21 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context,
        WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], NULL);
        opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
                                PCI_DEVID(gpdev->bus->number, gpdev->devfn));
-       kref_put(&npu_context->kref, pnv_npu2_release_context);
+       spin_lock(&npu_context_lock);
+       removed = kref_put(&npu_context->kref, pnv_npu2_release_context);
+       spin_unlock(&npu_context_lock);
+
+       /*
+        * We need to do this outside of pnv_npu2_release_context so that it is
+        * outside the spinlock as mmu_notifier_destroy uses SRCU.
+        */
+       if (removed) {
+               mmu_notifier_unregister(&npu_context->mn,
+                                       npu_context->mm);
+
+               kfree(npu_context);
+       }
+
 }
 EXPORT_SYMBOL(pnv_npu2_destroy_context);
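
To see what the ATSD_THRESHOLD change buys, here is a standalone sketch of the pnv_npu2_mn_invalidate_range() decision above, counting how many MMIO invalidates each path would issue. The stub only counts calls, it is not the OPAL/NPU interface, and the addresses in main() are invented.

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define ATSD_THRESHOLD  (2 * 1024 * 1024UL)     /* same value as the hunk above */

/* Stub: count invalidation requests instead of touching NPU registers. */
static unsigned long invalidates;

static void mmio_invalidate_stub(int va_valid, unsigned long va, int flush)
{
        (void)va_valid; (void)va; (void)flush;
        invalidates++;
}

/* Same shape as the invalidate_range hook after the patch. */
static void invalidate_range(unsigned long start, unsigned long end)
{
        unsigned long address;

        if (end - start > ATSD_THRESHOLD) {
                /* One shot: flush the whole PID on the GPU. */
                mmio_invalidate_stub(0, 0, 1);
        } else {
                for (address = start; address < end; address += PAGE_SIZE)
                        mmio_invalidate_stub(1, address, 0);
                /* The final call carries the flush, as in the kernel code. */
                mmio_invalidate_stub(1, address, 1);
        }
}

int main(void)
{
        invalidates = 0;
        invalidate_range(0x10000, 0x10000 + 64 * PAGE_SIZE);
        printf("64 pages -> %lu invalidates\n", invalidates);

        invalidates = 0;
        invalidate_range(0x10000, 0x10000 + 4 * 1024 * 1024UL);
        printf("4 MiB    -> %lu invalidates\n", invalidates);
        return 0;
}
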
 
index f886886..aa2a513 100644 (file)
@@ -48,10 +48,12 @@ unsigned long __init opal_get_boot_time(void)
 
        while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
                rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
-               if (rc == OPAL_BUSY_EVENT)
+               if (rc == OPAL_BUSY_EVENT) {
+                       mdelay(OPAL_BUSY_DELAY_MS);
                        opal_poll_events(NULL);
-               else if (rc == OPAL_BUSY)
-                       mdelay(10);
+               } else if (rc == OPAL_BUSY) {
+                       mdelay(OPAL_BUSY_DELAY_MS);
+               }
        }
        if (rc != OPAL_SUCCESS)
                return 0;
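
The rtc fix above is purely about backing off before retrying: both OPAL_BUSY and OPAL_BUSY_EVENT now wait OPAL_BUSY_DELAY_MS before the next attempt. A hedged sketch of that retry shape, with a fake firmware call and nanosleep() standing in for opal_rtc_read() and mdelay(); the numeric return codes are placeholders for the sketch, not the real OPAL values.

#include <stdio.h>
#include <time.h>

/* Placeholder return codes for the sketch. */
enum { OPAL_SUCCESS = 0, OPAL_BUSY = -2, OPAL_BUSY_EVENT = -12 };
#define OPAL_BUSY_DELAY_MS 10

/* Fake firmware call: reports busy a few times, then succeeds. */
static int fake_rtc_read(void)
{
        static int calls;
        return (++calls < 4) ? OPAL_BUSY : OPAL_SUCCESS;
}

static void delay_ms(int ms)
{
        struct timespec ts = { .tv_sec = 0, .tv_nsec = ms * 1000000L };
        nanosleep(&ts, NULL);
}

int main(void)
{
        int rc = OPAL_BUSY;

        while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
                rc = fake_rtc_read();
                if (rc == OPAL_BUSY_EVENT) {
                        delay_ms(OPAL_BUSY_DELAY_MS);
                        /* the kernel also polls OPAL events here */
                } else if (rc == OPAL_BUSY) {
                        delay_ms(OPAL_BUSY_DELAY_MS);
                }
        }
        printf("rc=%d after retries\n", rc);
        return rc == OPAL_SUCCESS ? 0 : 1;
}
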
index 23d8acc..cd4fd85 100644 (file)
@@ -11,6 +11,7 @@ config RISCV
        select ARCH_WANT_FRAME_POINTERS
        select CLONE_BACKWARDS
        select COMMON_CLK
+       select DMA_DIRECT_OPS
        select GENERIC_CLOCKEVENTS
        select GENERIC_CPU_DEVICES
        select GENERIC_IRQ_SHOW
@@ -89,9 +90,6 @@ config PGTABLE_LEVELS
 config HAVE_KPROBES
        def_bool n
 
-config DMA_DIRECT_OPS
-       def_bool y
-
 menu "Platform type"
 
 choice
index 1e5fd28..4286a5f 100644 (file)
@@ -15,7 +15,6 @@ generic-y += fcntl.h
 generic-y += futex.h
 generic-y += hardirq.h
 generic-y += hash.h
-generic-y += handle_irq.h
 generic-y += hw_irq.h
 generic-y += ioctl.h
 generic-y += ioctls.h
index 324568d..f6561b7 100644 (file)
@@ -52,7 +52,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
 # Add -lgcc so rv32 gets static muldi3 and lshrdi3 definitions.
 # Make sure only to export the intended __vdso_xxx symbol offsets.
 quiet_cmd_vdsold = VDSOLD  $@
-      cmd_vdsold = $(CC) $(KCFLAGS) -nostdlib $(SYSCFLAGS_$(@F)) \
+      cmd_vdsold = $(CC) $(KCFLAGS) $(call cc-option, -no-pie) -nostdlib $(SYSCFLAGS_$(@F)) \
                            -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp -lgcc && \
                    $(CROSS_COMPILE)objcopy \
                            $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@
index 6176fe9..941d8cc 100644 (file)
@@ -261,9 +261,9 @@ CONFIG_IP_VS_NQ=m
 CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_TABLES_IPV4=m
+CONFIG_NF_TABLES_IPV4=y
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NF_TABLES_ARP=m
+CONFIG_NF_TABLES_ARP=y
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
@@ -284,7 +284,7 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_TABLES_IPV6=m
+CONFIG_NF_TABLES_IPV6=y
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
@@ -305,7 +305,7 @@ CONFIG_IP6_NF_RAW=m
 CONFIG_IP6_NF_SECURITY=m
 CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
-CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NF_TABLES_BRIDGE=y
 CONFIG_RDS=m
 CONFIG_RDS_RDMA=m
 CONFIG_RDS_TCP=m
@@ -604,7 +604,6 @@ CONFIG_DETECT_HUNG_TASK=y
 CONFIG_WQ_WATCHDOG=y
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_DEBUG_TIMEKEEPING=y
-CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_LOCK_STAT=y
 CONFIG_DEBUG_LOCKDEP=y
index c105bcc..eb6f75f 100644 (file)
@@ -259,9 +259,9 @@ CONFIG_IP_VS_NQ=m
 CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_TABLES_IPV4=m
+CONFIG_NF_TABLES_IPV4=y
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
-CONFIG_NF_TABLES_ARP=m
+CONFIG_NF_TABLES_ARP=y
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
@@ -282,7 +282,7 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_TABLES_IPV6=m
+CONFIG_NF_TABLES_IPV6=y
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
@@ -303,7 +303,7 @@ CONFIG_IP6_NF_RAW=m
 CONFIG_IP6_NF_SECURITY=m
 CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
-CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NF_TABLES_BRIDGE=y
 CONFIG_RDS=m
 CONFIG_RDS_RDMA=m
 CONFIG_RDS_TCP=m
index e8077f0..2bf01ba 100644 (file)
@@ -13,6 +13,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/nospec-insn.h>
 #include <asm/vx-insn.h>
 
 /* Vector register range containing CRC-32 constants */
@@ -67,6 +68,8 @@
 
 .previous
 
+       GEN_BR_THUNK %r14
+
 .text
 /*
  * The CRC-32 function(s) use these calling conventions:
@@ -203,6 +206,6 @@ ENTRY(crc32_be_vgfm_16)
 
 .Ldone:
        VLGVF   %r2,%v2,3
-       br      %r14
+       BR_EX   %r14
 
 .previous
index d8c67a5..7d6f568 100644 (file)
@@ -14,6 +14,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/nospec-insn.h>
 #include <asm/vx-insn.h>
 
 /* Vector register range containing CRC-32 constants */
@@ -76,6 +77,7 @@
 
 .previous
 
+       GEN_BR_THUNK %r14
 
 .text
 
@@ -264,6 +266,6 @@ crc32_le_vgfm_generic:
 
 .Ldone:
        VLGVF   %r2,%v2,2
-       br      %r14
+       BR_EX   %r14
 
 .previous
diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h
new file mode 100644 (file)
index 0000000..a01f811
--- /dev/null
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_NOSPEC_ASM_H
+#define _ASM_S390_NOSPEC_ASM_H
+
+#include <asm/alternative-asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/dwarf.h>
+
+#ifdef __ASSEMBLY__
+
+#ifdef CONFIG_EXPOLINE
+
+_LC_BR_R1 = __LC_BR_R1
+
+/*
+ * The expoline macros are used to create thunks in the same format
+ * as gcc generates them. The 'comdat' section flag makes sure that
+ * the various thunks are merged into a single copy.
+ */
+       .macro __THUNK_PROLOG_NAME name
+       .pushsection .text.\name,"axG",@progbits,\name,comdat
+       .globl \name
+       .hidden \name
+       .type \name,@function
+\name:
+       CFI_STARTPROC
+       .endm
+
+       .macro __THUNK_EPILOG
+       CFI_ENDPROC
+       .popsection
+       .endm
+
+       .macro __THUNK_PROLOG_BR r1,r2
+       __THUNK_PROLOG_NAME __s390x_indirect_jump_r\r2\()use_r\r1
+       .endm
+
+       .macro __THUNK_PROLOG_BC d0,r1,r2
+       __THUNK_PROLOG_NAME __s390x_indirect_branch_\d0\()_\r2\()use_\r1
+       .endm
+
+       .macro __THUNK_BR r1,r2
+       jg      __s390x_indirect_jump_r\r2\()use_r\r1
+       .endm
+
+       .macro __THUNK_BC d0,r1,r2
+       jg      __s390x_indirect_branch_\d0\()_\r2\()use_\r1
+       .endm
+
+       .macro __THUNK_BRASL r1,r2,r3
+       brasl   \r1,__s390x_indirect_jump_r\r3\()use_r\r2
+       .endm
+
+       .macro  __DECODE_RR expand,reg,ruse
+       .set __decode_fail,1
+       .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+       .ifc \reg,%r\r1
+       .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+       .ifc \ruse,%r\r2
+       \expand \r1,\r2
+       .set __decode_fail,0
+       .endif
+       .endr
+       .endif
+       .endr
+       .if __decode_fail == 1
+       .error "__DECODE_RR failed"
+       .endif
+       .endm
+
+       .macro  __DECODE_RRR expand,rsave,rtarget,ruse
+       .set __decode_fail,1
+       .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+       .ifc \rsave,%r\r1
+       .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+       .ifc \rtarget,%r\r2
+       .irp r3,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+       .ifc \ruse,%r\r3
+       \expand \r1,\r2,\r3
+       .set __decode_fail,0
+       .endif
+       .endr
+       .endif
+       .endr
+       .endif
+       .endr
+       .if __decode_fail == 1
+       .error "__DECODE_RRR failed"
+       .endif
+       .endm
+
+       .macro  __DECODE_DRR expand,disp,reg,ruse
+       .set __decode_fail,1
+       .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+       .ifc \reg,%r\r1
+       .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+       .ifc \ruse,%r\r2
+       \expand \disp,\r1,\r2
+       .set __decode_fail,0
+       .endif
+       .endr
+       .endif
+       .endr
+       .if __decode_fail == 1
+       .error "__DECODE_DRR failed"
+       .endif
+       .endm
+
+       .macro __THUNK_EX_BR reg,ruse
+       # Be very careful when adding instructions to this macro!
+       # The ALTERNATIVE replacement code has a .+10 which targets
+       # the "br \reg" after the code has been patched.
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+       exrl    0,555f
+       j       .
+#else
+       .ifc \reg,%r1
+       ALTERNATIVE "ex %r0,_LC_BR_R1", ".insn ril,0xc60000000000,0,.+10", 35
+       j       .
+       .else
+       larl    \ruse,555f
+       ex      0,0(\ruse)
+       j       .
+       .endif
+#endif
+555:   br      \reg
+       .endm
+
+       .macro __THUNK_EX_BC disp,reg,ruse
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+       exrl    0,556f
+       j       .
+#else
+       larl    \ruse,556f
+       ex      0,0(\ruse)
+       j       .
+#endif
+556:   b       \disp(\reg)
+       .endm
+
+       .macro GEN_BR_THUNK reg,ruse=%r1
+       __DECODE_RR __THUNK_PROLOG_BR,\reg,\ruse
+       __THUNK_EX_BR \reg,\ruse
+       __THUNK_EPILOG
+       .endm
+
+       .macro GEN_B_THUNK disp,reg,ruse=%r1
+       __DECODE_DRR __THUNK_PROLOG_BC,\disp,\reg,\ruse
+       __THUNK_EX_BC \disp,\reg,\ruse
+       __THUNK_EPILOG
+       .endm
+
+       .macro BR_EX reg,ruse=%r1
+557:   __DECODE_RR __THUNK_BR,\reg,\ruse
+       .pushsection .s390_indirect_branches,"a",@progbits
+       .long   557b-.
+       .popsection
+       .endm
+
+        .macro B_EX disp,reg,ruse=%r1
+558:   __DECODE_DRR __THUNK_BC,\disp,\reg,\ruse
+       .pushsection .s390_indirect_branches,"a",@progbits
+       .long   558b-.
+       .popsection
+       .endm
+
+       .macro BASR_EX rsave,rtarget,ruse=%r1
+559:   __DECODE_RRR __THUNK_BRASL,\rsave,\rtarget,\ruse
+       .pushsection .s390_indirect_branches,"a",@progbits
+       .long   559b-.
+       .popsection
+       .endm
+
+#else
+       .macro GEN_BR_THUNK reg,ruse=%r1
+       .endm
+
+       .macro GEN_B_THUNK disp,reg,ruse=%r1
+       .endm
+
+        .macro BR_EX reg,ruse=%r1
+       br      \reg
+       .endm
+
+        .macro B_EX disp,reg,ruse=%r1
+       b       \disp(\reg)
+       .endm
+
+       .macro BASR_EX rsave,rtarget,ruse=%r1
+       basr    \rsave,\rtarget
+       .endm
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_NOSPEC_ASM_H */
index e297bcf..6090670 100644 (file)
 
 int verify_sha256_digest(void);
 
+extern u64 kernel_entry;
+extern u64 kernel_type;
+
+extern u64 crash_start;
+extern u64 crash_size;
+
 #endif /* __ASSEMBLY__ */
 #endif /* _S390_PURGATORY_H_ */
index 83ba575..3c883c3 100644 (file)
@@ -45,6 +45,9 @@ struct thread_info {
 void arch_release_task_struct(struct task_struct *tsk);
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 
+void arch_setup_new_exec(void);
+#define arch_setup_new_exec arch_setup_new_exec
+
 #endif
 
 /*
index 84ea622..f92dd8e 100644 (file)
@@ -65,6 +65,7 @@ obj-y += nospec-branch.o
 
 extra-y                                += head.o head64.o vmlinux.lds
 
+obj-$(CONFIG_SYSFS)            += nospec-sysfs.o
 CFLAGS_REMOVE_nospec-branch.o  += $(CC_FLAGS_EXPOLINE)
 
 obj-$(CONFIG_MODULES)          += module.o
index eb2a5c0..11aea74 100644 (file)
@@ -181,6 +181,7 @@ int main(void)
        OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
        OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count);
        OFFSET(__LC_GMAP, lowcore, gmap);
+       OFFSET(__LC_BR_R1, lowcore, br_r1_trampoline);
        /* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
        OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
        /* hardware defined lowcore locations 0x1000 - 0x18ff */
index f6c5600..b65874b 100644 (file)
@@ -9,18 +9,22 @@
 
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
+#include <asm/nospec-insn.h>
 #include <asm/ptrace.h>
 #include <asm/sigp.h>
 
+       GEN_BR_THUNK %r9
+       GEN_BR_THUNK %r14
+
 ENTRY(s390_base_mcck_handler)
        basr    %r13,0
 0:     lg      %r15,__LC_PANIC_STACK   # load panic stack
        aghi    %r15,-STACK_FRAME_OVERHEAD
        larl    %r1,s390_base_mcck_handler_fn
-       lg      %r1,0(%r1)
-       ltgr    %r1,%r1
+       lg      %r9,0(%r1)
+       ltgr    %r9,%r9
        jz      1f
-       basr    %r14,%r1
+       BASR_EX %r14,%r9
 1:     la      %r1,4095
        lmg     %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
        lpswe   __LC_MCK_OLD_PSW
@@ -37,10 +41,10 @@ ENTRY(s390_base_ext_handler)
        basr    %r13,0
 0:     aghi    %r15,-STACK_FRAME_OVERHEAD
        larl    %r1,s390_base_ext_handler_fn
-       lg      %r1,0(%r1)
-       ltgr    %r1,%r1
+       lg      %r9,0(%r1)
+       ltgr    %r9,%r9
        jz      1f
-       basr    %r14,%r1
+       BASR_EX %r14,%r9
 1:     lmg     %r0,%r15,__LC_SAVE_AREA_ASYNC
        ni      __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
        lpswe   __LC_EXT_OLD_PSW
@@ -57,10 +61,10 @@ ENTRY(s390_base_pgm_handler)
        basr    %r13,0
 0:     aghi    %r15,-STACK_FRAME_OVERHEAD
        larl    %r1,s390_base_pgm_handler_fn
-       lg      %r1,0(%r1)
-       ltgr    %r1,%r1
+       lg      %r9,0(%r1)
+       ltgr    %r9,%r9
        jz      1f
-       basr    %r14,%r1
+       BASR_EX %r14,%r9
        lmg     %r0,%r15,__LC_SAVE_AREA_SYNC
        lpswe   __LC_PGM_OLD_PSW
 1:     lpswe   disabled_wait_psw-0b(%r13)
@@ -117,7 +121,7 @@ ENTRY(diag308_reset)
        larl    %r4,.Lcontinue_psw      # Restore PSW flags
        lpswe   0(%r4)
 .Lcontinue:
-       br      %r14
+       BR_EX   %r14
 .align 16
 .Lrestart_psw:
        .long   0x00080000,0x80000000 + .Lrestart_part2
index 3f22f13..f03402e 100644 (file)
@@ -28,6 +28,7 @@
 #include <asm/setup.h>
 #include <asm/nmi.h>
 #include <asm/export.h>
+#include <asm/nospec-insn.h>
 
 __PT_R0      = __PT_GPRS
 __PT_R1      = __PT_GPRS + 8
@@ -183,67 +184,9 @@ _LPP_OFFSET        = __LC_LPP
                    "jnz .+8; .long 0xb2e8d000", 82
        .endm
 
-#ifdef CONFIG_EXPOLINE
-
-       .macro GEN_BR_THUNK name,reg,tmp
-       .section .text.\name,"axG",@progbits,\name,comdat
-       .globl \name
-       .hidden \name
-       .type \name,@function
-\name:
-       CFI_STARTPROC
-#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
-       exrl    0,0f
-#else
-       larl    \tmp,0f
-       ex      0,0(\tmp)
-#endif
-       j       .
-0:     br      \reg
-       CFI_ENDPROC
-       .endm
-
-       GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1
-       GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1
-       GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11
-
-       .macro BASR_R14_R9
-0:     brasl   %r14,__s390x_indirect_jump_r1use_r9
-       .pushsection .s390_indirect_branches,"a",@progbits
-       .long   0b-.
-       .popsection
-       .endm
-
-       .macro BR_R1USE_R14
-0:     jg      __s390x_indirect_jump_r1use_r14
-       .pushsection .s390_indirect_branches,"a",@progbits
-       .long   0b-.
-       .popsection
-       .endm
-
-       .macro BR_R11USE_R14
-0:     jg      __s390x_indirect_jump_r11use_r14
-       .pushsection .s390_indirect_branches,"a",@progbits
-       .long   0b-.
-       .popsection
-       .endm
-
-#else  /* CONFIG_EXPOLINE */
-
-       .macro BASR_R14_R9
-       basr    %r14,%r9
-       .endm
-
-       .macro BR_R1USE_R14
-       br      %r14
-       .endm
-
-       .macro BR_R11USE_R14
-       br      %r14
-       .endm
-
-#endif /* CONFIG_EXPOLINE */
-
+       GEN_BR_THUNK %r9
+       GEN_BR_THUNK %r14
+       GEN_BR_THUNK %r14,%r11
 
        .section .kprobes.text, "ax"
 .Ldummy:
@@ -260,7 +203,7 @@ _LPP_OFFSET = __LC_LPP
 ENTRY(__bpon)
        .globl __bpon
        BPON
-       BR_R1USE_R14
+       BR_EX   %r14
 
 /*
  * Scheduler resume function, called by switch_to
@@ -284,7 +227,7 @@ ENTRY(__switch_to)
        mvc     __LC_CURRENT_PID(4,%r0),0(%r3)  # store pid of next
        lmg     %r6,%r15,__SF_GPRS(%r15)        # load gprs of next task
        ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
-       BR_R1USE_R14
+       BR_EX   %r14
 
 .L__critical_start:
 
@@ -351,7 +294,7 @@ sie_exit:
        xgr     %r5,%r5
        lmg     %r6,%r14,__SF_GPRS(%r15)        # restore kernel registers
        lg      %r2,__SF_SIE_REASON(%r15)       # return exit reason code
-       BR_R1USE_R14
+       BR_EX   %r14
 .Lsie_fault:
        lghi    %r14,-EFAULT
        stg     %r14,__SF_SIE_REASON(%r15)      # set exit reason code
@@ -410,7 +353,7 @@ ENTRY(system_call)
        lgf     %r9,0(%r8,%r10)                 # get system call add.
        TSTMSK  __TI_flags(%r12),_TIF_TRACE
        jnz     .Lsysc_tracesys
-       BASR_R14_R9                             # call sys_xxxx
+       BASR_EX %r14,%r9                        # call sys_xxxx
        stg     %r2,__PT_R2(%r11)               # store return value
 
 .Lsysc_return:
@@ -595,7 +538,7 @@ ENTRY(system_call)
        lmg     %r3,%r7,__PT_R3(%r11)
        stg     %r7,STACK_FRAME_OVERHEAD(%r15)
        lg      %r2,__PT_ORIG_GPR2(%r11)
-       BASR_R14_R9                     # call sys_xxx
+       BASR_EX %r14,%r9                # call sys_xxx
        stg     %r2,__PT_R2(%r11)       # store return value
 .Lsysc_tracenogo:
        TSTMSK  __TI_flags(%r12),_TIF_TRACE
@@ -619,7 +562,7 @@ ENTRY(ret_from_fork)
        lmg     %r9,%r10,__PT_R9(%r11)  # load gprs
 ENTRY(kernel_thread_starter)
        la      %r2,0(%r10)
-       BASR_R14_R9
+       BASR_EX %r14,%r9
        j       .Lsysc_tracenogo
 
 /*
@@ -701,7 +644,7 @@ ENTRY(pgm_check_handler)
        je      .Lpgm_return
        lgf     %r9,0(%r10,%r1)         # load address of handler routine
        lgr     %r2,%r11                # pass pointer to pt_regs
-       BASR_R14_R9                     # branch to interrupt-handler
+       BASR_EX %r14,%r9                # branch to interrupt-handler
 .Lpgm_return:
        LOCKDEP_SYS_EXIT
        tm      __PT_PSW+1(%r11),0x01   # returning to user ?
@@ -1019,7 +962,7 @@ ENTRY(psw_idle)
        stpt    __TIMER_IDLE_ENTER(%r2)
 .Lpsw_idle_lpsw:
        lpswe   __SF_EMPTY(%r15)
-       BR_R1USE_R14
+       BR_EX   %r14
 .Lpsw_idle_end:
 
 /*
@@ -1061,7 +1004,7 @@ ENTRY(save_fpu_regs)
 .Lsave_fpu_regs_done:
        oi      __LC_CPU_FLAGS+7,_CIF_FPU
 .Lsave_fpu_regs_exit:
-       BR_R1USE_R14
+       BR_EX   %r14
 .Lsave_fpu_regs_end:
 EXPORT_SYMBOL(save_fpu_regs)
 
@@ -1107,7 +1050,7 @@ load_fpu_regs:
 .Lload_fpu_regs_done:
        ni      __LC_CPU_FLAGS+7,255-_CIF_FPU
 .Lload_fpu_regs_exit:
-       BR_R1USE_R14
+       BR_EX   %r14
 .Lload_fpu_regs_end:
 
 .L__critical_end:
@@ -1322,7 +1265,7 @@ cleanup_critical:
        jl      0f
        clg     %r9,BASED(.Lcleanup_table+104)  # .Lload_fpu_regs_end
        jl      .Lcleanup_load_fpu_regs
-0:     BR_R11USE_R14
+0:     BR_EX   %r14
 
        .align  8
 .Lcleanup_table:
@@ -1358,7 +1301,7 @@ cleanup_critical:
        ni      __SIE_PROG0C+3(%r9),0xfe        # no longer in SIE
        lctlg   %c1,%c1,__LC_USER_ASCE          # load primary asce
        larl    %r9,sie_exit                    # skip forward to sie_exit
-       BR_R11USE_R14
+       BR_EX   %r14
 #endif
 
 .Lcleanup_system_call:
@@ -1412,7 +1355,7 @@ cleanup_critical:
        stg     %r15,56(%r11)           # r15 stack pointer
        # set new psw address and exit
        larl    %r9,.Lsysc_do_svc
-       BR_R11USE_R14
+       BR_EX   %r14,%r11
 .Lcleanup_system_call_insn:
        .quad   system_call
        .quad   .Lsysc_stmg
@@ -1424,7 +1367,7 @@ cleanup_critical:
 
 .Lcleanup_sysc_tif:
        larl    %r9,.Lsysc_tif
-       BR_R11USE_R14
+       BR_EX   %r14,%r11
 
 .Lcleanup_sysc_restore:
        # check if stpt has been executed
@@ -1441,14 +1384,14 @@ cleanup_critical:
        mvc     0(64,%r11),__PT_R8(%r9)
        lmg     %r0,%r7,__PT_R0(%r9)
 1:     lmg     %r8,%r9,__LC_RETURN_PSW
-       BR_R11USE_R14
+       BR_EX   %r14,%r11
 .Lcleanup_sysc_restore_insn:
        .quad   .Lsysc_exit_timer
        .quad   .Lsysc_done - 4
 
 .Lcleanup_io_tif:
        larl    %r9,.Lio_tif
-       BR_R11USE_R14
+       BR_EX   %r14,%r11
 
 .Lcleanup_io_restore:
        # check if stpt has been executed
@@ -1462,7 +1405,7 @@ cleanup_critical:
        mvc     0(64,%r11),__PT_R8(%r9)
        lmg     %r0,%r7,__PT_R0(%r9)
 1:     lmg     %r8,%r9,__LC_RETURN_PSW
-       BR_R11USE_R14
+       BR_EX   %r14,%r11
 .Lcleanup_io_restore_insn:
        .quad   .Lio_exit_timer
        .quad   .Lio_done - 4
@@ -1515,17 +1458,17 @@ cleanup_critical:
        # prepare return psw
        nihh    %r8,0xfcfd              # clear irq & wait state bits
        lg      %r9,48(%r11)            # return from psw_idle
-       BR_R11USE_R14
+       BR_EX   %r14,%r11
 .Lcleanup_idle_insn:
        .quad   .Lpsw_idle_lpsw
 
 .Lcleanup_save_fpu_regs:
        larl    %r9,save_fpu_regs
-       BR_R11USE_R14
+       BR_EX   %r14,%r11
 
 .Lcleanup_load_fpu_regs:
        larl    %r9,load_fpu_regs
-       BR_R11USE_R14
+       BR_EX   %r14,%r11
 
 /*
  * Integer constants
index 94f2099..3d17c41 100644 (file)
@@ -176,10 +176,9 @@ void do_softirq_own_stack(void)
                new -= STACK_FRAME_OVERHEAD;
                ((struct stack_frame *) new)->back_chain = old;
                asm volatile("   la    15,0(%0)\n"
-                            "   basr  14,%2\n"
+                            "   brasl 14,__do_softirq\n"
                             "   la    15,0(%1)\n"
-                            : : "a" (new), "a" (old),
-                                "a" (__do_softirq)
+                            : : "a" (new), "a" (old)
                             : "0", "1", "2", "3", "4", "5", "14",
                               "cc", "memory" );
        } else {
index 82df7d8..27110f3 100644 (file)
@@ -9,13 +9,17 @@
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/ftrace.h>
+#include <asm/nospec-insn.h>
 #include <asm/ptrace.h>
 #include <asm/export.h>
 
+       GEN_BR_THUNK %r1
+       GEN_BR_THUNK %r14
+
        .section .kprobes.text, "ax"
 
 ENTRY(ftrace_stub)
-       br      %r14
+       BR_EX   %r14
 
 #define STACK_FRAME_SIZE  (STACK_FRAME_OVERHEAD + __PT_SIZE)
 #define STACK_PTREGS     (STACK_FRAME_OVERHEAD)
@@ -23,7 +27,7 @@ ENTRY(ftrace_stub)
 #define STACK_PTREGS_PSW  (STACK_PTREGS + __PT_PSW)
 
 ENTRY(_mcount)
-       br      %r14
+       BR_EX   %r14
 
 EXPORT_SYMBOL(_mcount)
 
@@ -53,7 +57,7 @@ ENTRY(ftrace_caller)
 #endif
        lgr     %r3,%r14
        la      %r5,STACK_PTREGS(%r15)
-       basr    %r14,%r1
+       BASR_EX %r14,%r1
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 # The j instruction gets runtime patched to a nop instruction.
 # See ftrace_enable_ftrace_graph_caller.
@@ -68,7 +72,7 @@ ftrace_graph_caller_end:
 #endif
        lg      %r1,(STACK_PTREGS_PSW+8)(%r15)
        lmg     %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
-       br      %r1
+       BR_EX   %r1
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
@@ -81,6 +85,6 @@ ENTRY(return_to_handler)
        aghi    %r15,STACK_FRAME_OVERHEAD
        lgr     %r14,%r2
        lmg     %r2,%r5,32(%r15)
-       br      %r14
+       BR_EX   %r14
 
 #endif
index 5a83be9..0dc8ac8 100644 (file)
@@ -465,11 +465,11 @@ int module_finalize(const Elf_Ehdr *hdr,
                        apply_alternatives(aseg, aseg + s->sh_size);
 
                if (IS_ENABLED(CONFIG_EXPOLINE) &&
-                   (!strcmp(".nospec_call_table", secname)))
+                   (!strncmp(".s390_indirect", secname, 14)))
                        nospec_revert(aseg, aseg + s->sh_size);
 
                if (IS_ENABLED(CONFIG_EXPOLINE) &&
-                   (!strcmp(".nospec_return_table", secname)))
+                   (!strncmp(".s390_return", secname, 12)))
                        nospec_revert(aseg, aseg + s->sh_size);
        }
 
index 46d49a1..8ad6a71 100644 (file)
@@ -1,7 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/module.h>
 #include <linux/device.h>
-#include <linux/cpu.h>
 #include <asm/nospec-branch.h>
 
 static int __init nobp_setup_early(char *str)
@@ -44,24 +43,6 @@ static int __init nospec_report(void)
 }
 arch_initcall(nospec_report);
 
-#ifdef CONFIG_SYSFS
-ssize_t cpu_show_spectre_v1(struct device *dev,
-                           struct device_attribute *attr, char *buf)
-{
-       return sprintf(buf, "Mitigation: __user pointer sanitization\n");
-}
-
-ssize_t cpu_show_spectre_v2(struct device *dev,
-                           struct device_attribute *attr, char *buf)
-{
-       if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
-               return sprintf(buf, "Mitigation: execute trampolines\n");
-       if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
-               return sprintf(buf, "Mitigation: limited branch prediction.\n");
-       return sprintf(buf, "Vulnerable\n");
-}
-#endif
-
 #ifdef CONFIG_EXPOLINE
 
 int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
@@ -112,7 +93,6 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
        s32 *epo;
 
        /* Second part of the instruction replace is always a nop */
-       memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x00, 0x00 }, 4);
        for (epo = start; epo < end; epo++) {
                instr = (u8 *) epo + *epo;
                if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
@@ -133,18 +113,34 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
                        br = thunk + (*(int *)(thunk + 2)) * 2;
                else
                        continue;
-               if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0)
+               /* Check for unconditional branch 0x07f? or 0x47f???? */
+               if ((br[0] & 0xbf) != 0x07 || (br[1] & 0xf0) != 0xf0)
                        continue;
+
+               memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x07, 0x00 }, 4);
                switch (type) {
                case BRCL_EXPOLINE:
-                       /* brcl to thunk, replace with br + nop */
                        insnbuf[0] = br[0];
                        insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
+                       if (br[0] == 0x47) {
+                               /* brcl to b, replace with bc + nopr */
+                               insnbuf[2] = br[2];
+                               insnbuf[3] = br[3];
+                       } else {
+                               /* brcl to br, replace with bcr + nop */
+                       }
                        break;
                case BRASL_EXPOLINE:
-                       /* brasl to thunk, replace with basr + nop */
-                       insnbuf[0] = 0x0d;
                        insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
+                       if (br[0] == 0x47) {
+                               /* brasl to b, replace with bas + nopr */
+                               insnbuf[0] = 0x4d;
+                               insnbuf[2] = br[2];
+                               insnbuf[3] = br[3];
+                       } else {
+                               /* brasl to br, replace with basr + nop */
+                               insnbuf[0] = 0x0d;
+                       }
                        break;
                }
 
diff --git a/arch/s390/kernel/nospec-sysfs.c b/arch/s390/kernel/nospec-sysfs.c
new file mode 100644 (file)
index 0000000..8affad5
--- /dev/null
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/device.h>
+#include <linux/cpu.h>
+#include <asm/facility.h>
+#include <asm/nospec-branch.h>
+
+ssize_t cpu_show_spectre_v1(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
+               return sprintf(buf, "Mitigation: execute trampolines\n");
+       if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
+               return sprintf(buf, "Mitigation: limited branch prediction\n");
+       return sprintf(buf, "Vulnerable\n");
+}
index 5ee27dc..feebb29 100644 (file)
@@ -123,7 +123,7 @@ CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES_IV, 0x00a1);
 CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TABORT, 0x00b1);
 CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_NO_SPECIAL, 0x00b2);
 CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_SPECIAL, 0x00b3);
-CPUMF_EVENT_ATTR(cf_z13, L1D_WRITES_RO_EXCL, 0x0080);
+CPUMF_EVENT_ATTR(cf_z13, L1D_RO_EXCL_WRITES, 0x0080);
 CPUMF_EVENT_ATTR(cf_z13, DTLB1_WRITES, 0x0081);
 CPUMF_EVENT_ATTR(cf_z13, DTLB1_MISSES, 0x0082);
 CPUMF_EVENT_ATTR(cf_z13, DTLB1_HPAGE_WRITES, 0x0083);
@@ -179,7 +179,7 @@ CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_NO_SPECIAL, 0x00db);
 CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_SPECIAL, 0x00dc);
 CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
 CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
-CPUMF_EVENT_ATTR(cf_z14, L1D_WRITES_RO_EXCL, 0x0080);
+CPUMF_EVENT_ATTR(cf_z14, L1D_RO_EXCL_WRITES, 0x0080);
 CPUMF_EVENT_ATTR(cf_z14, DTLB2_WRITES, 0x0081);
 CPUMF_EVENT_ATTR(cf_z14, DTLB2_MISSES, 0x0082);
 CPUMF_EVENT_ATTR(cf_z14, DTLB2_HPAGE_WRITES, 0x0083);
@@ -371,7 +371,7 @@ static struct attribute *cpumcf_zec12_pmu_event_attr[] __initdata = {
 };
 
 static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = {
-       CPUMF_EVENT_PTR(cf_z13, L1D_WRITES_RO_EXCL),
+       CPUMF_EVENT_PTR(cf_z13, L1D_RO_EXCL_WRITES),
        CPUMF_EVENT_PTR(cf_z13, DTLB1_WRITES),
        CPUMF_EVENT_PTR(cf_z13, DTLB1_MISSES),
        CPUMF_EVENT_PTR(cf_z13, DTLB1_HPAGE_WRITES),
@@ -431,7 +431,7 @@ static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = {
 };
 
 static struct attribute *cpumcf_z14_pmu_event_attr[] __initdata = {
-       CPUMF_EVENT_PTR(cf_z14, L1D_WRITES_RO_EXCL),
+       CPUMF_EVENT_PTR(cf_z14, L1D_RO_EXCL_WRITES),
        CPUMF_EVENT_PTR(cf_z14, DTLB2_WRITES),
        CPUMF_EVENT_PTR(cf_z14, DTLB2_MISSES),
        CPUMF_EVENT_PTR(cf_z14, DTLB2_HPAGE_WRITES),
index 1c9ddd7..0292d68 100644 (file)
@@ -753,6 +753,10 @@ static int __hw_perf_event_init(struct perf_event *event)
         */
        rate = 0;
        if (attr->freq) {
+               if (!attr->sample_freq) {
+                       err = -EINVAL;
+                       goto out;
+               }
                rate = freq_to_sample_rate(&si, attr->sample_freq);
                rate = hw_limit_rate(&si, rate);
                attr->freq = 0;
index 70576a2..6e758bb 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/random.h>
 #include <linux/export.h>
 #include <linux/init_task.h>
+#include <asm/cpu_mf.h>
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/vtimer.h>
@@ -48,6 +49,15 @@ void flush_thread(void)
 {
 }
 
+void arch_setup_new_exec(void)
+{
+       if (S390_lowcore.current_pid != current->pid) {
+               S390_lowcore.current_pid = current->pid;
+               if (test_facility(40))
+                       lpp(&S390_lowcore.lpp);
+       }
+}
+
 void arch_release_task_struct(struct task_struct *tsk)
 {
        runtime_instr_release(tsk);
index 73cc375..7f14adf 100644 (file)
@@ -7,8 +7,11 @@
 
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
+#include <asm/nospec-insn.h>
 #include <asm/sigp.h>
 
+       GEN_BR_THUNK %r9
+
 #
 # Issue "store status" for the current CPU to its prefix page
 # and call passed function afterwards
@@ -67,9 +70,9 @@ ENTRY(store_status)
        st      %r4,0(%r1)
        st      %r5,4(%r1)
        stg     %r2,8(%r1)
-       lgr     %r1,%r2
+       lgr     %r9,%r2
        lgr     %r2,%r3
-       br      %r1
+       BR_EX   %r9
 
        .section .bss
        .align  8
index e991871..a049a7b 100644 (file)
@@ -13,6 +13,7 @@
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 #include <asm/asm-offsets.h>
+#include <asm/nospec-insn.h>
 #include <asm/sigp.h>
 
 /*
@@ -24,6 +25,8 @@
  * (see below) in the resume process.
  * This function runs with disabled interrupts.
  */
+       GEN_BR_THUNK %r14
+
        .section .text
 ENTRY(swsusp_arch_suspend)
        stmg    %r6,%r15,__SF_GPRS(%r15)
@@ -103,7 +106,7 @@ ENTRY(swsusp_arch_suspend)
        spx     0x318(%r1)
        lmg     %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
        lghi    %r2,0
-       br      %r14
+       BR_EX   %r14
 
 /*
  * Restore saved memory image to correct place and restore register context.
@@ -197,11 +200,10 @@ pgm_check_entry:
        larl    %r15,init_thread_union
        ahi     %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
        larl    %r2,.Lpanic_string
-       larl    %r3,sclp_early_printk
        lghi    %r1,0
        sam31
        sigp    %r1,%r0,SIGP_SET_ARCHITECTURE
-       basr    %r14,%r3
+       brasl   %r14,sclp_early_printk
        larl    %r3,.Ldisabled_wait_31
        lpsw    0(%r3)
 4:
@@ -267,7 +269,7 @@ restore_registers:
        /* Return 0 */
        lmg     %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
        lghi    %r2,0
-       br      %r14
+       BR_EX   %r14
 
        .section .data..nosave,"aw",@progbits
        .align  8
index d9d1f51..5007fac 100644 (file)
@@ -150,6 +150,15 @@ unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
        return orig;
 }
 
+bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
+                            struct pt_regs *regs)
+{
+       if (ctx == RP_CHECK_CHAIN_CALL)
+               return user_stack_pointer(regs) <= ret->stack;
+       else
+               return user_stack_pointer(regs) < ret->stack;
+}
+
 /* Instruction Emulation */
 
 static void adjust_psw_addr(psw_t *psw, unsigned long len)
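
The new arch_uretprobe_is_alive() above keeps a return instance alive as long as the current user stack pointer has not moved above the recorded one, and allows equality only when checking a chained call. A standalone sketch of just that predicate; the enum values and sample addresses are illustrative only.

#include <stdbool.h>
#include <stdio.h>

enum rp_check { RP_CHECK_CALL, RP_CHECK_CHAIN_CALL, RP_CHECK_RET };

/* Mirror of the comparison above: ret_stack is the stack pointer recorded
 * when the return probe was planted, sp is the one seen now. */
static bool uretprobe_is_alive(unsigned long ret_stack, enum rp_check ctx,
                               unsigned long sp)
{
        if (ctx == RP_CHECK_CHAIN_CALL)
                return sp <= ret_stack;
        return sp < ret_stack;
}

int main(void)
{
        /* Equal stack pointers: alive only for a chained call check. */
        printf("%d\n", uretprobe_is_alive(0x7fffe000, RP_CHECK_CHAIN_CALL, 0x7fffe000)); /* 1 */
        printf("%d\n", uretprobe_is_alive(0x7fffe000, RP_CHECK_CALL, 0x7fffe000));       /* 0 */
        /* A deeper frame (smaller sp) is alive either way. */
        printf("%d\n", uretprobe_is_alive(0x7fffe000, RP_CHECK_CALL, 0x7fffd000));       /* 1 */
        return 0;
}
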
index 495c9c4..2311f15 100644 (file)
@@ -7,6 +7,9 @@
 
 #include <linux/linkage.h>
 #include <asm/export.h>
+#include <asm/nospec-insn.h>
+
+       GEN_BR_THUNK %r14
 
 /*
  * void *memmove(void *dest, const void *src, size_t n)
@@ -33,14 +36,14 @@ ENTRY(memmove)
 .Lmemmove_forward_remainder:
        larl    %r5,.Lmemmove_mvc
        ex      %r4,0(%r5)
-       br      %r14
+       BR_EX   %r14
 .Lmemmove_reverse:
        ic      %r0,0(%r4,%r3)
        stc     %r0,0(%r4,%r1)
        brctg   %r4,.Lmemmove_reverse
        ic      %r0,0(%r4,%r3)
        stc     %r0,0(%r4,%r1)
-       br      %r14
+       BR_EX   %r14
 .Lmemmove_mvc:
        mvc     0(1,%r1),0(%r3)
 EXPORT_SYMBOL(memmove)
@@ -77,7 +80,7 @@ ENTRY(memset)
 .Lmemset_clear_remainder:
        larl    %r3,.Lmemset_xc
        ex      %r4,0(%r3)
-       br      %r14
+       BR_EX   %r14
 .Lmemset_fill:
        cghi    %r4,1
        lgr     %r1,%r2
@@ -95,10 +98,10 @@ ENTRY(memset)
        stc     %r3,0(%r1)
        larl    %r5,.Lmemset_mvc
        ex      %r4,0(%r5)
-       br      %r14
+       BR_EX   %r14
 .Lmemset_fill_exit:
        stc     %r3,0(%r1)
-       br      %r14
+       BR_EX   %r14
 .Lmemset_xc:
        xc      0(1,%r1),0(%r1)
 .Lmemset_mvc:
@@ -121,7 +124,7 @@ ENTRY(memcpy)
 .Lmemcpy_remainder:
        larl    %r5,.Lmemcpy_mvc
        ex      %r4,0(%r5)
-       br      %r14
+       BR_EX   %r14
 .Lmemcpy_loop:
        mvc     0(256,%r1),0(%r3)
        la      %r1,256(%r1)
@@ -159,10 +162,10 @@ ENTRY(__memset\bits)
        \insn   %r3,0(%r1)
        larl    %r5,.L__memset_mvc\bits
        ex      %r4,0(%r5)
-       br      %r14
+       BR_EX   %r14
 .L__memset_exit\bits:
        \insn   %r3,0(%r2)
-       br      %r14
+       BR_EX   %r14
 .L__memset_mvc\bits:
        mvc     \bytes(1,%r1),0(%r1)
 .endm
index 25bb464..9f79486 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/nospec-insn.h>
 #include "bpf_jit.h"
 
 /*
@@ -54,7 +55,7 @@ ENTRY(sk_load_##NAME##_pos);                                          \
        clg     %r3,STK_OFF_HLEN(%r15); /* Offset + SIZE > hlen? */     \
        jh      sk_load_##NAME##_slow;                                  \
        LOAD    %r14,-SIZE(%r3,%r12);   /* Get data from skb */         \
-       b       OFF_OK(%r6);            /* Return */                    \
+       B_EX    OFF_OK,%r6;             /* Return */                    \
                                                                        \
 sk_load_##NAME##_slow:;                                                        \
        lgr     %r2,%r7;                /* Arg1 = skb pointer */        \
@@ -64,11 +65,14 @@ sk_load_##NAME##_slow:;                                                     \
        brasl   %r14,skb_copy_bits;     /* Get data from skb */         \
        LOAD    %r14,STK_OFF_TMP(%r15); /* Load from temp buffer */     \
        ltgr    %r2,%r2;                /* Set cc to (%r2 != 0) */      \
-       br      %r6;                    /* Return */
+       BR_EX   %r6;                    /* Return */
 
 sk_load_common(word, 4, llgf)  /* r14 = *(u32 *) (skb->data+offset) */
 sk_load_common(half, 2, llgh)  /* r14 = *(u16 *) (skb->data+offset) */
 
+       GEN_BR_THUNK %r6
+       GEN_B_THUNK OFF_OK,%r6
+
 /*
  * Load 1 byte from SKB (optimized version)
  */
@@ -80,7 +84,7 @@ ENTRY(sk_load_byte_pos)
        clg     %r3,STK_OFF_HLEN(%r15)  # Offset >= hlen?
        jnl     sk_load_byte_slow
        llgc    %r14,0(%r3,%r12)        # Get byte from skb
-       b       OFF_OK(%r6)             # Return OK
+       B_EX    OFF_OK,%r6              # Return OK
 
 sk_load_byte_slow:
        lgr     %r2,%r7                 # Arg1 = skb pointer
@@ -90,7 +94,7 @@ sk_load_byte_slow:
        brasl   %r14,skb_copy_bits      # Get data from skb
        llgc    %r14,STK_OFF_TMP(%r15)  # Load result from temp buffer
        ltgr    %r2,%r2                 # Set cc to (%r2 != 0)
-       br      %r6                     # Return cc
+       BR_EX   %r6                     # Return cc
 
 #define sk_negative_common(NAME, SIZE, LOAD)                           \
 sk_load_##NAME##_slow_neg:;                                            \
@@ -104,7 +108,7 @@ sk_load_##NAME##_slow_neg:;                                         \
        jz      bpf_error;                                              \
        LOAD    %r14,0(%r2);            /* Get data from pointer */     \
        xr      %r3,%r3;                /* Set cc to zero */            \
-       br      %r6;                    /* Return cc */
+       BR_EX   %r6;                    /* Return cc */
 
 sk_negative_common(word, 4, llgf)
 sk_negative_common(half, 2, llgh)
@@ -113,4 +117,4 @@ sk_negative_common(byte, 1, llgc)
 bpf_error:
 # force a return 0 from jit handler
        ltgr    %r15,%r15       # Set condition code
-       br      %r6
+       BR_EX   %r6
index 78a19c9..dd2bcf0 100644 (file)
@@ -25,6 +25,8 @@
 #include <linux/bpf.h>
 #include <asm/cacheflush.h>
 #include <asm/dis.h>
+#include <asm/facility.h>
+#include <asm/nospec-branch.h>
 #include <asm/set_memory.h>
 #include "bpf_jit.h"
 
@@ -41,6 +43,8 @@ struct bpf_jit {
        int base_ip;            /* Base address for literal pool */
        int ret0_ip;            /* Address of return 0 */
        int exit_ip;            /* Address of exit */
+       int r1_thunk_ip;        /* Address of expoline thunk for 'br %r1' */
+       int r14_thunk_ip;       /* Address of expoline thunk for 'br %r14' */
        int tail_call_start;    /* Tail call start offset */
        int labels[1];          /* Labels for local jumps */
 };
@@ -250,6 +254,19 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
        REG_SET_SEEN(b2);                                       \
 })
 
+#define EMIT6_PCREL_RILB(op, b, target)                                \
+({                                                             \
+       int rel = (target - jit->prg) / 2;                      \
+       _EMIT6(op | reg_high(b) << 16 | rel >> 16, rel & 0xffff);       \
+       REG_SET_SEEN(b);                                        \
+})
+
+#define EMIT6_PCREL_RIL(op, target)                            \
+({                                                             \
+       int rel = (target - jit->prg) / 2;                      \
+       _EMIT6(op | rel >> 16, rel & 0xffff);                   \
+})
+
 #define _EMIT6_IMM(op, imm)                                    \
 ({                                                             \
        unsigned int __imm = (imm);                             \
@@ -469,8 +486,45 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
        EMIT4(0xb9040000, REG_2, BPF_REG_0);
        /* Restore registers */
        save_restore_regs(jit, REGS_RESTORE, stack_depth);
+       if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
+               jit->r14_thunk_ip = jit->prg;
+               /* Generate __s390_indirect_jump_r14 thunk */
+               if (test_facility(35)) {
+                       /* exrl %r0,.+10 */
+                       EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
+               } else {
+                       /* larl %r1,.+14 */
+                       EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
+                       /* ex 0,0(%r1) */
+                       EMIT4_DISP(0x44000000, REG_0, REG_1, 0);
+               }
+               /* j . */
+               EMIT4_PCREL(0xa7f40000, 0);
+       }
        /* br %r14 */
        _EMIT2(0x07fe);
+
+       if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable &&
+           (jit->seen & SEEN_FUNC)) {
+               jit->r1_thunk_ip = jit->prg;
+               /* Generate __s390_indirect_jump_r1 thunk */
+               if (test_facility(35)) {
+                       /* exrl %r0,.+10 */
+                       EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
+                       /* j . */
+                       EMIT4_PCREL(0xa7f40000, 0);
+                       /* br %r1 */
+                       _EMIT2(0x07f1);
+               } else {
+                       /* larl %r1,.+14 */
+                       EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
+                       /* ex 0,S390_lowcore.br_r1_trampoline */
+                       EMIT4_DISP(0x44000000, REG_0, REG_0,
+                                  offsetof(struct lowcore, br_r1_trampoline));
+                       /* j . */
+                       EMIT4_PCREL(0xa7f40000, 0);
+               }
+       }
 }
 
 /*
@@ -966,8 +1020,13 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
                /* lg %w1,<d(imm)>(%l) */
                EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
                              EMIT_CONST_U64(func));
-               /* basr %r14,%w1 */
-               EMIT2(0x0d00, REG_14, REG_W1);
+               if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
+                       /* brasl %r14,__s390_indirect_jump_r1 */
+                       EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
+               } else {
+                       /* basr %r14,%w1 */
+                       EMIT2(0x0d00, REG_14, REG_W1);
+               }
                /* lgr %b0,%r2: load return value into %b0 */
                EMIT4(0xb9040000, BPF_REG_0, REG_2);
                if ((jit->seen & SEEN_SKB) &&
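
The EMIT6_PCREL_RIL/EMIT6_PCREL_RILB helpers above divide the byte distance by two because s390 relative-long instructions (larl, exrl, brasl) encode their displacement in halfwords. A minimal standalone sketch of that arithmetic, using hypothetical JIT buffer offsets:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Hypothetical byte offsets inside a JIT buffer. */
        uint32_t prg = 0x40;            /* instruction currently being emitted */
        uint32_t target = 0x20;         /* thunk the branch should reach       */

        /* Relative-long instructions count halfwords, hence the / 2. */
        int32_t rel = ((int32_t)target - (int32_t)prg) / 2;

        printf("displacement: %d halfwords (%d bytes)\n", rel, rel * 2);
        return 0;
}
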
index 97fe293..1851eae 100644 (file)
@@ -9,6 +9,7 @@ config SUPERH
        select HAVE_IDE if HAS_IOPORT_MAP
        select HAVE_MEMBLOCK
        select HAVE_MEMBLOCK_NODE_MAP
+       select NO_BOOTMEM
        select ARCH_DISCARD_MEMBLOCK
        select HAVE_OPROFILE
        select HAVE_GENERIC_DMA_COHERENT
index 4205f6d..a5bd036 100644 (file)
@@ -43,7 +43,11 @@ void __ref cpu_probe(void)
 #endif
 
 #if defined(CONFIG_CPU_J2)
+#if defined(CONFIG_SMP)
        unsigned cpu = hard_smp_processor_id();
+#else
+       unsigned cpu = 0;
+#endif
        if (cpu == 0) of_scan_flat_dt(scan_cache, NULL);
        if (j2_ccr_base) __raw_writel(0x80000303, j2_ccr_base + 4*cpu);
        if (cpu != 0) return;
index d34e998..c286cf5 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/ioport.h>
 #include <linux/init.h>
 #include <linux/initrd.h>
-#include <linux/bootmem.h>
 #include <linux/console.h>
 #include <linux/root_dev.h>
 #include <linux/utsname.h>
index 8ce9869..f1b4469 100644 (file)
@@ -59,7 +59,9 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 
        split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);
 
-       *dma_handle = virt_to_phys(ret) - PFN_PHYS(dev->dma_pfn_offset);
+       *dma_handle = virt_to_phys(ret);
+       if (!WARN_ON(!dev))
+               *dma_handle -= PFN_PHYS(dev->dma_pfn_offset);
 
        return ret_nocache;
 }
@@ -69,9 +71,12 @@ void dma_generic_free_coherent(struct device *dev, size_t size,
                               unsigned long attrs)
 {
        int order = get_order(size);
-       unsigned long pfn = (dma_handle >> PAGE_SHIFT) + dev->dma_pfn_offset;
+       unsigned long pfn = dma_handle >> PAGE_SHIFT;
        int k;
 
+       if (!WARN_ON(!dev))
+               pfn += dev->dma_pfn_offset;
+
        for (k = 0; k < (1 << order); k++)
                __free_pages(pfn_to_page(pfn + k), 0);
 
@@ -143,7 +148,7 @@ int __init platform_resource_setup_memory(struct platform_device *pdev,
        if (!memsize)
                return 0;
 
-       buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL);
+       buf = dma_alloc_coherent(&pdev->dev, memsize, &dma_handle, GFP_KERNEL);
        if (!buf) {
                pr_warning("%s: unable to allocate memory\n", name);
                return -ENOMEM;
index ce0bbaa..4034035 100644 (file)
@@ -211,59 +211,15 @@ void __init allocate_pgdat(unsigned int nid)
 
        NODE_DATA(nid) = __va(phys);
        memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
-
-       NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
 #endif
 
        NODE_DATA(nid)->node_start_pfn = start_pfn;
        NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
 }
 
-static void __init bootmem_init_one_node(unsigned int nid)
-{
-       unsigned long total_pages, paddr;
-       unsigned long end_pfn;
-       struct pglist_data *p;
-
-       p = NODE_DATA(nid);
-
-       /* Nothing to do.. */
-       if (!p->node_spanned_pages)
-               return;
-
-       end_pfn = pgdat_end_pfn(p);
-
-       total_pages = bootmem_bootmap_pages(p->node_spanned_pages);
-
-       paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
-       if (!paddr)
-               panic("Can't allocate bootmap for nid[%d]\n", nid);
-
-       init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);
-
-       free_bootmem_with_active_regions(nid, end_pfn);
-
-       /*
-        * XXX Handle initial reservations for the system memory node
-        * only for the moment, we'll refactor this later for handling
-        * reservations in other nodes.
-        */
-       if (nid == 0) {
-               struct memblock_region *reg;
-
-               /* Reserve the sections we're already using. */
-               for_each_memblock(reserved, reg) {
-                       reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
-               }
-       }
-
-       sparse_memory_present_with_active_regions(nid);
-}
-
 static void __init do_init_bootmem(void)
 {
        struct memblock_region *reg;
-       int i;
 
        /* Add active regions with valid PFNs. */
        for_each_memblock(memory, reg) {
@@ -279,9 +235,12 @@ static void __init do_init_bootmem(void)
 
        plat_mem_setup();
 
-       for_each_online_node(i)
-               bootmem_init_one_node(i);
+       for_each_memblock(memory, reg) {
+               int nid = memblock_get_region_node(reg);
 
+               memory_present(nid, memblock_region_memory_base_pfn(reg),
+                       memblock_region_memory_end_pfn(reg));
+       }
        sparse_init();
 }
 
@@ -322,7 +281,6 @@ void __init paging_init(void)
 {
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        unsigned long vaddr, end;
-       int nid;
 
        sh_mv.mv_mem_init();
 
@@ -377,21 +335,7 @@ void __init paging_init(void)
        kmap_coherent_init();
 
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-
-       for_each_online_node(nid) {
-               pg_data_t *pgdat = NODE_DATA(nid);
-               unsigned long low, start_pfn;
-
-               start_pfn = pgdat->bdata->node_min_pfn;
-               low = pgdat->bdata->node_low_pfn;
-
-               if (max_zone_pfns[ZONE_NORMAL] < low)
-                       max_zone_pfns[ZONE_NORMAL] = low;
-
-               printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
-                      nid, start_pfn, low);
-       }
-
+       max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
        free_area_init_nodes(max_zone_pfns);
 }
 
index 05713d1..830e8b3 100644 (file)
@@ -8,7 +8,6 @@
  * for more details.
  */
 #include <linux/module.h>
-#include <linux/bootmem.h>
 #include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/numa.h>
@@ -26,9 +25,7 @@ EXPORT_SYMBOL_GPL(node_data);
  */
 void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
 {
-       unsigned long bootmap_pages;
        unsigned long start_pfn, end_pfn;
-       unsigned long bootmem_paddr;
 
        /* Don't allow bogus node assignment */
        BUG_ON(nid >= MAX_NUMNODES || nid <= 0);
@@ -48,25 +45,9 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
                                             SMP_CACHE_BYTES, end));
        memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
 
-       NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
        NODE_DATA(nid)->node_start_pfn = start_pfn;
        NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
 
-       /* Node-local bootmap */
-       bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
-       bootmem_paddr = memblock_alloc_base(bootmap_pages << PAGE_SHIFT,
-                                      PAGE_SIZE, end);
-       init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
-                         start_pfn, end_pfn);
-
-       free_bootmem_with_active_regions(nid, end_pfn);
-
-       /* Reserve the pgdat and bootmap space with the bootmem allocator */
-       reserve_bootmem_node(NODE_DATA(nid), start_pfn << PAGE_SHIFT,
-                            sizeof(struct pglist_data), BOOTMEM_DEFAULT);
-       reserve_bootmem_node(NODE_DATA(nid), bootmem_paddr,
-                            bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
-
        /* It's up */
        node_set_online(nid);
 
index 7229519..4f6676f 100644 (file)
@@ -3,7 +3,7 @@
  *
  * This program is free software: you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
+ * the Free Software Foundation, either version 2 of the License, or
  * (at your option) any later version.
  *
  * This program is distributed in the hope that it will be useful,
index 1a0fa10..32bae68 100644 (file)
@@ -403,7 +403,7 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
        if (err) {
                printk(KERN_ERR "VIO: Could not register device %s, err=%d\n",
                       dev_name(&vdev->dev), err);
-               kfree(vdev);
+               put_device(&vdev->dev);
                return NULL;
        }
        if (vdev->dp)
index 00fcf81..c07f492 100644 (file)
@@ -52,6 +52,7 @@ config X86
        select ARCH_HAS_DEVMEM_IS_ALLOWED
        select ARCH_HAS_ELF_RANDOMIZE
        select ARCH_HAS_FAST_MULTIPLIER
+       select ARCH_HAS_FILTER_PGPROT
        select ARCH_HAS_FORTIFY_SOURCE
        select ARCH_HAS_GCOV_PROFILE_ALL
        select ARCH_HAS_KCOV                    if X86_64
@@ -273,6 +274,9 @@ config ARCH_HAS_CPU_RELAX
 config ARCH_HAS_CACHE_LINE_SIZE
        def_bool y
 
+config ARCH_HAS_FILTER_PGPROT
+       def_bool y
+
 config HAVE_SETUP_PER_CPU_AREA
        def_bool y
 
index 9af927e..9de7f1e 100644 (file)
@@ -84,13 +84,13 @@ ENTRY(entry_SYSENTER_compat)
        pushq   %rdx                    /* pt_regs->dx */
        pushq   %rcx                    /* pt_regs->cx */
        pushq   $-ENOSYS                /* pt_regs->ax */
-       pushq   $0                      /* pt_regs->r8  = 0 */
+       pushq   %r8                     /* pt_regs->r8 */
        xorl    %r8d, %r8d              /* nospec   r8 */
-       pushq   $0                      /* pt_regs->r9  = 0 */
+       pushq   %r9                     /* pt_regs->r9 */
        xorl    %r9d, %r9d              /* nospec   r9 */
-       pushq   $0                      /* pt_regs->r10 = 0 */
+       pushq   %r10                    /* pt_regs->r10 */
        xorl    %r10d, %r10d            /* nospec   r10 */
-       pushq   $0                      /* pt_regs->r11 = 0 */
+       pushq   %r11                    /* pt_regs->r11 */
        xorl    %r11d, %r11d            /* nospec   r11 */
        pushq   %rbx                    /* pt_regs->rbx */
        xorl    %ebx, %ebx              /* nospec   rbx */
diff --git a/arch/x86/entry/vdso/vdso32/vdso-fakesections.c b/arch/x86/entry/vdso/vdso32/vdso-fakesections.c
deleted file mode 100644 (file)
index 541468e..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include "../vdso-fakesections.c"
index a6006e7..45b2b1c 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/cpu.h>
 #include <linux/bitops.h>
 #include <linux/device.h>
+#include <linux/nospec.h>
 
 #include <asm/apic.h>
 #include <asm/stacktrace.h>
@@ -304,17 +305,20 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
 
        config = attr->config;
 
-       cache_type = (config >>  0) & 0xff;
+       cache_type = (config >> 0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return -EINVAL;
+       cache_type = array_index_nospec(cache_type, PERF_COUNT_HW_CACHE_MAX);
 
        cache_op = (config >>  8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return -EINVAL;
+       cache_op = array_index_nospec(cache_op, PERF_COUNT_HW_CACHE_OP_MAX);
 
        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;
+       cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX);
 
        val = hw_cache_event_ids[cache_type][cache_op][cache_result];
 
@@ -421,6 +425,8 @@ int x86_setup_perfctr(struct perf_event *event)
        if (attr->config >= x86_pmu.max_events)
                return -EINVAL;
 
+       attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events);
+
        /*
         * The generic map:
         */
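
The clamps added above rely on array_index_nospec() from <linux/nospec.h>: after the ordinary bounds check, the index is masked so that an out-of-range value collapses to zero even under speculative execution. A minimal userspace sketch of the masking idea follows; it mirrors the kernel's generic C fallback and, like it, assumes arithmetic right shift of negative values:

#include <stdio.h>

/* All-ones when idx < size, all-zeroes otherwise, computed without a branch. */
static unsigned long index_mask(unsigned long idx, unsigned long size)
{
        return ~(long)(idx | (size - 1UL - idx)) >> (sizeof(long) * 8 - 1);
}

int main(void)
{
        unsigned long size = 8;
        unsigned long ok = 5, bad = 12;

        printf("%lu -> %lu\n", ok, ok & index_mask(ok, size));     /* 5 -> 5  */
        printf("%lu -> %lu\n", bad, bad & index_mask(bad, size));  /* 12 -> 0 */
        return 0;
}
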
index 607bf56..707b2a9 100644 (file)
@@ -3339,7 +3339,8 @@ static void intel_pmu_cpu_starting(int cpu)
 
        cpuc->lbr_sel = NULL;
 
-       flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
+       if (x86_pmu.version > 1)
+               flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
 
        if (!cpuc->shared_regs)
                return;
@@ -3502,6 +3503,8 @@ static __initconst const struct x86_pmu core_pmu = {
        .cpu_dying              = intel_pmu_cpu_dying,
 };
 
+static struct attribute *intel_pmu_attrs[];
+
 static __initconst const struct x86_pmu intel_pmu = {
        .name                   = "Intel",
        .handle_irq             = intel_pmu_handle_irq,
@@ -3533,6 +3536,8 @@ static __initconst const struct x86_pmu intel_pmu = {
        .format_attrs           = intel_arch3_formats_attr,
        .events_sysfs_show      = intel_event_sysfs_show,
 
+       .attrs                  = intel_pmu_attrs,
+
        .cpu_prepare            = intel_pmu_cpu_prepare,
        .cpu_starting           = intel_pmu_cpu_starting,
        .cpu_dying              = intel_pmu_cpu_dying,
@@ -3911,8 +3916,6 @@ __init int intel_pmu_init(void)
 
        x86_pmu.max_pebs_events         = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
 
-
-       x86_pmu.attrs                   = intel_pmu_attrs;
        /*
         * Quirk: v2 perfmon does not report fixed-purpose events, so
         * assume at least 3 events, when not running in a hypervisor:
index 9aca448..9f8084f 100644 (file)
@@ -92,6 +92,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/perf_event.h>
+#include <linux/nospec.h>
 #include <asm/cpu_device_id.h>
 #include <asm/intel-family.h>
 #include "../perf_event.h"
@@ -302,6 +303,7 @@ static int cstate_pmu_event_init(struct perf_event *event)
        } else if (event->pmu == &cstate_pkg_pmu) {
                if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
                        return -EINVAL;
+               cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
                if (!pkg_msr[cfg].attr)
                        return -EINVAL;
                event->hw.event_base = pkg_msr[cfg].msr;
index e7edf19..b4771a6 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/perf_event.h>
+#include <linux/nospec.h>
 #include <asm/intel-family.h>
 
 enum perf_msr_id {
@@ -158,9 +159,6 @@ static int msr_event_init(struct perf_event *event)
        if (event->attr.type != event->pmu->type)
                return -ENOENT;
 
-       if (cfg >= PERF_MSR_EVENT_MAX)
-               return -EINVAL;
-
        /* unsupported modes and filters */
        if (event->attr.exclude_user   ||
            event->attr.exclude_kernel ||
@@ -171,6 +169,11 @@ static int msr_event_init(struct perf_event *event)
            event->attr.sample_period) /* no sampling */
                return -EINVAL;
 
+       if (cfg >= PERF_MSR_EVENT_MAX)
+               return -EINVAL;
+
+       cfg = array_index_nospec((unsigned long)cfg, PERF_MSR_EVENT_MAX);
+
        if (!msr[cfg].attr)
                return -EINVAL;
 
index d554c11..578793e 100644 (file)
 #define X86_FEATURE_AVX512_VPOPCNTDQ   (16*32+14) /* POPCNT for vectors of DW/QW */
 #define X86_FEATURE_LA57               (16*32+16) /* 5-level page tables */
 #define X86_FEATURE_RDPID              (16*32+22) /* RDPID instruction */
+#define X86_FEATURE_CLDEMOTE           (16*32+25) /* CLDEMOTE instruction */
 
 /* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
 #define X86_FEATURE_OVERFLOW_RECOV     (17*32+ 0) /* MCA overflow recovery support */
index 09ad885..cc8f8fc 100644 (file)
@@ -46,7 +46,21 @@ int ftrace_int3_handler(struct pt_regs *regs);
 #endif /* CONFIG_FUNCTION_TRACER */
 
 
-#if !defined(__ASSEMBLY__) && !defined(COMPILE_OFFSETS)
+#ifndef __ASSEMBLY__
+
+#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
+{
+       /*
+        * Compare the symbol name with the system call name. Skip the
+        * "__x64_sys", "__ia32_sys" or simple "sys" prefix.
+        */
+       return !strcmp(sym + 3, name + 3) ||
+               (!strncmp(sym, "__x64_", 6) && !strcmp(sym + 9, name + 3)) ||
+               (!strncmp(sym, "__ia32_", 7) && !strcmp(sym + 10, name + 3));
+}
+
+#ifndef COMPILE_OFFSETS
 
 #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION)
 #include <asm/compat.h>
@@ -67,6 +81,7 @@ static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
        return false;
 }
 #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */
-#endif /* !__ASSEMBLY__  && !COMPILE_OFFSETS */
+#endif /* !COMPILE_OFFSETS */
+#endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_X86_FTRACE_H */
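
The helper above accepts "__x64_sys_foo" and "__ia32_sys_foo" as matches for "sys_foo"; the +3, +9 and +10 offsets simply line the strings up past their prefixes. A standalone check of the same comparison, with example symbol names:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

/* Same comparison as arch_syscall_match_sym_name() in the hunk above. */
static bool match_sym_name(const char *sym, const char *name)
{
        return !strcmp(sym + 3, name + 3) ||
                (!strncmp(sym, "__x64_", 6) && !strcmp(sym + 9, name + 3)) ||
                (!strncmp(sym, "__ia32_", 7) && !strcmp(sym + 10, name + 3));
}

int main(void)
{
        printf("%d\n", match_sym_name("__x64_sys_read", "sys_read"));   /* 1 */
        printf("%d\n", match_sym_name("__ia32_sys_read", "sys_read"));  /* 1 */
        printf("%d\n", match_sym_name("sys_read", "sys_read"));         /* 1 */
        printf("%d\n", match_sym_name("sys_write", "sys_read"));        /* 0 */
        return 0;
}
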
index 404c5fd..548d90b 100644 (file)
  * (0x80 is the syscall vector, 0x30-0x3f are for ISA)
  */
 #define FIRST_EXTERNAL_VECTOR          0x20
-/*
- * We start allocating at 0x21 to spread out vectors evenly between
- * priority levels. (0x80 is the syscall vector)
- */
-#define VECTOR_OFFSET_START            1
 
 /*
  * Reserve the lowest usable vector (and hence lowest priority)  0x20 for
 #define FIRST_SYSTEM_VECTOR            NR_VECTORS
 #endif
 
-#define FPU_IRQ                                  13
-
 /*
  * Size the maximum number of interrupts.
  *
index b885a96..a34897a 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL2.0 */
+/* SPDX-License-Identifier: GPL-2.0 */
 
 /*
  * Jailhouse paravirt detection
index 5f49b4f..f1633de 100644 (file)
@@ -601,6 +601,11 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 
 #define canon_pgprot(p) __pgprot(massage_pgprot(p))
 
+static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
+{
+       return canon_pgprot(prot);
+}
+
 static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
                                         enum page_cache_mode pcm,
                                         enum page_cache_mode new_pcm)
index d5c21a3..adb4755 100644 (file)
@@ -105,14 +105,14 @@ extern unsigned int ptrs_per_p4d;
 #define LDT_PGD_ENTRY          (pgtable_l5_enabled ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4)
 #define LDT_BASE_ADDR          (LDT_PGD_ENTRY << PGDIR_SHIFT)
 
-#define __VMALLOC_BASE_L4      0xffffc90000000000
-#define __VMALLOC_BASE_L5      0xffa0000000000000
+#define __VMALLOC_BASE_L4      0xffffc90000000000UL
+#define __VMALLOC_BASE_L5      0xffa0000000000000UL
 
 #define VMALLOC_SIZE_TB_L4     32UL
 #define VMALLOC_SIZE_TB_L5     12800UL
 
-#define __VMEMMAP_BASE_L4      0xffffea0000000000
-#define __VMEMMAP_BASE_L5      0xffd4000000000000
+#define __VMEMMAP_BASE_L4      0xffffea0000000000UL
+#define __VMEMMAP_BASE_L5      0xffd4000000000000UL
 
 #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
 # define VMALLOC_START         vmalloc_base
index 809134c..90ab9a7 100644 (file)
@@ -1 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_X64_MSGBUF_H
+#define __ASM_X64_MSGBUF_H
+
+#if !defined(__x86_64__) || !defined(__ILP32__)
 #include <asm-generic/msgbuf.h>
+#else
+/*
+ * The msqid64_ds structure for x86 architecture with x32 ABI.
+ *
+ * On x86-32 and x86-64 we can just use the generic definition, but
+ * x32 uses the same binary layout as x86_64, which is different
+ * from other 32-bit architectures.
+ */
+
+struct msqid64_ds {
+       struct ipc64_perm msg_perm;
+       __kernel_time_t msg_stime;      /* last msgsnd time */
+       __kernel_time_t msg_rtime;      /* last msgrcv time */
+       __kernel_time_t msg_ctime;      /* last change time */
+       __kernel_ulong_t msg_cbytes;    /* current number of bytes on queue */
+       __kernel_ulong_t msg_qnum;      /* number of messages in queue */
+       __kernel_ulong_t msg_qbytes;    /* max number of bytes on queue */
+       __kernel_pid_t msg_lspid;       /* pid of last msgsnd */
+       __kernel_pid_t msg_lrpid;       /* last receive pid */
+       __kernel_ulong_t __unused4;
+       __kernel_ulong_t __unused5;
+};
+
+#endif
+
+#endif /* __ASM_X64_MSGBUF_H */
index 83c05fc..644421f 100644 (file)
@@ -1 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_X86_SHMBUF_H
+#define __ASM_X86_SHMBUF_H
+
+#if !defined(__x86_64__) || !defined(__ILP32__)
 #include <asm-generic/shmbuf.h>
+#else
+/*
+ * The shmid64_ds structure for x86 architecture with x32 ABI.
+ *
+ * On x86-32 and x86-64 we can just use the generic definition, but
+ * x32 uses the same binary layout as x86_64, which is different
+ * from other 32-bit architectures.
+ */
+
+struct shmid64_ds {
+       struct ipc64_perm       shm_perm;       /* operation perms */
+       size_t                  shm_segsz;      /* size of segment (bytes) */
+       __kernel_time_t         shm_atime;      /* last attach time */
+       __kernel_time_t         shm_dtime;      /* last detach time */
+       __kernel_time_t         shm_ctime;      /* last change time */
+       __kernel_pid_t          shm_cpid;       /* pid of creator */
+       __kernel_pid_t          shm_lpid;       /* pid of last operator */
+       __kernel_ulong_t        shm_nattch;     /* no. of current attaches */
+       __kernel_ulong_t        __unused4;
+       __kernel_ulong_t        __unused5;
+};
+
+struct shminfo64 {
+       __kernel_ulong_t        shmmax;
+       __kernel_ulong_t        shmmin;
+       __kernel_ulong_t        shmmni;
+       __kernel_ulong_t        shmseg;
+       __kernel_ulong_t        shmall;
+       __kernel_ulong_t        __unused1;
+       __kernel_ulong_t        __unused2;
+       __kernel_ulong_t        __unused3;
+       __kernel_ulong_t        __unused4;
+};
+
+#endif
+
+#endif /* __ASM_X86_SHMBUF_H */
index 8a5b185..ce243f7 100644 (file)
@@ -848,6 +848,11 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
                c->x86_power = edx;
        }
 
+       if (c->extended_cpuid_level >= 0x80000008) {
+               cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
+               c->x86_capability[CPUID_8000_0008_EBX] = ebx;
+       }
+
        if (c->extended_cpuid_level >= 0x8000000a)
                c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
 
@@ -871,7 +876,6 @@ static void get_cpu_address_sizes(struct cpuinfo_x86 *c)
 
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
-               c->x86_capability[CPUID_8000_0008_EBX] = ebx;
        }
 #ifdef CONFIG_X86_32
        else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
index b9693b8..60d1897 100644 (file)
@@ -835,6 +835,9 @@ static const struct _tlb_table intel_tlb_table[] = {
        { 0x5d, TLB_DATA_4K_4M,         256,    " TLB_DATA 4 KByte and 4 MByte pages" },
        { 0x61, TLB_INST_4K,            48,     " TLB_INST 4 KByte pages, full associative" },
        { 0x63, TLB_DATA_1G,            4,      " TLB_DATA 1 GByte pages, 4-way set associative" },
+       { 0x6b, TLB_DATA_4K,            256,    " TLB_DATA 4 KByte pages, 8-way associative" },
+       { 0x6c, TLB_DATA_2M_4M,         128,    " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
+       { 0x6d, TLB_DATA_1G,            16,     " TLB_DATA 1 GByte pages, fully associative" },
        { 0x76, TLB_INST_2M_4M,         8,      " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
        { 0xb0, TLB_INST_4K,            128,    " TLB_INST 4 KByte pages, 4-way set associative" },
        { 0xb1, TLB_INST_2M_4M,         4,      " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
index 10c4fc2..77e2013 100644 (file)
@@ -564,14 +564,12 @@ static int __reload_late(void *info)
        apply_microcode_local(&err);
        spin_unlock(&update_lock);
 
+       /* siblings return UCODE_OK because their engine got updated already */
        if (err > UCODE_NFOUND) {
                pr_warn("Error reloading microcode on CPU %d\n", cpu);
-               return -1;
-       /* siblings return UCODE_OK because their engine got updated already */
+               ret = -1;
        } else if (err == UCODE_UPDATED || err == UCODE_OK) {
                ret = 1;
-       } else {
-               return ret;
        }
 
        /*
index 32b8e57..1c2cfa0 100644 (file)
@@ -485,7 +485,6 @@ static void show_saved_mc(void)
  */
 static void save_mc_for_early(u8 *mc, unsigned int size)
 {
-#ifdef CONFIG_HOTPLUG_CPU
        /* Synchronization during CPU hotplug. */
        static DEFINE_MUTEX(x86_cpu_microcode_mutex);
 
@@ -495,7 +494,6 @@ static void save_mc_for_early(u8 *mc, unsigned int size)
        show_saved_mc();
 
        mutex_unlock(&x86_cpu_microcode_mutex);
-#endif
 }
 
 static bool load_builtin_intel_microcode(struct cpio_data *cp)
index fa183a1..a15fe0e 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL2.0
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Jailhouse paravirt_ops implementation
  *
index 6285697..5c623df 100644 (file)
@@ -50,6 +50,7 @@
 #include <linux/init_ohci1394_dma.h>
 #include <linux/kvm_para.h>
 #include <linux/dma-contiguous.h>
+#include <xen/xen.h>
 
 #include <linux/errno.h>
 #include <linux/kernel.h>
@@ -534,6 +535,11 @@ static void __init reserve_crashkernel(void)
                high = true;
        }
 
+       if (xen_pv_domain()) {
+               pr_info("Ignoring crashkernel for a Xen PV domain\n");
+               return;
+       }
+
        /* 0 means: find the address automatically */
        if (crash_base <= 0) {
                /*
index 45175b8..0f1cbb0 100644 (file)
@@ -1571,6 +1571,8 @@ static inline void mwait_play_dead(void)
        void *mwait_ptr;
        int i;
 
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+               return;
        if (!this_cpu_has(X86_FEATURE_MWAIT))
                return;
        if (!this_cpu_has(X86_FEATURE_CLFLUSH))
index 91e6da4..74392d9 100644 (file)
@@ -1067,6 +1067,7 @@ static struct clocksource clocksource_tsc_early = {
        .resume                 = tsc_resume,
        .mark_unstable          = tsc_cs_mark_unstable,
        .tick_stable            = tsc_cs_tick_stable,
+       .list                   = LIST_HEAD_INIT(clocksource_tsc_early.list),
 };
 
 /*
@@ -1086,6 +1087,7 @@ static struct clocksource clocksource_tsc = {
        .resume                 = tsc_resume,
        .mark_unstable          = tsc_cs_mark_unstable,
        .tick_stable            = tsc_cs_tick_stable,
+       .list                   = LIST_HEAD_INIT(clocksource_tsc.list),
 };
 
 void mark_tsc_unstable(char *reason)
@@ -1098,13 +1100,9 @@ void mark_tsc_unstable(char *reason)
                clear_sched_clock_stable();
        disable_sched_clock_irqtime();
        pr_info("Marking TSC unstable due to %s\n", reason);
-       /* Change only the rating, when not registered */
-       if (clocksource_tsc.mult) {
-               clocksource_mark_unstable(&clocksource_tsc);
-       } else {
-               clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
-               clocksource_tsc.rating = 0;
-       }
+
+       clocksource_mark_unstable(&clocksource_tsc_early);
+       clocksource_mark_unstable(&clocksource_tsc);
 }
 
 EXPORT_SYMBOL_GPL(mark_tsc_unstable);
@@ -1244,7 +1242,7 @@ static void tsc_refine_calibration_work(struct work_struct *work)
 
        /* Don't bother refining TSC on unstable systems */
        if (tsc_unstable)
-               return;
+               goto unreg;
 
        /*
         * Since the work is started early in boot, we may be
@@ -1297,11 +1295,12 @@ static void tsc_refine_calibration_work(struct work_struct *work)
 
 out:
        if (tsc_unstable)
-               return;
+               goto unreg;
 
        if (boot_cpu_has(X86_FEATURE_ART))
                art_related_clocksource = &clocksource_tsc;
        clocksource_register_khz(&clocksource_tsc, tsc_khz);
+unreg:
        clocksource_unregister(&clocksource_tsc_early);
 }
 
@@ -1311,8 +1310,8 @@ static int __init init_tsc_clocksource(void)
        if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_disabled > 0 || !tsc_khz)
                return 0;
 
-       if (check_tsc_unstable())
-               return 0;
+       if (tsc_unstable)
+               goto unreg;
 
        if (tsc_clocksource_reliable)
                clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
@@ -1328,6 +1327,7 @@ static int __init init_tsc_clocksource(void)
                if (boot_cpu_has(X86_FEATURE_ART))
                        art_related_clocksource = &clocksource_tsc;
                clocksource_register_khz(&clocksource_tsc, tsc_khz);
+unreg:
                clocksource_unregister(&clocksource_tsc_early);
                return 0;
        }
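
The new static .list initializers make each clocksource's list head point at itself, so list helpers such as list_empty() and list_del() behave sanely even when clocksource_mark_unstable() or clocksource_unregister() is reached for a clocksource that was never registered. A minimal userspace sketch of that property (the same idea as the kernel's <linux/list.h>, not the kernel code itself):

#include <stdio.h>
#include <stdbool.h>

struct list_head { struct list_head *next, *prev; };
#define LIST_HEAD_INIT(name) { &(name), &(name) }

static bool list_empty(const struct list_head *h) { return h->next == h; }

static void list_del_init(struct list_head *e)
{
        e->next->prev = e->prev;
        e->prev->next = e->next;
        e->next = e;
        e->prev = e;
}

/* A node initialized like clocksource_tsc.list above: it points at itself. */
static struct list_head node = LIST_HEAD_INIT(node);

int main(void)
{
        /* Safe even though the node was never added to any list. */
        list_del_init(&node);
        printf("still self-linked: %d\n", list_empty(&node));  /* 1 */
        return 0;
}
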
index 70dcb55..b74c9c1 100644 (file)
@@ -1463,23 +1463,6 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
        local_irq_restore(flags);
 }
 
-static void start_sw_period(struct kvm_lapic *apic)
-{
-       if (!apic->lapic_timer.period)
-               return;
-
-       if (apic_lvtt_oneshot(apic) &&
-           ktime_after(ktime_get(),
-                       apic->lapic_timer.target_expiration)) {
-               apic_timer_expired(apic);
-               return;
-       }
-
-       hrtimer_start(&apic->lapic_timer.timer,
-               apic->lapic_timer.target_expiration,
-               HRTIMER_MODE_ABS_PINNED);
-}
-
 static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
 {
        ktime_t now, remaining;
@@ -1546,6 +1529,26 @@ static void advance_periodic_target_expiration(struct kvm_lapic *apic)
                                apic->lapic_timer.period);
 }
 
+static void start_sw_period(struct kvm_lapic *apic)
+{
+       if (!apic->lapic_timer.period)
+               return;
+
+       if (ktime_after(ktime_get(),
+                       apic->lapic_timer.target_expiration)) {
+               apic_timer_expired(apic);
+
+               if (apic_lvtt_oneshot(apic))
+                       return;
+
+               advance_periodic_target_expiration(apic);
+       }
+
+       hrtimer_start(&apic->lapic_timer.timer,
+               apic->lapic_timer.target_expiration,
+               HRTIMER_MODE_ABS_PINNED);
+}
+
 bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
 {
        if (!lapic_in_kernel(vcpu))
index aa66ccd..c766880 100644 (file)
@@ -4544,12 +4544,6 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
        __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
 }
 
-static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
-{
-       if (enable_ept)
-               vmx_flush_tlb(vcpu, true);
-}
-
 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
 {
        ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
@@ -9278,7 +9272,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
        } else {
                sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
                sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
-               vmx_flush_tlb_ept_only(vcpu);
+               vmx_flush_tlb(vcpu, true);
        }
        vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
 
@@ -9306,7 +9300,7 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
            !nested_cpu_has2(get_vmcs12(&vmx->vcpu),
                             SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
                vmcs_write64(APIC_ACCESS_ADDR, hpa);
-               vmx_flush_tlb_ept_only(vcpu);
+               vmx_flush_tlb(vcpu, true);
        }
 }
 
@@ -11220,7 +11214,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
                }
        } else if (nested_cpu_has2(vmcs12,
                                   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
-               vmx_flush_tlb_ept_only(vcpu);
+               vmx_flush_tlb(vcpu, true);
        }
 
        /*
@@ -12073,7 +12067,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
        } else if (!nested_cpu_has_ept(vmcs12) &&
                   nested_cpu_has2(vmcs12,
                                   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
-               vmx_flush_tlb_ept_only(vcpu);
+               vmx_flush_tlb(vcpu, true);
        }
 
        /* This is needed for same reason as it was needed in prepare_vmcs02 */
index 7d35ce6..c9492f7 100644 (file)
@@ -302,13 +302,6 @@ static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
            __rem;                                              \
         })
 
-#define KVM_X86_DISABLE_EXITS_MWAIT          (1 << 0)
-#define KVM_X86_DISABLE_EXITS_HTL            (1 << 1)
-#define KVM_X86_DISABLE_EXITS_PAUSE          (1 << 2)
-#define KVM_X86_DISABLE_VALID_EXITS          (KVM_X86_DISABLE_EXITS_MWAIT | \
-                                              KVM_X86_DISABLE_EXITS_HTL | \
-                                              KVM_X86_DISABLE_EXITS_PAUSE)
-
 static inline bool kvm_mwait_in_guest(struct kvm *kvm)
 {
        return kvm->arch.mwait_in_guest;
index 0f3d50f..3bded76 100644 (file)
@@ -93,6 +93,18 @@ void arch_report_meminfo(struct seq_file *m)
 static inline void split_page_count(int level) { }
 #endif
 
+static inline int
+within(unsigned long addr, unsigned long start, unsigned long end)
+{
+       return addr >= start && addr < end;
+}
+
+static inline int
+within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
+{
+       return addr >= start && addr <= end;
+}
+
 #ifdef CONFIG_X86_64
 
 static inline unsigned long highmap_start_pfn(void)
@@ -106,20 +118,25 @@ static inline unsigned long highmap_end_pfn(void)
        return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT;
 }
 
-#endif
-
-static inline int
-within(unsigned long addr, unsigned long start, unsigned long end)
+static bool __cpa_pfn_in_highmap(unsigned long pfn)
 {
-       return addr >= start && addr < end;
+       /*
+        * Kernel text has an alias mapping at a high address, known
+        * here as "highmap".
+        */
+       return within_inclusive(pfn, highmap_start_pfn(), highmap_end_pfn());
 }
 
-static inline int
-within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
+#else
+
+static bool __cpa_pfn_in_highmap(unsigned long pfn)
 {
-       return addr >= start && addr <= end;
+       /* There is no highmap on 32-bit */
+       return false;
 }
 
+#endif
+
 /*
  * Flushing functions
  */
@@ -172,7 +189,7 @@ static void __cpa_flush_all(void *arg)
 
 static void cpa_flush_all(unsigned long cache)
 {
-       BUG_ON(irqs_disabled());
+       BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
 
        on_each_cpu(__cpa_flush_all, (void *) cache, 1);
 }
@@ -236,7 +253,7 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache,
        unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
 #endif
 
-       BUG_ON(irqs_disabled());
+       BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
 
        on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);
 
@@ -1183,6 +1200,10 @@ static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
                cpa->numpages = 1;
                cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
                return 0;
+
+       } else if (__cpa_pfn_in_highmap(cpa->pfn)) {
+               /* Faults in the highmap are OK, so do not warn: */
+               return -EFAULT;
        } else {
                WARN(1, KERN_WARNING "CPA: called for zero pte. "
                        "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
@@ -1335,8 +1356,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
         * to touch the high mapped kernel as well:
         */
        if (!within(vaddr, (unsigned long)_text, _brk_end) &&
-           within_inclusive(cpa->pfn, highmap_start_pfn(),
-                            highmap_end_pfn())) {
+           __cpa_pfn_in_highmap(cpa->pfn)) {
                unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
                                               __START_KERNEL_map - phys_base;
                alias_cpa = *cpa;
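
The two range helpers moved above differ only at the end of the interval; __cpa_pfn_in_highmap() needs the inclusive form because highmap_end_pfn() is computed from the last byte of the mapping (note the "- 1"), so it names the last valid PFN rather than one past it. A standalone restatement of the boundary behaviour:

#include <stdio.h>
#include <stdbool.h>

/* Same checks as in the patch: half-open vs. closed interval. */
static bool within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}

static bool within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr <= end;
}

int main(void)
{
        unsigned long start = 0x100, end = 0x200;

        /* They only disagree at the end boundary itself. */
        printf("within(end)           = %d\n", within(end, start, end));            /* 0 */
        printf("within_inclusive(end) = %d\n", within_inclusive(end, start, end));  /* 1 */
        return 0;
}
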
index f1fd52f..4d418e7 100644 (file)
@@ -421,6 +421,16 @@ static inline bool pti_kernel_image_global_ok(void)
        if (boot_cpu_has(X86_FEATURE_K8))
                return false;
 
+       /*
+        * RANDSTRUCT derives its hardening benefits from the
+        * attacker's lack of knowledge about the layout of kernel
+        * data structures.  Keep the kernel image non-global in
+        * cases where RANDSTRUCT is in use to help keep the layout a
+        * secret.
+        */
+       if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT))
+               return false;
+
        return true;
 }
 
@@ -430,12 +440,24 @@ static inline bool pti_kernel_image_global_ok(void)
  */
 void pti_clone_kernel_text(void)
 {
+       /*
+        * rodata is part of the kernel image and is normally
+        * readable on the filesystem or on the web.  But, do not
+        * clone the areas past rodata, they might contain secrets.
+        */
        unsigned long start = PFN_ALIGN(_text);
-       unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);
+       unsigned long end = (unsigned long)__end_rodata_hpage_align;
 
        if (!pti_kernel_image_global_ok())
                return;
 
+       pr_debug("mapping partial kernel image into user address space\n");
+
+       /*
+        * Note that this will undo _some_ of the work that
+        * pti_set_kernel_image_nonglobal() did to clear the
+        * global bit.
+        */
        pti_clone_pmds(start, end, _PAGE_RW);
 }
 
@@ -458,8 +480,6 @@ void pti_set_kernel_image_nonglobal(void)
        if (pti_kernel_image_global_ok())
                return;
 
-       pr_debug("set kernel image non-global\n");
-
        set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
 }
 
index b725154..263c845 100644 (file)
@@ -1027,7 +1027,17 @@ emit_cond_jmp:           /* convert BPF opcode to x86 */
                        break;
 
                case BPF_JMP | BPF_JA:
-                       jmp_offset = addrs[i + insn->off] - addrs[i];
+                       if (insn->off == -1)
+                               /* -1 jmp instructions will always jump
+                                * backwards two bytes. Explicitly handling
+                                * this case avoids wasting too many passes
+                                * when there are long sequences of replaced
+                                * dead code.
+                                */
+                               jmp_offset = -2;
+                       else
+                               jmp_offset = addrs[i + insn->off] - addrs[i];
+
                        if (!jmp_offset)
                                /* optimize out nop jumps */
                                break;
@@ -1226,6 +1236,7 @@ skip_init_addrs:
        for (pass = 0; pass < 20 || image; pass++) {
                proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
                if (proglen <= 0) {
+out_image:
                        image = NULL;
                        if (header)
                                bpf_jit_binary_free(header);
@@ -1236,8 +1247,7 @@ skip_init_addrs:
                        if (proglen != oldproglen) {
                                pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
                                       proglen, oldproglen);
-                               prog = orig_prog;
-                               goto out_addrs;
+                               goto out_image;
                        }
                        break;
                }
@@ -1273,7 +1283,7 @@ skip_init_addrs:
                prog = orig_prog;
        }
 
-       if (!prog->is_func || extra_pass) {
+       if (!image || !prog->is_func || extra_pass) {
 out_addrs:
                kfree(addrs);
                kfree(jit_data);
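
On the "-2" special case above: the JIT emits the short form of the jump here, and an x86 short jump (opcode 0xEB, rel8) measures its displacement from the end of the two-byte instruction, so a jump to its own first byte is EB FE, that is, an offset of -2. A small sketch of the arithmetic with hypothetical addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Hypothetical address of a 2-byte short jump in the JIT image. */
        uint64_t insn = 0x1000;
        uint64_t next = insn + 2;       /* rel8 is measured from the next insn */

        int64_t diff = (int64_t)insn - (int64_t)next;   /* jump back to our own first byte */
        int8_t rel8 = (int8_t)diff;                     /* fits: the offset is only -2     */

        printf("rel8 = %d, encoding: EB %02X\n", rel8, (unsigned int)(uint8_t)rel8);
        return 0;
}
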
index 8268987..19c1ff5 100644 (file)
@@ -65,6 +65,19 @@ static void __init xen_hvm_init_mem_mapping(void)
 {
        early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE);
        HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn));
+
+       /*
+        * The virtual address of the shared_info page has changed, so
+        * the vcpu_info pointer for VCPU 0 is now stale.
+        *
+        * The prepare_boot_cpu callback will re-initialize it via
+        * xen_vcpu_setup, but we can't rely on that to be called for
+        * old Xen versions (xen_have_vector_callback == 0).
+        *
+        * It is, in any case, bad to have a stale vcpu_info pointer
+        * so reset it now.
+        */
+       xen_vcpu_info_reset(0);
 }
 
 static void __init init_hvm_pv_info(void)
index c36d23a..357969a 100644 (file)
@@ -421,45 +421,33 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
 {
        unsigned long va = dtr->address;
        unsigned int size = dtr->size + 1;
-       unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE);
-       unsigned long frames[pages];
-       int f;
-
-       /*
-        * A GDT can be up to 64k in size, which corresponds to 8192
-        * 8-byte entries, or 16 4k pages..
-        */
+       unsigned long pfn, mfn;
+       int level;
+       pte_t *ptep;
+       void *virt;
 
-       BUG_ON(size > 65536);
+       /* @size should be at most GDT_SIZE which is smaller than PAGE_SIZE. */
+       BUG_ON(size > PAGE_SIZE);
        BUG_ON(va & ~PAGE_MASK);
 
-       for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
-               int level;
-               pte_t *ptep;
-               unsigned long pfn, mfn;
-               void *virt;
-
-               /*
-                * The GDT is per-cpu and is in the percpu data area.
-                * That can be virtually mapped, so we need to do a
-                * page-walk to get the underlying MFN for the
-                * hypercall.  The page can also be in the kernel's
-                * linear range, so we need to RO that mapping too.
-                */
-               ptep = lookup_address(va, &level);
-               BUG_ON(ptep == NULL);
-
-               pfn = pte_pfn(*ptep);
-               mfn = pfn_to_mfn(pfn);
-               virt = __va(PFN_PHYS(pfn));
+       /*
+        * The GDT is per-cpu and is in the percpu data area.
+        * That can be virtually mapped, so we need to do a
+        * page-walk to get the underlying MFN for the
+        * hypercall.  The page can also be in the kernel's
+        * linear range, so we need to RO that mapping too.
+        */
+       ptep = lookup_address(va, &level);
+       BUG_ON(ptep == NULL);
 
-               frames[f] = mfn;
+       pfn = pte_pfn(*ptep);
+       mfn = pfn_to_mfn(pfn);
+       virt = __va(PFN_PHYS(pfn));
 
-               make_lowmem_page_readonly((void *)va);
-               make_lowmem_page_readonly(virt);
-       }
+       make_lowmem_page_readonly((void *)va);
+       make_lowmem_page_readonly(virt);
 
-       if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
+       if (HYPERVISOR_set_gdt(&mfn, size / sizeof(struct desc_struct)))
                BUG();
 }
 
@@ -470,34 +458,22 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
 {
        unsigned long va = dtr->address;
        unsigned int size = dtr->size + 1;
-       unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE);
-       unsigned long frames[pages];
-       int f;
-
-       /*
-        * A GDT can be up to 64k in size, which corresponds to 8192
-        * 8-byte entries, or 16 4k pages..
-        */
+       unsigned long pfn, mfn;
+       pte_t pte;
 
-       BUG_ON(size > 65536);
+       /* @size should be at most GDT_SIZE which is smaller than PAGE_SIZE. */
+       BUG_ON(size > PAGE_SIZE);
        BUG_ON(va & ~PAGE_MASK);
 
-       for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
-               pte_t pte;
-               unsigned long pfn, mfn;
+       pfn = virt_to_pfn(va);
+       mfn = pfn_to_mfn(pfn);
 
-               pfn = virt_to_pfn(va);
-               mfn = pfn_to_mfn(pfn);
+       pte = pfn_pte(pfn, PAGE_KERNEL_RO);
 
-               pte = pfn_pte(pfn, PAGE_KERNEL_RO);
-
-               if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
-                       BUG();
-
-               frames[f] = mfn;
-       }
+       if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
+               BUG();
 
-       if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
+       if (HYPERVISOR_set_gdt(&mfn, size / sizeof(struct desc_struct)))
                BUG();
 }
 
index d33e7db..2d76106 100644 (file)
@@ -42,13 +42,11 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr)
 }
 EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
 
-static void xen_flush_tlb_all(void)
+static noinline void xen_flush_tlb_all(void)
 {
        struct mmuext_op *op;
        struct multicall_space mcs;
 
-       trace_xen_mmu_flush_tlb_all(0);
-
        preempt_disable();
 
        mcs = xen_mc_entry(sizeof(*op));
index 486c0a3..2c30cab 100644 (file)
@@ -1310,13 +1310,11 @@ unsigned long xen_read_cr2_direct(void)
        return this_cpu_read(xen_vcpu_info.arch.cr2);
 }
 
-static void xen_flush_tlb(void)
+static noinline void xen_flush_tlb(void)
 {
        struct mmuext_op *op;
        struct multicall_space mcs;
 
-       trace_xen_mmu_flush_tlb(0);
-
        preempt_disable();
 
        mcs = xen_mc_entry(sizeof(*op));
index f0ecd98..771ae97 100644 (file)
@@ -4934,8 +4934,16 @@ static void bfq_prepare_request(struct request *rq, struct bio *bio)
        bool new_queue = false;
        bool bfqq_already_existing = false, split = false;
 
-       if (!rq->elv.icq)
+       /*
+        * Even if we don't have an icq attached, we should still clear
+        * the scheduler pointers, as they might point to previously
+        * allocated bic/bfqq structs.
+        */
+       if (!rq->elv.icq) {
+               rq->elv.priv[0] = rq->elv.priv[1] = NULL;
                return;
+       }
+
        bic = icq_to_bic(rq->elv.icq);
 
        spin_lock_irq(&bfqd->lock);
index 1c16694..eb85cb8 100644 (file)
@@ -1177,26 +1177,20 @@ int blkcg_init_queue(struct request_queue *q)
 
        preloaded = !radix_tree_preload(GFP_KERNEL);
 
-       /*
-        * Make sure the root blkg exists and count the existing blkgs.  As
-        * @q is bypassing at this point, blkg_lookup_create() can't be
-        * used.  Open code insertion.
-        */
+       /* Make sure the root blkg exists. */
        rcu_read_lock();
        spin_lock_irq(q->queue_lock);
        blkg = blkg_create(&blkcg_root, q, new_blkg);
+       if (IS_ERR(blkg))
+               goto err_unlock;
+       q->root_blkg = blkg;
+       q->root_rl.blkg = blkg;
        spin_unlock_irq(q->queue_lock);
        rcu_read_unlock();
 
        if (preloaded)
                radix_tree_preload_end();
 
-       if (IS_ERR(blkg))
-               return PTR_ERR(blkg);
-
-       q->root_blkg = blkg;
-       q->root_rl.blkg = blkg;
-
        ret = blk_throtl_init(q);
        if (ret) {
                spin_lock_irq(q->queue_lock);
@@ -1204,6 +1198,13 @@ int blkcg_init_queue(struct request_queue *q)
                spin_unlock_irq(q->queue_lock);
        }
        return ret;
+
+err_unlock:
+       spin_unlock_irq(q->queue_lock);
+       rcu_read_unlock();
+       if (preloaded)
+               radix_tree_preload_end();
+       return PTR_ERR(blkg);
 }
 
 /**
@@ -1410,9 +1411,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
        __clear_bit(pol->plid, q->blkcg_pols);
 
        list_for_each_entry(blkg, &q->blkg_list, q_node) {
-               /* grab blkcg lock too while removing @pd from @blkg */
-               spin_lock(&blkg->blkcg->lock);
-
                if (blkg->pd[pol->plid]) {
                        if (!blkg->pd[pol->plid]->offline &&
                            pol->pd_offline_fn) {
@@ -1422,8 +1420,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
                        pol->pd_free_fn(blkg->pd[pol->plid]);
                        blkg->pd[pol->plid] = NULL;
                }
-
-               spin_unlock(&blkg->blkcg->lock);
        }
 
        spin_unlock_irq(q->queue_lock);
index 806ce24..85909b4 100644 (file)
@@ -201,6 +201,10 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
        rq->part = NULL;
        seqcount_init(&rq->gstate_seq);
        u64_stats_init(&rq->aborted_gstate_sync);
+       /*
+        * See the comment in blk_mq_init_request()
+        */
+       WRITE_ONCE(rq->gstate, MQ_RQ_GEN_INC);
 }
 EXPORT_SYMBOL(blk_rq_init);
 
@@ -915,7 +919,6 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 
        while (true) {
                bool success = false;
-               int ret;
 
                rcu_read_lock();
                if (percpu_ref_tryget_live(&q->q_usage_counter)) {
@@ -947,14 +950,12 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
                 */
                smp_rmb();
 
-               ret = wait_event_interruptible(q->mq_freeze_wq,
-                               (atomic_read(&q->mq_freeze_depth) == 0 &&
-                                (preempt || !blk_queue_preempt_only(q))) ||
-                               blk_queue_dying(q));
+               wait_event(q->mq_freeze_wq,
+                          (atomic_read(&q->mq_freeze_depth) == 0 &&
+                           (preempt || !blk_queue_preempt_only(q))) ||
+                          blk_queue_dying(q));
                if (blk_queue_dying(q))
                        return -ENODEV;
-               if (ret)
-                       return ret;
        }
 }
 
index 0dc9e34..9ce9cac 100644 (file)
@@ -95,18 +95,15 @@ static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
 {
        struct mq_inflight *mi = priv;
 
-       if (blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) {
-               /*
-                * index[0] counts the specific partition that was asked
-                * for. index[1] counts the ones that are active on the
-                * whole device, so increment that if mi->part is indeed
-                * a partition, and not a whole device.
-                */
-               if (rq->part == mi->part)
-                       mi->inflight[0]++;
-               if (mi->part->partno)
-                       mi->inflight[1]++;
-       }
+       /*
+        * index[0] counts the specific partition that was asked for. index[1]
+        * counts the ones that are active on the whole device, so increment
+        * that if mi->part is indeed a partition, and not a whole device.
+        */
+       if (rq->part == mi->part)
+               mi->inflight[0]++;
+       if (mi->part->partno)
+               mi->inflight[1]++;
 }
 
 void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
@@ -118,6 +115,25 @@ void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
        blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
 }
 
+static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
+                                    struct request *rq, void *priv,
+                                    bool reserved)
+{
+       struct mq_inflight *mi = priv;
+
+       if (rq->part == mi->part)
+               mi->inflight[rq_data_dir(rq)]++;
+}
+
+void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
+                        unsigned int inflight[2])
+{
+       struct mq_inflight mi = { .part = part, .inflight = inflight, };
+
+       inflight[0] = inflight[1] = 0;
+       blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi);
+}
+
 void blk_freeze_queue_start(struct request_queue *q)
 {
        int freeze_depth;
@@ -2042,6 +2058,13 @@ static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
 
        seqcount_init(&rq->gstate_seq);
        u64_stats_init(&rq->aborted_gstate_sync);
+       /*
+        * start gstate with gen 1 instead of 0, otherwise it will be equal
+        * to aborted_gstate, and be identified as timed out by
+        * blk_mq_terminate_expired.
+        */
+       WRITE_ONCE(rq->gstate, MQ_RQ_GEN_INC);
+
        return 0;
 }
 
@@ -2329,7 +2352,7 @@ static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
 
 static void blk_mq_map_swqueue(struct request_queue *q)
 {
-       unsigned int i;
+       unsigned int i, hctx_idx;
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        struct blk_mq_tag_set *set = q->tag_set;
@@ -2346,8 +2369,23 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 
        /*
         * Map software to hardware queues.
+        *
+        * If the cpu isn't present, the cpu is mapped to the first hctx.
         */
        for_each_possible_cpu(i) {
+               hctx_idx = q->mq_map[i];
+               /* unmapped hw queue can be remapped after CPU topo changed */
+               if (!set->tags[hctx_idx] &&
+                   !__blk_mq_alloc_rq_map(set, hctx_idx)) {
+                       /*
+                        * If tags initialization fails for some hctx,
+                        * that hctx won't be brought online.  In this
+                        * case, remap the current ctx to hctx[0] which
+                        * is guaranteed to always have tags allocated
+                        */
+                       q->mq_map[i] = 0;
+               }
+
                ctx = per_cpu_ptr(q->queue_ctx, i);
                hctx = blk_mq_map_queue(q, i);
 
@@ -2359,8 +2397,21 @@ static void blk_mq_map_swqueue(struct request_queue *q)
        mutex_unlock(&q->sysfs_lock);
 
        queue_for_each_hw_ctx(q, hctx, i) {
-               /* every hctx should get mapped by at least one CPU */
-               WARN_ON(!hctx->nr_ctx);
+               /*
+                * If no software queues are mapped to this hardware queue,
+                * disable it and free the request entries.
+                */
+               if (!hctx->nr_ctx) {
+                       /* Never unmap queue 0.  We need it as a
+                        * fallback in case a new remap fails to
+                        * allocate tags.
+                        */
+                       if (i && set->tags[i])
+                               blk_mq_free_map_and_requests(set, i);
+
+                       hctx->tags = NULL;
+                       continue;
+               }
 
                hctx->tags = set->tags[i];
                WARN_ON(!hctx->tags);
index 88c558f..e1bb420 100644 (file)
@@ -7,6 +7,9 @@
 
 struct blk_mq_tag_set;
 
+/**
+ * struct blk_mq_ctx - State for a software queue facing the submitting CPUs
+ */
 struct blk_mq_ctx {
        struct {
                spinlock_t              lock;
@@ -185,7 +188,9 @@ static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
 }
 
 void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
-                       unsigned int inflight[2]);
+                     unsigned int inflight[2]);
+void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
+                        unsigned int inflight[2]);
 
 static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
 {
index dc7e089..c4513fe 100644 (file)
@@ -82,6 +82,18 @@ void part_in_flight(struct request_queue *q, struct hd_struct *part,
        }
 }
 
+void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
+                      unsigned int inflight[2])
+{
+       if (q->mq_ops) {
+               blk_mq_in_flight_rw(q, part, inflight);
+               return;
+       }
+
+       inflight[0] = atomic_read(&part->in_flight[0]);
+       inflight[1] = atomic_read(&part->in_flight[1]);
+}
+
 struct hd_struct *__disk_get_part(struct gendisk *disk, int partno)
 {
        struct disk_part_tbl *ptbl = rcu_dereference(disk->part_tbl);
index 08dabcd..db57cce 100644 (file)
@@ -145,13 +145,15 @@ ssize_t part_stat_show(struct device *dev,
                jiffies_to_msecs(part_stat_read(p, time_in_queue)));
 }
 
-ssize_t part_inflight_show(struct device *dev,
-                       struct device_attribute *attr, char *buf)
+ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
+                          char *buf)
 {
        struct hd_struct *p = dev_to_part(dev);
+       struct request_queue *q = part_to_disk(p)->queue;
+       unsigned int inflight[2];
 
-       return sprintf(buf, "%8u %8u\n", atomic_read(&p->in_flight[0]),
-               atomic_read(&p->in_flight[1]));
+       part_in_flight_rw(q, p, inflight);
+       return sprintf(buf, "%8u %8u\n", inflight[0], inflight[1]);
 }
 
 #ifdef CONFIG_FAIL_MAKE_REQUEST
index 1d5290c..0ee632b 100644 (file)
@@ -204,9 +204,14 @@ static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
 
        down_read(&crypto_alg_sem);
        alg = __crypto_alg_lookup(name, type | test, mask | test);
-       if (!alg && test)
-               alg = __crypto_alg_lookup(name, type, mask) ?
-                     ERR_PTR(-ELIBBAD) : NULL;
+       if (!alg && test) {
+               alg = __crypto_alg_lookup(name, type, mask);
+               if (alg && !crypto_is_larval(alg)) {
+                       /* Test failed */
+                       crypto_mod_put(alg);
+                       alg = ERR_PTR(-ELIBBAD);
+               }
+       }
        up_read(&crypto_alg_sem);
 
        return alg;
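
The crypto_alg_lookup() change distinguishes "not found" from "found, but its self-test failed": in the latter case the extra reference is dropped and ERR_PTR(-ELIBBAD) is returned instead of a bare NULL. A compact userspace sketch of that three-way result, using a minimal ERR_PTR/IS_ERR emulation (the real helpers live in include/linux/err.h, ELIBBAD is a Linux errno, and the reference counting is omitted here):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Minimal ERR_PTR/IS_ERR emulation for illustration only. */
#define MAX_ERRNO 4095
static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct alg { const char *name; int tests_passed; };

static struct alg sha_alg = { "sha256-generic", 0 };    /* pretend the self-test failed */

/* NULL: not found.  ERR_PTR(-ELIBBAD): found, but its self-test failed. */
static struct alg *alg_lookup(const char *name)
{
        struct alg *alg = strcmp(name, sha_alg.name) ? NULL : &sha_alg;

        if (!alg)
                return NULL;
        if (!alg->tests_passed)
                return ERR_PTR(-ELIBBAD);
        return alg;
}

int main(void)
{
        struct alg *alg = alg_lookup("sha256-generic");

        if (!alg)
                printf("not found\n");
        else if (IS_ERR(alg))
                printf("found but unusable, error %ld\n", PTR_ERR(alg));
        else
                printf("found %s\n", alg->name);
        return 0;
}
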
index 4faa278..466a112 100644 (file)
@@ -1134,8 +1134,10 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
        if (!drbg)
                return;
        kzfree(drbg->Vbuf);
+       drbg->Vbuf = NULL;
        drbg->V = NULL;
        kzfree(drbg->Cbuf);
+       drbg->Cbuf = NULL;
        drbg->C = NULL;
        kzfree(drbg->scratchpadbuf);
        drbg->scratchpadbuf = NULL;
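
The drbg fix clears Vbuf and Cbuf after kzfree() so a second pass through drbg_dealloc_state(), for example on an error path, cannot free the same buffers twice. The same defensive pattern in plain C, with free() standing in for kzfree() and an illustrative struct name:

#include <stdlib.h>

struct drbg_like_state {
        unsigned char *vbuf;
        unsigned char *cbuf;
};

/* Safe to call any number of times: freed pointers are reset to NULL. */
static void dealloc_state(struct drbg_like_state *s)
{
        if (!s)
                return;
        free(s->vbuf);
        s->vbuf = NULL;         /* a later call sees NULL, and free(NULL) is a no-op */
        free(s->cbuf);
        s->cbuf = NULL;
}

int main(void)
{
        struct drbg_like_state s = { malloc(32), malloc(64) };

        dealloc_state(&s);
        dealloc_state(&s);      /* harmless now; a double free without the NULLing */
        return 0;
}
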
index 76fb969..2f2e737 100644 (file)
@@ -2123,6 +2123,25 @@ static int __init intel_opregion_present(void)
        return opregion;
 }
 
+static bool dmi_is_desktop(void)
+{
+       const char *chassis_type;
+
+       chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
+       if (!chassis_type)
+               return false;
+
+       if (!strcmp(chassis_type, "3") || /*  3: Desktop */
+           !strcmp(chassis_type, "4") || /*  4: Low Profile Desktop */
+           !strcmp(chassis_type, "5") || /*  5: Pizza Box */
+           !strcmp(chassis_type, "6") || /*  6: Mini Tower */
+           !strcmp(chassis_type, "7") || /*  7: Tower */
+           !strcmp(chassis_type, "11"))  /* 11: Main Server Chassis */
+               return true;
+
+       return false;
+}
+
 int acpi_video_register(void)
 {
        int ret = 0;
@@ -2143,8 +2162,12 @@ int acpi_video_register(void)
         * win8 ready (where we also prefer the native backlight driver, so
         * normally the acpi_video code should not register there anyways).
         */
-       if (only_lcd == -1)
-               only_lcd = acpi_osi_is_win8();
+       if (only_lcd == -1) {
+               if (dmi_is_desktop() && acpi_osi_is_win8())
+                       only_lcd = true;
+               else
+                       only_lcd = false;
+       }
 
        dmi_check_system(video_dmi_table);
 
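
dmi_is_desktop() above treats SMBIOS chassis types 3-7 and 11 as desktop-like, and acpi_video_register() now auto-enables only_lcd only on such win8-ready systems. Outside the kernel the same value is commonly exposed by the dmi-id driver as /sys/class/dmi/id/chassis_type; a hedged sketch that reads it and applies the same table (the sysfs path is the only external assumption):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool chassis_is_desktop(const char *chassis_type)
{
        static const char *const desktop_types[] = {
                "3",    /*  3: Desktop */
                "4",    /*  4: Low Profile Desktop */
                "5",    /*  5: Pizza Box */
                "6",    /*  6: Mini Tower */
                "7",    /*  7: Tower */
                "11",   /* 11: Main Server Chassis */
        };

        for (size_t i = 0; i < sizeof(desktop_types) / sizeof(desktop_types[0]); i++)
                if (!strcmp(chassis_type, desktop_types[i]))
                        return true;
        return false;
}

int main(void)
{
        char type[16] = "";
        FILE *f = fopen("/sys/class/dmi/id/chassis_type", "r");

        if (f) {
                if (fscanf(f, "%15s", type) != 1)
                        type[0] = '\0';
                fclose(f);
        }
        printf("chassis_type=%s desktop=%d\n", type, chassis_is_desktop(type));
        return 0;
}
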
index ebb626f..4bde16f 100644 (file)
 #define pr_fmt(fmt) "ACPI: watchdog: " fmt
 
 #include <linux/acpi.h>
+#include <linux/dmi.h>
 #include <linux/ioport.h>
 #include <linux/platform_device.h>
 
 #include "internal.h"
 
+static const struct dmi_system_id acpi_watchdog_skip[] = {
+       {
+               /*
+                * On Lenovo Z50-70 there are two issues with the WDAT
+                * table. First, some of the instructions use RTC SRAM to
+                * store persistent information, which does not work well
+                * with the Linux RTC driver. Second, and more importantly,
+                * the instructions do not actually reset the system.
+                *
+                * On this particular system iTCO_wdt seems to work just
+                * fine so we prefer that over WDAT for now.
+                *
+                * See also https://bugzilla.kernel.org/show_bug.cgi?id=199033.
+                */
+               .ident = "Lenovo Z50-70",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "20354"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Z50-70"),
+               },
+       },
+       {}
+};
+
+static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
+{
+       const struct acpi_table_wdat *wdat = NULL;
+       acpi_status status;
+
+       if (acpi_disabled)
+               return NULL;
+
+       if (dmi_check_system(acpi_watchdog_skip))
+               return NULL;
+
+       status = acpi_get_table(ACPI_SIG_WDAT, 0,
+                               (struct acpi_table_header **)&wdat);
+       if (ACPI_FAILURE(status)) {
+               /* It is fine if there is no WDAT */
+               return NULL;
+       }
+
+       return wdat;
+}
+
 /**
  * Returns true if this system should prefer ACPI based watchdog instead of
  * the native one (which are typically the same hardware).
  */
 bool acpi_has_watchdog(void)
 {
-       struct acpi_table_header hdr;
-
-       if (acpi_disabled)
-               return false;
-
-       return ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_WDAT, 0, &hdr));
+       return !!acpi_watchdog_get_wdat();
 }
 EXPORT_SYMBOL_GPL(acpi_has_watchdog);
 
@@ -41,12 +82,10 @@ void __init acpi_watchdog_init(void)
        struct platform_device *pdev;
        struct resource *resources;
        size_t nresources = 0;
-       acpi_status status;
        int i;
 
-       status = acpi_get_table(ACPI_SIG_WDAT, 0,
-                               (struct acpi_table_header **)&wdat);
-       if (ACPI_FAILURE(status)) {
+       wdat = acpi_watchdog_get_wdat();
+       if (!wdat) {
                /* It is fine if there is no WDAT */
                return;
        }
index e1eee7a..f1cc4f9 100644 (file)
@@ -635,4 +635,26 @@ module_param_call(lid_init_state,
                  NULL, 0644);
 MODULE_PARM_DESC(lid_init_state, "Behavior for reporting LID initial state");
 
-module_acpi_driver(acpi_button_driver);
+static int acpi_button_register_driver(struct acpi_driver *driver)
+{
+       /*
+        * Modules such as nouveau.ko and i915.ko have a link-time dependency
+        * on acpi_lid_open().  If this driver is built as a module and simply
+        * returned the result of acpi_bus_register_driver() with ACPI
+        * disabled, those modules would not be loadable on ACPI-capable
+        * kernels booted in non-ACPI mode.
+        */
+       if (acpi_disabled)
+               return 0;
+
+       return acpi_bus_register_driver(driver);
+}
+
+static void acpi_button_unregister_driver(struct acpi_driver *driver)
+{
+       if (!acpi_disabled)
+               acpi_bus_unregister_driver(driver);
+}
+
+module_driver(acpi_button_driver, acpi_button_register_driver,
+              acpi_button_unregister_driver);
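
The button driver now registers with the ACPI bus only when ACPI is enabled, yet still reports success so that modules with a link-time dependency on its symbols keep loading. The general shape of that "register only if the subsystem is up, but never fail the module load" pattern, with illustrative names in place of the ACPI bus API:

#include <stdbool.h>
#include <stdio.h>

struct driver { const char *name; };

static bool subsystem_disabled = true;          /* stand-in for acpi_disabled */

static int bus_register_driver(struct driver *drv)
{
        printf("registered %s\n", drv->name);
        return 0;
}

static void bus_unregister_driver(struct driver *drv)
{
        printf("unregistered %s\n", drv->name);
}

/* Succeed silently when the subsystem is off so dependent modules still load. */
static int button_register_driver(struct driver *drv)
{
        if (subsystem_disabled)
                return 0;
        return bus_register_driver(drv);
}

static void button_unregister_driver(struct driver *drv)
{
        if (!subsystem_disabled)
                bus_unregister_driver(drv);
}

int main(void)
{
        struct driver button = { "button" };

        if (button_register_driver(&button) == 0)
                button_unregister_driver(&button);
        return 0;
}
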
index cc234e6..970dd87 100644 (file)
@@ -2166,10 +2166,10 @@ int __init acpi_scan_init(void)
        acpi_cmos_rtc_init();
        acpi_container_init();
        acpi_memory_hotplug_init();
+       acpi_watchdog_init();
        acpi_pnp_init();
        acpi_int340x_thermal_init();
        acpi_amba_init();
-       acpi_watchdog_init();
        acpi_init_lpit();
 
        acpi_scan_add_handler(&generic_device_handler);
index 99a1a65..974e584 100644 (file)
@@ -364,6 +364,19 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
                DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
                },
        },
+       /*
+        * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
+        * the Low Power S0 Idle firmware interface (see
+        * https://bugzilla.kernel.org/show_bug.cgi?id=199057).
+        */
+       {
+       .callback = init_no_lps0,
+       .ident = "ThinkPad X1 Tablet(2016)",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
+               },
+       },
        {},
 };
 
index 594c228..4a3ac31 100644 (file)
@@ -69,11 +69,12 @@ static ssize_t driver_override_show(struct device *_dev,
                                    struct device_attribute *attr, char *buf)
 {
        struct amba_device *dev = to_amba_device(_dev);
+       ssize_t len;
 
-       if (!dev->driver_override)
-               return 0;
-
-       return sprintf(buf, "%s\n", dev->driver_override);
+       device_lock(_dev);
+       len = sprintf(buf, "%s\n", dev->driver_override);
+       device_unlock(_dev);
+       return len;
 }
 
 static ssize_t driver_override_store(struct device *_dev,
@@ -81,9 +82,10 @@ static ssize_t driver_override_store(struct device *_dev,
                                     const char *buf, size_t count)
 {
        struct amba_device *dev = to_amba_device(_dev);
-       char *driver_override, *old = dev->driver_override, *cp;
+       char *driver_override, *old, *cp;
 
-       if (count > PATH_MAX)
+       /* We need to keep extra room for a newline */
+       if (count >= (PAGE_SIZE - 1))
                return -EINVAL;
 
        driver_override = kstrndup(buf, count, GFP_KERNEL);
@@ -94,12 +96,15 @@ static ssize_t driver_override_store(struct device *_dev,
        if (cp)
                *cp = '\0';
 
+       device_lock(_dev);
+       old = dev->driver_override;
        if (strlen(driver_override)) {
                dev->driver_override = driver_override;
        } else {
               kfree(driver_override);
               dev->driver_override = NULL;
        }
+       device_unlock(_dev);
 
        kfree(old);
 
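
The amba driver_override fix takes the device lock around both the read and the pointer swap, so the show path can never print a string the store path is about to free. A userspace sketch of the same read/swap-under-lock pattern using pthreads (the size check and newline stripping from the kernel code are omitted):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char *driver_override;                   /* protected by lock */

/* show: format the current value while holding the lock */
static void override_show(char *buf, size_t len)
{
        pthread_mutex_lock(&lock);
        snprintf(buf, len, "%s\n", driver_override ? driver_override : "");
        pthread_mutex_unlock(&lock);
}

/* store: swap in the new value under the lock, free the old one outside it */
static void override_store(const char *val)
{
        char *new_val = *val ? strdup(val) : NULL;
        char *old;

        pthread_mutex_lock(&lock);
        old = driver_override;
        driver_override = new_val;
        pthread_mutex_unlock(&lock);

        free(old);
}

int main(void)
{
        char buf[64];

        override_store("vfio-platform");
        override_show(buf, sizeof(buf));
        printf("%s", buf);
        override_store("");                     /* clears the override */
        return 0;
}

Build with cc -pthread; freeing the old string only after dropping the lock mirrors the kfree(old) placement in the patch.
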
index 764b63a..e578eee 100644 (file)
@@ -2839,6 +2839,14 @@ static void binder_transaction(struct binder_proc *proc,
                        else
                                return_error = BR_DEAD_REPLY;
                        mutex_unlock(&context->context_mgr_node_lock);
+                       if (target_node && target_proc == proc) {
+                               binder_user_error("%d:%d got transaction to context manager from process owning it\n",
+                                                 proc->pid, thread->pid);
+                               return_error = BR_FAILED_REPLY;
+                               return_error_param = -EINVAL;
+                               return_error_line = __LINE__;
+                               goto err_invalid_target_handle;
+                       }
                }
                if (!target_node) {
                        /*
index 1ff1779..6389c88 100644 (file)
@@ -698,7 +698,7 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
 
        DPRINTK("ENTER\n");
 
-       ahci_stop_engine(ap);
+       hpriv->stop_engine(ap);
 
        rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
                                 deadline, &online, NULL);
@@ -724,7 +724,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
        bool online;
        int rc;
 
-       ahci_stop_engine(ap);
+       hpriv->stop_engine(ap);
 
        /* clear D2H reception area to properly wait for D2H FIS */
        ata_tf_init(link->device, &tf);
@@ -788,7 +788,7 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
 
        DPRINTK("ENTER\n");
 
-       ahci_stop_engine(ap);
+       hpriv->stop_engine(ap);
 
        for (i = 0; i < 2; i++) {
                u16 val;
index 4356ef1..824bd39 100644 (file)
@@ -350,7 +350,6 @@ struct ahci_host_priv {
        u32                     em_msg_type;    /* EM message type */
        bool                    got_runtime_pm; /* Did we do pm_runtime_get? */
        struct clk              *clks[AHCI_MAX_CLKS]; /* Optional */
-       struct reset_control    *rsts;          /* Optional */
        struct regulator        **target_pwrs;  /* Optional */
        /*
         * If platform uses PHYs. There is a 1:1 relation between the port number and
@@ -366,6 +365,13 @@ struct ahci_host_priv {
         * be overridden anytime before the host is activated.
         */
        void                    (*start_engine)(struct ata_port *ap);
+       /*
+        * Optional ahci_stop_engine override; if not set, this gets set to
+        * the default ahci_stop_engine during ahci_save_initial_config and
+        * can be overridden anytime before the host is activated.
+        */
+       int                     (*stop_engine)(struct ata_port *ap);
+
        irqreturn_t             (*irq_handler)(int irq, void *dev_instance);
 
        /* only required for per-port MSI(-X) support */
index de7128d..0045dac 100644 (file)
@@ -62,6 +62,60 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
        writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
 }
 
+/**
+ * ahci_mvebu_stop_engine
+ *
+ * @ap:        Target ata port
+ *
+ * Errata Ref#226 - SATA Disk HOT swap issue when connected through
+ * Port Multiplier in FIS-based Switching mode.
+ *
+ * To avoid the issue, according to the design, bits[11:8, 0] of
+ * register PxFBS are cleared when Port Command and Status (0x18) bit[0]
+ * changes its value from 1 to 0, i.e. the falling edge of Port
+ * Command and Status bit[0] sends a pulse that resets PxFBS
+ * bits[11:8, 0].
+ *
+ * This function overrides "ahci_stop_engine" from libahci.c by adding
+ * the mvebu workaround (WA): save the PxFBS value before the PxCMD ST
+ * write of 0, then restore the PxFBS value.
+ *
+ * Return: 0 on success; Error code otherwise.
+ */
+int ahci_mvebu_stop_engine(struct ata_port *ap)
+{
+       void __iomem *port_mmio = ahci_port_base(ap);
+       u32 tmp, port_fbs;
+
+       tmp = readl(port_mmio + PORT_CMD);
+
+       /* check if the HBA is idle */
+       if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
+               return 0;
+
+       /* save the port PxFBS register for later restore */
+       port_fbs = readl(port_mmio + PORT_FBS);
+
+       /* setting HBA to idle */
+       tmp &= ~PORT_CMD_START;
+       writel(tmp, port_mmio + PORT_CMD);
+
+       /*
+        * The PxCMD bit #15 signal doesn't clear PxFBS; restore the
+        * PxFBS register right after clearing the PxCMD ST bit, with
+        * no need to wait for PxCMD bit #15.
+        */
+       writel(port_fbs, port_mmio + PORT_FBS);
+
+       /* wait for engine to stop. This could be as long as 500 msec */
+       tmp = ata_wait_register(ap, port_mmio + PORT_CMD,
+                               PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
+       if (tmp & PORT_CMD_LIST_ON)
+               return -EIO;
+
+       return 0;
+}
+
 #ifdef CONFIG_PM_SLEEP
 static int ahci_mvebu_suspend(struct platform_device *pdev, pm_message_t state)
 {
@@ -112,6 +166,8 @@ static int ahci_mvebu_probe(struct platform_device *pdev)
        if (rc)
                return rc;
 
+       hpriv->stop_engine = ahci_mvebu_stop_engine;
+
        if (of_device_is_compatible(pdev->dev.of_node,
                                    "marvell,armada-380-ahci")) {
                dram = mv_mbus_dram_info();
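
ahci_mvebu_stop_engine() clears PxCMD.ST, restores the saved PxFBS value, and then polls PxCMD until the list-running bit drops, giving up after roughly 500 ms. Leaving the PxFBS save/restore aside, the stop-and-poll shape can be sketched with fake register accessors standing in for readl()/writel():

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define CMD_START    (1u << 0)
#define CMD_LIST_ON  (1u << 15)

static uint32_t fake_port_cmd = CMD_START | CMD_LIST_ON;

/* Fake register accessors; the driver uses readl()/writel() on MMIO instead. */
static uint32_t reg_read(void) { return fake_port_cmd; }
static void reg_write(uint32_t val)
{
        fake_port_cmd = val;
        if (!(val & CMD_START))
                fake_port_cmd &= ~CMD_LIST_ON;  /* pretend the engine stops promptly */
}

/* Poll until @mask clears or the timeout expires, checking once per millisecond. */
static int wait_bit_clear(uint32_t mask, int timeout_ms)
{
        while (timeout_ms-- > 0) {
                if (!(reg_read() & mask))
                        return 0;
                usleep(1000);
        }
        return -1;                              /* the driver returns -EIO here */
}

int main(void)
{
        reg_write(reg_read() & ~CMD_START);     /* request that the engine stop */
        printf("engine stopped: %s\n",
               wait_bit_clear(CMD_LIST_ON, 500) == 0 ? "yes" : "no");
        return 0;
}
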
index 2685f28..cfdef4d 100644 (file)
@@ -96,7 +96,7 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class,
 
        DPRINTK("ENTER\n");
 
-       ahci_stop_engine(ap);
+       hpriv->stop_engine(ap);
 
        /*
         * There is a errata on ls1021a Rev1.0 and Rev2.0 which is:
index c2b5941..ad58da7 100644 (file)
@@ -165,7 +165,7 @@ static int xgene_ahci_restart_engine(struct ata_port *ap)
                                    PORT_CMD_ISSUE, 0x0, 1, 100))
                  return -EBUSY;
 
-       ahci_stop_engine(ap);
+       hpriv->stop_engine(ap);
        ahci_start_fis_rx(ap);
 
        /*
@@ -421,7 +421,7 @@ static int xgene_ahci_hardreset(struct ata_link *link, unsigned int *class,
        portrxfis_saved = readl(port_mmio + PORT_FIS_ADDR);
        portrxfishi_saved = readl(port_mmio + PORT_FIS_ADDR_HI);
 
-       ahci_stop_engine(ap);
+       hpriv->stop_engine(ap);
 
        rc = xgene_ahci_do_hardreset(link, deadline, &online);
 
index 7adcf3c..e5d9097 100644 (file)
@@ -560,6 +560,9 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
        if (!hpriv->start_engine)
                hpriv->start_engine = ahci_start_engine;
 
+       if (!hpriv->stop_engine)
+               hpriv->stop_engine = ahci_stop_engine;
+
        if (!hpriv->irq_handler)
                hpriv->irq_handler = ahci_single_level_irq_intr;
 }
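
ahci_save_initial_config() now fills in hpriv->stop_engine with the generic ahci_stop_engine whenever a platform driver did not install its own, so every later caller can go through the hook unconditionally. A small sketch of that "fill missing ops with defaults" pattern (the struct and function names here are illustrative, not the libahci types):

#include <stdio.h>

struct port { const char *name; };

struct host_ops {
        void (*start_engine)(struct port *);
        int (*stop_engine)(struct port *);
};

static void generic_start(struct port *p) { printf("%s: generic start\n", p->name); }
static int generic_stop(struct port *p) { printf("%s: generic stop\n", p->name); return 0; }
static int quirky_stop(struct port *p) { printf("%s: quirky stop\n", p->name); return 0; }

/* Fill in any hook the platform driver left unset. */
static void save_initial_config(struct host_ops *ops)
{
        if (!ops->start_engine)
                ops->start_engine = generic_start;
        if (!ops->stop_engine)
                ops->stop_engine = generic_stop;
}

int main(void)
{
        struct port p = { "ata1" };
        struct host_ops ops = { .stop_engine = quirky_stop };  /* overrides one hook */

        save_initial_config(&ops);
        ops.start_engine(&p);   /* falls back to the generic implementation */
        ops.stop_engine(&p);    /* uses the platform override */
        return 0;
}
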
@@ -897,9 +900,10 @@ static void ahci_start_port(struct ata_port *ap)
 static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
 {
        int rc;
+       struct ahci_host_priv *hpriv = ap->host->private_data;
 
        /* disable DMA */
-       rc = ahci_stop_engine(ap);
+       rc = hpriv->stop_engine(ap);
        if (rc) {
                *emsg = "failed to stop engine";
                return rc;
@@ -1310,7 +1314,7 @@ int ahci_kick_engine(struct ata_port *ap)
        int busy, rc;
 
        /* stop engine */
-       rc = ahci_stop_engine(ap);
+       rc = hpriv->stop_engine(ap);
        if (rc)
                goto out_restart;
 
@@ -1549,7 +1553,7 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
 
        DPRINTK("ENTER\n");
 
-       ahci_stop_engine(ap);
+       hpriv->stop_engine(ap);
 
        /* clear D2H reception area to properly wait for D2H FIS */
        ata_tf_init(link->device, &tf);
@@ -2075,14 +2079,14 @@ void ahci_error_handler(struct ata_port *ap)
 
        if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
                /* restart engine */
-               ahci_stop_engine(ap);
+               hpriv->stop_engine(ap);
                hpriv->start_engine(ap);
        }
 
        sata_pmp_error_handler(ap);
 
        if (!ata_dev_enabled(ap->link.device))
-               ahci_stop_engine(ap);
+               hpriv->stop_engine(ap);
 }
 EXPORT_SYMBOL_GPL(ahci_error_handler);
 
@@ -2129,7 +2133,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
                return;
 
        /* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */
-       rc = ahci_stop_engine(ap);
+       rc = hpriv->stop_engine(ap);
        if (rc)
                return;
 
@@ -2189,7 +2193,7 @@ static void ahci_enable_fbs(struct ata_port *ap)
                return;
        }
 
-       rc = ahci_stop_engine(ap);
+       rc = hpriv->stop_engine(ap);
        if (rc)
                return;
 
@@ -2222,7 +2226,7 @@ static void ahci_disable_fbs(struct ata_port *ap)
                return;
        }
 
-       rc = ahci_stop_engine(ap);
+       rc = hpriv->stop_engine(ap);
        if (rc)
                return;
 
index 46a7624..30cc8f1 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/phy/phy.h>
 #include <linux/pm_runtime.h>
 #include <linux/of_platform.h>
-#include <linux/reset.h>
 #include "ahci.h"
 
 static void ahci_host_stop(struct ata_host *host);
@@ -196,8 +195,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_disable_regulators);
  * following order:
  * 1) Regulator
  * 2) Clocks (through ahci_platform_enable_clks)
- * 3) Resets
- * 4) Phys
+ * 3) Phys
  *
  * If resource enabling fails at any point the previous enabled resources
  * are disabled in reverse order.
@@ -217,19 +215,12 @@ int ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
        if (rc)
                goto disable_regulator;
 
-       rc = reset_control_deassert(hpriv->rsts);
-       if (rc)
-               goto disable_clks;
-
        rc = ahci_platform_enable_phys(hpriv);
        if (rc)
-               goto disable_resets;
+               goto disable_clks;
 
        return 0;
 
-disable_resets:
-       reset_control_assert(hpriv->rsts);
-
 disable_clks:
        ahci_platform_disable_clks(hpriv);
 
@@ -248,15 +239,12 @@ EXPORT_SYMBOL_GPL(ahci_platform_enable_resources);
  * following order:
  * 1) Phys
  * 2) Clocks (through ahci_platform_disable_clks)
- * 3) Resets
- * 4) Regulator
+ * 3) Regulator
  */
 void ahci_platform_disable_resources(struct ahci_host_priv *hpriv)
 {
        ahci_platform_disable_phys(hpriv);
 
-       reset_control_assert(hpriv->rsts);
-
        ahci_platform_disable_clks(hpriv);
 
        ahci_platform_disable_regulators(hpriv);
@@ -405,12 +393,6 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
                hpriv->clks[i] = clk;
        }
 
-       hpriv->rsts = devm_reset_control_array_get_optional_shared(dev);
-       if (IS_ERR(hpriv->rsts)) {
-               rc = PTR_ERR(hpriv->rsts);
-               goto err_out;
-       }
-
        hpriv->nports = child_nodes = of_get_child_count(dev->of_node);
 
        /*
index 8bc71ca..68596bd 100644 (file)
@@ -4549,6 +4549,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
                                                ATA_HORKAGE_ZERO_AFTER_TRIM |
                                                ATA_HORKAGE_NOLPM, },
 
+       /* This specific Samsung model/firmware-rev does not handle LPM well */
+       { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
+
+       /* Sandisk devices which are known to not handle LPM well */
+       { "SanDisk SD7UB3Q*G1001",      NULL,   ATA_HORKAGE_NOLPM, },
+
        /* devices that don't properly handle queued TRIM commands */
        { "Micron_M500_*",              NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
index c016829..513b260 100644 (file)
@@ -175,8 +175,8 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
 { }
 #endif /* CONFIG_PM */
 
-static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
-                                va_list args)
+static __printf(2, 0) void __ata_ehi_pushv_desc(struct ata_eh_info *ehi,
+                                const char *fmt, va_list args)
 {
        ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
                                     ATA_EH_DESC_LEN - ehi->desc_len,
index aafb8cc..e67815b 100644 (file)
@@ -410,7 +410,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
        int rc;
        int retry = 100;
 
-       ahci_stop_engine(ap);
+       hpriv->stop_engine(ap);
 
        /* clear D2H reception area to properly wait for D2H FIS */
        ata_tf_init(link->device, &tf);
index 4b1995e..010ca10 100644 (file)
@@ -285,13 +285,13 @@ static const struct sil24_cerr_info {
        [PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_RESET,
                                     "protocol mismatch" },
        [PORT_CERR_DIRECTION]   = { AC_ERR_HSM, ATA_EH_RESET,
-                                   "data directon mismatch" },
+                                   "data direction mismatch" },
        [PORT_CERR_UNDERRUN]    = { AC_ERR_HSM, ATA_EH_RESET,
                                    "ran out of SGEs while writing" },
        [PORT_CERR_OVERRUN]     = { AC_ERR_HSM, ATA_EH_RESET,
                                    "ran out of SGEs while reading" },
        [PORT_CERR_PKT_PROT]    = { AC_ERR_HSM, ATA_EH_RESET,
-                                   "invalid data directon for ATAPI CDB" },
+                                   "invalid data direction for ATAPI CDB" },
        [PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET,
                                     "SGT not on qword boundary" },
        [PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,
index d97c056..4e46dc9 100644 (file)
@@ -191,7 +191,7 @@ static char *res_strings[] = {
        "reserved 37",
        "reserved 38",
        "reserved 39",
-       "reseverd 40",
+       "reserved 40",
        "reserved 41", 
        "reserved 42", 
        "reserved 43", 
index 1ef67db..9c9a229 100644 (file)
@@ -28,6 +28,7 @@
 #include <asm/io.h>
 #include <linux/atomic.h>
 #include <linux/uaccess.h>
+#include <linux/nospec.h>
 
 #include "uPD98401.h"
 #include "uPD98402.h"
@@ -1458,6 +1459,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
                                        return -EFAULT;
                                if (pool < 0 || pool > ZATM_LAST_POOL)
                                        return -EINVAL;
+                               pool = array_index_nospec(pool,
+                                                         ZATM_LAST_POOL + 1);
                                spin_lock_irqsave(&zatm_dev->lock, flags);
                                info = zatm_dev->pool_info[pool];
                                if (cmd == ZATM_GETPOOLZ) {
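
The zatm change adds array_index_nospec() after the bounds check so a mispredicted branch cannot be used to index pool_info out of bounds. The sketch below only illustrates the call shape with a plain clamp and an illustrative pool count; it does not reproduce the speculation barrier that the real kernel helper provides:

#include <stdio.h>

#define LAST_POOL 63

/*
 * Illustrative stand-in for array_index_nospec(idx, size): returns idx when
 * idx < size and 0 otherwise.  The real kernel helper additionally keeps the
 * clamp effective under speculative execution, which this sketch does not.
 */
static unsigned int index_clamp(unsigned int idx, unsigned int size)
{
        return idx < size ? idx : 0;
}

int main(void)
{
        int pool_info[LAST_POOL + 1] = { 0 };
        int pool = 7;                   /* in the driver this comes from userspace */

        if (pool < 0 || pool > LAST_POOL)
                return 1;
        pool = index_clamp(pool, LAST_POOL + 1);
        printf("pool_info[%d] = %d\n", pool, pool_info[pool]);
        return 0;
}
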
index 1e6396b..597d408 100644 (file)
@@ -312,8 +312,9 @@ static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
  * This checks whether the memory was allocated from the per-device
  * coherent memory pool and if so, maps that memory to the provided vma.
  *
- * Returns 1 if we correctly mapped the memory, or 0 if the caller should
- * proceed with mapping memory from generic pools.
+ * Returns 1 if @vaddr belongs to the device coherent pool and the caller
+ * should return @ret, or 0 if they should proceed with mapping memory from
+ * generic areas.
  */
 int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
                           void *vaddr, size_t size, int *ret)
index 3b11835..d82566d 100644 (file)
@@ -226,7 +226,6 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 #ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
        unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-       unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
        unsigned long off = vma->vm_pgoff;
 
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -234,12 +233,11 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;
 
-       if (off < count && user_count <= (count - off)) {
+       if (off < count && user_count <= (count - off))
                ret = remap_pfn_range(vma, vma->vm_start,
-                                     pfn + off,
+                                     page_to_pfn(virt_to_page(cpu_addr)) + off,
                                      user_count << PAGE_SHIFT,
                                      vma->vm_page_prot);
-       }
 #endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
 
        return ret;
index 31b5015..3583541 100644 (file)
@@ -537,8 +537,8 @@ exit:
 }
 
 /**
- * fw_load_sysfs_fallback - load a firmware via the syfs fallback mechanism
- * @fw_sysfs: firmware syfs information for the firmware to load
+ * fw_load_sysfs_fallback - load a firmware via the sysfs fallback mechanism
+ * @fw_sysfs: firmware sysfs information for the firmware to load
  * @opt_flags: flags of options, FW_OPT_*
  * @timeout: timeout to wait for the load
  *
index dfebc64..f825567 100644 (file)
@@ -6,7 +6,7 @@
 #include <linux/device.h>
 
 /**
- * struct firmware_fallback_config - firmware fallback configuratioon settings
+ * struct firmware_fallback_config - firmware fallback configuration settings
  *
  * Helps describe and fine tune the fallback mechanism.
  *
index c9d0449..5d4e316 100644 (file)
@@ -451,25 +451,47 @@ static int lo_req_flush(struct loop_device *lo, struct request *rq)
 static void lo_complete_rq(struct request *rq)
 {
        struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+       blk_status_t ret = BLK_STS_OK;
 
-       if (unlikely(req_op(cmd->rq) == REQ_OP_READ && cmd->use_aio &&
-                    cmd->ret >= 0 && cmd->ret < blk_rq_bytes(cmd->rq))) {
-               struct bio *bio = cmd->rq->bio;
-
-               bio_advance(bio, cmd->ret);
-               zero_fill_bio(bio);
+       if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
+           req_op(rq) != REQ_OP_READ) {
+               if (cmd->ret < 0)
+                       ret = BLK_STS_IOERR;
+               goto end_io;
        }
 
-       blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK);
+       /*
+        * Short READ - if we got some data, advance our request and
+        * retry it. If we got no data, end the rest with EIO.
+        */
+       if (cmd->ret) {
+               blk_update_request(rq, BLK_STS_OK, cmd->ret);
+               cmd->ret = 0;
+               blk_mq_requeue_request(rq, true);
+       } else {
+               if (cmd->use_aio) {
+                       struct bio *bio = rq->bio;
+
+                       while (bio) {
+                               zero_fill_bio(bio);
+                               bio = bio->bi_next;
+                       }
+               }
+               ret = BLK_STS_IOERR;
+end_io:
+               blk_mq_end_request(rq, ret);
+       }
 }
 
 static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
 {
+       struct request *rq = blk_mq_rq_from_pdu(cmd);
+
        if (!atomic_dec_and_test(&cmd->ref))
                return;
        kfree(cmd->bvec);
        cmd->bvec = NULL;
-       blk_mq_complete_request(cmd->rq);
+       blk_mq_complete_request(rq);
 }
 
 static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
@@ -487,7 +509,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
 {
        struct iov_iter iter;
        struct bio_vec *bvec;
-       struct request *rq = cmd->rq;
+       struct request *rq = blk_mq_rq_from_pdu(cmd);
        struct bio *bio = rq->bio;
        struct file *file = lo->lo_backing_file;
        unsigned int offset;
@@ -1702,15 +1724,16 @@ EXPORT_SYMBOL(loop_unregister_transfer);
 static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
 {
-       struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
-       struct loop_device *lo = cmd->rq->q->queuedata;
+       struct request *rq = bd->rq;
+       struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
+       struct loop_device *lo = rq->q->queuedata;
 
-       blk_mq_start_request(bd->rq);
+       blk_mq_start_request(rq);
 
        if (lo->lo_state != Lo_bound)
                return BLK_STS_IOERR;
 
-       switch (req_op(cmd->rq)) {
+       switch (req_op(rq)) {
        case REQ_OP_FLUSH:
        case REQ_OP_DISCARD:
        case REQ_OP_WRITE_ZEROES:
@@ -1723,8 +1746,8 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        /* always use the first bio's css */
 #ifdef CONFIG_BLK_CGROUP
-       if (cmd->use_aio && cmd->rq->bio && cmd->rq->bio->bi_css) {
-               cmd->css = cmd->rq->bio->bi_css;
+       if (cmd->use_aio && rq->bio && rq->bio->bi_css) {
+               cmd->css = rq->bio->bi_css;
                css_get(cmd->css);
        } else
 #endif
@@ -1736,8 +1759,9 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 static void loop_handle_cmd(struct loop_cmd *cmd)
 {
-       const bool write = op_is_write(req_op(cmd->rq));
-       struct loop_device *lo = cmd->rq->q->queuedata;
+       struct request *rq = blk_mq_rq_from_pdu(cmd);
+       const bool write = op_is_write(req_op(rq));
+       struct loop_device *lo = rq->q->queuedata;
        int ret = 0;
 
        if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
@@ -1745,12 +1769,12 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
                goto failed;
        }
 
-       ret = do_req_filebacked(lo, cmd->rq);
+       ret = do_req_filebacked(lo, rq);
  failed:
        /* complete non-aio request */
        if (!cmd->use_aio || ret) {
                cmd->ret = ret ? -EIO : 0;
-               blk_mq_complete_request(cmd->rq);
+               blk_mq_complete_request(rq);
        }
 }
 
@@ -1767,9 +1791,7 @@ static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
 {
        struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
 
-       cmd->rq = rq;
        kthread_init_work(&cmd->work, loop_queue_work);
-
        return 0;
 }
 
index 0f45416..b78de98 100644 (file)
@@ -66,7 +66,6 @@ struct loop_device {
 
 struct loop_cmd {
        struct kthread_work work;
-       struct request *rq;
        bool use_aio; /* use AIO interface to handle I/O */
        atomic_t ref; /* only for aio */
        long ret;
index 8e8b04c..33b36fe 100644 (file)
@@ -2366,7 +2366,9 @@ static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes)
        osd_req_op_cls_init(obj_req->osd_req, 0, CEPH_OSD_OP_CALL, "rbd",
                            "copyup");
        osd_req_op_cls_request_data_bvecs(obj_req->osd_req, 0,
-                                         obj_req->copyup_bvecs, bytes);
+                                         obj_req->copyup_bvecs,
+                                         obj_req->copyup_bvec_count,
+                                         bytes);
 
        switch (obj_req->img_request->op_type) {
        case OBJ_OP_WRITE:
index 64e066e..0e31884 100644 (file)
@@ -110,7 +110,7 @@ struct iwm {
 /* Select values for swim_select and swim_readbit */
 
 #define READ_DATA_0    0x074
-#define TWOMEG_DRIVE   0x075
+#define ONEMEG_DRIVE   0x075
 #define SINGLE_SIDED   0x076
 #define DRIVE_PRESENT  0x077
 #define DISK_IN                0x170
@@ -118,9 +118,9 @@ struct iwm {
 #define TRACK_ZERO     0x172
 #define TACHO          0x173
 #define READ_DATA_1    0x174
-#define MFM_MODE       0x175
+#define GCR_MODE       0x175
 #define SEEK_COMPLETE  0x176
-#define ONEMEG_MEDIA   0x177
+#define TWOMEG_MEDIA   0x177
 
 /* Bits in handshake register */
 
@@ -612,7 +612,6 @@ static void setup_medium(struct floppy_state *fs)
                struct floppy_struct *g;
                fs->disk_in = 1;
                fs->write_protected = swim_readbit(base, WRITE_PROT);
-               fs->type = swim_readbit(base, ONEMEG_MEDIA);
 
                if (swim_track00(base))
                        printk(KERN_ERR
@@ -620,6 +619,9 @@ static void setup_medium(struct floppy_state *fs)
 
                swim_track00(base);
 
+               fs->type = swim_readbit(base, TWOMEG_MEDIA) ?
+                       HD_MEDIA : DD_MEDIA;
+               fs->head_number = swim_readbit(base, SINGLE_SIDED) ? 1 : 2;
                get_floppy_geometry(fs, 0, &g);
                fs->total_secs = g->size;
                fs->secpercyl = g->head * g->sect;
@@ -646,7 +648,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
 
        swim_write(base, setup, S_IBM_DRIVE  | S_FCLK_DIV2);
        udelay(10);
-       swim_drive(base, INTERNAL_DRIVE);
+       swim_drive(base, fs->location);
        swim_motor(base, ON);
        swim_action(base, SETMFM);
        if (fs->ejected)
@@ -656,6 +658,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
                goto out;
        }
 
+       set_capacity(fs->disk, fs->total_secs);
+
        if (mode & FMODE_NDELAY)
                return 0;
 
@@ -727,14 +731,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode,
                if (copy_to_user((void __user *) param, (void *) &floppy_type,
                                 sizeof(struct floppy_struct)))
                        return -EFAULT;
-               break;
-
-       default:
-               printk(KERN_DEBUG "SWIM floppy_ioctl: unknown cmd %d\n",
-                      cmd);
-               return -ENOSYS;
+               return 0;
        }
-       return 0;
+       return -ENOTTY;
 }
 
 static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -795,7 +794,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
        struct swim_priv *swd = data;
        int drive = (*part & 3);
 
-       if (drive > swd->floppy_count)
+       if (drive >= swd->floppy_count)
                return NULL;
 
        *part = 0;
@@ -813,10 +812,9 @@ static int swim_add_floppy(struct swim_priv *swd, enum drive_location location)
 
        swim_motor(base, OFF);
 
-       if (swim_readbit(base, SINGLE_SIDED))
-               fs->head_number = 1;
-       else
-               fs->head_number = 2;
+       fs->type = HD_MEDIA;
+       fs->head_number = 2;
+
        fs->ref_count = 0;
        fs->ejected = 1;
 
@@ -834,10 +832,12 @@ static int swim_floppy_init(struct swim_priv *swd)
        /* scan floppy drives */
 
        swim_drive(base, INTERNAL_DRIVE);
-       if (swim_readbit(base, DRIVE_PRESENT))
+       if (swim_readbit(base, DRIVE_PRESENT) &&
+           !swim_readbit(base, ONEMEG_DRIVE))
                swim_add_floppy(swd, INTERNAL_DRIVE);
        swim_drive(base, EXTERNAL_DRIVE);
-       if (swim_readbit(base, DRIVE_PRESENT))
+       if (swim_readbit(base, DRIVE_PRESENT) &&
+           !swim_readbit(base, ONEMEG_DRIVE))
                swim_add_floppy(swd, EXTERNAL_DRIVE);
 
        /* register floppy drives */
@@ -861,7 +861,6 @@ static int swim_floppy_init(struct swim_priv *swd)
                                                              &swd->lock);
                if (!swd->unit[drive].disk->queue) {
                        err = -ENOMEM;
-                       put_disk(swd->unit[drive].disk);
                        goto exit_put_disks;
                }
                blk_queue_bounce_limit(swd->unit[drive].disk->queue,
@@ -911,7 +910,7 @@ static int swim_probe(struct platform_device *dev)
                goto out;
        }
 
-       swim_base = ioremap(res->start, resource_size(res));
+       swim_base = (struct swim __iomem *)res->start;
        if (!swim_base) {
                ret = -ENOMEM;
                goto out_release_io;
@@ -923,7 +922,7 @@ static int swim_probe(struct platform_device *dev)
        if (!get_swim_mode(swim_base)) {
                printk(KERN_INFO "SWIM device not found !\n");
                ret = -ENODEV;
-               goto out_iounmap;
+               goto out_release_io;
        }
 
        /* set platform driver data */
@@ -931,7 +930,7 @@ static int swim_probe(struct platform_device *dev)
        swd = kzalloc(sizeof(struct swim_priv), GFP_KERNEL);
        if (!swd) {
                ret = -ENOMEM;
-               goto out_iounmap;
+               goto out_release_io;
        }
        platform_set_drvdata(dev, swd);
 
@@ -945,8 +944,6 @@ static int swim_probe(struct platform_device *dev)
 
 out_kfree:
        kfree(swd);
-out_iounmap:
-       iounmap(swim_base);
 out_release_io:
        release_mem_region(res->start, resource_size(res));
 out:
@@ -974,8 +971,6 @@ static int swim_remove(struct platform_device *dev)
        for (drive = 0; drive < swd->floppy_count; drive++)
                floppy_eject(&swd->unit[drive]);
 
-       iounmap(swd->base);
-
        res = platform_get_resource(dev, IORESOURCE_MEM, 0);
        if (res)
                release_mem_region(res->start, resource_size(res));
index af51015..469541c 100644 (file)
@@ -148,7 +148,7 @@ struct swim3 {
 #define MOTOR_ON       2
 #define RELAX          3       /* also eject in progress */
 #define READ_DATA_0    4
-#define TWOMEG_DRIVE   5
+#define ONEMEG_DRIVE   5
 #define SINGLE_SIDED   6       /* drive or diskette is 4MB type? */
 #define DRIVE_PRESENT  7
 #define DISK_IN                8
@@ -156,9 +156,9 @@ struct swim3 {
 #define TRACK_ZERO     10
 #define TACHO          11
 #define READ_DATA_1    12
-#define MFM_MODE       13
+#define GCR_MODE       13
 #define SEEK_COMPLETE  14
-#define ONEMEG_MEDIA   15
+#define TWOMEG_MEDIA   15
 
 /* Definitions of values used in writing and formatting */
 #define DATA_ESCAPE    0x99
index c8c8b0b..b937cc1 100644 (file)
@@ -231,6 +231,7 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
@@ -263,7 +264,6 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
 
        /* QCA ROME chipset */
-       { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_QCA_ROME },
        { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
        { USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
        { USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME },
@@ -399,6 +399,13 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 3060"),
                },
        },
+       {
+               /* Dell XPS 9360 (QCA ROME device 0cf3:e300) */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
+               },
+       },
        {}
 };
 
@@ -2852,6 +2859,12 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
 }
 #endif
 
+static void btusb_check_needs_reset_resume(struct usb_interface *intf)
+{
+       if (dmi_check_system(btusb_needs_reset_resume_table))
+               interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
+}
+
 static int btusb_probe(struct usb_interface *intf,
                       const struct usb_device_id *id)
 {
@@ -2974,9 +2987,6 @@ static int btusb_probe(struct usb_interface *intf,
        hdev->send   = btusb_send_frame;
        hdev->notify = btusb_notify;
 
-       if (dmi_check_system(btusb_needs_reset_resume_table))
-               interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
-
 #ifdef CONFIG_PM
        err = btusb_config_oob_wake(hdev);
        if (err)
@@ -3064,6 +3074,7 @@ static int btusb_probe(struct usb_interface *intf,
                data->setup_on_usb = btusb_setup_qca;
                hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
                set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+               btusb_check_needs_reset_resume(intf);
        }
 
 #ifdef CONFIG_BT_HCIBTUSB_RTL
index d1c0b60..6dc177b 100644 (file)
@@ -33,6 +33,7 @@ config HISILICON_LPC
        bool "Support for ISA I/O space on HiSilicon Hip06/7"
        depends on ARM64 && (ARCH_HISI || COMPILE_TEST)
        select INDIRECT_PIO
+       select MFD_CORE if ACPI
        help
          Driver to enable I/O access to devices attached to the Low Pin
          Count bus on the HiSilicon Hip06/7 SoC.
index 8327478..bfc566d 100644 (file)
@@ -2371,7 +2371,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
        if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
                return media_changed(cdi, 1);
 
-       if ((unsigned int)arg >= cdi->capacity)
+       if (arg >= cdi->capacity)
                return -EINVAL;
 
        info = kmalloc(sizeof(*info), GFP_KERNEL);
index c381c8e..79d8c84 100644 (file)
@@ -195,7 +195,7 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int ty
        return 0;
 }
 
-int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
+static int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
 {
        size_t i;
        u32 *gp;
@@ -470,7 +470,7 @@ static int uninorth_free_gatt_table(struct agp_bridge_data *bridge)
        return 0;
 }
 
-void null_cache_flush(void)
+static void null_cache_flush(void)
 {
        mb();
 }
index 3cd3aae..cd888d4 100644 (file)
 #include <linux/ptrace.h>
 #include <linux/workqueue.h>
 #include <linux/irq.h>
+#include <linux/ratelimit.h>
 #include <linux/syscalls.h>
 #include <linux/completion.h>
 #include <linux/uuid.h>
@@ -438,6 +439,16 @@ static void _crng_backtrack_protect(struct crng_state *crng,
 static void process_random_ready_list(void);
 static void _get_random_bytes(void *buf, int nbytes);
 
+static struct ratelimit_state unseeded_warning =
+       RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
+static struct ratelimit_state urandom_warning =
+       RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
+
+static int ratelimit_disable __read_mostly;
+
+module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
+MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
+
 /**********************************************************************
  *
  * OS independent entropy store.   Here are the functions which handle
@@ -789,7 +800,7 @@ static void crng_initialize(struct crng_state *crng)
 }
 
 #ifdef CONFIG_NUMA
-static void numa_crng_init(void)
+static void do_numa_crng_init(struct work_struct *work)
 {
        int i;
        struct crng_state *crng;
@@ -810,6 +821,13 @@ static void numa_crng_init(void)
                kfree(pool);
        }
 }
+
+static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
+
+static void numa_crng_init(void)
+{
+       schedule_work(&numa_crng_init_work);
+}
 #else
 static void numa_crng_init(void) {}
 #endif
@@ -925,6 +943,18 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
                process_random_ready_list();
                wake_up_interruptible(&crng_init_wait);
                pr_notice("random: crng init done\n");
+               if (unseeded_warning.missed) {
+                       pr_notice("random: %d get_random_xx warning(s) missed "
+                                 "due to ratelimiting\n",
+                                 unseeded_warning.missed);
+                       unseeded_warning.missed = 0;
+               }
+               if (urandom_warning.missed) {
+                       pr_notice("random: %d urandom warning(s) missed "
+                                 "due to ratelimiting\n",
+                                 urandom_warning.missed);
+                       urandom_warning.missed = 0;
+               }
        }
 }
 
@@ -1565,8 +1595,9 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
 #ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
        print_once = true;
 #endif
-       pr_notice("random: %s called from %pS with crng_init=%d\n",
-                 func_name, caller, crng_init);
+       if (__ratelimit(&unseeded_warning))
+               pr_notice("random: %s called from %pS with crng_init=%d\n",
+                         func_name, caller, crng_init);
 }
 
 /*
@@ -1760,6 +1791,10 @@ static int rand_initialize(void)
        init_std_data(&blocking_pool);
        crng_initialize(&primary_crng);
        crng_global_init_time = jiffies;
+       if (ratelimit_disable) {
+               urandom_warning.interval = 0;
+               unseeded_warning.interval = 0;
+       }
        return 0;
 }
 early_initcall(rand_initialize);
@@ -1827,9 +1862,10 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 
        if (!crng_ready() && maxwarn > 0) {
                maxwarn--;
-               printk(KERN_NOTICE "random: %s: uninitialized urandom read "
-                      "(%zd bytes read)\n",
-                      current->comm, nbytes);
+               if (__ratelimit(&urandom_warning))
+                       printk(KERN_NOTICE "random: %s: uninitialized "
+                              "urandom read (%zd bytes read)\n",
+                              current->comm, nbytes);
                spin_lock_irqsave(&primary_crng.lock, flags);
                crng_init_cnt = 0;
                spin_unlock_irqrestore(&primary_crng.lock, flags);
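
The random.c change funnels both warning sites through ratelimit states that allow a burst of three messages per second and count what was suppressed, reporting the missed totals once the CRNG is initialized. A self-contained interval/burst limiter in the same spirit (the field and function names are illustrative, not the kernel's ratelimit API):

#include <stdio.h>
#include <time.h>

struct ratelimit {
        time_t interval;        /* window length in seconds */
        int burst;              /* messages allowed per window */
        time_t begin;
        int printed;
        int missed;
};

/* Return 1 if the caller may print, 0 if the message should be suppressed. */
static int ratelimit_ok(struct ratelimit *rs)
{
        time_t now = time(NULL);

        if (now - rs->begin >= rs->interval) {
                rs->begin = now;
                rs->printed = 0;
        }
        if (rs->printed < rs->burst) {
                rs->printed++;
                return 1;
        }
        rs->missed++;
        return 0;
}

int main(void)
{
        struct ratelimit rs = { .interval = 1, .burst = 3 };

        for (int i = 0; i < 10; i++)
                if (ratelimit_ok(&rs))
                        printf("warning %d\n", i);
        if (rs.missed)
                printf("%d warning(s) missed due to ratelimiting\n", rs.missed);
        return 0;
}
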
index 468f061..2108551 100644 (file)
@@ -422,7 +422,7 @@ static void reclaim_dma_bufs(void)
        }
 }
 
-static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
+static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size,
                                     int pages)
 {
        struct port_buffer *buf;
@@ -445,16 +445,16 @@ static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
                return buf;
        }
 
-       if (is_rproc_serial(vq->vdev)) {
+       if (is_rproc_serial(vdev)) {
                /*
                 * Allocate DMA memory from ancestor. When a virtio
                 * device is created by remoteproc, the DMA memory is
                 * associated with the grandparent device:
                 * vdev => rproc => platform-dev.
                 */
-               if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent)
+               if (!vdev->dev.parent || !vdev->dev.parent->parent)
                        goto free_buf;
-               buf->dev = vq->vdev->dev.parent->parent;
+               buf->dev = vdev->dev.parent->parent;
 
                /* Increase device refcnt to avoid freeing it */
                get_device(buf->dev);
@@ -838,7 +838,7 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
 
        count = min((size_t)(32 * 1024), count);
 
-       buf = alloc_buf(port->out_vq, count, 0);
+       buf = alloc_buf(port->portdev->vdev, count, 0);
        if (!buf)
                return -ENOMEM;
 
@@ -957,7 +957,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
        if (ret < 0)
                goto error_out;
 
-       buf = alloc_buf(port->out_vq, 0, pipe->nrbufs);
+       buf = alloc_buf(port->portdev->vdev, 0, pipe->nrbufs);
        if (!buf) {
                ret = -ENOMEM;
                goto error_out;
@@ -1374,7 +1374,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
 
        nr_added_bufs = 0;
        do {
-               buf = alloc_buf(vq, PAGE_SIZE, 0);
+               buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
                if (!buf)
                        break;
 
@@ -1402,7 +1402,6 @@ static int add_port(struct ports_device *portdev, u32 id)
 {
        char debugfs_name[16];
        struct port *port;
-       struct port_buffer *buf;
        dev_t devt;
        unsigned int nr_added_bufs;
        int err;
@@ -1513,8 +1512,6 @@ static int add_port(struct ports_device *portdev, u32 id)
        return 0;
 
 free_inbufs:
-       while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
-               free_buf(buf, true);
 free_device:
        device_destroy(pdrvdata.class, port->dev->devt);
 free_cdev:
@@ -1539,34 +1536,14 @@ static void remove_port(struct kref *kref)
 
 static void remove_port_data(struct port *port)
 {
-       struct port_buffer *buf;
-
        spin_lock_irq(&port->inbuf_lock);
        /* Remove unused data this port might have received. */
        discard_port_data(port);
        spin_unlock_irq(&port->inbuf_lock);
 
-       /* Remove buffers we queued up for the Host to send us data in. */
-       do {
-               spin_lock_irq(&port->inbuf_lock);
-               buf = virtqueue_detach_unused_buf(port->in_vq);
-               spin_unlock_irq(&port->inbuf_lock);
-               if (buf)
-                       free_buf(buf, true);
-       } while (buf);
-
        spin_lock_irq(&port->outvq_lock);
        reclaim_consumed_buffers(port);
        spin_unlock_irq(&port->outvq_lock);
-
-       /* Free pending buffers from the out-queue. */
-       do {
-               spin_lock_irq(&port->outvq_lock);
-               buf = virtqueue_detach_unused_buf(port->out_vq);
-               spin_unlock_irq(&port->outvq_lock);
-               if (buf)
-                       free_buf(buf, true);
-       } while (buf);
 }
 
 /*
@@ -1791,13 +1768,24 @@ static void control_work_handler(struct work_struct *work)
        spin_unlock(&portdev->c_ivq_lock);
 }
 
+static void flush_bufs(struct virtqueue *vq, bool can_sleep)
+{
+       struct port_buffer *buf;
+       unsigned int len;
+
+       while ((buf = virtqueue_get_buf(vq, &len)))
+               free_buf(buf, can_sleep);
+}
+
 static void out_intr(struct virtqueue *vq)
 {
        struct port *port;
 
        port = find_port_by_vq(vq->vdev->priv, vq);
-       if (!port)
+       if (!port) {
+               flush_bufs(vq, false);
                return;
+       }
 
        wake_up_interruptible(&port->waitqueue);
 }
@@ -1808,8 +1796,10 @@ static void in_intr(struct virtqueue *vq)
        unsigned long flags;
 
        port = find_port_by_vq(vq->vdev->priv, vq);
-       if (!port)
+       if (!port) {
+               flush_bufs(vq, false);
                return;
+       }
 
        spin_lock_irqsave(&port->inbuf_lock, flags);
        port->inbuf = get_inbuf(port);
@@ -1984,24 +1974,54 @@ static const struct file_operations portdev_fops = {
 
 static void remove_vqs(struct ports_device *portdev)
 {
+       struct virtqueue *vq;
+
+       virtio_device_for_each_vq(portdev->vdev, vq) {
+               struct port_buffer *buf;
+
+               flush_bufs(vq, true);
+               while ((buf = virtqueue_detach_unused_buf(vq)))
+                       free_buf(buf, true);
+       }
        portdev->vdev->config->del_vqs(portdev->vdev);
        kfree(portdev->in_vqs);
        kfree(portdev->out_vqs);
 }
 
-static void remove_controlq_data(struct ports_device *portdev)
+static void virtcons_remove(struct virtio_device *vdev)
 {
-       struct port_buffer *buf;
-       unsigned int len;
+       struct ports_device *portdev;
+       struct port *port, *port2;
 
-       if (!use_multiport(portdev))
-               return;
+       portdev = vdev->priv;
 
-       while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
-               free_buf(buf, true);
+       spin_lock_irq(&pdrvdata_lock);
+       list_del(&portdev->list);
+       spin_unlock_irq(&pdrvdata_lock);
 
-       while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
-               free_buf(buf, true);
+       /* Disable interrupts for vqs */
+       vdev->config->reset(vdev);
+       /* Finish up work that's lined up */
+       if (use_multiport(portdev))
+               cancel_work_sync(&portdev->control_work);
+       else
+               cancel_work_sync(&portdev->config_work);
+
+       list_for_each_entry_safe(port, port2, &portdev->ports, list)
+               unplug_port(port);
+
+       unregister_chrdev(portdev->chr_major, "virtio-portsdev");
+
+       /*
+        * When yanking out a device, we immediately lose the
+        * (device-side) queues.  So there's no point in keeping the
+        * guest side around till we drop our final reference.  This
+        * also means that any ports which are in an open state will
+        * have to just stop using the port, as the vqs are going
+        * away.
+        */
+       remove_vqs(portdev);
+       kfree(portdev);
 }
 
 /*
@@ -2070,6 +2090,7 @@ static int virtcons_probe(struct virtio_device *vdev)
 
        spin_lock_init(&portdev->ports_lock);
        INIT_LIST_HEAD(&portdev->ports);
+       INIT_LIST_HEAD(&portdev->list);
 
        virtio_device_ready(portdev->vdev);
 
@@ -2087,8 +2108,15 @@ static int virtcons_probe(struct virtio_device *vdev)
                if (!nr_added_bufs) {
                        dev_err(&vdev->dev,
                                "Error allocating buffers for control queue\n");
-                       err = -ENOMEM;
-                       goto free_vqs;
+                       /*
+                        * The host might want to notify mgmt sw about device
+                        * add failure.
+                        */
+                       __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
+                                          VIRTIO_CONSOLE_DEVICE_READY, 0);
+                       /* Device was functional: we need full cleanup. */
+                       virtcons_remove(vdev);
+                       return -ENOMEM;
                }
        } else {
                /*
@@ -2119,11 +2147,6 @@ static int virtcons_probe(struct virtio_device *vdev)
 
        return 0;
 
-free_vqs:
-       /* The host might want to notify mgmt sw about device add failure */
-       __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
-                          VIRTIO_CONSOLE_DEVICE_READY, 0);
-       remove_vqs(portdev);
 free_chrdev:
        unregister_chrdev(portdev->chr_major, "virtio-portsdev");
 free:
@@ -2132,43 +2155,6 @@ fail:
        return err;
 }
 
-static void virtcons_remove(struct virtio_device *vdev)
-{
-       struct ports_device *portdev;
-       struct port *port, *port2;
-
-       portdev = vdev->priv;
-
-       spin_lock_irq(&pdrvdata_lock);
-       list_del(&portdev->list);
-       spin_unlock_irq(&pdrvdata_lock);
-
-       /* Disable interrupts for vqs */
-       vdev->config->reset(vdev);
-       /* Finish up work that's lined up */
-       if (use_multiport(portdev))
-               cancel_work_sync(&portdev->control_work);
-       else
-               cancel_work_sync(&portdev->config_work);
-
-       list_for_each_entry_safe(port, port2, &portdev->ports, list)
-               unplug_port(port);
-
-       unregister_chrdev(portdev->chr_major, "virtio-portsdev");
-
-       /*
-        * When yanking out a device, we immediately lose the
-        * (device-side) queues.  So there's no point in keeping the
-        * guest side around till we drop our final reference.  This
-        * also means that any ports which are in an open state will
-        * have to just stop using the port, as the vqs are going
-        * away.
-        */
-       remove_controlq_data(portdev);
-       remove_vqs(portdev);
-       kfree(portdev);
-}
-
 static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
        { 0 },
@@ -2209,7 +2195,6 @@ static int virtcons_freeze(struct virtio_device *vdev)
         */
        if (use_multiport(portdev))
                virtqueue_disable_cb(portdev->c_ivq);
-       remove_controlq_data(portdev);
 
        list_for_each_entry(port, &portdev->ports, list) {
                virtqueue_disable_cb(port->in_vq);
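
The virtio_console probe hunks above change the error strategy: once the device has been marked ready and control-queue work may already be in flight, a late failure goes through the full virtcons_remove() path instead of piecemeal unwinding. A rough sketch of that shape in isolation (every name below is hypothetical, not from this patch):

/* Illustrative only: hypothetical device type and helpers. */
struct my_device { int dummy; };

static int early_setup(struct my_device *dev)     { return 0; }
static void device_go_live(struct my_device *dev) { }
static int late_setup(struct my_device *dev)      { return 0; }
static void my_remove(struct my_device *dev)      { }

static int my_probe(struct my_device *dev)
{
	int err;

	err = early_setup(dev);		/* nothing is running yet */
	if (err)
		return err;		/* simple unwind is enough here */

	device_go_live(dev);		/* from here on, work may already run */

	err = late_setup(dev);
	if (err) {
		my_remove(dev);		/* full teardown, same path as unplug */
		return err;
	}

	return 0;
}
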
index c580197..a2f8c42 100644 (file)
@@ -541,7 +541,7 @@ probe_err:
        return ret;
 }
 
-static int cs2000_resume(struct device *dev)
+static int __maybe_unused cs2000_resume(struct device *dev)
 {
        struct cs2000_priv *priv = dev_get_drvdata(dev);
 
index ac4a042..1628b93 100644 (file)
@@ -112,10 +112,18 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
        return 0;
 }
 
+static int clk_mux_determine_rate(struct clk_hw *hw,
+                                 struct clk_rate_request *req)
+{
+       struct clk_mux *mux = to_clk_mux(hw);
+
+       return clk_mux_determine_rate_flags(hw, req, mux->flags);
+}
+
 const struct clk_ops clk_mux_ops = {
        .get_parent = clk_mux_get_parent,
        .set_parent = clk_mux_set_parent,
-       .determine_rate = __clk_mux_determine_rate,
+       .determine_rate = clk_mux_determine_rate,
 };
 EXPORT_SYMBOL_GPL(clk_mux_ops);
 
index f1d5967..edd3cf4 100644 (file)
@@ -216,7 +216,7 @@ static const char * const usart1_src[] = {
        "pclk5", "pll3_q", "ck_hsi", "ck_csi", "pll4_q", "ck_hse"
 };
 
-const char * const usart234578_src[] = {
+static const char * const usart234578_src[] = {
        "pclk1", "pll4_q", "ck_hsi", "ck_csi", "ck_hse"
 };
 
@@ -224,10 +224,6 @@ static const char * const usart6_src[] = {
        "pclk2", "pll4_q", "ck_hsi", "ck_csi", "ck_hse"
 };
 
-static const char * const dfsdm_src[] = {
-       "pclk2", "ck_mcu"
-};
-
 static const char * const fdcan_src[] = {
        "ck_hse", "pll3_q", "pll4_q"
 };
@@ -316,10 +312,8 @@ struct stm32_clk_mgate {
 struct clock_config {
        u32 id;
        const char *name;
-       union {
-               const char *parent_name;
-               const char * const *parent_names;
-       };
+       const char *parent_name;
+       const char * const *parent_names;
        int num_parents;
        unsigned long flags;
        void *cfg;
@@ -469,7 +463,7 @@ static void mp1_gate_clk_disable(struct clk_hw *hw)
        }
 }
 
-const struct clk_ops mp1_gate_clk_ops = {
+static const struct clk_ops mp1_gate_clk_ops = {
        .enable         = mp1_gate_clk_enable,
        .disable        = mp1_gate_clk_disable,
        .is_enabled     = clk_gate_is_enabled,
@@ -698,7 +692,7 @@ static void mp1_mgate_clk_disable(struct clk_hw *hw)
                mp1_gate_clk_disable(hw);
 }
 
-const struct clk_ops mp1_mgate_clk_ops = {
+static const struct clk_ops mp1_mgate_clk_ops = {
        .enable         = mp1_mgate_clk_enable,
        .disable        = mp1_mgate_clk_disable,
        .is_enabled     = clk_gate_is_enabled,
@@ -732,7 +726,7 @@ static int clk_mmux_set_parent(struct clk_hw *hw, u8 index)
        return 0;
 }
 
-const struct clk_ops clk_mmux_ops = {
+static const struct clk_ops clk_mmux_ops = {
        .get_parent     = clk_mmux_get_parent,
        .set_parent     = clk_mmux_set_parent,
        .determine_rate = __clk_mux_determine_rate,
@@ -1048,10 +1042,10 @@ struct stm32_pll_cfg {
        u32 offset;
 };
 
-struct clk_hw *_clk_register_pll(struct device *dev,
-                                struct clk_hw_onecell_data *clk_data,
-                                void __iomem *base, spinlock_t *lock,
-                                const struct clock_config *cfg)
+static struct clk_hw *_clk_register_pll(struct device *dev,
+                                       struct clk_hw_onecell_data *clk_data,
+                                       void __iomem *base, spinlock_t *lock,
+                                       const struct clock_config *cfg)
 {
        struct stm32_pll_cfg *stm_pll_cfg = cfg->cfg;
 
@@ -1405,7 +1399,8 @@ enum {
        G_USBH,
        G_ETHSTP,
        G_RTCAPB,
-       G_TZC,
+       G_TZC1,
+       G_TZC2,
        G_TZPC,
        G_IWDG1,
        G_BSEC,
@@ -1417,7 +1412,7 @@ enum {
        G_LAST
 };
 
-struct stm32_mgate mp1_mgate[G_LAST];
+static struct stm32_mgate mp1_mgate[G_LAST];
 
 #define _K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags,\
               _mgate, _ops)\
@@ -1440,7 +1435,7 @@ struct stm32_mgate mp1_mgate[G_LAST];
               &mp1_mgate[_id], &mp1_mgate_clk_ops)
 
 /* Peripheral gates */
-struct stm32_gate_cfg per_gate_cfg[G_LAST] = {
+static struct stm32_gate_cfg per_gate_cfg[G_LAST] = {
        /* Multi gates */
        K_GATE(G_MDIO,          RCC_APB1ENSETR, 31, 0),
        K_MGATE(G_DAC12,        RCC_APB1ENSETR, 29, 0),
@@ -1506,7 +1501,8 @@ struct stm32_gate_cfg per_gate_cfg[G_LAST] = {
        K_GATE(G_BSEC,          RCC_APB5ENSETR, 16, 0),
        K_GATE(G_IWDG1,         RCC_APB5ENSETR, 15, 0),
        K_GATE(G_TZPC,          RCC_APB5ENSETR, 13, 0),
-       K_GATE(G_TZC,           RCC_APB5ENSETR, 12, 0),
+       K_GATE(G_TZC2,          RCC_APB5ENSETR, 12, 0),
+       K_GATE(G_TZC1,          RCC_APB5ENSETR, 11, 0),
        K_GATE(G_RTCAPB,        RCC_APB5ENSETR, 8, 0),
        K_MGATE(G_USART1,       RCC_APB5ENSETR, 4, 0),
        K_MGATE(G_I2C6,         RCC_APB5ENSETR, 3, 0),
@@ -1600,7 +1596,7 @@ enum {
        M_LAST
 };
 
-struct stm32_mmux ker_mux[M_LAST];
+static struct stm32_mmux ker_mux[M_LAST];
 
 #define _K_MUX(_id, _offset, _shift, _width, _mux_flags, _mmux, _ops)\
        [_id] = {\
@@ -1623,7 +1619,7 @@ struct stm32_mmux ker_mux[M_LAST];
        _K_MUX(_id, _offset, _shift, _width, _mux_flags,\
                        &ker_mux[_id], &clk_mmux_ops)
 
-const struct stm32_mux_cfg ker_mux_cfg[M_LAST] = {
+static const struct stm32_mux_cfg ker_mux_cfg[M_LAST] = {
        /* Kernel multi mux */
        K_MMUX(M_SDMMC12, RCC_SDMMC12CKSELR, 0, 3, 0),
        K_MMUX(M_SPI23, RCC_SPI2S23CKSELR, 0, 3, 0),
@@ -1860,7 +1856,8 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
        PCLK(USART1, "usart1", "pclk5", 0, G_USART1),
        PCLK(RTCAPB, "rtcapb", "pclk5", CLK_IGNORE_UNUSED |
             CLK_IS_CRITICAL, G_RTCAPB),
-       PCLK(TZC, "tzc", "pclk5", CLK_IGNORE_UNUSED, G_TZC),
+       PCLK(TZC1, "tzc1", "ck_axi", CLK_IGNORE_UNUSED, G_TZC1),
+       PCLK(TZC2, "tzc2", "ck_axi", CLK_IGNORE_UNUSED, G_TZC2),
        PCLK(TZPC, "tzpc", "pclk5", CLK_IGNORE_UNUSED, G_TZPC),
        PCLK(IWDG1, "iwdg1", "pclk5", 0, G_IWDG1),
        PCLK(BSEC, "bsec", "pclk5", CLK_IGNORE_UNUSED, G_BSEC),
@@ -1916,8 +1913,7 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
        KCLK(RNG1_K, "rng1_k", rng_src, 0, G_RNG1, M_RNG1),
        KCLK(RNG2_K, "rng2_k", rng_src, 0, G_RNG2, M_RNG2),
        KCLK(USBPHY_K, "usbphy_k", usbphy_src, 0, G_USBPHY, M_USBPHY),
-       KCLK(STGEN_K, "stgen_k",  stgen_src, CLK_IGNORE_UNUSED,
-            G_STGEN, M_STGEN),
+       KCLK(STGEN_K, "stgen_k", stgen_src, CLK_IS_CRITICAL, G_STGEN, M_STGEN),
        KCLK(SPDIF_K, "spdif_k", spdif_src, 0, G_SPDIF, M_SPDIF),
        KCLK(SPI1_K, "spi1_k", spi123_src, 0, G_SPI1, M_SPI1),
        KCLK(SPI2_K, "spi2_k", spi123_src, 0, G_SPI2, M_SPI23),
@@ -1948,8 +1944,8 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
        KCLK(FDCAN_K, "fdcan_k", fdcan_src, 0, G_FDCAN, M_FDCAN),
        KCLK(SAI1_K, "sai1_k", sai_src, 0, G_SAI1, M_SAI1),
        KCLK(SAI2_K, "sai2_k", sai2_src, 0, G_SAI2, M_SAI2),
-       KCLK(SAI3_K, "sai3_k", sai_src, 0, G_SAI2, M_SAI3),
-       KCLK(SAI4_K, "sai4_k", sai_src, 0, G_SAI2, M_SAI4),
+       KCLK(SAI3_K, "sai3_k", sai_src, 0, G_SAI3, M_SAI3),
+       KCLK(SAI4_K, "sai4_k", sai_src, 0, G_SAI4, M_SAI4),
        KCLK(ADC12_K, "adc12_k", adc12_src, 0, G_ADC12, M_ADC12),
        KCLK(DSI_K, "dsi_k", dsi_src, 0, G_DSI, M_DSI),
        KCLK(ADFSDM_K, "adfsdm_k", sai_src, 0, G_ADFSDM, M_SAI1),
@@ -1992,10 +1988,6 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
                  _DIV(RCC_MCO2CFGR, 4, 4, 0, NULL)),
 
        /* Debug clocks */
-       FIXED_FACTOR(NO_ID, "ck_axi_div2", "ck_axi", 0, 1, 2),
-
-       GATE(DBG, "ck_apb_dbg", "ck_axi_div2", 0, RCC_DBGCFGR, 8, 0),
-
        GATE(CK_DBG, "ck_sys_dbg", "ck_axi", 0, RCC_DBGCFGR, 8, 0),
 
        COMPOSITE(CK_TRACE, "ck_trace", ck_trace_src, CLK_OPS_PARENT_ENABLE,
index ea67ac8..7af555f 100644 (file)
@@ -426,9 +426,9 @@ static bool mux_is_better_rate(unsigned long rate, unsigned long now,
        return now <= rate && now > best;
 }
 
-static int
-clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req,
-                            unsigned long flags)
+int clk_mux_determine_rate_flags(struct clk_hw *hw,
+                                struct clk_rate_request *req,
+                                unsigned long flags)
 {
        struct clk_core *core = hw->core, *parent, *best_parent = NULL;
        int i, num_parents, ret;
@@ -488,6 +488,7 @@ out:
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);
 
 struct clk *__clk_lookup(const char *name)
 {
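
Together with the clk-mux and meson clk-regmap hunks surrounding it, exporting clk_mux_determine_rate_flags() lets a mux provider that keeps its own flag word forward it from a thin .determine_rate wrapper. A minimal sketch of such a wrapper (the my_mux type and names are illustrative, not from this series):

#include <linux/kernel.h>
#include <linux/clk-provider.h>

struct my_mux {
	struct clk_hw hw;
	u8 flags;	/* e.g. CLK_MUX_ROUND_CLOSEST */
};

#define to_my_mux(_hw) container_of(_hw, struct my_mux, hw)

static int my_mux_determine_rate(struct clk_hw *hw,
				 struct clk_rate_request *req)
{
	struct my_mux *mux = to_my_mux(hw);

	/* forward the provider's private flags to the core helper */
	return clk_mux_determine_rate_flags(hw, req, mux->flags);
}
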
index 3645fdb..ab7a355 100644 (file)
@@ -153,10 +153,19 @@ static int clk_regmap_mux_set_parent(struct clk_hw *hw, u8 index)
                                  val << mux->shift);
 }
 
+static int clk_regmap_mux_determine_rate(struct clk_hw *hw,
+                                        struct clk_rate_request *req)
+{
+       struct clk_regmap *clk = to_clk_regmap(hw);
+       struct clk_regmap_mux_data *mux = clk_get_regmap_mux_data(clk);
+
+       return clk_mux_determine_rate_flags(hw, req, mux->flags);
+}
+
 const struct clk_ops clk_regmap_mux_ops = {
        .get_parent = clk_regmap_mux_get_parent,
        .set_parent = clk_regmap_mux_set_parent,
-       .determine_rate = __clk_mux_determine_rate,
+       .determine_rate = clk_regmap_mux_determine_rate,
 };
 EXPORT_SYMBOL_GPL(clk_regmap_mux_ops);
 
index 0be7838..badc4c2 100644 (file)
@@ -17,8 +17,6 @@
 #define AO_RTC_ALT_CLK_CNTL0   0x94
 #define AO_RTC_ALT_CLK_CNTL1   0x98
 
-extern const struct clk_ops meson_aoclk_gate_regmap_ops;
-
 struct aoclk_cec_32k {
        struct clk_hw hw;
        struct regmap *regmap;
index cc29924..d0524ec 100644 (file)
@@ -253,7 +253,7 @@ static struct clk_fixed_factor meson8b_fclk_div3_div = {
        .mult = 1,
        .div = 3,
        .hw.init = &(struct clk_init_data){
-               .name = "fclk_div_div3",
+               .name = "fclk_div3_div",
                .ops = &clk_fixed_factor_ops,
                .parent_names = (const char *[]){ "fixed_pll" },
                .num_parents = 1,
@@ -632,7 +632,8 @@ static struct clk_regmap meson8b_cpu_clk = {
        .hw.init = &(struct clk_init_data){
                .name = "cpu_clk",
                .ops = &clk_regmap_mux_ro_ops,
-               .parent_names = (const char *[]){ "xtal", "cpu_out_sel" },
+               .parent_names = (const char *[]){ "xtal",
+                                                 "cpu_scale_out_sel" },
                .num_parents = 2,
                .flags = (CLK_SET_RATE_PARENT |
                          CLK_SET_RATE_NO_REPARENT),
index 7f56fe5..de55c7d 100644 (file)
@@ -71,16 +71,6 @@ config ARM_BRCMSTB_AVS_CPUFREQ
 
          Say Y, if you have a Broadcom SoC with AVS support for DFS or DVFS.
 
-config ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
-       bool "Broadcom STB AVS CPUfreq driver sysfs debug capability"
-       depends on ARM_BRCMSTB_AVS_CPUFREQ
-       help
-         Enabling this option turns on debug support via sysfs under
-         /sys/kernel/debug/brcmstb-avs-cpufreq. It is possible to read all and
-         write some AVS mailbox registers through sysfs entries.
-
-         If in doubt, say N.
-
 config ARM_EXYNOS5440_CPUFREQ
        tristate "SAMSUNG EXYNOS5440"
        depends on SOC_EXYNOS5440
index 6cdac1a..b07559b 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/semaphore.h>
 
-#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
-#include <linux/ctype.h>
-#include <linux/debugfs.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#endif
-
 /* Max number of arguments AVS calls take */
 #define AVS_MAX_CMD_ARGS       4
 /*
@@ -182,88 +175,11 @@ struct private_data {
        void __iomem *base;
        void __iomem *avs_intr_base;
        struct device *dev;
-#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
-       struct dentry *debugfs;
-#endif
        struct completion done;
        struct semaphore sem;
        struct pmap pmap;
 };
 
-#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
-
-enum debugfs_format {
-       DEBUGFS_NORMAL,
-       DEBUGFS_FLOAT,
-       DEBUGFS_REV,
-};
-
-struct debugfs_data {
-       struct debugfs_entry *entry;
-       struct private_data *priv;
-};
-
-struct debugfs_entry {
-       char *name;
-       u32 offset;
-       fmode_t mode;
-       enum debugfs_format format;
-};
-
-#define DEBUGFS_ENTRY(name, mode, format)      { \
-       #name, AVS_MBOX_##name, mode, format \
-}
-
-/*
- * These are used for debugfs only. Otherwise we use AVS_MBOX_PARAM() directly.
- */
-#define AVS_MBOX_PARAM1                AVS_MBOX_PARAM(0)
-#define AVS_MBOX_PARAM2                AVS_MBOX_PARAM(1)
-#define AVS_MBOX_PARAM3                AVS_MBOX_PARAM(2)
-#define AVS_MBOX_PARAM4                AVS_MBOX_PARAM(3)
-
-/*
- * This table stores the name, access permissions and offset for each hardware
- * register and is used to generate debugfs entries.
- */
-static struct debugfs_entry debugfs_entries[] = {
-       DEBUGFS_ENTRY(COMMAND, S_IWUSR, DEBUGFS_NORMAL),
-       DEBUGFS_ENTRY(STATUS, S_IWUSR, DEBUGFS_NORMAL),
-       DEBUGFS_ENTRY(VOLTAGE0, 0, DEBUGFS_FLOAT),
-       DEBUGFS_ENTRY(TEMP0, 0, DEBUGFS_FLOAT),
-       DEBUGFS_ENTRY(PV0, 0, DEBUGFS_FLOAT),
-       DEBUGFS_ENTRY(MV0, 0, DEBUGFS_FLOAT),
-       DEBUGFS_ENTRY(PARAM1, S_IWUSR, DEBUGFS_NORMAL),
-       DEBUGFS_ENTRY(PARAM2, S_IWUSR, DEBUGFS_NORMAL),
-       DEBUGFS_ENTRY(PARAM3, S_IWUSR, DEBUGFS_NORMAL),
-       DEBUGFS_ENTRY(PARAM4, S_IWUSR, DEBUGFS_NORMAL),
-       DEBUGFS_ENTRY(REVISION, 0, DEBUGFS_REV),
-       DEBUGFS_ENTRY(PSTATE, 0, DEBUGFS_NORMAL),
-       DEBUGFS_ENTRY(HEARTBEAT, 0, DEBUGFS_NORMAL),
-       DEBUGFS_ENTRY(MAGIC, S_IWUSR, DEBUGFS_NORMAL),
-       DEBUGFS_ENTRY(SIGMA_HVT, 0, DEBUGFS_NORMAL),
-       DEBUGFS_ENTRY(SIGMA_SVT, 0, DEBUGFS_NORMAL),
-       DEBUGFS_ENTRY(VOLTAGE1, 0, DEBUGFS_FLOAT),
-       DEBUGFS_ENTRY(TEMP1, 0, DEBUGFS_FLOAT),
-       DEBUGFS_ENTRY(PV1, 0, DEBUGFS_FLOAT),
-       DEBUGFS_ENTRY(MV1, 0, DEBUGFS_FLOAT),
-       DEBUGFS_ENTRY(FREQUENCY, 0, DEBUGFS_NORMAL),
-};
-
-static int brcm_avs_target_index(struct cpufreq_policy *, unsigned int);
-
-static char *__strtolower(char *s)
-{
-       char *p;
-
-       for (p = s; *p; p++)
-               *p = tolower(*p);
-
-       return s;
-}
-
-#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */
-
 static void __iomem *__map_region(const char *name)
 {
        struct device_node *np;
@@ -516,238 +432,6 @@ brcm_avs_get_freq_table(struct device *dev, struct private_data *priv)
        return table;
 }
 
-#ifdef CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG
-
-#define MANT(x)        (unsigned int)(abs((x)) / 1000)
-#define FRAC(x)        (unsigned int)(abs((x)) - abs((x)) / 1000 * 1000)
-
-static int brcm_avs_debug_show(struct seq_file *s, void *data)
-{
-       struct debugfs_data *dbgfs = s->private;
-       void __iomem *base;
-       u32 val, offset;
-
-       if (!dbgfs) {
-               seq_puts(s, "No device pointer\n");
-               return 0;
-       }
-
-       base = dbgfs->priv->base;
-       offset = dbgfs->entry->offset;
-       val = readl(base + offset);
-       switch (dbgfs->entry->format) {
-       case DEBUGFS_NORMAL:
-               seq_printf(s, "%u\n", val);
-               break;
-       case DEBUGFS_FLOAT:
-               seq_printf(s, "%d.%03d\n", MANT(val), FRAC(val));
-               break;
-       case DEBUGFS_REV:
-               seq_printf(s, "%c.%c.%c.%c\n", (val >> 24 & 0xff),
-                          (val >> 16 & 0xff), (val >> 8 & 0xff),
-                          val & 0xff);
-               break;
-       }
-       seq_printf(s, "0x%08x\n", val);
-
-       return 0;
-}
-
-#undef MANT
-#undef FRAC
-
-static ssize_t brcm_avs_seq_write(struct file *file, const char __user *buf,
-                                 size_t size, loff_t *ppos)
-{
-       struct seq_file *s = file->private_data;
-       struct debugfs_data *dbgfs = s->private;
-       struct private_data *priv = dbgfs->priv;
-       void __iomem *base, *avs_intr_base;
-       bool use_issue_command = false;
-       unsigned long val, offset;
-       char str[128];
-       int ret;
-       char *str_ptr = str;
-
-       if (size >= sizeof(str))
-               return -E2BIG;
-
-       memset(str, 0, sizeof(str));
-       ret = copy_from_user(str, buf, size);
-       if (ret)
-               return ret;
-
-       base = priv->base;
-       avs_intr_base = priv->avs_intr_base;
-       offset = dbgfs->entry->offset;
-       /*
-        * Special case writing to "command" entry only: if the string starts
-        * with a 'c', we use the driver's __issue_avs_command() function.
-        * Otherwise, we perform a raw write. This should allow testing of raw
-        * access as well as using the higher level function. (Raw access
-        * doesn't clear the firmware return status after issuing the command.)
-        */
-       if (str_ptr[0] == 'c' && offset == AVS_MBOX_COMMAND) {
-               use_issue_command = true;
-               str_ptr++;
-       }
-       if (kstrtoul(str_ptr, 0, &val) != 0)
-               return -EINVAL;
-
-       /*
-        * Setting the P-state is a special case. We need to update the CPU
-        * frequency we report.
-        */
-       if (val == AVS_CMD_SET_PSTATE) {
-               struct cpufreq_policy *policy;
-               unsigned int pstate;
-
-               policy = cpufreq_cpu_get(smp_processor_id());
-               /* Read back the P-state we are about to set */
-               pstate = readl(base + AVS_MBOX_PARAM(0));
-               if (use_issue_command) {
-                       ret = brcm_avs_target_index(policy, pstate);
-                       return ret ? ret : size;
-               }
-               policy->cur = policy->freq_table[pstate].frequency;
-       }
-
-       if (use_issue_command) {
-               ret = __issue_avs_command(priv, val, false, NULL);
-       } else {
-               /* Locking here is not perfect, but is only for debug. */
-               ret = down_interruptible(&priv->sem);
-               if (ret)
-                       return ret;
-
-               writel(val, base + offset);
-               /* We have to wake up the firmware to process a command. */
-               if (offset == AVS_MBOX_COMMAND)
-                       writel(AVS_CPU_L2_INT_MASK,
-                              avs_intr_base + AVS_CPU_L2_SET0);
-               up(&priv->sem);
-       }
-
-       return ret ? ret : size;
-}
-
-static struct debugfs_entry *__find_debugfs_entry(const char *name)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++)
-               if (strcasecmp(debugfs_entries[i].name, name) == 0)
-                       return &debugfs_entries[i];
-
-       return NULL;
-}
-
-static int brcm_avs_debug_open(struct inode *inode, struct file *file)
-{
-       struct debugfs_data *data;
-       fmode_t fmode;
-       int ret;
-
-       /*
-        * seq_open(), which is called by single_open(), clears "write" access.
-        * We need write access to some files, so we preserve our access mode
-        * and restore it.
-        */
-       fmode = file->f_mode;
-       /*
-        * Check access permissions even for root. We don't want to be writing
-        * to read-only registers. Access for regular users has already been
-        * checked by the VFS layer.
-        */
-       if ((fmode & FMODE_WRITER) && !(inode->i_mode & S_IWUSR))
-               return -EACCES;
-
-       data = kmalloc(sizeof(*data), GFP_KERNEL);
-       if (!data)
-               return -ENOMEM;
-       /*
-        * We use the same file system operations for all our debug files. To
-        * produce specific output, we look up the file name upon opening a
-        * debugfs entry and map it to a memory offset. This offset is then used
-        * in the generic "show" function to read a specific register.
-        */
-       data->entry = __find_debugfs_entry(file->f_path.dentry->d_iname);
-       data->priv = inode->i_private;
-
-       ret = single_open(file, brcm_avs_debug_show, data);
-       if (ret)
-               kfree(data);
-       file->f_mode = fmode;
-
-       return ret;
-}
-
-static int brcm_avs_debug_release(struct inode *inode, struct file *file)
-{
-       struct seq_file *seq_priv = file->private_data;
-       struct debugfs_data *data = seq_priv->private;
-
-       kfree(data);
-       return single_release(inode, file);
-}
-
-static const struct file_operations brcm_avs_debug_ops = {
-       .open           = brcm_avs_debug_open,
-       .read           = seq_read,
-       .write          = brcm_avs_seq_write,
-       .llseek         = seq_lseek,
-       .release        = brcm_avs_debug_release,
-};
-
-static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev)
-{
-       struct private_data *priv = platform_get_drvdata(pdev);
-       struct dentry *dir;
-       int i;
-
-       if (!priv)
-               return;
-
-       dir = debugfs_create_dir(BRCM_AVS_CPUFREQ_NAME, NULL);
-       if (IS_ERR_OR_NULL(dir))
-               return;
-       priv->debugfs = dir;
-
-       for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++) {
-               /*
-                * The DEBUGFS_ENTRY macro generates uppercase strings. We
-                * convert them to lowercase before creating the debugfs
-                * entries.
-                */
-               char *entry = __strtolower(debugfs_entries[i].name);
-               fmode_t mode = debugfs_entries[i].mode;
-
-               if (!debugfs_create_file(entry, S_IFREG | S_IRUGO | mode,
-                                        dir, priv, &brcm_avs_debug_ops)) {
-                       priv->debugfs = NULL;
-                       debugfs_remove_recursive(dir);
-                       break;
-               }
-       }
-}
-
-static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev)
-{
-       struct private_data *priv = platform_get_drvdata(pdev);
-
-       if (priv && priv->debugfs) {
-               debugfs_remove_recursive(priv->debugfs);
-               priv->debugfs = NULL;
-       }
-}
-
-#else
-
-static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev) {}
-static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev) {}
-
-#endif /* CONFIG_ARM_BRCMSTB_AVS_CPUFREQ_DEBUG */
-
 /*
  * To ensure the right firmware is running we need to
  *    - check the MAGIC matches what we expect
@@ -1016,11 +700,8 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
                return ret;
 
        brcm_avs_driver.driver_data = pdev;
-       ret = cpufreq_register_driver(&brcm_avs_driver);
-       if (!ret)
-               brcm_avs_cpufreq_debug_init(pdev);
 
-       return ret;
+       return cpufreq_register_driver(&brcm_avs_driver);
 }
 
 static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
@@ -1032,8 +713,6 @@ static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       brcm_avs_cpufreq_debug_exit(pdev);
-
        priv = platform_get_drvdata(pdev);
        iounmap(priv->base);
        iounmap(priv->avs_intr_base);
index bc5fc16..b15115a 100644 (file)
@@ -126,6 +126,49 @@ static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
                                cpu->perf_caps.lowest_perf, cpu_num, ret);
 }
 
+/*
+ * The PCC subspace describes the rate at which the platform can accept
+ * commands on the shared PCC channel (including READs, which do not count
+ * towards frequency transition requests), so ideally we use the PCC values
+ * as a fallback if we don't have a platform-specific transition_delay_us.
+ */
+#ifdef CONFIG_ARM64
+#include <asm/cputype.h>
+
+static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu)
+{
+       unsigned long implementor = read_cpuid_implementor();
+       unsigned long part_num = read_cpuid_part_number();
+       unsigned int delay_us = 0;
+
+       switch (implementor) {
+       case ARM_CPU_IMP_QCOM:
+               switch (part_num) {
+               case QCOM_CPU_PART_FALKOR_V1:
+               case QCOM_CPU_PART_FALKOR:
+                       delay_us = 10000;
+                       break;
+               default:
+                       delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+                       break;
+               }
+               break;
+       default:
+               delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+               break;
+       }
+
+       return delay_us;
+}
+
+#else
+
+static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu)
+{
+       return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+}
+#endif
+
 static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
        struct cppc_cpudata *cpu;
@@ -162,8 +205,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
                cpu->perf_caps.highest_perf;
        policy->cpuinfo.max_freq = cppc_dmi_max_khz;
 
-       policy->transition_delay_us = cppc_get_transition_latency(cpu_num) /
-               NSEC_PER_USEC;
+       policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu_num);
        policy->shared_type = cpu->shared_type;
 
        if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
index 0591874..54edaec 100644 (file)
@@ -679,6 +679,16 @@ void gpstate_timer_handler(struct timer_list *t)
 
        if (!spin_trylock(&gpstates->gpstate_lock))
                return;
+       /*
+        * If the timer has migrated to a different CPU, bring it
+        * back to one of the policy->cpus.
+        */
+       if (!cpumask_test_cpu(raw_smp_processor_id(), policy->cpus)) {
+               gpstates->timer.expires = jiffies + msecs_to_jiffies(1);
+               add_timer_on(&gpstates->timer, cpumask_first(policy->cpus));
+               spin_unlock(&gpstates->gpstate_lock);
+               return;
+       }
 
        /*
         * If the last PMCR update was done using fast_switch then
@@ -718,10 +728,8 @@ void gpstate_timer_handler(struct timer_list *t)
        if (gpstate_idx != gpstates->last_lpstate_idx)
                queue_gpstate_timer(gpstates);
 
+       set_pstate(&freq_data);
        spin_unlock(&gpstates->gpstate_lock);
-
-       /* Timer may get migrated to a different cpu on cpu hot unplug */
-       smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
 }
 
 /*
index e6f1782..2b90606 100644 (file)
@@ -284,7 +284,7 @@ scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id)
        struct clock_info *ci = handle->clk_priv;
        struct scmi_clock_info *clk = ci->clk + clk_id;
 
-       if (!clk->name || !clk->name[0])
+       if (!clk->name[0])
                return NULL;
 
        return clk;
index 14f14ef..06d212a 100644 (file)
@@ -249,7 +249,7 @@ static int altera_ps_probe(struct spi_device *spi)
 
        conf->data = of_id->data;
        conf->spi = spi;
-       conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_HIGH);
+       conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_LOW);
        if (IS_ERR(conf->config)) {
                dev_err(&spi->dev, "Failed to get config gpio: %ld\n",
                        PTR_ERR(conf->config));
index 77e4855..6f693b7 100644 (file)
@@ -384,7 +384,7 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
        if (set)
                reg |= bit;
        else
-               reg &= bit;
+               reg &= ~bit;
        iowrite32(reg, addr);
 
        spin_unlock_irqrestore(&gpio->lock, flags);
index 1948724..25d16b2 100644 (file)
@@ -116,9 +116,9 @@ static int idio_16_gpio_get_multiple(struct gpio_chip *chip,
        unsigned long word_mask;
        const unsigned long port_mask = GENMASK(gpio_reg_size - 1, 0);
        unsigned long port_state;
-       u8 __iomem ports[] = {
-               idio16gpio->reg->out0_7, idio16gpio->reg->out8_15,
-               idio16gpio->reg->in0_7, idio16gpio->reg->in8_15,
+       void __iomem *ports[] = {
+               &idio16gpio->reg->out0_7, &idio16gpio->reg->out8_15,
+               &idio16gpio->reg->in0_7, &idio16gpio->reg->in8_15,
        };
 
        /* clear bits array to a clean slate */
@@ -143,7 +143,7 @@ static int idio_16_gpio_get_multiple(struct gpio_chip *chip,
                }
 
                /* read bits from current gpio port */
-               port_state = ioread8(ports + i);
+               port_state = ioread8(ports[i]);
 
                /* store acquired bits at respective bits array offset */
                bits[word_index] |= port_state << word_offset;
index 835607e..f953541 100644 (file)
@@ -206,10 +206,10 @@ static int idio_24_gpio_get_multiple(struct gpio_chip *chip,
        unsigned long word_mask;
        const unsigned long port_mask = GENMASK(gpio_reg_size - 1, 0);
        unsigned long port_state;
-       u8 __iomem ports[] = {
-               idio24gpio->reg->out0_7, idio24gpio->reg->out8_15,
-               idio24gpio->reg->out16_23, idio24gpio->reg->in0_7,
-               idio24gpio->reg->in8_15, idio24gpio->reg->in16_23,
+       void __iomem *ports[] = {
+               &idio24gpio->reg->out0_7, &idio24gpio->reg->out8_15,
+               &idio24gpio->reg->out16_23, &idio24gpio->reg->in0_7,
+               &idio24gpio->reg->in8_15, &idio24gpio->reg->in16_23,
        };
        const unsigned long out_mode_mask = BIT(1);
 
@@ -217,7 +217,7 @@ static int idio_24_gpio_get_multiple(struct gpio_chip *chip,
        bitmap_zero(bits, chip->ngpio);
 
        /* get bits are evaluated a gpio port register at a time */
-       for (i = 0; i < ARRAY_SIZE(ports); i++) {
+       for (i = 0; i < ARRAY_SIZE(ports) + 1; i++) {
                /* gpio offset in bits array */
                bits_offset = i * gpio_reg_size;
 
@@ -236,7 +236,7 @@ static int idio_24_gpio_get_multiple(struct gpio_chip *chip,
 
                /* read bits from current gpio port (port 6 is TTL GPIO) */
                if (i < 6)
-                       port_state = ioread8(ports + i);
+                       port_state = ioread8(ports[i]);
                else if (ioread8(&idio24gpio->reg->ctl) & out_mode_mask)
                        port_state = ioread8(&idio24gpio->reg->ttl_out0_7);
                else
@@ -301,9 +301,9 @@ static void idio_24_gpio_set_multiple(struct gpio_chip *chip,
        const unsigned long port_mask = GENMASK(gpio_reg_size, 0);
        unsigned long flags;
        unsigned int out_state;
-       u8 __iomem ports[] = {
-               idio24gpio->reg->out0_7, idio24gpio->reg->out8_15,
-               idio24gpio->reg->out16_23
+       void __iomem *ports[] = {
+               &idio24gpio->reg->out0_7, &idio24gpio->reg->out8_15,
+               &idio24gpio->reg->out16_23
        };
        const unsigned long out_mode_mask = BIT(1);
        const unsigned int ttl_offset = 48;
@@ -327,9 +327,9 @@ static void idio_24_gpio_set_multiple(struct gpio_chip *chip,
                raw_spin_lock_irqsave(&idio24gpio->lock, flags);
 
                /* process output lines */
-               out_state = ioread8(ports + i) & ~gpio_mask;
+               out_state = ioread8(ports[i]) & ~gpio_mask;
                out_state |= (*bits >> bits_offset) & gpio_mask;
-               iowrite8(out_state, ports + i);
+               iowrite8(out_state, ports[i]);
 
                raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
        }
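
Both get_multiple/set_multiple hunks above replace an array of u8 register values with an array of void __iomem * register addresses, so each ioread8()/iowrite8() touches the device rather than a copy taken when the array was initialised. A minimal sketch of the corrected idiom (struct my_regs and its fields are made up for illustration):

#include <linux/io.h>
#include <linux/kernel.h>

/* Hypothetical register block; only the layout idea matters here. */
struct my_regs {
	u8 out0_7;
	u8 out8_15;
	u8 in0_7;
	u8 in8_15;
};

static u32 my_read_all_ports(struct my_regs __iomem *reg)
{
	/* Store register *addresses*, not values read at init time. */
	void __iomem *ports[] = {
		&reg->out0_7, &reg->out8_15, &reg->in0_7, &reg->in8_15,
	};
	u32 state = 0;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ports); i++)
		state |= (u32)ioread8(ports[i]) << (8 * i);	/* fresh MMIO read */

	return state;
}
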
index 43aeb07..d8ccb50 100644 (file)
@@ -497,7 +497,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
        struct gpiohandle_request handlereq;
        struct linehandle_state *lh;
        struct file *file;
-       int fd, i, ret;
+       int fd, i, count = 0, ret;
        u32 lflags;
 
        if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
@@ -558,6 +558,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
                if (ret)
                        goto out_free_descs;
                lh->descs[i] = desc;
+               count = i;
 
                if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
                        set_bit(FLAG_ACTIVE_LOW, &desc->flags);
@@ -628,7 +629,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
 out_put_unused_fd:
        put_unused_fd(fd);
 out_free_descs:
-       for (; i >= 0; i--)
+       for (i = 0; i < count; i++)
                gpiod_free(lh->descs[i]);
        kfree(lh->label);
 out_free_lh:
@@ -902,7 +903,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
        desc = &gdev->descs[offset];
        ret = gpiod_request(desc, le->label);
        if (ret)
-               goto out_free_desc;
+               goto out_free_label;
        le->desc = desc;
        le->eflags = eflags;
 
index 09d3505..3fabf9f 100644 (file)
@@ -419,9 +419,11 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
 
        if (other) {
                signed long r;
-               r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+               r = dma_fence_wait(other, true);
                if (r < 0) {
-                       DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+                       if (r != -ERESTARTSYS)
+                               DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+
                        return r;
                }
        }
index b0e591e..e14263f 100644 (file)
@@ -1459,10 +1459,11 @@ static const u32 sgpr_init_compute_shader[] =
 static const u32 vgpr_init_regs[] =
 {
        mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
-       mmCOMPUTE_RESOURCE_LIMITS, 0,
+       mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
        mmCOMPUTE_NUM_THREAD_X, 256*4,
        mmCOMPUTE_NUM_THREAD_Y, 1,
        mmCOMPUTE_NUM_THREAD_Z, 1,
+       mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
        mmCOMPUTE_PGM_RSRC2, 20,
        mmCOMPUTE_USER_DATA_0, 0xedcedc00,
        mmCOMPUTE_USER_DATA_1, 0xedcedc01,
@@ -1479,10 +1480,11 @@ static const u32 vgpr_init_regs[] =
 static const u32 sgpr1_init_regs[] =
 {
        mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
-       mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
+       mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
        mmCOMPUTE_NUM_THREAD_X, 256*5,
        mmCOMPUTE_NUM_THREAD_Y, 1,
        mmCOMPUTE_NUM_THREAD_Z, 1,
+       mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
        mmCOMPUTE_PGM_RSRC2, 20,
        mmCOMPUTE_USER_DATA_0, 0xedcedc00,
        mmCOMPUTE_USER_DATA_1, 0xedcedc01,
@@ -1503,6 +1505,7 @@ static const u32 sgpr2_init_regs[] =
        mmCOMPUTE_NUM_THREAD_X, 256*5,
        mmCOMPUTE_NUM_THREAD_Y, 1,
        mmCOMPUTE_NUM_THREAD_Z, 1,
+       mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
        mmCOMPUTE_PGM_RSRC2, 20,
        mmCOMPUTE_USER_DATA_0, 0xedcedc00,
        mmCOMPUTE_USER_DATA_1, 0xedcedc01,
index ed2f06c..3858820 100644 (file)
@@ -6,5 +6,6 @@ config HSA_AMD
        tristate "HSA kernel driver for AMD GPU devices"
        depends on DRM_AMDGPU && X86_64
        imply AMD_IOMMU_V2
+       select MMU_NOTIFIER
        help
          Enable this if you want to use HSA features on AMD GPU devices.
index cd679cf..59808a3 100644 (file)
@@ -749,12 +749,13 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
        struct timespec64 time;
 
        dev = kfd_device_by_id(args->gpu_id);
-       if (dev == NULL)
-               return -EINVAL;
-
-       /* Reading GPU clock counter from KGD */
-       args->gpu_clock_counter =
-               dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);
+       if (dev)
+               /* Reading GPU clock counter from KGD */
+               args->gpu_clock_counter =
+                       dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);
+       else
+               /* Node without GPU resource */
+               args->gpu_clock_counter = 0;
 
        /* No access to rdtsc. Using raw monotonic time */
        getrawmonotonic64(&time);
@@ -1147,7 +1148,7 @@ err_unlock:
        return ret;
 }
 
-bool kfd_dev_is_large_bar(struct kfd_dev *dev)
+static bool kfd_dev_is_large_bar(struct kfd_dev *dev)
 {
        struct kfd_local_mem_info mem_info;
 
@@ -1421,7 +1422,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
 
        pdd = kfd_get_process_device_data(dev, p);
        if (!pdd) {
-               err = PTR_ERR(pdd);
+               err = -EINVAL;
                goto bind_process_to_device_failed;
        }
 
index 4e2f379..1dd1142 100644 (file)
@@ -4557,6 +4557,7 @@ static int dm_update_crtcs_state(struct dc *dc,
                struct amdgpu_dm_connector *aconnector = NULL;
                struct drm_connector_state *new_con_state = NULL;
                struct dm_connector_state *dm_conn_state = NULL;
+               struct drm_plane_state *new_plane_state = NULL;
 
                new_stream = NULL;
 
@@ -4564,6 +4565,13 @@ static int dm_update_crtcs_state(struct dc *dc,
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                acrtc = to_amdgpu_crtc(crtc);
 
+               new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
+
+               if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
+                       ret = -EINVAL;
+                       goto fail;
+               }
+
                aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
 
                /* TODO This hack should go away */
@@ -4760,7 +4768,7 @@ static int dm_update_planes_state(struct dc *dc,
                        if (!dm_old_crtc_state->stream)
                                continue;
 
-                       DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n",
+                       DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
                                        plane->base.id, old_plane_crtc->base.id);
 
                        if (!dc_remove_plane_from_context(
index 490017d..4be21bf 100644 (file)
@@ -329,14 +329,15 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
 {
        int src;
        struct irq_list_head *lh;
+       unsigned long irq_table_flags;
        DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
-
        for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
-
+               DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
                /* The handler was removed from the table,
                 * it means it is safe to flush all the 'work'
                 * (because no code can schedule a new one). */
                lh = &adev->dm.irq_handler_list_low_tab[src];
+               DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
                flush_work(&lh->work);
        }
 }
index 8291d74..4304d9e 100644 (file)
@@ -83,21 +83,22 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
        enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
                I2C_MOT_TRUE : I2C_MOT_FALSE;
        enum ddc_result res;
-       ssize_t read_bytes;
+       uint32_t read_bytes = msg->size;
 
        if (WARN_ON(msg->size > 16))
                return -E2BIG;
 
        switch (msg->request & ~DP_AUX_I2C_MOT) {
        case DP_AUX_NATIVE_READ:
-               read_bytes = dal_ddc_service_read_dpcd_data(
+               res = dal_ddc_service_read_dpcd_data(
                                TO_DM_AUX(aux)->ddc_service,
                                false,
                                I2C_MOT_UNDEF,
                                msg->address,
                                msg->buffer,
-                               msg->size);
-               return read_bytes;
+                               msg->size,
+                               &read_bytes);
+               break;
        case DP_AUX_NATIVE_WRITE:
                res = dal_ddc_service_write_dpcd_data(
                                TO_DM_AUX(aux)->ddc_service,
@@ -108,14 +109,15 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
                                msg->size);
                break;
        case DP_AUX_I2C_READ:
-               read_bytes = dal_ddc_service_read_dpcd_data(
+               res = dal_ddc_service_read_dpcd_data(
                                TO_DM_AUX(aux)->ddc_service,
                                true,
                                mot,
                                msg->address,
                                msg->buffer,
-                               msg->size);
-               return read_bytes;
+                               msg->size,
+                               &read_bytes);
+               break;
        case DP_AUX_I2C_WRITE:
                res = dal_ddc_service_write_dpcd_data(
                                TO_DM_AUX(aux)->ddc_service,
@@ -137,7 +139,9 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
                 r == DDC_RESULT_SUCESSFULL);
 #endif
 
-       return msg->size;
+       if (res != DDC_RESULT_SUCESSFULL)
+               return -EIO;
+       return read_bytes;
 }
 
 static enum drm_connector_status
@@ -161,6 +165,11 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector)
        struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
        struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;
 
+       if (amdgpu_dm_connector->edid) {
+               kfree(amdgpu_dm_connector->edid);
+               amdgpu_dm_connector->edid = NULL;
+       }
+
        drm_encoder_cleanup(&amdgpu_encoder->base);
        kfree(amdgpu_encoder);
        drm_connector_cleanup(connector);
@@ -181,28 +190,22 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
 void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
 {
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-       struct edid *edid;
        struct dc_sink *dc_sink;
        struct dc_sink_init_data init_params = {
                        .link = aconnector->dc_link,
                        .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
 
+       /* FIXME none of this is safe. we shouldn't touch aconnector here in
+        * atomic_check
+        */
+
        /*
         * TODO: Need to further figure out why ddc.algo is NULL while MST port exists
         */
        if (!aconnector->port || !aconnector->port->aux.ddc.algo)
                return;
 
-       edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
-
-       if (!edid) {
-               drm_mode_connector_update_edid_property(
-                       &aconnector->base,
-                       NULL);
-               return;
-       }
-
-       aconnector->edid = edid;
+       ASSERT(aconnector->edid);
 
        dc_sink = dc_link_add_remote_sink(
                aconnector->dc_link,
@@ -215,9 +218,6 @@ void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
 
        amdgpu_dm_add_sink_to_freesync_module(
                        connector, aconnector->edid);
-
-       drm_mode_connector_update_edid_property(
-                                       &aconnector->base, aconnector->edid);
 }
 
 static int dm_dp_mst_get_modes(struct drm_connector *connector)
@@ -230,10 +230,6 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
 
        if (!aconnector->edid) {
                struct edid *edid;
-               struct dc_sink *dc_sink;
-               struct dc_sink_init_data init_params = {
-                               .link = aconnector->dc_link,
-                               .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
                edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
 
                if (!edid) {
@@ -244,11 +240,17 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
                }
 
                aconnector->edid = edid;
+       }
 
+       if (!aconnector->dc_sink) {
+               struct dc_sink *dc_sink;
+               struct dc_sink_init_data init_params = {
+                               .link = aconnector->dc_link,
+                               .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
                dc_sink = dc_link_add_remote_sink(
                        aconnector->dc_link,
-                       (uint8_t *)edid,
-                       (edid->extensions + 1) * EDID_LENGTH,
+                       (uint8_t *)aconnector->edid,
+                       (aconnector->edid->extensions + 1) * EDID_LENGTH,
                        &init_params);
 
                dc_sink->priv = aconnector;
@@ -256,12 +258,12 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
 
                if (aconnector->dc_sink)
                        amdgpu_dm_add_sink_to_freesync_module(
-                                       connector, edid);
-
-               drm_mode_connector_update_edid_property(
-                                               &aconnector->base, edid);
+                                       connector, aconnector->edid);
        }
 
+       drm_mode_connector_update_edid_property(
+                                       &aconnector->base, aconnector->edid);
+
        ret = drm_add_edid_modes(connector, aconnector->edid);
 
        return ret;
@@ -424,14 +426,6 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
                dc_sink_release(aconnector->dc_sink);
                aconnector->dc_sink = NULL;
        }
-       if (aconnector->edid) {
-               kfree(aconnector->edid);
-               aconnector->edid = NULL;
-       }
-
-       drm_mode_connector_update_edid_property(
-                       &aconnector->base,
-                       NULL);
 
        aconnector->mst_connected = false;
 }
index 985fe8c..10a5807 100644 (file)
@@ -70,6 +70,10 @@ static enum bp_result get_firmware_info_v3_1(
        struct bios_parser *bp,
        struct dc_firmware_info *info);
 
+static enum bp_result get_firmware_info_v3_2(
+       struct bios_parser *bp,
+       struct dc_firmware_info *info);
+
 static struct atom_hpd_int_record *get_hpd_record(struct bios_parser *bp,
                struct atom_display_object_path_v2 *object);
 
@@ -1321,9 +1325,11 @@ static enum bp_result bios_parser_get_firmware_info(
                case 3:
                        switch (revision.minor) {
                        case 1:
-                       case 2:
                                result = get_firmware_info_v3_1(bp, info);
                                break;
+                       case 2:
+                               result = get_firmware_info_v3_2(bp, info);
+                               break;
                        default:
                                break;
                        }
@@ -1383,6 +1389,84 @@ static enum bp_result get_firmware_info_v3_1(
        return BP_RESULT_OK;
 }
 
+static enum bp_result get_firmware_info_v3_2(
+       struct bios_parser *bp,
+       struct dc_firmware_info *info)
+{
+       struct atom_firmware_info_v3_2 *firmware_info;
+       struct atom_display_controller_info_v4_1 *dce_info = NULL;
+       struct atom_common_table_header *header;
+       struct atom_data_revision revision;
+       struct atom_smu_info_v3_2 *smu_info_v3_2 = NULL;
+       struct atom_smu_info_v3_3 *smu_info_v3_3 = NULL;
+
+       if (!info)
+               return BP_RESULT_BADINPUT;
+
+       firmware_info = GET_IMAGE(struct atom_firmware_info_v3_2,
+                       DATA_TABLES(firmwareinfo));
+
+       dce_info = GET_IMAGE(struct atom_display_controller_info_v4_1,
+                       DATA_TABLES(dce_info));
+
+       if (!firmware_info || !dce_info)
+               return BP_RESULT_BADBIOSTABLE;
+
+       memset(info, 0, sizeof(*info));
+
+       header = GET_IMAGE(struct atom_common_table_header,
+                                       DATA_TABLES(smu_info));
+       get_atom_data_table_revision(header, &revision);
+
+       if (revision.minor == 2) {
+               /* Vega12 */
+               smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2,
+                                                       DATA_TABLES(smu_info));
+
+               if (!smu_info_v3_2)
+                       return BP_RESULT_BADBIOSTABLE;
+
+               info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10;
+       } else if (revision.minor == 3) {
+               /* Vega20 */
+               smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3,
+                                                       DATA_TABLES(smu_info));
+
+               if (!smu_info_v3_3)
+                       return BP_RESULT_BADBIOSTABLE;
+
+               info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10;
+       }
+
+        // We need to convert from 10KHz units into KHz units.
+       info->default_memory_clk = firmware_info->bootup_mclk_in10khz * 10;
+
+        /* 27MHz for Vega10 & Vega12; 100MHz for Vega20 */
+       info->pll_info.crystal_frequency = dce_info->dce_refclk_10khz * 10;
+       /* Hardcode frequency if BIOS gives no DCE Ref Clk */
+       if (info->pll_info.crystal_frequency == 0) {
+               if (revision.minor == 2)
+                       info->pll_info.crystal_frequency = 27000;
+               else if (revision.minor == 3)
+                       info->pll_info.crystal_frequency = 100000;
+       }
+       /*dp_phy_ref_clk is not correct for atom_display_controller_info_v4_2, but we don't use it*/
+       info->dp_phy_ref_clk     = dce_info->dpphy_refclk_10khz * 10;
+       info->i2c_engine_ref_clk = dce_info->i2c_engine_refclk_10khz * 10;
+
+       /* Get GPU PLL VCO Clock */
+       if (bp->cmd_tbl.get_smu_clock_info != NULL) {
+               if (revision.minor == 2)
+                       info->smu_gpu_pll_output_freq =
+                                       bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10;
+               else if (revision.minor == 3)
+                       info->smu_gpu_pll_output_freq =
+                                       bp->cmd_tbl.get_smu_clock_info(bp, SMU11_SYSPLL3_0_ID) * 10;
+       }
+
+       return BP_RESULT_OK;
+}
+
 static enum bp_result bios_parser_get_encoder_cap_info(
        struct dc_bios *dcb,
        struct graphics_object_id object_id,
index 49c2fac..ae48d60 100644 (file)
@@ -629,13 +629,14 @@ bool dal_ddc_service_query_ddc_data(
        return ret;
 }
 
-ssize_t dal_ddc_service_read_dpcd_data(
+enum ddc_result dal_ddc_service_read_dpcd_data(
        struct ddc_service *ddc,
        bool i2c,
        enum i2c_mot_mode mot,
        uint32_t address,
        uint8_t *data,
-       uint32_t len)
+       uint32_t len,
+       uint32_t *read)
 {
        struct aux_payload read_payload = {
                .i2c_over_aux = i2c,
@@ -652,6 +653,8 @@ ssize_t dal_ddc_service_read_dpcd_data(
                .mot = mot
        };
 
+       *read = 0;
+
        if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
                BREAK_TO_DEBUGGER();
                return DDC_RESULT_FAILED_INVALID_OPERATION;
@@ -661,7 +664,8 @@ ssize_t dal_ddc_service_read_dpcd_data(
                ddc->ctx->i2caux,
                ddc->ddc_pin,
                &command)) {
-               return (ssize_t)command.payloads->length;
+               *read = command.payloads->length;
+               return DDC_RESULT_SUCESSFULL;
        }
 
        return DDC_RESULT_FAILED_OPERATION;
index ade5b8e..132eef3 100644 (file)
@@ -66,8 +66,8 @@ struct dc_plane_state *dc_create_plane_state(struct dc *dc)
 {
        struct dc *core_dc = dc;
 
-       struct dc_plane_state *plane_state = kzalloc(sizeof(*plane_state),
-                                                    GFP_KERNEL);
+       struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state),
+                                                     GFP_KERNEL);
 
        if (NULL == plane_state)
                return NULL;
@@ -120,7 +120,7 @@ static void dc_plane_state_free(struct kref *kref)
 {
        struct dc_plane_state *plane_state = container_of(kref, struct dc_plane_state, refcount);
        destruct(plane_state);
-       kfree(plane_state);
+       kvfree(plane_state);
 }
 
 void dc_plane_state_release(struct dc_plane_state *plane_state)
@@ -136,7 +136,7 @@ void dc_gamma_retain(struct dc_gamma *gamma)
 static void dc_gamma_free(struct kref *kref)
 {
        struct dc_gamma *gamma = container_of(kref, struct dc_gamma, refcount);
-       kfree(gamma);
+       kvfree(gamma);
 }
 
 void dc_gamma_release(struct dc_gamma **gamma)
@@ -147,7 +147,7 @@ void dc_gamma_release(struct dc_gamma **gamma)
 
 struct dc_gamma *dc_create_gamma(void)
 {
-       struct dc_gamma *gamma = kzalloc(sizeof(*gamma), GFP_KERNEL);
+       struct dc_gamma *gamma = kvzalloc(sizeof(*gamma), GFP_KERNEL);
 
        if (gamma == NULL)
                goto alloc_fail;
@@ -167,7 +167,7 @@ void dc_transfer_func_retain(struct dc_transfer_func *tf)
 static void dc_transfer_func_free(struct kref *kref)
 {
        struct dc_transfer_func *tf = container_of(kref, struct dc_transfer_func, refcount);
-       kfree(tf);
+       kvfree(tf);
 }
 
 void dc_transfer_func_release(struct dc_transfer_func *tf)
@@ -177,7 +177,7 @@ void dc_transfer_func_release(struct dc_transfer_func *tf)
 
 struct dc_transfer_func *dc_create_transfer_func(void)
 {
-       struct dc_transfer_func *tf = kzalloc(sizeof(*tf), GFP_KERNEL);
+       struct dc_transfer_func *tf = kvzalloc(sizeof(*tf), GFP_KERNEL);
 
        if (tf == NULL)
                goto alloc_fail;
index 090b7a8..30b3a08 100644 (file)
@@ -102,13 +102,14 @@ bool dal_ddc_service_query_ddc_data(
                uint8_t *read_buf,
                uint32_t read_size);
 
-ssize_t dal_ddc_service_read_dpcd_data(
+enum ddc_result dal_ddc_service_read_dpcd_data(
                struct ddc_service *ddc,
                bool i2c,
                enum i2c_mot_mode mot,
                uint32_t address,
                uint8_t *data,
-               uint32_t len);
+               uint32_t len,
+               uint32_t *read);
 
 enum ddc_result dal_ddc_service_write_dpcd_data(
                struct ddc_service *ddc,
index 9831cb5..9b0a04f 100644 (file)
 
 #define AI_GREENLAND_P_A0 1
 #define AI_GREENLAND_P_A1 2
+#define AI_UNKNOWN 0xFF
 
-#define ASICREV_IS_GREENLAND_M(eChipRev)  (eChipRev < AI_UNKNOWN)
-#define ASICREV_IS_GREENLAND_P(eChipRev)  (eChipRev < AI_UNKNOWN)
+#define AI_VEGA12_P_A0 20
+#define ASICREV_IS_GREENLAND_M(eChipRev)  (eChipRev < AI_VEGA12_P_A0)
+#define ASICREV_IS_GREENLAND_P(eChipRev)  (eChipRev < AI_VEGA12_P_A0)
+
+#define ASICREV_IS_VEGA12_P(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_UNKNOWN))
+#define ASICREV_IS_VEGA12_p(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_UNKNOWN))
 
 /* DCN1_0 */
 #define INTERNAL_REV_RAVEN_A0             0x00    /* First spin of Raven */
index e7e374f..b3747a0 100644 (file)
@@ -1093,19 +1093,19 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
 
        output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
 
-       rgb_user = kzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
-                          GFP_KERNEL);
+       rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
+                           GFP_KERNEL);
        if (!rgb_user)
                goto rgb_user_alloc_fail;
-       rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS),
-                       GFP_KERNEL);
+       rgb_regamma = kvzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS),
+                              GFP_KERNEL);
        if (!rgb_regamma)
                goto rgb_regamma_alloc_fail;
-       axix_x = kzalloc(sizeof(*axix_x) * (ramp->num_entries + 3),
-                        GFP_KERNEL);
+       axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + 3),
+                         GFP_KERNEL);
        if (!axix_x)
                goto axix_x_alloc_fail;
-       coeff = kzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
+       coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
        if (!coeff)
                goto coeff_alloc_fail;
 
@@ -1157,13 +1157,13 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
 
        ret = true;
 
-       kfree(coeff);
+       kvfree(coeff);
 coeff_alloc_fail:
-       kfree(axix_x);
+       kvfree(axix_x);
 axix_x_alloc_fail:
-       kfree(rgb_regamma);
+       kvfree(rgb_regamma);
 rgb_regamma_alloc_fail:
-       kfree(rgb_user);
+       kvfree(rgb_user);
 rgb_user_alloc_fail:
        return ret;
 }
@@ -1192,19 +1192,19 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
 
        input_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
 
-       rgb_user = kzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
-                          GFP_KERNEL);
+       rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS),
+                           GFP_KERNEL);
        if (!rgb_user)
                goto rgb_user_alloc_fail;
-       curve = kzalloc(sizeof(*curve) * (MAX_HW_POINTS + _EXTRA_POINTS),
-                       GFP_KERNEL);
+       curve = kvzalloc(sizeof(*curve) * (MAX_HW_POINTS + _EXTRA_POINTS),
+                        GFP_KERNEL);
        if (!curve)
                goto curve_alloc_fail;
-       axix_x = kzalloc(sizeof(*axix_x) * (ramp->num_entries + _EXTRA_POINTS),
-                        GFP_KERNEL);
+       axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + _EXTRA_POINTS),
+                         GFP_KERNEL);
        if (!axix_x)
                goto axix_x_alloc_fail;
-       coeff = kzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
+       coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL);
        if (!coeff)
                goto coeff_alloc_fail;
 
@@ -1246,13 +1246,13 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
 
        ret = true;
 
-       kfree(coeff);
+       kvfree(coeff);
 coeff_alloc_fail:
-       kfree(axix_x);
+       kvfree(axix_x);
 axix_x_alloc_fail:
-       kfree(curve);
+       kvfree(curve);
 curve_alloc_fail:
-       kfree(rgb_user);
+       kvfree(rgb_user);
 rgb_user_alloc_fail:
 
        return ret;
@@ -1281,8 +1281,9 @@ bool  mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
                }
                ret = true;
        } else if (trans == TRANSFER_FUNCTION_PQ) {
-               rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS +
-                                               _EXTRA_POINTS), GFP_KERNEL);
+               rgb_regamma = kvzalloc(sizeof(*rgb_regamma) *
+                                      (MAX_HW_POINTS + _EXTRA_POINTS),
+                                      GFP_KERNEL);
                if (!rgb_regamma)
                        goto rgb_regamma_alloc_fail;
                points->end_exponent = 7;
@@ -1302,11 +1303,12 @@ bool  mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
                }
                ret = true;
 
-               kfree(rgb_regamma);
+               kvfree(rgb_regamma);
        } else if (trans == TRANSFER_FUNCTION_SRGB ||
                          trans == TRANSFER_FUNCTION_BT709) {
-               rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS +
-                                               _EXTRA_POINTS), GFP_KERNEL);
+               rgb_regamma = kvzalloc(sizeof(*rgb_regamma) *
+                                      (MAX_HW_POINTS + _EXTRA_POINTS),
+                                      GFP_KERNEL);
                if (!rgb_regamma)
                        goto rgb_regamma_alloc_fail;
                points->end_exponent = 0;
@@ -1324,7 +1326,7 @@ bool  mod_color_calculate_curve(enum dc_transfer_func_predefined trans,
                }
                ret = true;
 
-               kfree(rgb_regamma);
+               kvfree(rgb_regamma);
        }
 rgb_regamma_alloc_fail:
        return ret;
@@ -1348,8 +1350,9 @@ bool  mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
                }
                ret = true;
        } else if (trans == TRANSFER_FUNCTION_PQ) {
-               rgb_degamma = kzalloc(sizeof(*rgb_degamma) * (MAX_HW_POINTS +
-                                               _EXTRA_POINTS), GFP_KERNEL);
+               rgb_degamma = kvzalloc(sizeof(*rgb_degamma) *
+                                      (MAX_HW_POINTS + _EXTRA_POINTS),
+                                      GFP_KERNEL);
                if (!rgb_degamma)
                        goto rgb_degamma_alloc_fail;
 
@@ -1364,11 +1367,12 @@ bool  mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
                }
                ret = true;
 
-               kfree(rgb_degamma);
+               kvfree(rgb_degamma);
        } else if (trans == TRANSFER_FUNCTION_SRGB ||
                          trans == TRANSFER_FUNCTION_BT709) {
-               rgb_degamma = kzalloc(sizeof(*rgb_degamma) * (MAX_HW_POINTS +
-                                               _EXTRA_POINTS), GFP_KERNEL);
+               rgb_degamma = kvzalloc(sizeof(*rgb_degamma) *
+                                      (MAX_HW_POINTS + _EXTRA_POINTS),
+                                      GFP_KERNEL);
                if (!rgb_degamma)
                        goto rgb_degamma_alloc_fail;
 
@@ -1382,7 +1386,7 @@ bool  mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
                }
                ret = true;
 
-               kfree(rgb_degamma);
+               kvfree(rgb_degamma);
        }
        points->end_exponent = 0;
        points->x_point_at_y1_red = 1;
index 0f5ad54..de177ce 100644 (file)
@@ -501,6 +501,32 @@ enum atom_cooling_solution_id{
   LIQUID_COOLING = 0x01
 };
 
+struct atom_firmware_info_v3_2 {
+  struct atom_common_table_header table_header;
+  uint32_t firmware_revision;
+  uint32_t bootup_sclk_in10khz;
+  uint32_t bootup_mclk_in10khz;
+  uint32_t firmware_capability;             // enum atombios_firmware_capability
+  uint32_t main_call_parser_entry;          /* direct address of main parser call in VBIOS binary. */
+  uint32_t bios_scratch_reg_startaddr;      // 1st bios scratch register dword address
+  uint16_t bootup_vddc_mv;
+  uint16_t bootup_vddci_mv;
+  uint16_t bootup_mvddc_mv;
+  uint16_t bootup_vddgfx_mv;
+  uint8_t  mem_module_id;
+  uint8_t  coolingsolution_id;              /*0: Air cooling; 1: Liquid cooling ... */
+  uint8_t  reserved1[2];
+  uint32_t mc_baseaddr_high;
+  uint32_t mc_baseaddr_low;
+  uint8_t  board_i2c_feature_id;            // enum of atom_board_i2c_feature_id_def
+  uint8_t  board_i2c_feature_gpio_id;       // i2c id find in gpio_lut data table gpio_id
+  uint8_t  board_i2c_feature_slave_addr;
+  uint8_t  reserved3;
+  uint16_t bootup_mvddq_mv;
+  uint16_t bootup_mvpp_mv;
+  uint32_t zfbstartaddrin16mb;
+  uint32_t reserved2[3];
+};
 
 /* 
   ***************************************************************************
@@ -1169,7 +1195,29 @@ struct  atom_gfx_info_v2_2
   uint32_t rlc_gpu_timer_refclk; 
 };
 
-
+struct  atom_gfx_info_v2_3 {
+  struct  atom_common_table_header  table_header;
+  uint8_t gfxip_min_ver;
+  uint8_t gfxip_max_ver;
+  uint8_t max_shader_engines;
+  uint8_t max_tile_pipes;
+  uint8_t max_cu_per_sh;
+  uint8_t max_sh_per_se;
+  uint8_t max_backends_per_se;
+  uint8_t max_texture_channel_caches;
+  uint32_t regaddr_cp_dma_src_addr;
+  uint32_t regaddr_cp_dma_src_addr_hi;
+  uint32_t regaddr_cp_dma_dst_addr;
+  uint32_t regaddr_cp_dma_dst_addr_hi;
+  uint32_t regaddr_cp_dma_command;
+  uint32_t regaddr_cp_status;
+  uint32_t regaddr_rlc_gpu_clock_32;
+  uint32_t rlc_gpu_timer_refclk;
+  uint8_t active_cu_per_sh;
+  uint8_t active_rb_per_se;
+  uint16_t gcgoldenoffset;
+  uint32_t rm21_sram_vmin_value;
+};
 
 /* 
   ***************************************************************************
@@ -1198,6 +1246,76 @@ struct atom_smu_info_v3_1
   uint8_t  fw_ctf_polarity;         // GPIO polarity for CTF
 };
 
+struct atom_smu_info_v3_2 {
+  struct   atom_common_table_header  table_header;
+  uint8_t  smuip_min_ver;
+  uint8_t  smuip_max_ver;
+  uint8_t  smu_rsd1;
+  uint8_t  gpuclk_ss_mode;
+  uint16_t sclk_ss_percentage;
+  uint16_t sclk_ss_rate_10hz;
+  uint16_t gpuclk_ss_percentage;    // in unit of 0.001%
+  uint16_t gpuclk_ss_rate_10hz;
+  uint32_t core_refclk_10khz;
+  uint8_t  ac_dc_gpio_bit;          // GPIO bit shift in SMU_GPIOPAD_A  configured for AC/DC switching, =0xff means invalid
+  uint8_t  ac_dc_polarity;          // GPIO polarity for AC/DC switching
+  uint8_t  vr0hot_gpio_bit;         // GPIO bit shift in SMU_GPIOPAD_A  configured for VR0 HOT event, =0xff means invalid
+  uint8_t  vr0hot_polarity;         // GPIO polarity for VR0 HOT event
+  uint8_t  vr1hot_gpio_bit;         // GPIO bit shift in SMU_GPIOPAD_A configured for VR1 HOT event , =0xff means invalid
+  uint8_t  vr1hot_polarity;         // GPIO polarity for VR1 HOT event
+  uint8_t  fw_ctf_gpio_bit;         // GPIO bit shift in SMU_GPIOPAD_A configured for CTF, =0xff means invalid
+  uint8_t  fw_ctf_polarity;         // GPIO polarity for CTF
+  uint8_t  pcc_gpio_bit;            // GPIO bit shift in SMU_GPIOPAD_A configured for PCC, =0xff means invalid
+  uint8_t  pcc_gpio_polarity;       // GPIO polarity for PCC
+  uint16_t smugoldenoffset;
+  uint32_t gpupll_vco_freq_10khz;
+  uint32_t bootup_smnclk_10khz;
+  uint32_t bootup_socclk_10khz;
+  uint32_t bootup_mp0clk_10khz;
+  uint32_t bootup_mp1clk_10khz;
+  uint32_t bootup_lclk_10khz;
+  uint32_t bootup_dcefclk_10khz;
+  uint32_t ctf_threshold_override_value;
+  uint32_t reserved[5];
+};
+
+struct atom_smu_info_v3_3 {
+  struct   atom_common_table_header  table_header;
+  uint8_t  smuip_min_ver;
+  uint8_t  smuip_max_ver;
+  uint8_t  smu_rsd1;
+  uint8_t  gpuclk_ss_mode;
+  uint16_t sclk_ss_percentage;
+  uint16_t sclk_ss_rate_10hz;
+  uint16_t gpuclk_ss_percentage;    // in unit of 0.001%
+  uint16_t gpuclk_ss_rate_10hz;
+  uint32_t core_refclk_10khz;
+  uint8_t  ac_dc_gpio_bit;          // GPIO bit shift in SMU_GPIOPAD_A  configured for AC/DC switching, =0xff means invalid
+  uint8_t  ac_dc_polarity;          // GPIO polarity for AC/DC switching
+  uint8_t  vr0hot_gpio_bit;         // GPIO bit shift in SMU_GPIOPAD_A  configured for VR0 HOT event, =0xff means invalid
+  uint8_t  vr0hot_polarity;         // GPIO polarity for VR0 HOT event
+  uint8_t  vr1hot_gpio_bit;         // GPIO bit shift in SMU_GPIOPAD_A configured for VR1 HOT event , =0xff means invalid
+  uint8_t  vr1hot_polarity;         // GPIO polarity for VR1 HOT event
+  uint8_t  fw_ctf_gpio_bit;         // GPIO bit shift in SMU_GPIOPAD_A configured for CTF, =0xff means invalid
+  uint8_t  fw_ctf_polarity;         // GPIO polarity for CTF
+  uint8_t  pcc_gpio_bit;            // GPIO bit shift in SMU_GPIOPAD_A configured for PCC, =0xff means invalid
+  uint8_t  pcc_gpio_polarity;       // GPIO polarity for PCC
+  uint16_t smugoldenoffset;
+  uint32_t gpupll_vco_freq_10khz;
+  uint32_t bootup_smnclk_10khz;
+  uint32_t bootup_socclk_10khz;
+  uint32_t bootup_mp0clk_10khz;
+  uint32_t bootup_mp1clk_10khz;
+  uint32_t bootup_lclk_10khz;
+  uint32_t bootup_dcefclk_10khz;
+  uint32_t ctf_threshold_override_value;
+  uint32_t syspll3_0_vco_freq_10khz;
+  uint32_t syspll3_1_vco_freq_10khz;
+  uint32_t bootup_fclk_10khz;
+  uint32_t bootup_waflclk_10khz;
+  uint32_t reserved[3];
+};
+
 /*
  ***************************************************************************
    Data Table smc_dpm_info  structure
@@ -1283,7 +1401,6 @@ struct atom_smc_dpm_info_v4_1
        uint32_t boardreserved[10];
 };
 
-
 /* 
   ***************************************************************************
     Data Table asic_profiling_info  structure
@@ -1864,6 +1981,55 @@ enum atom_smu9_syspll0_clock_id
   SMU9_SYSPLL0_DISPCLK_ID  = 11,      //       DISPCLK
 };
 
+enum atom_smu11_syspll_id {
+  SMU11_SYSPLL0_ID            = 0,
+  SMU11_SYSPLL1_0_ID          = 1,
+  SMU11_SYSPLL1_1_ID          = 2,
+  SMU11_SYSPLL1_2_ID          = 3,
+  SMU11_SYSPLL2_ID            = 4,
+  SMU11_SYSPLL3_0_ID          = 5,
+  SMU11_SYSPLL3_1_ID          = 6,
+};
+
+
+enum atom_smu11_syspll0_clock_id {
+  SMU11_SYSPLL0_SOCCLK_ID   = 0,       //      SOCCLK
+  SMU11_SYSPLL0_MP0CLK_ID   = 1,       //      MP0CLK
+  SMU11_SYSPLL0_DCLK_ID     = 2,       //      DCLK
+  SMU11_SYSPLL0_VCLK_ID     = 3,       //      VCLK
+  SMU11_SYSPLL0_ECLK_ID     = 4,       //      ECLK
+  SMU11_SYSPLL0_DCEFCLK_ID  = 5,       //      DCEFCLK
+};
+
+
+enum atom_smu11_syspll1_0_clock_id {
+  SMU11_SYSPLL1_0_UCLKA_ID   = 0,       // UCLK_a
+};
+
+enum atom_smu11_syspll1_1_clock_id {
+  SMU11_SYSPLL1_0_UCLKB_ID   = 0,       // UCLK_b
+};
+
+enum atom_smu11_syspll1_2_clock_id {
+  SMU11_SYSPLL1_0_FCLK_ID   = 0,        // FCLK
+};
+
+enum atom_smu11_syspll2_clock_id {
+  SMU11_SYSPLL2_GFXCLK_ID   = 0,        // GFXCLK
+};
+
+enum atom_smu11_syspll3_0_clock_id {
+  SMU11_SYSPLL3_0_WAFCLK_ID = 0,       //      WAFCLK
+  SMU11_SYSPLL3_0_DISPCLK_ID = 1,      //      DISPCLK
+  SMU11_SYSPLL3_0_DPREFCLK_ID = 2,     //      DPREFCLK
+};
+
+enum atom_smu11_syspll3_1_clock_id {
+  SMU11_SYSPLL3_1_MP1CLK_ID = 0,       //      MP1CLK
+  SMU11_SYSPLL3_1_SMNCLK_ID = 1,       //      SMNCLK
+  SMU11_SYSPLL3_1_LCLK_ID = 2,         //      LCLK
+};
+
 struct  atom_get_smu_clock_info_output_parameters_v3_1
 {
   union {
index 26fbeaf..18b5b2f 100644 (file)
 #define PCIE_BUS_CLK                10000
 #define TCLK                        (PCIE_BUS_CLK / 10)
 
-static const struct profile_mode_setting smu7_profiling[5] =
+static const struct profile_mode_setting smu7_profiling[6] =
                                        {{1, 0, 100, 30, 1, 0, 100, 10},
                                         {1, 10, 0, 30, 0, 0, 0, 0},
                                         {0, 0, 0, 0, 1, 10, 16, 31},
                                         {1, 0, 11, 50, 1, 0, 100, 10},
                                         {1, 0, 5, 30, 0, 0, 0, 0},
+                                        {0, 0, 0, 0, 0, 0, 0, 0},
                                        };
 
 /** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
@@ -4864,6 +4865,17 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
        len = sizeof(smu7_profiling) / sizeof(struct profile_mode_setting);
 
        for (i = 0; i < len; i++) {
+               if (i == hwmgr->power_profile_mode) {
+                       size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
+                       i, profile_name[i], "*",
+                       data->current_profile_setting.sclk_up_hyst,
+                       data->current_profile_setting.sclk_down_hyst,
+                       data->current_profile_setting.sclk_activity,
+                       data->current_profile_setting.mclk_up_hyst,
+                       data->current_profile_setting.mclk_down_hyst,
+                       data->current_profile_setting.mclk_activity);
+                       continue;
+               }
                if (smu7_profiling[i].bupdate_sclk)
                        size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ",
                        i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
@@ -4883,24 +4895,6 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
                        "-", "-", "-");
        }
 
-       size += sprintf(buf + size, "%3d %16s: %8d %16d %16d %16d %16d %16d\n",
-                       i, profile_name[i],
-                       data->custom_profile_setting.sclk_up_hyst,
-                       data->custom_profile_setting.sclk_down_hyst,
-                       data->custom_profile_setting.sclk_activity,
-                       data->custom_profile_setting.mclk_up_hyst,
-                       data->custom_profile_setting.mclk_down_hyst,
-                       data->custom_profile_setting.mclk_activity);
-
-       size += sprintf(buf + size, "%3s %16s: %8d %16d %16d %16d %16d %16d\n",
-                       "*", "CURRENT",
-                       data->current_profile_setting.sclk_up_hyst,
-                       data->current_profile_setting.sclk_down_hyst,
-                       data->current_profile_setting.sclk_activity,
-                       data->current_profile_setting.mclk_up_hyst,
-                       data->current_profile_setting.mclk_down_hyst,
-                       data->current_profile_setting.mclk_activity);
-
        return size;
 }
 
@@ -4939,16 +4933,16 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint
                if (size < 8)
                        return -EINVAL;
 
-               data->custom_profile_setting.bupdate_sclk = input[0];
-               data->custom_profile_setting.sclk_up_hyst = input[1];
-               data->custom_profile_setting.sclk_down_hyst = input[2];
-               data->custom_profile_setting.sclk_activity = input[3];
-               data->custom_profile_setting.bupdate_mclk = input[4];
-               data->custom_profile_setting.mclk_up_hyst = input[5];
-               data->custom_profile_setting.mclk_down_hyst = input[6];
-               data->custom_profile_setting.mclk_activity = input[7];
-               if (!smum_update_dpm_settings(hwmgr, &data->custom_profile_setting)) {
-                       memcpy(&data->current_profile_setting, &data->custom_profile_setting, sizeof(struct profile_mode_setting));
+               tmp.bupdate_sclk = input[0];
+               tmp.sclk_up_hyst = input[1];
+               tmp.sclk_down_hyst = input[2];
+               tmp.sclk_activity = input[3];
+               tmp.bupdate_mclk = input[4];
+               tmp.mclk_up_hyst = input[5];
+               tmp.mclk_down_hyst = input[6];
+               tmp.mclk_activity = input[7];
+               if (!smum_update_dpm_settings(hwmgr, &tmp)) {
+                       memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting));
                        hwmgr->power_profile_mode = mode;
                }
                break;
index f40179c..b8d0bb3 100644 (file)
@@ -325,7 +325,6 @@ struct smu7_hwmgr {
        uint16_t                              mem_latency_high;
        uint16_t                              mem_latency_low;
        uint32_t                              vr_config;
-       struct profile_mode_setting           custom_profile_setting;
        struct profile_mode_setting           current_profile_setting;
 };
 
index 03bc745..d9e92e3 100644 (file)
@@ -852,12 +852,10 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
 {
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
-       n = (n & 0xff) << 8;
-
        if (data->power_containment_features &
                        POWERCONTAINMENT_FEATURE_PkgPwrLimit)
                return smum_send_msg_to_smc_with_parameter(hwmgr,
-                               PPSMC_MSG_PkgPwrSetLimit, n);
+                               PPSMC_MSG_PkgPwrSetLimit, n<<8);
        return 0;
 }
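
The removed mask was the bug being fixed: n & 0xff silently truncated any requested package power limit above 255 before the value was shifted into the SMC message parameter. A standalone illustration of the arithmetic only (the unit and encoding expected by the SMC are not restated here):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t n = 300;                      /* a limit above the 8-bit range */
            uint32_t old_param = (n & 0xff) << 8;  /* 300 & 0xff = 44 -> 11264 */
            uint32_t new_param = n << 8;           /*                 -> 76800 */

            printf("old=%u new=%u\n", old_param, new_param);
            return 0;
    }
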
 
index 3aa65bd..684ac62 100644 (file)
@@ -74,6 +74,7 @@ config DRM_SIL_SII8620
        tristate "Silicon Image SII8620 HDMI/MHL bridge"
        depends on OF && RC_CORE
        select DRM_KMS_HELPER
+       imply EXTCON
        help
          Silicon Image SII8620 HDMI/MHL bridge chip driver.
 
index 498d594..9837c8d 100644 (file)
@@ -56,7 +56,9 @@ static int dumb_vga_get_modes(struct drm_connector *connector)
        }
 
        drm_mode_connector_update_edid_property(connector, edid);
-       return drm_add_edid_modes(connector, edid);
+       ret = drm_add_edid_modes(connector, edid);
+       kfree(edid);
+       return ret;
 
 fallback:
        /*
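
The two added lines above close a leak: drm_add_edid_modes() copies what it needs from the EDID, so the buffer handed back by the DDC probe remains owned by the caller and must be freed. A minimal sketch of the corrected pattern, assuming kernel context and that the EDID came from drm_get_edid():

    #include <linux/slab.h>
    #include <drm/drm_connector.h>
    #include <drm/drm_edid.h>

    static int example_get_modes(struct drm_connector *connector,
                                 struct i2c_adapter *ddc)
    {
            struct edid *edid;
            int ret;

            edid = drm_get_edid(connector, ddc);   /* allocated by the helper */
            if (!edid)
                    return 0;

            drm_mode_connector_update_edid_property(connector, edid);
            ret = drm_add_edid_modes(connector, edid);
            kfree(edid);                           /* caller owns the EDID */
            return ret;
    }
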
index 7d25c42..c825c76 100644 (file)
@@ -155,6 +155,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
                                                       state->connectors[i].state);
                state->connectors[i].ptr = NULL;
                state->connectors[i].state = NULL;
+               state->connectors[i].old_state = NULL;
+               state->connectors[i].new_state = NULL;
                drm_connector_put(connector);
        }
 
@@ -169,6 +171,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
 
                state->crtcs[i].ptr = NULL;
                state->crtcs[i].state = NULL;
+               state->crtcs[i].old_state = NULL;
+               state->crtcs[i].new_state = NULL;
        }
 
        for (i = 0; i < config->num_total_plane; i++) {
@@ -181,6 +185,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
                                                   state->planes[i].state);
                state->planes[i].ptr = NULL;
                state->planes[i].state = NULL;
+               state->planes[i].old_state = NULL;
+               state->planes[i].new_state = NULL;
        }
 
        for (i = 0; i < state->num_private_objs; i++) {
@@ -190,6 +196,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
                                                 state->private_objs[i].state);
                state->private_objs[i].ptr = NULL;
                state->private_objs[i].state = NULL;
+               state->private_objs[i].old_state = NULL;
+               state->private_objs[i].new_state = NULL;
        }
        state->num_private_objs = 0;
 
index 134069f..39f1db4 100644 (file)
@@ -4451,6 +4451,7 @@ drm_reset_display_info(struct drm_connector *connector)
        info->max_tmds_clock = 0;
        info->dvi_dual = false;
        info->has_hdmi_infoframe = false;
+       memset(&info->hdmi, 0, sizeof(info->hdmi));
 
        info->non_desktop = 0;
 }
@@ -4462,17 +4463,11 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
 
        u32 quirks = edid_get_quirks(edid);
 
+       drm_reset_display_info(connector);
+
        info->width_mm = edid->width_cm * 10;
        info->height_mm = edid->height_cm * 10;
 
-       /* driver figures it out in this case */
-       info->bpc = 0;
-       info->color_formats = 0;
-       info->cea_rev = 0;
-       info->max_tmds_clock = 0;
-       info->dvi_dual = false;
-       info->has_hdmi_infoframe = false;
-
        info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
 
        DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop);
index e394799..6d9b945 100644 (file)
@@ -212,6 +212,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
                return -ENOMEM;
 
        filp->private_data = priv;
+       filp->f_mode |= FMODE_UNSIGNED_OFFSET;
        priv->filp = filp;
        priv->pid = get_pid(task_pid(current));
        priv->minor = minor;
index abd84cb..09c4bc0 100644 (file)
@@ -954,8 +954,6 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
        drm_mode_connector_attach_encoder(connector, encoder);
 
        if (hdata->bridge) {
-               encoder->bridge = hdata->bridge;
-               hdata->bridge->encoder = encoder;
                ret = drm_bridge_attach(encoder, hdata->bridge, NULL);
                if (ret)
                        DRM_ERROR("Failed to attach bridge\n");
index 257299e..272c79f 100644 (file)
@@ -473,7 +473,7 @@ static void vp_video_buffer(struct mixer_context *ctx,
                        chroma_addr[1] = chroma_addr[0] + 0x40;
                } else {
                        luma_addr[1] = luma_addr[0] + fb->pitches[0];
-                       chroma_addr[1] = chroma_addr[0] + fb->pitches[0];
+                       chroma_addr[1] = chroma_addr[0] + fb->pitches[1];
                }
        } else {
                luma_addr[1] = 0;
@@ -482,6 +482,7 @@ static void vp_video_buffer(struct mixer_context *ctx,
 
        spin_lock_irqsave(&ctx->reg_slock, flags);
 
+       vp_reg_write(ctx, VP_SHADOW_UPDATE, 1);
        /* interlace or progressive scan mode */
        val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0);
        vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP);
@@ -495,21 +496,23 @@ static void vp_video_buffer(struct mixer_context *ctx,
        vp_reg_write(ctx, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) |
                VP_IMG_VSIZE(fb->height));
        /* chroma plane for NV12/NV21 is half the height of the luma plane */
-       vp_reg_write(ctx, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) |
+       vp_reg_write(ctx, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[1]) |
                VP_IMG_VSIZE(fb->height / 2));
 
        vp_reg_write(ctx, VP_SRC_WIDTH, state->src.w);
-       vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h);
        vp_reg_write(ctx, VP_SRC_H_POSITION,
                        VP_SRC_H_POSITION_VAL(state->src.x));
-       vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y);
-
        vp_reg_write(ctx, VP_DST_WIDTH, state->crtc.w);
        vp_reg_write(ctx, VP_DST_H_POSITION, state->crtc.x);
+
        if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
+               vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h / 2);
+               vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y / 2);
                vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h / 2);
                vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y / 2);
        } else {
+               vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h);
+               vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y);
                vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h);
                vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y);
        }
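
The restructured writes above program per-field geometry when interlaced scan-out is enabled: each field carries half the frame's lines, so source and destination heights and vertical offsets are halved together. A small standalone check of the arithmetic, assuming a 1920x1080 interlaced source mapped 1:1 to the CRTC:

    #include <stdio.h>

    int main(void)
    {
            unsigned int src_h = 1080, src_y = 0, crtc_h = 1080, crtc_y = 0;
            int interlaced = 1;

            unsigned int vp_src_h = interlaced ? src_h / 2 : src_h;   /* 540 */
            unsigned int vp_src_y = interlaced ? src_y / 2 : src_y;
            unsigned int vp_dst_h = interlaced ? crtc_h / 2 : crtc_h; /* 540 */
            unsigned int vp_dst_y = interlaced ? crtc_y / 2 : crtc_y;

            printf("%u %u %u %u\n", vp_src_h, vp_src_y, vp_dst_h, vp_dst_y);
            return 0;
    }
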
@@ -699,6 +702,15 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
 
                /* interlace scan need to check shadow register */
                if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) {
+                       if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
+                           vp_reg_read(ctx, VP_SHADOW_UPDATE))
+                               goto out;
+
+                       base = mixer_reg_read(ctx, MXR_CFG);
+                       shadow = mixer_reg_read(ctx, MXR_CFG_S);
+                       if (base != shadow)
+                               goto out;
+
                        base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
                        shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
                        if (base != shadow)
index c311f57..189cfa2 100644 (file)
@@ -47,6 +47,7 @@
 #define MXR_MO                         0x0304
 #define MXR_RESOLUTION                 0x0310
 
+#define MXR_CFG_S                      0x2004
 #define MXR_GRAPHIC0_BASE_S            0x2024
 #define MXR_GRAPHIC1_BASE_S            0x2044
 
index fc8b2c6..704ddb4 100644 (file)
@@ -2140,10 +2140,22 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
                }
        }
 
-       /* According to BSpec, "The CD clock frequency must be at least twice
+       /*
+        * According to BSpec, "The CD clock frequency must be at least twice
         * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
+        *
+        * FIXME: Check the actual, not default, BCLK being used.
+        *
+        * FIXME: This does not depend on ->has_audio because the higher CDCLK
+        * is required for audio probe even when no audio-capable displays are
+        * connected at probe time. This leads to an unnecessarily high CDCLK
+        * when audio is not required.
+        *
+        * FIXME: This limit is only applied when there are displays connected
+        * at probe time. If we probe without displays, we'll still end up using
+        * the platform minimum CDCLK, failing audio probe.
         */
-       if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
+       if (INTEL_GEN(dev_priv) >= 9)
                min_cdclk = max(2 * 96000, min_cdclk);
 
        /*
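
The widened condition above applies the comment's constraint whenever audio might later be probed: with the default 96 MHz Azalia BCLK, the CD clock floor is 2 * 96000 kHz = 192000 kHz, regardless of what the mode itself needs. A one-line check of that floor:

    #include <stdio.h>

    int main(void)
    {
            int min_cdclk = 108000;               /* e.g. a mode needing only 108 MHz */
            int floor = 2 * 96000;                /* twice the default BCLK */
            int result = min_cdclk > floor ? min_cdclk : floor;

            printf("min_cdclk=%d kHz\n", result); /* 192000 */
            return 0;
    }
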
@@ -2290,9 +2302,44 @@ static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state)
        return 0;
 }
 
+static int skl_dpll0_vco(struct intel_atomic_state *intel_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
+       struct intel_crtc *crtc;
+       struct intel_crtc_state *crtc_state;
+       int vco, i;
+
+       vco = intel_state->cdclk.logical.vco;
+       if (!vco)
+               vco = dev_priv->skl_preferred_vco_freq;
+
+       for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
+               if (!crtc_state->base.enable)
+                       continue;
+
+               if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+                       continue;
+
+               /*
+                * DPLL0 VCO may need to be adjusted to get the correct
+                * clock for eDP. This will affect cdclk as well.
+                */
+               switch (crtc_state->port_clock / 2) {
+               case 108000:
+               case 216000:
+                       vco = 8640000;
+                       break;
+               default:
+                       vco = 8100000;
+                       break;
+               }
+       }
+
+       return vco;
+}
+
 static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
 {
-       struct drm_i915_private *dev_priv = to_i915(state->dev);
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        int min_cdclk, cdclk, vco;
 
@@ -2300,9 +2347,7 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
        if (min_cdclk < 0)
                return min_cdclk;
 
-       vco = intel_state->cdclk.logical.vco;
-       if (!vco)
-               vco = dev_priv->skl_preferred_vco_freq;
+       vco = skl_dpll0_vco(intel_state);
 
        /*
         * FIXME should also account for plane ratio
index 41e6c75..f9550ea 100644 (file)
@@ -35,6 +35,7 @@
  */
 
 #define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin"
+MODULE_FIRMWARE(I915_CSR_GLK);
 #define GLK_CSR_VERSION_REQUIRED       CSR_VERSION(1, 4)
 
 #define I915_CSR_CNL "i915/cnl_dmc_ver1_07.bin"
index 3b48fd2..56004ff 100644 (file)
@@ -15178,6 +15178,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
                if (crtc_state->base.active) {
                        intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
+                       crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
+                       crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
                        intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
                        WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
 
index 9a4a51e..b7b4cfd 100644 (file)
@@ -1881,26 +1881,6 @@ found:
                                reduce_m_n);
        }
 
-       /*
-        * DPLL0 VCO may need to be adjusted to get the correct
-        * clock for eDP. This will affect cdclk as well.
-        */
-       if (intel_dp_is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) {
-               int vco;
-
-               switch (pipe_config->port_clock / 2) {
-               case 108000:
-               case 216000:
-                       vco = 8640000;
-                       break;
-               default:
-                       vco = 8100000;
-                       break;
-               }
-
-               to_intel_atomic_state(pipe_config->base.state)->cdclk.logical.vco = vco;
-       }
-
        if (!HAS_DDI(dev_priv))
                intel_dp_set_clock(encoder, pipe_config);
 
index d436858..a80fbad 100644 (file)
  * check the condition before the timeout.
  */
 #define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
-       unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1;   \
+       const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
        long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
        int ret__;                                                      \
        might_sleep();                                                  \
        for (;;) {                                                      \
-               bool expired__ = time_after(jiffies, timeout__);        \
+               const bool expired__ = ktime_after(ktime_get_raw(), end__); \
                OP;                                                     \
                if (COND) {                                             \
                        ret__ = 0;                                      \
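
The macro now measures its deadline with the raw monotonic clock instead of jiffies, so short waits are bounded by the requested microseconds rather than by whole scheduler ticks. A minimal sketch of the same deadline idiom, assuming kernel context; timeout_us, wait_us and done() are illustrative stand-ins for the macro's US, Wmin and COND:

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/ktime.h>
    #include <linux/types.h>

    static int example_wait(unsigned int timeout_us, unsigned int wait_us,
                            bool (*done)(void))
    {
            const ktime_t end = ktime_add_ns(ktime_get_raw(), 1000ll * timeout_us);

            while (!done()) {
                    /* unlike the macro above, this sketch does not re-check
                     * the condition one last time after the deadline passes */
                    if (ktime_after(ktime_get_raw(), end))
                            return -ETIMEDOUT;
                    usleep_range(wait_us, wait_us * 2);
            }
            return 0;
    }
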
index 6f12adc..6467a5c 100644 (file)
@@ -806,7 +806,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
                return;
 
        intel_fbdev_sync(ifbdev);
-       if (ifbdev->vma)
+       if (ifbdev->vma || ifbdev->helper.deferred_setup)
                drm_fb_helper_hotplug_event(&ifbdev->helper);
 }
 
index d35d2d5..8691c86 100644 (file)
@@ -326,7 +326,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
 
        I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
        POSTING_READ(lvds_encoder->reg);
-       if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 1000))
+
+       if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 5000))
                DRM_ERROR("timed out waiting for panel to power on\n");
 
        intel_panel_enable_backlight(pipe_config, conn_state);
index 53ea564..66de4b2 100644 (file)
@@ -641,19 +641,18 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv)
 
        DRM_DEBUG_KMS("Enabling DC6\n");
 
-       gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+       /* Wa Display #1183: skl,kbl,cfl */
+       if (IS_GEN9_BC(dev_priv))
+               I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+                          SKL_SELECT_ALTERNATE_DC_EXIT);
 
+       gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
 }
 
 void skl_disable_dc6(struct drm_i915_private *dev_priv)
 {
        DRM_DEBUG_KMS("Disabling DC6\n");
 
-       /* Wa Display #1183: skl,kbl,cfl */
-       if (IS_GEN9_BC(dev_priv))
-               I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
-                          SKL_SELECT_ALTERNATE_DC_EXIT);
-
        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
 }
 
index 6e5e1aa..b001699 100644 (file)
@@ -351,6 +351,7 @@ static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
 
        spin_lock_irqsave(&dev->event_lock, flags);
        mdp4_crtc->event = crtc->state->event;
+       crtc->state->event = NULL;
        spin_unlock_irqrestore(&dev->event_lock, flags);
 
        blend_setup(crtc);
index 9893e43..76b9608 100644 (file)
@@ -708,6 +708,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
 
        spin_lock_irqsave(&dev->event_lock, flags);
        mdp5_crtc->event = crtc->state->event;
+       crtc->state->event = NULL;
        spin_unlock_irqrestore(&dev->event_lock, flags);
 
        /*
index b4a8aa4..005760b 100644 (file)
@@ -171,7 +171,8 @@ uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats,
        return i;
 }
 
-const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format)
+const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format,
+               uint64_t modifier)
 {
        int i;
        for (i = 0; i < ARRAY_SIZE(formats); i++) {
index 1185487..4fa8dbe 100644 (file)
@@ -98,7 +98,7 @@ struct mdp_format {
 #define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv)
 
 uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only);
-const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
+const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format, uint64_t modifier);
 
 /* MDP capabilities */
 #define MDP_CAP_SMP            BIT(0)  /* Shared Memory Pool                 */
index 7a03a94..8baba30 100644 (file)
@@ -173,6 +173,7 @@ struct msm_dsi_host {
 
        bool registered;
        bool power_on;
+       bool enabled;
        int irq;
 };
 
@@ -775,7 +776,7 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
        switch (mipi_fmt) {
        case MIPI_DSI_FMT_RGB888:       return CMD_DST_FORMAT_RGB888;
        case MIPI_DSI_FMT_RGB666_PACKED:
-       case MIPI_DSI_FMT_RGB666:       return VID_DST_FORMAT_RGB666;
+       case MIPI_DSI_FMT_RGB666:       return CMD_DST_FORMAT_RGB666;

        case MIPI_DSI_FMT_RGB565:       return CMD_DST_FORMAT_RGB565;
        default:                        return CMD_DST_FORMAT_RGB888;
        }
@@ -986,13 +987,19 @@ static void dsi_set_tx_power_mode(int mode, struct msm_dsi_host *msm_host)
 
 static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
 {
+       u32 ret = 0;
+       struct device *dev = &msm_host->pdev->dev;
+
        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 1);
 
        reinit_completion(&msm_host->video_comp);
 
-       wait_for_completion_timeout(&msm_host->video_comp,
+       ret = wait_for_completion_timeout(&msm_host->video_comp,
                        msecs_to_jiffies(70));
 
+       if (ret <= 0)
+               dev_err(dev, "wait for video done timed out\n");
+
        dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
 }
 
@@ -1001,7 +1008,7 @@ static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
        if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
                return;
 
-       if (msm_host->power_on) {
+       if (msm_host->power_on && msm_host->enabled) {
                dsi_wait4video_done(msm_host);
                /* delay 4 ms to skip BLLP */
                usleep_range(2000, 4000);
@@ -2203,7 +2210,7 @@ int msm_dsi_host_enable(struct mipi_dsi_host *host)
         *      pm_runtime_put_autosuspend(&msm_host->pdev->dev);
         * }
         */
-
+       msm_host->enabled = true;
        return 0;
 }
 
@@ -2211,6 +2218,7 @@ int msm_dsi_host_disable(struct mipi_dsi_host *host)
 {
        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
 
+       msm_host->enabled = false;
        dsi_op_mode_config(msm_host,
                !!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO), false);
 
index 8e9d5c2..9a9fa0c 100644 (file)
@@ -265,6 +265,115 @@ int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
        return 0;
 }
 
+int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
+       struct msm_dsi_phy_clk_request *clk_req)
+{
+       const unsigned long bit_rate = clk_req->bitclk_rate;
+       const unsigned long esc_rate = clk_req->escclk_rate;
+       s32 ui, ui_x8, lpx;
+       s32 tmax, tmin;
+       s32 pcnt0 = 50;
+       s32 pcnt1 = 50;
+       s32 pcnt2 = 10;
+       s32 pcnt3 = 30;
+       s32 pcnt4 = 10;
+       s32 pcnt5 = 2;
+       s32 coeff = 1000; /* Precision, should avoid overflow */
+       s32 hb_en, hb_en_ckln;
+       s32 temp;
+
+       if (!bit_rate || !esc_rate)
+               return -EINVAL;
+
+       timing->hs_halfbyte_en = 0;
+       hb_en = 0;
+       timing->hs_halfbyte_en_ckln = 0;
+       hb_en_ckln = 0;
+
+       ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+       ui_x8 = ui << 3;
+       lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
+
+       temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
+       tmin = max_t(s32, temp, 0);
+       temp = (95 * coeff) / ui_x8;
+       tmax = max_t(s32, temp, 0);
+       timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false);
+
+       temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
+       tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+       tmax = (tmin > 255) ? 511 : 255;
+       timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false);
+
+       tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
+       temp = 105 * coeff + 12 * ui - 20 * coeff;
+       tmax = (temp + 3 * ui) / ui_x8;
+       timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
+
+       temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
+       tmin = max_t(s32, temp, 0);
+       temp = (85 * coeff + 6 * ui) / ui_x8;
+       tmax = max_t(s32, temp, 0);
+       timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false);
+
+       temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
+       tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+       tmax = 255;
+       timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false);
+
+       tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
+       temp = 105 * coeff + 12 * ui - 20 * coeff;
+       tmax = (temp / ui_x8) - 1;
+       timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false);
+
+       temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
+       timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
+
+       tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
+       tmax = 255;
+       timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false);
+
+       temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui;
+       timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8);
+
+       temp = 60 * coeff + 52 * ui - 43 * ui;
+       tmin = DIV_ROUND_UP(temp, ui_x8) - 1;
+       tmax = 63;
+       timing->shared_timings.clk_post =
+               linear_inter(tmax, tmin, pcnt2, 0, false);
+
+       temp = 8 * ui + (timing->clk_prepare << 3) * ui;
+       temp += (((timing->clk_zero + 3) << 3) + 11) * ui;
+       temp += hb_en_ckln ? (((timing->hs_rqst_ckln << 3) + 4) * ui) :
+               (((timing->hs_rqst_ckln << 3) + 8) * ui);
+       tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+       tmax = 63;
+       if (tmin > tmax) {
+               temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false);
+               timing->shared_timings.clk_pre = temp >> 1;
+               timing->shared_timings.clk_pre_inc_by_2 = 1;
+       } else {
+               timing->shared_timings.clk_pre =
+                       linear_inter(tmax, tmin, pcnt2, 0, false);
+               timing->shared_timings.clk_pre_inc_by_2 = 0;
+       }
+
+       timing->ta_go = 3;
+       timing->ta_sure = 0;
+       timing->ta_get = 4;
+
+       DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+               timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+               timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero,
+               timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+               timing->hs_zero, timing->hs_prepare, timing->hs_trail,
+               timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en,
+               timing->hs_halfbyte_en_ckln, timing->hs_prep_dly,
+               timing->hs_prep_dly_ckln);
+
+       return 0;
+}
+
 void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
                                u32 bit_mask)
 {
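
The new calculator above derives every interval from a scaled unit interval instead of the hard-coded per-panel values removed further down. Worked numbers for the two base quantities, assuming a 1.0 GHz bit clock and the usual 19.2 MHz escape clock (coeff = 1000, so results are in thousandths of a nanosecond; plain division stands in for mult_frac here):

    #include <stdio.h>

    int main(void)
    {
            const long long coeff = 1000, nsec_per_msec = 1000000;
            const long long bit_rate = 1000000000, esc_rate = 19200000;

            long long ui  = nsec_per_msec * coeff / (bit_rate / 1000); /* 1000  -> 1.000 ns  */
            long long lpx = nsec_per_msec * coeff / (esc_rate / 1000); /* 52083 -> ~52.08 ns */

            printf("ui=%lld ui_x8=%lld lpx=%lld\n", ui, ui * 8, lpx);
            return 0;
    }
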
index c56268c..a24ab80 100644 (file)
@@ -101,6 +101,8 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
                             struct msm_dsi_phy_clk_request *clk_req);
 int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
                                struct msm_dsi_phy_clk_request *clk_req);
+int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
+                               struct msm_dsi_phy_clk_request *clk_req);
 void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
                                u32 bit_mask);
 int msm_dsi_phy_init_common(struct msm_dsi_phy *phy);
index 0af951a..b3fffc8 100644 (file)
@@ -79,34 +79,6 @@ static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
        dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
 }
 
-static int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
-                                      struct msm_dsi_phy_clk_request *clk_req)
-{
-       /*
-        * TODO: These params need to be computed, they're currently hardcoded
-        * for a 1440x2560@60Hz panel with a byteclk of 100.618 Mhz, and a
-        * default escape clock of 19.2 Mhz.
-        */
-
-       timing->hs_halfbyte_en = 0;
-       timing->clk_zero = 0x1c;
-       timing->clk_prepare = 0x07;
-       timing->clk_trail = 0x07;
-       timing->hs_exit = 0x23;
-       timing->hs_zero = 0x21;
-       timing->hs_prepare = 0x07;
-       timing->hs_trail = 0x07;
-       timing->hs_rqst = 0x05;
-       timing->ta_sure = 0x00;
-       timing->ta_go = 0x03;
-       timing->ta_get = 0x04;
-
-       timing->shared_timings.clk_pre = 0x2d;
-       timing->shared_timings.clk_post = 0x0d;
-
-       return 0;
-}
-
 static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
                               struct msm_dsi_phy_clk_request *clk_req)
 {
index 0e0c872..7a16242 100644 (file)
@@ -183,7 +183,8 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
        hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
        vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
 
-       format = kms->funcs->get_format(kms, mode_cmd->pixel_format);
+       format = kms->funcs->get_format(kms, mode_cmd->pixel_format,
+                       mode_cmd->modifier[0]);
        if (!format) {
                dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
                                (char *)&mode_cmd->pixel_format);
index c178563..456622b 100644 (file)
@@ -92,8 +92,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 
        if (IS_ERR(fb)) {
                dev_err(dev->dev, "failed to allocate fb\n");
-               ret = PTR_ERR(fb);
-               goto fail;
+               return PTR_ERR(fb);
        }
 
        bo = msm_framebuffer_bo(fb, 0);
@@ -151,13 +150,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 
 fail_unlock:
        mutex_unlock(&dev->struct_mutex);
-fail:
-
-       if (ret) {
-               if (fb)
-                       drm_framebuffer_remove(fb);
-       }
-
+       drm_framebuffer_remove(fb);
        return ret;
 }
 
index 9519647..f583bb4 100644 (file)
@@ -132,17 +132,19 @@ static void put_pages(struct drm_gem_object *obj)
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
        if (msm_obj->pages) {
-               /* For non-cached buffers, ensure the new pages are clean
-                * because display controller, GPU, etc. are not coherent:
-                */
-               if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-                       dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
-                                       msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+               if (msm_obj->sgt) {
+                       /* For non-cached buffers, ensure the new
+                        * pages are clean because display controller,
+                        * GPU, etc. are not coherent:
+                        */
+                       if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+                               dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
+                                            msm_obj->sgt->nents,
+                                            DMA_BIDIRECTIONAL);
 
-               if (msm_obj->sgt)
                        sg_free_table(msm_obj->sgt);
-
-               kfree(msm_obj->sgt);
+                       kfree(msm_obj->sgt);
+               }
 
                if (use_pages(obj))
                        drm_gem_put_pages(obj, msm_obj->pages, true, false);
index 17d5824..aaa329d 100644 (file)
@@ -48,8 +48,11 @@ struct msm_kms_funcs {
        /* functions to wait for atomic commit completed on each CRTC */
        void (*wait_for_crtc_commit_done)(struct msm_kms *kms,
                                        struct drm_crtc *crtc);
+       /* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */
+       const struct msm_format *(*get_format)(struct msm_kms *kms,
+                                       const uint32_t format,
+                                       const uint64_t modifiers);
        /* misc: */
-       const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
        long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
                        struct drm_encoder *encoder);
        int (*set_split_display)(struct msm_kms *kms,
index 6f402c4..ab61c03 100644 (file)
@@ -214,7 +214,6 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
        INIT_LIST_HEAD(&nvbo->entry);
        INIT_LIST_HEAD(&nvbo->vma_list);
        nvbo->bo.bdev = &drm->ttm.bdev;
-       nvbo->cli = cli;
 
        /* This is confusing, and doesn't actually mean we want an uncached
         * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
index be8e00b..73c4844 100644 (file)
@@ -26,8 +26,6 @@ struct nouveau_bo {
 
        struct list_head vma_list;
 
-       struct nouveau_cli *cli;
-
        unsigned contig:1;
        unsigned page:5;
        unsigned kind:8;
index dff51a0..8c093ca 100644 (file)
@@ -63,7 +63,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *reg)
 {
        struct nouveau_bo *nvbo = nouveau_bo(bo);
-       struct nouveau_drm *drm = nvbo->cli->drm;
+       struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_mem *mem;
        int ret;
 
@@ -103,7 +103,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *reg)
 {
        struct nouveau_bo *nvbo = nouveau_bo(bo);
-       struct nouveau_drm *drm = nvbo->cli->drm;
+       struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_mem *mem;
        int ret;
 
@@ -131,7 +131,7 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man,
                      struct ttm_mem_reg *reg)
 {
        struct nouveau_bo *nvbo = nouveau_bo(bo);
-       struct nouveau_drm *drm = nvbo->cli->drm;
+       struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_mem *mem;
        int ret;
 
index 8bd739c..2b3ccd8 100644 (file)
@@ -3264,10 +3264,11 @@ nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
 
        drm_connector_unregister(&mstc->connector);
 
-       drm_modeset_lock_all(drm->dev);
        drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);
+
+       drm_modeset_lock(&drm->dev->mode_config.connection_mutex, NULL);
        mstc->port = NULL;
-       drm_modeset_unlock_all(drm->dev);
+       drm_modeset_unlock(&drm->dev->mode_config.connection_mutex);
 
        drm_connector_unreference(&mstc->connector);
 }
@@ -3277,9 +3278,7 @@ nv50_mstm_register_connector(struct drm_connector *connector)
 {
        struct nouveau_drm *drm = nouveau_drm(connector->dev);
 
-       drm_modeset_lock_all(drm->dev);
        drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);
-       drm_modeset_unlock_all(drm->dev);
 
        drm_connector_register(connector);
 }
index 5e2e65e..7f3ac6b 100644 (file)
@@ -828,6 +828,12 @@ static void dispc_ovl_set_scale_coef(struct dispc_device *dispc,
        h_coef = dispc_ovl_get_scale_coef(fir_hinc, true);
        v_coef = dispc_ovl_get_scale_coef(fir_vinc, five_taps);
 
+       if (!h_coef || !v_coef) {
+               dev_err(&dispc->pdev->dev, "%s: failed to find scale coefs\n",
+                       __func__);
+               return;
+       }
+
        for (i = 0; i < 8; i++) {
                u32 h, hv;
 
@@ -2342,7 +2348,7 @@ static int dispc_ovl_calc_scaling_24xx(struct dispc_device *dispc,
        }
 
        if (in_width > maxsinglelinewidth) {
-               DSSERR("Cannot scale max input width exceeded");
+               DSSERR("Cannot scale max input width exceeded\n");
                return -EINVAL;
        }
        return 0;
@@ -2424,13 +2430,13 @@ again:
        }
 
        if (in_width > (maxsinglelinewidth * 2)) {
-               DSSERR("Cannot setup scaling");
-               DSSERR("width exceeds maximum width possible");
+               DSSERR("Cannot setup scaling\n");
+               DSSERR("width exceeds maximum width possible\n");
                return -EINVAL;
        }
 
        if (in_width > maxsinglelinewidth && *five_taps) {
-               DSSERR("cannot setup scaling with five taps");
+               DSSERR("cannot setup scaling with five taps\n");
                return -EINVAL;
        }
        return 0;
@@ -2472,7 +2478,7 @@ static int dispc_ovl_calc_scaling_44xx(struct dispc_device *dispc,
                        in_width > maxsinglelinewidth && ++*decim_x);
 
        if (in_width > maxsinglelinewidth) {
-               DSSERR("Cannot scale width exceeds max line width");
+               DSSERR("Cannot scale width exceeds max line width\n");
                return -EINVAL;
        }
 
@@ -2490,7 +2496,7 @@ static int dispc_ovl_calc_scaling_44xx(struct dispc_device *dispc,
                 * bandwidth. Despite what theory says this appears to
                 * be true also for 16-bit color formats.
                 */
-               DSSERR("Not enough bandwidth, too much downscaling (x-decimation factor %d > 4)", *decim_x);
+               DSSERR("Not enough bandwidth, too much downscaling (x-decimation factor %d > 4)\n", *decim_x);
 
                return -EINVAL;
        }
@@ -4633,7 +4639,7 @@ static int dispc_errata_i734_wa_init(struct dispc_device *dispc)
                                                i734_buf.size, &i734_buf.paddr,
                                                GFP_KERNEL);
        if (!i734_buf.vaddr) {
-               dev_err(&dispc->pdev->dev, "%s: dma_alloc_writecombine failed",
+               dev_err(&dispc->pdev->dev, "%s: dma_alloc_writecombine failed\n",
                        __func__);
                return -ENOMEM;
        }
index 97c8886..5879f45 100644 (file)
@@ -679,7 +679,7 @@ static int hdmi_audio_config(struct device *dev,
                             struct omap_dss_audio *dss_audio)
 {
        struct omap_hdmi *hd = dev_get_drvdata(dev);
-       int ret;
+       int ret = 0;
 
        mutex_lock(&hd->lock);
 
index 35ed2ad..813ba42 100644 (file)
@@ -922,8 +922,13 @@ int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
 {
        const struct hdmi4_features *features;
        struct resource *res;
+       const struct soc_device_attribute *soc;
 
-       features = soc_device_match(hdmi4_soc_devices)->data;
+       soc = soc_device_match(hdmi4_soc_devices);
+       if (!soc)
+               return -ENODEV;
+
+       features = soc->data;
        core->cts_swmode = features->cts_swmode;
        core->audio_use_mclk = features->audio_use_mclk;
 
index d28da9a..ae1a001 100644 (file)
@@ -671,7 +671,7 @@ static int hdmi_audio_config(struct device *dev,
                             struct omap_dss_audio *dss_audio)
 {
        struct omap_hdmi *hd = dev_get_drvdata(dev);
-       int ret;
+       int ret = 0;
 
        mutex_lock(&hd->lock);
 
index a0d7b1d..5cde26a 100644 (file)
@@ -121,6 +121,9 @@ static int omap_connector_get_modes(struct drm_connector *connector)
        if (dssdrv->read_edid) {
                void *edid = kzalloc(MAX_EDID, GFP_KERNEL);
 
+               if (!edid)
+                       return 0;
+
                if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) &&
                                drm_edid_is_valid(edid)) {
                        drm_mode_connector_update_edid_property(
@@ -139,6 +142,9 @@ static int omap_connector_get_modes(struct drm_connector *connector)
                struct drm_display_mode *mode = drm_mode_create(dev);
                struct videomode vm = {0};
 
+               if (!mode)
+                       return 0;
+
                dssdrv->get_timings(dssdev, &vm);
 
                drm_display_mode_from_videomode(&vm, mode);
@@ -200,6 +206,10 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
        if (!r) {
                /* check if vrefresh is still valid */
                new_mode = drm_mode_duplicate(dev, mode);
+
+               if (!new_mode)
+                       return MODE_BAD;
+
                new_mode->clock = vm.pixelclock / 1000;
                new_mode->vrefresh = 0;
                if (mode->vrefresh == drm_mode_vrefresh(new_mode))
index f9fa1c9..401c02e 100644 (file)
@@ -401,12 +401,16 @@ int tiler_unpin(struct tiler_block *block)
 struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, u16 w,
                u16 h, u16 align)
 {
-       struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
+       struct tiler_block *block;
        u32 min_align = 128;
        int ret;
        unsigned long flags;
        u32 slot_bytes;
 
+       block = kzalloc(sizeof(*block), GFP_KERNEL);
+       if (!block)
+               return ERR_PTR(-ENOMEM);
+
        BUG_ON(!validfmt(fmt));
 
        /* convert width/height to slots */
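
The reordering above makes sure the allocation is tested before anything else runs, and reports failure as ERR_PTR(-ENOMEM) so IS_ERR()-style callers keep working. A reduced sketch of that pattern (struct and function names here are illustrative, not the driver's):

    #include <linux/slab.h>
    #include <linux/err.h>
    #include <linux/types.h>

    struct block_2d {
            u32 width;
            u32 height;
    };

    static struct block_2d *reserve_2d(u32 w, u32 h)
    {
            struct block_2d *block;

            block = kzalloc(sizeof(*block), GFP_KERNEL);
            if (!block)                     /* check before any further use */
                    return ERR_PTR(-ENOMEM);

            block->width = w;
            block->height = h;
            return block;
    }

Callers are expected to test the result with IS_ERR() and extract the errno with PTR_ERR() rather than comparing against NULL.
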
index d7f7bc9..817be3c 100644 (file)
@@ -90,7 +90,7 @@ static int l2r_t2b(u16 w, u16 h, u16 a, s16 offset,
 {
        int i;
        unsigned long index;
-       bool area_free;
+       bool area_free = false;
        unsigned long slots_per_band = PAGE_SIZE / slot_bytes;
        unsigned long bit_offset = (offset > 0) ? offset / slot_bytes : 0;
        unsigned long curr_bit = bit_offset;
index c0fb52c..01665b9 100644 (file)
@@ -179,10 +179,9 @@ qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *relea
                              uint32_t type, bool interruptible)
 {
        struct qxl_command cmd;
-       struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
 
        cmd.type = type;
-       cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);
+       cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);
 
        return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
 }
@@ -192,10 +191,9 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas
                             uint32_t type, bool interruptible)
 {
        struct qxl_command cmd;
-       struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
 
        cmd.type = type;
-       cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);
+       cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset);
 
        return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
 }
index 00a1a66..864b456 100644 (file)
@@ -167,6 +167,7 @@ struct qxl_release {
 
        int id;
        int type;
+       struct qxl_bo *release_bo;
        uint32_t release_offset;
        uint32_t surface_release_id;
        struct ww_acquire_ctx ticket;
index e238a1a..6cc9f33 100644 (file)
@@ -182,9 +182,9 @@ static int qxl_process_single_command(struct qxl_device *qdev,
                goto out_free_reloc;
 
        /* TODO copy slow path code from i915 */
-       fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
+       fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK));
        unwritten = __copy_from_user_inatomic_nocache
-               (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE),
+               (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK),
                 u64_to_user_ptr(cmd->command), cmd->command_size);
 
        {
index 5d84a66..7cb2145 100644 (file)
@@ -173,6 +173,7 @@ qxl_release_free_list(struct qxl_release *release)
                list_del(&entry->tv.head);
                kfree(entry);
        }
+       release->release_bo = NULL;
 }
 
 void
@@ -296,7 +297,6 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
 {
        if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
                int idr_ret;
-               struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
                struct qxl_bo *bo;
                union qxl_release_info *info;
 
@@ -304,8 +304,9 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
                idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
                if (idr_ret < 0)
                        return idr_ret;
-               bo = to_qxl_bo(entry->tv.bo);
+               bo = create_rel->release_bo;
 
+               (*release)->release_bo = bo;
                (*release)->release_offset = create_rel->release_offset + 64;
 
                qxl_release_list_add(*release, bo);
@@ -365,6 +366,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
 
        bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
 
+       (*release)->release_bo = bo;
        (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
        qdev->current_release_bo_offset[cur_idx]++;
 
@@ -408,13 +410,12 @@ union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
 {
        void *ptr;
        union qxl_release_info *info;
-       struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
-       struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
+       struct qxl_bo *bo = release->release_bo;
 
-       ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
+       ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
        if (!ptr)
                return NULL;
-       info = ptr + (release->release_offset & ~PAGE_SIZE);
+       info = ptr + (release->release_offset & ~PAGE_MASK);
        return info;
 }
 
@@ -422,11 +423,10 @@ void qxl_release_unmap(struct qxl_device *qdev,
                       struct qxl_release *release,
                       union qxl_release_info *info)
 {
-       struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
-       struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
+       struct qxl_bo *bo = release->release_bo;
        void *ptr;
 
-       ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
+       ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
        qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
 }
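
The PAGE_SIZE/PAGE_MASK swap in the qxl hunks above is the real fix: PAGE_SIZE is a single power of two, so offset & PAGE_SIZE only tests one bit, whereas offset & PAGE_MASK yields the page-aligned base and offset & ~PAGE_MASK the position inside the page. A stand-alone demonstration, assuming a 4 KiB page and a made-up offset:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long release_offset = 0x3150;  /* arbitrary example value */

            printf("page base:      0x%lx\n", release_offset & PAGE_MASK);   /* 0x3000 */
            printf("offset in page: 0x%lx\n", release_offset & ~PAGE_MASK);  /* 0x150  */
            printf("old buggy test: 0x%lx\n", release_offset & PAGE_SIZE);   /* 0x1000: just bit 12 */
            return 0;
    }
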
 
index bffff4c..be3f14d 100644 (file)
@@ -94,64 +94,9 @@ static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder)
        }
 }
 
-static enum drm_mode_status sun4i_lvds_encoder_mode_valid(struct drm_encoder *crtc,
-                                                         const struct drm_display_mode *mode)
-{
-       struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(crtc);
-       struct sun4i_tcon *tcon = lvds->tcon;
-       u32 hsync = mode->hsync_end - mode->hsync_start;
-       u32 vsync = mode->vsync_end - mode->vsync_start;
-       unsigned long rate = mode->clock * 1000;
-       long rounded_rate;
-
-       DRM_DEBUG_DRIVER("Validating modes...\n");
-
-       if (hsync < 1)
-               return MODE_HSYNC_NARROW;
-
-       if (hsync > 0x3ff)
-               return MODE_HSYNC_WIDE;
-
-       if ((mode->hdisplay < 1) || (mode->htotal < 1))
-               return MODE_H_ILLEGAL;
-
-       if ((mode->hdisplay > 0x7ff) || (mode->htotal > 0xfff))
-               return MODE_BAD_HVALUE;
-
-       DRM_DEBUG_DRIVER("Horizontal parameters OK\n");
-
-       if (vsync < 1)
-               return MODE_VSYNC_NARROW;
-
-       if (vsync > 0x3ff)
-               return MODE_VSYNC_WIDE;
-
-       if ((mode->vdisplay < 1) || (mode->vtotal < 1))
-               return MODE_V_ILLEGAL;
-
-       if ((mode->vdisplay > 0x7ff) || (mode->vtotal > 0xfff))
-               return MODE_BAD_VVALUE;
-
-       DRM_DEBUG_DRIVER("Vertical parameters OK\n");
-
-       tcon->dclk_min_div = 7;
-       tcon->dclk_max_div = 7;
-       rounded_rate = clk_round_rate(tcon->dclk, rate);
-       if (rounded_rate < rate)
-               return MODE_CLOCK_LOW;
-
-       if (rounded_rate > rate)
-               return MODE_CLOCK_HIGH;
-
-       DRM_DEBUG_DRIVER("Clock rate OK\n");
-
-       return MODE_OK;
-}
-
 static const struct drm_encoder_helper_funcs sun4i_lvds_enc_helper_funcs = {
        .disable        = sun4i_lvds_encoder_disable,
        .enable         = sun4i_lvds_encoder_enable,
-       .mode_valid     = sun4i_lvds_encoder_mode_valid,
 };
 
 static const struct drm_encoder_funcs sun4i_lvds_enc_funcs = {
index f0481b7..06c94e3 100644 (file)
@@ -910,7 +910,8 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
                        while (npages >= HPAGE_PMD_NR) {
                                gfp_t huge_flags = gfp_flags;
 
-                               huge_flags |= GFP_TRANSHUGE;
+                               huge_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+                                       __GFP_KSWAPD_RECLAIM;
                                huge_flags &= ~__GFP_MOVABLE;
                                huge_flags &= ~__GFP_COMP;
                                p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
@@ -1027,11 +1028,15 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
                                  GFP_USER | GFP_DMA32, "uc dma", 0);
 
        ttm_page_pool_init_locked(&_manager->wc_pool_huge,
-                                 GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP),
+                                 (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+                                  __GFP_KSWAPD_RECLAIM) &
+                                 ~(__GFP_MOVABLE | __GFP_COMP),
                                  "wc huge", order);
 
        ttm_page_pool_init_locked(&_manager->uc_pool_huge,
-                                 GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP)
+                                 (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+                                  __GFP_KSWAPD_RECLAIM) &
+                                 ~(__GFP_MOVABLE | __GFP_COMP)
                                  , "uc huge", order);
 
        _manager->options.max_size = max_pages;
index 8a25d19..f63d99c 100644 (file)
@@ -910,7 +910,8 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
                gfp_flags |= __GFP_ZERO;
 
        if (huge) {
-               gfp_flags |= GFP_TRANSHUGE;
+               gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+                       __GFP_KSWAPD_RECLAIM;
                gfp_flags &= ~__GFP_MOVABLE;
                gfp_flags &= ~__GFP_COMP;
        }
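
Both TTM hunks compose the same set of huge-page allocation flags, so the intent can be captured in a small helper like the sketch below (ttm_huge_gfp() is not a real kernel function, only an illustration): start from GFP_TRANSHUGE_LIGHT, let kswapd be woken but avoid retry loops with __GFP_NORETRY, and strip __GFP_MOVABLE and __GFP_COMP, which these pools cannot use.

    #include <linux/gfp.h>

    /* Illustrative only: build huge-page GFP flags the way the hunks above do,
     * so a failed huge allocation backs off quickly instead of stalling in
     * reclaim/compaction the way plain GFP_TRANSHUGE can. */
    static inline gfp_t ttm_huge_gfp(gfp_t base)
    {
            gfp_t flags = base | GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
                          __GFP_KSWAPD_RECLAIM;

            return flags & ~(__GFP_MOVABLE | __GFP_COMP);
    }
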
index bf46674..c61dff5 100644 (file)
@@ -760,6 +760,7 @@ static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
 struct vc4_async_flip_state {
        struct drm_crtc *crtc;
        struct drm_framebuffer *fb;
+       struct drm_framebuffer *old_fb;
        struct drm_pending_vblank_event *event;
 
        struct vc4_seqno_cb cb;
@@ -789,6 +790,23 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
 
        drm_crtc_vblank_put(crtc);
        drm_framebuffer_put(flip_state->fb);
+
+       /* Decrement the BO usecnt in order to keep the inc/dec calls balanced
+        * when the planes are updated through the async update path.
+        * FIXME: we should move to generic async-page-flip when it's
+        * available, so that we can get rid of this hand-made cleanup_fb()
+        * logic.
+        */
+       if (flip_state->old_fb) {
+               struct drm_gem_cma_object *cma_bo;
+               struct vc4_bo *bo;
+
+               cma_bo = drm_fb_cma_get_gem_obj(flip_state->old_fb, 0);
+               bo = to_vc4_bo(&cma_bo->base);
+               vc4_bo_dec_usecnt(bo);
+               drm_framebuffer_put(flip_state->old_fb);
+       }
+
        kfree(flip_state);
 
        up(&vc4->async_modeset);
@@ -813,9 +831,22 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
        struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
        struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
 
+       /* Increment the BO usecnt here, so that we never end up with an
+        * unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the
+        * plane is later updated through the non-async path.
+        * FIXME: we should move to generic async-page-flip when it's
+        * available, so that we can get rid of this hand-made prepare_fb()
+        * logic.
+        */
+       ret = vc4_bo_inc_usecnt(bo);
+       if (ret)
+               return ret;
+
        flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL);
-       if (!flip_state)
+       if (!flip_state) {
+               vc4_bo_dec_usecnt(bo);
                return -ENOMEM;
+       }
 
        drm_framebuffer_get(fb);
        flip_state->fb = fb;
@@ -826,10 +857,23 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
        ret = down_interruptible(&vc4->async_modeset);
        if (ret) {
                drm_framebuffer_put(fb);
+               vc4_bo_dec_usecnt(bo);
                kfree(flip_state);
                return ret;
        }
 
+       /* Save the current FB before it's replaced by the new one in
+        * drm_atomic_set_fb_for_plane(). We'll need the old FB in
+        * vc4_async_page_flip_complete() to decrement the BO usecnt and keep
+        * it consistent.
+        * FIXME: we should move to generic async-page-flip when it's
+        * available, so that we can get rid of this hand-made cleanup_fb()
+        * logic.
+        */
+       flip_state->old_fb = plane->state->fb;
+       if (flip_state->old_fb)
+               drm_framebuffer_get(flip_state->old_fb);
+
        WARN_ON(drm_crtc_vblank_get(crtc) != 0);
 
        /* Immediately update the plane's legacy fb pointer, so that later
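
The point of the usecnt changes above is strict pairing: one vc4_bo_inc_usecnt() when the async flip is queued, and exactly one vc4_bo_dec_usecnt() on every failure path plus one in the completion handler for the framebuffer being replaced. A stripped-down sketch of the queueing side, with flip_state_sketch standing in for the driver's real state structure:

    #include <linux/slab.h>
    #include "vc4_drv.h"    /* vc4_bo_inc_usecnt()/vc4_bo_dec_usecnt(), as in the driver */

    struct flip_state_sketch {
            struct vc4_bo *bo;
    };

    static int queue_async_flip(struct vc4_bo *bo, struct flip_state_sketch **out)
    {
            struct flip_state_sketch *state;
            int ret;

            ret = vc4_bo_inc_usecnt(bo);    /* taken before any other work */
            if (ret)
                    return ret;

            state = kzalloc(sizeof(*state), GFP_KERNEL);
            if (!state) {
                    vc4_bo_dec_usecnt(bo);  /* every error path hands the count back */
                    return -ENOMEM;
            }

            state->bo = bo;                 /* the completion handler drops this one */
            *out = state;
            return 0;
    }
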
index 72c9dbd..f185812 100644 (file)
@@ -96,7 +96,6 @@ struct vc4_dpi {
        struct platform_device *pdev;
 
        struct drm_encoder *encoder;
-       struct drm_connector *connector;
 
        void __iomem *regs;
 
@@ -164,14 +163,31 @@ static void vc4_dpi_encoder_disable(struct drm_encoder *encoder)
 
 static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
 {
+       struct drm_device *dev = encoder->dev;
        struct drm_display_mode *mode = &encoder->crtc->mode;
        struct vc4_dpi_encoder *vc4_encoder = to_vc4_dpi_encoder(encoder);
        struct vc4_dpi *dpi = vc4_encoder->dpi;
+       struct drm_connector_list_iter conn_iter;
+       struct drm_connector *connector = NULL, *connector_scan;
        u32 dpi_c = DPI_ENABLE | DPI_OUTPUT_ENABLE_MODE;
        int ret;
 
-       if (dpi->connector->display_info.num_bus_formats) {
-               u32 bus_format = dpi->connector->display_info.bus_formats[0];
+       /* Look up the connector attached to DPI so we can get the
+        * bus_format.  Ideally the bridge would tell us the
+        * bus_format we want, but it doesn't yet, so assume that it's
+        * uniform throughout the bridge chain.
+        */
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       drm_for_each_connector_iter(connector_scan, &conn_iter) {
+               if (connector_scan->encoder == encoder) {
+                       connector = connector_scan;
+                       break;
+               }
+       }
+       drm_connector_list_iter_end(&conn_iter);
+
+       if (connector && connector->display_info.num_bus_formats) {
+               u32 bus_format = connector->display_info.bus_formats[0];
 
                switch (bus_format) {
                case MEDIA_BUS_FMT_RGB888_1X24:
@@ -199,6 +215,9 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
                        DRM_ERROR("Unknown media bus format %d\n", bus_format);
                        break;
                }
+       } else {
+               /* Default to 24bit if no connector found. */
+               dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB, DPI_FORMAT);
        }
 
        if (mode->flags & DRM_MODE_FLAG_NHSYNC)
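
Since the cached dpi->connector pointer is gone, the encoder now discovers its connector at enable time with the iterator walk added above. A minimal helper along the same lines (find_connector_for_encoder() is an illustrative name, not an existing DRM API):

    #include <drm/drm_connector.h>
    #include <drm/drm_device.h>
    #include <drm/drm_encoder.h>

    static struct drm_connector *
    find_connector_for_encoder(struct drm_device *dev, struct drm_encoder *encoder)
    {
            struct drm_connector_list_iter conn_iter;
            struct drm_connector *connector, *found = NULL;

            drm_connector_list_iter_begin(dev, &conn_iter);
            drm_for_each_connector_iter(connector, &conn_iter) {
                    if (connector->encoder == encoder) {
                            found = connector;
                            break;
                    }
            }
            drm_connector_list_iter_end(&conn_iter);        /* required even after break */

            return found;
    }

As in the hunk, the caller must cope with a NULL result, which is why the else branch falls back to a 24-bit bus format.
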
index ce39390..13dcaad 100644 (file)
@@ -503,7 +503,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
         * the scl fields here.
         */
        if (num_planes == 1) {
-               scl0 = vc4_get_scl_field(state, 1);
+               scl0 = vc4_get_scl_field(state, 0);
                scl1 = scl0;
        } else {
                scl0 = vc4_get_scl_field(state, 1);
index 48e4f1d..020070d 100644 (file)
@@ -293,7 +293,7 @@ retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->ctrlq.qlock);
-               wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
+               wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
                spin_lock(&vgdev->ctrlq.qlock);
                goto retry;
        } else {
@@ -368,7 +368,7 @@ retry:
        ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
        if (ret == -ENOSPC) {
                spin_unlock(&vgdev->cursorq.qlock);
-               wait_event(vgdev->cursorq.ack_queue, vq->num_free);
+               wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
                spin_lock(&vgdev->cursorq.qlock);
                goto retry;
        } else {
index 2582ffd..ba0cdb7 100644 (file)
@@ -441,11 +441,11 @@ static int vmwgfx_set_config_internal(struct drm_mode_set *set)
        struct drm_crtc *crtc = set->crtc;
        struct drm_framebuffer *fb;
        struct drm_crtc *tmp;
-       struct drm_modeset_acquire_ctx *ctx;
        struct drm_device *dev = set->crtc->dev;
+       struct drm_modeset_acquire_ctx ctx;
        int ret;
 
-       ctx = dev->mode_config.acquire_ctx;
+       drm_modeset_acquire_init(&ctx, 0);
 
 restart:
        /*
@@ -458,7 +458,7 @@ restart:
 
        fb = set->fb;
 
-       ret = crtc->funcs->set_config(set, ctx);
+       ret = crtc->funcs->set_config(set, &ctx);
        if (ret == 0) {
                crtc->primary->crtc = crtc;
                crtc->primary->fb = fb;
@@ -473,20 +473,13 @@ restart:
        }
 
        if (ret == -EDEADLK) {
-               dev->mode_config.acquire_ctx = NULL;
-
-retry_locking:
-               drm_modeset_backoff(ctx);
-
-               ret = drm_modeset_lock_all_ctx(dev, ctx);
-               if (ret)
-                       goto retry_locking;
-
-               dev->mode_config.acquire_ctx = ctx;
-
+               drm_modeset_backoff(&ctx);
                goto restart;
        }
 
+       drm_modeset_drop_locks(&ctx);
+       drm_modeset_acquire_fini(&ctx);
+
        return ret;
 }
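
Rather than borrowing dev->mode_config.acquire_ctx, which is only valid while drm_modeset_lock_all() is held, the function now owns an acquire context on the stack and runs the usual init/backoff/retry/drop/fini cycle. A compact sketch of that cycle, with do_locked_work() standing in for whatever needs the locks:

    #include <linux/errno.h>
    #include <drm/drm_device.h>
    #include <drm/drm_modeset_lock.h>

    static int with_modeset_locks(struct drm_device *dev,
                                  int (*do_locked_work)(struct drm_device *,
                                                        struct drm_modeset_acquire_ctx *))
    {
            struct drm_modeset_acquire_ctx ctx;
            int ret;

            drm_modeset_acquire_init(&ctx, 0);
    retry:
            ret = do_locked_work(dev, &ctx);
            if (ret == -EDEADLK) {
                    drm_modeset_backoff(&ctx);      /* drop everything, wait, try again */
                    goto retry;
            }

            drm_modeset_drop_locks(&ctx);
            drm_modeset_acquire_fini(&ctx);
            return ret;
    }
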
 
@@ -624,7 +617,6 @@ static int vmw_fb_set_par(struct fb_info *info)
        }
 
        mutex_lock(&par->bo_mutex);
-       drm_modeset_lock_all(vmw_priv->dev);
        ret = vmw_fb_kms_framebuffer(info);
        if (ret)
                goto out_unlock;
@@ -657,7 +649,6 @@ out_unlock:
                drm_mode_destroy(vmw_priv->dev, old_mode);
        par->set_mode = mode;
 
-       drm_modeset_unlock_all(vmw_priv->dev);
        mutex_unlock(&par->bo_mutex);
 
        return ret;
@@ -713,18 +704,14 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
        par->max_width = fb_width;
        par->max_height = fb_height;
 
-       drm_modeset_lock_all(vmw_priv->dev);
        ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
                                      par->max_height, &par->con,
                                      &par->crtc, &init_mode);
-       if (ret) {
-               drm_modeset_unlock_all(vmw_priv->dev);
+       if (ret)
                goto err_kms;
-       }
 
        info->var.xres = init_mode->hdisplay;
        info->var.yres = init_mode->vdisplay;
-       drm_modeset_unlock_all(vmw_priv->dev);
 
        /*
         * Create buffers and alloc memory
@@ -832,7 +819,9 @@ int vmw_fb_close(struct vmw_private *vmw_priv)
        cancel_delayed_work_sync(&par->local_work);
        unregister_framebuffer(info);
 
+       mutex_lock(&par->bo_mutex);
        (void) vmw_fb_kms_detach(par, true, true);
+       mutex_unlock(&par->bo_mutex);
 
        vfree(par->vmalloc);
        framebuffer_release(info);
index f11601b..96fd7a0 100644 (file)
@@ -2595,6 +2595,7 @@ void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
                vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
                                             out_fence, NULL);
 
+       vmw_dmabuf_unreference(&ctx->buf);
        vmw_resource_unreserve(res, false, NULL, 0);
        mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 }
@@ -2680,7 +2681,9 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
        struct vmw_display_unit *du;
        struct drm_display_mode *mode;
        int i = 0;
+       int ret = 0;
 
+       mutex_lock(&dev_priv->dev->mode_config.mutex);
        list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
                            head) {
                if (i == unit)
@@ -2691,7 +2694,8 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
 
        if (i != unit) {
                DRM_ERROR("Could not find initial display unit.\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out_unlock;
        }
 
        if (list_empty(&con->modes))
@@ -2699,7 +2703,8 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
 
        if (list_empty(&con->modes)) {
                DRM_ERROR("Could not find initial display mode.\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out_unlock;
        }
 
        du = vmw_connector_to_du(con);
@@ -2720,7 +2725,10 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
                                           head);
        }
 
-       return 0;
+ out_unlock:
+       mutex_unlock(&dev_priv->dev->mode_config.mutex);
+
+       return ret;
 }
 
 /**
index 60252fd..0000434 100644 (file)
@@ -462,10 +462,11 @@ config HID_LENOVO
        select NEW_LEDS
        select LEDS_CLASS
        ---help---
-       Support for Lenovo devices that are not fully compliant with HID standard.
+       Support for IBM/Lenovo devices that are not fully compliant with HID standard.
 
-       Say Y if you want support for the non-compliant features of the Lenovo
-       Thinkpad standalone keyboards, e.g:
+       Say Y if you want support for horizontal scrolling of the IBM/Lenovo
+       Scrollpoint mice or the non-compliant features of the Lenovo Thinkpad
+       standalone keyboards, e.g:
        - ThinkPad USB Keyboard with TrackPoint (supports extra LEDs and trackpoint
          configuration)
        - ThinkPad Compact Bluetooth Keyboard with TrackPoint (supports Fn keys)
index 0b5cc91..46f5ecd 100644 (file)
 #define USB_VENDOR_ID_HUION            0x256c
 #define USB_DEVICE_ID_HUION_TABLET     0x006e
 
+#define USB_VENDOR_ID_IBM                                      0x04b3
+#define USB_DEVICE_ID_IBM_SCROLLPOINT_III                      0x3100
+#define USB_DEVICE_ID_IBM_SCROLLPOINT_PRO                      0x3103
+#define USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL                  0x3105
+#define USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL           0x3108
+#define USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO       0x3109
+
 #define USB_VENDOR_ID_IDEACOM          0x1cb6
 #define USB_DEVICE_ID_IDEACOM_IDC6650  0x6650
 #define USB_DEVICE_ID_IDEACOM_IDC6651  0x6651
 #define USB_DEVICE_ID_LENOVO_TPKBD     0x6009
 #define USB_DEVICE_ID_LENOVO_CUSBKBD   0x6047
 #define USB_DEVICE_ID_LENOVO_CBTKBD    0x6048
+#define USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL       0x6049
 #define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067
 #define USB_DEVICE_ID_LENOVO_X1_COVER  0x6085
 #define USB_DEVICE_ID_LENOVO_X1_TAB    0x60a3
 #define USB_DEVICE_ID_SIS817_TOUCH     0x0817
 #define USB_DEVICE_ID_SIS_TS           0x1013
 #define USB_DEVICE_ID_SIS1030_TOUCH    0x1030
+#define USB_DEVICE_ID_SIS10FB_TOUCH    0x10fb
 
 #define USB_VENDOR_ID_SKYCABLE                 0x1223
 #define        USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER       0x3F07
index 1ac4ff4..643b6eb 100644 (file)
@@ -6,6 +6,17 @@
  *
  *  Copyright (c) 2012 Bernhard Seibold
  *  Copyright (c) 2014 Jamie Lentin <jm@lentin.co.uk>
+ *
+ * Linux IBM/Lenovo Scrollpoint mouse driver:
+ * - IBM Scrollpoint III
+ * - IBM Scrollpoint Pro
+ * - IBM Scrollpoint Optical
+ * - IBM Scrollpoint Optical 800dpi
+ * - IBM Scrollpoint Optical 800dpi Pro
+ * - Lenovo Scrollpoint Optical
+ *
+ *  Copyright (c) 2012 Peter De Wachter <pdewacht@gmail.com>
+ *  Copyright (c) 2018 Peter Ganzhorn <peter.ganzhorn@gmail.com>
  */
 
 /*
@@ -160,6 +171,17 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev,
        return 0;
 }
 
+static int lenovo_input_mapping_scrollpoint(struct hid_device *hdev,
+               struct hid_input *hi, struct hid_field *field,
+               struct hid_usage *usage, unsigned long **bit, int *max)
+{
+       if (usage->hid == HID_GD_Z) {
+               hid_map_usage(hi, usage, bit, max, EV_REL, REL_HWHEEL);
+               return 1;
+       }
+       return 0;
+}
+
 static int lenovo_input_mapping(struct hid_device *hdev,
                struct hid_input *hi, struct hid_field *field,
                struct hid_usage *usage, unsigned long **bit, int *max)
@@ -172,6 +194,14 @@ static int lenovo_input_mapping(struct hid_device *hdev,
        case USB_DEVICE_ID_LENOVO_CBTKBD:
                return lenovo_input_mapping_cptkbd(hdev, hi, field,
                                                        usage, bit, max);
+       case USB_DEVICE_ID_IBM_SCROLLPOINT_III:
+       case USB_DEVICE_ID_IBM_SCROLLPOINT_PRO:
+       case USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL:
+       case USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL:
+       case USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO:
+       case USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL:
+               return lenovo_input_mapping_scrollpoint(hdev, hi, field,
+                                                       usage, bit, max);
        default:
                return 0;
        }
@@ -883,6 +913,12 @@ static const struct hid_device_id lenovo_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_III) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_PRO) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL) },
        { }
 };
 
index 9633286..cc33622 100644 (file)
@@ -174,6 +174,8 @@ static const struct i2c_hid_quirks {
                I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
        { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118,
                I2C_HID_QUIRK_RESEND_REPORT_DESCR },
+       { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH,
+               I2C_HID_QUIRK_RESEND_REPORT_DESCR },
        { 0, 0 }
 };
 
index 157b44a..acc2536 100644 (file)
@@ -77,21 +77,21 @@ static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
        struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
        int curr_hid_dev = client_data->cur_hid_dev;
 
-       if (data_len < sizeof(struct hostif_msg_hdr)) {
-               dev_err(&client_data->cl_device->dev,
-                       "[hid-ish]: error, received %u which is less than data header %u\n",
-                       (unsigned int)data_len,
-                       (unsigned int)sizeof(struct hostif_msg_hdr));
-               ++client_data->bad_recv_cnt;
-               ish_hw_reset(hid_ishtp_cl->dev);
-               return;
-       }
-
        payload = recv_buf + sizeof(struct hostif_msg_hdr);
        total_len = data_len;
        cur_pos = 0;
 
        do {
+               if (cur_pos + sizeof(struct hostif_msg) > total_len) {
+                       dev_err(&client_data->cl_device->dev,
+                               "[hid-ish]: error, received %u which is less than data header %u\n",
+                               (unsigned int)data_len,
+                               (unsigned int)sizeof(struct hostif_msg_hdr));
+                       ++client_data->bad_recv_cnt;
+                       ish_hw_reset(hid_ishtp_cl->dev);
+                       break;
+               }
+
                recv_msg = (struct hostif_msg *)(recv_buf + cur_pos);
                payload_len = recv_msg->hdr.size;
 
@@ -412,9 +412,7 @@ void hid_ishtp_get_report(struct hid_device *hid, int report_id,
 {
        struct ishtp_hid_data *hid_data =  hid->driver_data;
        struct ishtp_cl_data *client_data = hid_data->client_data;
-       static unsigned char    buf[10];
-       unsigned int    len;
-       struct hostif_msg_to_sensor *msg = (struct hostif_msg_to_sensor *)buf;
+       struct hostif_msg_to_sensor msg = {};
        int     rv;
        int     i;
 
@@ -426,14 +424,11 @@ void hid_ishtp_get_report(struct hid_device *hid, int report_id,
                return;
        }
 
-       len = sizeof(struct hostif_msg_to_sensor);
-
-       memset(msg, 0, sizeof(struct hostif_msg_to_sensor));
-       msg->hdr.command = (report_type == HID_FEATURE_REPORT) ?
+       msg.hdr.command = (report_type == HID_FEATURE_REPORT) ?
                HOSTIF_GET_FEATURE_REPORT : HOSTIF_GET_INPUT_REPORT;
        for (i = 0; i < client_data->num_hid_devices; ++i) {
                if (hid == client_data->hid_sensor_hubs[i]) {
-                       msg->hdr.device_id =
+                       msg.hdr.device_id =
                                client_data->hid_devices[i].dev_id;
                        break;
                }
@@ -442,8 +437,9 @@ void hid_ishtp_get_report(struct hid_device *hid, int report_id,
        if (i == client_data->num_hid_devices)
                return;
 
-       msg->report_id = report_id;
-       rv = ishtp_cl_send(client_data->hid_ishtp_cl, buf, len);
+       msg.report_id = report_id;
+       rv = ishtp_cl_send(client_data->hid_ishtp_cl, (uint8_t *)&msg,
+                           sizeof(msg));
        if (rv)
                hid_ishtp_trace(client_data,  "%s hid %p send failed\n",
                                __func__, hid);
index f272cdd..2623a56 100644 (file)
@@ -418,7 +418,7 @@ static struct ishtp_cl_device *ishtp_bus_add_device(struct ishtp_device *dev,
                list_del(&device->device_link);
                spin_unlock_irqrestore(&dev->device_list_lock, flags);
                dev_err(dev->devc, "Failed to register ISHTP client device\n");
-               kfree(device);
+               put_device(&device->dev);
                return NULL;
        }
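
Calling kfree() on a device whose registration failed bypasses its release callback and can free memory that another CPU still holds a reference to; once device_initialize() (or device_register(), which embeds it) has run, put_device() is the only correct way to dispose of it. A generic sketch of that rule, with struct my_child and its helpers invented for the example:

    #include <linux/device.h>
    #include <linux/slab.h>

    struct my_child {
            struct device dev;
    };

    static void my_child_release(struct device *dev)
    {
            kfree(container_of(dev, struct my_child, dev));
    }

    static struct my_child *my_child_add(struct device *parent)
    {
            struct my_child *child = kzalloc(sizeof(*child), GFP_KERNEL);

            if (!child)
                    return NULL;

            device_initialize(&child->dev);
            child->dev.parent = parent;
            child->dev.release = my_child_release;
            dev_set_name(&child->dev, "my-child");

            if (device_add(&child->dev)) {
                    put_device(&child->dev);        /* frees via my_child_release() */
                    return NULL;
            }
            return child;
    }
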
 
index b54ef1f..ee7a37e 100644 (file)
@@ -1213,8 +1213,10 @@ static int __wacom_devm_sysfs_create_group(struct wacom *wacom,
        devres->root = root;
 
        error = sysfs_create_group(devres->root, group);
-       if (error)
+       if (error) {
+               devres_free(devres);
                return error;
+       }
 
        devres_add(&wacom->hdev->dev, devres);
 
index 051a72e..d2cc55e 100644 (file)
@@ -40,6 +40,10 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
 #define PCI_DEVICE_ID_AMD_17H_DF_F3    0x1463
 #endif
 
+#ifndef PCI_DEVICE_ID_AMD_17H_RR_NB
+#define PCI_DEVICE_ID_AMD_17H_RR_NB    0x15d0
+#endif
+
 /* CPUID function 0x80000001, ebx */
 #define CPUID_PKGTYPE_MASK     0xf0000000
 #define CPUID_PKGTYPE_F                0x00000000
@@ -72,6 +76,7 @@ struct k10temp_data {
        struct pci_dev *pdev;
        void (*read_tempreg)(struct pci_dev *pdev, u32 *regval);
        int temp_offset;
+       u32 temp_adjust_mask;
 };
 
 struct tctl_offset {
@@ -84,6 +89,7 @@ static const struct tctl_offset tctl_offset_table[] = {
        { 0x17, "AMD Ryzen 5 1600X", 20000 },
        { 0x17, "AMD Ryzen 7 1700X", 20000 },
        { 0x17, "AMD Ryzen 7 1800X", 20000 },
+       { 0x17, "AMD Ryzen 7 2700X", 10000 },
        { 0x17, "AMD Ryzen Threadripper 1950X", 27000 },
        { 0x17, "AMD Ryzen Threadripper 1920X", 27000 },
        { 0x17, "AMD Ryzen Threadripper 1900X", 27000 },
@@ -129,6 +135,8 @@ static ssize_t temp1_input_show(struct device *dev,
 
        data->read_tempreg(data->pdev, &regval);
        temp = (regval >> 21) * 125;
+       if (regval & data->temp_adjust_mask)
+               temp -= 49000;
        if (temp > data->temp_offset)
                temp -= data->temp_offset;
        else
@@ -259,12 +267,14 @@ static int k10temp_probe(struct pci_dev *pdev,
        data->pdev = pdev;
 
        if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 ||
-                                         boot_cpu_data.x86_model == 0x70))
+                                         boot_cpu_data.x86_model == 0x70)) {
                data->read_tempreg = read_tempreg_nb_f15;
-       else if (boot_cpu_data.x86 == 0x17)
+       } else if (boot_cpu_data.x86 == 0x17) {
+               data->temp_adjust_mask = 0x80000;
                data->read_tempreg = read_tempreg_nb_f17;
-       else
+       } else {
                data->read_tempreg = read_tempreg_pci;
+       }
 
        for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) {
                const struct tctl_offset *entry = &tctl_offset_table[i];
@@ -292,6 +302,7 @@ static const struct pci_device_id k10temp_id_table[] = {
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
+       { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_RR_NB) },
        {}
 };
 MODULE_DEVICE_TABLE(pci, k10temp_id_table);
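
The new temp_adjust_mask handles Family 17h parts whose temperature register reports the value 49 degC above the die temperature when a range-select bit is set: bits 31:21 carry the reading in 0.125 degC steps, and the offset is subtracted when the bit is present. A stand-alone demonstration with a made-up register value (0x80000 is the bit the driver masks); the model-specific Tctl offsets are omitted here:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t regval = 0x44a80000;   /* invented raw value; bit 19 happens to be set */
            uint32_t temp_adjust_mask = 0x80000;
            long temp = (regval >> 21) * 125;       /* millidegrees Celsius */

            if (regval & temp_adjust_mask)
                    temp -= 49000;                  /* undo the +49 degC range offset */

            printf("%ld.%03ld degC\n", temp / 1000, temp % 1000);   /* prints 19.625 degC */
            return 0;
    }
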
index 8b0bc4f..b0bc77b 100644 (file)
@@ -1380,8 +1380,8 @@ static int __init nct6683_find(int sioaddr, struct nct6683_sio_data *sio_data)
        /* Activate logical device if needed */
        val = superio_inb(sioaddr, SIO_REG_ENABLE);
        if (!(val & 0x01)) {
-               pr_err("EC is disabled\n");
-               goto fail;
+               pr_warn("Forcibly enabling EC access. Data may be unusable.\n");
+               superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01);
        }
 
        superio_exit(sioaddr);
index 363bf56..91976b6 100644 (file)
@@ -170,7 +170,10 @@ static int scmi_hwmon_probe(struct scmi_device *sdev)
        scmi_chip_info.info = ptr_scmi_ci;
        chip_info = &scmi_chip_info;
 
-       for (type = 0; type < hwmon_max && nr_count[type]; type++) {
+       for (type = 0; type < hwmon_max; type++) {
+               if (!nr_count[type])
+                       continue;
+
                scmi_hwmon_add_chan_info(scmi_hwmon_chan, dev, nr_count[type],
                                         type, hwmon_attributes[type]);
                *ptr_scmi_ci++ = scmi_hwmon_chan++;
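
The subtle part of the hwmon loop fix is that the old condition "type < hwmon_max && nr_count[type]" terminates the whole scan at the first sensor type with a zero count, silently dropping every type after the gap; turning the count test into a continue visits them all. A tiny stand-alone illustration:

    #include <stdio.h>

    int main(void)
    {
            int nr_count[] = { 2, 0, 3, 1 };        /* invented per-type sensor counts */
            int hwmon_max = 4;
            int type;

            for (type = 0; type < hwmon_max; type++) {
                    if (!nr_count[type])
                            continue;               /* skip the gap, keep scanning */
                    printf("registering %d channels of type %d\n", nr_count[type], type);
            }
            return 0;
    }
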
index c4865b0..8d21b98 100644 (file)
@@ -707,7 +707,6 @@ config I2C_MPC
 config I2C_MT65XX
        tristate "MediaTek I2C adapter"
        depends on ARCH_MEDIATEK || COMPILE_TEST
-       depends on HAS_DMA
        help
          This selects the MediaTek(R) Integrated Inter Circuit bus driver
          for MT65xx and MT81xx.
@@ -885,7 +884,6 @@ config I2C_SH7760
 
 config I2C_SH_MOBILE
        tristate "SuperH Mobile I2C Controller"
-       depends on HAS_DMA
        depends on ARCH_SHMOBILE || ARCH_RENESAS || COMPILE_TEST
        help
          If you say yes to this option, support will be included for the
@@ -1098,7 +1096,6 @@ config I2C_XLP9XX
 
 config I2C_RCAR
        tristate "Renesas R-Car I2C Controller"
-       depends on HAS_DMA
        depends on ARCH_RENESAS || COMPILE_TEST
        select I2C_SLAVE
        help
index 25fcc3c..4053259 100644 (file)
@@ -86,6 +86,7 @@ struct sprd_i2c {
        u32 count;
        int irq;
        int err;
+       bool is_suspended;
 };
 
 static void sprd_i2c_set_count(struct sprd_i2c *i2c_dev, u32 count)
@@ -283,6 +284,9 @@ static int sprd_i2c_master_xfer(struct i2c_adapter *i2c_adap,
        struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
        int im, ret;
 
+       if (i2c_dev->is_suspended)
+               return -EBUSY;
+
        ret = pm_runtime_get_sync(i2c_dev->dev);
        if (ret < 0)
                return ret;
@@ -364,13 +368,12 @@ static irqreturn_t sprd_i2c_isr_thread(int irq, void *dev_id)
        struct sprd_i2c *i2c_dev = dev_id;
        struct i2c_msg *msg = i2c_dev->msg;
        bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK);
-       u32 i2c_count = readl(i2c_dev->base + I2C_COUNT);
        u32 i2c_tran;
 
        if (msg->flags & I2C_M_RD)
                i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD;
        else
-               i2c_tran = i2c_count;
+               i2c_tran = i2c_dev->count;
 
        /*
         * If we got one ACK from slave when writing data, and we did not
@@ -408,14 +411,13 @@ static irqreturn_t sprd_i2c_isr(int irq, void *dev_id)
 {
        struct sprd_i2c *i2c_dev = dev_id;
        struct i2c_msg *msg = i2c_dev->msg;
-       u32 i2c_count = readl(i2c_dev->base + I2C_COUNT);
        bool ack = !(readl(i2c_dev->base + I2C_STATUS) & I2C_RX_ACK);
        u32 i2c_tran;
 
        if (msg->flags & I2C_M_RD)
                i2c_tran = i2c_dev->count >= I2C_FIFO_FULL_THLD;
        else
-               i2c_tran = i2c_count;
+               i2c_tran = i2c_dev->count;
 
        /*
         * If we did not get one ACK from slave when writing data, then we
@@ -586,11 +588,23 @@ static int sprd_i2c_remove(struct platform_device *pdev)
 
 static int __maybe_unused sprd_i2c_suspend_noirq(struct device *pdev)
 {
+       struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev);
+
+       i2c_lock_adapter(&i2c_dev->adap);
+       i2c_dev->is_suspended = true;
+       i2c_unlock_adapter(&i2c_dev->adap);
+
        return pm_runtime_force_suspend(pdev);
 }
 
 static int __maybe_unused sprd_i2c_resume_noirq(struct device *pdev)
 {
+       struct sprd_i2c *i2c_dev = dev_get_drvdata(pdev);
+
+       i2c_lock_adapter(&i2c_dev->adap);
+       i2c_dev->is_suspended = false;
+       i2c_unlock_adapter(&i2c_dev->adap);
+
        return pm_runtime_force_resume(pdev);
 }
 
index 036a03f..1667b6e 100644 (file)
@@ -280,7 +280,7 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client,
                 */
                if (msgs[i].flags & I2C_M_RECV_LEN) {
                        if (!(msgs[i].flags & I2C_M_RD) ||
-                           msgs[i].buf[0] < 1 ||
+                           msgs[i].len < 1 || msgs[i].buf[0] < 1 ||
                            msgs[i].len < msgs[i].buf[0] +
                                             I2C_SMBUS_BLOCK_MAX) {
                                res = -EINVAL;
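
For I2C_M_RECV_LEN transfers the first byte of the buffer carries the slave-supplied block length, so the ioctl must check msgs[i].len before it ever reads buf[0]; the extra "len < 1" test above closes that hole. The same validation in isolation (validate_recv_len_msg() is an illustrative helper, not part of the i2c-dev code):

    #include <linux/i2c.h>
    #include <linux/errno.h>

    static int validate_recv_len_msg(const struct i2c_msg *msg)
    {
            if (!(msg->flags & I2C_M_RD))
                    return -EINVAL;         /* RECV_LEN only makes sense for reads */
            if (msg->len < 1 || msg->buf[0] < 1 ||
                msg->len < msg->buf[0] + I2C_SMBUS_BLOCK_MAX)
                    return -EINVAL;         /* too small to hold length byte + block */
            return 0;
    }
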
index ee270e0..2a972ed 100644 (file)
@@ -61,9 +61,12 @@ config INFINIBAND_ON_DEMAND_PAGING
          pages on demand instead.
 
 config INFINIBAND_ADDR_TRANS
-       bool
+       bool "RDMA/CM"
        depends on INFINIBAND
        default y
+       ---help---
+         Support for RDMA communication manager (CM).
+         This allows for a generic connection abstraction over RDMA.
 
 config INFINIBAND_ADDR_TRANS_CONFIGFS
        bool
index e337b08..fb2d347 100644 (file)
@@ -291,14 +291,18 @@ static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
                 * so lookup free slot only if requested.
                 */
                if (pempty && empty < 0) {
-                       if (data->props & GID_TABLE_ENTRY_INVALID) {
-                               /* Found an invalid (free) entry; allocate it */
-                               if (data->props & GID_TABLE_ENTRY_DEFAULT) {
-                                       if (default_gid)
-                                               empty = curr_index;
-                               } else {
-                                       empty = curr_index;
-                               }
+                       if (data->props & GID_TABLE_ENTRY_INVALID &&
+                           (default_gid ==
+                            !!(data->props & GID_TABLE_ENTRY_DEFAULT))) {
+                               /*
+                                * Found an invalid (free) entry; allocate it.
+                                * If default GID is requested, then our
+                                * found slot must be one of the DEFAULT
+                                * reserved slots or we fail.
+                                * This ensures that only DEFAULT reserved
+                                * slots are used for default property GIDs.
+                                */
+                               empty = curr_index;
                        }
                }
 
@@ -420,8 +424,10 @@ int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
        return ret;
 }
 
-int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
-                    union ib_gid *gid, struct ib_gid_attr *attr)
+static int
+_ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
+                 union ib_gid *gid, struct ib_gid_attr *attr,
+                 unsigned long mask, bool default_gid)
 {
        struct ib_gid_table *table;
        int ret = 0;
@@ -431,11 +437,7 @@ int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
 
        mutex_lock(&table->lock);
 
-       ix = find_gid(table, gid, attr, false,
-                     GID_ATTR_FIND_MASK_GID      |
-                     GID_ATTR_FIND_MASK_GID_TYPE |
-                     GID_ATTR_FIND_MASK_NETDEV,
-                     NULL);
+       ix = find_gid(table, gid, attr, default_gid, mask, NULL);
        if (ix < 0) {
                ret = -EINVAL;
                goto out_unlock;
@@ -452,6 +454,17 @@ out_unlock:
        return ret;
 }
 
+int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
+                    union ib_gid *gid, struct ib_gid_attr *attr)
+{
+       unsigned long mask = GID_ATTR_FIND_MASK_GID       |
+                            GID_ATTR_FIND_MASK_GID_TYPE |
+                            GID_ATTR_FIND_MASK_DEFAULT  |
+                            GID_ATTR_FIND_MASK_NETDEV;
+
+       return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false);
+}
+
 int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
                                     struct net_device *ndev)
 {
@@ -728,7 +741,7 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
                                  unsigned long gid_type_mask,
                                  enum ib_cache_gid_default_mode mode)
 {
-       union ib_gid gid;
+       union ib_gid gid = { };
        struct ib_gid_attr gid_attr;
        struct ib_gid_table *table;
        unsigned int gid_type;
@@ -736,7 +749,9 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
 
        table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
-       make_default_gid(ndev, &gid);
+       mask = GID_ATTR_FIND_MASK_GID_TYPE |
+              GID_ATTR_FIND_MASK_DEFAULT |
+              GID_ATTR_FIND_MASK_NETDEV;
        memset(&gid_attr, 0, sizeof(gid_attr));
        gid_attr.ndev = ndev;
 
@@ -747,12 +762,12 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
                gid_attr.gid_type = gid_type;
 
                if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
-                       mask = GID_ATTR_FIND_MASK_GID_TYPE |
-                              GID_ATTR_FIND_MASK_DEFAULT;
+                       make_default_gid(ndev, &gid);
                        __ib_cache_gid_add(ib_dev, port, &gid,
                                           &gid_attr, mask, true);
                } else if (mode == IB_CACHE_GID_DEFAULT_MODE_DELETE) {
-                       ib_cache_gid_del(ib_dev, port, &gid, &gid_attr);
+                       _ib_cache_gid_del(ib_dev, port, &gid,
+                                         &gid_attr, mask, true);
                }
        }
 }
index 51a6410..a693fcd 100644 (file)
@@ -382,6 +382,8 @@ struct cma_hdr {
 #define CMA_VERSION 0x00
 
 struct cma_req_info {
+       struct sockaddr_storage listen_addr_storage;
+       struct sockaddr_storage src_addr_storage;
        struct ib_device *device;
        int port;
        union ib_gid local_gid;
@@ -866,7 +868,6 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
 {
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;
-       union ib_gid sgid;
 
        mutex_lock(&id_priv->qp_mutex);
        if (!id_priv->id.qp) {
@@ -889,12 +890,6 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
        if (ret)
                goto out;
 
-       ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
-                          rdma_ah_read_grh(&qp_attr.ah_attr)->sgid_index,
-                          &sgid, NULL);
-       if (ret)
-               goto out;
-
        BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
 
        if (conn_param)
@@ -1340,11 +1335,11 @@ static bool validate_net_dev(struct net_device *net_dev,
 }
 
 static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
-                                         const struct cma_req_info *req)
+                                         struct cma_req_info *req)
 {
-       struct sockaddr_storage listen_addr_storage, src_addr_storage;
-       struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage,
-                       *src_addr = (struct sockaddr *)&src_addr_storage;
+       struct sockaddr *listen_addr =
+                       (struct sockaddr *)&req->listen_addr_storage;
+       struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage;
        struct net_device *net_dev;
        const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
        int err;
@@ -1359,11 +1354,6 @@ static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
        if (!net_dev)
                return ERR_PTR(-ENODEV);
 
-       if (!validate_net_dev(net_dev, listen_addr, src_addr)) {
-               dev_put(net_dev);
-               return ERR_PTR(-EHOSTUNREACH);
-       }
-
        return net_dev;
 }
 
@@ -1490,15 +1480,51 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
                }
        }
 
+       /*
+        * Net namespace might be getting deleted while route lookup,
+        * cm_id lookup is in progress. Therefore, perform netdevice
+        * validation, cm_id lookup under rcu lock.
+        * RCU lock along with netdevice state check, synchronizes with
+        * netdevice migrating to different net namespace and also avoids
+        * case where net namespace doesn't get deleted while lookup is in
+        * progress.
+        * If the device state is not IFF_UP, its properties such as ifindex
+        * and nd_net cannot be trusted to remain valid without rcu lock.
+        * net/core/dev.c change_net_namespace() ensures to synchronize with
+        * ongoing operations on net device after device is closed using
+        * synchronize_net().
+        */
+       rcu_read_lock();
+       if (*net_dev) {
+               /*
+                * If netdevice is down, it is likely that it is administratively
+                * down or it might be migrating to different namespace.
+                * In that case avoid further processing, as the net namespace
+                * or ifindex may change.
+                */
+               if (((*net_dev)->flags & IFF_UP) == 0) {
+                       id_priv = ERR_PTR(-EHOSTUNREACH);
+                       goto err;
+               }
+
+               if (!validate_net_dev(*net_dev,
+                                (struct sockaddr *)&req.listen_addr_storage,
+                                (struct sockaddr *)&req.src_addr_storage)) {
+                       id_priv = ERR_PTR(-EHOSTUNREACH);
+                       goto err;
+               }
+       }
+
        bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
                                rdma_ps_from_service_id(req.service_id),
                                cma_port_from_service_id(req.service_id));
        id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
+err:
+       rcu_read_unlock();
        if (IS_ERR(id_priv) && *net_dev) {
                dev_put(*net_dev);
                *net_dev = NULL;
        }
-
        return id_priv;
 }
 
index 9821ae9..da12da1 100644 (file)
@@ -114,7 +114,7 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
                        struct sockaddr_storage *mapped_sockaddr,
                        u8 nl_client)
 {
-       struct hlist_head *hash_bucket_head;
+       struct hlist_head *hash_bucket_head = NULL;
        struct iwpm_mapping_info *map_info;
        unsigned long flags;
        int ret = -EINVAL;
@@ -142,6 +142,9 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
                }
        }
        spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
+
+       if (!hash_bucket_head)
+               kfree(map_info);
        return ret;
 }
 
index c50596f..b28452a 100644 (file)
@@ -59,7 +59,7 @@ module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
 MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
 
 static struct list_head ib_mad_port_list;
-static u32 ib_mad_client_id = 0;
+static atomic_t ib_mad_client_id = ATOMIC_INIT(0);
 
 /* Port list lock */
 static DEFINE_SPINLOCK(ib_mad_port_list_lock);
@@ -377,7 +377,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
        }
 
        spin_lock_irqsave(&port_priv->reg_lock, flags);
-       mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
+       mad_agent_priv->agent.hi_tid = atomic_inc_return(&ib_mad_client_id);
 
        /*
         * Make sure MAD registration (if supplied)
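
Bumping a plain "static u32" from the registration path is racy once two clients register concurrently: both can read the same value and end up with identical hi_tids. atomic_inc_return() turns the allocation into a single atomic step. The pattern in isolation (example_client_id is an invented name):

    #include <linux/atomic.h>
    #include <linux/types.h>

    static atomic_t example_client_id = ATOMIC_INIT(0);

    static u32 alloc_client_id(void)
    {
            /* returns 1, 2, 3, ... with no duplicates even under concurrency */
            return (u32)atomic_inc_return(&example_client_id);
    }
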
index cc29663..c0e4fd5 100644 (file)
@@ -255,6 +255,7 @@ static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
                                            struct net_device *rdma_ndev)
 {
        struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev);
+       unsigned long gid_type_mask;
 
        if (!rdma_ndev)
                return;
@@ -264,21 +265,22 @@ static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
 
        rcu_read_lock();
 
-       if (rdma_is_upper_dev_rcu(rdma_ndev, event_ndev) &&
-           is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) ==
-           BONDING_SLAVE_STATE_INACTIVE) {
-               unsigned long gid_type_mask;
-
+       if (((rdma_ndev != event_ndev &&
+             !rdma_is_upper_dev_rcu(rdma_ndev, event_ndev)) ||
+            is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev)
+                                                ==
+            BONDING_SLAVE_STATE_INACTIVE)) {
                rcu_read_unlock();
+               return;
+       }
 
-               gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
+       rcu_read_unlock();
 
-               ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
-                                            gid_type_mask,
-                                            IB_CACHE_GID_DEFAULT_MODE_DELETE);
-       } else {
-               rcu_read_unlock();
-       }
+       gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
+
+       ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
+                                    gid_type_mask,
+                                    IB_CACHE_GID_DEFAULT_MODE_DELETE);
 }
 
 static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
index 7432948..eab43b1 100644 (file)
@@ -159,6 +159,23 @@ static void ucma_put_ctx(struct ucma_context *ctx)
                complete(&ctx->comp);
 }
 
+/*
+ * Same as ucm_get_ctx but requires that ->cm_id->device is valid, eg that the
+ * CM_ID is bound.
+ */
+static struct ucma_context *ucma_get_ctx_dev(struct ucma_file *file, int id)
+{
+       struct ucma_context *ctx = ucma_get_ctx(file, id);
+
+       if (IS_ERR(ctx))
+               return ctx;
+       if (!ctx->cm_id->device) {
+               ucma_put_ctx(ctx);
+               return ERR_PTR(-EINVAL);
+       }
+       return ctx;
+}
+
 static void ucma_close_event_id(struct work_struct *work)
 {
        struct ucma_event *uevent_close =  container_of(work, struct ucma_event, close_work);
@@ -683,7 +700,7 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;
 
-       if (!rdma_addr_size_in6(&cmd.src_addr) ||
+       if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) ||
            !rdma_addr_size_in6(&cmd.dst_addr))
                return -EINVAL;
 
@@ -734,7 +751,7 @@ static ssize_t ucma_resolve_route(struct ucma_file *file,
        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;
 
-       ctx = ucma_get_ctx(file, cmd.id);
+       ctx = ucma_get_ctx_dev(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
 
@@ -1050,7 +1067,7 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
        if (!cmd.conn_param.valid)
                return -EINVAL;
 
-       ctx = ucma_get_ctx(file, cmd.id);
+       ctx = ucma_get_ctx_dev(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
 
@@ -1092,7 +1109,7 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;
 
-       ctx = ucma_get_ctx(file, cmd.id);
+       ctx = ucma_get_ctx_dev(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
 
@@ -1120,7 +1137,7 @@ static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;
 
-       ctx = ucma_get_ctx(file, cmd.id);
+       ctx = ucma_get_ctx_dev(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
 
@@ -1139,7 +1156,7 @@ static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;
 
-       ctx = ucma_get_ctx(file, cmd.id);
+       ctx = ucma_get_ctx_dev(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
 
@@ -1167,15 +1184,10 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
        if (cmd.qp_state > IB_QPS_ERR)
                return -EINVAL;
 
-       ctx = ucma_get_ctx(file, cmd.id);
+       ctx = ucma_get_ctx_dev(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
 
-       if (!ctx->cm_id->device) {
-               ret = -EINVAL;
-               goto out;
-       }
-
        resp.qp_attr_mask = 0;
        memset(&qp_attr, 0, sizeof qp_attr);
        qp_attr.qp_state = cmd.qp_state;
@@ -1316,13 +1328,13 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;
 
+       if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
+               return -EINVAL;
+
        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
 
-       if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
-               return -EINVAL;
-
        optval = memdup_user(u64_to_user_ptr(cmd.optval),
                             cmd.optlen);
        if (IS_ERR(optval)) {
@@ -1384,7 +1396,7 @@ static ssize_t ucma_process_join(struct ucma_file *file,
        else
                return -EINVAL;
 
-       ctx = ucma_get_ctx(file, cmd->id);
+       ctx = ucma_get_ctx_dev(file, cmd->id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
 
index 13cb5e4..21a887c 100644 (file)
@@ -691,6 +691,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
 
        mr->device  = pd->device;
        mr->pd      = pd;
+       mr->dm      = NULL;
        mr->uobject = uobj;
        atomic_inc(&pd->usecnt);
        mr->res.type = RDMA_RESTRACK_MR;
@@ -765,6 +766,11 @@ ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
 
        mr = uobj->object;
 
+       if (mr->dm) {
+               ret = -EINVAL;
+               goto put_uobjs;
+       }
+
        if (cmd.flags & IB_MR_REREG_ACCESS) {
                ret = ib_check_mr_access(cmd.access_flags);
                if (ret)
index 8c93970..8d32c4a 100644 (file)
@@ -234,6 +234,15 @@ static int uverbs_validate_kernel_mandatory(const struct uverbs_method_spec *met
                        return -EINVAL;
        }
 
+       for (; i < method_spec->num_buckets; i++) {
+               struct uverbs_attr_spec_hash *attr_spec_bucket =
+                       method_spec->attr_buckets[i];
+
+               if (!bitmap_empty(attr_spec_bucket->mandatory_attrs_bitmask,
+                                 attr_spec_bucket->num_attrs))
+                       return -EINVAL;
+       }
+
        return 0;
 }
 
index cbcec3d..b4f016d 100644 (file)
@@ -363,28 +363,28 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)(struct ib_device
 
 static const struct uverbs_attr_spec uverbs_flow_action_esp_keymat[] = {
        [IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = {
-               .ptr = {
+               { .ptr = {
                        .type = UVERBS_ATTR_TYPE_PTR_IN,
                        UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_keymat_aes_gcm),
                        .flags = UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO,
-               },
+               } },
        },
 };
 
 static const struct uverbs_attr_spec uverbs_flow_action_esp_replay[] = {
        [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE] = {
-               .ptr = {
+               { .ptr = {
                        .type = UVERBS_ATTR_TYPE_PTR_IN,
                        /* No need to specify any data */
                        .len = 0,
-               }
+               } }
        },
        [IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP] = {
-               .ptr = {
+               { .ptr = {
                        .type = UVERBS_ATTR_TYPE_PTR_IN,
                        UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp_replay_bmp, size),
                        .flags = UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO,
-               }
+               } }
        },
 };
 
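The flow_action hunks above only add one more level of braces around the .ptr initializers (opening "{ .ptr = {" and closing "} }"). As a standalone illustration of why that level matters when a designated initializer targets a member that lives inside an anonymous union, here is a small made-up example; the struct below is invented for the demonstration and is not the uverbs type:

#include <stdio.h>

struct attr_spec {
	int type;
	union {
		struct { int len; int flags; } ptr;
		struct { int obj_type; }       obj;
	};
};

/* the chosen member of the anonymous union gets its own brace level */
static const struct attr_spec example = {
	.type = 1,
	{ .ptr = { .len = 16, .flags = 0 } },
};

int main(void)
{
	printf("len=%d flags=%d\n", example.ptr.len, example.ptr.flags);
	return 0;
}
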
index 7eff3ae..6ddfb1f 100644 (file)
@@ -1656,6 +1656,7 @@ struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
        if (!IS_ERR(mr)) {
                mr->device  = pd->device;
                mr->pd      = pd;
+               mr->dm      = NULL;
                mr->uobject = NULL;
                atomic_inc(&pd->usecnt);
                mr->need_inval = false;
index 6f2b261..2be2e1a 100644 (file)
@@ -315,7 +315,7 @@ static void advance_oldest_read(struct t4_wq *wq)
  * Deal with out-of-order and/or completions that complete
  * prior unsignalled WRs.
  */
-void c4iw_flush_hw_cq(struct c4iw_cq *chp)
+void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp)
 {
        struct t4_cqe *hw_cqe, *swcqe, read_cqe;
        struct c4iw_qp *qhp;
@@ -339,6 +339,13 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
                if (qhp == NULL)
                        goto next_cqe;
 
+               if (flush_qhp != qhp) {
+                       spin_lock(&qhp->lock);
+
+                       if (qhp->wq.flushed == 1)
+                               goto next_cqe;
+               }
+
                if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
                        goto next_cqe;
 
@@ -390,6 +397,8 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
 next_cqe:
                t4_hwcq_consume(&chp->cq);
                ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
+               if (qhp && flush_qhp != qhp)
+                       spin_unlock(&qhp->lock);
        }
 }
 
index feeb8ee..44161ca 100644 (file)
@@ -875,6 +875,11 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 
        rdev->status_page->db_off = 0;
 
+       init_completion(&rdev->rqt_compl);
+       init_completion(&rdev->pbl_compl);
+       kref_init(&rdev->rqt_kref);
+       kref_init(&rdev->pbl_kref);
+
        return 0;
 err_free_status_page_and_wr_log:
        if (c4iw_wr_log && rdev->wr_log)
@@ -893,13 +898,15 @@ destroy_resource:
 
 static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 {
-       destroy_workqueue(rdev->free_workq);
        kfree(rdev->wr_log);
        c4iw_release_dev_ucontext(rdev, &rdev->uctx);
        free_page((unsigned long)rdev->status_page);
        c4iw_pblpool_destroy(rdev);
        c4iw_rqtpool_destroy(rdev);
+       wait_for_completion(&rdev->pbl_compl);
+       wait_for_completion(&rdev->rqt_compl);
        c4iw_ocqp_pool_destroy(rdev);
+       destroy_workqueue(rdev->free_workq);
        c4iw_destroy_resource(&rdev->resource);
 }
 
index cc92900..8310277 100644 (file)
@@ -185,6 +185,10 @@ struct c4iw_rdev {
        struct wr_log_entry *wr_log;
        int wr_log_size;
        struct workqueue_struct *free_workq;
+       struct completion rqt_compl;
+       struct completion pbl_compl;
+       struct kref rqt_kref;
+       struct kref pbl_kref;
 };
 
 static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -1049,7 +1053,7 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
 void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
 u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
 void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
-void c4iw_flush_hw_cq(struct c4iw_cq *chp);
+void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp);
 void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
index de77b60..ae167b6 100644 (file)
@@ -1343,12 +1343,12 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
        qhp->wq.flushed = 1;
        t4_set_wq_in_error(&qhp->wq);
 
-       c4iw_flush_hw_cq(rchp);
+       c4iw_flush_hw_cq(rchp, qhp);
        c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
        rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
 
        if (schp != rchp)
-               c4iw_flush_hw_cq(schp);
+               c4iw_flush_hw_cq(schp, qhp);
        sq_flushed = c4iw_flush_sq(qhp);
 
        spin_unlock(&qhp->lock);
index 3cf2599..0ef25ae 100644 (file)
@@ -260,12 +260,22 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
                rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
                if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
                        rdev->stats.pbl.max = rdev->stats.pbl.cur;
+               kref_get(&rdev->pbl_kref);
        } else
                rdev->stats.pbl.fail++;
        mutex_unlock(&rdev->stats.lock);
        return (u32)addr;
 }
 
+static void destroy_pblpool(struct kref *kref)
+{
+       struct c4iw_rdev *rdev;
+
+       rdev = container_of(kref, struct c4iw_rdev, pbl_kref);
+       gen_pool_destroy(rdev->pbl_pool);
+       complete(&rdev->pbl_compl);
+}
+
 void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
        pr_debug("addr 0x%x size %d\n", addr, size);
@@ -273,6 +283,7 @@ void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
        rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
        mutex_unlock(&rdev->stats.lock);
        gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
+       kref_put(&rdev->pbl_kref, destroy_pblpool);
 }
 
 int c4iw_pblpool_create(struct c4iw_rdev *rdev)
@@ -310,7 +321,7 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
 
 void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
 {
-       gen_pool_destroy(rdev->pbl_pool);
+       kref_put(&rdev->pbl_kref, destroy_pblpool);
 }
 
 /*
@@ -331,12 +342,22 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
                rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
                if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
                        rdev->stats.rqt.max = rdev->stats.rqt.cur;
+               kref_get(&rdev->rqt_kref);
        } else
                rdev->stats.rqt.fail++;
        mutex_unlock(&rdev->stats.lock);
        return (u32)addr;
 }
 
+static void destroy_rqtpool(struct kref *kref)
+{
+       struct c4iw_rdev *rdev;
+
+       rdev = container_of(kref, struct c4iw_rdev, rqt_kref);
+       gen_pool_destroy(rdev->rqt_pool);
+       complete(&rdev->rqt_compl);
+}
+
 void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
        pr_debug("addr 0x%x size %d\n", addr, size << 6);
@@ -344,6 +365,7 @@ void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
        rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
        mutex_unlock(&rdev->stats.lock);
        gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
+       kref_put(&rdev->rqt_kref, destroy_rqtpool);
 }
 
 int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
@@ -380,7 +402,7 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
 
 void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
 {
-       gen_pool_destroy(rdev->rqt_pool);
+       kref_put(&rdev->rqt_kref, destroy_rqtpool);
 }
 
 /*
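
The cxgb4 changes above tie the lifetime of the PBL and RQT gen_pools to their outstanding allocations: each successful alloc takes a kref, each free drops one, and rdev close waits on a completion fired by the last put before the free workqueue is destroyed. A minimal sketch of that kref-plus-completion pattern, using generic names rather than the driver's:

#include <linux/kref.h>
#include <linux/completion.h>
#include <linux/genalloc.h>

struct demo_pool {
	struct gen_pool   *gp;
	struct kref        ref;   /* one ref per outstanding allocation + one for the pool */
	struct completion  done;  /* fired by the final kref_put() */
};

static void demo_pool_init(struct demo_pool *p, struct gen_pool *gp)
{
	p->gp = gp;
	kref_init(&p->ref);
	init_completion(&p->done);
}

static void demo_pool_release(struct kref *kref)
{
	struct demo_pool *p = container_of(kref, struct demo_pool, ref);

	gen_pool_destroy(p->gp);
	complete(&p->done);
}

/* every successful allocation pins the pool ... */
static unsigned long demo_alloc(struct demo_pool *p, size_t size)
{
	unsigned long addr = gen_pool_alloc(p->gp, size);

	if (addr)
		kref_get(&p->ref);
	return addr;
}

/* ... and every free drops that pin */
static void demo_free(struct demo_pool *p, unsigned long addr, size_t size)
{
	gen_pool_free(p->gp, addr, size);
	kref_put(&p->ref, demo_pool_release);
}

/* teardown drops the pool's own reference and waits for stragglers */
static void demo_pool_destroy(struct demo_pool *p)
{
	kref_put(&p->ref, demo_pool_release);
	wait_for_completion(&p->done);
}
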
index a97055d..b5fab55 100644 (file)
@@ -412,7 +412,6 @@ static void hfi1_cleanup_sdma_notifier(struct hfi1_msix_entry *msix)
 static int get_irq_affinity(struct hfi1_devdata *dd,
                            struct hfi1_msix_entry *msix)
 {
-       int ret;
        cpumask_var_t diff;
        struct hfi1_affinity_node *entry;
        struct cpu_mask_set *set = NULL;
@@ -424,10 +423,6 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
        extra[0] = '\0';
        cpumask_clear(&msix->mask);
 
-       ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
-       if (!ret)
-               return -ENOMEM;
-
        entry = node_affinity_lookup(dd->node);
 
        switch (msix->type) {
@@ -458,6 +453,9 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
         * finds its CPU here.
         */
        if (cpu == -1 && set) {
+               if (!zalloc_cpumask_var(&diff, GFP_KERNEL))
+                       return -ENOMEM;
+
                if (cpumask_equal(&set->mask, &set->used)) {
                        /*
                         * We've used up all the CPUs, bump up the generation
@@ -469,6 +467,8 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
                cpumask_andnot(diff, &set->mask, &set->used);
                cpu = cpumask_first(diff);
                cpumask_set_cpu(cpu, &set->used);
+
+               free_cpumask_var(diff);
        }
 
        cpumask_set_cpu(cpu, &msix->mask);
@@ -482,7 +482,6 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
                hfi1_setup_sdma_notifier(msix);
        }
 
-       free_cpumask_var(diff);
        return 0;
 }
 
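The hfi1 affinity hunk above moves the scratch cpumask allocation into the one branch that actually uses it, and frees it on that same branch. A self-contained sketch of that allocate-use-free shape, with an illustrative function name:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* pick a CPU from 'avail' that is not yet in 'used', and mark it used */
static int pick_unused_cpu(const struct cpumask *avail, struct cpumask *used)
{
	cpumask_var_t diff;
	unsigned int cpu;

	/* allocate the scratch mask only on the path that needs it */
	if (!zalloc_cpumask_var(&diff, GFP_KERNEL))
		return -ENOMEM;

	cpumask_andnot(diff, avail, used);
	cpu = cpumask_first(diff);
	if (cpu < nr_cpu_ids)
		cpumask_set_cpu(cpu, used);

	free_cpumask_var(diff);

	return cpu < nr_cpu_ids ? (int)cpu : -EBUSY;
}
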
index 46d1475..bd837a0 100644 (file)
@@ -433,31 +433,43 @@ void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
                               bool do_cnp)
 {
        struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+       struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
        struct ib_other_headers *ohdr = pkt->ohdr;
        struct ib_grh *grh = pkt->grh;
        u32 rqpn = 0, bth1;
-       u16 pkey, rlid, dlid = ib_get_dlid(pkt->hdr);
+       u16 pkey;
+       u32 rlid, slid, dlid = 0;
        u8 hdr_type, sc, svc_type;
        bool is_mcast = false;
 
+       /* can be called from prescan */
        if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
                is_mcast = hfi1_is_16B_mcast(dlid);
                pkey = hfi1_16B_get_pkey(pkt->hdr);
                sc = hfi1_16B_get_sc(pkt->hdr);
+               dlid = hfi1_16B_get_dlid(pkt->hdr);
+               slid = hfi1_16B_get_slid(pkt->hdr);
                hdr_type = HFI1_PKT_TYPE_16B;
        } else {
                is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
                           (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
                pkey = ib_bth_get_pkey(ohdr);
                sc = hfi1_9B_get_sc5(pkt->hdr, pkt->rhf);
+               dlid = ib_get_dlid(pkt->hdr);
+               slid = ib_get_slid(pkt->hdr);
                hdr_type = HFI1_PKT_TYPE_9B;
        }
 
        switch (qp->ibqp.qp_type) {
+       case IB_QPT_UD:
+               dlid = ppd->lid;
+               rlid = slid;
+               rqpn = ib_get_sqpn(pkt->ohdr);
+               svc_type = IB_CC_SVCTYPE_UD;
+               break;
        case IB_QPT_SMI:
        case IB_QPT_GSI:
-       case IB_QPT_UD:
-               rlid = ib_get_slid(pkt->hdr);
+               rlid = slid;
                rqpn = ib_get_sqpn(pkt->ohdr);
                svc_type = IB_CC_SVCTYPE_UD;
                break;
@@ -482,7 +494,6 @@ void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
                                              dlid, rlid, sc, grh);
 
        if (!is_mcast && (bth1 & IB_BECN_SMASK)) {
-               struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
                u32 lqpn = bth1 & RVT_QPN_MASK;
                u8 sl = ibp->sc_to_sl[sc];
 
index 32c4826..cac2c62 100644 (file)
@@ -1537,13 +1537,13 @@ void set_link_ipg(struct hfi1_pportdata *ppd);
 void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
                  u32 rqpn, u8 svc_type);
 void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
-               u32 pkey, u32 slid, u32 dlid, u8 sc5,
+               u16 pkey, u32 slid, u32 dlid, u8 sc5,
                const struct ib_grh *old_grh);
 void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
-                   u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
+                   u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
                    u8 sc5, const struct ib_grh *old_grh);
 typedef void (*hfi1_handle_cnp)(struct hfi1_ibport *ibp, struct rvt_qp *qp,
-                               u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
+                               u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
                                u8 sc5, const struct ib_grh *old_grh);
 
 #define PKEY_CHECK_INVALID -1
@@ -2437,7 +2437,7 @@ static inline void hfi1_make_16b_hdr(struct hfi1_16b_header *hdr,
                ((slid >> OPA_16B_SLID_SHIFT) << OPA_16B_SLID_HIGH_SHIFT);
        lrh2 = (lrh2 & ~OPA_16B_DLID_MASK) |
                ((dlid >> OPA_16B_DLID_SHIFT) << OPA_16B_DLID_HIGH_SHIFT);
-       lrh2 = (lrh2 & ~OPA_16B_PKEY_MASK) | (pkey << OPA_16B_PKEY_SHIFT);
+       lrh2 = (lrh2 & ~OPA_16B_PKEY_MASK) | ((u32)pkey << OPA_16B_PKEY_SHIFT);
        lrh2 = (lrh2 & ~OPA_16B_L4_MASK) | l4;
 
        hdr->lrh[0] = lrh0;
index 33eba23..6309edf 100644 (file)
@@ -88,9 +88,9 @@
  * pio buffers per ctxt, etc.)  Zero means use one user context per CPU.
  */
 int num_user_contexts = -1;
-module_param_named(num_user_contexts, num_user_contexts, uint, S_IRUGO);
+module_param_named(num_user_contexts, num_user_contexts, int, 0444);
 MODULE_PARM_DESC(
-       num_user_contexts, "Set max number of user contexts to use");
+       num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)");
 
 uint krcvqs[RXE_NUM_DATA_VL];
 int krcvqsset;
@@ -1209,30 +1209,49 @@ static void finalize_asic_data(struct hfi1_devdata *dd,
        kfree(ad);
 }
 
-static void __hfi1_free_devdata(struct kobject *kobj)
+/**
+ * hfi1_clean_devdata - cleans up per-unit data structure
+ * @dd: pointer to a valid devdata structure
+ *
+ * It cleans up all data structures set up by
+ * hfi1_alloc_devdata().
+ */
+static void hfi1_clean_devdata(struct hfi1_devdata *dd)
 {
-       struct hfi1_devdata *dd =
-               container_of(kobj, struct hfi1_devdata, kobj);
        struct hfi1_asic_data *ad;
        unsigned long flags;
 
        spin_lock_irqsave(&hfi1_devs_lock, flags);
-       idr_remove(&hfi1_unit_table, dd->unit);
-       list_del(&dd->list);
+       if (!list_empty(&dd->list)) {
+               idr_remove(&hfi1_unit_table, dd->unit);
+               list_del_init(&dd->list);
+       }
        ad = release_asic_data(dd);
        spin_unlock_irqrestore(&hfi1_devs_lock, flags);
-       if (ad)
-               finalize_asic_data(dd, ad);
+
+       finalize_asic_data(dd, ad);
        free_platform_config(dd);
        rcu_barrier(); /* wait for rcu callbacks to complete */
        free_percpu(dd->int_counter);
        free_percpu(dd->rcv_limit);
        free_percpu(dd->send_schedule);
        free_percpu(dd->tx_opstats);
+       dd->int_counter   = NULL;
+       dd->rcv_limit     = NULL;
+       dd->send_schedule = NULL;
+       dd->tx_opstats    = NULL;
        sdma_clean(dd, dd->num_sdma);
        rvt_dealloc_device(&dd->verbs_dev.rdi);
 }
 
+static void __hfi1_free_devdata(struct kobject *kobj)
+{
+       struct hfi1_devdata *dd =
+               container_of(kobj, struct hfi1_devdata, kobj);
+
+       hfi1_clean_devdata(dd);
+}
+
 static struct kobj_type hfi1_devdata_type = {
        .release = __hfi1_free_devdata,
 };
@@ -1265,6 +1284,8 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
                return ERR_PTR(-ENOMEM);
        dd->num_pports = nports;
        dd->pport = (struct hfi1_pportdata *)(dd + 1);
+       dd->pcidev = pdev;
+       pci_set_drvdata(pdev, dd);
 
        INIT_LIST_HEAD(&dd->list);
        idr_preload(GFP_KERNEL);
@@ -1331,9 +1352,7 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
        return dd;
 
 bail:
-       if (!list_empty(&dd->list))
-               list_del_init(&dd->list);
-       rvt_dealloc_device(&dd->verbs_dev.rdi);
+       hfi1_clean_devdata(dd);
        return ERR_PTR(ret);
 }
 
index 83d66e8..c1c9829 100644 (file)
@@ -163,9 +163,6 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
        resource_size_t addr;
        int ret = 0;
 
-       dd->pcidev = pdev;
-       pci_set_drvdata(pdev, dd);
-
        addr = pci_resource_start(pdev, 0);
        len = pci_resource_len(pdev, 0);
 
index d486355..cbf7faa 100644 (file)
@@ -199,6 +199,7 @@ void free_platform_config(struct hfi1_devdata *dd)
 {
        /* Release memory allocated for eprom or fallback file read. */
        kfree(dd->platform_config.data);
+       dd->platform_config.data = NULL;
 }
 
 void get_port_type(struct hfi1_pportdata *ppd)
index 1869f63..b596699 100644 (file)
@@ -204,6 +204,8 @@ static void clean_i2c_bus(struct hfi1_i2c_bus *bus)
 
 void clean_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad)
 {
+       if (!ad)
+               return;
        clean_i2c_bus(ad->i2c_bus0);
        ad->i2c_bus0 = NULL;
        clean_i2c_bus(ad->i2c_bus1);
index 3daa94b..c0071ca 100644 (file)
@@ -733,6 +733,20 @@ static inline void hfi1_make_ruc_bth(struct rvt_qp *qp,
        ohdr->bth[2] = cpu_to_be32(bth2);
 }
 
+/**
+ * hfi1_make_ruc_header_16B - build a 16B header
+ * @qp: the queue pair
+ * @ohdr: a pointer to the destination header memory
+ * @bth0: bth0 passed in from the RC/UC builder
+ * @bth2: bth2 passed in from the RC/UC builder
+ * @middle: non-zero indicates ahg "could" be used
+ * @ps: the current packet state
+ *
+ * This routine may disarm ahg under these situations:
+ * - packet needs a GRH
+ * - BECN needed
+ * - migration state not IB_MIG_MIGRATED
+ */
 static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
                                            struct ib_other_headers *ohdr,
                                            u32 bth0, u32 bth2, int middle,
@@ -777,6 +791,12 @@ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
        else
                middle = 0;
 
+       if (qp->s_flags & RVT_S_ECN) {
+               qp->s_flags &= ~RVT_S_ECN;
+               /* we recently received a FECN, so return a BECN */
+               becn = true;
+               middle = 0;
+       }
        if (middle)
                build_ahg(qp, bth2);
        else
@@ -784,11 +804,6 @@ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
 
        bth0 |= pkey;
        bth0 |= extra_bytes << 20;
-       if (qp->s_flags & RVT_S_ECN) {
-               qp->s_flags &= ~RVT_S_ECN;
-               /* we recently received a FECN, so return a BECN */
-               becn = true;
-       }
        hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);
 
        if (!ppd->lid)
@@ -806,6 +821,20 @@ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
                          pkey, becn, 0, l4, priv->s_sc);
 }
 
+/**
+ * hfi1_make_ruc_header_9B - build a 9B header
+ * @qp: the queue pair
+ * @ohdr: a pointer to the destination header memory
+ * @bth0: bth0 passed in from the RC/UC builder
+ * @bth2: bth2 passed in from the RC/UC builder
+ * @middle: non-zero indicates ahg "could" be used
+ * @ps: the current packet state
+ *
+ * This routine may disarm ahg under these situations:
+ * - packet needs a GRH
+ * - BECN needed
+ * - migration state not IB_MIG_MIGRATED
+ */
 static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
                                           struct ib_other_headers *ohdr,
                                           u32 bth0, u32 bth2, int middle,
@@ -839,6 +868,12 @@ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
        else
                middle = 0;
 
+       if (qp->s_flags & RVT_S_ECN) {
+               qp->s_flags &= ~RVT_S_ECN;
+               /* we recently received a FECN, so return a BECN */
+               bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
+               middle = 0;
+       }
        if (middle)
                build_ahg(qp, bth2);
        else
@@ -846,11 +881,6 @@ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
 
        bth0 |= pkey;
        bth0 |= extra_bytes << 20;
-       if (qp->s_flags & RVT_S_ECN) {
-               qp->s_flags &= ~RVT_S_ECN;
-               /* we recently received a FECN, so return a BECN */
-               bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
-       }
        hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);
        hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
                         lrh0,
index bcf3b0b..69c17a5 100644 (file)
@@ -628,7 +628,7 @@ int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey)
 }
 
 void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
-                   u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
+                   u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
                    u8 sc5, const struct ib_grh *old_grh)
 {
        u64 pbc, pbc_flags = 0;
@@ -687,7 +687,7 @@ void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
 }
 
 void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
-               u32 pkey, u32 slid, u32 dlid, u8 sc5,
+               u16 pkey, u32 slid, u32 dlid, u8 sc5,
                const struct ib_grh *old_grh)
 {
        u64 pbc, pbc_flags = 0;
index 0eeabfb..63b5b3e 100644 (file)
@@ -912,7 +912,7 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
                obj_per_chunk = buf_chunk_size / obj_size;
                num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
                bt_chunk_num = bt_chunk_size / 8;
-               if (table->type >= HEM_TYPE_MTT)
+               if (type >= HEM_TYPE_MTT)
                        num_bt_l0 = bt_chunk_num;
 
                table->hem = kcalloc(num_hem, sizeof(*table->hem),
@@ -920,7 +920,7 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
                if (!table->hem)
                        goto err_kcalloc_hem_buf;
 
-               if (check_whether_bt_num_3(table->type, hop_num)) {
+               if (check_whether_bt_num_3(type, hop_num)) {
                        unsigned long num_bt_l1;
 
                        num_bt_l1 = (num_hem + bt_chunk_num - 1) /
@@ -939,8 +939,8 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
                                goto err_kcalloc_l1_dma;
                }
 
-               if (check_whether_bt_num_2(table->type, hop_num) ||
-                       check_whether_bt_num_3(table->type, hop_num)) {
+               if (check_whether_bt_num_2(type, hop_num) ||
+                       check_whether_bt_num_3(type, hop_num)) {
                        table->bt_l0 = kcalloc(num_bt_l0, sizeof(*table->bt_l0),
                                               GFP_KERNEL);
                        if (!table->bt_l0)
@@ -1039,14 +1039,14 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
 void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
 {
        hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
-       hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
        if (hr_dev->caps.trrl_entry_sz)
                hns_roce_cleanup_hem_table(hr_dev,
                                           &hr_dev->qp_table.trrl_table);
+       hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
        hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
        hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
-       hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
        if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
                hns_roce_cleanup_hem_table(hr_dev,
                                           &hr_dev->mr_table.mtt_cqe_table);
+       hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
 }
index 8b84ab7..25916e8 100644 (file)
@@ -71,6 +71,11 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        return -EINVAL;
                }
 
+               if (wr->opcode == IB_WR_RDMA_READ) {
+                       dev_err(hr_dev->dev, "RDMA READ does not support inline data!\n");
+                       return -EINVAL;
+               }
+
                for (i = 0; i < wr->num_sge; i++) {
                        memcpy(wqe, ((void *)wr->sg_list[i].addr),
                               wr->sg_list[i].length);
@@ -148,7 +153,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                     ibqp->qp_type != IB_QPT_GSI &&
                     ibqp->qp_type != IB_QPT_UD)) {
                dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
-               *bad_wr = NULL;
+               *bad_wr = wr;
                return -EOPNOTSUPP;
        }
 
@@ -182,7 +187,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
                                                                      wr->wr_id;
 
-               owner_bit = ~(qp->sq.head >> ilog2(qp->sq.wqe_cnt)) & 0x1;
+               owner_bit =
+                      ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
 
                /* Corresponding to the QP type, wqe process separately */
                if (ibqp->qp_type == IB_QPT_GSI) {
@@ -456,6 +462,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                } else {
                        dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
                        spin_unlock_irqrestore(&qp->sq.lock, flags);
+                       *bad_wr = wr;
                        return -EOPNOTSUPP;
                }
        }
@@ -2592,10 +2599,12 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
        roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
                       V2_QPC_BYTE_4_SQPN_S, 0);
 
-       roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
-                      V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
-       roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
-                      V2_QPC_BYTE_56_DQPN_S, 0);
+       if (attr_mask & IB_QP_DEST_QPN) {
+               roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
+                              V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
+               roce_set_field(qpc_mask->byte_56_dqpn_err,
+                              V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
+       }
        roce_set_field(context->byte_168_irrl_idx,
                       V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
                       V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
@@ -2650,8 +2659,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
                return -EINVAL;
        }
 
-       if ((attr_mask & IB_QP_ALT_PATH) || (attr_mask & IB_QP_ACCESS_FLAGS) ||
-           (attr_mask & IB_QP_PKEY_INDEX) || (attr_mask & IB_QP_QKEY)) {
+       if (attr_mask & IB_QP_ALT_PATH) {
                dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask);
                return -EINVAL;
        }
@@ -2800,10 +2808,12 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
                               V2_QPC_BYTE_140_RR_MAX_S, 0);
        }
 
-       roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
-                      V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
-       roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
-                      V2_QPC_BYTE_56_DQPN_S, 0);
+       if (attr_mask & IB_QP_DEST_QPN) {
+               roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
+                              V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
+               roce_set_field(qpc_mask->byte_56_dqpn_err,
+                              V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
+       }
 
        /* Configure GID index */
        port_num = rdma_ah_get_port_num(&attr->ah_attr);
@@ -2845,7 +2855,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
        if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
                roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
                               V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
-       else
+       else if (attr_mask & IB_QP_PATH_MTU)
                roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
                               V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
 
@@ -2922,11 +2932,9 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
                return -EINVAL;
        }
 
-       /* If exist optional param, return error */
-       if ((attr_mask & IB_QP_ALT_PATH) || (attr_mask & IB_QP_ACCESS_FLAGS) ||
-           (attr_mask & IB_QP_QKEY) || (attr_mask & IB_QP_PATH_MIG_STATE) ||
-           (attr_mask & IB_QP_CUR_STATE) ||
-           (attr_mask & IB_QP_MIN_RNR_TIMER)) {
+       /* Alternate path and path migration are not supported */
+       if ((attr_mask & IB_QP_ALT_PATH) ||
+           (attr_mask & IB_QP_PATH_MIG_STATE)) {
                dev_err(dev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask);
                return -EINVAL;
        }
@@ -3161,7 +3169,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
                   (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
                   (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
                   (cur_state == IB_QPS_SQD && new_state == IB_QPS_ERR) ||
-                  (cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR)) {
+                  (cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR) ||
+                  (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR)) {
                /* Nothing */
                ;
        } else {
@@ -4478,7 +4487,7 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
        ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
                                eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
        if (ret) {
-               dev_err(dev, "[mailbox cmd] creat eqc failed.\n");
+               dev_err(dev, "[mailbox cmd] create eqc failed.\n");
                goto err_cmd_mbox;
        }
 
index e289a92..d4aad34 100644 (file)
@@ -620,7 +620,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
                                        to_hr_ucontext(ib_pd->uobject->context),
                                        ucmd.db_addr, &hr_qp->rdb);
                        if (ret) {
-                               dev_err(dev, "rp record doorbell map failed!\n");
+                               dev_err(dev, "rq record doorbell map failed!\n");
                                goto err_mtt;
                        }
                }
index 17f4f15..61d8b06 100644 (file)
@@ -346,7 +346,7 @@ int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
        /* Add to the first block the misalignment that it suffers from. */
        total_len += (first_block_start & ((1ULL << block_shift) - 1ULL));
        last_block_end = current_block_start + current_block_len;
-       last_block_aligned_end = round_up(last_block_end, 1 << block_shift);
+       last_block_aligned_end = round_up(last_block_end, 1ULL << block_shift);
        total_len += (last_block_aligned_end - last_block_end);
 
        if (total_len & ((1ULL << block_shift) - 1ULL))
index 50af891..199648a 100644 (file)
@@ -673,7 +673,8 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
                                          MLX4_IB_RX_HASH_SRC_PORT_TCP  |
                                          MLX4_IB_RX_HASH_DST_PORT_TCP  |
                                          MLX4_IB_RX_HASH_SRC_PORT_UDP  |
-                                         MLX4_IB_RX_HASH_DST_PORT_UDP)) {
+                                         MLX4_IB_RX_HASH_DST_PORT_UDP  |
+                                         MLX4_IB_RX_HASH_INNER)) {
                pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
                         ucmd->rx_hash_fields_mask);
                return (-EOPNOTSUPP);
index bce263b..fb4d77b 100644 (file)
@@ -1,6 +1,7 @@
 config MLX5_INFINIBAND
        tristate "Mellanox Connect-IB HCA support"
        depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
+       depends on INFINIBAND_USER_ACCESS || INFINIBAND_USER_ACCESS=n
        ---help---
          This driver provides low-level InfiniBand support for
          Mellanox Connect-IB PCI Express host channel adapters (HCAs).
index daa919e..b4d8ff8 100644 (file)
@@ -52,7 +52,6 @@
 #include <linux/mlx5/port.h>
 #include <linux/mlx5/vport.h>
 #include <linux/mlx5/fs.h>
-#include <linux/mlx5/fs_helpers.h>
 #include <linux/list.h>
 #include <rdma/ib_smi.h>
 #include <rdma/ib_umem.h>
@@ -180,7 +179,7 @@ static int mlx5_netdev_event(struct notifier_block *this,
                        if (rep_ndev == ndev)
                                roce->netdev = (event == NETDEV_UNREGISTER) ?
                                        NULL : ndev;
-               } else if (ndev->dev.parent == &ibdev->mdev->pdev->dev) {
+               } else if (ndev->dev.parent == &mdev->pdev->dev) {
                        roce->netdev = (event == NETDEV_UNREGISTER) ?
                                NULL : ndev;
                }
@@ -4757,7 +4756,7 @@ mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
 {
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
 
-       return mlx5_get_vector_affinity(dev->mdev, comp_vector);
+       return mlx5_get_vector_affinity_hint(dev->mdev, comp_vector);
 }
 
 /* The mlx5_ib_multiport_mutex should be held when calling this function */
@@ -5427,9 +5426,7 @@ static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
 static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
 {
        dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
-       if (!dev->mdev->priv.uar)
-               return -ENOMEM;
-       return 0;
+       return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
 }
 
 static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
index 1520a2f..90a9c46 100644 (file)
@@ -866,25 +866,28 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
                       int *order)
 {
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
+       struct ib_umem *u;
        int err;
 
-       *umem = ib_umem_get(pd->uobject->context, start, length,
-                           access_flags, 0);
-       err = PTR_ERR_OR_ZERO(*umem);
+       *umem = NULL;
+
+       u = ib_umem_get(pd->uobject->context, start, length, access_flags, 0);
+       err = PTR_ERR_OR_ZERO(u);
        if (err) {
-               *umem = NULL;
-               mlx5_ib_err(dev, "umem get failed (%d)\n", err);
+               mlx5_ib_dbg(dev, "umem get failed (%d)\n", err);
                return err;
        }
 
-       mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
+       mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
                           page_shift, ncont, order);
        if (!*npages) {
                mlx5_ib_warn(dev, "avoid zero region\n");
-               ib_umem_release(*umem);
+               ib_umem_release(u);
                return -EINVAL;
        }
 
+       *umem = u;
+
        mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
                    *npages, *ncont, *order, *page_shift);
 
@@ -1458,13 +1461,12 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
        int access_flags = flags & IB_MR_REREG_ACCESS ?
                            new_access_flags :
                            mr->access_flags;
-       u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
-       u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
        int page_shift = 0;
        int upd_flags = 0;
        int npages = 0;
        int ncont = 0;
        int order = 0;
+       u64 addr, len;
        int err;
 
        mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
@@ -1472,6 +1474,17 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 
        atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
 
+       if (!mr->umem)
+               return -EINVAL;
+
+       if (flags & IB_MR_REREG_TRANS) {
+               addr = virt_addr;
+               len = length;
+       } else {
+               addr = mr->umem->address;
+               len = mr->umem->length;
+       }
+
        if (flags != IB_MR_REREG_PD) {
                /*
                 * Replace umem. This needs to be done whether or not UMR is
@@ -1479,6 +1492,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
                 */
                flags |= IB_MR_REREG_TRANS;
                ib_umem_release(mr->umem);
+               mr->umem = NULL;
                err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
                                  &npages, &page_shift, &ncont, &order);
                if (err)
index 7ed4b70..87b7c1b 100644 (file)
@@ -259,7 +259,11 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
        } else {
                if (ucmd) {
                        qp->rq.wqe_cnt = ucmd->rq_wqe_count;
+                       if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
+                               return -EINVAL;
                        qp->rq.wqe_shift = ucmd->rq_wqe_shift;
+                       if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig)
+                               return -EINVAL;
                        qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
                        qp->rq.max_post = qp->rq.wqe_cnt;
                } else {
@@ -2451,18 +2455,18 @@ enum {
 
 static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
 {
-       if (rate == IB_RATE_PORT_CURRENT) {
+       if (rate == IB_RATE_PORT_CURRENT)
                return 0;
-       } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
+
+       if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS)
                return -EINVAL;
-       } else {
-               while (rate != IB_RATE_2_5_GBPS &&
-                      !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
-                        MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
-                       --rate;
-       }
 
-       return rate + MLX5_STAT_RATE_OFFSET;
+       while (rate != IB_RATE_PORT_CURRENT &&
+              !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
+                MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
+               --rate;
+
+       return rate ? rate + MLX5_STAT_RATE_OFFSET : rate;
 }
 
 static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
index 0a75164..007d5e8 100644 (file)
@@ -461,7 +461,7 @@ static bool nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
 /**
  * nes_netdev_start_xmit
  */
-static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
        struct nes_vnic *nesvnic = netdev_priv(netdev);
        struct nes_device *nesdev = nesvnic->nesdev;
index 61927c1..4cf1106 100644 (file)
@@ -390,7 +390,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
                .name   = "IB_OPCODE_RC_SEND_ONLY_INV",
                .mask   = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
                                | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
-                               | RXE_END_MASK,
+                               | RXE_END_MASK  | RXE_START_MASK,
                .length = RXE_BTH_BYTES + RXE_IETH_BYTES,
                .offset = {
                        [RXE_BTH]       = 0,
index 7bdaf71..7851999 100644 (file)
@@ -728,7 +728,6 @@ next_wqe:
                rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
 
                if (ret == -EAGAIN) {
-                       kfree_skb(skb);
                        rxe_run_task(&qp->req.task, 1);
                        goto exit;
                }
index a65c996..955ff3b 100644 (file)
@@ -742,7 +742,6 @@ static enum resp_states read_reply(struct rxe_qp *qp,
        err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
        if (err) {
                pr_err("Failed sending RDMA reply.\n");
-               kfree_skb(skb);
                return RESPST_ERR_RNR;
        }
 
@@ -954,10 +953,8 @@ static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
        }
 
        err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
-       if (err) {
+       if (err)
                pr_err_ratelimited("Failed sending ack\n");
-               kfree_skb(skb);
-       }
 
 err1:
        return err;
@@ -1141,7 +1138,6 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
                        if (rc) {
                                pr_err("Failed resending result. This flow is not handled - skb ignored\n");
                                rxe_drop_ref(qp);
-                               kfree_skb(skb_copy);
                                rc = RESPST_CLEANUP;
                                goto out;
                        }
index 161ba8c..cf291f9 100644 (file)
@@ -1094,7 +1094,7 @@ drop_and_unlock:
        spin_unlock_irqrestore(&priv->lock, flags);
 }
 
-static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = ipoib_priv(dev);
        struct rdma_netdev *rn = netdev_priv(dev);
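
The nes and ipoib hunks above are plain prototype conversions: an .ndo_start_xmit handler must return netdev_tx_t rather than int. A minimal sketch of the expected shape, with a placeholder body:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t demo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* a real driver would queue the skb to hardware here */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static const struct net_device_ops demo_netdev_ops = {
	.ndo_start_xmit = demo_start_xmit,
};
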
index c74ee96..99db8fe 100644 (file)
@@ -1,6 +1,6 @@
 config INFINIBAND_SRP
        tristate "InfiniBand SCSI RDMA Protocol"
-       depends on SCSI
+       depends on SCSI && INFINIBAND_ADDR_TRANS
        select SCSI_SRP_ATTRS
        ---help---
          Support for the SCSI RDMA Protocol over InfiniBand.  This
index 31ee83d..fb8b718 100644 (file)
@@ -1,6 +1,6 @@
 config INFINIBAND_SRPT
        tristate "InfiniBand SCSI RDMA Protocol target support"
-       depends on INFINIBAND && TARGET_CORE
+       depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE
        ---help---
 
          Support for the SCSI RDMA Protocol (SRP) Target driver. The
index 46115a3..c81c79d 100644 (file)
@@ -31,6 +31,7 @@
 enum evdev_clock_type {
        EV_CLK_REAL = 0,
        EV_CLK_MONO,
+       EV_CLK_BOOT,
        EV_CLK_MAX
 };
 
@@ -197,10 +198,12 @@ static int evdev_set_clk_type(struct evdev_client *client, unsigned int clkid)
        case CLOCK_REALTIME:
                clk_type = EV_CLK_REAL;
                break;
-       case CLOCK_BOOTTIME:
        case CLOCK_MONOTONIC:
                clk_type = EV_CLK_MONO;
                break;
+       case CLOCK_BOOTTIME:
+               clk_type = EV_CLK_BOOT;
+               break;
        default:
                return -EINVAL;
        }
@@ -311,6 +314,8 @@ static void evdev_events(struct input_handle *handle,
 
        ev_time[EV_CLK_MONO] = ktime_get();
        ev_time[EV_CLK_REAL] = ktime_mono_to_real(ev_time[EV_CLK_MONO]);
+       ev_time[EV_CLK_BOOT] = ktime_mono_to_any(ev_time[EV_CLK_MONO],
+                                                TK_OFFS_BOOT);
 
        rcu_read_lock();
 
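With the evdev change above, CLOCK_BOOTTIME gets its own timestamp base instead of being silently aliased to CLOCK_MONOTONIC. A small userspace sketch of opting in through the EVIOCSCLOCKID ioctl; the device node path is only an example:

#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	int clkid = CLOCK_BOOTTIME;
	int fd = open("/dev/input/event0", O_RDONLY);

	if (fd < 0 || ioctl(fd, EVIOCSCLOCKID, &clkid) < 0) {
		perror("EVIOCSCLOCKID");
		return 1;
	}
	/* struct input_event timestamps from this fd now use CLOCK_BOOTTIME */
	return 0;
}
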
index 766bf26..5f04b2d 100644 (file)
@@ -88,6 +88,7 @@ static int input_leds_connect(struct input_handler *handler,
                              const struct input_device_id *id)
 {
        struct input_leds *leds;
+       struct input_led *led;
        unsigned int num_leds;
        unsigned int led_code;
        int led_no;
@@ -119,14 +120,13 @@ static int input_leds_connect(struct input_handler *handler,
 
        led_no = 0;
        for_each_set_bit(led_code, dev->ledbit, LED_CNT) {
-               struct input_led *led = &leds->leds[led_no];
+               if (!input_led_info[led_code].name)
+                       continue;
 
+               led = &leds->leds[led_no];
                led->handle = &leds->handle;
                led->code = led_code;
 
-               if (!input_led_info[led_code].name)
-                       continue;
-
                led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s",
                                           dev_name(&dev->dev),
                                           input_led_info[led_code].name);
index 0a67f23..38f9501 100644 (file)
@@ -583,7 +583,7 @@ static void alps_process_trackstick_packet_v3(struct psmouse *psmouse)
 
        x = (s8)(((packet[0] & 0x20) << 2) | (packet[1] & 0x7f));
        y = (s8)(((packet[0] & 0x10) << 3) | (packet[2] & 0x7f));
-       z = packet[4] & 0x7c;
+       z = packet[4] & 0x7f;
 
        /*
         * The x and y values tend to be quite large, and when used
index 76edbf2..082defc 100644 (file)
@@ -147,8 +147,11 @@ static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi,
        if (len > RMI_SPI_XFER_SIZE_LIMIT)
                return -EINVAL;
 
-       if (rmi_spi->xfer_buf_size < len)
-               rmi_spi_manage_pools(rmi_spi, len);
+       if (rmi_spi->xfer_buf_size < len) {
+               ret = rmi_spi_manage_pools(rmi_spi, len);
+               if (ret < 0)
+                       return ret;
+       }
 
        if (addr == 0)
                /*
index 4f15496..3e613af 100644 (file)
@@ -362,7 +362,7 @@ config TOUCHSCREEN_HIDEEP
 
          If unsure, say N.
 
-         To compile this driver as a moudle, choose M here : the
+         To compile this driver as a module, choose M here : the
          module will be called hideep_ts.
 
 config TOUCHSCREEN_ILI210X
index 5d9699f..0919472 100644 (file)
@@ -280,7 +280,8 @@ struct mxt_data {
        struct input_dev *input_dev;
        char phys[64];          /* device physical location */
        struct mxt_object *object_table;
-       struct mxt_info info;
+       struct mxt_info *info;
+       void *raw_info_block;
        unsigned int irq;
        unsigned int max_x;
        unsigned int max_y;
@@ -460,12 +461,13 @@ static int mxt_lookup_bootloader_address(struct mxt_data *data, bool retry)
 {
        u8 appmode = data->client->addr;
        u8 bootloader;
+       u8 family_id = data->info ? data->info->family_id : 0;
 
        switch (appmode) {
        case 0x4a:
        case 0x4b:
                /* Chips after 1664S use different scheme */
-               if (retry || data->info.family_id >= 0xa2) {
+               if (retry || family_id >= 0xa2) {
                        bootloader = appmode - 0x24;
                        break;
                }
@@ -692,7 +694,7 @@ mxt_get_object(struct mxt_data *data, u8 type)
        struct mxt_object *object;
        int i;
 
-       for (i = 0; i < data->info.object_num; i++) {
+       for (i = 0; i < data->info->object_num; i++) {
                object = data->object_table + i;
                if (object->type == type)
                        return object;
@@ -1462,12 +1464,12 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
                data_pos += offset;
        }
 
-       if (cfg_info.family_id != data->info.family_id) {
+       if (cfg_info.family_id != data->info->family_id) {
                dev_err(dev, "Family ID mismatch!\n");
                return -EINVAL;
        }
 
-       if (cfg_info.variant_id != data->info.variant_id) {
+       if (cfg_info.variant_id != data->info->variant_id) {
                dev_err(dev, "Variant ID mismatch!\n");
                return -EINVAL;
        }
@@ -1512,7 +1514,7 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
 
        /* Malloc memory to store configuration */
        cfg_start_ofs = MXT_OBJECT_START +
-                       data->info.object_num * sizeof(struct mxt_object) +
+                       data->info->object_num * sizeof(struct mxt_object) +
                        MXT_INFO_CHECKSUM_SIZE;
        config_mem_size = data->mem_size - cfg_start_ofs;
        config_mem = kzalloc(config_mem_size, GFP_KERNEL);
@@ -1563,20 +1565,6 @@ release_mem:
        return ret;
 }
 
-static int mxt_get_info(struct mxt_data *data)
-{
-       struct i2c_client *client = data->client;
-       struct mxt_info *info = &data->info;
-       int error;
-
-       /* Read 7-byte info block starting at address 0 */
-       error = __mxt_read_reg(client, 0, sizeof(*info), info);
-       if (error)
-               return error;
-
-       return 0;
-}
-
 static void mxt_free_input_device(struct mxt_data *data)
 {
        if (data->input_dev) {
@@ -1591,9 +1579,10 @@ static void mxt_free_object_table(struct mxt_data *data)
        video_unregister_device(&data->dbg.vdev);
        v4l2_device_unregister(&data->dbg.v4l2);
 #endif
-
-       kfree(data->object_table);
        data->object_table = NULL;
+       data->info = NULL;
+       kfree(data->raw_info_block);
+       data->raw_info_block = NULL;
        kfree(data->msg_buf);
        data->msg_buf = NULL;
        data->T5_address = 0;
@@ -1609,34 +1598,18 @@ static void mxt_free_object_table(struct mxt_data *data)
        data->max_reportid = 0;
 }
 
-static int mxt_get_object_table(struct mxt_data *data)
+static int mxt_parse_object_table(struct mxt_data *data,
+                                 struct mxt_object *object_table)
 {
        struct i2c_client *client = data->client;
-       size_t table_size;
-       struct mxt_object *object_table;
-       int error;
        int i;
        u8 reportid;
        u16 end_address;
 
-       table_size = data->info.object_num * sizeof(struct mxt_object);
-       object_table = kzalloc(table_size, GFP_KERNEL);
-       if (!object_table) {
-               dev_err(&data->client->dev, "Failed to allocate memory\n");
-               return -ENOMEM;
-       }
-
-       error = __mxt_read_reg(client, MXT_OBJECT_START, table_size,
-                       object_table);
-       if (error) {
-               kfree(object_table);
-               return error;
-       }
-
        /* Valid Report IDs start counting from 1 */
        reportid = 1;
        data->mem_size = 0;
-       for (i = 0; i < data->info.object_num; i++) {
+       for (i = 0; i < data->info->object_num; i++) {
                struct mxt_object *object = object_table + i;
                u8 min_id, max_id;
 
@@ -1660,8 +1633,8 @@ static int mxt_get_object_table(struct mxt_data *data)
 
                switch (object->type) {
                case MXT_GEN_MESSAGE_T5:
-                       if (data->info.family_id == 0x80 &&
-                           data->info.version < 0x20) {
+                       if (data->info->family_id == 0x80 &&
+                           data->info->version < 0x20) {
                                /*
                                 * On mXT224 firmware versions prior to V2.0
                                 * read and discard unused CRC byte otherwise
@@ -1716,24 +1689,102 @@ static int mxt_get_object_table(struct mxt_data *data)
        /* If T44 exists, T5 position has to be directly after */
        if (data->T44_address && (data->T5_address != data->T44_address + 1)) {
                dev_err(&client->dev, "Invalid T44 position\n");
-               error = -EINVAL;
-               goto free_object_table;
+               return -EINVAL;
        }
 
        data->msg_buf = kcalloc(data->max_reportid,
                                data->T5_msg_size, GFP_KERNEL);
-       if (!data->msg_buf) {
-               dev_err(&client->dev, "Failed to allocate message buffer\n");
+       if (!data->msg_buf)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static int mxt_read_info_block(struct mxt_data *data)
+{
+       struct i2c_client *client = data->client;
+       int error;
+       size_t size;
+       void *id_buf, *buf;
+       uint8_t num_objects;
+       u32 calculated_crc;
+       u8 *crc_ptr;
+
+       /* If info block already allocated, free it */
+       if (data->raw_info_block)
+               mxt_free_object_table(data);
+
+       /* Read 7-byte ID information block starting at address 0 */
+       size = sizeof(struct mxt_info);
+       id_buf = kzalloc(size, GFP_KERNEL);
+       if (!id_buf)
+               return -ENOMEM;
+
+       error = __mxt_read_reg(client, 0, size, id_buf);
+       if (error)
+               goto err_free_mem;
+
+       /* Resize buffer to give space for rest of info block */
+       num_objects = ((struct mxt_info *)id_buf)->object_num;
+       size += (num_objects * sizeof(struct mxt_object))
+               + MXT_INFO_CHECKSUM_SIZE;
+
+       buf = krealloc(id_buf, size, GFP_KERNEL);
+       if (!buf) {
                error = -ENOMEM;
-               goto free_object_table;
+               goto err_free_mem;
+       }
+       id_buf = buf;
+
+       /* Read rest of info block */
+       error = __mxt_read_reg(client, MXT_OBJECT_START,
+                              size - MXT_OBJECT_START,
+                              id_buf + MXT_OBJECT_START);
+       if (error)
+               goto err_free_mem;
+
+       /* Extract & calculate checksum */
+       crc_ptr = id_buf + size - MXT_INFO_CHECKSUM_SIZE;
+       data->info_crc = crc_ptr[0] | (crc_ptr[1] << 8) | (crc_ptr[2] << 16);
+
+       calculated_crc = mxt_calculate_crc(id_buf, 0,
+                                          size - MXT_INFO_CHECKSUM_SIZE);
+
+       /*
+        * A CRC mismatch can be caused by data corruption from an I2C comms
+        * issue, or by a device that is not using the Object Based Protocol
+        * (e.g. i2c-hid).
+        */
+       if ((data->info_crc == 0) || (data->info_crc != calculated_crc)) {
+               dev_err(&client->dev,
+                       "Info Block CRC error calculated=0x%06X read=0x%06X\n",
+                       calculated_crc, data->info_crc);
+               error = -EIO;
+               goto err_free_mem;
+       }
+
+       data->raw_info_block = id_buf;
+       data->info = (struct mxt_info *)id_buf;
+
+       dev_info(&client->dev,
+                "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n",
+                data->info->family_id, data->info->variant_id,
+                data->info->version >> 4, data->info->version & 0xf,
+                data->info->build, data->info->object_num);
+
+       /* Parse object table information */
+       error = mxt_parse_object_table(data, id_buf + MXT_OBJECT_START);
+       if (error) {
+               dev_err(&client->dev, "Error %d parsing object table\n", error);
+               mxt_free_object_table(data);
+               goto err_free_mem;
        }
 
-       data->object_table = object_table;
+       data->object_table = (struct mxt_object *)(id_buf + MXT_OBJECT_START);
 
        return 0;
 
-free_object_table:
-       mxt_free_object_table(data);
+err_free_mem:
+       kfree(id_buf);
        return error;
 }
 
@@ -2046,7 +2097,7 @@ static int mxt_initialize(struct mxt_data *data)
        int error;
 
        while (1) {
-               error = mxt_get_info(data);
+               error = mxt_read_info_block(data);
                if (!error)
                        break;
 
@@ -2077,16 +2128,9 @@ static int mxt_initialize(struct mxt_data *data)
                msleep(MXT_FW_RESET_TIME);
        }
 
-       /* Get object table information */
-       error = mxt_get_object_table(data);
-       if (error) {
-               dev_err(&client->dev, "Error %d reading object table\n", error);
-               return error;
-       }
-
        error = mxt_acquire_irq(data);
        if (error)
-               goto err_free_object_table;
+               return error;
 
        error = request_firmware_nowait(THIS_MODULE, true, MXT_CFG_NAME,
                                        &client->dev, GFP_KERNEL, data,
@@ -2094,14 +2138,10 @@ static int mxt_initialize(struct mxt_data *data)
        if (error) {
                dev_err(&client->dev, "Failed to invoke firmware loader: %d\n",
                        error);
-               goto err_free_object_table;
+               return error;
        }
 
        return 0;
-
-err_free_object_table:
-       mxt_free_object_table(data);
-       return error;
 }
 
 static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep)
@@ -2162,7 +2202,7 @@ recheck:
 static u16 mxt_get_debug_value(struct mxt_data *data, unsigned int x,
                               unsigned int y)
 {
-       struct mxt_info *info = &data->info;
+       struct mxt_info *info = data->info;
        struct mxt_dbg *dbg = &data->dbg;
        unsigned int ofs, page;
        unsigned int col = 0;
@@ -2490,7 +2530,7 @@ static const struct video_device mxt_video_device = {
 
 static void mxt_debug_init(struct mxt_data *data)
 {
-       struct mxt_info *info = &data->info;
+       struct mxt_info *info = data->info;
        struct mxt_dbg *dbg = &data->dbg;
        struct mxt_object *object;
        int error;
@@ -2576,7 +2616,6 @@ static int mxt_configure_objects(struct mxt_data *data,
                                 const struct firmware *cfg)
 {
        struct device *dev = &data->client->dev;
-       struct mxt_info *info = &data->info;
        int error;
 
        error = mxt_init_t7_power_cfg(data);
@@ -2601,11 +2640,6 @@ static int mxt_configure_objects(struct mxt_data *data,
 
        mxt_debug_init(data);
 
-       dev_info(dev,
-                "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n",
-                info->family_id, info->variant_id, info->version >> 4,
-                info->version & 0xf, info->build, info->object_num);
-
        return 0;
 }
 
@@ -2614,7 +2648,7 @@ static ssize_t mxt_fw_version_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
 {
        struct mxt_data *data = dev_get_drvdata(dev);
-       struct mxt_info *info = &data->info;
+       struct mxt_info *info = data->info;
        return scnprintf(buf, PAGE_SIZE, "%u.%u.%02X\n",
                         info->version >> 4, info->version & 0xf, info->build);
 }
@@ -2624,7 +2658,7 @@ static ssize_t mxt_hw_version_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
 {
        struct mxt_data *data = dev_get_drvdata(dev);
-       struct mxt_info *info = &data->info;
+       struct mxt_info *info = data->info;
        return scnprintf(buf, PAGE_SIZE, "%u.%u\n",
                         info->family_id, info->variant_id);
 }
@@ -2663,7 +2697,7 @@ static ssize_t mxt_object_show(struct device *dev,
                return -ENOMEM;
 
        error = 0;
-       for (i = 0; i < data->info.object_num; i++) {
+       for (i = 0; i < data->info->object_num; i++) {
                object = data->object_table + i;
 
                if (!mxt_object_readable(object->type))
@@ -3034,6 +3068,15 @@ static const struct dmi_system_id mxt_dmi_table[] = {
                },
                .driver_data = samus_platform_data,
        },
+       {
+               /* Samsung Chromebook Pro */
+               .ident = "Samsung Chromebook Pro",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Caroline"),
+               },
+               .driver_data = samus_platform_data,
+       },
        {
                /* Other Google Chromebooks */
                .ident = "Chromebook",
@@ -3254,6 +3297,11 @@ static SIMPLE_DEV_PM_OPS(mxt_pm_ops, mxt_suspend, mxt_resume);
 
 static const struct of_device_id mxt_of_match[] = {
        { .compatible = "atmel,maxtouch", },
+       /* Compatibles listed below are deprecated */
+       { .compatible = "atmel,qt602240_ts", },
+       { .compatible = "atmel,atmel_mxt_ts", },
+       { .compatible = "atmel,atmel_mxt_tp", },
+       { .compatible = "atmel,mXT224", },
        {},
 };
 MODULE_DEVICE_TABLE(of, mxt_of_match);
index 2a99f0f..8fb8c73 100644 (file)
@@ -83,7 +83,6 @@
 
 static DEFINE_SPINLOCK(amd_iommu_devtable_lock);
 static DEFINE_SPINLOCK(pd_bitmap_lock);
-static DEFINE_SPINLOCK(iommu_table_lock);
 
 /* List of all available dev_data structures */
 static LLIST_HEAD(dev_data_list);
@@ -3562,6 +3561,7 @@ EXPORT_SYMBOL(amd_iommu_device_info);
  *****************************************************************************/
 
 static struct irq_chip amd_ir_chip;
+static DEFINE_SPINLOCK(iommu_table_lock);
 
 static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
 {
index f05f3cf..ddcbbdb 100644 (file)
@@ -167,40 +167,16 @@ EXPORT_SYMBOL(iommu_put_dma_cookie);
  * @list: Reserved region list from iommu_get_resv_regions()
  *
  * IOMMU drivers can use this to implement their .get_resv_regions callback
- * for general non-IOMMU-specific reservations. Currently, this covers host
- * bridge windows for PCI devices and GICv3 ITS region reservation on ACPI
- * based ARM platforms that may require HW MSI reservation.
+ * for general non-IOMMU-specific reservations. Currently, this covers GICv3
+ * ITS region reservation on ACPI based ARM platforms that may require HW MSI
+ * reservation.
  */
 void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
 {
-       struct pci_host_bridge *bridge;
-       struct resource_entry *window;
-
-       if (!is_of_node(dev->iommu_fwspec->iommu_fwnode) &&
-               iort_iommu_msi_get_resv_regions(dev, list) < 0)
-               return;
-
-       if (!dev_is_pci(dev))
-               return;
-
-       bridge = pci_find_host_bridge(to_pci_dev(dev)->bus);
-       resource_list_for_each_entry(window, &bridge->windows) {
-               struct iommu_resv_region *region;
-               phys_addr_t start;
-               size_t length;
-
-               if (resource_type(window->res) != IORESOURCE_MEM)
-                       continue;
 
-               start = window->res->start - window->offset;
-               length = window->res->end - window->res->start + 1;
-               region = iommu_alloc_resv_region(start, length, 0,
-                               IOMMU_RESV_RESERVED);
-               if (!region)
-                       return;
+       if (!is_of_node(dev->iommu_fwspec->iommu_fwnode))
+               iort_iommu_msi_get_resv_regions(dev, list);
 
-               list_add_tail(&region->list, list);
-       }
 }
 EXPORT_SYMBOL(iommu_dma_get_resv_regions);
 
@@ -229,6 +205,23 @@ static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
        return 0;
 }
 
+static void iova_reserve_pci_windows(struct pci_dev *dev,
+               struct iova_domain *iovad)
+{
+       struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
+       struct resource_entry *window;
+       unsigned long lo, hi;
+
+       resource_list_for_each_entry(window, &bridge->windows) {
+               if (resource_type(window->res) != IORESOURCE_MEM)
+                       continue;
+
+               lo = iova_pfn(iovad, window->res->start - window->offset);
+               hi = iova_pfn(iovad, window->res->end - window->offset);
+               reserve_iova(iovad, lo, hi);
+       }
+}
+
 static int iova_reserve_iommu_regions(struct device *dev,
                struct iommu_domain *domain)
 {
@@ -238,6 +231,9 @@ static int iova_reserve_iommu_regions(struct device *dev,
        LIST_HEAD(resv_regions);
        int ret = 0;
 
+       if (dev_is_pci(dev))
+               iova_reserve_pci_windows(to_pci_dev(dev), iovad);
+
        iommu_get_resv_regions(dev, &resv_regions);
        list_for_each_entry(region, &resv_regions, list) {
                unsigned long lo, hi;
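
Aside (sketch using the same helpers as the hunk above, names of my wrapper are illustrative): the non-obvious step when reserving a host-bridge window directly in the IOVA domain is translating the window from CPU physical addresses to bus addresses by subtracting the window offset:

static void reserve_one_window(struct iova_domain *iovad,
			       const struct resource *res,
			       resource_size_t offset)
{
	/* bus address = CPU address - window offset */
	unsigned long lo = iova_pfn(iovad, res->start - offset);
	unsigned long hi = iova_pfn(iovad, res->end - offset);

	reserve_iova(iovad, lo, hi);
}
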
index accf583..460bed4 100644 (file)
@@ -1345,7 +1345,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
        struct qi_desc desc;
 
        if (mask) {
-               BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
+               WARN_ON_ONCE(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1));
                addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
                desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
        } else
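
Aside (illustrative sketch, not part of the patch): the 1 -> 1ULL change matters because the shift count can exceed 31. Assuming a 4 KiB VT-d page (i.e. VTD_PAGE_SHIFT == 12), a mask of 20 describes a 2^20-page (4 GiB) span, so the alignment check must be done in 64 bits; the non-zero-mask path above is equivalent to:

static u64 dev_iotlb_size_encode(u64 addr, unsigned int mask)
{
	/* mask != 0 case only; 12 stands in for the assumed VTD_PAGE_SHIFT */
	u64 span = 1ULL << (12 + mask);		/* bytes covered by the flush */

	WARN_ON_ONCE(addr & (span - 1));	/* address must be span-aligned */
	return addr | (span / 2 - 1);		/* low bits encode the size */
}
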
index 66f69af..3062a15 100644 (file)
@@ -1136,7 +1136,7 @@ static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force)
        irte->dest_id = IRTE_DEST(cfg->dest_apicid);
 
        /* Update the hardware only if the interrupt is in remapped mode. */
-       if (!force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
+       if (force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
                modify_irte(&ir_data->irq_2_iommu, irte);
 }
 
index 5fc8656..0468acf 100644 (file)
@@ -1098,7 +1098,7 @@ static int rk_iommu_of_xlate(struct device *dev,
        data->iommu = platform_get_drvdata(iommu_dev);
        dev->archdata.iommu = data;
 
-       of_dev_put(iommu_dev);
+       platform_device_put(iommu_dev);
 
        return 0;
 }
@@ -1175,8 +1175,15 @@ static int rk_iommu_probe(struct platform_device *pdev)
        for (i = 0; i < iommu->num_clocks; ++i)
                iommu->clocks[i].id = rk_iommu_clocks[i];
 
+       /*
+        * iommu clocks should be present for all new devices and devicetrees
+        * but there are older devicetrees without clocks out in the wild.
+        * So treat clocks as optional for the time being.
+        */
        err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
-       if (err)
+       if (err == -ENOENT)
+               iommu->num_clocks = 0;
+       else if (err)
                return err;
 
        err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
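
Aside (sketch under the same assumptions as the hunk above, helper name illustrative): treating a missing clocks property as "no clocks" rather than a probe failure looks like this in isolation:

static int rk_iommu_get_optional_clocks(struct device *dev, int *num_clocks,
					struct clk_bulk_data *clks)
{
	int err = devm_clk_bulk_get(dev, *num_clocks, clks);

	if (err == -ENOENT) {
		/* older devicetree without clocks: run without them */
		*num_clocks = 0;
		return 0;
	}

	return err;	/* 0 on success, or a real error such as -EPROBE_DEFER */
}
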
index f312659..7f0c0be 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -68,7 +68,7 @@ static void combiner_handle_irq(struct irq_desc *desc)
 
                bit = readl_relaxed(combiner->regs[reg].addr);
                status = bit & combiner->regs[reg].enabled;
-               if (!status)
+               if (bit && !status)
                        pr_warn_ratelimited("Unexpected IRQ on CPU%d: (%08x %08lx %p)\n",
                                            smp_processor_id(), bit,
                                            combiner->regs[reg].enabled,
index 004cc3c..7fa2631 100644 (file)
@@ -290,7 +290,7 @@ do {                                                                        \
                if (kthread_should_stop() ||                            \
                    test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) {  \
                        set_current_state(TASK_RUNNING);                \
-                       return 0;                                       \
+                       goto out;                                       \
                }                                                       \
                                                                        \
                schedule();                                             \
@@ -378,6 +378,9 @@ retry_invalidate:
                        bch_prio_write(ca);
                }
        }
+out:
+       wait_for_kthread_stop();
+       return 0;
 }
 
 /* Allocation */
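
Aside (generic kthread idiom, not the bcache wait_for_kthread_stop() helper itself): a kthread must not return before kthread_stop() has been called on it, otherwise the stopping side operates on a task that may already have exited. The usual race-free shape of the final wait, with an illustrative function name, is:

static int example_alloc_thread(void *arg)
{
	/* ... main work loop runs until there is nothing left to do ... */

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
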
index d338b70..3a0cfb2 100644 (file)
@@ -392,6 +392,8 @@ struct cached_dev {
 #define DEFAULT_CACHED_DEV_ERROR_LIMIT 64
        atomic_t                io_errors;
        unsigned                error_limit;
+
+       char                    backing_dev_name[BDEVNAME_SIZE];
 };
 
 enum alloc_reserve {
@@ -464,6 +466,8 @@ struct cache {
        atomic_long_t           meta_sectors_written;
        atomic_long_t           btree_sectors_written;
        atomic_long_t           sectors_written;
+
+       char                    cache_dev_name[BDEVNAME_SIZE];
 };
 
 struct gc_stat {
index 028f7b3..4e63c6f 100644 (file)
@@ -106,7 +106,6 @@ void bch_btree_verify(struct btree *b)
 
 void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 {
-       char name[BDEVNAME_SIZE];
        struct bio *check;
        struct bio_vec bv, cbv;
        struct bvec_iter iter, citer = { 0 };
@@ -134,7 +133,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
                                        bv.bv_len),
                                 dc->disk.c,
                                 "verify failed at dev %s sector %llu",
-                                bdevname(dc->bdev, name),
+                                dc->backing_dev_name,
                                 (uint64_t) bio->bi_iter.bi_sector);
 
                kunmap_atomic(p1);
index 7fac97a..2ddf851 100644 (file)
@@ -52,7 +52,6 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c,
 /* IO errors */
 void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
 {
-       char buf[BDEVNAME_SIZE];
        unsigned errors;
 
        WARN_ONCE(!dc, "NULL pointer of struct cached_dev");
@@ -60,7 +59,7 @@ void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
        errors = atomic_add_return(1, &dc->io_errors);
        if (errors < dc->error_limit)
                pr_err("%s: IO error on backing device, unrecoverable",
-                       bio_devname(bio, buf));
+                       dc->backing_dev_name);
        else
                bch_cached_dev_error(dc);
 }
@@ -105,19 +104,18 @@ void bch_count_io_errors(struct cache *ca,
        }
 
        if (error) {
-               char buf[BDEVNAME_SIZE];
                unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
                                                    &ca->io_errors);
                errors >>= IO_ERROR_SHIFT;
 
                if (errors < ca->set->error_limit)
                        pr_err("%s: IO error on %s%s",
-                              bdevname(ca->bdev, buf), m,
+                              ca->cache_dev_name, m,
                               is_read ? ", recovering." : ".");
                else
                        bch_cache_set_error(ca->set,
                                            "%s: too many IO errors %s",
-                                           bdevname(ca->bdev, buf), m);
+                                           ca->cache_dev_name, m);
        }
 }
 
index a65e336..8e3e865 100644 (file)
@@ -649,11 +649,8 @@ static void backing_request_endio(struct bio *bio)
                 */
                if (unlikely(s->iop.writeback &&
                             bio->bi_opf & REQ_PREFLUSH)) {
-                       char buf[BDEVNAME_SIZE];
-
-                       bio_devname(bio, buf);
                        pr_err("Can't flush %s: returned bi_status %i",
-                               buf, bio->bi_status);
+                               dc->backing_dev_name, bio->bi_status);
                } else {
                        /* set to orig_bio->bi_status in bio_complete() */
                        s->iop.status = bio->bi_status;
index d90d9e5..3dea06b 100644 (file)
@@ -936,7 +936,6 @@ static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
 static void cached_dev_detach_finish(struct work_struct *w)
 {
        struct cached_dev *dc = container_of(w, struct cached_dev, detach);
-       char buf[BDEVNAME_SIZE];
        struct closure cl;
        closure_init_stack(&cl);
 
@@ -967,7 +966,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
 
        mutex_unlock(&bch_register_lock);
 
-       pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));
+       pr_info("Caching disabled for %s", dc->backing_dev_name);
 
        /* Drop ref we took in cached_dev_detach() */
        closure_put(&dc->disk.cl);
@@ -999,29 +998,28 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 {
        uint32_t rtime = cpu_to_le32(get_seconds());
        struct uuid_entry *u;
-       char buf[BDEVNAME_SIZE];
        struct cached_dev *exist_dc, *t;
 
-       bdevname(dc->bdev, buf);
-
        if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
            (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
                return -ENOENT;
 
        if (dc->disk.c) {
-               pr_err("Can't attach %s: already attached", buf);
+               pr_err("Can't attach %s: already attached",
+                      dc->backing_dev_name);
                return -EINVAL;
        }
 
        if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
-               pr_err("Can't attach %s: shutting down", buf);
+               pr_err("Can't attach %s: shutting down",
+                      dc->backing_dev_name);
                return -EINVAL;
        }
 
        if (dc->sb.block_size < c->sb.block_size) {
                /* Will die */
                pr_err("Couldn't attach %s: block size less than set's block size",
-                      buf);
+                      dc->backing_dev_name);
                return -EINVAL;
        }
 
@@ -1029,7 +1027,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
        list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
                if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
                        pr_err("Tried to attach %s but duplicate UUID already attached",
-                               buf);
+                               dc->backing_dev_name);
 
                        return -EINVAL;
                }
@@ -1047,13 +1045,15 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 
        if (!u) {
                if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
-                       pr_err("Couldn't find uuid for %s in set", buf);
+                       pr_err("Couldn't find uuid for %s in set",
+                              dc->backing_dev_name);
                        return -ENOENT;
                }
 
                u = uuid_find_empty(c);
                if (!u) {
-                       pr_err("Not caching %s, no room for UUID", buf);
+                       pr_err("Not caching %s, no room for UUID",
+                              dc->backing_dev_name);
                        return -EINVAL;
                }
        }
@@ -1112,7 +1112,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
        up_write(&dc->writeback_lock);
 
        pr_info("Caching %s as %s on set %pU",
-               bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
+               dc->backing_dev_name,
+               dc->disk.disk->disk_name,
                dc->disk.c->sb.set_uuid);
        return 0;
 }
@@ -1225,10 +1226,10 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
                                 struct block_device *bdev,
                                 struct cached_dev *dc)
 {
-       char name[BDEVNAME_SIZE];
        const char *err = "cannot allocate memory";
        struct cache_set *c;
 
+       bdevname(bdev, dc->backing_dev_name);
        memcpy(&dc->sb, sb, sizeof(struct cache_sb));
        dc->bdev = bdev;
        dc->bdev->bd_holder = dc;
@@ -1237,6 +1238,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
        bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page;
        get_page(sb_page);
 
+
        if (cached_dev_init(dc, sb->block_size << 9))
                goto err;
 
@@ -1247,7 +1249,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
        if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
                goto err;
 
-       pr_info("registered backing device %s", bdevname(bdev, name));
+       pr_info("registered backing device %s", dc->backing_dev_name);
 
        list_add(&dc->list, &uncached_devices);
        list_for_each_entry(c, &bch_cache_sets, list)
@@ -1259,7 +1261,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
 
        return;
 err:
-       pr_notice("error %s: %s", bdevname(bdev, name), err);
+       pr_notice("error %s: %s", dc->backing_dev_name, err);
        bcache_device_stop(&dc->disk);
 }
 
@@ -1367,7 +1369,7 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size)
 
 bool bch_cached_dev_error(struct cached_dev *dc)
 {
-       char name[BDEVNAME_SIZE];
+       struct cache_set *c;
 
        if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
                return false;
@@ -1377,7 +1379,22 @@ bool bch_cached_dev_error(struct cached_dev *dc)
        smp_mb();
 
        pr_err("stop %s: too many IO errors on backing device %s\n",
-               dc->disk.disk->disk_name, bdevname(dc->bdev, name));
+               dc->disk.disk->disk_name, dc->backing_dev_name);
+
+       /*
+        * If the cached device is still attached to a cache set,
+        * internal cache I/O (writeback scan or garbage collection)
+        * may still prevent the bcache device from being stopped,
+        * even though dc->io_disable is true and no more I/O
+        * requests are accepted. So CACHE_SET_IO_DISABLE should be
+        * set in c->flags too, so that internal I/O to the cache
+        * device is rejected and stopped immediately.
+        * If c is NULL, the bcache device is not attached to any
+        * cache set, so there is no CACHE_SET_IO_DISABLE bit to set.
+        */
+       c = dc->disk.c;
+       if (c && test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
+               pr_info("CACHE_SET_IO_DISABLE already set");
 
        bcache_device_stop(&dc->disk);
        return true;
@@ -1395,7 +1412,7 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
                return false;
 
        if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
-               pr_warn("CACHE_SET_IO_DISABLE already set");
+               pr_info("CACHE_SET_IO_DISABLE already set");
 
        /* XXX: we can be called from atomic context
        acquire_console_sem();
@@ -1539,6 +1556,20 @@ static void conditional_stop_bcache_device(struct cache_set *c,
                 */
                pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.",
                        d->disk->disk_name);
+                       /*
+                        * There might be a small time gap in which the cache
+                        * set is released but the bcache device is not. During
+                        * this gap, regular I/O requests go directly to the
+                        * backing device, since no cache set is attached. In
+                        * writeback mode with a dirty cache this may also
+                        * leave potentially inconsistent data behind.
+                        * Therefore, before calling bcache_device_stop() due
+                        * to a broken cache device, dc->io_disable should be
+                        * explicitly set to true.
+                        */
+                       dc->io_disable = true;
+                       /* make others know io_disable is true earlier */
+                       smp_mb();
                        bcache_device_stop(d);
        } else {
                /*
@@ -2003,12 +2034,10 @@ static int cache_alloc(struct cache *ca)
 static int register_cache(struct cache_sb *sb, struct page *sb_page,
                                struct block_device *bdev, struct cache *ca)
 {
-       char name[BDEVNAME_SIZE];
        const char *err = NULL; /* must be set for any error case */
        int ret = 0;
 
-       bdevname(bdev, name);
-
+       bdevname(bdev, ca->cache_dev_name);
        memcpy(&ca->sb, sb, sizeof(struct cache_sb));
        ca->bdev = bdev;
        ca->bdev->bd_holder = ca;
@@ -2045,14 +2074,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
                goto out;
        }
 
-       pr_info("registered cache device %s", name);
+       pr_info("registered cache device %s", ca->cache_dev_name);
 
 out:
        kobject_put(&ca->kobj);
 
 err:
        if (err)
-               pr_notice("error %s: %s", name, err);
+               pr_notice("error %s: %s", ca->cache_dev_name, err);
 
        return ret;
 }
index 4a9547c..ad45ebe 100644 (file)
@@ -244,8 +244,10 @@ static void dirty_endio(struct bio *bio)
        struct keybuf_key *w = bio->bi_private;
        struct dirty_io *io = w->private;
 
-       if (bio->bi_status)
+       if (bio->bi_status) {
                SET_KEY_DIRTY(&w->key, false);
+               bch_count_backing_io_errors(io->dc, bio);
+       }
 
        closure_put(&io->cl);
 }
index 12aa9ca..dc385b7 100644 (file)
@@ -1681,8 +1681,9 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
 
        if (block_size <= KMALLOC_MAX_SIZE &&
            (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
-               snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", c->block_size);
-               c->slab_cache = kmem_cache_create(slab_name, c->block_size, ARCH_KMALLOC_MINALIGN,
+               unsigned align = min(1U << __ffs(block_size), (unsigned)PAGE_SIZE);
+               snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size);
+               c->slab_cache = kmem_cache_create(slab_name, block_size, align,
                                                  SLAB_RECLAIM_ACCOUNT, NULL);
                if (!c->slab_cache) {
                        r = -ENOMEM;
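
Aside (worked example, assuming the formula above and a 4 KiB PAGE_SIZE): the chosen slab alignment is the largest power of two that divides block_size, capped at one page, e.g. 1536 -> 512, 640 -> 128, 3072 -> 1024. A standalone sketch of the same arithmetic:

static unsigned int bufio_slab_align(unsigned int block_size)
{
	/* lowest set bit, i.e. 1U << __ffs(block_size) */
	unsigned int align = block_size & -block_size;

	return align < 4096 ? align : 4096;	/* 4096 stands in for PAGE_SIZE */
}
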
index 1d0af0a..84814e8 100644 (file)
@@ -166,7 +166,7 @@ static bool max_work_reached(struct background_tracker *b)
                atomic_read(&b->pending_demotes) >= b->max_work;
 }
 
-struct bt_work *alloc_work(struct background_tracker *b)
+static struct bt_work *alloc_work(struct background_tracker *b)
 {
        if (max_work_reached(b))
                return NULL;
index 77d9fe5..514fb4a 100644 (file)
@@ -2440,7 +2440,7 @@ static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, str
        unsigned i;
        for (i = 0; i < ic->journal_sections; i++)
                kvfree(sl[i]);
-       kfree(sl);
+       kvfree(sl);
 }
 
 static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl)
index 580c49c..5903e49 100644 (file)
@@ -23,6 +23,8 @@
 
 #define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */
 
+#define MAX_NR_MIRRORS (DM_KCOPYD_MAX_REGIONS + 1)
+
 #define DM_RAID1_HANDLE_ERRORS 0x01
 #define DM_RAID1_KEEP_LOG      0x02
 #define errors_handled(p)      ((p)->features & DM_RAID1_HANDLE_ERRORS)
@@ -255,7 +257,7 @@ static int mirror_flush(struct dm_target *ti)
        unsigned long error_bits;
 
        unsigned int i;
-       struct dm_io_region io[ms->nr_mirrors];
+       struct dm_io_region io[MAX_NR_MIRRORS];
        struct mirror *m;
        struct dm_io_request io_req = {
                .bi_op = REQ_OP_WRITE,
@@ -651,7 +653,7 @@ static void write_callback(unsigned long error, void *context)
 static void do_write(struct mirror_set *ms, struct bio *bio)
 {
        unsigned int i;
-       struct dm_io_region io[ms->nr_mirrors], *dest = io;
+       struct dm_io_region io[MAX_NR_MIRRORS], *dest = io;
        struct mirror *m;
        struct dm_io_request io_req = {
                .bi_op = REQ_OP_WRITE,
@@ -1083,7 +1085,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        argc -= args_used;
 
        if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 ||
-           nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
+           nr_mirrors < 2 || nr_mirrors > MAX_NR_MIRRORS) {
                ti->error = "Invalid number of mirrors";
                dm_dirty_log_destroy(dl);
                return -EINVAL;
@@ -1404,7 +1406,7 @@ static void mirror_status(struct dm_target *ti, status_type_t type,
        int num_feature_args = 0;
        struct mirror_set *ms = (struct mirror_set *) ti->private;
        struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
-       char buffer[ms->nr_mirrors + 1];
+       char buffer[MAX_NR_MIRRORS + 1];
 
        switch (type) {
        case STATUSTYPE_INFO:
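
Aside (illustrative sketch): the pattern in this file replaces on-stack variable-length arrays with a compile-time upper bound so the code builds without VLAs; only the first ms->nr_mirrors entries are used, and the constructor rejects anything above the bound. EXAMPLE_MAX_NR_MIRRORS below is a hypothetical stand-in for MAX_NR_MIRRORS:

/* hypothetical stand-in for MAX_NR_MIRRORS (DM_KCOPYD_MAX_REGIONS + 1) */
#define EXAMPLE_MAX_NR_MIRRORS	9

static int example_mirror_write(unsigned int nr_mirrors)
{
	struct dm_io_region io[EXAMPLE_MAX_NR_MIRRORS];	/* fixed bound, no VLA */
	unsigned int i;

	if (nr_mirrors > EXAMPLE_MAX_NR_MIRRORS)	/* enforced at ctr time */
		return -EINVAL;

	for (i = 0; i < nr_mirrors; i++)
		memset(&io[i], 0, sizeof(io[i]));	/* fill one region per mirror */

	return 0;
}
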
index 4ea404d..0a7b010 100644 (file)
@@ -1020,7 +1020,8 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
 
 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
-               sector_t sector, int *srcu_idx)
+                                               sector_t sector, int *srcu_idx)
+       __acquires(md->io_barrier)
 {
        struct dm_table *map;
        struct dm_target *ti;
@@ -1037,7 +1038,7 @@ static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
 }
 
 static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
-               long nr_pages, void **kaddr, pfn_t *pfn)
+                                long nr_pages, void **kaddr, pfn_t *pfn)
 {
        struct mapped_device *md = dax_get_private(dax_dev);
        sector_t sector = pgoff * PAGE_SECTORS;
@@ -1065,7 +1066,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
 }
 
 static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
-               void *addr, size_t bytes, struct iov_iter *i)
+                                   void *addr, size_t bytes, struct iov_iter *i)
 {
        struct mapped_device *md = dax_get_private(dax_dev);
        sector_t sector = pgoff * PAGE_SECTORS;
index e216cd7..b07114b 100644 (file)
@@ -20,7 +20,7 @@
 //
 // VBI support (2004) and cleanups (2005) by Hans Verkuil <hverkuil@xs4all.nl>
 //
-// Copyright (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org>
+// Copyright (c) 2005-2006 Mauro Carvalho Chehab <mchehab@kernel.org>
 //     SAA7111, SAA7113 and SAA7118 support
 
 #include "saa711x_regs.h"
index a50d480..44fabe0 100644 (file)
@@ -2,7 +2,7 @@
  * SPDX-License-Identifier: GPL-2.0+
  * saa711x - Philips SAA711x video decoder register specifications
  *
- * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org>
  */
 
 #define R_00_CHIP_VERSION                             0x00
index 1c5c61d..9b4f212 100644 (file)
@@ -8,7 +8,7 @@
  * Muting and tone control by Jonathan Isom <jisom@ematic.com>
  *
  * Copyright (c) 2000 Eric Sandeen <eric_sandeen@bigfoot.com>
- * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org>
  * This code is placed under the terms of the GNU General Public License
  * Based on tda9855.c by Steve VanDeBogart (vandebo@uclink.berkeley.edu)
  * Which was based on tda8425.c by Greg Alexander (c) 1998
index 2476d81..1734ed4 100644 (file)
@@ -2,7 +2,7 @@
 //
 // tvp5150 - Texas Instruments TVP5150A/AM1 and TVP5151 video decoder driver
 //
-// Copyright (c) 2005,2006 Mauro Carvalho Chehab <mchehab@infradead.org>
+// Copyright (c) 2005,2006 Mauro Carvalho Chehab <mchehab@kernel.org>
 
 #include <dt-bindings/media/tvp5150.h>
 #include <linux/i2c.h>
index c43b7b8..d3a764c 100644 (file)
@@ -3,7 +3,7 @@
  *
  * tvp5150 - Texas Instruments TVP5150A/AM1 video decoder registers
  *
- * Copyright (c) 2005,2006 Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Copyright (c) 2005,2006 Mauro Carvalho Chehab <mchehab@kernel.org>
  */
 
 #define TVP5150_VD_IN_SRC_SEL_1      0x00 /* Video input source selection #1 */
index a26c1a3..4599b7e 100644 (file)
@@ -5,7 +5,7 @@
  * Author: Santiago Nunez-Corrales <santiago.nunez@ridgerun.com>
  *
  * This code is partially based upon the TVP5150 driver
- * written by Mauro Carvalho Chehab (mchehab@infradead.org),
+ * written by Mauro Carvalho Chehab <mchehab@kernel.org>,
  * the TVP514x driver written by Vaibhav Hiremath <hvaibhav@ti.com>
  * and the TVP7002 driver in the TI LSP 2.10.00.14. Revisions by
  * Muralidharan Karicheri and Snehaprabha Narnakaje (TI).
index 3c8c8b0..7f56ba6 100644 (file)
@@ -5,7 +5,7 @@
  * Author: Santiago Nunez-Corrales <santiago.nunez@ridgerun.com>
  *
  * This code is partially based upon the TVP5150 driver
- * written by Mauro Carvalho Chehab (mchehab@infradead.org),
+ * written by Mauro Carvalho Chehab <mchehab@kernel.org>,
  * the TVP514x driver written by Vaibhav Hiremath <hvaibhav@ti.com>
  * and the TVP7002 driver in the TI LSP 2.10.00.14
  *
index 67ac51e..6b87a72 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) 2010 Nokia Corporation
  *
  * Based on drivers/media/video/v4l2_dev.c code authored by
- *     Mauro Carvalho Chehab <mchehab@infradead.org> (version 2)
+ *     Mauro Carvalho Chehab <mchehab@kernel.org> (version 2)
  *     Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1)
  *
  * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
index 9f1f916..346fc7f 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Handlers for board audio hooks, split from bttv-cards
  *
- * Copyright (c) 2006 Mauro Carvalho Chehab (mchehab@infradead.org)
+ * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org>
  * This code is placed under the terms of the GNU General Public License
  */
 
index 159d07a..be16a53 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Handlers for board audio hooks, split from bttv-cards
  *
- * Copyright (c) 2006 Mauro Carvalho Chehab (mchehab@infradead.org)
+ * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org>
  * This code is placed under the terms of the GNU General Public License
  */
 
index 1902732..2616243 100644 (file)
@@ -2447,7 +2447,7 @@ struct tvcard bttv_tvcards[] = {
        },
                /* ---- card 0x88---------------------------------- */
        [BTTV_BOARD_ACORP_Y878F] = {
-               /* Mauro Carvalho Chehab <mchehab@infradead.org> */
+               /* Mauro Carvalho Chehab <mchehab@kernel.org> */
                .name           = "Acorp Y878F",
                .video_inputs   = 3,
                /* .audio_inputs= 1, */
@@ -2688,7 +2688,7 @@ struct tvcard bttv_tvcards[] = {
        },
        [BTTV_BOARD_ENLTV_FM_2] = {
                /* Encore TV Tuner Pro ENL TV-FM-2
-                  Mauro Carvalho Chehab <mchehab@infradead.org */
+                  Mauro Carvalho Chehab <mchehab@kernel.org> */
                .name           = "Encore ENL TV-FM-2",
                .video_inputs   = 3,
                /* .audio_inputs= 1, */
index 707f57a..de3f44b 100644 (file)
@@ -13,7 +13,7 @@
     (c) 2005-2006 Nickolay V. Shmyrev <nshmyrev@yandex.ru>
 
     Fixes to be fully V4L2 compliant by
-    (c) 2006 Mauro Carvalho Chehab <mchehab@infradead.org>
+    (c) 2006 Mauro Carvalho Chehab <mchehab@kernel.org>
 
     Cropping and overscan support
     Copyright (C) 2005, 2006 Michael H. Schimek <mschimek@gmx.at>
index eccd1e3..c76823e 100644 (file)
@@ -8,7 +8,7 @@
                           & Marcus Metzler (mocm@thp.uni-koeln.de)
     (c) 1999-2003 Gerd Knorr <kraxel@bytesex.org>
 
-    (c) 2005 Mauro Carvalho Chehab <mchehab@infradead.org>
+    (c) 2005 Mauro Carvalho Chehab <mchehab@kernel.org>
        - Multituner support and i2c address binding
 
     This program is free software; you can redistribute it and/or modify
index be49589..395ff9b 100644 (file)
@@ -13,7 +13,7 @@
  *  Copyright (C) 2008 <srinivasa.deevi at conexant dot com>
  *  Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it>
  *                    Markus Rechberger <mrechberger@gmail.com>
- *                    Mauro Carvalho Chehab <mchehab@infradead.org>
+ *                    Mauro Carvalho Chehab <mchehab@kernel.org>
  *                    Sascha Sommer <saschasommer@freenet.de>
  *  Copyright (C) 2004, 2005 Chris Pascoe
  *  Copyright (C) 2003, 2004 Gerd Knorr
index ab09bb5..8a28fda 100644 (file)
@@ -4,7 +4,7 @@
  *
  *    (c) 2007 Trent Piepho <xyzzy@speakeasy.org>
  *    (c) 2005,2006 Ricardo Cerqueira <v4l@cerqueira.org>
- *    (c) 2005 Mauro Carvalho Chehab <mchehab@infradead.org>
+ *    (c) 2005 Mauro Carvalho Chehab <mchehab@kernel.org>
  *    Based on a dummy cx88 module by Gerd Knorr <kraxel@bytesex.org>
  *    Based on dummy.c by Jaroslav Kysela <perex@perex.cz>
  *
@@ -103,7 +103,7 @@ MODULE_PARM_DESC(index, "Index value for cx88x capture interface(s).");
 
 MODULE_DESCRIPTION("ALSA driver module for cx2388x based TV cards");
 MODULE_AUTHOR("Ricardo Cerqueira");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(CX88_VERSION);
 
index 0e0952e..7a4876c 100644 (file)
@@ -5,7 +5,7 @@
  *    (c) 2004 Jelle Foks <jelle@foks.us>
  *    (c) 2004 Gerd Knorr <kraxel@bytesex.org>
  *
- *    (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org>
+ *    (c) 2005-2006 Mauro Carvalho Chehab <mchehab@kernel.org>
  *        - video_ioctl2 conversion
  *
  *  Includes parts from the ivtv driver <http://sourceforge.net/projects/ivtv/>
index 8bfa5b7..60988e9 100644 (file)
@@ -4,7 +4,7 @@
  *
  * (c) 2003 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]
  *
- * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org>
+ * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@kernel.org>
  *     - Multituner support
  *     - video_ioctl2 conversion
  *     - PAL/M fixes
index f769277..99f88a0 100644 (file)
@@ -8,7 +8,7 @@
  * (c) 2002 Yurij Sysoev <yurij@naturesoft.net>
  * (c) 1999-2003 Gerd Knorr <kraxel@bytesex.org>
  *
- * (c) 2005 Mauro Carvalho Chehab <mchehab@infradead.org>
+ * (c) 2005 Mauro Carvalho Chehab <mchehab@kernel.org>
  *     - Multituner support and i2c address binding
  *
  * This program is free software; you can redistribute it and/or modify
index 9be682c..7b113ba 100644 (file)
@@ -5,7 +5,7 @@
  *
  * (c) 2003-04 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]
  *
- * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org>
+ * (c) 2005-2006 Mauro Carvalho Chehab <mchehab@kernel.org>
  *     - Multituner support
  *     - video_ioctl2 conversion
  *     - PAL/M fixes
index 5ef635e..4c52ac6 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright 1997 M. Kirkwood
  *
  * Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@cisco.com>
- * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
  * Converted to new API by Alan Cox <alan@lxorguk.ukuu.org.uk>
  * Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org>
  *
index 9e12c60..840b7d6 100644 (file)
@@ -2,7 +2,7 @@
  * radio-aztech.c - Aztech radio card driver
  *
  * Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@xs4all.nl>
- * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
  * Adapted to support the Video for Linux API by
  * Russell Kroll <rkroll@exploits.org>.  Based on original tuner code by:
  *
index 3ff4c4e..f051f86 100644 (file)
@@ -15,7 +15,7 @@
  *    Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org>
  *
  * Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@cisco.com>
- * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
  *
  * Note: this card seems to swap the left and right audio channels!
  *
index 95f06f3..e4e7587 100644 (file)
@@ -27,7 +27,7 @@
  * BUGS:
  *   - card unmutes if you change frequency
  *
- * (c) 2006, 2007 by Mauro Carvalho Chehab <mchehab@infradead.org>:
+ * (c) 2006, 2007 by Mauro Carvalho Chehab <mchehab@kernel.org>:
  *     - Conversion to V4L2 API
  *      - Uses video_ioctl2 for parsing and to add debug support
  */
index abeaedd..5a1470e 100644 (file)
@@ -7,7 +7,7 @@
  * Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org>
  *
  * Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@cisco.com>
- * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
  *
  * Fully tested with actual hardware and the v4l2-compliance tool.
  */
index fc4e63d..4f9b97e 100644 (file)
@@ -13,7 +13,7 @@
  *  No volume control - only mute/unmute - you have to use line volume
  *  control on SB-part of SF16-FMI/SF16-FMP/SF16-FMD
  *
- * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
  */
 
 #include <linux/kernel.h>      /* __setup                      */
index 4f116ea..1af8f29 100644 (file)
@@ -17,7 +17,7 @@
  *  Volume Control is done digitally
  *
  * Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@cisco.com>
- * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
  */
 
 #include <linux/module.h>      /* Modules                      */
index 26a8c60..a4bad32 100644 (file)
@@ -12,7 +12,7 @@
  * Scott McGrath    (smcgrath@twilight.vtc.vsc.edu)
  * William McGrath  (wmcgrath@twilight.vtc.vsc.edu)
  *
- * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
  */
 
 #include <stdarg.h>
index eb72a4d..d0d67ad 100644 (file)
@@ -25,7 +25,7 @@
  * The frequency change is necessary since the card never seems to be
  * completely silent.
  *
- * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@kernel.org>
  */
 
 #include <linux/module.h>      /* Modules                        */
index 026e88e..6007cd0 100644 (file)
@@ -27,7 +27,7 @@
  * 2002-07-15 - Fix Stereo typo
  *
  * 2006-07-24 - Converted to V4L2 API
- *             by Mauro Carvalho Chehab <mchehab@infradead.org>
+ *             by Mauro Carvalho Chehab <mchehab@kernel.org>
  *
  * Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@cisco.com>
  *
index f6977df..d275d98 100644 (file)
@@ -12,7 +12,7 @@
  *
  * On Avermedia M135A with IR model RM-JX, the same codes exist on both
  * Positivo (BR) and original IR, initial version and remote control codes
- * added by Mauro Carvalho Chehab <mchehab@infradead.org>
+ * added by Mauro Carvalho Chehab <mchehab@kernel.org>
  *
  * Positivo also ships Avermedia M135A with model RM-K6, extra control
  * codes added by Herton Ronaldo Krzesinski <herton@mandriva.com.br>
index e4e78c1..057c13b 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/module.h>
 
 /* Encore ENLTV-FM v5.3
-   Mauro Carvalho Chehab <mchehab@infradead.org>
+   Mauro Carvalho Chehab <mchehab@kernel.org>
  */
 
 static struct rc_map_table encore_enltv_fm53[] = {
index c3d4437..cd05559 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/module.h>
 
 /* Encore ENLTV2-FM  - silver plastic - "Wand Media" written at the bottom
-    Mauro Carvalho Chehab <mchehab@infradead.org> */
+    Mauro Carvalho Chehab <mchehab@kernel.org> */
 
 static struct rc_map_table encore_enltv2[] = {
        { 0x4c, KEY_POWER2 },
index f0f88df..a000513 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/module.h>
 
 /* Kaiomy TVnPC U2
-   Mauro Carvalho Chehab <mchehab@infradead.org>
+   Mauro Carvalho Chehab <mchehab@kernel.org>
  */
 
 static struct rc_map_table kaiomy[] = {
index 453e043..db5edde 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/module.h>
 
 /* Kworld Plus TV Analog Lite PCI IR
-   Mauro Carvalho Chehab <mchehab@infradead.org>
+   Mauro Carvalho Chehab <mchehab@kernel.org>
  */
 
 static struct rc_map_table kworld_plus_tv_analog[] = {
index 791130f..e4e34f2 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/module.h>
 
 /*
-   Mauro Carvalho Chehab <mchehab@infradead.org>
+   Mauro Carvalho Chehab <mchehab@kernel.org>
    present on PV MPEG 8000GT
  */
 
index 88b3e80..d78a2bd 100644 (file)
@@ -2,7 +2,7 @@
 // For Philips TEA5761 FM Chip
 // I2C address is always 0x20 (0x10 at 7-bit mode).
 //
-// Copyright (c) 2005-2007 Mauro Carvalho Chehab (mchehab@infradead.org)
+// Copyright (c) 2005-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
 
 #include <linux/i2c.h>
 #include <linux/slab.h>
@@ -337,5 +337,5 @@ EXPORT_SYMBOL_GPL(tea5761_attach);
 EXPORT_SYMBOL_GPL(tea5761_autodetection);
 
 MODULE_DESCRIPTION("Philips TEA5761 FM tuner driver");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
 MODULE_LICENSE("GPL v2");
index 2b2c064..016d0d5 100644 (file)
@@ -2,7 +2,7 @@
 // For Philips TEA5767 FM Chip used on some TV Cards like Prolink Pixelview
 // I2C address is always 0xC0.
 //
-// Copyright (c) 2005 Mauro Carvalho Chehab (mchehab@infradead.org)
+// Copyright (c) 2005 Mauro Carvalho Chehab <mchehab@kernel.org>
 //
 // tea5767 autodetection thanks to Torsten Seeboth and Atsushi Nakagawa
 // from their contributions on DScaler.
@@ -469,5 +469,5 @@ EXPORT_SYMBOL_GPL(tea5767_attach);
 EXPORT_SYMBOL_GPL(tea5767_autodetection);
 
 MODULE_DESCRIPTION("Philips TEA5767 FM tuner driver");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
 MODULE_LICENSE("GPL v2");
index bb0437c..50d017a 100644 (file)
@@ -5,7 +5,7 @@
  * This file includes internal tipes to be used inside tuner-xc2028.
  * Shouldn't be included outside tuner-xc2028
  *
- * Copyright (c) 2007-2008 Mauro Carvalho Chehab (mchehab@infradead.org)
+ * Copyright (c) 2007-2008 Mauro Carvalho Chehab <mchehab@kernel.org>
  */
 
 /* xc3028 firmware types */
index fca85e0..84744e1 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 // tuner-xc2028
 //
-// Copyright (c) 2007-2008 Mauro Carvalho Chehab (mchehab@infradead.org)
+// Copyright (c) 2007-2008 Mauro Carvalho Chehab <mchehab@kernel.org>
 //
 // Copyright (c) 2007 Michel Ludwig (michel.ludwig@gmail.com)
 //       - frontend interface
@@ -1518,7 +1518,7 @@ EXPORT_SYMBOL(xc2028_attach);
 
 MODULE_DESCRIPTION("Xceive xc2028/xc3028 tuner driver");
 MODULE_AUTHOR("Michel Ludwig <michel.ludwig@gmail.com>");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
 MODULE_LICENSE("GPL v2");
 MODULE_FIRMWARE(XC2028_DEFAULT_FIRMWARE);
 MODULE_FIRMWARE(XC3028L_DEFAULT_FIRMWARE);
index 03fd6d4..7b58bc0 100644 (file)
@@ -2,7 +2,7 @@
  * SPDX-License-Identifier: GPL-2.0
  * tuner-xc2028
  *
- * Copyright (c) 2007-2008 Mauro Carvalho Chehab (mchehab@infradead.org)
+ * Copyright (c) 2007-2008 Mauro Carvalho Chehab <mchehab@kernel.org>
  */
 
 #ifndef __TUNER_XC2028_H__
index 3c2694a..d1e66b5 100644 (file)
@@ -2,7 +2,7 @@
 //
 // em28xx-camera.c - driver for Empia EM25xx/27xx/28xx USB video capture devices
 //
-// Copyright (C) 2009 Mauro Carvalho Chehab <mchehab@infradead.org>
+// Copyright (C) 2009 Mauro Carvalho Chehab <mchehab@kernel.org>
 // Copyright (C) 2013 Frank Schäfer <fschaefer.oss@googlemail.com>
 //
 // This program is free software; you can redistribute it and/or modify
index 6e0e67d..7c3203d 100644 (file)
@@ -5,7 +5,7 @@
 //
 // Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it>
 //                   Markus Rechberger <mrechberger@gmail.com>
-//                   Mauro Carvalho Chehab <mchehab@infradead.org>
+//                   Mauro Carvalho Chehab <mchehab@kernel.org>
 //                   Sascha Sommer <saschasommer@freenet.de>
 // Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com>
 //
index 36d341f..f289953 100644 (file)
@@ -4,7 +4,7 @@
 //
 // Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it>
 //                   Markus Rechberger <mrechberger@gmail.com>
-//                   Mauro Carvalho Chehab <mchehab@infradead.org>
+//                   Mauro Carvalho Chehab <mchehab@kernel.org>
 //                   Sascha Sommer <saschasommer@freenet.de>
 // Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com>
 //
@@ -32,7 +32,7 @@
 
 #define DRIVER_AUTHOR "Ludovico Cavedon <cavedon@sssup.it>, " \
                      "Markus Rechberger <mrechberger@gmail.com>, " \
-                     "Mauro Carvalho Chehab <mchehab@infradead.org>, " \
+                     "Mauro Carvalho Chehab <mchehab@kernel.org>, " \
                      "Sascha Sommer <saschasommer@freenet.de>"
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
index a54cb8d..3f493e0 100644 (file)
@@ -2,7 +2,7 @@
 //
 // DVB device driver for em28xx
 //
-// (c) 2008-2011 Mauro Carvalho Chehab <mchehab@infradead.org>
+// (c) 2008-2011 Mauro Carvalho Chehab <mchehab@kernel.org>
 //
 // (c) 2008 Devin Heitmueller <devin.heitmueller@gmail.com>
 //     - Fixes for the driver to properly work with HVR-950
@@ -63,7 +63,7 @@
 #include "tc90522.h"
 #include "qm1d1c0042.h"
 
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION(DRIVER_DESC " - digital TV interface");
 MODULE_VERSION(EM28XX_VERSION);
index 9151bcc..6458682 100644 (file)
@@ -4,7 +4,7 @@
 //
 // Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it>
 //                   Markus Rechberger <mrechberger@gmail.com>
-//                   Mauro Carvalho Chehab <mchehab@infradead.org>
+//                   Mauro Carvalho Chehab <mchehab@kernel.org>
 //                   Sascha Sommer <saschasommer@freenet.de>
 // Copyright (C) 2013 Frank Schäfer <fschaefer.oss@googlemail.com>
 //
index 2dc1be0..f84a120 100644 (file)
@@ -4,7 +4,7 @@
 //
 // Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it>
 //                   Markus Rechberger <mrechberger@gmail.com>
-//                   Mauro Carvalho Chehab <mchehab@infradead.org>
+//                   Mauro Carvalho Chehab <mchehab@kernel.org>
 //                   Sascha Sommer <saschasommer@freenet.de>
 //
 // This program is free software; you can redistribute it and/or modify
index d70ee13..68571bf 100644 (file)
@@ -5,7 +5,7 @@
 //
 // Copyright (C) 2005 Ludovico Cavedon <cavedon@sssup.it>
 //                   Markus Rechberger <mrechberger@gmail.com>
-//                   Mauro Carvalho Chehab <mchehab@infradead.org>
+//                   Mauro Carvalho Chehab <mchehab@kernel.org>
 //                   Sascha Sommer <saschasommer@freenet.de>
 // Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com>
 //
@@ -44,7 +44,7 @@
 
 #define DRIVER_AUTHOR "Ludovico Cavedon <cavedon@sssup.it>, " \
                      "Markus Rechberger <mrechberger@gmail.com>, " \
-                     "Mauro Carvalho Chehab <mchehab@infradead.org>, " \
+                     "Mauro Carvalho Chehab <mchehab@kernel.org>, " \
                      "Sascha Sommer <saschasommer@freenet.de>"
 
 static unsigned int isoc_debug;
index 63c7c61..b0378e7 100644 (file)
@@ -4,7 +4,7 @@
  *
  * Copyright (C) 2005 Markus Rechberger <mrechberger@gmail.com>
  *                   Ludovico Cavedon <cavedon@sssup.it>
- *                   Mauro Carvalho Chehab <mchehab@infradead.org>
+ *                   Mauro Carvalho Chehab <mchehab@kernel.org>
  * Copyright (C) 2012 Frank Schäfer <fschaefer.oss@googlemail.com>
  *
  * Based on the em2800 driver from Sascha Sommer <saschasommer@freenet.de>
index a1bd94e..71fda38 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * zc030x registers
  *
- * Copyright (c) 2008 Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Copyright (c) 2008 Mauro Carvalho Chehab <mchehab@kernel.org>
  *
  * The register aliases used here came from this driver:
  *     http://zc0302.sourceforge.net/zc0302.php
index 70939e9..23df50a 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 // tm6000-cards.c - driver for TM5600/TM6000/TM6010 USB video capture devices
 //
-// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
 
 #include <linux/init.h>
 #include <linux/module.h>
index 23a1332..d3229aa 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 // tm6000-core.c - driver for TM5600/TM6000/TM6010 USB video capture devices
 //
-// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
 //
 // Copyright (c) 2007 Michel Ludwig <michel.ludwig@gmail.com>
 //     - DVB-T support
index c9a62bb..659b63f 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 // tm6000-i2c.c - driver for TM5600/TM6000/TM6010 USB video capture devices
 //
-// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
 //
 // Copyright (c) 2007 Michel Ludwig <michel.ludwig@gmail.com>
 //     - Fix SMBus Read Byte command
index 21587fc..d104246 100644 (file)
@@ -2,7 +2,7 @@
  * SPDX-License-Identifier: GPL-2.0
  * tm6000-regs.h - driver for TM5600/TM6000/TM6010 USB video capture devices
  *
- * Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
  */
 
 /*
index 5c615b0..b275dbc 100644 (file)
@@ -2,7 +2,7 @@
  * SPDX-License-Identifier: GPL-2.0
  * tm6000-buf.c - driver for TM5600/TM6000/TM6010 USB video capture devices
  *
- * Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
  */
 
 #include <linux/videodev2.h>
index b2399d4..aa85fe3 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 // tm6000-video.c - driver for TM5600/TM6000/TM6010 USB video capture devices
 //
-// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+// Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
 //
 // Copyright (c) 2007 Michel Ludwig <michel.ludwig@gmail.com>
 //     - Fixed module load/unload
index e1e4577..0864ed7 100644 (file)
@@ -2,7 +2,7 @@
  * SPDX-License-Identifier: GPL-2.0
  * tm6000.h - driver for TM5600/TM6000/TM6010 USB video capture devices
  *
- * Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+ * Copyright (c) 2006-2007 Mauro Carvalho Chehab <mchehab@kernel.org>
  *
  * Copyright (c) 2007 Michel Ludwig <michel.ludwig@gmail.com>
  *     - DVB-T support
index 1d0b220..c080dcc 100644 (file)
@@ -10,7 +10,7 @@
  *     2 of the License, or (at your option) any later version.
  *
  * Authors:    Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1)
- *              Mauro Carvalho Chehab <mchehab@infradead.org> (version 2)
+ *              Mauro Carvalho Chehab <mchehab@kernel.org> (version 2)
  *
  * Fixes:      20000516  Claudio Matsuoka <claudio@conectiva.com>
  *             - Added procfs support
@@ -1072,7 +1072,7 @@ static void __exit videodev_exit(void)
 subsys_initcall(videodev_init);
 module_exit(videodev_exit)
 
-MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab <mchehab@infradead.org>");
+MODULE_AUTHOR("Alan Cox, Mauro Carvalho Chehab <mchehab@kernel.org>");
 MODULE_DESCRIPTION("Device registrar for Video4Linux drivers v2");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_CHARDEV_MAJOR(VIDEO_MAJOR);
index f48c505..de5d96d 100644 (file)
@@ -9,7 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  *
  * Authors:    Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1)
- *              Mauro Carvalho Chehab <mchehab@infradead.org> (version 2)
+ *              Mauro Carvalho Chehab <mchehab@kernel.org> (version 2)
  */
 
 #include <linux/mm.h>
index 2b39818..7491b33 100644 (file)
@@ -1,11 +1,11 @@
 /*
  * generic helper functions for handling video4linux capture buffers
  *
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
  *
  * Highly based on video-buf written originally by:
  * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
- * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
  * (c) 2006 Ted Walther and John Sokol
  *
  * This program is free software; you can redistribute it and/or modify
@@ -38,7 +38,7 @@ static int debug;
 module_param(debug, int, 0644);
 
 MODULE_DESCRIPTION("helper module to manage video4linux buffers");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
 MODULE_LICENSE("GPL");
 
 #define dprintk(level, fmt, arg...)                                    \
index e02353e..f461325 100644 (file)
@@ -7,7 +7,7 @@
  * Copyright (c) 2008 Magnus Damm
  *
  * Based on videobuf-vmalloc.c,
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index add2edb..7770034 100644 (file)
@@ -6,11 +6,11 @@
  * into PAGE_SIZE chunks).  They also assume the driver does not need
  * to touch the video data.
  *
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
  *
  * Highly based on video-buf written originally by:
  * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
- * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
  * (c) 2006 Ted Walther and John Sokol
  *
  * This program is free software; you can redistribute it and/or modify
@@ -48,7 +48,7 @@ static int debug;
 module_param(debug, int, 0644);
 
 MODULE_DESCRIPTION("helper module to manage video4linux dma sg buffers");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
 MODULE_LICENSE("GPL");
 
 #define dprintk(level, fmt, arg...)                                    \
index 2ff7fcc..45fe781 100644 (file)
@@ -6,7 +6,7 @@
  * into PAGE_SIZE chunks).  They also assume the driver does not need
  * to touch the video data.
  *
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -41,7 +41,7 @@ static int debug;
 module_param(debug, int, 0644);
 
 MODULE_DESCRIPTION("helper module to manage video4linux vmalloc buffers");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
 MODULE_LICENSE("GPL");
 
 #define dprintk(level, fmt, arg...)                                    \
index 71a89d5..db80430 100644 (file)
 
 int main(void)
 {
-       DEFINE(EMIF_SDCFG_VAL_OFFSET,
-              offsetof(struct emif_regs_amx3, emif_sdcfg_val));
-       DEFINE(EMIF_TIMING1_VAL_OFFSET,
-              offsetof(struct emif_regs_amx3, emif_timing1_val));
-       DEFINE(EMIF_TIMING2_VAL_OFFSET,
-              offsetof(struct emif_regs_amx3, emif_timing2_val));
-       DEFINE(EMIF_TIMING3_VAL_OFFSET,
-              offsetof(struct emif_regs_amx3, emif_timing3_val));
-       DEFINE(EMIF_REF_CTRL_VAL_OFFSET,
-              offsetof(struct emif_regs_amx3, emif_ref_ctrl_val));
-       DEFINE(EMIF_ZQCFG_VAL_OFFSET,
-              offsetof(struct emif_regs_amx3, emif_zqcfg_val));
-       DEFINE(EMIF_PMCR_VAL_OFFSET,
-              offsetof(struct emif_regs_amx3, emif_pmcr_val));
-       DEFINE(EMIF_PMCR_SHDW_VAL_OFFSET,
-              offsetof(struct emif_regs_amx3, emif_pmcr_shdw_val));
-       DEFINE(EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET,
-              offsetof(struct emif_regs_amx3, emif_rd_wr_level_ramp_ctrl));
-       DEFINE(EMIF_RD_WR_EXEC_THRESH_OFFSET,
-              offsetof(struct emif_regs_amx3, emif_rd_wr_exec_thresh));
-       DEFINE(EMIF_COS_CONFIG_OFFSET,
-              offsetof(struct emif_regs_amx3, emif_cos_config));
-       DEFINE(EMIF_PRIORITY_TO_COS_MAPPING_OFFSET,
-              offsetof(struct emif_regs_amx3, emif_priority_to_cos_mapping));
-       DEFINE(EMIF_CONNECT_ID_SERV_1_MAP_OFFSET,
-              offsetof(struct emif_regs_amx3, emif_connect_id_serv_1_map));
-       DEFINE(EMIF_CONNECT_ID_SERV_2_MAP_OFFSET,
-              offsetof(struct emif_regs_amx3, emif_connect_id_serv_2_map));
-       DEFINE(EMIF_OCP_CONFIG_VAL_OFFSET,
-              offsetof(struct emif_regs_amx3, emif_ocp_config_val));
-       DEFINE(EMIF_LPDDR2_NVM_TIM_OFFSET,
-              offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim));
-       DEFINE(EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET,
-              offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim_shdw));
-       DEFINE(EMIF_DLL_CALIB_CTRL_VAL_OFFSET,
-              offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val));
-       DEFINE(EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET,
-              offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val_shdw));
-       DEFINE(EMIF_DDR_PHY_CTLR_1_OFFSET,
-              offsetof(struct emif_regs_amx3, emif_ddr_phy_ctlr_1));
-       DEFINE(EMIF_EXT_PHY_CTRL_VALS_OFFSET,
-              offsetof(struct emif_regs_amx3, emif_ext_phy_ctrl_vals));
-       DEFINE(EMIF_REGS_AMX3_SIZE, sizeof(struct emif_regs_amx3));
-
-       BLANK();
-
-       DEFINE(EMIF_PM_BASE_ADDR_VIRT_OFFSET,
-              offsetof(struct ti_emif_pm_data, ti_emif_base_addr_virt));
-       DEFINE(EMIF_PM_BASE_ADDR_PHYS_OFFSET,
-              offsetof(struct ti_emif_pm_data, ti_emif_base_addr_phys));
-       DEFINE(EMIF_PM_CONFIG_OFFSET,
-              offsetof(struct ti_emif_pm_data, ti_emif_sram_config));
-       DEFINE(EMIF_PM_REGS_VIRT_OFFSET,
-              offsetof(struct ti_emif_pm_data, regs_virt));
-       DEFINE(EMIF_PM_REGS_PHYS_OFFSET,
-              offsetof(struct ti_emif_pm_data, regs_phys));
-       DEFINE(EMIF_PM_DATA_SIZE, sizeof(struct ti_emif_pm_data));
-
-       BLANK();
-
-       DEFINE(EMIF_PM_SAVE_CONTEXT_OFFSET,
-              offsetof(struct ti_emif_pm_functions, save_context));
-       DEFINE(EMIF_PM_RESTORE_CONTEXT_OFFSET,
-              offsetof(struct ti_emif_pm_functions, restore_context));
-       DEFINE(EMIF_PM_ENTER_SR_OFFSET,
-              offsetof(struct ti_emif_pm_functions, enter_sr));
-       DEFINE(EMIF_PM_EXIT_SR_OFFSET,
-              offsetof(struct ti_emif_pm_functions, exit_sr));
-       DEFINE(EMIF_PM_ABORT_SR_OFFSET,
-              offsetof(struct ti_emif_pm_functions, abort_sr));
-       DEFINE(EMIF_PM_FUNCTIONS_SIZE, sizeof(struct ti_emif_pm_functions));
+       ti_emif_asm_offsets();
 
        return 0;
 }
index 231f3a1..86503f6 100644 (file)
@@ -1994,6 +1994,7 @@ static struct scsi_host_template mptsas_driver_template = {
        .cmd_per_lun                    = 7,
        .use_clustering                 = ENABLE_CLUSTERING,
        .shost_attrs                    = mptscsih_host_attrs,
+       .no_write_same                  = 1,
 };
 
 static int mptsas_get_linkerrors(struct sas_phy *phy)
index d4c07b8..f5695be 100644 (file)
@@ -45,6 +45,7 @@
 #define I82802AB       0x00ad
 #define I82802AC       0x00ac
 #define PF38F4476      0x881c
+#define M28F00AP30     0x8963
 /* STMicroelectronics chips */
 #define M50LPW080       0x002F
 #define M50FLW080A     0x0080
@@ -375,6 +376,17 @@ static void cfi_fixup_major_minor(struct cfi_private *cfi,
                extp->MinorVersion = '1';
 }
 
+static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
+{
+       /*
+        * Micron (formerly Numonyx) 1Gbit bottom-boot parts are buggy
+        * w.r.t. Erase Suspend for their small erase blocks (0x8000)
+        */
+       if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
+               return 1;
+       return 0;
+}
+
 static inline struct cfi_pri_intelext *
 read_pri_intelext(struct map_info *map, __u16 adr)
 {
@@ -831,21 +843,30 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;
 
+               /* Do not allow suspend if reading/writing the EB address */
+               if ((adr & chip->in_progress_block_mask) ==
+                   chip->in_progress_block_addr)
+                       goto sleep;
+
+               /* Do not suspend small EBs on buggy Micron chips */
+               if (cfi_is_micron_28F00AP30(cfi, chip) &&
+                   (chip->in_progress_block_mask == ~(0x8000-1)))
+                       goto sleep;
 
                /* Erase suspend */
-               map_write(map, CMD(0xB0), adr);
+               map_write(map, CMD(0xB0), chip->in_progress_block_addr);
 
                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
-               map_write(map, CMD(0x70), adr);
+               map_write(map, CMD(0x70), chip->in_progress_block_addr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
-                       status = map_read(map, adr);
+                       status = map_read(map, chip->in_progress_block_addr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;
 
@@ -1041,8 +1062,8 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
-               map_write(map, CMD(0xd0), adr);
-               map_write(map, CMD(0x70), adr);
+               map_write(map, CMD(0xd0), chip->in_progress_block_addr);
+               map_write(map, CMD(0x70), chip->in_progress_block_addr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;
@@ -1933,6 +1954,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
        map_write(map, CMD(0xD0), adr);
        chip->state = FL_ERASING;
        chip->erase_suspended = 0;
+       chip->in_progress_block_addr = adr;
+       chip->in_progress_block_mask = ~(len - 1);
 
        ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
                                   adr, len,
index 668e2cb..692902d 100644 (file)
@@ -816,9 +816,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
                    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
                        goto sleep;
 
-               /* We could check to see if we're trying to access the sector
-                * that is currently being erased. However, no user will try
-                * anything like that so we just wait for the timeout. */
+               /* Do not allow suspend if reading/writing the EB address */
+               if ((adr & chip->in_progress_block_mask) ==
+                   chip->in_progress_block_addr)
+                       goto sleep;
 
                /* Erase suspend */
                /* It's harmless to issue the Erase-Suspend and Erase-Resume
@@ -2267,6 +2268,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
        chip->state = FL_ERASING;
        chip->erase_suspended = 0;
        chip->in_progress_block_addr = adr;
+       chip->in_progress_block_mask = ~(map->size - 1);
 
        INVALIDATE_CACHE_UDELAY(map, chip,
                                adr, map->size,
@@ -2356,6 +2358,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
        chip->state = FL_ERASING;
        chip->erase_suspended = 0;
        chip->in_progress_block_addr = adr;
+       chip->in_progress_block_mask = ~(len - 1);
 
        INVALIDATE_CACHE_UDELAY(map, chip,
                                adr, len,
index d0cd6f8..9c9f893 100644 (file)
@@ -162,7 +162,6 @@ int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
                ret = nanddev_erase(nand, &pos);
                if (ret) {
                        einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);
-                       einfo->state = MTD_ERASE_FAILED;
 
                        return ret;
                }
@@ -170,8 +169,6 @@ int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
                nanddev_pos_next_eraseblock(nand, &pos);
        }
 
-       einfo->state = MTD_ERASE_DONE;
-
        return 0;
 }
 EXPORT_SYMBOL_GPL(nanddev_mtd_erase);
index 9c159f0..3211371 100644 (file)
@@ -375,56 +375,42 @@ static int omap2_onenand_read_bufferram(struct mtd_info *mtd, int area,
 {
        struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
        struct onenand_chip *this = mtd->priv;
-       dma_addr_t dma_src, dma_dst;
-       int bram_offset;
+       struct device *dev = &c->pdev->dev;
        void *buf = (void *)buffer;
+       dma_addr_t dma_src, dma_dst;
+       int bram_offset, err;
        size_t xtra;
-       int ret;
 
        bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
-       if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
-               goto out_copy;
-
-       /* panic_write() may be in an interrupt context */
-       if (in_interrupt() || oops_in_progress)
+       /*
+        * Fall back to PIO mode if the buffer address is not DMA-able, the
+        * length is too short to make DMA transfers profitable, or
+        * panic_write() may be running in interrupt context.
+        */
+       if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
+           count < 384 || in_interrupt() || oops_in_progress)
                goto out_copy;
 
-       if (buf >= high_memory) {
-               struct page *p1;
-
-               if (((size_t)buf & PAGE_MASK) !=
-                   ((size_t)(buf + count - 1) & PAGE_MASK))
-                       goto out_copy;
-               p1 = vmalloc_to_page(buf);
-               if (!p1)
-                       goto out_copy;
-               buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
-       }
-
        xtra = count & 3;
        if (xtra) {
                count -= xtra;
                memcpy(buf + count, this->base + bram_offset + count, xtra);
        }
 
+       dma_dst = dma_map_single(dev, buf, count, DMA_FROM_DEVICE);
        dma_src = c->phys_base + bram_offset;
-       dma_dst = dma_map_single(&c->pdev->dev, buf, count, DMA_FROM_DEVICE);
-       if (dma_mapping_error(&c->pdev->dev, dma_dst)) {
-               dev_err(&c->pdev->dev,
-                       "Couldn't DMA map a %d byte buffer\n",
-                       count);
-               goto out_copy;
-       }
 
-       ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
-       dma_unmap_single(&c->pdev->dev, dma_dst, count, DMA_FROM_DEVICE);
-
-       if (ret) {
-               dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
+       if (dma_mapping_error(dev, dma_dst)) {
+               dev_err(dev, "Couldn't DMA map a %d byte buffer\n", count);
                goto out_copy;
        }
 
-       return 0;
+       err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
+       dma_unmap_single(dev, dma_dst, count, DMA_FROM_DEVICE);
+       if (!err)
+               return 0;
+
+       dev_err(dev, "timeout waiting for DMA\n");
 
 out_copy:
        memcpy(buf, this->base + bram_offset, count);
@@ -437,49 +423,34 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
 {
        struct omap2_onenand *c = container_of(mtd, struct omap2_onenand, mtd);
        struct onenand_chip *this = mtd->priv;
-       dma_addr_t dma_src, dma_dst;
-       int bram_offset;
+       struct device *dev = &c->pdev->dev;
        void *buf = (void *)buffer;
-       int ret;
+       dma_addr_t dma_src, dma_dst;
+       int bram_offset, err;
 
        bram_offset = omap2_onenand_bufferram_offset(mtd, area) + area + offset;
-       if (bram_offset & 3 || (size_t)buf & 3 || count < 384)
-               goto out_copy;
-
-       /* panic_write() may be in an interrupt context */
-       if (in_interrupt() || oops_in_progress)
+       /*
+        * Fall back to PIO mode if the buffer address is not DMA-able, the
+        * length is too short to make DMA transfers profitable, or
+        * panic_write() may be running in interrupt context.
+        */
+       if (!virt_addr_valid(buf) || bram_offset & 3 || (size_t)buf & 3 ||
+           count < 384 || in_interrupt() || oops_in_progress)
                goto out_copy;
 
-       if (buf >= high_memory) {
-               struct page *p1;
-
-               if (((size_t)buf & PAGE_MASK) !=
-                   ((size_t)(buf + count - 1) & PAGE_MASK))
-                       goto out_copy;
-               p1 = vmalloc_to_page(buf);
-               if (!p1)
-                       goto out_copy;
-               buf = page_address(p1) + ((size_t)buf & ~PAGE_MASK);
-       }
-
-       dma_src = dma_map_single(&c->pdev->dev, buf, count, DMA_TO_DEVICE);
+       dma_src = dma_map_single(dev, buf, count, DMA_TO_DEVICE);
        dma_dst = c->phys_base + bram_offset;
-       if (dma_mapping_error(&c->pdev->dev, dma_src)) {
-               dev_err(&c->pdev->dev,
-                       "Couldn't DMA map a %d byte buffer\n",
-                       count);
-               return -1;
-       }
-
-       ret = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
-       dma_unmap_single(&c->pdev->dev, dma_src, count, DMA_TO_DEVICE);
-
-       if (ret) {
-               dev_err(&c->pdev->dev, "timeout waiting for DMA\n");
+       if (dma_mapping_error(dev, dma_src)) {
+               dev_err(dev, "Couldn't DMA map a %d byte buffer\n", count);
                goto out_copy;
        }
 
-       return 0;
+       err = omap2_onenand_dma_transfer(c, dma_src, dma_dst, count);
+       dma_unmap_single(dev, dma_src, count, DMA_TO_DEVICE);
+       if (!err)
+               return 0;
+
+       dev_err(dev, "timeout waiting for DMA\n");
 
 out_copy:
        memcpy(this->base + bram_offset, buf, count);
index 10e9532..db5ec4e 100644 (file)
@@ -1074,7 +1074,7 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
                return ret;
 
        ret = marvell_nfc_wait_op(chip,
-                                 chip->data_interface.timings.sdr.tPROG_max);
+                                 PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max));
        return ret;
 }
 
@@ -1408,6 +1408,7 @@ marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
        struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
        struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
        const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
+       u32 xtype;
        int ret;
        struct marvell_nfc_op nfc_op = {
                .ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) | NDCB0_LEN_OVRD,
@@ -1423,7 +1424,12 @@ marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
         * last naked write.
         */
        if (chunk == 0) {
-               nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_WRITE_DISPATCH) |
+               if (lt->nchunks == 1)
+                       xtype = XTYPE_MONOLITHIC_RW;
+               else
+                       xtype = XTYPE_WRITE_DISPATCH;
+
+               nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(xtype) |
                                  NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
                                  NDCB0_CMD1(NAND_CMD_SEQIN);
                nfc_op.ndcb[1] |= NDCB1_ADDRS_PAGE(page);
@@ -1494,7 +1500,7 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct mtd_info *mtd,
        }
 
        ret = marvell_nfc_wait_op(chip,
-                                 chip->data_interface.timings.sdr.tPROG_max);
+                                 PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max));
 
        marvell_nfc_disable_hw_ecc(chip);
 
@@ -2299,29 +2305,20 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
        /*
         * The legacy "num-cs" property indicates the number of CS on the only
         * chip connected to the controller (legacy bindings does not support
-        * more than one chip). CS are only incremented one by one while the RB
-        * pin is always the #0.
+        * more than one chip). The CS and RB pins are always #0.
         *
         * When not using legacy bindings, a couple of "reg" and "nand-rb"
         * properties must be filled. For each chip, expressed as a subnode,
         * "reg" points to the CS lines and "nand-rb" to the RB line.
         */
-       if (pdata) {
+       if (pdata || nfc->caps->legacy_of_bindings) {
                nsels = 1;
-       } else if (nfc->caps->legacy_of_bindings &&
-                  !of_get_property(np, "num-cs", &nsels)) {
-               dev_err(dev, "missing num-cs property\n");
-               return -EINVAL;
-       } else if (!of_get_property(np, "reg", &nsels)) {
-               dev_err(dev, "missing reg property\n");
-               return -EINVAL;
-       }
-
-       if (!pdata)
-               nsels /= sizeof(u32);
-       if (!nsels) {
-               dev_err(dev, "invalid reg property size\n");
-               return -EINVAL;
+       } else {
+               nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
+               if (nsels <= 0) {
+                       dev_err(dev, "missing/invalid reg property\n");
+                       return -EINVAL;
+               }
        }
 
        /* Alloc the nand chip structure */
index 72f3a89..f28c3a5 100644 (file)
@@ -706,12 +706,17 @@ static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
  */
 int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
 {
+       const struct nand_sdr_timings *timings;
        u8 status = 0;
        int ret;
 
        if (!chip->exec_op)
                return -ENOTSUPP;
 
+       /* Wait tWB before polling the STATUS reg. */
+       timings = nand_get_sdr_timings(&chip->data_interface);
+       ndelay(PSEC_TO_NSEC(timings->tWB_max));
+
        ret = nand_status_op(chip, NULL);
        if (ret)
                return ret;
index f54518f..f2052fa 100644 (file)
@@ -645,7 +645,7 @@ static int tango_nand_probe(struct platform_device *pdev)
 
        writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE);
 
-       clk = clk_get(&pdev->dev, NULL);
+       clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);
 
index 4b8e918..5872f31 100644 (file)
@@ -501,7 +501,9 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
        void __iomem *reg_base = cqspi->iobase;
        void __iomem *ahb_base = cqspi->ahb_base;
        unsigned int remaining = n_rx;
+       unsigned int mod_bytes = n_rx % 4;
        unsigned int bytes_to_read = 0;
+       u8 *rxbuf_end = rxbuf + n_rx;
        int ret = 0;
 
        writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
@@ -530,11 +532,24 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
                }
 
                while (bytes_to_read != 0) {
+                       unsigned int word_remain = round_down(remaining, 4);
+
                        bytes_to_read *= cqspi->fifo_width;
                        bytes_to_read = bytes_to_read > remaining ?
                                        remaining : bytes_to_read;
-                       ioread32_rep(ahb_base, rxbuf,
-                                    DIV_ROUND_UP(bytes_to_read, 4));
+                       bytes_to_read = round_down(bytes_to_read, 4);
+                       /* Read 4-byte word chunks, then single bytes */
+                       if (bytes_to_read) {
+                               ioread32_rep(ahb_base, rxbuf,
+                                            (bytes_to_read / 4));
+                       } else if (!word_remain && mod_bytes) {
+                               unsigned int temp = ioread32(ahb_base);
+
+                               bytes_to_read = mod_bytes;
+                               memcpy(rxbuf, &temp, min((unsigned int)
+                                                        (rxbuf_end - rxbuf),
+                                                        bytes_to_read));
+                       }
                        rxbuf += bytes_to_read;
                        remaining -= bytes_to_read;
                        bytes_to_read = cqspi_get_rd_sram_level(cqspi);
index 1ed9529..5eb0df2 100644 (file)
@@ -450,7 +450,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
 {
        int i;
 
-       if (!client_info->slave)
+       if (!client_info->slave || !is_valid_ether_addr(client_info->mac_dst))
                return;
 
        for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
@@ -943,6 +943,10 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
        skb->priority = TC_PRIO_CONTROL;
        skb->dev = slave->dev;
 
+       netdev_dbg(slave->bond->dev,
+                  "Send learning packet: dev %s mac %pM vlan %d\n",
+                  slave->dev->name, mac_addr, vid);
+
        if (vid)
                __vlan_hwaccel_put_tag(skb, vlan_proto, vid);
 
@@ -965,14 +969,13 @@ static int alb_upper_dev_walk(struct net_device *upper, void *_data)
        u8 *mac_addr = data->mac_addr;
        struct bond_vlan_tag *tags;
 
-       if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
-               if (strict_match &&
-                   ether_addr_equal_64bits(mac_addr,
-                                           upper->dev_addr)) {
+       if (is_vlan_dev(upper) &&
+           bond->nest_level == vlan_get_encap_level(upper) - 1) {
+               if (upper->addr_assign_type == NET_ADDR_STOLEN) {
                        alb_send_lp_vid(slave, mac_addr,
                                        vlan_dev_vlan_proto(upper),
                                        vlan_dev_vlan_id(upper));
-               } else if (!strict_match) {
+               } else {
                        alb_send_lp_vid(slave, upper->dev_addr,
                                        vlan_dev_vlan_proto(upper),
                                        vlan_dev_vlan_id(upper));
index b7b1130..1f1e97b 100644 (file)
@@ -1660,8 +1660,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
        } /* switch(bond_mode) */
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-       slave_dev->npinfo = bond->dev->npinfo;
-       if (slave_dev->npinfo) {
+       if (bond->dev->npinfo) {
                if (slave_enable_netpoll(new_slave)) {
                        netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
                        res = -EBUSY;
@@ -1739,6 +1738,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
        if (bond_mode_uses_xmit_hash(bond))
                bond_update_slave_arr(bond, NULL);
 
+       bond->nest_level = dev_get_nest_level(bond_dev);
+
        netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
                    slave_dev->name,
                    bond_is_active_slave(new_slave) ? "an active" : "a backup",
index b177956..3c71f1c 100644 (file)
@@ -605,7 +605,7 @@ void can_bus_off(struct net_device *dev)
 {
        struct can_priv *priv = netdev_priv(dev);
 
-       netdev_dbg(dev, "bus-off\n");
+       netdev_info(dev, "bus-off\n");
 
        netif_carrier_off(dev);
 
index 634c51e..d53a45b 100644 (file)
 #define FLEXCAN_QUIRK_DISABLE_MECR     BIT(4) /* Disable Memory error detection */
 #define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP        BIT(5) /* Use timestamp based offloading */
 #define FLEXCAN_QUIRK_BROKEN_PERR_STATE        BIT(6) /* No interrupt for error passive */
+#define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN       BIT(7) /* default to BE register access */
 
 /* Structure of the message buffer */
 struct flexcan_mb {
@@ -287,6 +288,12 @@ struct flexcan_priv {
 };
 
 static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
+       .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
+               FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+               FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN,
+};
+
+static const struct flexcan_devtype_data fsl_imx25_devtype_data = {
        .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
                FLEXCAN_QUIRK_BROKEN_PERR_STATE,
 };
@@ -1251,9 +1258,9 @@ static void unregister_flexcandev(struct net_device *dev)
 static const struct of_device_id flexcan_of_match[] = {
        { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
        { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
-       { .compatible = "fsl,imx53-flexcan", .data = &fsl_p1010_devtype_data, },
-       { .compatible = "fsl,imx35-flexcan", .data = &fsl_p1010_devtype_data, },
-       { .compatible = "fsl,imx25-flexcan", .data = &fsl_p1010_devtype_data, },
+       { .compatible = "fsl,imx53-flexcan", .data = &fsl_imx25_devtype_data, },
+       { .compatible = "fsl,imx35-flexcan", .data = &fsl_imx25_devtype_data, },
+       { .compatible = "fsl,imx25-flexcan", .data = &fsl_imx25_devtype_data, },
        { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
        { .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
        { .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, },
@@ -1337,18 +1344,13 @@ static int flexcan_probe(struct platform_device *pdev)
 
        priv = netdev_priv(dev);
 
-       if (of_property_read_bool(pdev->dev.of_node, "big-endian")) {
+       if (of_property_read_bool(pdev->dev.of_node, "big-endian") ||
+           devtype_data->quirks & FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN) {
                priv->read = flexcan_read_be;
                priv->write = flexcan_write_be;
        } else {
-               if (of_device_is_compatible(pdev->dev.of_node,
-                                           "fsl,p1010-flexcan")) {
-                       priv->read = flexcan_read_be;
-                       priv->write = flexcan_write_be;
-               } else {
-                       priv->read = flexcan_read_le;
-                       priv->write = flexcan_write_le;
-               }
+               priv->read = flexcan_read_le;
+               priv->write = flexcan_write_le;
        }
 
        priv->can.clock.freq = clock_freq;
index 5590c55..53e320c 100644 (file)
@@ -91,6 +91,7 @@
 #define HI3110_STAT_BUSOFF BIT(2)
 #define HI3110_STAT_ERRP BIT(3)
 #define HI3110_STAT_ERRW BIT(4)
+#define HI3110_STAT_TXMTY BIT(7)
 
 #define HI3110_BTR0_SJW_SHIFT 6
 #define HI3110_BTR0_BRP_SHIFT 0
@@ -427,8 +428,10 @@ static int hi3110_get_berr_counter(const struct net_device *net,
        struct hi3110_priv *priv = netdev_priv(net);
        struct spi_device *spi = priv->spi;
 
+       mutex_lock(&priv->hi3110_lock);
        bec->txerr = hi3110_read(spi, HI3110_READ_TEC);
        bec->rxerr = hi3110_read(spi, HI3110_READ_REC);
+       mutex_unlock(&priv->hi3110_lock);
 
        return 0;
 }
@@ -735,10 +738,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
                        }
                }
 
-               if (intf == 0)
-                       break;
-
-               if (intf & HI3110_INT_TXCPLT) {
+               if (priv->tx_len && statf & HI3110_STAT_TXMTY) {
                        net->stats.tx_packets++;
                        net->stats.tx_bytes += priv->tx_len - 1;
                        can_led_event(net, CAN_LED_EVENT_TX);
@@ -748,6 +748,9 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
                        }
                        netif_wake_queue(net);
                }
+
+               if (intf == 0)
+                       break;
        }
        mutex_unlock(&priv->hi3110_lock);
        return IRQ_HANDLED;
index 63587b8..daed57d 100644 (file)
@@ -1179,7 +1179,7 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
 
        skb = alloc_can_skb(priv->netdev, &cf);
        if (!skb) {
-               stats->tx_dropped++;
+               stats->rx_dropped++;
                return;
        }
 
index 3d20910..5b4374f 100644 (file)
@@ -3370,6 +3370,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_internal_phys = 5,
                .max_vid = 4095,
                .port_base_addr = 0x10,
+               .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
                .global2_addr = 0x1c,
                .age_time_coeff = 15000,
@@ -3391,6 +3392,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_internal_phys = 0,
                .max_vid = 4095,
                .port_base_addr = 0x10,
+               .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
                .global2_addr = 0x1c,
                .age_time_coeff = 15000,
@@ -3410,6 +3412,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_internal_phys = 8,
                .max_vid = 4095,
                .port_base_addr = 0x10,
+               .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
                .global2_addr = 0x1c,
                .age_time_coeff = 15000,
@@ -3431,6 +3434,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_internal_phys = 5,
                .max_vid = 4095,
                .port_base_addr = 0x10,
+               .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
                .global2_addr = 0x1c,
                .age_time_coeff = 15000,
@@ -3452,6 +3456,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_internal_phys = 0,
                .max_vid = 4095,
                .port_base_addr = 0x10,
+               .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
                .global2_addr = 0x1c,
                .age_time_coeff = 15000,
@@ -3472,6 +3477,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_gpio = 11,
                .max_vid = 4095,
                .port_base_addr = 0x10,
+               .phy_base_addr = 0x10,
                .global1_addr = 0x1b,
                .global2_addr = 0x1c,
                .age_time_coeff = 3750,
@@ -3493,6 +3499,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_internal_phys = 5,
                .max_vid = 4095,
                .port_base_addr = 0x10,
+               .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
                .global2_addr = 0x1c,
                .age_time_coeff = 15000,
@@ -3514,6 +3521,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_internal_phys = 0,
                .max_vid = 4095,
                .port_base_addr = 0x10,
+               .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
                .global2_addr = 0x1c,
                .age_time_coeff = 15000,
@@ -3535,6 +3543,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_internal_phys = 5,
                .max_vid = 4095,
                .port_base_addr = 0x10,
+               .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
                .global2_addr = 0x1c,
                .age_time_coeff = 15000,
@@ -3557,6 +3566,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_gpio = 15,
                .max_vid = 4095,
                .port_base_addr = 0x10,
+               .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
                .global2_addr = 0x1c,
                .age_time_coeff = 15000,
@@ -3578,6 +3588,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_internal_phys = 5,
                .max_vid = 4095,
                .port_base_addr = 0x10,
+               .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
                .global2_addr = 0x1c,
                .age_time_coeff = 15000,
@@ -3600,6 +3611,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_gpio = 15,
                .max_vid = 4095,
                .port_base_addr = 0x10,
+               .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
                .global2_addr = 0x1c,
                .age_time_coeff = 15000,
@@ -3621,6 +3633,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_internal_phys = 0,
                .max_vid = 4095,
                .port_base_addr = 0x10,
+               .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
                .global2_addr = 0x1c,
                .age_time_coeff = 15000,
@@ -3641,6 +3654,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_gpio = 16,
                .max_vid = 8191,
                .port_base_addr = 0x0,
+               .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
                .global2_addr = 0x1c,
                .tag_protocol = DSA_TAG_PROTO_DSA,
@@ -3663,6 +3677,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_gpio = 16,
                .max_vid = 8191,
                .port_base_addr = 0x0,
+               .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
                .global2_addr = 0x1c,
                .age_time_coeff = 3750,
@@ -3684,6 +3699,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_internal_phys = 11,
                .max_vid = 8191,
                .port_base_addr = 0x0,
+               .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
                .global2_addr = 0x1c,
                .age_time_coeff = 3750,
@@ -3707,6 +3723,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_gpio = 15,
                .max_vid = 4095,
                .port_base_addr = 0x10,
+               .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
                .global2_addr = 0x1c,
                .age_time_coeff = 15000,
@@ -3730,6 +3747,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_gpio = 16,
                .max_vid = 8191,
                .port_base_addr = 0x0,
+               .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
                .global2_addr = 0x1c,
                .age_time_coeff = 3750,
@@ -3753,6 +3771,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_gpio = 15,
                .max_vid = 4095,
                .port_base_addr = 0x10,
+               .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
                .global2_addr = 0x1c,
                .age_time_coeff = 15000,
@@ -3776,6 +3795,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_gpio = 15,
                .max_vid = 4095,
                .port_base_addr = 0x10,
+               .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
                .global2_addr = 0x1c,
                .age_time_coeff = 15000,
@@ -3798,6 +3818,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_gpio = 11,
                .max_vid = 4095,
                .port_base_addr = 0x10,
+               .phy_base_addr = 0x10,
                .global1_addr = 0x1b,
                .global2_addr = 0x1c,
                .age_time_coeff = 3750,
@@ -3820,6 +3841,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_internal_phys = 5,
                .max_vid = 4095,
                .port_base_addr = 0x10,
+               .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
                .global2_addr = 0x1c,
                .age_time_coeff = 15000,
@@ -3841,6 +3863,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_internal_phys = 5,
                .max_vid = 4095,
                .port_base_addr = 0x10,
+               .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
                .global2_addr = 0x1c,
                .age_time_coeff = 15000,
@@ -3863,6 +3886,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_gpio = 15,
                .max_vid = 4095,
                .port_base_addr = 0x10,
+               .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
                .global2_addr = 0x1c,
                .age_time_coeff = 15000,
@@ -3885,6 +3909,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_gpio = 16,
                .max_vid = 8191,
                .port_base_addr = 0x0,
+               .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
                .global2_addr = 0x1c,
                .age_time_coeff = 3750,
@@ -3907,6 +3932,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_gpio = 16,
                .max_vid = 8191,
                .port_base_addr = 0x0,
+               .phy_base_addr = 0x0,
                .global1_addr = 0x1b,
                .global2_addr = 0x1c,
                .age_time_coeff = 3750,
index 80490f6..12b7f46 100644 (file)
@@ -114,6 +114,7 @@ struct mv88e6xxx_info {
        unsigned int num_gpio;
        unsigned int max_vid;
        unsigned int port_base_addr;
+       unsigned int phy_base_addr;
        unsigned int global1_addr;
        unsigned int global2_addr;
        unsigned int age_time_coeff;
index 0ce627f..8d22d66 100644 (file)
@@ -1118,7 +1118,7 @@ int mv88e6xxx_g2_irq_mdio_setup(struct mv88e6xxx_chip *chip,
                        err = irq;
                        goto out;
                }
-               bus->irq[chip->info->port_base_addr + phy] = irq;
+               bus->irq[chip->info->phy_base_addr + phy] = irq;
        }
        return 0;
 out:
index 7ea72ef..d272dc6 100644 (file)
 #define MDIO_VEND2_AN_STAT             0x8002
 #endif
 
+#ifndef MDIO_VEND2_PMA_CDR_CONTROL
+#define MDIO_VEND2_PMA_CDR_CONTROL     0x8056
+#endif
+
 #ifndef MDIO_CTRL1_SPEED1G
 #define MDIO_CTRL1_SPEED1G             (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
 #endif
 #define XGBE_AN_CL37_TX_CONFIG_MASK    0x08
 #define XGBE_AN_CL37_MII_CTRL_8BIT     0x0100
 
+#define XGBE_PMA_CDR_TRACK_EN_MASK     0x01
+#define XGBE_PMA_CDR_TRACK_EN_OFF      0x00
+#define XGBE_PMA_CDR_TRACK_EN_ON       0x01
+
 /* Bit setting and getting macros
  *  The get macro will extract the current bit field value from within
  *  the variable
index 7d128be..b911439 100644 (file)
@@ -519,6 +519,22 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
                                   "debugfs_create_file failed\n");
        }
 
+       if (pdata->vdata->an_cdr_workaround) {
+               pfile = debugfs_create_bool("an_cdr_workaround", 0600,
+                                           pdata->xgbe_debugfs,
+                                           &pdata->debugfs_an_cdr_workaround);
+               if (!pfile)
+                       netdev_err(pdata->netdev,
+                                  "debugfs_create_bool failed\n");
+
+               pfile = debugfs_create_bool("an_cdr_track_early", 0600,
+                                           pdata->xgbe_debugfs,
+                                           &pdata->debugfs_an_cdr_track_early);
+               if (!pfile)
+                       netdev_err(pdata->netdev,
+                                  "debugfs_create_bool failed\n");
+       }
+
        kfree(buf);
 }
 
index 795e556..441d097 100644 (file)
@@ -349,6 +349,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
        XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
 
        /* Call MDIO/PHY initialization routine */
+       pdata->debugfs_an_cdr_workaround = pdata->vdata->an_cdr_workaround;
        ret = pdata->phy_if.phy_init(pdata);
        if (ret)
                return ret;
index 072b9f6..1b45cd7 100644 (file)
@@ -432,11 +432,16 @@ static void xgbe_an73_disable(struct xgbe_prv_data *pdata)
        xgbe_an73_set(pdata, false, false);
        xgbe_an73_disable_interrupts(pdata);
 
+       pdata->an_start = 0;
+
        netif_dbg(pdata, link, pdata->netdev, "CL73 AN disabled\n");
 }
 
 static void xgbe_an_restart(struct xgbe_prv_data *pdata)
 {
+       if (pdata->phy_if.phy_impl.an_pre)
+               pdata->phy_if.phy_impl.an_pre(pdata);
+
        switch (pdata->an_mode) {
        case XGBE_AN_MODE_CL73:
        case XGBE_AN_MODE_CL73_REDRV:
@@ -453,6 +458,9 @@ static void xgbe_an_restart(struct xgbe_prv_data *pdata)
 
 static void xgbe_an_disable(struct xgbe_prv_data *pdata)
 {
+       if (pdata->phy_if.phy_impl.an_post)
+               pdata->phy_if.phy_impl.an_post(pdata);
+
        switch (pdata->an_mode) {
        case XGBE_AN_MODE_CL73:
        case XGBE_AN_MODE_CL73_REDRV:
@@ -505,11 +513,11 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
                XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
                            reg);
 
-               if (pdata->phy_if.phy_impl.kr_training_post)
-                       pdata->phy_if.phy_impl.kr_training_post(pdata);
-
                netif_dbg(pdata, link, pdata->netdev,
                          "KR training initiated\n");
+
+               if (pdata->phy_if.phy_impl.kr_training_post)
+                       pdata->phy_if.phy_impl.kr_training_post(pdata);
        }
 
        return XGBE_AN_PAGE_RECEIVED;
@@ -637,11 +645,11 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
                        return XGBE_AN_NO_LINK;
        }
 
-       xgbe_an73_disable(pdata);
+       xgbe_an_disable(pdata);
 
        xgbe_switch_mode(pdata);
 
-       xgbe_an73_restart(pdata);
+       xgbe_an_restart(pdata);
 
        return XGBE_AN_INCOMPAT_LINK;
 }
@@ -820,6 +828,9 @@ static void xgbe_an37_state_machine(struct xgbe_prv_data *pdata)
                pdata->an_result = pdata->an_state;
                pdata->an_state = XGBE_AN_READY;
 
+               if (pdata->phy_if.phy_impl.an_post)
+                       pdata->phy_if.phy_impl.an_post(pdata);
+
                netif_dbg(pdata, link, pdata->netdev, "CL37 AN result: %s\n",
                          xgbe_state_as_string(pdata->an_result));
        }
@@ -903,6 +914,9 @@ again:
                pdata->kx_state = XGBE_RX_BPA;
                pdata->an_start = 0;
 
+               if (pdata->phy_if.phy_impl.an_post)
+                       pdata->phy_if.phy_impl.an_post(pdata);
+
                netif_dbg(pdata, link, pdata->netdev, "CL73 AN result: %s\n",
                          xgbe_state_as_string(pdata->an_result));
        }
index eb23f9b..82d1f41 100644 (file)
@@ -456,6 +456,7 @@ static const struct xgbe_version_data xgbe_v2a = {
        .irq_reissue_support            = 1,
        .tx_desc_prefetch               = 5,
        .rx_desc_prefetch               = 5,
+       .an_cdr_workaround              = 1,
 };
 
 static const struct xgbe_version_data xgbe_v2b = {
@@ -470,6 +471,7 @@ static const struct xgbe_version_data xgbe_v2b = {
        .irq_reissue_support            = 1,
        .tx_desc_prefetch               = 5,
        .rx_desc_prefetch               = 5,
+       .an_cdr_workaround              = 1,
 };
 
 static const struct pci_device_id xgbe_pci_table[] = {
index 3304a29..aac8843 100644 (file)
 /* Rate-change complete wait/retry count */
 #define XGBE_RATECHANGE_COUNT          500
 
+/* CDR delay values for KR support (in usec) */
+#define XGBE_CDR_DELAY_INIT            10000
+#define XGBE_CDR_DELAY_INC             10000
+#define XGBE_CDR_DELAY_MAX             100000
+
+/* RRC frequency during link status check */
+#define XGBE_RRC_FREQUENCY             10
+
 enum xgbe_port_mode {
        XGBE_PORT_MODE_RSVD = 0,
        XGBE_PORT_MODE_BACKPLANE,
@@ -245,6 +253,10 @@ enum xgbe_sfp_speed {
 #define XGBE_SFP_BASE_VENDOR_SN                        4
 #define XGBE_SFP_BASE_VENDOR_SN_LEN            16
 
+#define XGBE_SFP_EXTD_OPT1                     1
+#define XGBE_SFP_EXTD_OPT1_RX_LOS              BIT(1)
+#define XGBE_SFP_EXTD_OPT1_TX_FAULT            BIT(3)
+
 #define XGBE_SFP_EXTD_DIAG                     28
 #define XGBE_SFP_EXTD_DIAG_ADDR_CHANGE         BIT(2)
 
@@ -324,6 +336,7 @@ struct xgbe_phy_data {
 
        unsigned int sfp_gpio_address;
        unsigned int sfp_gpio_mask;
+       unsigned int sfp_gpio_inputs;
        unsigned int sfp_gpio_rx_los;
        unsigned int sfp_gpio_tx_fault;
        unsigned int sfp_gpio_mod_absent;
@@ -355,6 +368,10 @@ struct xgbe_phy_data {
        unsigned int redrv_addr;
        unsigned int redrv_lane;
        unsigned int redrv_model;
+
+       /* KR AN support */
+       unsigned int phy_cdr_notrack;
+       unsigned int phy_cdr_delay;
 };
 
 /* I2C, MDIO and GPIO lines are muxed, so only one device at a time */
@@ -974,6 +991,49 @@ static void xgbe_phy_sfp_external_phy(struct xgbe_prv_data *pdata)
        phy_data->sfp_phy_avail = 1;
 }
 
+static bool xgbe_phy_check_sfp_rx_los(struct xgbe_phy_data *phy_data)
+{
+       u8 *sfp_extd = phy_data->sfp_eeprom.extd;
+
+       if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_RX_LOS))
+               return false;
+
+       if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS)
+               return false;
+
+       if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_rx_los))
+               return true;
+
+       return false;
+}
+
+static bool xgbe_phy_check_sfp_tx_fault(struct xgbe_phy_data *phy_data)
+{
+       u8 *sfp_extd = phy_data->sfp_eeprom.extd;
+
+       if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_TX_FAULT))
+               return false;
+
+       if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT)
+               return false;
+
+       if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_tx_fault))
+               return true;
+
+       return false;
+}
+
+static bool xgbe_phy_check_sfp_mod_absent(struct xgbe_phy_data *phy_data)
+{
+       if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT)
+               return false;
+
+       if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_mod_absent))
+               return true;
+
+       return false;
+}
+
 static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata)
 {
        struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -1019,6 +1079,10 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
        if (sfp_base[XGBE_SFP_BASE_EXT_ID] != XGBE_SFP_EXT_ID_SFP)
                return;
 
+       /* Update transceiver signals (eeprom extd/options) */
+       phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data);
+       phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data);
+
        if (xgbe_phy_sfp_parse_quirks(pdata))
                return;
 
@@ -1184,7 +1248,6 @@ put:
 static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
 {
        struct xgbe_phy_data *phy_data = pdata->phy_data;
-       unsigned int gpio_input;
        u8 gpio_reg, gpio_ports[2];
        int ret;
 
@@ -1199,23 +1262,9 @@ static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
                return;
        }
 
-       gpio_input = (gpio_ports[1] << 8) | gpio_ports[0];
-
-       if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT) {
-               /* No GPIO, just assume the module is present for now */
-               phy_data->sfp_mod_absent = 0;
-       } else {
-               if (!(gpio_input & (1 << phy_data->sfp_gpio_mod_absent)))
-                       phy_data->sfp_mod_absent = 0;
-       }
-
-       if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS) &&
-           (gpio_input & (1 << phy_data->sfp_gpio_rx_los)))
-               phy_data->sfp_rx_los = 1;
+       phy_data->sfp_gpio_inputs = (gpio_ports[1] << 8) | gpio_ports[0];
 
-       if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT) &&
-           (gpio_input & (1 << phy_data->sfp_gpio_tx_fault)))
-               phy_data->sfp_tx_fault = 1;
+       phy_data->sfp_mod_absent = xgbe_phy_check_sfp_mod_absent(phy_data);
 }
 
 static void xgbe_phy_sfp_mod_absent(struct xgbe_prv_data *pdata)
@@ -2361,7 +2410,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
                return 1;
 
        /* No link, attempt a receiver reset cycle */
-       if (phy_data->rrc_count++) {
+       if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
                phy_data->rrc_count = 0;
                xgbe_phy_rrc(pdata);
        }
@@ -2669,6 +2718,103 @@ static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata)
        return true;
 }
 
+static void xgbe_phy_cdr_track(struct xgbe_prv_data *pdata)
+{
+       struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+       if (!pdata->debugfs_an_cdr_workaround)
+               return;
+
+       if (!phy_data->phy_cdr_notrack)
+               return;
+
+       usleep_range(phy_data->phy_cdr_delay,
+                    phy_data->phy_cdr_delay + 500);
+
+       XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
+                        XGBE_PMA_CDR_TRACK_EN_MASK,
+                        XGBE_PMA_CDR_TRACK_EN_ON);
+
+       phy_data->phy_cdr_notrack = 0;
+}
+
+static void xgbe_phy_cdr_notrack(struct xgbe_prv_data *pdata)
+{
+       struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+       if (!pdata->debugfs_an_cdr_workaround)
+               return;
+
+       if (phy_data->phy_cdr_notrack)
+               return;
+
+       XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
+                        XGBE_PMA_CDR_TRACK_EN_MASK,
+                        XGBE_PMA_CDR_TRACK_EN_OFF);
+
+       xgbe_phy_rrc(pdata);
+
+       phy_data->phy_cdr_notrack = 1;
+}
+
+static void xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata)
+{
+       if (!pdata->debugfs_an_cdr_track_early)
+               xgbe_phy_cdr_track(pdata);
+}
+
+static void xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata)
+{
+       if (pdata->debugfs_an_cdr_track_early)
+               xgbe_phy_cdr_track(pdata);
+}
+
+static void xgbe_phy_an_post(struct xgbe_prv_data *pdata)
+{
+       struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+       switch (pdata->an_mode) {
+       case XGBE_AN_MODE_CL73:
+       case XGBE_AN_MODE_CL73_REDRV:
+               if (phy_data->cur_mode != XGBE_MODE_KR)
+                       break;
+
+               xgbe_phy_cdr_track(pdata);
+
+               switch (pdata->an_result) {
+               case XGBE_AN_READY:
+               case XGBE_AN_COMPLETE:
+                       break;
+               default:
+                       if (phy_data->phy_cdr_delay < XGBE_CDR_DELAY_MAX)
+                               phy_data->phy_cdr_delay += XGBE_CDR_DELAY_INC;
+                       else
+                               phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT;
+                       break;
+               }
+               break;
+       default:
+               break;
+       }
+}
+
+static void xgbe_phy_an_pre(struct xgbe_prv_data *pdata)
+{
+       struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+       switch (pdata->an_mode) {
+       case XGBE_AN_MODE_CL73:
+       case XGBE_AN_MODE_CL73_REDRV:
+               if (phy_data->cur_mode != XGBE_MODE_KR)
+                       break;
+
+               xgbe_phy_cdr_notrack(pdata);
+               break;
+       default:
+               break;
+       }
+}
+
 static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
 {
        struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -2680,6 +2826,9 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
        xgbe_phy_sfp_reset(phy_data);
        xgbe_phy_sfp_mod_absent(pdata);
 
+       /* Reset CDR support */
+       xgbe_phy_cdr_track(pdata);
+
        /* Power off the PHY */
        xgbe_phy_power_off(pdata);
 
@@ -2712,6 +2861,9 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
        /* Start in highest supported mode */
        xgbe_phy_set_mode(pdata, phy_data->start_mode);
 
+       /* Reset CDR support */
+       xgbe_phy_cdr_track(pdata);
+
        /* After starting the I2C controller, we can check for an SFP */
        switch (phy_data->port_mode) {
        case XGBE_PORT_MODE_SFP:
@@ -3019,6 +3171,8 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
                }
        }
 
+       phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT;
+
        /* Register for driving external PHYs */
        mii = devm_mdiobus_alloc(pdata->dev);
        if (!mii) {
@@ -3071,4 +3225,10 @@ void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if)
        phy_impl->an_advertising        = xgbe_phy_an_advertising;
 
        phy_impl->an_outcome            = xgbe_phy_an_outcome;
+
+       phy_impl->an_pre                = xgbe_phy_an_pre;
+       phy_impl->an_post               = xgbe_phy_an_post;
+
+       phy_impl->kr_training_pre       = xgbe_phy_kr_training_pre;
+       phy_impl->kr_training_post      = xgbe_phy_kr_training_post;
 }
index ad102c8..95d4b56 100644 (file)
@@ -833,6 +833,7 @@ struct xgbe_hw_if {
 /* This structure represents implementation specific routines for an
  * implementation of a PHY. All routines are required unless noted below.
  *   Optional routines:
+ *     an_pre, an_post
  *     kr_training_pre, kr_training_post
  */
 struct xgbe_phy_impl_if {
@@ -875,6 +876,10 @@ struct xgbe_phy_impl_if {
        /* Process results of auto-negotiation */
        enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *);
 
+       /* Pre/Post auto-negotiation support */
+       void (*an_pre)(struct xgbe_prv_data *);
+       void (*an_post)(struct xgbe_prv_data *);
+
        /* Pre/Post KR training enablement support */
        void (*kr_training_pre)(struct xgbe_prv_data *);
        void (*kr_training_post)(struct xgbe_prv_data *);
@@ -989,6 +994,7 @@ struct xgbe_version_data {
        unsigned int irq_reissue_support;
        unsigned int tx_desc_prefetch;
        unsigned int rx_desc_prefetch;
+       unsigned int an_cdr_workaround;
 };
 
 struct xgbe_vxlan_data {
@@ -1257,6 +1263,9 @@ struct xgbe_prv_data {
        unsigned int debugfs_xprop_reg;
 
        unsigned int debugfs_xi2c_reg;
+
+       bool debugfs_an_cdr_workaround;
+       bool debugfs_an_cdr_track_early;
 };
 
 /* Function prototypes*/
index 32f6d2e..1a1a638 100644 (file)
@@ -95,6 +95,7 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
        /*rss rings */
        cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
        cfg->vecs = min(cfg->vecs, num_online_cpus());
+       cfg->vecs = min(cfg->vecs, self->irqvecs);
        /* cfg->vecs should be power of 2 for RSS */
        if (cfg->vecs >= 8U)
                cfg->vecs = 8U;
@@ -246,6 +247,8 @@ void aq_nic_ndev_init(struct aq_nic_s *self)
 
        self->ndev->hw_features |= aq_hw_caps->hw_features;
        self->ndev->features = aq_hw_caps->hw_features;
+       self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
+                                    NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_LRO;
        self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
        self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 
index 219b550..faa533a 100644 (file)
@@ -80,6 +80,7 @@ struct aq_nic_s {
 
        struct pci_dev *pdev;
        unsigned int msix_entry_mask;
+       u32 irqvecs;
 };
 
 static inline struct device *aq_nic_get_dev(struct aq_nic_s *self)
index ecc6306..a50e08b 100644 (file)
@@ -267,16 +267,16 @@ static int aq_pci_probe(struct pci_dev *pdev,
        numvecs = min(numvecs, num_online_cpus());
        /*enable interrupts */
 #if !AQ_CFG_FORCE_LEGACY_INT
-       err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs,
-                                   PCI_IRQ_MSIX);
-
-       if (err < 0) {
-               err = pci_alloc_irq_vectors(self->pdev, 1, 1,
-                                           PCI_IRQ_MSI | PCI_IRQ_LEGACY);
-               if (err < 0)
-                       goto err_hwinit;
+       numvecs = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
+                                       PCI_IRQ_MSIX | PCI_IRQ_MSI |
+                                       PCI_IRQ_LEGACY);
+
+       if (numvecs < 0) {
+               err = numvecs;
+               goto err_hwinit;
        }
 #endif
+       self->irqvecs = numvecs;
 
        /* net device init */
        aq_nic_cfg_start(self);
@@ -298,9 +298,9 @@ err_free_aq_hw:
        kfree(self->aq_hw);
 err_ioremap:
        free_netdev(ndev);
-err_pci_func:
-       pci_release_regions(pdev);
 err_ndev:
+       pci_release_regions(pdev);
+err_pci_func:
        pci_disable_device(pdev);
        return err;
 }
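
The aq_pci_probe() hunk above replaces the two-step MSI-X-then-MSI/legacy fallback with a single pci_alloc_irq_vectors() call that accepts a vector range plus all three interrupt types, and records how many vectors were actually granted (the aq_nic_cfg_start() hunk earlier then clamps cfg->vecs against that count). A minimal sketch of the allocation pattern follows; my_dev and MY_MAX_VECS are illustrative names, not taken from the driver.

#include <linux/pci.h>
#include <linux/types.h>

#define MY_MAX_VECS 8U

struct my_dev {
        struct pci_dev *pdev;
        u32 irqvecs;            /* number of vectors actually granted */
};

static int my_alloc_irq_vectors(struct my_dev *self)
{
        int nvec;

        /* Ask for anything between 1 and MY_MAX_VECS vectors; the PCI core
         * tries MSI-X first, then MSI, then legacy INTx, and returns the
         * number granted or a negative errno.
         */
        nvec = pci_alloc_irq_vectors(self->pdev, 1, MY_MAX_VECS,
                                     PCI_IRQ_MSIX | PCI_IRQ_MSI |
                                     PCI_IRQ_LEGACY);
        if (nvec < 0)
                return nvec;

        self->irqvecs = nvec;
        return 0;
}

Storing the granted count is what lets the queue/ring setup degrade gracefully when only MSI or INTx is available, instead of failing an exact MSI-X request.
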
index f9a3c1a..f33b25f 100644 (file)
@@ -2144,14 +2144,21 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
        .ndo_select_queue       = bcm_sysport_select_queue,
 };
 
-static int bcm_sysport_map_queues(struct net_device *dev,
+static int bcm_sysport_map_queues(struct notifier_block *nb,
                                  struct dsa_notifier_register_info *info)
 {
-       struct bcm_sysport_priv *priv = netdev_priv(dev);
        struct bcm_sysport_tx_ring *ring;
+       struct bcm_sysport_priv *priv;
        struct net_device *slave_dev;
        unsigned int num_tx_queues;
        unsigned int q, start, port;
+       struct net_device *dev;
+
+       priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
+       if (priv->netdev != info->master)
+               return 0;
+
+       dev = info->master;
 
        /* We can't be setting up queue inspection for non directly attached
         * switches
@@ -2174,11 +2181,12 @@ static int bcm_sysport_map_queues(struct net_device *dev,
        if (priv->is_lite)
                netif_set_real_num_tx_queues(slave_dev,
                                             slave_dev->num_tx_queues / 2);
+
        num_tx_queues = slave_dev->real_num_tx_queues;
 
        if (priv->per_port_num_tx_queues &&
            priv->per_port_num_tx_queues != num_tx_queues)
-               netdev_warn(slave_dev, "asymetric number of per-port queues\n");
+               netdev_warn(slave_dev, "asymmetric number of per-port queues\n");
 
        priv->per_port_num_tx_queues = num_tx_queues;
 
@@ -2201,7 +2209,7 @@ static int bcm_sysport_map_queues(struct net_device *dev,
        return 0;
 }
 
-static int bcm_sysport_dsa_notifier(struct notifier_block *unused,
+static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
                                    unsigned long event, void *ptr)
 {
        struct dsa_notifier_register_info *info;
@@ -2211,7 +2219,7 @@ static int bcm_sysport_dsa_notifier(struct notifier_block *unused,
 
        info = ptr;
 
-       return notifier_from_errno(bcm_sysport_map_queues(info->master, info));
+       return notifier_from_errno(bcm_sysport_map_queues(nb, info));
 }
 
 #define REV_FMT        "v%2x.%02x"
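
The bcm_sysport hunk above stops assuming the notifier callback is only ever invoked for its own master device: the private data is recovered from the notifier_block itself with container_of(), and notifications for other masters are ignored. A generic sketch of that pattern, assuming the 4.17-era DSA notifier types (struct dsa_notifier_register_info, DSA_PORT_REGISTER); my_priv, my_map_queues and my_dsa_notifier are illustrative names.

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <net/dsa.h>

struct my_priv {
        struct net_device *netdev;
        struct notifier_block dsa_notifier;
};

static int my_map_queues(struct my_priv *priv, struct net_device *master)
{
        /* ... queue inspection setup for the switch ports goes here ... */
        return 0;
}

static int my_dsa_notifier(struct notifier_block *nb,
                           unsigned long event, void *ptr)
{
        struct dsa_notifier_register_info *info = ptr;
        /* Recover the private struct from the embedded notifier_block */
        struct my_priv *priv = container_of(nb, struct my_priv, dsa_notifier);

        if (event != DSA_PORT_REGISTER)
                return NOTIFY_DONE;

        /* Only act on notifications for the master device we drive */
        if (priv->netdev != info->master)
                return NOTIFY_DONE;

        return notifier_from_errno(my_map_queues(priv, info->master));
}
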
index 08bbb63..9f59b12 100644 (file)
@@ -8733,14 +8733,15 @@ static void tg3_free_consistent(struct tg3 *tp)
        tg3_mem_rx_release(tp);
        tg3_mem_tx_release(tp);
 
-       /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
-       tg3_full_lock(tp, 0);
+       /* tp->hw_stats can be referenced safely:
+        *     1. under rtnl_lock
+        *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
+        */
        if (tp->hw_stats) {
                dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
                                  tp->hw_stats, tp->stats_mapping);
                tp->hw_stats = NULL;
        }
-       tg3_full_unlock(tp);
 }
 
 /*
@@ -14178,7 +14179,7 @@ static void tg3_get_stats64(struct net_device *dev,
        struct tg3 *tp = netdev_priv(dev);
 
        spin_lock_bh(&tp->lock);
-       if (!tp->hw_stats) {
+       if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
                *stats = tp->net_stats_prev;
                spin_unlock_bh(&tp->lock);
                return;
index 24d2865..005283c 100644 (file)
@@ -3433,8 +3433,8 @@ static int adap_config_hma(struct adapter *adapter)
        sgl = adapter->hma.sgt->sgl;
        node = dev_to_node(adapter->pdev_dev);
        for_each_sg(sgl, iter, sgt->orig_nents, i) {
-               newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL,
-                                          page_order);
+               newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL |
+                                          __GFP_ZERO, page_order);
                if (!newpage) {
                        dev_err(adapter->pdev_dev,
                                "Not enough memory for HMA page allocation\n");
@@ -5474,6 +5474,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
        spin_lock_init(&adapter->mbox_lock);
        INIT_LIST_HEAD(&adapter->mlist.list);
+       adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
        pci_set_drvdata(pdev, adapter);
 
        if (func != ent->driver_data) {
@@ -5508,8 +5509,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto out_free_adapter;
        }
 
-       adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
-
        /* PCI device has been enabled */
        adapter->flags |= DEV_ENABLED;
        memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
index 4df282e..0beee2c 100644 (file)
@@ -61,7 +61,7 @@ static const char hw_stat_gstrings[][ETH_GSTRING_LEN] = {
 static const char tx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
        "tx-single-collision",
        "tx-multiple-collision",
-       "tx-late-collsion",
+       "tx-late-collision",
        "tx-aborted-frames",
        "tx-lost-frames",
        "tx-carrier-sense-errors",
index 2df01ad..6e8d6a6 100644 (file)
@@ -1128,7 +1128,7 @@ static void clean_rx_pools(struct ibmvnic_adapter *adapter)
        if (!adapter->rx_pool)
                return;
 
-       rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+       rx_scrqs = adapter->num_active_rx_pools;
        rx_entries = adapter->req_rx_add_entries_per_subcrq;
 
        /* Free any remaining skbs in the rx buffer pools */
@@ -1177,7 +1177,7 @@ static void clean_tx_pools(struct ibmvnic_adapter *adapter)
        if (!adapter->tx_pool || !adapter->tso_pool)
                return;
 
-       tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+       tx_scrqs = adapter->num_active_tx_pools;
 
        /* Free any remaining skbs in the tx buffer pools */
        for (i = 0; i < tx_scrqs; i++) {
index 5b13ca1..7dc5f04 100644 (file)
@@ -586,7 +586,7 @@ struct ice_sw_rule_lg_act {
 #define ICE_LG_ACT_MIRROR_VSI_ID_S     3
 #define ICE_LG_ACT_MIRROR_VSI_ID_M     (0x3FF << ICE_LG_ACT_MIRROR_VSI_ID_S)
 
-       /* Action type = 5 - Large Action */
+       /* Action type = 5 - Generic Value */
 #define ICE_LG_ACT_GENERIC             0x5
 #define ICE_LG_ACT_GENERIC_VALUE_S     3
 #define ICE_LG_ACT_GENERIC_VALUE_M     (0xFFFF << ICE_LG_ACT_GENERIC_VALUE_S)
index 21977ec..71d032c 100644 (file)
@@ -78,6 +78,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
        struct ice_aq_desc desc;
        enum ice_status status;
        u16 flags;
+       u8 i;
 
        cmd = &desc.params.mac_read;
 
@@ -98,8 +99,16 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
                return ICE_ERR_CFG;
        }
 
-       ether_addr_copy(hw->port_info->mac.lan_addr, resp->mac_addr);
-       ether_addr_copy(hw->port_info->mac.perm_addr, resp->mac_addr);
+       /* A single port can report up to two (LAN and WoL) addresses */
+       for (i = 0; i < cmd->num_addr; i++)
+               if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
+                       ether_addr_copy(hw->port_info->mac.lan_addr,
+                                       resp[i].mac_addr);
+                       ether_addr_copy(hw->port_info->mac.perm_addr,
+                                       resp[i].mac_addr);
+                       break;
+               }
+
        return 0;
 }
 
@@ -464,9 +473,12 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
        if (status)
                goto err_unroll_sched;
 
-       /* Get port MAC information */
-       mac_buf_len = sizeof(struct ice_aqc_manage_mac_read_resp);
-       mac_buf = devm_kzalloc(ice_hw_to_dev(hw), mac_buf_len, GFP_KERNEL);
+       /* Get MAC information */
+       /* A single port can report up to two (LAN and WoL) addresses */
+       mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
+                              sizeof(struct ice_aqc_manage_mac_read_resp),
+                              GFP_KERNEL);
+       mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
 
        if (!mac_buf) {
                status = ICE_ERR_NO_MEMORY;
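
The ice hunks above size the manage-MAC-read response buffer for two entries, since firmware may report both a LAN and a Wake-on-LAN address, and then scan the array for the LAN entry instead of copying whichever comes first. A reduced sketch of that selection loop; my_mac_read_resp and MY_MAC_ADDR_TYPE_LAN only stand in for the real ice_aqc_manage_mac_read_resp layout and ICE_AQC_MAN_MAC_ADDR_TYPE_LAN constant.

#include <linux/etherdevice.h>

#define MY_MAC_ADDR_TYPE_LAN    0       /* illustrative, not the real AQC value */

struct my_mac_read_resp {
        u8 addr_type;
        u8 mac_addr[ETH_ALEN];
};

/* Copy the LAN address (not the WoL one) out of a multi-entry response */
static void my_pick_lan_addr(u8 *lan_addr, u8 *perm_addr,
                             const struct my_mac_read_resp *resp,
                             u8 num_addr)
{
        u8 i;

        for (i = 0; i < num_addr; i++) {
                if (resp[i].addr_type != MY_MAC_ADDR_TYPE_LAN)
                        continue;
                ether_addr_copy(lan_addr, resp[i].mac_addr);
                ether_addr_copy(perm_addr, resp[i].mac_addr);
                break;
        }
}
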
index 5909a44..7c511f1 100644 (file)
@@ -1014,10 +1014,10 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
        desc = ICE_CTL_Q_DESC(cq->rq, ntc);
        desc_idx = ntc;
 
+       cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
        flags = le16_to_cpu(desc->flags);
        if (flags & ICE_AQ_FLAG_ERR) {
                ret_code = ICE_ERR_AQ_ERROR;
-               cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
                ice_debug(hw, ICE_DBG_AQ_MSG,
                          "Control Receive Queue Event received with error 0x%x\n",
                          cq->rq_last_status);
index 1b9e2ef..4999048 100644 (file)
 #define PFINT_FW_CTL_CAUSE_ENA_S       30
 #define PFINT_FW_CTL_CAUSE_ENA_M       BIT(PFINT_FW_CTL_CAUSE_ENA_S)
 #define PFINT_OICR                     0x0016CA00
-#define PFINT_OICR_INTEVENT_S          0
-#define PFINT_OICR_INTEVENT_M          BIT(PFINT_OICR_INTEVENT_S)
 #define PFINT_OICR_HLP_RDY_S           14
 #define PFINT_OICR_HLP_RDY_M           BIT(PFINT_OICR_HLP_RDY_S)
 #define PFINT_OICR_CPM_RDY_S           15
index 210b791..5299caf 100644 (file)
@@ -1722,9 +1722,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
        oicr = rd32(hw, PFINT_OICR);
        ena_mask = rd32(hw, PFINT_OICR_ENA);
 
-       if (!(oicr & PFINT_OICR_INTEVENT_M))
-               goto ena_intr;
-
        if (oicr & PFINT_OICR_GRST_M) {
                u32 reset;
                /* we have a reset warning */
@@ -1782,7 +1779,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
        }
        ret = IRQ_HANDLED;
 
-ena_intr:
        /* re-enable interrupt causes that are not handled during this pass */
        wr32(hw, PFINT_OICR_ENA, ena_mask);
        if (!test_bit(__ICE_DOWN, pf->state)) {
index f16ff3e..2e6c1d9 100644 (file)
@@ -751,14 +751,14 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
        u16 num_added = 0;
        u32 temp;
 
+       *num_nodes_added = 0;
+
        if (!num_nodes)
                return status;
 
        if (!parent || layer < hw->sw_entry_point_layer)
                return ICE_ERR_PARAM;
 
-       *num_nodes_added = 0;
-
        /* max children per node per layer */
        max_child_nodes =
            le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children);
index c1c0bc3..cce7ada 100644 (file)
@@ -1700,7 +1700,22 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
        WARN_ON(hw->mac.type != e1000_i210);
        WARN_ON(queue < 0 || queue > 1);
 
-       if (enable) {
+       if (enable || queue == 0) {
+               /* i210 does not allow the queue 0 to be in the Strict
+                * Priority mode while the Qav mode is enabled, so,
+                * instead of disabling strict priority mode, we give
+                * queue 0 the maximum of credits possible.
+                *
+                * See section 8.12.19 of the i210 datasheet, "Note:
+                * Queue0 QueueMode must be set to 1b when
+                * TransmitMode is set to Qav."
+                */
+               if (queue == 0 && !enable) {
+                       /* max "linkspeed" idleslope in kbps */
+                       idleslope = 1000000;
+                       hicredit = ETH_FRAME_LEN;
+               }
+
                set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
                set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
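
The igb hunk above works around an i210 restriction quoted from its datasheet: while Qav transmit mode is enabled, queue 0 may not sit in strict-priority mode, so "disabling" CBS on queue 0 is emulated by leaving it in stream-reservation mode with effectively unlimited credit (idleslope equal to the link speed in kbps, hicredit of one full frame). A condensed sketch of that decision with the register programming omitted; my_cbs_pick_credits is a hypothetical helper.

#include <linux/if_ether.h>     /* ETH_FRAME_LEN */
#include <linux/types.h>

/* idleslope is in kbps, hicredit in bytes, as in the igb driver */
static void my_cbs_pick_credits(int queue, bool enable, int link_kbps,
                                int *idleslope, int *hicredit)
{
        if (queue == 0 && !enable) {
                /* Queue 0 must stay in stream-reservation mode while Qav
                 * is on, so emulate "disabled" by granting the whole link.
                 */
                *idleslope = link_kbps;         /* e.g. 1000000 for 1 Gb/s */
                *hicredit = ETH_FRAME_LEN;      /* one maximum-sized frame */
        }
        /* otherwise keep the user-requested idleslope/hicredit */
}
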
 
index 68af127..cead23e 100644 (file)
@@ -943,8 +943,8 @@ err2:
        kfree(ipsec->ip_tbl);
        kfree(ipsec->rx_tbl);
        kfree(ipsec->tx_tbl);
+       kfree(ipsec);
 err1:
-       kfree(adapter->ipsec);
        netdev_err(adapter->netdev, "Unable to allocate memory for SA tables");
 }
 
index 3123267..9592f3e 100644 (file)
@@ -3427,6 +3427,9 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
                hw->phy.sfp_setup_needed = false;
        }
 
+       if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+               return status;
+
        /* Reset PHY */
        if (!hw->phy.reset_disable && hw->phy.ops.reset)
                hw->phy.ops.reset(hw);
index 3d9033f..850f8af 100644 (file)
@@ -3420,7 +3420,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
                if (!err)
                        continue;
                hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j);
-               break;
+               goto err_setup_tx;
        }
 
        return 0;
@@ -4137,7 +4137,7 @@ out_drop:
        return NETDEV_TX_OK;
 }
 
-static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbevf_ring *tx_ring;
index 4202f9b..6f41023 100644 (file)
@@ -942,6 +942,7 @@ struct mvpp2 {
        struct clk *pp_clk;
        struct clk *gop_clk;
        struct clk *mg_clk;
+       struct clk *mg_core_clk;
        struct clk *axi_clk;
 
        /* List of pointers to port structures */
@@ -8768,18 +8769,27 @@ static int mvpp2_probe(struct platform_device *pdev)
                        err = clk_prepare_enable(priv->mg_clk);
                        if (err < 0)
                                goto err_gop_clk;
+
+                       priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk");
+                       if (IS_ERR(priv->mg_core_clk)) {
+                               priv->mg_core_clk = NULL;
+                       } else {
+                               err = clk_prepare_enable(priv->mg_core_clk);
+                               if (err < 0)
+                                       goto err_mg_clk;
+                       }
                }
 
                priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
                if (IS_ERR(priv->axi_clk)) {
                        err = PTR_ERR(priv->axi_clk);
                        if (err == -EPROBE_DEFER)
-                               goto err_gop_clk;
+                               goto err_mg_core_clk;
                        priv->axi_clk = NULL;
                } else {
                        err = clk_prepare_enable(priv->axi_clk);
                        if (err < 0)
-                               goto err_gop_clk;
+                               goto err_mg_core_clk;
                }
 
                /* Get system's tclk rate */
@@ -8793,7 +8803,7 @@ static int mvpp2_probe(struct platform_device *pdev)
        if (priv->hw_version == MVPP22) {
                err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
                if (err)
-                       goto err_mg_clk;
+                       goto err_axi_clk;
                /* Sadly, the BM pools all share the same register to
                 * store the high 32 bits of their address. So they
                 * must all have the same high 32 bits, which forces
@@ -8801,14 +8811,14 @@ static int mvpp2_probe(struct platform_device *pdev)
                 */
                err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (err)
-                       goto err_mg_clk;
+                       goto err_axi_clk;
        }
 
        /* Initialize network controller */
        err = mvpp2_init(pdev, priv);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to initialize controller\n");
-               goto err_mg_clk;
+               goto err_axi_clk;
        }
 
        /* Initialize ports */
@@ -8821,7 +8831,7 @@ static int mvpp2_probe(struct platform_device *pdev)
        if (priv->port_count == 0) {
                dev_err(&pdev->dev, "no ports enabled\n");
                err = -ENODEV;
-               goto err_mg_clk;
+               goto err_axi_clk;
        }
 
        /* Statistics must be gathered regularly because some of them (like
@@ -8849,8 +8859,13 @@ err_port_probe:
                        mvpp2_port_remove(priv->port_list[i]);
                i++;
        }
-err_mg_clk:
+err_axi_clk:
        clk_disable_unprepare(priv->axi_clk);
+
+err_mg_core_clk:
+       if (priv->hw_version == MVPP22)
+               clk_disable_unprepare(priv->mg_core_clk);
+err_mg_clk:
        if (priv->hw_version == MVPP22)
                clk_disable_unprepare(priv->mg_clk);
 err_gop_clk:
@@ -8897,6 +8912,7 @@ static int mvpp2_remove(struct platform_device *pdev)
                return 0;
 
        clk_disable_unprepare(priv->axi_clk);
+       clk_disable_unprepare(priv->mg_core_clk);
        clk_disable_unprepare(priv->mg_clk);
        clk_disable_unprepare(priv->pp_clk);
        clk_disable_unprepare(priv->gop_clk);
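
The mvpp2 probe hunks above treat the new "mg_core_clk" as optional: a failed devm_clk_get() simply leaves the pointer NULL, otherwise the clock is enabled and a matching clk_disable_unprepare() is threaded into the error-unwind labels and into mvpp2_remove(). A small sketch of that optional-clock idiom; unlike the driver's handling of "axi_clk", this sketch does not special-case -EPROBE_DEFER.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Obtain and enable a clock that may legitimately be absent from the DT.
 * On success *out is either an enabled clock or NULL (clock not present).
 */
static int my_get_optional_clk(struct platform_device *pdev,
                               const char *name, struct clk **out)
{
        struct clk *clk;
        int err;

        clk = devm_clk_get(&pdev->dev, name);
        if (IS_ERR(clk)) {
                *out = NULL;            /* treat as "no such clock" */
                return 0;
        }

        err = clk_prepare_enable(clk);
        if (err < 0)
                return err;

        *out = clk;
        return 0;
}

static void my_put_optional_clk(struct clk *clk)
{
        if (clk)
                clk_disable_unprepare(clk);
}

On the teardown side only the clock that was actually obtained gets disabled, which is what the new err_mg_core_clk label and the extra clk_disable_unprepare() in the remove path above implement.
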
index a30a2e9..f11b450 100644 (file)
@@ -1027,6 +1027,22 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
        if (!coal->tx_max_coalesced_frames_irq)
                return -EINVAL;
 
+       if (coal->tx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
+           coal->rx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
+           coal->rx_coalesce_usecs_low > MLX4_EN_MAX_COAL_TIME ||
+           coal->rx_coalesce_usecs_high > MLX4_EN_MAX_COAL_TIME) {
+               netdev_info(dev, "%s: maximum coalesce time supported is %d usecs\n",
+                           __func__, MLX4_EN_MAX_COAL_TIME);
+               return -ERANGE;
+       }
+
+       if (coal->tx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS ||
+           coal->rx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS) {
+               netdev_info(dev, "%s: maximum coalesced frames supported is %d\n",
+                           __func__, MLX4_EN_MAX_COAL_PKTS);
+               return -ERANGE;
+       }
+
        priv->rx_frames = (coal->rx_max_coalesced_frames ==
                           MLX4_EN_AUTO_CONF) ?
                                MLX4_EN_RX_COAL_TARGET :
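
The mlx4 ethtool hunk above rejects coalescing requests that would overflow the 16-bit hardware moderation fields, returning -ERANGE instead of silently truncating; the mlx4_en.h hunk further below adds the MLX4_EN_MAX_COAL_* limits it checks against and widens sample_interval/adaptive_rx_coal, presumably so the u32 values ethtool hands over are stored without truncation. A stripped-down sketch of that validation, redefining the two limits locally so it stands alone:

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/kernel.h>       /* U16_MAX */

#define MY_MAX_COAL_PKTS        U16_MAX
#define MY_MAX_COAL_TIME        U16_MAX

/* Reject values the 16-bit CQ moderation fields cannot hold */
static int my_check_coalesce(const struct ethtool_coalesce *coal)
{
        if (coal->tx_coalesce_usecs > MY_MAX_COAL_TIME ||
            coal->rx_coalesce_usecs > MY_MAX_COAL_TIME ||
            coal->rx_coalesce_usecs_low > MY_MAX_COAL_TIME ||
            coal->rx_coalesce_usecs_high > MY_MAX_COAL_TIME)
                return -ERANGE;

        if (coal->tx_max_coalesced_frames > MY_MAX_COAL_PKTS ||
            coal->rx_max_coalesced_frames > MY_MAX_COAL_PKTS)
                return -ERANGE;

        return 0;
}
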
index e0adac4..9670b33 100644 (file)
@@ -3324,12 +3324,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                                           MAX_TX_RINGS, GFP_KERNEL);
                if (!priv->tx_ring[t]) {
                        err = -ENOMEM;
-                       goto err_free_tx;
+                       goto out;
                }
                priv->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
                                         MAX_TX_RINGS, GFP_KERNEL);
                if (!priv->tx_cq[t]) {
-                       kfree(priv->tx_ring[t]);
                        err = -ENOMEM;
                        goto out;
                }
@@ -3582,11 +3581,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 
        return 0;
 
-err_free_tx:
-       while (t--) {
-               kfree(priv->tx_ring[t]);
-               kfree(priv->tx_cq[t]);
-       }
 out:
        mlx4_en_destroy_netdev(dev);
        return err;
index bfef692..211578f 100644 (file)
@@ -1317,7 +1317,7 @@ static int mlx4_mf_unbond(struct mlx4_dev *dev)
 
        ret = mlx4_unbond_fs_rules(dev);
        if (ret)
-               mlx4_warn(dev, "multifunction unbond for flow rules failedi (%d)\n", ret);
+               mlx4_warn(dev, "multifunction unbond for flow rules failed (%d)\n", ret);
        ret1 = mlx4_unbond_mac_table(dev);
        if (ret1) {
                mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1);
index f7c8113..ace6545 100644 (file)
 #define MLX4_EN_TX_COAL_PKTS   16
 #define MLX4_EN_TX_COAL_TIME   0x10
 
+#define MLX4_EN_MAX_COAL_PKTS  U16_MAX
+#define MLX4_EN_MAX_COAL_TIME  U16_MAX
+
 #define MLX4_EN_RX_RATE_LOW            400000
 #define MLX4_EN_RX_COAL_TIME_LOW       0
 #define MLX4_EN_RX_RATE_HIGH           450000
@@ -552,8 +555,8 @@ struct mlx4_en_priv {
        u16 rx_usecs_low;
        u32 pkt_rate_high;
        u16 rx_usecs_high;
-       u16 sample_interval;
-       u16 adaptive_rx_coal;
+       u32 sample_interval;
+       u32 adaptive_rx_coal;
        u32 msg_enable;
        u32 loopback_ok;
        u32 validate_loopback;
index 3d46ef4..c641d56 100644 (file)
@@ -1007,12 +1007,14 @@ static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv)
 
        mutex_lock(&priv->state_lock);
 
-       if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
-               goto out;
-
        new_channels.params = priv->channels.params;
        mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params);
 
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+               priv->channels.params = new_channels.params;
+               goto out;
+       }
+
        /* Skip if tx_min_inline is the same */
        if (new_channels.params.tx_min_inline_mode ==
            priv->channels.params.tx_min_inline_mode)
index d8f68e4..876c3e4 100644 (file)
@@ -877,13 +877,14 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
 };
 
 static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
-                                  struct mlx5e_params *params)
+                                  struct mlx5e_params *params, u16 mtu)
 {
        u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
                                         MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
                                         MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
 
        params->hard_mtu    = MLX5E_ETH_HARD_MTU;
+       params->sw_mtu      = mtu;
        params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;
        params->rq_wq_type  = MLX5_WQ_TYPE_LINKED_LIST;
        params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE;
@@ -931,7 +932,7 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
 
        priv->channels.params.num_channels = profile->max_nch(mdev);
 
-       mlx5e_build_rep_params(mdev, &priv->channels.params);
+       mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu);
        mlx5e_build_rep_netdev(netdev);
 
        mlx5e_timestamp_init(priv);
index 7079764..027f54a 100644 (file)
@@ -290,7 +290,7 @@ static int mlx5e_test_loopback(struct mlx5e_priv *priv)
 
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                netdev_err(priv->netdev,
-                          "\tCan't perform loobpack test while device is down\n");
+                          "\tCan't perform loopback test while device is down\n");
                return -ENODEV;
        }
 
index 4197001..b94276d 100644 (file)
@@ -1261,6 +1261,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                                                  f->mask);
                addr_type = key->addr_type;
 
+               /* the HW doesn't support frag first/later */
+               if (mask->flags & FLOW_DIS_FIRST_FRAG)
+                       return -EOPNOTSUPP;
+
                if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
@@ -1864,7 +1868,8 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
        }
 
        ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
-       if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
+       if (modify_ip_header && ip_proto != IPPROTO_TCP &&
+           ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
                pr_info("can't offload re-write of ip proto %d\n", ip_proto);
                return false;
        }
index 2029710..5532aa3 100644 (file)
@@ -255,7 +255,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
                                          DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
-                       return -ENOMEM;
+                       goto dma_unmap_wqe_err;
 
                dseg->addr       = cpu_to_be64(dma_addr);
                dseg->lkey       = sq->mkey_be;
@@ -273,7 +273,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
                                            DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
-                       return -ENOMEM;
+                       goto dma_unmap_wqe_err;
 
                dseg->addr       = cpu_to_be64(dma_addr);
                dseg->lkey       = sq->mkey_be;
@@ -285,6 +285,10 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
        }
 
        return num_dma;
+
+dma_unmap_wqe_err:
+       mlx5e_dma_unmap_wqe_err(sq, num_dma);
+       return -ENOMEM;
 }
 
 static inline void
@@ -380,17 +384,15 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
        num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
                                          (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
        if (unlikely(num_dma < 0))
-               goto dma_unmap_wqe_err;
+               goto err_drop;
 
        mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
                             num_bytes, num_dma, wi, cseg);
 
        return NETDEV_TX_OK;
 
-dma_unmap_wqe_err:
+err_drop:
        sq->stats.dropped++;
-       mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
-
        dev_kfree_skb_any(skb);
 
        return NETDEV_TX_OK;
@@ -645,17 +647,15 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
        num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
                                          (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
        if (unlikely(num_dma < 0))
-               goto dma_unmap_wqe_err;
+               goto err_drop;
 
        mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
                             num_bytes, num_dma, wi, cseg);
 
        return NETDEV_TX_OK;
 
-dma_unmap_wqe_err:
+err_drop:
        sq->stats.dropped++;
-       mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
-
        dev_kfree_skb_any(skb);
 
        return NETDEV_TX_OK;
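
The mlx5e TX hunks above move cleanup of partially mapped WQEs into mlx5e_txwqe_build_dsegs() itself: on a DMA mapping failure the helper unwinds the mappings it has already created, so the caller only has to drop the skb. A generic sketch of that "unmap what you mapped so far" pattern; my_dma_seg/my_map_skb are illustrative, and the real driver records its mappings in the SQ ring rather than a caller-supplied array.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct my_dma_seg {
        dma_addr_t addr;
        unsigned int len;
};

/* Map the linear part plus all fragments; on failure unmap everything
 * mapped so far and return -ENOMEM, so the caller has nothing to undo.
 */
static int my_map_skb(struct device *dev, struct sk_buff *skb,
                      struct my_dma_seg *segs)
{
        int i, num_dma = 0;

        segs[num_dma].len = skb_headlen(skb);
        segs[num_dma].addr = dma_map_single(dev, skb->data,
                                            segs[num_dma].len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, segs[num_dma].addr))
                goto err_unmap;
        num_dma++;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                segs[num_dma].len = skb_frag_size(frag);
                segs[num_dma].addr = skb_frag_dma_map(dev, frag, 0,
                                                      segs[num_dma].len,
                                                      DMA_TO_DEVICE);
                if (dma_mapping_error(dev, segs[num_dma].addr))
                        goto err_unmap;
                num_dma++;
        }

        return num_dma;

err_unmap:
        while (--num_dma >= 0) {
                if (num_dma == 0)
                        dma_unmap_single(dev, segs[0].addr, segs[0].len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, segs[num_dma].addr,
                                       segs[num_dma].len, DMA_TO_DEVICE);
        }
        return -ENOMEM;
}
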
index c1c9497..1814f80 100644 (file)
@@ -34,6 +34,9 @@
 #include <linux/module.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/cmd.h>
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
 #include "mlx5_core.h"
 #include "fpga/core.h"
 #include "eswitch.h"
@@ -923,3 +926,28 @@ int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
        MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
 }
+
+/* This function should only be called after mlx5_cmd_force_teardown_hca */
+void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
+{
+       struct mlx5_eq_table *table = &dev->priv.eq_table;
+       struct mlx5_eq *eq;
+
+#ifdef CONFIG_RFS_ACCEL
+       if (dev->rmap) {
+               free_irq_cpu_rmap(dev->rmap);
+               dev->rmap = NULL;
+       }
+#endif
+       list_for_each_entry(eq, &table->comp_eqs_list, list)
+               free_irq(eq->irqn, eq);
+
+       free_irq(table->pages_eq.irqn, &table->pages_eq);
+       free_irq(table->async_eq.irqn, &table->async_eq);
+       free_irq(table->cmd_eq.irqn, &table->cmd_eq);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+       if (MLX5_CAP_GEN(dev, pg))
+               free_irq(table->pfault_eq.irqn, &table->pfault_eq);
+#endif
+       pci_free_irq_vectors(dev->pdev);
+}
index 332bc56..1352d13 100644 (file)
@@ -2175,26 +2175,35 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
        memset(vf_stats, 0, sizeof(*vf_stats));
        vf_stats->rx_packets =
                MLX5_GET_CTR(out, received_eth_unicast.packets) +
+               MLX5_GET_CTR(out, received_ib_unicast.packets) +
                MLX5_GET_CTR(out, received_eth_multicast.packets) +
+               MLX5_GET_CTR(out, received_ib_multicast.packets) +
                MLX5_GET_CTR(out, received_eth_broadcast.packets);
 
        vf_stats->rx_bytes =
                MLX5_GET_CTR(out, received_eth_unicast.octets) +
+               MLX5_GET_CTR(out, received_ib_unicast.octets) +
                MLX5_GET_CTR(out, received_eth_multicast.octets) +
+               MLX5_GET_CTR(out, received_ib_multicast.octets) +
                MLX5_GET_CTR(out, received_eth_broadcast.octets);
 
        vf_stats->tx_packets =
                MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
+               MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
                MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
+               MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
                MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
 
        vf_stats->tx_bytes =
                MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
+               MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
                MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
+               MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
                MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
 
        vf_stats->multicast =
-               MLX5_GET_CTR(out, received_eth_multicast.packets);
+               MLX5_GET_CTR(out, received_eth_multicast.packets) +
+               MLX5_GET_CTR(out, received_ib_multicast.packets);
 
        vf_stats->broadcast =
                MLX5_GET_CTR(out, received_eth_broadcast.packets);
index de51e7c..c39c169 100644 (file)
@@ -187,6 +187,7 @@ static void del_sw_ns(struct fs_node *node);
 static void del_sw_hw_rule(struct fs_node *node);
 static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
                                struct mlx5_flow_destination *d2);
+static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
 static struct mlx5_flow_rule *
 find_flow_rule(struct fs_fte *fte,
               struct mlx5_flow_destination *dest);
@@ -481,7 +482,8 @@ static void del_sw_hw_rule(struct fs_node *node)
 
        if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER  &&
            --fte->dests_size) {
-               modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
+               modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
+                             BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
                fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
                update_fte = true;
                goto out;
@@ -2351,23 +2353,27 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
 
 static int init_root_ns(struct mlx5_flow_steering *steering)
 {
+       int err;
+
        steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
        if (!steering->root_ns)
-               goto cleanup;
+               return -ENOMEM;
 
-       if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
-               goto cleanup;
+       err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
+       if (err)
+               goto out_err;
 
        set_prio_attrs(steering->root_ns);
-
-       if (create_anchor_flow_table(steering))
-               goto cleanup;
+       err = create_anchor_flow_table(steering);
+       if (err)
+               goto out_err;
 
        return 0;
 
-cleanup:
-       mlx5_cleanup_fs(steering->dev);
-       return -ENOMEM;
+out_err:
+       cleanup_root_ns(steering->root_ns);
+       steering->root_ns = NULL;
+       return err;
 }
 
 static void clean_tree(struct fs_node *node)
index 63a8ea3..e2c465b 100644 (file)
@@ -1587,6 +1587,14 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
 
        mlx5_enter_error_state(dev, true);
 
+       /* Some platforms require freeing the IRQs in the shutdown
+        * flow. If they aren't freed they can't be allocated after
+        * kexec. There is no need to cleanup the mlx5_core software
+        * contexts.
+        */
+       mlx5_irq_clear_affinity_hints(dev);
+       mlx5_core_eq_free_irqs(dev);
+
        return 0;
 }
 
index 7d001fe..023882d 100644 (file)
@@ -128,6 +128,8 @@ int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
                       u32 *out, int outlen);
 int mlx5_start_eqs(struct mlx5_core_dev *dev);
 void mlx5_stop_eqs(struct mlx5_core_dev *dev);
+/* This function should only be called after mlx5_cmd_force_teardown_hca */
+void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
 struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
 u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq);
 void mlx5_cq_tasklet_cb(unsigned long data);
index 93ea566..e13ac3b 100644 (file)
@@ -1100,11 +1100,11 @@ err_emad_init:
 err_alloc_lag_mapping:
        mlxsw_ports_fini(mlxsw_core);
 err_ports_init:
-       mlxsw_bus->fini(bus_priv);
-err_bus_init:
        if (!reload)
                devlink_resources_unregister(devlink, NULL);
 err_register_resources:
+       mlxsw_bus->fini(bus_priv);
+err_bus_init:
        if (!reload)
                devlink_free(devlink);
 err_devlink_alloc:
index c11c9a6..4ed0118 100644 (file)
@@ -1718,13 +1718,11 @@ __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
        struct net_device *dev = mlxsw_sp_port->dev;
        int err;
 
-       if (bridge_port->bridge_device->multicast_enabled) {
-               if (bridge_port->bridge_device->multicast_enabled) {
-                       err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid,
-                                                    false);
-                       if (err)
-                               netdev_err(dev, "Unable to remove port from SMID\n");
-               }
+       if (bridge_port->bridge_device->multicast_enabled &&
+           !bridge_port->mrouter) {
+               err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
+               if (err)
+                       netdev_err(dev, "Unable to remove port from SMID\n");
        }
 
        err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
index b3567a5..80df9a5 100644 (file)
@@ -183,17 +183,21 @@ static int
 nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
                        const struct tc_action *action,
                        struct nfp_fl_pre_tunnel *pre_tun,
-                       enum nfp_flower_tun_type tun_type)
+                       enum nfp_flower_tun_type tun_type,
+                       struct net_device *netdev)
 {
        size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
        struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
        u32 tmp_set_ip_tun_type_index = 0;
        /* Currently support one pre-tunnel so index is always 0. */
        int pretun_idx = 0;
+       struct net *net;
 
        if (ip_tun->options_len)
                return -EOPNOTSUPP;
 
+       net = dev_net(netdev);
+
        set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
        set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
 
@@ -204,6 +208,7 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
 
        set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
        set_tun->tun_id = ip_tun->key.tun_id;
+       set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
 
        /* Complete pre_tunnel action. */
        pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
@@ -511,7 +516,8 @@ nfp_flower_loop_action(const struct tc_action *a,
                *a_len += sizeof(struct nfp_fl_pre_tunnel);
 
                set_tun = (void *)&nfp_fl->action_data[*a_len];
-               err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type);
+               err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type,
+                                             netdev);
                if (err)
                        return err;
                *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
index b6c0fd0..bee4367 100644 (file)
@@ -190,7 +190,10 @@ struct nfp_fl_set_ipv4_udp_tun {
        __be16 reserved;
        __be64 tun_id __packed;
        __be32 tun_type_index;
-       __be32 extra[3];
+       __be16 reserved2;
+       u8 ttl;
+       u8 reserved3;
+       __be32 extra[2];
 };
 
 /* Metadata with L2 (1W/4B)
index ad02592..84e3b9f 100644 (file)
@@ -52,8 +52,6 @@
 
 #define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL
 
-#define NFP_FLOWER_FRAME_HEADROOM      158
-
 static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
 {
        return "FLOWER";
@@ -360,7 +358,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
                }
 
                SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
-               nfp_net_get_mac_addr(app->pf, port);
+               nfp_net_get_mac_addr(app->pf, repr, port);
 
                cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
                err = nfp_repr_init(app, repr,
@@ -559,22 +557,6 @@ static void nfp_flower_clean(struct nfp_app *app)
        app->priv = NULL;
 }
 
-static int
-nfp_flower_check_mtu(struct nfp_app *app, struct net_device *netdev,
-                    int new_mtu)
-{
-       /* The flower fw reserves NFP_FLOWER_FRAME_HEADROOM bytes of the
-        * supported max MTU to allow for appending tunnel headers. To prevent
-        * unexpected behaviour this needs to be accounted for.
-        */
-       if (new_mtu > netdev->max_mtu - NFP_FLOWER_FRAME_HEADROOM) {
-               nfp_err(app->cpp, "New MTU (%d) is not valid\n", new_mtu);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
 static bool nfp_flower_check_ack(struct nfp_flower_priv *app_priv)
 {
        bool ret;
@@ -656,7 +638,6 @@ const struct nfp_app_type app_flower = {
        .init           = nfp_flower_init,
        .clean          = nfp_flower_clean,
 
-       .check_mtu      = nfp_flower_check_mtu,
        .repr_change_mtu  = nfp_flower_repr_change_mtu,
 
        .vnic_alloc     = nfp_flower_vnic_alloc,
index 2a2f2fb..b9618c3 100644 (file)
@@ -69,7 +69,7 @@ int nfp_app_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
        if (err)
                return err < 0 ? err : 0;
 
-       nfp_net_get_mac_addr(app->pf, nn->port);
+       nfp_net_get_mac_addr(app->pf, nn->dp.netdev, nn->port);
 
        return 0;
 }
index add46e2..4221108 100644 (file)
@@ -171,7 +171,9 @@ void nfp_net_pci_remove(struct nfp_pf *pf);
 int nfp_hwmon_register(struct nfp_pf *pf);
 void nfp_hwmon_unregister(struct nfp_pf *pf);
 
-void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port);
+void
+nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev,
+                    struct nfp_port *port);
 
 bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
 
index 15fa47f..45cd209 100644 (file)
 /**
  * nfp_net_get_mac_addr() - Get the MAC address.
  * @pf:       NFP PF handle
+ * @netdev:   net_device to set MAC address on
  * @port:     NFP port structure
  *
  * First try to get the MAC address from NSP ETH table. If that
  * fails generate a random address.
  */
-void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port)
+void
+nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev,
+                    struct nfp_port *port)
 {
        struct nfp_eth_table_port *eth_port;
 
        eth_port = __nfp_port_get_eth_port(port);
        if (!eth_port) {
-               eth_hw_addr_random(port->netdev);
+               eth_hw_addr_random(netdev);
                return;
        }
 
-       ether_addr_copy(port->netdev->dev_addr, eth_port->mac_addr);
-       ether_addr_copy(port->netdev->perm_addr, eth_port->mac_addr);
+       ether_addr_copy(netdev->dev_addr, eth_port->mac_addr);
+       ether_addr_copy(netdev->perm_addr, eth_port->mac_addr);
 }
 
 static struct nfp_eth_table_port *
@@ -511,16 +514,18 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf)
                return PTR_ERR(mem);
        }
 
-       min_size =  NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1);
-       pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats",
-                                         "net.macstats", min_size,
-                                         &pf->mac_stats_bar);
-       if (IS_ERR(pf->mac_stats_mem)) {
-               if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) {
-                       err = PTR_ERR(pf->mac_stats_mem);
-                       goto err_unmap_ctrl;
+       if (pf->eth_tbl) {
+               min_size =  NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1);
+               pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats",
+                                                 "net.macstats", min_size,
+                                                 &pf->mac_stats_bar);
+               if (IS_ERR(pf->mac_stats_mem)) {
+                       if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) {
+                               err = PTR_ERR(pf->mac_stats_mem);
+                               goto err_unmap_ctrl;
+                       }
+                       pf->mac_stats_mem = NULL;
                }
-               pf->mac_stats_mem = NULL;
        }
 
        pf->vf_cfg_mem = nfp_net_pf_map_rtsym(pf, "net.vfcfg",
index 27364b7..b092894 100644 (file)
@@ -1170,7 +1170,7 @@ static void *nixge_get_nvmem_address(struct device *dev)
 
        cell = nvmem_cell_get(dev, "address");
        if (IS_ERR(cell))
-               return cell;
+               return NULL;
 
        mac = nvmem_cell_read(cell, &cell_size);
        nvmem_cell_put(cell);
@@ -1183,7 +1183,7 @@ static int nixge_probe(struct platform_device *pdev)
        struct nixge_priv *priv;
        struct net_device *ndev;
        struct resource *dmares;
-       const char *mac_addr;
+       const u8 *mac_addr;
        int err;
 
        ndev = alloc_etherdev(sizeof(*priv));
@@ -1202,10 +1202,12 @@ static int nixge_probe(struct platform_device *pdev)
        ndev->max_mtu = NIXGE_JUMBO_MTU;
 
        mac_addr = nixge_get_nvmem_address(&pdev->dev);
-       if (mac_addr && is_valid_ether_addr(mac_addr))
+       if (mac_addr && is_valid_ether_addr(mac_addr)) {
                ether_addr_copy(ndev->dev_addr, mac_addr);
-       else
+               kfree(mac_addr);
+       } else {
                eth_hw_addr_random(ndev);
+       }
 
        priv = netdev_priv(ndev);
        priv->ndev = ndev;
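
The nixge hunks above fix two problems in the nvmem MAC lookup: the helper used to return the ERR_PTR from nvmem_cell_get(), which the caller would then pass to is_valid_ether_addr(), and the buffer returned by nvmem_cell_read() was never freed. A sketch of the corrected flow; the "address" cell name follows the driver, the my_* names are illustrative, and the IS_ERR() check on the read result is an extra guard added here for safety rather than something taken from the patch.

#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>

/* Return a kmalloc'd MAC address read from the "address" nvmem cell, or
 * NULL when the cell is absent or unreadable.  The caller must kfree().
 */
static u8 *my_get_nvmem_mac(struct device *dev)
{
        struct nvmem_cell *cell;
        size_t len;
        u8 *mac;

        cell = nvmem_cell_get(dev, "address");
        if (IS_ERR(cell))
                return NULL;            /* never hand an ERR_PTR to callers */

        mac = nvmem_cell_read(cell, &len);
        nvmem_cell_put(cell);
        if (IS_ERR(mac))                /* extra guard, see note above */
                return NULL;

        return mac;
}

static void my_set_mac(struct net_device *ndev, struct device *dev)
{
        u8 *mac = my_get_nvmem_mac(dev);

        if (mac && is_valid_ether_addr(mac))
                ether_addr_copy(ndev->dev_addr, mac);
        else
                eth_hw_addr_random(ndev);

        kfree(mac);                     /* kfree(NULL) is a no-op */
}
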
index e874504..8667799 100644 (file)
@@ -115,8 +115,7 @@ int qed_l2_alloc(struct qed_hwfn *p_hwfn)
 
 void qed_l2_setup(struct qed_hwfn *p_hwfn)
 {
-       if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
-           p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+       if (!QED_IS_L2_PERSONALITY(p_hwfn))
                return;
 
        mutex_init(&p_hwfn->p_l2_info->lock);
@@ -126,8 +125,7 @@ void qed_l2_free(struct qed_hwfn *p_hwfn)
 {
        u32 i;
 
-       if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
-           p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
+       if (!QED_IS_L2_PERSONALITY(p_hwfn))
                return;
 
        if (!p_hwfn->p_l2_info)
index 74fc626..3850281 100644 (file)
@@ -2370,7 +2370,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
        u8 flags = 0;
 
        if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
-               DP_INFO(cdev, "Cannot transmit a checksumed packet\n");
+               DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
                return -EINVAL;
        }
 
index 9854aa9..7870ae2 100644 (file)
@@ -680,7 +680,7 @@ static int qed_nic_stop(struct qed_dev *cdev)
                        tasklet_disable(p_hwfn->sp_dpc);
                        p_hwfn->b_sp_dpc_enabled = false;
                        DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
-                                  "Disabled sp taskelt [hwfn %d] at %p\n",
+                                  "Disabled sp tasklet [hwfn %d] at %p\n",
                                   i, p_hwfn->sp_dpc);
                }
        }
index fb7c2d1..6acfd43 100644 (file)
@@ -848,7 +848,7 @@ int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
 
        if (!(qp->resp_offloaded)) {
                DP_NOTICE(p_hwfn,
-                         "The responder's qp should be offloded before requester's\n");
+                         "The responder's qp should be offloaded before requester's\n");
                return -EINVAL;
        }
 
index 50b142f..1900bf7 100644 (file)
@@ -238,7 +238,7 @@ qede_rdma_get_free_event_node(struct qede_dev *edev)
        }
 
        if (!found) {
-               event_node = kzalloc(sizeof(*event_node), GFP_KERNEL);
+               event_node = kzalloc(sizeof(*event_node), GFP_ATOMIC);
                if (!event_node) {
                        DP_NOTICE(edev,
                                  "qedr: Could not allocate memory for rdma work\n");
index d24b47b..d118da5 100644 (file)
@@ -2224,7 +2224,7 @@ static void rtl8139_poll_controller(struct net_device *dev)
        struct rtl8139_private *tp = netdev_priv(dev);
        const int irq = tp->pci_dev->irq;
 
-       disable_irq(irq);
+       disable_irq_nosync(irq);
        rtl8139_interrupt(irq, dev);
        enable_irq(irq);
 }
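
The one-line 8139too change above matters because .ndo_poll_controller can run in atomic context (netconsole/netpoll): disable_irq() may sleep while it waits for a running handler to finish, whereas disable_irq_nosync() only masks the line. A minimal sketch of that poll_controller shape; my_priv and my_interrupt are stand-ins for the driver's own structures and ISR.

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

struct my_priv {
        struct pci_dev *pci_dev;
};

static irqreturn_t my_interrupt(int irq, void *dev_instance)
{
        /* real driver: read the ISR, process RX/TX, ack the chip */
        return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* May run in atomic context, so it must not call anything that can sleep
 * (such as disable_irq()).
 */
static void my_poll_controller(struct net_device *dev)
{
        struct my_priv *tp = netdev_priv(dev);
        const int irq = tp->pci_dev->irq;

        disable_irq_nosync(irq);        /* mask only, don't wait for handlers */
        my_interrupt(irq, dev);
        enable_irq(irq);
}
#endif
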
index 604ae78..c7aac1f 100644 (file)
@@ -4981,6 +4981,9 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
 static void rtl_pll_power_up(struct rtl8169_private *tp)
 {
        rtl_generic_op(tp, tp->pll_power_ops.up);
+
+       /* give MAC/PHY some time to resume */
+       msleep(20);
 }
 
 static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
index 83ce229..d90a7b1 100644 (file)
@@ -3999,29 +3999,6 @@ static void efx_ef10_prepare_flr(struct efx_nic *efx)
        atomic_set(&efx->active_queues, 0);
 }
 
-static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
-                                 const struct efx_filter_spec *right)
-{
-       if ((left->match_flags ^ right->match_flags) |
-           ((left->flags ^ right->flags) &
-            (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
-               return false;
-
-       return memcmp(&left->outer_vid, &right->outer_vid,
-                     sizeof(struct efx_filter_spec) -
-                     offsetof(struct efx_filter_spec, outer_vid)) == 0;
-}
-
-static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
-{
-       BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
-       return jhash2((const u32 *)&spec->outer_vid,
-                     (sizeof(struct efx_filter_spec) -
-                      offsetof(struct efx_filter_spec, outer_vid)) / 4,
-                     0);
-       /* XXX should we randomise the initval? */
-}
-
 /* Decide whether a filter should be exclusive or else should allow
  * delivery to additional recipients.  Currently we decide that
  * filters for specific local unicast MAC and IP addresses are
@@ -4346,7 +4323,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
                goto out_unlock;
        match_pri = rc;
 
-       hash = efx_ef10_filter_hash(spec);
+       hash = efx_filter_spec_hash(spec);
        is_mc_recip = efx_filter_is_mc_recipient(spec);
        if (is_mc_recip)
                bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
@@ -4378,7 +4355,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
                if (!saved_spec) {
                        if (ins_index < 0)
                                ins_index = i;
-               } else if (efx_ef10_filter_equal(spec, saved_spec)) {
+               } else if (efx_filter_spec_equal(spec, saved_spec)) {
                        if (spec->priority < saved_spec->priority &&
                            spec->priority != EFX_FILTER_PRI_AUTO) {
                                rc = -EPERM;
@@ -4762,27 +4739,63 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
 static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
                                           unsigned int filter_idx)
 {
+       struct efx_filter_spec *spec, saved_spec;
        struct efx_ef10_filter_table *table;
-       struct efx_filter_spec *spec;
-       bool ret;
+       struct efx_arfs_rule *rule = NULL;
+       bool ret = true, force = false;
+       u16 arfs_id;
 
        down_read(&efx->filter_sem);
        table = efx->filter_state;
        down_write(&table->lock);
        spec = efx_ef10_filter_entry_spec(table, filter_idx);
 
-       if (!spec || spec->priority != EFX_FILTER_PRI_HINT) {
-               ret = true;
+       if (!spec || spec->priority != EFX_FILTER_PRI_HINT)
                goto out_unlock;
-       }
 
-       if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, 0)) {
+       spin_lock_bh(&efx->rps_hash_lock);
+       if (!efx->rps_hash_table) {
+               /* In the absence of the table, we always return 0 to ARFS. */
+               arfs_id = 0;
+       } else {
+               rule = efx_rps_hash_find(efx, spec);
+               if (!rule)
+                       /* ARFS table doesn't know of this filter, so remove it */
+                       goto expire;
+               arfs_id = rule->arfs_id;
+               ret = efx_rps_check_rule(rule, filter_idx, &force);
+               if (force)
+                       goto expire;
+               if (!ret) {
+                       spin_unlock_bh(&efx->rps_hash_lock);
+                       goto out_unlock;
+               }
+       }
+       if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, arfs_id))
                ret = false;
-               goto out_unlock;
+       else if (rule)
+               rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
+expire:
+       saved_spec = *spec; /* remove operation will kfree spec */
+       spin_unlock_bh(&efx->rps_hash_lock);
+       /* At this point (since we dropped the lock), another thread might queue
+        * up a fresh insertion request (but the actual insertion will be held
+        * up by our possession of the filter table lock).  In that case, it
+        * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that
+        * the rule is not removed by efx_rps_hash_del() below.
+        */
+       if (ret)
+               ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority,
+                                                     filter_idx, true) == 0;
+       /* While we can't safely dereference rule (we dropped the lock), we can
+        * still test it for NULL.
+        */
+       if (ret && rule) {
+               /* Expiring, so remove entry from ARFS table */
+               spin_lock_bh(&efx->rps_hash_lock);
+               efx_rps_hash_del(efx, &saved_spec);
+               spin_unlock_bh(&efx->rps_hash_lock);
        }
-
-       ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority,
-                                             filter_idx, true) == 0;
 out_unlock:
        up_write(&table->lock);
        up_read(&efx->filter_sem);
index 692dd72..a4ebd87 100644 (file)
@@ -3027,6 +3027,10 @@ static int efx_init_struct(struct efx_nic *efx,
        mutex_init(&efx->mac_lock);
 #ifdef CONFIG_RFS_ACCEL
        mutex_init(&efx->rps_mutex);
+       spin_lock_init(&efx->rps_hash_lock);
+       /* Failure to allocate is not fatal, but may degrade ARFS performance */
+       efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
+                                     sizeof(*efx->rps_hash_table), GFP_KERNEL);
 #endif
        efx->phy_op = &efx_dummy_phy_operations;
        efx->mdio.dev = net_dev;
@@ -3070,6 +3074,10 @@ static void efx_fini_struct(struct efx_nic *efx)
 {
        int i;
 
+#ifdef CONFIG_RFS_ACCEL
+       kfree(efx->rps_hash_table);
+#endif
+
        for (i = 0; i < EFX_MAX_CHANNELS; i++)
                kfree(efx->channel[i]);
 
@@ -3092,6 +3100,141 @@ void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
        stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
 }
 
+bool efx_filter_spec_equal(const struct efx_filter_spec *left,
+                          const struct efx_filter_spec *right)
+{
+       if ((left->match_flags ^ right->match_flags) |
+           ((left->flags ^ right->flags) &
+            (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
+               return false;
+
+       return memcmp(&left->outer_vid, &right->outer_vid,
+                     sizeof(struct efx_filter_spec) -
+                     offsetof(struct efx_filter_spec, outer_vid)) == 0;
+}
+
+u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
+{
+       BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
+       return jhash2((const u32 *)&spec->outer_vid,
+                     (sizeof(struct efx_filter_spec) -
+                      offsetof(struct efx_filter_spec, outer_vid)) / 4,
+                     0);
+}
+
+#ifdef CONFIG_RFS_ACCEL
+bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
+                       bool *force)
+{
+       if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
+               /* ARFS is currently updating this entry, leave it */
+               return false;
+       }
+       if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
+               /* ARFS tried and failed to update this, so it's probably out
+                * of date.  Remove the filter and the ARFS rule entry.
+                */
+               rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
+               *force = true;
+               return true;
+       } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
+               /* ARFS has moved on, so old filter is not needed.  Since we did
+                * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
+                * not be removed by efx_rps_hash_del() subsequently.
+                */
+               *force = true;
+               return true;
+       }
+       /* Remove it iff ARFS wants to. */
+       return true;
+}
+
+struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
+                                      const struct efx_filter_spec *spec)
+{
+       u32 hash = efx_filter_spec_hash(spec);
+
+       WARN_ON(!spin_is_locked(&efx->rps_hash_lock));
+       if (!efx->rps_hash_table)
+               return NULL;
+       return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
+}
+
+struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
+                                       const struct efx_filter_spec *spec)
+{
+       struct efx_arfs_rule *rule;
+       struct hlist_head *head;
+       struct hlist_node *node;
+
+       head = efx_rps_hash_bucket(efx, spec);
+       if (!head)
+               return NULL;
+       hlist_for_each(node, head) {
+               rule = container_of(node, struct efx_arfs_rule, node);
+               if (efx_filter_spec_equal(spec, &rule->spec))
+                       return rule;
+       }
+       return NULL;
+}
+
+struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
+                                      const struct efx_filter_spec *spec,
+                                      bool *new)
+{
+       struct efx_arfs_rule *rule;
+       struct hlist_head *head;
+       struct hlist_node *node;
+
+       head = efx_rps_hash_bucket(efx, spec);
+       if (!head)
+               return NULL;
+       hlist_for_each(node, head) {
+               rule = container_of(node, struct efx_arfs_rule, node);
+               if (efx_filter_spec_equal(spec, &rule->spec)) {
+                       *new = false;
+                       return rule;
+               }
+       }
+       rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
+       *new = true;
+       if (rule) {
+               memcpy(&rule->spec, spec, sizeof(rule->spec));
+               hlist_add_head(&rule->node, head);
+       }
+       return rule;
+}
+
+void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
+{
+       struct efx_arfs_rule *rule;
+       struct hlist_head *head;
+       struct hlist_node *node;
+
+       head = efx_rps_hash_bucket(efx, spec);
+       if (WARN_ON(!head))
+               return;
+       hlist_for_each(node, head) {
+               rule = container_of(node, struct efx_arfs_rule, node);
+               if (efx_filter_spec_equal(spec, &rule->spec)) {
+                       /* Someone already reused the entry.  We know that if
+                        * this check doesn't fire (i.e. filter_id == REMOVING)
+                        * then the REMOVING mark was put there by our caller,
+                        * because the caller holds a lock on the filter table
+                        * and only holders of that lock set REMOVING.
+                        */
+                       if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
+                               return;
+                       hlist_del(node);
+                       kfree(rule);
+                       return;
+               }
+       }
+       /* We didn't find it. */
+       WARN_ON(1);
+}
+#endif
+
 /* RSS contexts.  We're using linked lists and crappy O(n) algorithms, because
  * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
  */
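
The helpers above are consumed by the insertion and expiry paths later in this patch. The sketch below (not part of the patch) shows the calling convention they assume: callers hold efx->rps_hash_lock around every lookup, which is what the spin_is_locked() warning in efx_rps_hash_bucket() checks. It mirrors the pattern efx_filter_rfs() uses further down.

/* Sketch only: how a caller tracks a new ARFS rule in the hash table. */
static void example_track_arfs_rule(struct efx_nic *efx,
				    const struct efx_filter_spec *spec,
				    u16 rxq_index)
{
	struct efx_arfs_rule *rule;
	bool new;

	spin_lock_bh(&efx->rps_hash_lock);
	rule = efx_rps_hash_add(efx, spec, &new);
	if (rule) {
		if (new)
			rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
		rule->rxq_index = rxq_index;
		rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
	}
	spin_unlock_bh(&efx->rps_hash_lock);
}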
index a3140e1..3f759eb 100644 (file)
@@ -186,6 +186,27 @@ static inline void efx_filter_rfs_expire(struct work_struct *data) {}
 #endif
 bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
 
+bool efx_filter_spec_equal(const struct efx_filter_spec *left,
+                          const struct efx_filter_spec *right);
+u32 efx_filter_spec_hash(const struct efx_filter_spec *spec);
+
+#ifdef CONFIG_RFS_ACCEL
+bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
+                       bool *force);
+
+struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
+                                       const struct efx_filter_spec *spec);
+
+/* @new is written to indicate if entry was newly added (true) or if an old
+ * entry was found and returned (false).
+ */
+struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
+                                      const struct efx_filter_spec *spec,
+                                      bool *new);
+
+void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec);
+#endif
+
 /* RSS contexts */
 struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
 struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
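
The hash and equality helpers declared here key only on the tail of struct efx_filter_spec starting at outer_vid; match_flags and the RX/TX flags are compared separately, so head-of-struct bookkeeping never perturbs the hash. A self-contained sketch of the same offsetof-based keying technique follows, using a made-up struct and a trivial FNV-1a hash in place of jhash2(); the names are illustrative, not from the driver.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct demo_spec {
	uint32_t flags;		/* bookkeeping: excluded from the key */
	uint32_t vid;		/* key region starts here */
	uint32_t proto;
	uint32_t dst_addr;
};

#define DEMO_KEY_OFF	offsetof(struct demo_spec, vid)
#define DEMO_KEY_LEN	(sizeof(struct demo_spec) - DEMO_KEY_OFF)

static uint32_t demo_spec_hash(const struct demo_spec *s)
{
	const uint8_t *p = (const uint8_t *)s + DEMO_KEY_OFF;
	uint32_t h = 2166136261u;	/* FNV-1a, stand-in for jhash2() */
	size_t i;

	for (i = 0; i < DEMO_KEY_LEN; i++)
		h = (h ^ p[i]) * 16777619u;
	return h;
}

static int demo_spec_equal(const struct demo_spec *a, const struct demo_spec *b)
{
	return !memcmp((const uint8_t *)a + DEMO_KEY_OFF,
		       (const uint8_t *)b + DEMO_KEY_OFF, DEMO_KEY_LEN);
}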
index 7174ef5..c72adf8 100644 (file)
@@ -2905,18 +2905,45 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
 {
        struct efx_farch_filter_state *state = efx->filter_state;
        struct efx_farch_filter_table *table;
-       bool ret = false;
+       bool ret = false, force = false;
+       u16 arfs_id;
 
        down_write(&state->lock);
+       spin_lock_bh(&efx->rps_hash_lock);
        table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
        if (test_bit(index, table->used_bitmap) &&
-           table->spec[index].priority == EFX_FILTER_PRI_HINT &&
-           rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
-                               flow_id, 0)) {
-               efx_farch_filter_table_clear_entry(efx, table, index);
-               ret = true;
+           table->spec[index].priority == EFX_FILTER_PRI_HINT) {
+               struct efx_arfs_rule *rule = NULL;
+               struct efx_filter_spec spec;
+
+               efx_farch_filter_to_gen_spec(&spec, &table->spec[index]);
+               if (!efx->rps_hash_table) {
+                       /* In the absence of the table, we always returned 0 to
+                        * ARFS, so use the same to query it.
+                        */
+                       arfs_id = 0;
+               } else {
+                       rule = efx_rps_hash_find(efx, &spec);
+                       if (!rule) {
+                               /* ARFS table doesn't know of this filter, remove it */
+                               force = true;
+                       } else {
+                               arfs_id = rule->arfs_id;
+                               if (!efx_rps_check_rule(rule, index, &force))
+                                       goto out_unlock;
+                       }
+               }
+               if (force || rps_may_expire_flow(efx->net_dev, spec.dmaq_id,
+                                                flow_id, arfs_id)) {
+                       if (rule)
+                               rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
+                       efx_rps_hash_del(efx, &spec);
+                       efx_farch_filter_table_clear_entry(efx, table, index);
+                       ret = true;
+               }
        }
-
+out_unlock:
+       spin_unlock_bh(&efx->rps_hash_lock);
        up_write(&state->lock);
        return ret;
 }
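
For readers tracing the expiry hunk above: it nests rps_hash_lock inside the filter-table lock, and its decision order reduces to the sketch below. Locking and the REMOVING/efx_rps_hash_del() bookkeeping are elided; all names come from the patch itself.

/* Sketch of the expiry decision order; not a drop-in replacement. */
static bool example_should_expire(struct efx_nic *efx,
				  const struct efx_filter_spec *spec,
				  unsigned int index, u32 flow_id)
{
	struct efx_arfs_rule *rule;
	bool force = false;
	u16 arfs_id = 0;	/* what was reported when no table exists */

	if (efx->rps_hash_table) {
		rule = efx_rps_hash_find(efx, spec);
		if (!rule)
			force = true;	/* ARFS no longer knows this filter */
		else if (!efx_rps_check_rule(rule, index, &force))
			return false;	/* insertion still in flight: keep it */
		else
			arfs_id = rule->arfs_id;
	}
	return force || rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
					    flow_id, arfs_id);
}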
index eea3808..6556892 100644 (file)
@@ -734,6 +734,35 @@ struct efx_rss_context {
 };
 
 #ifdef CONFIG_RFS_ACCEL
+/* Order of these is important, since filter_id >= %EFX_ARFS_FILTER_ID_PENDING
+ * is used to test if filter does or will exist.
+ */
+#define EFX_ARFS_FILTER_ID_PENDING     -1
+#define EFX_ARFS_FILTER_ID_ERROR       -2
+#define EFX_ARFS_FILTER_ID_REMOVING    -3
+/**
+ * struct efx_arfs_rule - record of an ARFS filter and its IDs
+ * @node: linkage into hash table
+ * @spec: details of the filter (used as key for hash table).  Use efx->type to
+ *     determine which member to use.
+ * @rxq_index: channel to which the filter will steer traffic.
+ * @arfs_id: filter ID which was returned to ARFS
+ * @filter_id: index in software filter table.  May be
+ *     %EFX_ARFS_FILTER_ID_PENDING if filter was not inserted yet,
+ *     %EFX_ARFS_FILTER_ID_ERROR if filter insertion failed, or
+ *     %EFX_ARFS_FILTER_ID_REMOVING if expiry is currently removing the filter.
+ */
+struct efx_arfs_rule {
+       struct hlist_node node;
+       struct efx_filter_spec spec;
+       u16 rxq_index;
+       u16 arfs_id;
+       s32 filter_id;
+};
+
+/* Size chosen so that the table is one page (4kB) */
+#define EFX_ARFS_HASH_TABLE_SIZE       512
+
 /**
  * struct efx_async_filter_insertion - Request to asynchronously insert a filter
  * @net_dev: Reference to the netdevice
@@ -873,6 +902,10 @@ struct efx_async_filter_insertion {
  *     @rps_expire_channel's @rps_flow_id
  * @rps_slot_map: bitmap of in-flight entries in @rps_slot
  * @rps_slot: array of ARFS insertion requests for efx_filter_rfs_work()
+ * @rps_hash_lock: Protects ARFS filter mapping state (@rps_hash_table and
+ *     @rps_next_id).
+ * @rps_hash_table: Mapping between ARFS filters and their various IDs
+ * @rps_next_id: next arfs_id for an ARFS filter
  * @active_queues: Count of RX and TX queues that haven't been flushed and drained.
  * @rxq_flush_pending: Count of number of receive queues that need to be flushed.
  *     Decremented when the efx_flush_rx_queue() is called.
@@ -1029,6 +1062,9 @@ struct efx_nic {
        unsigned int rps_expire_index;
        unsigned long rps_slot_map;
        struct efx_async_filter_insertion rps_slot[EFX_RPS_MAX_IN_FLIGHT];
+       spinlock_t rps_hash_lock;
+       struct hlist_head *rps_hash_table;
+       u32 rps_next_id;
 #endif
 
        atomic_t active_queues;
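
The three fields added to struct efx_nic above are torn down by the kfree() in the efx.c hunk earlier; the matching setup is outside this excerpt. The following is only a hedged sketch of what that initialisation would look like, sized per the one-page comment (512 buckets times sizeof(struct hlist_head), i.e. 4096 bytes on a 64-bit build where an hlist_head is a single pointer).

#ifdef CONFIG_RFS_ACCEL
	/* Sketch: assumed probe-time setup pairing with the kfree() above. */
	spin_lock_init(&efx->rps_hash_lock);
	efx->rps_next_id = 0;
	efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
				      sizeof(*efx->rps_hash_table),
				      GFP_KERNEL);
	/* A NULL table is tolerated: every user checks rps_hash_table first. */
#endif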
index 9c593c6..d2e254f 100644 (file)
@@ -834,9 +834,31 @@ static void efx_filter_rfs_work(struct work_struct *data)
        struct efx_nic *efx = netdev_priv(req->net_dev);
        struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
        int slot_idx = req - efx->rps_slot;
+       struct efx_arfs_rule *rule;
+       u16 arfs_id = 0;
        int rc;
 
        rc = efx->type->filter_insert(efx, &req->spec, true);
+       if (rc >= 0)
+               rc %= efx->type->max_rx_ip_filters;
+       if (efx->rps_hash_table) {
+               spin_lock_bh(&efx->rps_hash_lock);
+               rule = efx_rps_hash_find(efx, &req->spec);
+               /* The rule might have already gone, if someone else's request
+                * for the same spec was already worked and then expired before
+                * we got around to our work.  In that case we have nothing
+                * tying us to an arfs_id, meaning that as soon as the filter
+                * is considered for expiry it will be removed.
+                */
+               if (rule) {
+                       if (rc < 0)
+                               rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
+                       else
+                               rule->filter_id = rc;
+                       arfs_id = rule->arfs_id;
+               }
+               spin_unlock_bh(&efx->rps_hash_lock);
+       }
        if (rc >= 0) {
                /* Remember this so we can check whether to expire the filter
                 * later.
@@ -848,18 +870,18 @@ static void efx_filter_rfs_work(struct work_struct *data)
 
                if (req->spec.ether_type == htons(ETH_P_IP))
                        netif_info(efx, rx_status, efx->net_dev,
-                                  "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
+                                  "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
                                   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
                                   req->spec.rem_host, ntohs(req->spec.rem_port),
                                   req->spec.loc_host, ntohs(req->spec.loc_port),
-                                  req->rxq_index, req->flow_id, rc);
+                                  req->rxq_index, req->flow_id, rc, arfs_id);
                else
                        netif_info(efx, rx_status, efx->net_dev,
-                                  "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
+                                  "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
                                   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
                                   req->spec.rem_host, ntohs(req->spec.rem_port),
                                   req->spec.loc_host, ntohs(req->spec.loc_port),
-                                  req->rxq_index, req->flow_id, rc);
+                                  req->rxq_index, req->flow_id, rc, arfs_id);
        }
 
        /* Release references */
@@ -872,8 +894,10 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
 {
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_async_filter_insertion *req;
+       struct efx_arfs_rule *rule;
        struct flow_keys fk;
        int slot_idx;
+       bool new;
        int rc;
 
        /* find a free slot */
@@ -926,12 +950,42 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
        req->spec.rem_port = fk.ports.src;
        req->spec.loc_port = fk.ports.dst;
 
+       if (efx->rps_hash_table) {
+               /* Add it to ARFS hash table */
+               spin_lock(&efx->rps_hash_lock);
+               rule = efx_rps_hash_add(efx, &req->spec, &new);
+               if (!rule) {
+                       rc = -ENOMEM;
+                       goto out_unlock;
+               }
+               if (new)
+                       rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
+               rc = rule->arfs_id;
+               /* Skip if existing or pending filter already does the right thing */
+               if (!new && rule->rxq_index == rxq_index &&
+                   rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
+                       goto out_unlock;
+               rule->rxq_index = rxq_index;
+               rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
+               spin_unlock(&efx->rps_hash_lock);
+       } else {
+               /* Without an ARFS hash table, we just use arfs_id 0 for all
+                * filters.  This means if multiple flows hash to the same
+                * flow_id, all but the most recently touched will be eligible
+                * for expiry.
+                */
+               rc = 0;
+       }
+
+       /* Queue the request */
        dev_hold(req->net_dev = net_dev);
        INIT_WORK(&req->work, efx_filter_rfs_work);
        req->rxq_index = rxq_index;
        req->flow_id = flow_id;
        schedule_work(&req->work);
-       return 0;
+       return rc;
+out_unlock:
+       spin_unlock(&efx->rps_hash_lock);
 out_clear:
        clear_bit(slot_idx, &efx->rps_slot_map);
        return rc;
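
Two details of the hunk above deserve a note. The arfs_id counter advances modulo RPS_NO_FILTER, so the value reported to ARFS never collides with what is, by convention, the networking core's sentinel for "no filter programmed" (an assumption about the core, not spelled out in this patch). The "already does the right thing" test relies on the sentinel ordering documented in net_driver.h, as in this sketch:

/* filter_id >= EFX_ARFS_FILTER_ID_PENDING (-1) is true for a pending (-1)
 * or successfully inserted (>= 0) filter, and false for the ERROR (-2) and
 * REMOVING (-3) sentinels, hence the "order of these is important" note on
 * their definitions.
 */
static inline bool example_filter_exists_or_pending(s32 filter_id)
{
	return filter_id >= EFX_ARFS_FILTER_ID_PENDING;
}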
index f081de4..88c1247 100644 (file)
@@ -3443,7 +3443,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
 
                len = (val & RCR_ENTRY_L2_LEN) >>
                        RCR_ENTRY_L2_LEN_SHIFT;
-               len -= ETH_FCS_LEN;
+               append_size = len + ETH_HLEN + ETH_FCS_LEN;
 
                addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
                        RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
@@ -3453,7 +3453,6 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
                                         RCR_ENTRY_PKTBUFSZ_SHIFT];
 
                off = addr & ~PAGE_MASK;
-               append_size = rcr_size;
                if (num_rcr == 1) {
                        int ptype;
 
@@ -3466,7 +3465,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
                        else
                                skb_checksum_none_assert(skb);
                } else if (!(val & RCR_ENTRY_MULTI))
-                       append_size = len - skb->len;
+                       append_size = append_size - skb->len;
 
                niu_rx_skb_append(skb, page, off, append_size, rcr_size);
                if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
index 3037127..28d893b 100644 (file)
@@ -129,7 +129,7 @@ do {                                                                \
 
 #define RX_PRIORITY_MAPPING    0x76543210
 #define TX_PRIORITY_MAPPING    0x33221100
-#define CPDMA_TX_PRIORITY_MAP  0x01234567
+#define CPDMA_TX_PRIORITY_MAP  0x76543210
 
 #define CPSW_VLAN_AWARE                BIT(1)
 #define CPSW_RX_VLAN_ENCAP     BIT(2)
@@ -1340,6 +1340,8 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
        cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
                           HOST_PORT_NUM, ALE_VLAN |
                           ALE_SECURE, slave->port_vlan);
+       cpsw_ale_control_set(cpsw->ale, slave_port,
+                            ALE_PORT_DROP_UNKNOWN_VLAN, 1);
 }
 
 static void soft_reset_slave(struct cpsw_slave *slave)
index ecc8495..da07ccd 100644 (file)
@@ -1840,7 +1840,8 @@ static int netvsc_vf_join(struct net_device *vf_netdev,
                goto rx_handler_failed;
        }
 
-       ret = netdev_upper_dev_link(vf_netdev, ndev, NULL);
+       ret = netdev_master_upper_dev_link(vf_netdev, ndev,
+                                          NULL, NULL, NULL);
        if (ret != 0) {
                netdev_err(vf_netdev,
                           "can not set master device %s (err = %d)\n",
index 6b127be..e7ca5b5 100644 (file)
@@ -1288,7 +1288,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
                   rndis_device->link_state ? "down" : "up");
 
        if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
-               return net_device;
+               goto out;
 
        rndis_filter_query_link_speed(rndis_device, net_device);
 
index 9fb9b56..4f684cb 100644 (file)
@@ -1045,7 +1045,7 @@ static int atusb_probe(struct usb_interface *interface,
        atusb->tx_dr.bRequest = ATUSB_TX;
        atusb->tx_dr.wValue = cpu_to_le16(0);
 
-       atusb->tx_urb = usb_alloc_urb(0, GFP_ATOMIC);
+       atusb->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!atusb->tx_urb)
                goto fail;
 
index 55a22c7..de0d7f2 100644 (file)
@@ -1267,7 +1267,7 @@ mcr20a_probe(struct spi_device *spi)
        ret = mcr20a_get_platform_data(spi, pdata);
        if (ret < 0) {
                dev_crit(&spi->dev, "mcr20a_get_platform_data failed.\n");
-               return ret;
+               goto free_pdata;
        }
 
        /* init reset gpio */
@@ -1275,7 +1275,7 @@ mcr20a_probe(struct spi_device *spi)
                ret = devm_gpio_request_one(&spi->dev, pdata->rst_gpio,
                                            GPIOF_OUT_INIT_HIGH, "reset");
                if (ret)
-                       return ret;
+                       goto free_pdata;
        }
 
        /* reset mcr20a */
@@ -1291,7 +1291,8 @@ mcr20a_probe(struct spi_device *spi)
        hw = ieee802154_alloc_hw(sizeof(*lp), &mcr20a_hw_ops);
        if (!hw) {
                dev_crit(&spi->dev, "ieee802154_alloc_hw failed\n");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto free_pdata;
        }
 
        /* init mcr20a local data */
@@ -1308,8 +1309,10 @@ mcr20a_probe(struct spi_device *spi)
        /* init buf */
        lp->buf = devm_kzalloc(&spi->dev, SPI_COMMAND_BUFFER, GFP_KERNEL);
 
-       if (!lp->buf)
-               return -ENOMEM;
+       if (!lp->buf) {
+               ret = -ENOMEM;
+               goto free_dev;
+       }
 
        mcr20a_setup_tx_spi_messages(lp);
        mcr20a_setup_rx_spi_messages(lp);
@@ -1366,6 +1369,8 @@ mcr20a_probe(struct spi_device *spi)
 
 free_dev:
        ieee802154_free_hw(lp->hw);
+free_pdata:
+       kfree(pdata);
 
        return ret;
 }
index 3bb6b66..f9c2591 100644 (file)
@@ -720,6 +720,15 @@ static struct phy_driver broadcom_drivers[] = {
        .get_strings    = bcm_phy_get_strings,
        .get_stats      = bcm53xx_phy_get_stats,
        .probe          = bcm53xx_phy_probe,
+}, {
+       .phy_id         = PHY_ID_BCM89610,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "Broadcom BCM89610",
+       .features       = PHY_GBIT_FEATURES,
+       .flags          = PHY_HAS_INTERRUPT,
+       .config_init    = bcm54xx_config_init,
+       .ack_interrupt  = bcm_phy_ack_intr,
+       .config_intr    = bcm_phy_config_intr,
 } };
 
 module_phy_driver(broadcom_drivers);
@@ -741,6 +750,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
        { PHY_ID_BCMAC131, 0xfffffff0 },
        { PHY_ID_BCM5241, 0xfffffff0 },
        { PHY_ID_BCM5395, 0xfffffff0 },
+       { PHY_ID_BCM89610, 0xfffffff0 },
        { }
 };
 
index c22e8e3..25e2a09 100644 (file)
@@ -1393,6 +1393,15 @@ static int m88e1318_set_wol(struct phy_device *phydev,
                if (err < 0)
                        goto error;
 
+               /* If a WOL event happened once, the LED[2] interrupt pin
+                * will not be cleared unless we read the interrupt status
+                * register. If interrupts are in use, the normal interrupt
+                * handling will clear the WOL event. Clear the WOL event
+                * before enabling it if !phy_interrupt_is_valid().
+                */
+               if (!phy_interrupt_is_valid(phydev))
+                       phy_read(phydev, MII_M1011_IEVENT);
+
                /* Enable the WOL interrupt */
                err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0,
                                   MII_88E1318S_PHY_CSIER_WOL_EIE);
index ac23322..9e4ba8e 100644 (file)
@@ -535,8 +535,17 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
 
        /* Grab the bits from PHYIR1, and put them in the upper half */
        phy_reg = mdiobus_read(bus, addr, MII_PHYSID1);
-       if (phy_reg < 0)
+       if (phy_reg < 0) {
+               /* if there is no device, return without an error so scanning
+                * the bus works properly
+                */
+               if (phy_reg == -EIO || phy_reg == -ENODEV) {
+                       *phy_id = 0xffffffff;
+                       return 0;
+               }
+
                return -EIO;
+       }
 
        *phy_id = (phy_reg & 0xffff) << 16;
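
The early return above reports an all-ones ID instead of an error so that an MDIO bus scan can continue past empty addresses. The scan loop itself is not part of this hunk; by convention an all-ones ID is treated as "no PHY present", as in this hypothetical illustration (not the actual mdiobus code):

/* Hypothetical helper, not the actual PHY core check. */
static bool demo_phy_present(u32 phy_id)
{
	return phy_id != 0xffffffff;
}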
 
index 0381da7..fd6c23f 100644 (file)
@@ -125,7 +125,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
        if (id->base.br_nominal) {
                if (id->base.br_nominal != 255) {
                        br_nom = id->base.br_nominal * 100;
-                       br_min = br_nom + id->base.br_nominal * id->ext.br_min;
+                       br_min = br_nom - id->base.br_nominal * id->ext.br_min;
                        br_max = br_nom + id->base.br_nominal * id->ext.br_max;
                } else if (id->ext.br_max) {
                        br_nom = 250 * id->ext.br_max;
index 1483bc7..7df0733 100644 (file)
@@ -620,6 +620,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
        lock_sock(sk);
 
        error = -EINVAL;
+
+       if (sockaddr_len != sizeof(struct sockaddr_pppox))
+               goto end;
+
        if (sp->sa_protocol != PX_PROTO_OE)
                goto end;
 
index acbe849..ddb6bf8 100644 (file)
@@ -1072,14 +1072,11 @@ static void team_port_leave(struct team *team, struct team_port *port)
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+static int __team_port_enable_netpoll(struct team_port *port)
 {
        struct netpoll *np;
        int err;
 
-       if (!team->dev->npinfo)
-               return 0;
-
        np = kzalloc(sizeof(*np), GFP_KERNEL);
        if (!np)
                return -ENOMEM;
@@ -1093,6 +1090,14 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port)
        return err;
 }
 
+static int team_port_enable_netpoll(struct team_port *port)
+{
+       if (!port->team->dev->npinfo)
+               return 0;
+
+       return __team_port_enable_netpoll(port);
+}
+
 static void team_port_disable_netpoll(struct team_port *port)
 {
        struct netpoll *np = port->np;
@@ -1107,7 +1112,7 @@ static void team_port_disable_netpoll(struct team_port *port)
        kfree(np);
 }
 #else
-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+static int team_port_enable_netpoll(struct team_port *port)
 {
        return 0;
 }
@@ -1221,7 +1226,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
                goto err_vids_add;
        }
 
-       err = team_port_enable_netpoll(team, port);
+       err = team_port_enable_netpoll(port);
        if (err) {
                netdev_err(dev, "Failed to enable netpoll on device %s\n",
                           portname);
@@ -1918,7 +1923,7 @@ static int team_netpoll_setup(struct net_device *dev,
 
        mutex_lock(&team->lock);
        list_for_each_entry(port, &team->port_list, list) {
-               err = team_port_enable_netpoll(team, port);
+               err = __team_port_enable_netpoll(port);
                if (err) {
                        __team_netpoll_cleanup(team);
                        break;
index c853e74..42565dd 100644 (file)
@@ -1098,6 +1098,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x05c6, 0x9080, 8)},
        {QMI_FIXED_INTF(0x05c6, 0x9083, 3)},
        {QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
+       {QMI_FIXED_INTF(0x05c6, 0x90b2, 3)},    /* ublox R410M */
        {QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
        {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
        {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */
@@ -1343,6 +1344,18 @@ static int qmi_wwan_probe(struct usb_interface *intf,
                id->driver_info = (unsigned long)&qmi_wwan_info;
        }
 
+       /* There are devices where the same interface number can be
+        * configured as different functions. We should only bind to
+        * vendor-specific functions when matching on interface number.
+        */
+       if (id->match_flags & USB_DEVICE_ID_MATCH_INT_NUMBER &&
+           desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) {
+               dev_dbg(&intf->dev,
+                       "Rejecting interface number match for class %02x\n",
+                       desc->bInterfaceClass);
+               return -ENODEV;
+       }
+
        /* Quectel EC20 quirk where we've QMI on interface 4 instead of 0 */
        if (quectel_ec20_detected(intf) && desc->bInterfaceNumber == 0) {
                dev_dbg(&intf->dev, "Quectel EC20 quirk, skipping interface 0\n");
index 9277f4c..94e177d 100644 (file)
@@ -459,7 +459,7 @@ static void brcmf_fw_free_request(struct brcmf_fw_request *req)
        kfree(req);
 }
 
-static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
+static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
 {
        struct brcmf_fw *fwctx = ctx;
        struct brcmf_fw_item *cur;
@@ -498,13 +498,10 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
        brcmf_dbg(TRACE, "nvram %p len %d\n", nvram, nvram_length);
        cur->nv_data.data = nvram;
        cur->nv_data.len = nvram_length;
-       return;
+       return 0;
 
 fail:
-       brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev));
-       fwctx->done(fwctx->dev, -ENOENT, NULL);
-       brcmf_fw_free_request(fwctx->req);
-       kfree(fwctx);
+       return -ENOENT;
 }
 
 static int brcmf_fw_request_next_item(struct brcmf_fw *fwctx, bool async)
@@ -553,20 +550,27 @@ static void brcmf_fw_request_done(const struct firmware *fw, void *ctx)
        brcmf_dbg(TRACE, "enter: firmware %s %sfound\n", cur->path,
                  fw ? "" : "not ");
 
-       if (fw) {
-               if (cur->type == BRCMF_FW_TYPE_BINARY)
-                       cur->binary = fw;
-               else if (cur->type == BRCMF_FW_TYPE_NVRAM)
-                       brcmf_fw_request_nvram_done(fw, fwctx);
-               else
-                       release_firmware(fw);
-       } else if (cur->type == BRCMF_FW_TYPE_NVRAM) {
-               brcmf_fw_request_nvram_done(NULL, fwctx);
-       } else if (!(cur->flags & BRCMF_FW_REQF_OPTIONAL)) {
+       if (!fw)
                ret = -ENOENT;
+
+       switch (cur->type) {
+       case BRCMF_FW_TYPE_NVRAM:
+               ret = brcmf_fw_request_nvram_done(fw, fwctx);
+               break;
+       case BRCMF_FW_TYPE_BINARY:
+               cur->binary = fw;
+               break;
+       default:
+               /* something fishy here so bail out early */
+               brcmf_err("unknown fw type: %d\n", cur->type);
+               release_firmware(fw);
+               ret = -EINVAL;
                goto fail;
        }
 
+       if (ret < 0 && !(cur->flags & BRCMF_FW_REQF_OPTIONAL))
+               goto fail;
+
        do {
                if (++fwctx->curpos == fwctx->req->n_items) {
                        ret = 0;
index 7af3a0f..a17c4a7 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +31,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018        Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -749,13 +750,9 @@ struct iwl_scan_req_umac {
 } __packed;
 
 #define IWL_SCAN_REQ_UMAC_SIZE_V8 sizeof(struct iwl_scan_req_umac)
-#define IWL_SCAN_REQ_UMAC_SIZE_V7 (sizeof(struct iwl_scan_req_umac) - \
-                                        4 * sizeof(u8))
-#define IWL_SCAN_REQ_UMAC_SIZE_V6 (sizeof(struct iwl_scan_req_umac) - \
-                                  2 * sizeof(u8) - sizeof(__le16))
-#define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \
-                                  2 * sizeof(__le32) - 2 * sizeof(u8) - \
-                                  sizeof(__le16))
+#define IWL_SCAN_REQ_UMAC_SIZE_V7 48
+#define IWL_SCAN_REQ_UMAC_SIZE_V6 44
+#define IWL_SCAN_REQ_UMAC_SIZE_V1 36
 
 /**
  * struct iwl_umac_scan_abort
index 8928613..ca01746 100644 (file)
@@ -76,6 +76,7 @@
 #include "iwl-io.h"
 #include "iwl-csr.h"
 #include "fw/acpi.h"
+#include "fw/api/nvm-reg.h"
 
 /* NVM offsets (in words) definitions */
 enum nvm_offsets {
@@ -146,8 +147,8 @@ static const u8 iwl_ext_nvm_channels[] = {
        149, 153, 157, 161, 165, 169, 173, 177, 181
 };
 
-#define IWL_NUM_CHANNELS               ARRAY_SIZE(iwl_nvm_channels)
-#define IWL_NUM_CHANNELS_EXT   ARRAY_SIZE(iwl_ext_nvm_channels)
+#define IWL_NVM_NUM_CHANNELS           ARRAY_SIZE(iwl_nvm_channels)
+#define IWL_NVM_NUM_CHANNELS_EXT       ARRAY_SIZE(iwl_ext_nvm_channels)
 #define NUM_2GHZ_CHANNELS              14
 #define NUM_2GHZ_CHANNELS_EXT  14
 #define FIRST_2GHZ_HT_MINUS            5
@@ -301,11 +302,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
        const u8 *nvm_chan;
 
        if (cfg->nvm_type != IWL_NVM_EXT) {
-               num_of_ch = IWL_NUM_CHANNELS;
+               num_of_ch = IWL_NVM_NUM_CHANNELS;
                nvm_chan = &iwl_nvm_channels[0];
                num_2ghz_channels = NUM_2GHZ_CHANNELS;
        } else {
-               num_of_ch = IWL_NUM_CHANNELS_EXT;
+               num_of_ch = IWL_NVM_NUM_CHANNELS_EXT;
                nvm_chan = &iwl_ext_nvm_channels[0];
                num_2ghz_channels = NUM_2GHZ_CHANNELS_EXT;
        }
@@ -720,12 +721,12 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        if (cfg->nvm_type != IWL_NVM_EXT)
                data = kzalloc(sizeof(*data) +
                               sizeof(struct ieee80211_channel) *
-                              IWL_NUM_CHANNELS,
+                              IWL_NVM_NUM_CHANNELS,
                               GFP_KERNEL);
        else
                data = kzalloc(sizeof(*data) +
                               sizeof(struct ieee80211_channel) *
-                              IWL_NUM_CHANNELS_EXT,
+                              IWL_NVM_NUM_CHANNELS_EXT,
                               GFP_KERNEL);
        if (!data)
                return NULL;
@@ -842,24 +843,34 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
        return flags;
 }
 
+struct regdb_ptrs {
+       struct ieee80211_wmm_rule *rule;
+       u32 token;
+};
+
 struct ieee80211_regdomain *
 iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
-                      int num_of_ch, __le32 *channels, u16 fw_mcc)
+                      int num_of_ch, __le32 *channels, u16 fw_mcc,
+                      u16 geo_info)
 {
        int ch_idx;
        u16 ch_flags;
        u32 reg_rule_flags, prev_reg_rule_flags = 0;
        const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ?
                             iwl_ext_nvm_channels : iwl_nvm_channels;
-       struct ieee80211_regdomain *regd;
-       int size_of_regd;
+       struct ieee80211_regdomain *regd, *copy_rd;
+       int size_of_regd, regd_to_copy, wmms_to_copy;
+       int size_of_wmms = 0;
        struct ieee80211_reg_rule *rule;
+       struct ieee80211_wmm_rule *wmm_rule, *d_wmm, *s_wmm;
+       struct regdb_ptrs *regdb_ptrs;
        enum nl80211_band band;
        int center_freq, prev_center_freq = 0;
-       int valid_rules = 0;
+       int valid_rules = 0, n_wmms = 0;
+       int i;
        bool new_rule;
        int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
-                        IWL_NUM_CHANNELS_EXT : IWL_NUM_CHANNELS;
+                        IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS;
 
        if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
                return ERR_PTR(-EINVAL);
@@ -875,10 +886,26 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                sizeof(struct ieee80211_regdomain) +
                num_of_ch * sizeof(struct ieee80211_reg_rule);
 
-       regd = kzalloc(size_of_regd, GFP_KERNEL);
+       if (geo_info & GEO_WMM_ETSI_5GHZ_INFO)
+               size_of_wmms =
+                       num_of_ch * sizeof(struct ieee80211_wmm_rule);
+
+       regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL);
        if (!regd)
                return ERR_PTR(-ENOMEM);
 
+       regdb_ptrs = kcalloc(num_of_ch, sizeof(*regdb_ptrs), GFP_KERNEL);
+       if (!regdb_ptrs) {
+               copy_rd = ERR_PTR(-ENOMEM);
+               goto out;
+       }
+
+       /* set alpha2 from FW. */
+       regd->alpha2[0] = fw_mcc >> 8;
+       regd->alpha2[1] = fw_mcc & 0xff;
+
+       wmm_rule = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
+
        for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
                ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
                band = (ch_idx < NUM_2GHZ_CHANNELS) ?
@@ -927,14 +954,66 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
 
                iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
                                            nvm_chan[ch_idx], ch_flags);
+
+               if (!(geo_info & GEO_WMM_ETSI_5GHZ_INFO) ||
+                   band == NL80211_BAND_2GHZ)
+                       continue;
+
+               if (!reg_query_regdb_wmm(regd->alpha2, center_freq,
+                                        &regdb_ptrs[n_wmms].token, wmm_rule)) {
+                       /* Add only new rules */
+                       for (i = 0; i < n_wmms; i++) {
+                               if (regdb_ptrs[i].token ==
+                                   regdb_ptrs[n_wmms].token) {
+                                       rule->wmm_rule = regdb_ptrs[i].rule;
+                                       break;
+                               }
+                       }
+                       if (i == n_wmms) {
+                               rule->wmm_rule = wmm_rule;
+                               regdb_ptrs[n_wmms++].rule = wmm_rule;
+                               wmm_rule++;
+                       }
+               }
        }
 
        regd->n_reg_rules = valid_rules;
+       regd->n_wmm_rules = n_wmms;
 
-       /* set alpha2 from FW. */
-       regd->alpha2[0] = fw_mcc >> 8;
-       regd->alpha2[1] = fw_mcc & 0xff;
+       /*
+        * Narrow the regdomain down to the rules actually used, so there is
+        * no hole between the reg rules and the wmm rules that follow them.
+        */
+       regd_to_copy = sizeof(struct ieee80211_regdomain) +
+               valid_rules * sizeof(struct ieee80211_reg_rule);
+
+       wmms_to_copy = sizeof(struct ieee80211_wmm_rule) * n_wmms;
+
+       copy_rd = kzalloc(regd_to_copy + wmms_to_copy, GFP_KERNEL);
+       if (!copy_rd) {
+               copy_rd = ERR_PTR(-ENOMEM);
+               goto out;
+       }
+
+       memcpy(copy_rd, regd, regd_to_copy);
+       memcpy((u8 *)copy_rd + regd_to_copy, (u8 *)regd + size_of_regd,
+              wmms_to_copy);
+
+       d_wmm = (struct ieee80211_wmm_rule *)((u8 *)copy_rd + regd_to_copy);
+       s_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
+
+       for (i = 0; i < regd->n_reg_rules; i++) {
+               if (!regd->reg_rules[i].wmm_rule)
+                       continue;
+
+               copy_rd->reg_rules[i].wmm_rule = d_wmm +
+                       (regd->reg_rules[i].wmm_rule - s_wmm) /
+                       sizeof(struct ieee80211_wmm_rule);
+       }
 
-       return regd;
+out:
+       kfree(regdb_ptrs);
+       kfree(regd);
+       return copy_rd;
 }
 IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info);
index 306736c..3071a23 100644 (file)
@@ -101,12 +101,14 @@ void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
  *
  * This function parses the regulatory channel data received as a
 * MCC_UPDATE_CMD command. It returns a newly allocated regulatory domain,
- * to be fed into the regulatory core. An ERR_PTR is returned on error.
+ * to be fed into the regulatory core. If geo_info is set, it is handled
+ * accordingly. An ERR_PTR is returned on error.
  * If not given to the regulatory core, the user is responsible for freeing
  * the regdomain returned here with kfree.
  */
 struct ieee80211_regdomain *
 iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
-                      int num_of_ch, __le32 *channels, u16 fw_mcc);
+                      int num_of_ch, __le32 *channels, u16 fw_mcc,
+                      u16 geo_info);
 
 #endif /* __iwl_nvm_parse_h__ */
index 51b3042..90f8c89 100644 (file)
@@ -311,7 +311,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
        regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
                                      __le32_to_cpu(resp->n_channels),
                                      resp->channels,
-                                     __le16_to_cpu(resp->mcc));
+                                     __le16_to_cpu(resp->mcc),
+                                     __le16_to_cpu(resp->geo_info));
        /* Store the return source id */
        src_id = resp->source_id;
        kfree(resp);
index 96d26cf..4a017a0 100644 (file)
@@ -3236,6 +3236,7 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
                        GENL_SET_ERR_MSG(info,"MAC is no valid source addr");
                        NL_SET_BAD_ATTR(info->extack,
                                        info->attrs[HWSIM_ATTR_PERM_ADDR]);
+                       kfree(hwname);
                        return -EINVAL;
                }
 
index 8b6b07a..b026e80 100644 (file)
@@ -158,16 +158,6 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)
 
 static u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv)
 {
-       struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params;
-
-       /* override ant_num / ant_path */
-       if (mod_params->ant_sel) {
-               rtlpriv->btcoexist.btc_info.ant_num =
-                       (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1);
-
-               rtlpriv->btcoexist.btc_info.single_ant_path =
-                       (mod_params->ant_sel == 1 ? 0 : 1);
-       }
        return rtlpriv->btcoexist.btc_info.single_ant_path;
 }
 
@@ -178,7 +168,6 @@ static u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)
 
 static u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
 {
-       struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params;
        u8 num;
 
        if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2)
@@ -186,10 +175,6 @@ static u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
        else
                num = 1;
 
-       /* override ant_num / ant_path */
-       if (mod_params->ant_sel)
-               num = (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1) + 1;
-
        return num;
 }
 
index e7bbbc9..b4f3f91 100644 (file)
@@ -848,6 +848,9 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw)
                return false;
        }
 
+       if (rtlpriv->cfg->ops->get_btc_status())
+               rtlpriv->btcoexist.btc_ops->btc_power_on_setting(rtlpriv);
+
        bytetmp = rtl_read_byte(rtlpriv, REG_MULTI_FUNC_CTRL);
        rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL, bytetmp | BIT(3));
 
@@ -2696,21 +2699,21 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
                rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B;
                rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1);
                rtlpriv->btcoexist.btc_info.single_ant_path =
-                        (value & 0x40);        /*0xc3[6]*/
+                        (value & 0x40 ? ANT_AUX : ANT_MAIN);   /*0xc3[6]*/
        } else {
                rtlpriv->btcoexist.btc_info.btcoexist = 0;
                rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B;
                rtlpriv->btcoexist.btc_info.ant_num = ANT_X2;
-               rtlpriv->btcoexist.btc_info.single_ant_path = 0;
+               rtlpriv->btcoexist.btc_info.single_ant_path = ANT_MAIN;
        }
 
        /* override ant_num / ant_path */
        if (mod_params->ant_sel) {
                rtlpriv->btcoexist.btc_info.ant_num =
-                       (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1);
+                       (mod_params->ant_sel == 1 ? ANT_X1 : ANT_X2);
 
                rtlpriv->btcoexist.btc_info.single_ant_path =
-                       (mod_params->ant_sel == 1 ? 0 : 1);
+                       (mod_params->ant_sel == 1 ? ANT_AUX : ANT_MAIN);
        }
 }
 
index d27e339..ce17540 100644 (file)
@@ -2823,6 +2823,11 @@ enum bt_ant_num {
        ANT_X1 = 1,
 };
 
+enum bt_ant_path {
+       ANT_MAIN = 0,
+       ANT_AUX = 1,
+};
+
 enum bt_co_type {
        BT_2WIRE = 0,
        BT_ISSC_3WIRE = 1,
index b979cf3..88a8b59 100644 (file)
@@ -27,7 +27,7 @@ config NVME_FABRICS
 
 config NVME_RDMA
        tristate "NVM Express over Fabrics RDMA host driver"
-       depends on INFINIBAND && BLOCK
+       depends on INFINIBAND && INFINIBAND_ADDR_TRANS && BLOCK
        select NVME_CORE
        select NVME_FABRICS
        select SG_POOL
index 9df4f71..99b857e 100644 (file)
@@ -99,6 +99,7 @@ static struct class *nvme_subsys_class;
 
 static void nvme_ns_remove(struct nvme_ns *ns);
 static int nvme_revalidate_disk(struct gendisk *disk);
+static void nvme_put_subsystem(struct nvme_subsystem *subsys);
 
 int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
 {
@@ -117,7 +118,8 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
        ret = nvme_reset_ctrl(ctrl);
        if (!ret) {
                flush_work(&ctrl->reset_work);
-               if (ctrl->state != NVME_CTRL_LIVE)
+               if (ctrl->state != NVME_CTRL_LIVE &&
+                   ctrl->state != NVME_CTRL_ADMIN_ONLY)
                        ret = -ENETRESET;
        }
 
@@ -350,6 +352,7 @@ static void nvme_free_ns_head(struct kref *ref)
        ida_simple_remove(&head->subsys->ns_ida, head->instance);
        list_del_init(&head->entry);
        cleanup_srcu_struct(&head->srcu);
+       nvme_put_subsystem(head->subsys);
        kfree(head);
 }
 
@@ -764,6 +767,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
                                ret = PTR_ERR(meta);
                                goto out_unmap;
                        }
+                       req->cmd_flags |= REQ_INTEGRITY;
                }
        }
 
@@ -2860,6 +2864,9 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
                goto out_cleanup_srcu;
 
        list_add_tail(&head->entry, &ctrl->subsys->nsheads);
+
+       kref_get(&ctrl->subsys->ref);
+
        return head;
 out_cleanup_srcu:
        cleanup_srcu_struct(&head->srcu);
@@ -2997,31 +3004,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
        if (nvme_init_ns_head(ns, nsid, id))
                goto out_free_id;
        nvme_setup_streams_ns(ctrl, ns);
-       
-#ifdef CONFIG_NVME_MULTIPATH
-       /*
-        * If multipathing is enabled we need to always use the subsystem
-        * instance number for numbering our devices to avoid conflicts
-        * between subsystems that have multiple controllers and thus use
-        * the multipath-aware subsystem node and those that have a single
-        * controller and use the controller node directly.
-        */
-       if (ns->head->disk) {
-               sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
-                               ctrl->cntlid, ns->head->instance);
-               flags = GENHD_FL_HIDDEN;
-       } else {
-               sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
-                               ns->head->instance);
-       }
-#else
-       /*
-        * But without the multipath code enabled, multiple controller per
-        * subsystems are visible as devices and thus we cannot use the
-        * subsystem instance.
-        */
-       sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
-#endif
+       nvme_set_disk_name(disk_name, ns, ctrl, &flags);
 
        if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
                if (nvme_nvm_register(ns, disk_name, node)) {
index 124c458..7ae732a 100644 (file)
@@ -668,6 +668,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
                                ret = -ENOMEM;
                                goto out;
                        }
+                       kfree(opts->transport);
                        opts->transport = p;
                        break;
                case NVMF_OPT_NQN:
@@ -676,6 +677,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
                                ret = -ENOMEM;
                                goto out;
                        }
+                       kfree(opts->subsysnqn);
                        opts->subsysnqn = p;
                        nqnlen = strlen(opts->subsysnqn);
                        if (nqnlen >= NVMF_NQN_SIZE) {
@@ -698,6 +700,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
                                ret = -ENOMEM;
                                goto out;
                        }
+                       kfree(opts->traddr);
                        opts->traddr = p;
                        break;
                case NVMF_OPT_TRSVCID:
@@ -706,6 +709,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
                                ret = -ENOMEM;
                                goto out;
                        }
+                       kfree(opts->trsvcid);
                        opts->trsvcid = p;
                        break;
                case NVMF_OPT_QUEUE_SIZE:
@@ -792,6 +796,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
                                ret = -EINVAL;
                                goto out;
                        }
+                       nvmf_host_put(opts->host);
                        opts->host = nvmf_host_add(p);
                        kfree(p);
                        if (!opts->host) {
@@ -817,6 +822,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
                                ret = -ENOMEM;
                                goto out;
                        }
+                       kfree(opts->host_traddr);
                        opts->host_traddr = p;
                        break;
                case NVMF_OPT_HOST_ID:
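
The kfree() and nvmf_host_put() calls added throughout nvmf_parse_options() presumably guard against the same option key appearing more than once in a connect string; previously the earlier allocation was simply overwritten and leaked. A hypothetical example of such input (the option names are real, the string is illustrative):

/* Parsing "transport=rdma,transport=rdma,nqn=nqn.example" would hit the
 * transport case twice; kfree(opts->transport) now frees the first copy
 * before the second one is stored, and the other added kfree()/put calls
 * do the same for their respective options.
 */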
index 956e0b8..d7b664a 100644 (file)
 #include "nvme.h"
 
 static bool multipath = true;
-module_param(multipath, bool, 0644);
+module_param(multipath, bool, 0444);
 MODULE_PARM_DESC(multipath,
        "turn on native support for multiple controllers per subsystem");
 
+/*
+ * If multipathing is enabled we need to always use the subsystem instance
+ * number for numbering our devices to avoid conflicts between subsystems that
+ * have multiple controllers and thus use the multipath-aware subsystem node
+ * and those that have a single controller and use the controller node
+ * directly.
+ */
+void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+                       struct nvme_ctrl *ctrl, int *flags)
+{
+       if (!multipath) {
+               sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
+       } else if (ns->head->disk) {
+               sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
+                               ctrl->cntlid, ns->head->instance);
+               *flags = GENHD_FL_HIDDEN;
+       } else {
+               sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
+                               ns->head->instance);
+       }
+}
+
 void nvme_failover_req(struct request *req)
 {
        struct nvme_ns *ns = req->q->queuedata;
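
To illustrate the naming helper above with hypothetical instance numbers (subsystem and controller instance 0, cntlid 1, namespace head instance 2), the three branches produce the following names:

	/* Illustration only; 0, 1 and 2 are hypothetical instance numbers. */
	sprintf(disk_name, "nvme%dn%d", 0, 2);       /* multipath off: "nvme0n2" */
	sprintf(disk_name, "nvme%dc%dn%d", 0, 1, 2); /* hidden per-controller node: "nvme0c1n2" */
	sprintf(disk_name, "nvme%dn%d", 0, 2);       /* multipath-aware subsystem node: "nvme0n2" */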
index 061fecf..17d2f7c 100644 (file)
@@ -84,6 +84,11 @@ enum nvme_quirks {
         * Supports the LightNVM command set if indicated in vs[1].
         */
        NVME_QUIRK_LIGHTNVM                     = (1 << 6),
+
+       /*
+        * Set MEDIUM priority on SQ creation
+        */
+       NVME_QUIRK_MEDIUM_PRIO_SQ               = (1 << 7),
 };
 
 /*
@@ -436,6 +441,8 @@ extern const struct attribute_group nvme_ns_id_attr_group;
 extern const struct block_device_operations nvme_ns_head_ops;
 
 #ifdef CONFIG_NVME_MULTIPATH
+void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+                       struct nvme_ctrl *ctrl, int *flags);
 void nvme_failover_req(struct request *req);
 bool nvme_req_needs_failover(struct request *req, blk_status_t error);
 void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
@@ -461,6 +468,16 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
 }
 
 #else
+/*
+ * Without the multipath code enabled, multiple controllers per subsystem are
+ * visible as devices and thus we cannot use the subsystem instance.
+ */
+static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+                                     struct nvme_ctrl *ctrl, int *flags)
+{
+       sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
+}
+
 static inline void nvme_failover_req(struct request *req)
 {
 }
index fbc71fa..17a0190 100644 (file)
@@ -1093,9 +1093,18 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
 static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
                                                struct nvme_queue *nvmeq)
 {
+       struct nvme_ctrl *ctrl = &dev->ctrl;
        struct nvme_command c;
        int flags = NVME_QUEUE_PHYS_CONTIG;
 
+       /*
+        * Some drives have a bug that auto-enables WRRU if MEDIUM isn't
+        * set. Since the URGENT priority value is zero, that would make all
+        * queues URGENT.
+        */
+       if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ)
+               flags |= NVME_SQ_PRIO_MEDIUM;
+
        /*
         * Note: we (ab)use the fact that the prp fields survive if no data
         * is attached to the request.
@@ -2701,7 +2710,8 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_STRIPE_SIZE |
                                NVME_QUIRK_DEALLOCATE_ZEROES, },
        { PCI_VDEVICE(INTEL, 0xf1a5),   /* Intel 600P/P3100 */
-               .driver_data = NVME_QUIRK_NO_DEEPEST_PS },
+               .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
+                               NVME_QUIRK_MEDIUM_PRIO_SQ },
        { PCI_VDEVICE(INTEL, 0x5845),   /* Qemu emulated controller */
                .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
        { PCI_DEVICE(0x1c58, 0x0003),   /* HGST adapter */
index 5f4f8b1..3c7b61d 100644 (file)
@@ -27,7 +27,7 @@ config NVME_TARGET_LOOP
 
 config NVME_TARGET_RDMA
        tristate "NVMe over Fabrics RDMA target support"
-       depends on INFINIBAND
+       depends on INFINIBAND && INFINIBAND_ADDR_TRANS
        depends on NVME_TARGET
        select SGL_ALLOC
        help
index 31fdfba..27a8561 100644 (file)
@@ -469,6 +469,12 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
        nvme_stop_ctrl(&ctrl->ctrl);
        nvme_loop_shutdown_ctrl(ctrl);
 
+       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+               /* state change failure should never happen */
+               WARN_ON_ONCE(1);
+               return;
+       }
+
        ret = nvme_loop_configure_admin_queue(ctrl);
        if (ret)
                goto out_disable;
index 84aa9d6..6da20b9 100644 (file)
@@ -942,7 +942,7 @@ int __init early_init_dt_scan_chosen_stdout(void)
        int offset;
        const char *p, *q, *options = NULL;
        int l;
-       const struct earlycon_id *match;
+       const struct earlycon_id **p_match;
        const void *fdt = initial_boot_params;
 
        offset = fdt_path_offset(fdt, "/chosen");
@@ -969,7 +969,10 @@ int __init early_init_dt_scan_chosen_stdout(void)
                return 0;
        }
 
-       for (match = __earlycon_table; match < __earlycon_table_end; match++) {
+       for (p_match = __earlycon_table; p_match < __earlycon_table_end;
+            p_match++) {
+               const struct earlycon_id *match = *p_match;
+
                if (!match->compatible[0])
                        continue;
 
index b35fe88..7baa53e 100644 (file)
@@ -102,12 +102,28 @@ static DEFINE_IDR(ovcs_idr);
 
 static BLOCKING_NOTIFIER_HEAD(overlay_notify_chain);
 
+/**
+ * of_overlay_notifier_register() - Register notifier for overlay operations
+ * @nb:                Notifier block to register
+ *
+ * Register for notification on overlay operations on device tree nodes. The
+ * reported actions are defined by @of_reconfig_change. The notifier callback
+ * furthermore receives a pointer to the affected device tree node.
+ *
+ * Note that a notifier callback is not supposed to store pointers to a device
+ * tree node or its content beyond @OF_OVERLAY_POST_REMOVE corresponding to the
+ * respective node it received.
+ */
 int of_overlay_notifier_register(struct notifier_block *nb)
 {
        return blocking_notifier_chain_register(&overlay_notify_chain, nb);
 }
 EXPORT_SYMBOL_GPL(of_overlay_notifier_register);
 
+/**
+ * of_overlay_notifier_unregister() - Unregister notifier for overlay operations
+ * @nb:                Notifier block to unregister
+ */
 int of_overlay_notifier_unregister(struct notifier_block *nb)
 {
        return blocking_notifier_chain_unregister(&overlay_notify_chain, nb);
@@ -671,17 +687,13 @@ static void free_overlay_changeset(struct overlay_changeset *ovcs)
                of_node_put(ovcs->fragments[i].overlay);
        }
        kfree(ovcs->fragments);
-
        /*
-        * TODO
-        *
-        * would like to: kfree(ovcs->overlay_tree);
-        * but can not since drivers may have pointers into this data
-        *
-        * would like to: kfree(ovcs->fdt);
-        * but can not since drivers may have pointers into this data
+        * There should be no live pointers into ovcs->overlay_tree and
+        * ovcs->fdt due to the policy that overlay notifiers are not allowed
+        * to retain pointers into the overlay devicetree.
         */
-
+       kfree(ovcs->overlay_tree);
+       kfree(ovcs->fdt);
        kfree(ovcs);
 }
 
index acba1f5..126cf19 100644 (file)
@@ -1263,7 +1263,7 @@ static struct parisc_driver ccio_driver __refdata = {
  * I/O Page Directory, the resource map, and initializing the
  * U2/Uturn chip into virtual mode.
  */
-static void
+static void __init
 ccio_ioc_init(struct ioc *ioc)
 {
        int i;
index a6b88c7..d2970a0 100644 (file)
@@ -486,7 +486,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
                return ret;
 
        kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node,
-                                                     "reset-gpio", 0);
+                                                     "reset-gpios", 0);
        if (kirin_pcie->gpio_id_reset < 0)
                return -ENODEV;
 
index b04d37b..9abf549 100644 (file)
@@ -29,6 +29,7 @@
 #define     PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT      5
 #define     PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE             (0 << 11)
 #define     PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT     12
+#define     PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ             0x2
 #define PCIE_CORE_LINK_CTRL_STAT_REG                           0xd0
 #define     PCIE_CORE_LINK_L0S_ENTRY                           BIT(0)
 #define     PCIE_CORE_LINK_TRAINING                            BIT(5)
 #define PCIE_ISR1_MASK_REG                     (CONTROL_BASE_ADDR + 0x4C)
 #define     PCIE_ISR1_POWER_STATE_CHANGE       BIT(4)
 #define     PCIE_ISR1_FLUSH                    BIT(5)
-#define     PCIE_ISR1_ALL_MASK                 GENMASK(5, 4)
+#define     PCIE_ISR1_INTX_ASSERT(val)         BIT(8 + (val))
+#define     PCIE_ISR1_ALL_MASK                 GENMASK(11, 4)
 #define PCIE_MSI_ADDR_LOW_REG                  (CONTROL_BASE_ADDR + 0x50)
 #define PCIE_MSI_ADDR_HIGH_REG                 (CONTROL_BASE_ADDR + 0x54)
 #define PCIE_MSI_STATUS_REG                    (CONTROL_BASE_ADDR + 0x58)
 #define PCIE_CONFIG_WR_TYPE0                   0xa
 #define PCIE_CONFIG_WR_TYPE1                   0xb
 
-/* PCI_BDF shifts 8bit, so we need extra 4bit shift */
-#define PCIE_BDF(dev)                          (dev << 4)
 #define PCIE_CONF_BUS(bus)                     (((bus) & 0xff) << 20)
 #define PCIE_CONF_DEV(dev)                     (((dev) & 0x1f) << 15)
 #define PCIE_CONF_FUNC(fun)                    (((fun) & 0x7)  << 12)
@@ -296,7 +296,8 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
        reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE |
                (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) |
                PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE |
-               PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT;
+               (PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ <<
+                PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT);
        advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG);
 
        /* Program PCIe Control 2 to disable strict ordering */
@@ -437,7 +438,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
        u32 reg;
        int ret;
 
-       if (PCI_SLOT(devfn) != 0) {
+       if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) {
                *val = 0xffffffff;
                return PCIBIOS_DEVICE_NOT_FOUND;
        }
@@ -456,7 +457,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
        advk_writel(pcie, reg, PIO_CTRL);
 
        /* Program the address registers */
-       reg = PCIE_BDF(devfn) | PCIE_CONF_REG(where);
+       reg = PCIE_CONF_ADDR(bus->number, devfn, where);
        advk_writel(pcie, reg, PIO_ADDR_LS);
        advk_writel(pcie, 0, PIO_ADDR_MS);
 
@@ -491,7 +492,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
        int offset;
        int ret;
 
-       if (PCI_SLOT(devfn) != 0)
+       if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
                return PCIBIOS_DEVICE_NOT_FOUND;
 
        if (where % size)
@@ -609,9 +610,9 @@ static void advk_pcie_irq_mask(struct irq_data *d)
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
        u32 mask;
 
-       mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
-       mask |= PCIE_ISR0_INTX_ASSERT(hwirq);
-       advk_writel(pcie, mask, PCIE_ISR0_MASK_REG);
+       mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
+       mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
+       advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
 }
 
 static void advk_pcie_irq_unmask(struct irq_data *d)
@@ -620,9 +621,9 @@ static void advk_pcie_irq_unmask(struct irq_data *d)
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
        u32 mask;
 
-       mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
-       mask &= ~PCIE_ISR0_INTX_ASSERT(hwirq);
-       advk_writel(pcie, mask, PCIE_ISR0_MASK_REG);
+       mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
+       mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
+       advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
 }
 
 static int advk_pcie_irq_map(struct irq_domain *h,
@@ -765,29 +766,35 @@ static void advk_pcie_handle_msi(struct advk_pcie *pcie)
 
 static void advk_pcie_handle_int(struct advk_pcie *pcie)
 {
-       u32 val, mask, status;
+       u32 isr0_val, isr0_mask, isr0_status;
+       u32 isr1_val, isr1_mask, isr1_status;
        int i, virq;
 
-       val = advk_readl(pcie, PCIE_ISR0_REG);
-       mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
-       status = val & ((~mask) & PCIE_ISR0_ALL_MASK);
+       isr0_val = advk_readl(pcie, PCIE_ISR0_REG);
+       isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+       isr0_status = isr0_val & ((~isr0_mask) & PCIE_ISR0_ALL_MASK);
+
+       isr1_val = advk_readl(pcie, PCIE_ISR1_REG);
+       isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
+       isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);
 
-       if (!status) {
-               advk_writel(pcie, val, PCIE_ISR0_REG);
+       if (!isr0_status && !isr1_status) {
+               advk_writel(pcie, isr0_val, PCIE_ISR0_REG);
+               advk_writel(pcie, isr1_val, PCIE_ISR1_REG);
                return;
        }
 
        /* Process MSI interrupts */
-       if (status & PCIE_ISR0_MSI_INT_PENDING)
+       if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
                advk_pcie_handle_msi(pcie);
 
        /* Process legacy interrupts */
        for (i = 0; i < PCI_NUM_INTX; i++) {
-               if (!(status & PCIE_ISR0_INTX_ASSERT(i)))
+               if (!(isr1_status & PCIE_ISR1_INTX_ASSERT(i)))
                        continue;
 
-               advk_writel(pcie, PCIE_ISR0_INTX_ASSERT(i),
-                           PCIE_ISR0_REG);
+               advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
+                           PCIE_ISR1_REG);
 
                virq = irq_find_mapping(pcie->irq_domain, i);
                generic_handle_irq(virq);
index 6ace470..b9a1311 100644 (file)
@@ -958,10 +958,11 @@ static int pci_pm_freeze(struct device *dev)
         * devices should not be touched during freeze/thaw transitions,
         * however.
         */
-       if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+       if (!dev_pm_smart_suspend_and_suspended(dev)) {
                pm_runtime_resume(dev);
+               pci_dev->state_saved = false;
+       }
 
-       pci_dev->state_saved = false;
        if (pm->freeze) {
                int error;
 
index e597655..dbfe7c4 100644 (file)
@@ -1910,7 +1910,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
 EXPORT_SYMBOL(pci_pme_active);
 
 /**
- * pci_enable_wake - enable PCI device as wakeup event source
+ * __pci_enable_wake - enable PCI device as wakeup event source
  * @dev: PCI device affected
  * @state: PCI state from which device will issue wakeup events
  * @enable: True to enable event generation; false to disable
@@ -1928,7 +1928,7 @@ EXPORT_SYMBOL(pci_pme_active);
  * Error code depending on the platform is returned if both the platform and
  * the native mechanism fail to enable the generation of wake-up events
  */
-int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
+static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
 {
        int ret = 0;
 
@@ -1969,6 +1969,23 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
 
        return ret;
 }
+
+/**
+ * pci_enable_wake - change wakeup settings for a PCI device
+ * @pci_dev: Target device
+ * @state: PCI state from which device will issue wakeup events
+ * @enable: Whether or not to enable event generation
+ *
+ * If @enable is set, check device_may_wakeup() for the device before calling
+ * __pci_enable_wake() for it.
+ */
+int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
+{
+       if (enable && !device_may_wakeup(&pci_dev->dev))
+               return -EINVAL;
+
+       return __pci_enable_wake(pci_dev, state, enable);
+}
 EXPORT_SYMBOL(pci_enable_wake);
 
 /**
@@ -1981,9 +1998,9 @@ EXPORT_SYMBOL(pci_enable_wake);
  * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
  * ordering constraints.
  *
- * This function only returns error code if the device is not capable of
- * generating PME# from both D3_hot and D3_cold, and the platform is unable to
- * enable wake-up power for it.
+ * This function only returns an error code if the device is not allowed to wake
+ * up the system from sleep, or if it is not capable of generating PME# from both
+ * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
  */
 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
 {
@@ -2114,7 +2131,7 @@ int pci_finish_runtime_suspend(struct pci_dev *dev)
 
        dev->runtime_d3cold = target_state == PCI_D3cold;
 
-       pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
+       __pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
 
        error = pci_set_power_state(dev, target_state);
 
@@ -2138,16 +2155,16 @@ bool pci_dev_run_wake(struct pci_dev *dev)
 {
        struct pci_bus *bus = dev->bus;
 
-       if (device_can_wakeup(&dev->dev))
-               return true;
-
        if (!dev->pme_support)
                return false;
 
        /* PME-capable in principle, but not from the target power state */
-       if (!pci_pme_capable(dev, pci_target_state(dev, false)))
+       if (!pci_pme_capable(dev, pci_target_state(dev, true)))
                return false;
 
+       if (device_can_wakeup(&dev->dev))
+               return true;
+
        while (bus->parent) {
                struct pci_dev *bridge = bus->self;
 
@@ -5273,11 +5290,11 @@ void pcie_print_link_status(struct pci_dev *dev)
        bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
 
        if (bw_avail >= bw_cap)
-               pci_info(dev, "%u.%03u Gb/s available bandwidth (%s x%d link)\n",
+               pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
                         bw_cap / 1000, bw_cap % 1000,
                         PCIE_SPEED2STR(speed_cap), width_cap);
        else
-               pci_info(dev, "%u.%03u Gb/s available bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
+               pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
                         bw_avail / 1000, bw_avail % 1000,
                         PCIE_SPEED2STR(speed), width,
                         limiting_dev ? pci_name(limiting_dev) : "<unknown>",
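With pci_enable_wake() now a thin wrapper around __pci_enable_wake(), external callers get the device_may_wakeup() policy check automatically, while the PCI core's runtime-suspend path keeps using the unchecked __pci_enable_wake(). A rough driver-side sketch under the new semantics (mydrv_suspend is a hypothetical legacy ->suspend hook; the PCI/PM calls are real):

#include <linux/pci.h>
#include <linux/pm_wakeup.h>

/*
 * pci_enable_wake() itself now rejects the request with -EINVAL when the
 * user has disallowed wake-up, so a driver no longer needs its own
 * device_may_wakeup() guard; passing the result as @enable simply disables
 * wake-up explicitly in that case.
 */
static int mydrv_suspend(struct pci_dev *pdev, pm_message_t state)
{
        pci_save_state(pdev);
        pci_enable_wake(pdev, PCI_D3hot, device_may_wakeup(&pdev->dev));
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
}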
index b1ae161..fee9225 100644 (file)
@@ -1622,22 +1622,30 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
 
        if (!need_valid_mask) {
                irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0,
-                                               chip->ngpio, NUMA_NO_NODE);
+                                               community->npins, NUMA_NO_NODE);
                if (irq_base < 0) {
                        dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n");
                        return irq_base;
                }
-       } else {
-               irq_base = 0;
        }
 
-       ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, irq_base,
+       ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0,
                                   handle_bad_irq, IRQ_TYPE_NONE);
        if (ret) {
                dev_err(pctrl->dev, "failed to add IRQ chip\n");
                return ret;
        }
 
+       if (!need_valid_mask) {
+               for (i = 0; i < community->ngpio_ranges; i++) {
+                       range = &community->gpio_ranges[i];
+
+                       irq_domain_associate_many(chip->irq.domain, irq_base,
+                                                 range->base, range->npins);
+                       irq_base += range->npins;
+               }
+       }
+
        gpiochip_set_chained_irqchip(chip, &chv_gpio_irqchip, irq,
                                     chv_gpio_irq_handler);
        return 0;
index 8870a41..fee3435 100644 (file)
                .npins = ((e) - (s) + 1),               \
        }
 
+#define SPTH_GPP(r, s, e, g)                           \
+       {                                               \
+               .reg_num = (r),                         \
+               .base = (s),                            \
+               .size = ((e) - (s) + 1),                \
+               .gpio_base = (g),                       \
+       }
+
+#define SPTH_COMMUNITY(b, s, e, g)                     \
+       {                                               \
+               .barno = (b),                           \
+               .padown_offset = SPT_PAD_OWN,           \
+               .padcfglock_offset = SPT_PADCFGLOCK,    \
+               .hostown_offset = SPT_HOSTSW_OWN,       \
+               .ie_offset = SPT_GPI_IE,                \
+               .pin_base = (s),                        \
+               .npins = ((e) - (s) + 1),               \
+               .gpps = (g),                            \
+               .ngpps = ARRAY_SIZE(g),                 \
+       }
+
 /* Sunrisepoint-LP */
 static const struct pinctrl_pin_desc sptlp_pins[] = {
        /* GPP_A */
@@ -531,10 +552,28 @@ static const struct intel_function spth_functions[] = {
        FUNCTION("i2c2", spth_i2c2_groups),
 };
 
+static const struct intel_padgroup spth_community0_gpps[] = {
+       SPTH_GPP(0, 0, 23, 0),          /* GPP_A */
+       SPTH_GPP(1, 24, 47, 24),        /* GPP_B */
+};
+
+static const struct intel_padgroup spth_community1_gpps[] = {
+       SPTH_GPP(0, 48, 71, 48),        /* GPP_C */
+       SPTH_GPP(1, 72, 95, 72),        /* GPP_D */
+       SPTH_GPP(2, 96, 108, 96),       /* GPP_E */
+       SPTH_GPP(3, 109, 132, 120),     /* GPP_F */
+       SPTH_GPP(4, 133, 156, 144),     /* GPP_G */
+       SPTH_GPP(5, 157, 180, 168),     /* GPP_H */
+};
+
+static const struct intel_padgroup spth_community3_gpps[] = {
+       SPTH_GPP(0, 181, 191, 192),     /* GPP_I */
+};
+
 static const struct intel_community spth_communities[] = {
-       SPT_COMMUNITY(0, 0, 47),
-       SPT_COMMUNITY(1, 48, 180),
-       SPT_COMMUNITY(2, 181, 191),
+       SPTH_COMMUNITY(0, 0, 47, spth_community0_gpps),
+       SPTH_COMMUNITY(1, 48, 180, spth_community1_gpps),
+       SPTH_COMMUNITY(2, 181, 191, spth_community3_gpps),
 };
 
 static const struct intel_pinctrl_soc_data spth_soc_data = {
index 4b91ff7..99a6cea 100644 (file)
@@ -898,7 +898,7 @@ static struct meson_bank meson_axg_periphs_banks[] = {
 
 static struct meson_bank meson_axg_aobus_banks[] = {
        /*   name    first      last      irq   pullen  pull    dir     out     in  */
-       BANK("AO",   GPIOAO_0,  GPIOAO_9, 0, 13, 0,  16,  0, 0,  0,  0,  0, 16,  1,  0),
+       BANK("AO",   GPIOAO_0,  GPIOAO_13, 0, 13, 0,  16,  0, 0,  0,  0,  0, 16,  1,  0),
 };
 
 static struct meson_pmx_bank meson_axg_periphs_pmx_banks[] = {
index 39d06dd..bc309c5 100644 (file)
@@ -154,7 +154,7 @@ config DELL_LAPTOP
        depends on ACPI_VIDEO || ACPI_VIDEO = n
        depends on RFKILL || RFKILL = n
        depends on SERIO_I8042
-       select DELL_SMBIOS
+       depends on DELL_SMBIOS
        select POWER_SUPPLY
        select LEDS_CLASS
        select NEW_LEDS
index d4aeac3..f086469 100644 (file)
@@ -178,8 +178,10 @@ static int asus_wireless_remove(struct acpi_device *adev)
 {
        struct asus_wireless_data *data = acpi_driver_data(adev);
 
-       if (data->wq)
+       if (data->wq) {
+               devm_led_classdev_unregister(&adev->dev, &data->led);
                destroy_workqueue(data->wq);
+       }
        return 0;
 }
 
index 8e70a62..cbbafdc 100644 (file)
@@ -1083,6 +1083,7 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
                dev_err(qproc->dev, "unable to resolve mba region\n");
                return ret;
        }
+       of_node_put(node);
 
        qproc->mba_phys = r.start;
        qproc->mba_size = resource_size(&r);
@@ -1100,6 +1101,7 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
                dev_err(qproc->dev, "unable to resolve mpss region\n");
                return ret;
        }
+       of_node_put(node);
 
        qproc->mpss_phys = qproc->mpss_reloc = r.start;
        qproc->mpss_size = resource_size(&r);
index 6d9c583..a9609d9 100644 (file)
@@ -1163,7 +1163,7 @@ int rproc_trigger_recovery(struct rproc *rproc)
        if (ret)
                return ret;
 
-       ret = rproc_stop(rproc, false);
+       ret = rproc_stop(rproc, true);
        if (ret)
                goto unlock_mutex;
 
@@ -1316,7 +1316,7 @@ void rproc_shutdown(struct rproc *rproc)
        if (!atomic_dec_and_test(&rproc->power))
                goto out;
 
-       ret = rproc_stop(rproc, true);
+       ret = rproc_stop(rproc, false);
        if (ret) {
                atomic_inc(&rproc->power);
                goto out;
index 64b6de9..1efdf9f 100644 (file)
@@ -581,4 +581,6 @@ static void rpmsg_chrdev_exit(void)
        unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
 }
 module_exit(rpmsg_chrdev_exit);
+
+MODULE_ALIAS("rpmsg:rpmsg_chrdev");
 MODULE_LICENSE("GPL v2");
index 304e891..60f2250 100644 (file)
@@ -57,7 +57,7 @@ static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms)
 
 static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
 {
-       long rc = OPAL_BUSY;
+       s64 rc = OPAL_BUSY;
        int retries = 10;
        u32 y_m_d;
        u64 h_m_s_ms;
@@ -66,13 +66,17 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
 
        while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
                rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
-               if (rc == OPAL_BUSY_EVENT)
+               if (rc == OPAL_BUSY_EVENT) {
+                       msleep(OPAL_BUSY_DELAY_MS);
                        opal_poll_events(NULL);
-               else if (retries-- && (rc == OPAL_HARDWARE
-                                      || rc == OPAL_INTERNAL_ERROR))
-                       msleep(10);
-               else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
-                       break;
+               } else if (rc == OPAL_BUSY) {
+                       msleep(OPAL_BUSY_DELAY_MS);
+               } else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) {
+                       if (retries--) {
+                               msleep(10); /* Wait 10ms before retry */
+                               rc = OPAL_BUSY; /* go around again */
+                       }
+               }
        }
 
        if (rc != OPAL_SUCCESS)
@@ -87,21 +91,26 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
 
 static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
 {
-       long rc = OPAL_BUSY;
+       s64 rc = OPAL_BUSY;
        int retries = 10;
        u32 y_m_d = 0;
        u64 h_m_s_ms = 0;
 
        tm_to_opal(tm, &y_m_d, &h_m_s_ms);
+
        while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
                rc = opal_rtc_write(y_m_d, h_m_s_ms);
-               if (rc == OPAL_BUSY_EVENT)
+               if (rc == OPAL_BUSY_EVENT) {
+                       msleep(OPAL_BUSY_DELAY_MS);
                        opal_poll_events(NULL);
-               else if (retries-- && (rc == OPAL_HARDWARE
-                                      || rc == OPAL_INTERNAL_ERROR))
-                       msleep(10);
-               else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
-                       break;
+               } else if (rc == OPAL_BUSY) {
+                       msleep(OPAL_BUSY_DELAY_MS);
+               } else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) {
+                       if (retries--) {
+                               msleep(10); /* Wait 10ms before retry */
+                               rc = OPAL_BUSY; /* go around again */
+                       }
+               }
        }
 
        return rc == OPAL_SUCCESS ? 0 : -EIO;
index 62f5f04..5e963fe 100644 (file)
@@ -592,13 +592,22 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
 int dasd_alias_add_device(struct dasd_device *device)
 {
        struct dasd_eckd_private *private = device->private;
-       struct alias_lcu *lcu;
+       __u8 uaddr = private->uid.real_unit_addr;
+       struct alias_lcu *lcu = private->lcu;
        unsigned long flags;
        int rc;
 
-       lcu = private->lcu;
        rc = 0;
        spin_lock_irqsave(&lcu->lock, flags);
+       /*
+        * Check if device and lcu type differ. If so, the uac data may be
+        * outdated and needs to be updated.
+        */
+       if (private->uid.type !=  lcu->uac->unit[uaddr].ua_type) {
+               lcu->flags |= UPDATE_PENDING;
+               DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+                             "uid type mismatch - trigger rescan");
+       }
        if (!(lcu->flags & UPDATE_PENDING)) {
                rc = _add_device_to_lcu(lcu, device, device);
                if (rc)
index 6652a49..9029804 100644 (file)
@@ -452,6 +452,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
 
 static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
 {
+       struct channel_path *chp;
        struct chp_link link;
        struct chp_id chpid;
        int status;
@@ -464,10 +465,17 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
        chpid.id = sei_area->rsid;
        /* allocate a new channel path structure, if needed */
        status = chp_get_status(chpid);
-       if (status < 0)
-               chp_new(chpid);
-       else if (!status)
+       if (!status)
                return;
+
+       if (status < 0) {
+               chp_new(chpid);
+       } else {
+               chp = chpid_to_chp(chpid);
+               mutex_lock(&chp->lock);
+               chp_update_desc(chp);
+               mutex_unlock(&chp->lock);
+       }
        memset(&link, 0, sizeof(struct chp_link));
        link.chpid = chpid;
        if ((sei_area->vf & 0xc0) != 0) {
index 439991d..4c14ce4 100644 (file)
@@ -141,7 +141,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
        int i;
 
        for (i = 0; i < nr_queues; i++) {
-               q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
+               q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);
                if (!q)
                        return -ENOMEM;
 
@@ -456,7 +456,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
 {
        struct ciw *ciw;
        struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
-       int rc;
 
        memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
        memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
@@ -493,16 +492,14 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
        ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
        if (!ciw) {
                DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
-               rc = -EINVAL;
-               goto out_err;
+               return -EINVAL;
        }
        irq_ptr->equeue = *ciw;
 
        ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
        if (!ciw) {
                DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
-               rc = -EINVAL;
-               goto out_err;
+               return -EINVAL;
        }
        irq_ptr->aqueue = *ciw;
 
@@ -512,9 +509,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
        init_data->cdev->handler = qdio_int_handler;
        spin_unlock_irq(get_ccwdev_lock(irq_ptr->cdev));
        return 0;
-out_err:
-       qdio_release_memory(irq_ptr);
-       return rc;
 }
 
 void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
index 2c75507..dce92b2 100644 (file)
@@ -715,6 +715,10 @@ void cp_free(struct channel_program *cp)
  * and stores the result to ccwchain list. @cp must have been
  * initialized by a previous call with cp_init(). Otherwise, undefined
  * behavior occurs.
+ * For each chain composing the channel program:
+ * - On entry, ch_len holds the count of CCWs to be translated.
+ * - On exit, ch_len is adjusted to the count of successfully translated CCWs.
+ * This lets cp_free() know how many CCWs to free in each chain.
  *
  * The S/390 CCW Translation APIS (prefixed by 'cp_') are introduced
  * as helpers to do ccw chain translation inside the kernel. Basically
@@ -749,11 +753,18 @@ int cp_prefetch(struct channel_program *cp)
                for (idx = 0; idx < len; idx++) {
                        ret = ccwchain_fetch_one(chain, idx, cp);
                        if (ret)
-                               return ret;
+                               goto out_err;
                }
        }
 
        return 0;
+out_err:
+       /* Only clean up the chain elements that were actually translated. */
+       chain->ch_len = idx;
+       list_for_each_entry_continue(chain, &cp->ccwchain_list, next) {
+               chain->ch_len = 0;
+       }
+       return ret;
 }
 
 /**
index ff6963a..3c80064 100644 (file)
@@ -20,12 +20,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
        int ccode;
        __u8 lpm;
        unsigned long flags;
+       int ret;
 
        sch = private->sch;
 
        spin_lock_irqsave(sch->lock, flags);
        private->state = VFIO_CCW_STATE_BUSY;
-       spin_unlock_irqrestore(sch->lock, flags);
 
        orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
 
@@ -38,10 +38,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
                 * Initialize device status information
                 */
                sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
-               return 0;
+               ret = 0;
+               break;
        case 1:         /* Status pending */
        case 2:         /* Busy */
-               return -EBUSY;
+               ret = -EBUSY;
+               break;
        case 3:         /* Device/path not operational */
        {
                lpm = orb->cmd.lpm;
@@ -51,13 +53,16 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
                        sch->lpm = 0;
 
                if (cio_update_schib(sch))
-                       return -ENODEV;
-
-               return sch->lpm ? -EACCES : -ENODEV;
+                       ret = -ENODEV;
+               else
+                       ret = sch->lpm ? -EACCES : -ENODEV;
+               break;
        }
        default:
-               return ccode;
+               ret = ccode;
        }
+       spin_unlock_irqrestore(sch->lock, flags);
+       return ret;
 }
 
 static void fsm_notoper(struct vfio_ccw_private *private,
index 4326715..78b98b3 100644 (file)
@@ -557,7 +557,6 @@ enum qeth_prot_versions {
 enum qeth_cmd_buffer_state {
        BUF_STATE_FREE,
        BUF_STATE_LOCKED,
-       BUF_STATE_PROCESSED,
 };
 
 enum qeth_cq {
@@ -601,7 +600,6 @@ struct qeth_channel {
        struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
        atomic_t irq_pending;
        int io_buf_no;
-       int buf_no;
 };
 
 /**
index 04fefa5..dffd820 100644 (file)
@@ -706,7 +706,6 @@ void qeth_clear_ipacmd_list(struct qeth_card *card)
                qeth_put_reply(reply);
        }
        spin_unlock_irqrestore(&card->lock, flags);
-       atomic_set(&card->write.irq_pending, 0);
 }
 EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
 
@@ -818,7 +817,6 @@ void qeth_clear_cmd_buffers(struct qeth_channel *channel)
 
        for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
                qeth_release_buffer(channel, &channel->iob[cnt]);
-       channel->buf_no = 0;
        channel->io_buf_no = 0;
 }
 EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
@@ -924,7 +922,6 @@ static int qeth_setup_channel(struct qeth_channel *channel)
                        kfree(channel->iob[cnt].data);
                return -ENOMEM;
        }
-       channel->buf_no = 0;
        channel->io_buf_no = 0;
        atomic_set(&channel->irq_pending, 0);
        spin_lock_init(&channel->iob_lock);
@@ -1100,16 +1097,9 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
 {
        int rc;
        int cstat, dstat;
-       struct qeth_cmd_buffer *buffer;
+       struct qeth_cmd_buffer *iob = NULL;
        struct qeth_channel *channel;
        struct qeth_card *card;
-       struct qeth_cmd_buffer *iob;
-       __u8 index;
-
-       if (__qeth_check_irb_error(cdev, intparm, irb))
-               return;
-       cstat = irb->scsw.cmd.cstat;
-       dstat = irb->scsw.cmd.dstat;
 
        card = CARD_FROM_CDEV(cdev);
        if (!card)
@@ -1127,6 +1117,19 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
                channel = &card->data;
                QETH_CARD_TEXT(card, 5, "data");
        }
+
+       if (qeth_intparm_is_iob(intparm))
+               iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
+
+       if (__qeth_check_irb_error(cdev, intparm, irb)) {
+               /* IO was terminated, free its resources. */
+               if (iob)
+                       qeth_release_buffer(iob->channel, iob);
+               atomic_set(&channel->irq_pending, 0);
+               wake_up(&card->wait_q);
+               return;
+       }
+
        atomic_set(&channel->irq_pending, 0);
 
        if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
@@ -1150,6 +1153,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
                /* we don't have to handle this further */
                intparm = 0;
        }
+
+       cstat = irb->scsw.cmd.cstat;
+       dstat = irb->scsw.cmd.dstat;
+
        if ((dstat & DEV_STAT_UNIT_EXCEP) ||
            (dstat & DEV_STAT_UNIT_CHECK) ||
            (cstat)) {
@@ -1182,25 +1189,15 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
                channel->state = CH_STATE_RCD_DONE;
                goto out;
        }
-       if (intparm) {
-               buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
-               buffer->state = BUF_STATE_PROCESSED;
-       }
        if (channel == &card->data)
                return;
        if (channel == &card->read &&
            channel->state == CH_STATE_UP)
                __qeth_issue_next_read(card);
 
-       iob = channel->iob;
-       index = channel->buf_no;
-       while (iob[index].state == BUF_STATE_PROCESSED) {
-               if (iob[index].callback != NULL)
-                       iob[index].callback(channel, iob + index);
+       if (iob && iob->callback)
+               iob->callback(iob->channel, iob);
 
-               index = (index + 1) % QETH_CMD_BUFFER_NO;
-       }
-       channel->buf_no = index;
 out:
        wake_up(&card->wait_q);
        return;
@@ -1870,8 +1867,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
                   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
        QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
        spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
-       rc = ccw_device_start(channel->ccwdev,
-                             &channel->ccw, (addr_t) iob, 0, 0);
+       rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
+                                     (addr_t) iob, 0, 0, QETH_TIMEOUT);
        spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
 
        if (rc) {
@@ -1888,7 +1885,6 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
        if (channel->state != CH_STATE_UP) {
                rc = -ETIME;
                QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
-               qeth_clear_cmd_buffers(channel);
        } else
                rc = 0;
        return rc;
@@ -1942,8 +1938,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
                   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
        QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
        spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
-       rc = ccw_device_start(channel->ccwdev,
-                             &channel->ccw, (addr_t) iob, 0, 0);
+       rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
+                                     (addr_t) iob, 0, 0, QETH_TIMEOUT);
        spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
 
        if (rc) {
@@ -1964,7 +1960,6 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
                QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n",
                        dev_name(&channel->ccwdev->dev));
                QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
-               qeth_clear_cmd_buffers(channel);
                return -ETIME;
        }
        return qeth_idx_activate_get_answer(channel, idx_reply_cb);
@@ -2166,8 +2161,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,
 
        QETH_CARD_TEXT(card, 6, "noirqpnd");
        spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
-       rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
-                             (addr_t) iob, 0, 0);
+       rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw,
+                                     (addr_t) iob, 0, 0, event_timeout);
        spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
        if (rc) {
                QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
@@ -2199,8 +2194,6 @@ int qeth_send_control_data(struct qeth_card *card, int len,
                }
        }
 
-       if (reply->rc == -EIO)
-               goto error;
        rc = reply->rc;
        qeth_put_reply(reply);
        return rc;
@@ -2211,10 +2204,6 @@ time_err:
        list_del_init(&reply->list);
        spin_unlock_irqrestore(&reply->card->lock, flags);
        atomic_inc(&reply->received);
-error:
-       atomic_set(&card->write.irq_pending, 0);
-       qeth_release_buffer(iob->channel, iob);
-       card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO;
        rc = reply->rc;
        qeth_put_reply(reply);
        return rc;
@@ -3033,28 +3022,23 @@ static int qeth_send_startlan(struct qeth_card *card)
        return rc;
 }
 
-static int qeth_default_setadapterparms_cb(struct qeth_card *card,
-               struct qeth_reply *reply, unsigned long data)
+static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
 {
-       struct qeth_ipa_cmd *cmd;
-
-       QETH_CARD_TEXT(card, 4, "defadpcb");
-
-       cmd = (struct qeth_ipa_cmd *) data;
-       if (cmd->hdr.return_code == 0)
+       if (!cmd->hdr.return_code)
                cmd->hdr.return_code =
                        cmd->data.setadapterparms.hdr.return_code;
-       return 0;
+       return cmd->hdr.return_code;
 }
 
 static int qeth_query_setadapterparms_cb(struct qeth_card *card,
                struct qeth_reply *reply, unsigned long data)
 {
-       struct qeth_ipa_cmd *cmd;
+       struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
 
        QETH_CARD_TEXT(card, 3, "quyadpcb");
+       if (qeth_setadpparms_inspect_rc(cmd))
+               return 0;
 
-       cmd = (struct qeth_ipa_cmd *) data;
        if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
                card->info.link_type =
                      cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
@@ -3062,7 +3046,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
        }
        card->options.adp.supported_funcs =
                cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
-       return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
+       return 0;
 }
 
 static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
@@ -3154,22 +3138,20 @@ EXPORT_SYMBOL_GPL(qeth_query_ipassists);
 static int qeth_query_switch_attributes_cb(struct qeth_card *card,
                                struct qeth_reply *reply, unsigned long data)
 {
-       struct qeth_ipa_cmd *cmd;
-       struct qeth_switch_info *sw_info;
+       struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
        struct qeth_query_switch_attributes *attrs;
+       struct qeth_switch_info *sw_info;
 
        QETH_CARD_TEXT(card, 2, "qswiatcb");
-       cmd = (struct qeth_ipa_cmd *) data;
-       sw_info = (struct qeth_switch_info *)reply->param;
-       if (cmd->data.setadapterparms.hdr.return_code == 0) {
-               attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
-               sw_info->capabilities = attrs->capabilities;
-               sw_info->settings = attrs->settings;
-               QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
-                                                       sw_info->settings);
-       }
-       qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
+       if (qeth_setadpparms_inspect_rc(cmd))
+               return 0;
 
+       sw_info = (struct qeth_switch_info *)reply->param;
+       attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
+       sw_info->capabilities = attrs->capabilities;
+       sw_info->settings = attrs->settings;
+       QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
+                       sw_info->settings);
        return 0;
 }
 
@@ -4207,16 +4189,13 @@ EXPORT_SYMBOL_GPL(qeth_do_send_packet);
 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
                struct qeth_reply *reply, unsigned long data)
 {
-       struct qeth_ipa_cmd *cmd;
+       struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
        struct qeth_ipacmd_setadpparms *setparms;
 
        QETH_CARD_TEXT(card, 4, "prmadpcb");
 
-       cmd = (struct qeth_ipa_cmd *) data;
        setparms = &(cmd->data.setadapterparms);
-
-       qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
-       if (cmd->hdr.return_code) {
+       if (qeth_setadpparms_inspect_rc(cmd)) {
                QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
                setparms->data.mode = SET_PROMISC_MODE_OFF;
        }
@@ -4286,18 +4265,18 @@ EXPORT_SYMBOL_GPL(qeth_get_stats);
 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
                struct qeth_reply *reply, unsigned long data)
 {
-       struct qeth_ipa_cmd *cmd;
+       struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
 
        QETH_CARD_TEXT(card, 4, "chgmaccb");
+       if (qeth_setadpparms_inspect_rc(cmd))
+               return 0;
 
-       cmd = (struct qeth_ipa_cmd *) data;
        if (!card->options.layer2 ||
            !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
                ether_addr_copy(card->dev->dev_addr,
                                cmd->data.setadapterparms.data.change_addr.addr);
                card->info.mac_bits |= QETH_LAYER2_MAC_READ;
        }
-       qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
        return 0;
 }
 
@@ -4328,13 +4307,15 @@ EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
                struct qeth_reply *reply, unsigned long data)
 {
-       struct qeth_ipa_cmd *cmd;
+       struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
        struct qeth_set_access_ctrl *access_ctrl_req;
        int fallback = *(int *)reply->param;
 
        QETH_CARD_TEXT(card, 4, "setaccb");
+       if (cmd->hdr.return_code)
+               return 0;
+       qeth_setadpparms_inspect_rc(cmd);
 
-       cmd = (struct qeth_ipa_cmd *) data;
        access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
        QETH_DBF_TEXT_(SETUP, 2, "setaccb");
        QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
@@ -4407,7 +4388,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
                        card->options.isolation = card->options.prev_isolation;
                break;
        }
-       qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
        return 0;
 }
 
@@ -4695,14 +4675,15 @@ out:
 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
                struct qeth_reply *reply, unsigned long data)
 {
-       struct qeth_ipa_cmd *cmd;
+       struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
        struct qeth_qoat_priv *priv;
        char *resdata;
        int resdatalen;
 
        QETH_CARD_TEXT(card, 3, "qoatcb");
+       if (qeth_setadpparms_inspect_rc(cmd))
+               return 0;
 
-       cmd = (struct qeth_ipa_cmd *)data;
        priv = (struct qeth_qoat_priv *)reply->param;
        resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
        resdata = (char *)data + 28;
@@ -4796,21 +4777,18 @@ out:
 static int qeth_query_card_info_cb(struct qeth_card *card,
                                   struct qeth_reply *reply, unsigned long data)
 {
-       struct qeth_ipa_cmd *cmd;
+       struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
+       struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
        struct qeth_query_card_info *card_info;
-       struct carrier_info *carrier_info;
 
        QETH_CARD_TEXT(card, 2, "qcrdincb");
-       carrier_info = (struct carrier_info *)reply->param;
-       cmd = (struct qeth_ipa_cmd *)data;
-       card_info = &cmd->data.setadapterparms.data.card_info;
-       if (cmd->data.setadapterparms.hdr.return_code == 0) {
-               carrier_info->card_type = card_info->card_type;
-               carrier_info->port_mode = card_info->port_mode;
-               carrier_info->port_speed = card_info->port_speed;
-       }
+       if (qeth_setadpparms_inspect_rc(cmd))
+               return 0;
 
-       qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
+       card_info = &cmd->data.setadapterparms.data.card_info;
+       carrier_info->card_type = card_info->card_type;
+       carrier_info->port_mode = card_info->port_mode;
+       carrier_info->port_speed = card_info->port_speed;
        return 0;
 }
 
@@ -4857,7 +4835,7 @@ int qeth_vm_request_mac(struct qeth_card *card)
                goto out;
        }
 
-       ccw_device_get_id(CARD_DDEV(card), &id);
+       ccw_device_get_id(CARD_RDEV(card), &id);
        request->resp_buf_len = sizeof(*response);
        request->resp_version = DIAG26C_VERSION2;
        request->op_code = DIAG26C_GET_MAC;
@@ -6563,10 +6541,14 @@ static int __init qeth_core_init(void)
        mutex_init(&qeth_mod_mutex);
 
        qeth_wq = create_singlethread_workqueue("qeth_wq");
+       if (!qeth_wq) {
+               rc = -ENOMEM;
+               goto out_err;
+       }
 
        rc = qeth_register_dbf_views();
        if (rc)
-               goto out_err;
+               goto dbf_err;
        qeth_core_root_dev = root_device_register("qeth");
        rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
        if (rc)
@@ -6603,6 +6585,8 @@ slab_err:
        root_device_unregister(qeth_core_root_dev);
 register_err:
        qeth_unregister_dbf_views();
+dbf_err:
+       destroy_workqueue(qeth_wq);
 out_err:
        pr_err("Initializing the qeth device driver failed\n");
        return rc;
index 619f897..f4d1ec0 100644 (file)
@@ -35,6 +35,18 @@ extern unsigned char IPA_PDU_HEADER[];
 #define QETH_HALT_CHANNEL_PARM -11
 #define QETH_RCD_PARM -12
 
+static inline bool qeth_intparm_is_iob(unsigned long intparm)
+{
+       switch (intparm) {
+       case QETH_CLEAR_CHANNEL_PARM:
+       case QETH_HALT_CHANNEL_PARM:
+       case QETH_RCD_PARM:
+       case 0:
+               return false;
+       }
+       return true;
+}
+
 /*****************************************************************************/
 /* IP Assist related definitions                                             */
 /*****************************************************************************/
index 2ad6f12..b8079f2 100644 (file)
@@ -121,13 +121,10 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
        QETH_CARD_TEXT(card, 2, "L2Setmac");
        rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
        if (rc == 0) {
-               card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
-               ether_addr_copy(card->dev->dev_addr, mac);
                dev_info(&card->gdev->dev,
-                       "MAC address %pM successfully registered on device %s\n",
-                       card->dev->dev_addr, card->dev->name);
+                        "MAC address %pM successfully registered on device %s\n",
+                        mac, card->dev->name);
        } else {
-               card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
                switch (rc) {
                case -EEXIST:
                        dev_warn(&card->gdev->dev,
@@ -142,19 +139,6 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
        return rc;
 }
 
-static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
-{
-       int rc;
-
-       QETH_CARD_TEXT(card, 2, "L2Delmac");
-       if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
-               return 0;
-       rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC);
-       if (rc == 0)
-               card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
-       return rc;
-}
-
 static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
 {
        enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
@@ -519,6 +503,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
 {
        struct sockaddr *addr = p;
        struct qeth_card *card = dev->ml_priv;
+       u8 old_addr[ETH_ALEN];
        int rc = 0;
 
        QETH_CARD_TEXT(card, 3, "setmac");
@@ -530,14 +515,35 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
                return -EOPNOTSUPP;
        }
        QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN);
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
        if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
                QETH_CARD_TEXT(card, 3, "setmcREC");
                return -ERESTARTSYS;
        }
-       rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
-       if (!rc || (rc == -ENOENT))
-               rc = qeth_l2_send_setmac(card, addr->sa_data);
-       return rc ? -EINVAL : 0;
+
+       if (!qeth_card_hw_is_reachable(card)) {
+               ether_addr_copy(dev->dev_addr, addr->sa_data);
+               return 0;
+       }
+
+       /* don't register the same address twice */
+       if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
+           (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
+               return 0;
+
+       /* add the new address, switch over, drop the old */
+       rc = qeth_l2_send_setmac(card, addr->sa_data);
+       if (rc)
+               return rc;
+       ether_addr_copy(old_addr, dev->dev_addr);
+       ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+       if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
+               qeth_l2_remove_mac(card, old_addr);
+       card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+       return 0;
 }
 
 static void qeth_promisc_to_bridge(struct qeth_card *card)
@@ -1067,8 +1073,9 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                goto out_remove;
        }
 
-       if (card->info.type != QETH_CARD_TYPE_OSN)
-               qeth_l2_send_setmac(card, &card->dev->dev_addr[0]);
+       if (card->info.type != QETH_CARD_TYPE_OSN &&
+           !qeth_l2_send_setmac(card, card->dev->dev_addr))
+               card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
 
        if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) {
                if (card->info.hwtrap &&
@@ -1338,8 +1345,8 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len,
        qeth_prepare_control_data(card, len, iob);
        QETH_CARD_TEXT(card, 6, "osnoirqp");
        spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
-       rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
-                             (addr_t) iob, 0, 0);
+       rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw,
+                                     (addr_t) iob, 0, 0, QETH_IPA_TIMEOUT);
        spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
        if (rc) {
                QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
index c44d7c7..1754f55 100644 (file)
@@ -3,7 +3,7 @@
  *
  * This program is free software: you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
+ * the Free Software Foundation, either version 2 of the License, or
  * (at your option) any later version.
  *
  * This program is distributed in the hope that it will be useful,
index 0156c96..d62ddd6 100644 (file)
@@ -724,6 +724,8 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
        int wait;
        unsigned long flags = 0;
        unsigned long mflags = 0;
+       struct aac_hba_cmd_req *hbacmd = (struct aac_hba_cmd_req *)
+                       fibptr->hw_fib_va;
 
        fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA);
        if (callback) {
@@ -734,11 +736,9 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
                wait = 1;
 
 
-       if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
-               struct aac_hba_cmd_req *hbacmd =
-                       (struct aac_hba_cmd_req *)fibptr->hw_fib_va;
+       hbacmd->iu_type = command;
 
-               hbacmd->iu_type = command;
+       if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
                /* bit1 of request_id must be 0 */
                hbacmd->request_id =
                        cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
index abddde1..98597b5 100644 (file)
@@ -296,7 +296,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
                  "Number of Abort FW Timeouts: %lld\n"
                  "Number of Abort IO NOT Found: %lld\n"
 
-                 "Abord issued times: \n"
+                 "Abort issued times: \n"
                  "            < 6 sec : %lld\n"
                  "     6 sec - 20 sec : %lld\n"
                  "    20 sec - 30 sec : %lld\n"
index edb7be7..9e8de14 100644 (file)
@@ -291,7 +291,7 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
                 * Note: We have not moved the current phy_index so we will actually
                 *       compare the starting phy with itself.
                 *       This is expected and required to add the phy to the port. */
-               while (phy_index < SCI_MAX_PHYS) {
+               for (; phy_index < SCI_MAX_PHYS; phy_index++) {
                        if ((phy_mask & (1 << phy_index)) == 0)
                                continue;
                        sci_phy_get_sas_address(&ihost->phys[phy_index],
@@ -311,7 +311,6 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
                                              &ihost->phys[phy_index]);
 
                        assigned_phy_mask |= (1 << phy_index);
-                       phy_index++;
                }
 
        }
index ce97cde..f4d988d 100644 (file)
@@ -1124,12 +1124,12 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
                goto fail_fw_init;
        }
 
-       ret = 0;
+       return 0;
 
 fail_fw_init:
        dev_err(&instance->pdev->dev,
-               "Init cmd return status %s for SCSI host %d\n",
-               ret ? "FAILED" : "SUCCESS", instance->host->host_no);
+               "Init cmd return status FAILED for SCSI host %d\n",
+               instance->host->host_no);
 
        return ret;
 }
index 9ef5e3b..656c98e 100644 (file)
@@ -234,11 +234,13 @@ static const char *sdebug_version_date = "20180128";
 #define F_INV_OP               0x200
 #define F_FAKE_RW              0x400
 #define F_M_ACCESS             0x800   /* media access */
-#define F_LONG_DELAY           0x1000
+#define F_SSU_DELAY            0x1000
+#define F_SYNC_DELAY           0x2000
 
 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
 #define FF_SA (F_SA_HIGH | F_SA_LOW)
+#define F_LONG_DELAY           (F_SSU_DELAY | F_SYNC_DELAY)
 
 #define SDEBUG_MAX_PARTS 4
 
@@ -510,7 +512,7 @@ static const struct opcode_info_t release_iarr[] = {
 };
 
 static const struct opcode_info_t sync_cache_iarr[] = {
-       {0, 0x91, 0, F_LONG_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
+       {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
            {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
             0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },     /* SYNC_CACHE (16) */
 };
@@ -553,7 +555,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
            resp_write_dt0, write_iarr,                 /* WRITE(16) */
                {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
-       {0, 0x1b, 0, F_LONG_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
+       {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
            {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
            resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
@@ -606,7 +608,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = {
            resp_write_same_10, write_same_iarr,        /* WRITE SAME(10) */
                {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
                 0, 0, 0, 0, 0} },
-       {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_LONG_DELAY | F_M_ACCESS,
+       {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
            resp_sync_cache, sync_cache_iarr,
            {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
             0, 0, 0, 0} },                     /* SYNC_CACHE (10) */
@@ -667,6 +669,7 @@ static bool sdebug_strict = DEF_STRICT;
 static bool sdebug_any_injecting_opt;
 static bool sdebug_verbose;
 static bool have_dif_prot;
+static bool write_since_sync;
 static bool sdebug_statistics = DEF_STATISTICS;
 
 static unsigned int sdebug_store_sectors;
@@ -1607,6 +1610,7 @@ static int resp_start_stop(struct scsi_cmnd *scp,
 {
        unsigned char *cmd = scp->cmnd;
        int power_cond, stop;
+       bool changing;
 
        power_cond = (cmd[4] & 0xf0) >> 4;
        if (power_cond) {
@@ -1614,8 +1618,12 @@ static int resp_start_stop(struct scsi_cmnd *scp,
                return check_condition_result;
        }
        stop = !(cmd[4] & 1);
+       changing = atomic_read(&devip->stopped) == !stop;
        atomic_xchg(&devip->stopped, stop);
-       return (cmd[1] & 0x1) ? SDEG_RES_IMMED_MASK : 0; /* check IMMED bit */
+       if (!changing || cmd[1] & 0x1)  /* state unchanged or IMMED set */
+               return SDEG_RES_IMMED_MASK;
+       else
+               return 0;
 }
 
 static sector_t get_sdebug_capacity(void)
@@ -2473,6 +2481,7 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
        if (do_write) {
                sdb = scsi_out(scmd);
                dir = DMA_TO_DEVICE;
+               write_since_sync = true;
        } else {
                sdb = scsi_in(scmd);
                dir = DMA_FROM_DEVICE;
@@ -3583,6 +3592,7 @@ static int resp_get_lba_status(struct scsi_cmnd *scp,
 static int resp_sync_cache(struct scsi_cmnd *scp,
                           struct sdebug_dev_info *devip)
 {
+       int res = 0;
        u64 lba;
        u32 num_blocks;
        u8 *cmd = scp->cmnd;
@@ -3598,7 +3608,11 @@ static int resp_sync_cache(struct scsi_cmnd *scp,
                mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
                return check_condition_result;
        }
-       return (cmd[1] & 0x2) ? SDEG_RES_IMMED_MASK : 0; /* check IMMED bit */
+       if (!write_since_sync || cmd[1] & 0x2)
+               res = SDEG_RES_IMMED_MASK;
+       else            /* delay if write_since_sync and IMMED clear */
+               write_since_sync = false;
+       return res;
 }
 
 #define RL_BUCKET_ELEMS 8
@@ -5777,13 +5791,14 @@ fini:
                return schedule_resp(scp, devip, errsts, pfp, 0, 0);
        else if ((sdebug_jdelay || sdebug_ndelay) && (flags & F_LONG_DELAY)) {
                /*
-                * If any delay is active, want F_LONG_DELAY to be at least 1
+                * If any delay is active, for F_SSU_DELAY want at least 1
                 * second and if sdebug_jdelay>0 want a long delay of that
-                * many seconds.
+                * many seconds; for F_SYNC_DELAY want 1/20 of that.
                 */
                int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
+               int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
 
-               jdelay = mult_frac(USER_HZ * jdelay, HZ, USER_HZ);
+               jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
                return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
        } else
                return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
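To see what the F_SYNC_DELAY divisor buys, the small standalone program below redoes the jdelay arithmetic with assumed example values (sdebug_jdelay=5, HZ=250, USER_HZ=100); mult_frac() is re-declared locally to mirror the kernel macro. SYNC CACHE then sleeps about 62 jiffies (~0.25 s) where START STOP UNIT sleeps 1250 jiffies (5 s), i.e. roughly the intended 1/20.

#include <stdio.h>

/* same arithmetic as the kernel's mult_frac(): x * n / d without overflow */
#define mult_frac(x, n, d) (((x) / (d)) * (n) + (((x) % (d)) * (n)) / (d))

int main(void)
{
        const int HZ = 250, USER_HZ = 100, sdebug_jdelay = 5;   /* assumed */
        int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
        int ssu  = mult_frac(USER_HZ * jdelay, HZ, 1 * USER_HZ);  /* F_SSU_DELAY  */
        int sync = mult_frac(USER_HZ * jdelay, HZ, 20 * USER_HZ); /* F_SYNC_DELAY */

        /* prints SSU=1250 jiffies (5 s), SYNC=62 jiffies (~0.25 s) */
        printf("SSU=%d jiffies, SYNC=%d jiffies\n", ssu, sync);
        return 0;
}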
index f4b52b4..65f6c94 100644 (file)
@@ -2322,6 +2322,12 @@ iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp)
        return nlmsg_multicast(nls, skb, 0, group, gfp);
 }
 
+static int
+iscsi_unicast_skb(struct sk_buff *skb, u32 portid)
+{
+       return nlmsg_unicast(nls, skb, portid);
+}
+
 int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
                   char *data, uint32_t data_size)
 {
@@ -2524,14 +2530,11 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport,
 EXPORT_SYMBOL_GPL(iscsi_ping_comp_event);
 
 static int
-iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
-                   void *payload, int size)
+iscsi_if_send_reply(u32 portid, int type, void *payload, int size)
 {
        struct sk_buff  *skb;
        struct nlmsghdr *nlh;
        int len = nlmsg_total_size(size);
-       int flags = multi ? NLM_F_MULTI : 0;
-       int t = done ? NLMSG_DONE : type;
 
        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb) {
@@ -2539,10 +2542,9 @@ iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
                return -ENOMEM;
        }
 
-       nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0);
-       nlh->nlmsg_flags = flags;
+       nlh = __nlmsg_put(skb, 0, 0, type, (len - sizeof(*nlh)), 0);
        memcpy(nlmsg_data(nlh), payload, size);
-       return iscsi_multicast_skb(skb, group, GFP_ATOMIC);
+       return iscsi_unicast_skb(skb, portid);
 }
 
 static int
@@ -3470,6 +3472,7 @@ static int
 iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
 {
        int err = 0;
+       u32 portid;
        struct iscsi_uevent *ev = nlmsg_data(nlh);
        struct iscsi_transport *transport = NULL;
        struct iscsi_internal *priv;
@@ -3490,10 +3493,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
        if (!try_module_get(transport->owner))
                return -EINVAL;
 
+       portid = NETLINK_CB(skb).portid;
+
        switch (nlh->nlmsg_type) {
        case ISCSI_UEVENT_CREATE_SESSION:
                err = iscsi_if_create_session(priv, ep, ev,
-                                             NETLINK_CB(skb).portid,
+                                             portid,
                                              ev->u.c_session.initial_cmdsn,
                                              ev->u.c_session.cmds_max,
                                              ev->u.c_session.queue_depth);
@@ -3506,7 +3511,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
                }
 
                err = iscsi_if_create_session(priv, ep, ev,
-                                       NETLINK_CB(skb).portid,
+                                       portid,
                                        ev->u.c_bound_session.initial_cmdsn,
                                        ev->u.c_bound_session.cmds_max,
                                        ev->u.c_bound_session.queue_depth);
@@ -3664,6 +3669,8 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
 static void
 iscsi_if_rx(struct sk_buff *skb)
 {
+       u32 portid = NETLINK_CB(skb).portid;
+
        mutex_lock(&rx_queue_mutex);
        while (skb->len >= NLMSG_HDRLEN) {
                int err;
@@ -3699,8 +3706,8 @@ iscsi_if_rx(struct sk_buff *skb)
                                break;
                        if (ev->type == ISCSI_UEVENT_GET_CHAP && !err)
                                break;
-                       err = iscsi_if_send_reply(group, nlh->nlmsg_seq,
-                               nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
+                       err = iscsi_if_send_reply(portid, nlh->nlmsg_type,
+                                                 ev, sizeof(*ev));
                } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);
                skb_pull(skb, rlen);
        }
index a6201e6..9421d98 100644 (file)
@@ -2121,6 +2121,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
                                break;  /* standby */
                        if (sshdr.asc == 4 && sshdr.ascq == 0xc)
                                break;  /* unavailable */
+                       if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
+                               break;  /* sanitize in progress */
                        /*
                         * Issue command to spin up drive when not ready
                         */
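
A tiny decode helper, mirroring only what the comments in this hunk state about the NOT READY (ASC 0x04) qualifiers, shows where the new 0x1b case fits; the helper name and strings are illustrative:

#include <stdio.h>

static const char *not_ready_reason(unsigned char asc, unsigned char ascq)
{
        if (asc != 0x04)
                return "other sense";
        switch (ascq) {
        case 0x0b: return "standby";
        case 0x0c: return "unavailable";
        case 0x1b: return "sanitize in progress";   /* newly handled: stop retrying */
        default:   return "not ready, retry spin-up";
        }
}

int main(void)
{
        printf("%s\n", not_ready_reason(0x04, 0x1b));
        return 0;
}
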
index 41df75e..210407c 100644 (file)
@@ -400,8 +400,10 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
  *
  * Check that all zones of the device are equal. The last zone can however
  * be smaller. The zone size must also be a power of two number of LBAs.
+ *
+ * Returns the zone size in blocks upon success or an error code upon failure.
  */
-static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
+static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
 {
        u64 zone_blocks = 0;
        sector_t block = 0;
@@ -412,8 +414,6 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
        int ret;
        u8 same;
 
-       sdkp->zone_blocks = 0;
-
        /* Get a buffer */
        buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
        if (!buf)
@@ -445,16 +445,17 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
 
                /* Parse zone descriptors */
                while (rec < buf + buf_len) {
-                       zone_blocks = get_unaligned_be64(&rec[8]);
-                       if (sdkp->zone_blocks == 0) {
-                               sdkp->zone_blocks = zone_blocks;
-                       } else if (zone_blocks != sdkp->zone_blocks &&
-                                  (block + zone_blocks < sdkp->capacity
-                                   || zone_blocks > sdkp->zone_blocks)) {
-                               zone_blocks = 0;
+                       u64 this_zone_blocks = get_unaligned_be64(&rec[8]);
+
+                       if (zone_blocks == 0) {
+                               zone_blocks = this_zone_blocks;
+                       } else if (this_zone_blocks != zone_blocks &&
+                                  (block + this_zone_blocks < sdkp->capacity
+                                   || this_zone_blocks > zone_blocks)) {
+                               this_zone_blocks = 0;
                                goto out;
                        }
-                       block += zone_blocks;
+                       block += this_zone_blocks;
                        rec += 64;
                }
 
@@ -467,8 +468,6 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
 
        } while (block < sdkp->capacity);
 
-       zone_blocks = sdkp->zone_blocks;
-
 out:
        if (!zone_blocks) {
                if (sdkp->first_scan)
@@ -488,8 +487,7 @@ out:
                                  "Zone size too large\n");
                ret = -ENODEV;
        } else {
-               sdkp->zone_blocks = zone_blocks;
-               sdkp->zone_shift = ilog2(zone_blocks);
+               ret = zone_blocks;
        }
 
 out_free:
@@ -500,15 +498,14 @@ out_free:
 
 /**
  * sd_zbc_alloc_zone_bitmap - Allocate a zone bitmap (one bit per zone).
- * @sdkp: The disk of the bitmap
+ * @nr_zones: Number of zones to allocate space for.
+ * @numa_node: NUMA node to allocate the memory from.
  */
-static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp)
+static inline unsigned long *
+sd_zbc_alloc_zone_bitmap(u32 nr_zones, int numa_node)
 {
-       struct request_queue *q = sdkp->disk->queue;
-
-       return kzalloc_node(BITS_TO_LONGS(sdkp->nr_zones)
-                           * sizeof(unsigned long),
-                           GFP_KERNEL, q->node);
+       return kzalloc_node(BITS_TO_LONGS(nr_zones) * sizeof(unsigned long),
+                           GFP_KERNEL, numa_node);
 }
 
 /**
@@ -516,6 +513,7 @@ static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp)
  * @sdkp: disk used
  * @buf: report reply buffer
  * @buflen: length of @buf
+ * @zone_shift: logarithm base 2 of the number of blocks in a zone
  * @seq_zones_bitmap: bitmap of sequential zones to set
  *
  * Parse reported zone descriptors in @buf to identify sequential zones and
@@ -525,7 +523,7 @@ static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp)
  * Return the LBA after the last zone reported.
  */
 static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
-                                    unsigned int buflen,
+                                    unsigned int buflen, u32 zone_shift,
                                     unsigned long *seq_zones_bitmap)
 {
        sector_t lba, next_lba = sdkp->capacity;
@@ -544,7 +542,7 @@ static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
                if (type != ZBC_ZONE_TYPE_CONV &&
                    cond != ZBC_ZONE_COND_READONLY &&
                    cond != ZBC_ZONE_COND_OFFLINE)
-                       set_bit(lba >> sdkp->zone_shift, seq_zones_bitmap);
+                       set_bit(lba >> zone_shift, seq_zones_bitmap);
                next_lba = lba + get_unaligned_be64(&rec[8]);
                rec += 64;
        }
@@ -553,12 +551,16 @@ static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
 }
 
 /**
- * sd_zbc_setup_seq_zones_bitmap - Initialize the disk seq zone bitmap.
+ * sd_zbc_setup_seq_zones_bitmap - Initialize a seq zone bitmap.
  * @sdkp: target disk
+ * @zone_shift: logarithm base 2 of the number of blocks in a zone
+ * @nr_zones: number of zones to set up a seq zone bitmap for
  *
  * Allocate a zone bitmap and initialize it by identifying sequential zones.
  */
-static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
+static unsigned long *
+sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp, u32 zone_shift,
+                             u32 nr_zones)
 {
        struct request_queue *q = sdkp->disk->queue;
        unsigned long *seq_zones_bitmap;
@@ -566,9 +568,9 @@ static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
        unsigned char *buf;
        int ret = -ENOMEM;
 
-       seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(sdkp);
+       seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(nr_zones, q->node);
        if (!seq_zones_bitmap)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
        buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
        if (!buf)
@@ -579,7 +581,7 @@ static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
                if (ret)
                        goto out;
                lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
-                                          seq_zones_bitmap);
+                                          zone_shift, seq_zones_bitmap);
        }
 
        if (lba != sdkp->capacity) {
@@ -591,12 +593,9 @@ out:
        kfree(buf);
        if (ret) {
                kfree(seq_zones_bitmap);
-               return ret;
+               return ERR_PTR(ret);
        }
-
-       q->seq_zones_bitmap = seq_zones_bitmap;
-
-       return 0;
+       return seq_zones_bitmap;
 }
 
 static void sd_zbc_cleanup(struct scsi_disk *sdkp)
@@ -612,44 +611,64 @@ static void sd_zbc_cleanup(struct scsi_disk *sdkp)
        q->nr_zones = 0;
 }
 
-static int sd_zbc_setup(struct scsi_disk *sdkp)
+static int sd_zbc_setup(struct scsi_disk *sdkp, u32 zone_blocks)
 {
        struct request_queue *q = sdkp->disk->queue;
+       u32 zone_shift = ilog2(zone_blocks);
+       u32 nr_zones;
        int ret;
 
-       /* READ16/WRITE16 is mandatory for ZBC disks */
-       sdkp->device->use_16_for_rw = 1;
-       sdkp->device->use_10_for_rw = 0;
-
        /* chunk_sectors indicates the zone size */
-       blk_queue_chunk_sectors(sdkp->disk->queue,
-                       logical_to_sectors(sdkp->device, sdkp->zone_blocks));
-       sdkp->nr_zones =
-               round_up(sdkp->capacity, sdkp->zone_blocks) >> sdkp->zone_shift;
+       blk_queue_chunk_sectors(q,
+                       logical_to_sectors(sdkp->device, zone_blocks));
+       nr_zones = round_up(sdkp->capacity, zone_blocks) >> zone_shift;
 
        /*
         * Initialize the device request queue information if the number
         * of zones changed.
         */
-       if (sdkp->nr_zones != q->nr_zones) {
-
-               sd_zbc_cleanup(sdkp);
-
-               q->nr_zones = sdkp->nr_zones;
-               if (sdkp->nr_zones) {
-                       q->seq_zones_wlock = sd_zbc_alloc_zone_bitmap(sdkp);
-                       if (!q->seq_zones_wlock) {
+       if (nr_zones != sdkp->nr_zones || nr_zones != q->nr_zones) {
+               unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
+               size_t zone_bitmap_size;
+
+               if (nr_zones) {
+                       seq_zones_wlock = sd_zbc_alloc_zone_bitmap(nr_zones,
+                                                                  q->node);
+                       if (!seq_zones_wlock) {
                                ret = -ENOMEM;
                                goto err;
                        }
 
-                       ret = sd_zbc_setup_seq_zones_bitmap(sdkp);
-                       if (ret) {
-                               sd_zbc_cleanup(sdkp);
+                       seq_zones_bitmap = sd_zbc_setup_seq_zones_bitmap(sdkp,
+                                                       zone_shift, nr_zones);
+                       if (IS_ERR(seq_zones_bitmap)) {
+                               ret = PTR_ERR(seq_zones_bitmap);
+                               kfree(seq_zones_wlock);
                                goto err;
                        }
                }
-
+               zone_bitmap_size = BITS_TO_LONGS(nr_zones) *
+                       sizeof(unsigned long);
+               blk_mq_freeze_queue(q);
+               if (q->nr_zones != nr_zones) {
+                       /* READ16/WRITE16 is mandatory for ZBC disks */
+                       sdkp->device->use_16_for_rw = 1;
+                       sdkp->device->use_10_for_rw = 0;
+
+                       sdkp->zone_blocks = zone_blocks;
+                       sdkp->zone_shift = zone_shift;
+                       sdkp->nr_zones = nr_zones;
+                       q->nr_zones = nr_zones;
+                       swap(q->seq_zones_wlock, seq_zones_wlock);
+                       swap(q->seq_zones_bitmap, seq_zones_bitmap);
+               } else if (memcmp(q->seq_zones_bitmap, seq_zones_bitmap,
+                                 zone_bitmap_size) != 0) {
+                       memcpy(q->seq_zones_bitmap, seq_zones_bitmap,
+                              zone_bitmap_size);
+               }
+               blk_mq_unfreeze_queue(q);
+               kfree(seq_zones_wlock);
+               kfree(seq_zones_bitmap);
        }
 
        return 0;
@@ -661,6 +680,7 @@ err:
 
 int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
 {
+       int64_t zone_blocks;
        int ret;
 
        if (!sd_is_zoned(sdkp))
@@ -697,12 +717,16 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
         * Check zone size: only devices with a constant zone size (except
         * an eventual last runt zone) that is a power of 2 are supported.
         */
-       ret = sd_zbc_check_zone_size(sdkp);
-       if (ret)
+       zone_blocks = sd_zbc_check_zone_size(sdkp);
+       ret = -EFBIG;
+       if (zone_blocks != (u32)zone_blocks)
+               goto err;
+       ret = zone_blocks;
+       if (ret < 0)
                goto err;
 
        /* The drive satisfies the kernel restrictions: set it up */
-       ret = sd_zbc_setup(sdkp);
+       ret = sd_zbc_setup(sdkp, zone_blocks);
        if (ret)
                goto err;
 
index 8c51d62..a2ec0bc 100644 (file)
@@ -1722,11 +1722,14 @@ static int storvsc_probe(struct hv_device *device,
                max_targets = STORVSC_MAX_TARGETS;
                max_channels = STORVSC_MAX_CHANNELS;
                /*
-                * On Windows8 and above, we support sub-channels for storage.
+                * On Windows8 and above, we support sub-channels for storage
+                * on SCSI and FC controllers.
                 * The number of sub-channels offered is based on the number of
                 * VCPUs in the guest.
                 */
-               max_sub_channels = (num_cpus / storvsc_vcpus_per_sub_channel);
+               if (!dev_is_ide)
+                       max_sub_channels =
+                               (num_cpus - 1) / storvsc_vcpus_per_sub_channel;
        }
 
        scsi_driver.can_queue = (max_outstanding_req_per_channel *
index c5b1bf1..00e7905 100644 (file)
@@ -276,6 +276,35 @@ static inline void ufshcd_remove_non_printable(char *val)
                *val = ' ';
 }
 
+static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
+               const char *str)
+{
+       struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
+
+       trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
+}
+
+static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
+               const char *str)
+{
+       struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
+
+       trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
+}
+
+static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
+               const char *str)
+{
+       struct utp_task_req_desc *descp;
+       struct utp_upiu_task_req *task_req;
+       int off = (int)tag - hba->nutrs;
+
+       descp = &hba->utmrdl_base_addr[off];
+       task_req = (struct utp_upiu_task_req *)descp->task_req_upiu;
+       trace_ufshcd_upiu(dev_name(hba->dev), str, &task_req->header,
+                       &task_req->input_param1);
+}
+
 static void ufshcd_add_command_trace(struct ufs_hba *hba,
                unsigned int tag, const char *str)
 {
@@ -285,6 +314,9 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
        struct ufshcd_lrb *lrbp;
        int transfer_len = -1;
 
+       /* trace UPIU also */
+       ufshcd_add_cmd_upiu_trace(hba, tag, str);
+
        if (!trace_ufshcd_command_enabled())
                return;
 
@@ -2550,6 +2582,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
 
        hba->dev_cmd.complete = &wait;
 
+       ufshcd_add_query_upiu_trace(hba, tag, "query_send");
        /* Make sure descriptors are ready before ringing the doorbell */
        wmb();
        spin_lock_irqsave(hba->host->host_lock, flags);
@@ -2559,6 +2592,9 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
 
        err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
 
+       ufshcd_add_query_upiu_trace(hba, tag,
+                       err ? "query_complete_err" : "query_complete");
+
 out_put_tag:
        ufshcd_put_dev_cmd_tag(hba, tag);
        wake_up(&hba->dev_cmd.tag_wq);
@@ -5443,11 +5479,14 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
 
        spin_unlock_irqrestore(host->host_lock, flags);
 
+       ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
+
        /* wait until the task management command is completed */
        err = wait_event_timeout(hba->tm_wq,
                        test_bit(free_slot, &hba->tm_condition),
                        msecs_to_jiffies(TM_CMD_TIMEOUT));
        if (!err) {
+               ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
                dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
                                __func__, tm_function);
                if (ufshcd_clear_tm_cmd(hba, free_slot))
@@ -5456,6 +5495,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
                err = -ETIMEDOUT;
        } else {
                err = ufshcd_task_req_compl(hba, free_slot, tm_response);
+               ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
        }
 
        clear_bit(free_slot, &hba->tm_condition);
index c374e3b..777e5f1 100644 (file)
@@ -609,7 +609,7 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
                        break;
 
                case BTSTAT_ABORTQUEUE:
-                       cmd->result = (DID_ABORT << 16);
+                       cmd->result = (DID_BUS_BUSY << 16);
                        break;
 
                case BTSTAT_SCSIPARITY:
index 884419c..457ea1f 100644 (file)
@@ -183,7 +183,7 @@ static u16 slim_slicesize(int code)
                0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7
        };
 
-       clamp(code, 1, (int)ARRAY_SIZE(sizetocode));
+       code = clamp(code, 1, (int)ARRAY_SIZE(sizetocode));
 
        return sizetocode[code - 1];
 }
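
The slimbus fix above hinges on clamp() returning the clamped value rather than modifying its argument in place. A quick userspace check with a simplified clamp() stand-in:

#include <stdio.h>

#define clamp(val, lo, hi) ((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

int main(void)
{
        int code = 40;                  /* out of range: the table has 16 entries */

        clamp(code, 1, 16);             /* BUG: result is discarded */
        printf("discarded: code = %d\n", code);   /* still 40 */

        code = clamp(code, 1, 16);      /* FIX: assign the result back */
        printf("assigned:  code = %d\n", code);   /* now 16 */
        return 0;
}
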
index fe96a8b..f7ed118 100644 (file)
@@ -45,7 +45,7 @@ struct rpi_power_domains {
 struct rpi_power_domain_packet {
        u32 domain;
        u32 on;
-} __packet;
+};
 
 /*
  * Asks the firmware to enable or disable power on a specific power
index 16cab40..aeab05f 100644 (file)
@@ -1799,7 +1799,7 @@ static int imx_csi_probe(struct platform_device *pdev)
        priv->dev->of_node = pdata->of_node;
        pinctrl = devm_pinctrl_get_select_default(priv->dev);
        if (IS_ERR(pinctrl)) {
-               ret = PTR_ERR(priv->vdev);
+               ret = PTR_ERR(pinctrl);
                dev_dbg(priv->dev,
                        "devm_pinctrl_get_select_default() failed: %d\n", ret);
                if (ret != -ENODEV)
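
The imx-media-csi fix is a reminder that PTR_ERR() must decode the pointer that actually failed. A userspace sketch with minimal stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers; get_pinctrl() and the fake handle are assumptions for the demo:

#include <errno.h>
#include <stdio.h>

static inline void *ERR_PTR(long err)      { return (void *)err; }
static inline long  PTR_ERR(const void *p) { return (long)p; }
static inline int   IS_ERR(const void *p)  { return (unsigned long)p >= (unsigned long)-4095; }

static void *get_pinctrl(int fail)
{
        return fail ? ERR_PTR(-ENODEV) : (void *)0x1000;   /* fake handle */
}

int main(void)
{
        void *pinctrl = get_pinctrl(1);
        int ret = 0;

        if (IS_ERR(pinctrl))
                ret = PTR_ERR(pinctrl);   /* decode the pointer that failed, not another one */

        printf("ret = %d\n", ret);        /* -19, i.e. -ENODEV */
        return 0;
}
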
index 6b5300c..885f5fc 100644 (file)
@@ -1390,7 +1390,7 @@ static inline void host_int_parse_assoc_resp_info(struct wilc_vif *vif,
        }
 
        if (hif_drv->usr_conn_req.ies) {
-               conn_info.req_ies = kmemdup(conn_info.req_ies,
+               conn_info.req_ies = kmemdup(hif_drv->usr_conn_req.ies,
                                            hif_drv->usr_conn_req.ies_len,
                                            GFP_KERNEL);
                if (conn_info.req_ies)
index 07c814c..6042901 100644 (file)
@@ -427,8 +427,8 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
        struct scatterlist *sg = &cmd->t_data_sg[0];
-       unsigned char *buf, zero = 0x00, *p = &zero;
-       int rc, ret;
+       unsigned char *buf, *not_zero;
+       int ret;
 
        buf = kmap(sg_page(sg)) + sg->offset;
        if (!buf)
@@ -437,10 +437,10 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
         * Fall back to block_execute_write_same() slow-path if
         * incoming WRITE_SAME payload does not contain zeros.
         */
-       rc = memcmp(buf, p, cmd->data_length);
+       not_zero = memchr_inv(buf, 0x00, cmd->data_length);
        kunmap(sg_page(sg));
 
-       if (rc)
+       if (not_zero)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
        ret = blkdev_issue_zeroout(bdev,
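
The old code compared data_length bytes against a one-byte &zero buffer; memchr_inv() instead checks every byte of the payload against a single value. A userspace sketch (memchr_inv() is kernel-only, so a tiny equivalent is written out here):

#include <stddef.h>
#include <stdio.h>

static const void *memchr_inv(const void *s, int c, size_t n)
{
        const unsigned char *p = s;

        while (n--) {
                if (*p != (unsigned char)c)
                        return p;       /* first byte that differs from c */
                p++;
        }
        return NULL;                    /* every byte matched c */
}

int main(void)
{
        unsigned char payload[512] = { 0 };

        printf("all zero: %s\n",
               memchr_inv(payload, 0x00, sizeof(payload)) ? "no" : "yes");
        payload[100] = 0xff;
        printf("after dirtying one byte: %s\n",
               memchr_inv(payload, 0x00, sizeof(payload)) ? "no" : "yes");
        return 0;
}
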
index 0d99b24..6cb933e 100644 (file)
@@ -890,6 +890,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                        bytes = min(bytes, data_len);
 
                        if (!bio) {
+new_bio:
                                nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
                                nr_pages -= nr_vecs;
                                /*
@@ -931,6 +932,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                                 * be allocated with pscsi_get_bio() above.
                                 */
                                bio = NULL;
+                               goto new_bio;
                        }
 
                        data_len -= bytes;
index 8a7f24d..0c19fcd 100644 (file)
@@ -194,6 +194,7 @@ static int int3403_cdev_add(struct int3403_priv *priv)
                return -EFAULT;
        }
 
+       priv->priv = obj;
        obj->max_state = p->package.count - 1;
        obj->cdev =
                thermal_cooling_device_register(acpi_device_bid(priv->adev),
@@ -201,8 +202,6 @@ static int int3403_cdev_add(struct int3403_priv *priv)
        if (IS_ERR(obj->cdev))
                result = PTR_ERR(obj->cdev);
 
-       priv->priv = obj;
-
        kfree(buf.pointer);
        /* TODO: add ACPI notification support */
 
index ed805c7..ac83f72 100644 (file)
  * @regulator: pointer to the TMU regulator structure.
  * @reg_conf: pointer to structure to register with core thermal.
  * @ntrip: number of supported trip points.
+ * @enabled: current status of TMU device
  * @tmu_initialize: SoC specific TMU initialization method
  * @tmu_control: SoC specific TMU control method
  * @tmu_read: SoC specific TMU temperature read method
@@ -205,6 +206,7 @@ struct exynos_tmu_data {
        struct regulator *regulator;
        struct thermal_zone_device *tzd;
        unsigned int ntrip;
+       bool enabled;
 
        int (*tmu_initialize)(struct platform_device *pdev);
        void (*tmu_control)(struct platform_device *pdev, bool on);
@@ -398,6 +400,7 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
        mutex_lock(&data->lock);
        clk_enable(data->clk);
        data->tmu_control(pdev, on);
+       data->enabled = on;
        clk_disable(data->clk);
        mutex_unlock(&data->lock);
 }
@@ -889,19 +892,24 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on)
 static int exynos_get_temp(void *p, int *temp)
 {
        struct exynos_tmu_data *data = p;
+       int value, ret = 0;
 
-       if (!data || !data->tmu_read)
+       if (!data || !data->tmu_read || !data->enabled)
                return -EINVAL;
 
        mutex_lock(&data->lock);
        clk_enable(data->clk);
 
-       *temp = code_to_temp(data, data->tmu_read(data)) * MCELSIUS;
+       value = data->tmu_read(data);
+       if (value < 0)
+               ret = value;
+       else
+               *temp = code_to_temp(data, value) * MCELSIUS;
 
        clk_disable(data->clk);
        mutex_unlock(&data->lock);
 
-       return 0;
+       return ret;
 }
 
 #ifdef CONFIG_THERMAL_EMULATION
index 3b3e1f6..1dbe27c 100644 (file)
@@ -121,6 +121,9 @@ struct gsm_dlci {
        struct mutex mutex;
 
        /* Link layer */
+       int mode;
+#define DLCI_MODE_ABM          0       /* Normal Asynchronous Balanced Mode */
+#define DLCI_MODE_ADM          1       /* Asynchronous Disconnected Mode */
        spinlock_t lock;        /* Protects the internal state */
        struct timer_list t1;   /* Retransmit timer for SABM and UA */
        int retries;
@@ -1364,7 +1367,13 @@ retry:
        ctrl->data = data;
        ctrl->len = clen;
        gsm->pending_cmd = ctrl;
-       gsm->cretries = gsm->n2;
+
+       /* If DLCI0 is in ADM mode, skip retries; it won't respond */
+       if (gsm->dlci[0]->mode == DLCI_MODE_ADM)
+               gsm->cretries = 1;
+       else
+               gsm->cretries = gsm->n2;
+
        mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
        gsm_control_transmit(gsm, ctrl);
        spin_unlock_irqrestore(&gsm->control_lock, flags);
@@ -1472,6 +1481,7 @@ static void gsm_dlci_t1(struct timer_list *t)
                        if (debug & 8)
                                pr_info("DLCI %d opening in ADM mode.\n",
                                        dlci->addr);
+                       dlci->mode = DLCI_MODE_ADM;
                        gsm_dlci_open(dlci);
                } else {
                        gsm_dlci_close(dlci);
@@ -2861,11 +2871,22 @@ static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk)
 static int gsm_carrier_raised(struct tty_port *port)
 {
        struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
+       struct gsm_mux *gsm = dlci->gsm;
+
        /* Not yet open so no carrier info */
        if (dlci->state != DLCI_OPEN)
                return 0;
        if (debug & 2)
                return 1;
+
+       /*
+        * Basic mode with control channel in ADM mode may not respond
+        * to CMD_MSC at all and modem_rx is empty.
+        */
+       if (gsm->encoding == 0 && gsm->dlci[0]->mode == DLCI_MODE_ADM &&
+           !dlci->modem_rx)
+               return 1;
+
        return dlci->modem_rx & TIOCM_CD;
 }
 
index a242783..2268339 100644 (file)
@@ -169,7 +169,7 @@ static int __init register_earlycon(char *buf, const struct earlycon_id *match)
  */
 int __init setup_earlycon(char *buf)
 {
-       const struct earlycon_id *match;
+       const struct earlycon_id **p_match;
 
        if (!buf || !buf[0])
                return -EINVAL;
@@ -177,7 +177,9 @@ int __init setup_earlycon(char *buf)
        if (early_con.flags & CON_ENABLED)
                return -EALREADY;
 
-       for (match = __earlycon_table; match < __earlycon_table_end; match++) {
+       for (p_match = __earlycon_table; p_match < __earlycon_table_end;
+            p_match++) {
+               const struct earlycon_id *match = *p_match;
                size_t len = strlen(match->name);
 
                if (strncmp(buf, match->name, len))
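
With __earlycon_table now holding pointers to entries rather than the entries themselves, the walk needs one more level of indirection. A userspace sketch, with a plain static array standing in for the linker-built table and the entry names assumed:

#include <stdio.h>
#include <string.h>

struct earlycon_id { const char *name; };

static const struct earlycon_id uart8250 = { "uart8250" };
static const struct earlycon_id pl011    = { "pl011" };

/* what the table section effectively holds after the change: pointers */
static const struct earlycon_id *earlycon_table[] = { &uart8250, &pl011 };

int main(void)
{
        const char *buf = "pl011,0x9000000";
        const struct earlycon_id **p_match;

        for (p_match = earlycon_table;
             p_match < earlycon_table +
                       sizeof(earlycon_table) / sizeof(earlycon_table[0]);
             p_match++) {
                const struct earlycon_id *match = *p_match;
                size_t len = strlen(match->name);

                if (strncmp(buf, match->name, len) == 0)
                        printf("matched %s\n", match->name);
        }
        return 0;
}
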
index 91f3a1a..c2fc6be 100644 (file)
@@ -316,7 +316,7 @@ static u32 imx_uart_readl(struct imx_port *sport, u32 offset)
                 * differ from the value that was last written. As it only
                 * clears after being set, reread conditionally.
                 */
-               if (sport->ucr2 & UCR2_SRST)
+               if (!(sport->ucr2 & UCR2_SRST))
                        sport->ucr2 = readl(sport->port.membase + offset);
                return sport->ucr2;
                break;
@@ -1833,6 +1833,11 @@ static int imx_uart_rs485_config(struct uart_port *port,
                rs485conf->flags &= ~SER_RS485_ENABLED;
 
        if (rs485conf->flags & SER_RS485_ENABLED) {
+               /* Enable receiver if low-active RTS signal is requested */
+               if (sport->have_rtscts &&  !sport->have_rtsgpio &&
+                   !(rs485conf->flags & SER_RS485_RTS_ON_SEND))
+                       rs485conf->flags |= SER_RS485_RX_DURING_TX;
+
                /* disable transmitter */
                ucr2 = imx_uart_readl(sport, UCR2);
                if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND)
@@ -2265,6 +2270,18 @@ static int imx_uart_probe(struct platform_device *pdev)
            (!sport->have_rtscts && !sport->have_rtsgpio))
                dev_err(&pdev->dev, "no RTS control, disabling rs485\n");
 
+       /*
+        * If using the i.MX UART RTS/CTS control then the RTS (CTS_B)
+        * signal cannot be set low during transmission in case the
+        * receiver is off (limitation of the i.MX UART IP).
+        */
+       if (sport->port.rs485.flags & SER_RS485_ENABLED &&
+           sport->have_rtscts && !sport->have_rtsgpio &&
+           (!(sport->port.rs485.flags & SER_RS485_RTS_ON_SEND) &&
+            !(sport->port.rs485.flags & SER_RS485_RX_DURING_TX)))
+               dev_err(&pdev->dev,
+                       "low-active RTS not possible when receiver is off, enabling receiver\n");
+
        imx_uart_rs485_config(&sport->port, &sport->port.rs485);
 
        /* Disable interrupts before requesting them */
index 750e564..f503fab 100644 (file)
@@ -495,7 +495,6 @@ static void mvebu_uart_set_termios(struct uart_port *port,
                termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR);
                termios->c_cflag &= CREAD | CBAUD;
                termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD);
-               termios->c_lflag = old->c_lflag;
        }
 
        spin_unlock_irqrestore(&port->lock, flags);
index 65ff669..a1b3eb0 100644 (file)
@@ -1022,6 +1022,7 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
        struct qcom_geni_serial_port *port;
        struct uart_port *uport;
        struct resource *res;
+       int irq;
 
        if (pdev->dev.of_node)
                line = of_alias_get_id(pdev->dev.of_node, "serial");
@@ -1061,11 +1062,12 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
        port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
        port->tx_fifo_width = DEF_FIFO_WIDTH_BITS;
 
-       uport->irq = platform_get_irq(pdev, 0);
-       if (uport->irq < 0) {
-               dev_err(&pdev->dev, "Failed to get IRQ %d\n", uport->irq);
-               return uport->irq;
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(&pdev->dev, "Failed to get IRQ %d\n", irq);
+               return irq;
        }
+       uport->irq = irq;
 
        uport->private_data = &qcom_geni_console_driver;
        platform_set_drvdata(pdev, port);
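
The qcom_geni_serial fix works because uart_port::irq is unsigned, so storing a negative platform_get_irq() result into it first makes the subsequent "< 0" check dead code. A userspace demo with assumed types; the compiler may even warn that the unsigned comparison is always false, which is the point:

#include <stdio.h>

struct uart_port { unsigned int irq; };          /* irq is unsigned, as in the kernel */

static int platform_get_irq_stub(void) { return -6; }   /* say, -ENXIO */

int main(void)
{
        struct uart_port uport = { 0 };
        int irq;

        uport.irq = platform_get_irq_stub();     /* negative value wraps around */
        printf("unsigned check fires: %s\n",
               uport.irq < 0 ? "yes" : "no");    /* prints "no": the bug */

        irq = platform_get_irq_stub();           /* check in a signed local first */
        if (irq < 0)
                printf("signed check fires: yes (%d)\n", irq);
        else
                uport.irq = irq;
        return 0;
}
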
index abcb4d0..bd72dd8 100644 (file)
@@ -1181,7 +1181,7 @@ static int __init cdns_early_console_setup(struct earlycon_device *device,
        /* only set baud if specified on command line - otherwise
         * assume it has been initialized by a boot loader.
         */
-       if (device->baud) {
+       if (port->uartclk && device->baud) {
                u32 cd = 0, bdiv = 0;
                u32 mr;
                int div8;
index 63114ea..7c838b9 100644 (file)
@@ -2816,7 +2816,10 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
 
        kref_init(&tty->kref);
        tty->magic = TTY_MAGIC;
-       tty_ldisc_init(tty);
+       if (tty_ldisc_init(tty)) {
+               kfree(tty);
+               return NULL;
+       }
        tty->session = NULL;
        tty->pgrp = NULL;
        mutex_init(&tty->legacy_mutex);
index 050f4d6..fb7329a 100644 (file)
@@ -176,12 +176,11 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
                        return ERR_CAST(ldops);
        }
 
-       ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL);
-       if (ld == NULL) {
-               put_ldops(ldops);
-               return ERR_PTR(-ENOMEM);
-       }
-
+       /*
+        * There is no way to handle allocation failure of only 16 bytes.
+        * Let's simplify error handling and save more memory.
+        */
+       ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL | __GFP_NOFAIL);
        ld->ops = ldops;
        ld->tty = tty;
 
@@ -527,19 +526,16 @@ static int tty_ldisc_failto(struct tty_struct *tty, int ld)
 static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
 {
        /* There is an outstanding reference here so this is safe */
-       old = tty_ldisc_get(tty, old->ops->num);
-       WARN_ON(IS_ERR(old));
-       tty->ldisc = old;
-       tty_set_termios_ldisc(tty, old->ops->num);
-       if (tty_ldisc_open(tty, old) < 0) {
-               tty_ldisc_put(old);
+       if (tty_ldisc_failto(tty, old->ops->num) < 0) {
+               const char *name = tty_name(tty);
+
+               pr_warn("Falling back ldisc for %s.\n", name);
                /* The traditional behaviour is to fall back to N_TTY, we
                   want to avoid falling back to N_NULL unless we have no
                   choice to avoid the risk of breaking anything */
                if (tty_ldisc_failto(tty, N_TTY) < 0 &&
                    tty_ldisc_failto(tty, N_NULL) < 0)
-                       panic("Couldn't open N_NULL ldisc for %s.",
-                             tty_name(tty));
+                       panic("Couldn't open N_NULL ldisc for %s.", name);
        }
 }
 
@@ -824,12 +820,13 @@ EXPORT_SYMBOL_GPL(tty_ldisc_release);
  *     the tty structure is not completely set up when this call is made.
  */
 
-void tty_ldisc_init(struct tty_struct *tty)
+int tty_ldisc_init(struct tty_struct *tty)
 {
        struct tty_ldisc *ld = tty_ldisc_get(tty, N_TTY);
        if (IS_ERR(ld))
-               panic("n_tty: init_tty");
+               return PTR_ERR(ld);
        tty->ldisc = ld;
+       return 0;
 }
 
 /**
index f695a7e..c690d10 100644 (file)
@@ -19,7 +19,7 @@
  * # echo -n "ed963694-e847-4b2a-85af-bc9cfc11d6f3" \
  *    > /sys/bus/vmbus/drivers/uio_hv_generic/bind
  */
-
+#define DEBUG 1
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/device.h>
@@ -94,10 +94,11 @@ hv_uio_irqcontrol(struct uio_info *info, s32 irq_state)
  */
 static void hv_uio_channel_cb(void *context)
 {
-       struct hv_uio_private_data *pdata = context;
-       struct hv_device *dev = pdata->device;
+       struct vmbus_channel *chan = context;
+       struct hv_device *hv_dev = chan->device_obj;
+       struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
 
-       dev->channel->inbound.ring_buffer->interrupt_mask = 1;
+       chan->inbound.ring_buffer->interrupt_mask = 1;
        virt_mb();
 
        uio_event_notify(&pdata->info);
@@ -121,78 +122,46 @@ static void hv_uio_rescind(struct vmbus_channel *channel)
        uio_event_notify(&pdata->info);
 }
 
-/*
- * Handle fault when looking for sub channel ring buffer
- * Subchannel ring buffer is same as resource 0 which is main ring buffer
- * This is derived from uio_vma_fault
+/* Sysfs API to allow mmap of the ring buffers
+ * The ring buffer is allocated as contiguous memory by vmbus_open
  */
-static int hv_uio_vma_fault(struct vm_fault *vmf)
-{
-       struct vm_area_struct *vma = vmf->vma;
-       void *ring_buffer = vma->vm_private_data;
-       struct page *page;
-       void *addr;
-
-       addr = ring_buffer + (vmf->pgoff << PAGE_SHIFT);
-       page = virt_to_page(addr);
-       get_page(page);
-       vmf->page = page;
-       return 0;
-}
-
-static const struct vm_operations_struct hv_uio_vm_ops = {
-       .fault = hv_uio_vma_fault,
-};
-
-/* Sysfs API to allow mmap of the ring buffers */
 static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj,
                            struct bin_attribute *attr,
                            struct vm_area_struct *vma)
 {
        struct vmbus_channel *channel
                = container_of(kobj, struct vmbus_channel, kobj);
-       unsigned long requested_pages, actual_pages;
-
-       if (vma->vm_end < vma->vm_start)
-               return -EINVAL;
-
-       /* only allow 0 for now */
-       if (vma->vm_pgoff > 0)
-               return -EINVAL;
+       struct hv_device *dev = channel->primary_channel->device_obj;
+       u16 q_idx = channel->offermsg.offer.sub_channel_index;
 
-       requested_pages = vma_pages(vma);
-       actual_pages = 2 * HV_RING_SIZE;
-       if (requested_pages > actual_pages)
-               return -EINVAL;
+       dev_dbg(&dev->device, "mmap channel %u pages %#lx at %#lx\n",
+               q_idx, vma_pages(vma), vma->vm_pgoff);
 
-       vma->vm_private_data = channel->ringbuffer_pages;
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-       vma->vm_ops = &hv_uio_vm_ops;
-       return 0;
+       return vm_iomap_memory(vma, virt_to_phys(channel->ringbuffer_pages),
+                              channel->ringbuffer_pagecount << PAGE_SHIFT);
 }
 
-static struct bin_attribute ring_buffer_bin_attr __ro_after_init = {
+static const struct bin_attribute ring_buffer_bin_attr = {
        .attr = {
                .name = "ring",
                .mode = 0600,
-               /* size is set at init time */
        },
+       .size = 2 * HV_RING_SIZE * PAGE_SIZE,
        .mmap = hv_uio_ring_mmap,
 };
 
-/* Callback from VMBUS subystem when new channel created. */
+/* Callback from VMBUS subsystem when new channel created. */
 static void
 hv_uio_new_channel(struct vmbus_channel *new_sc)
 {
        struct hv_device *hv_dev = new_sc->primary_channel->device_obj;
        struct device *device = &hv_dev->device;
-       struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
        const size_t ring_bytes = HV_RING_SIZE * PAGE_SIZE;
        int ret;
 
        /* Create host communication ring */
        ret = vmbus_open(new_sc, ring_bytes, ring_bytes, NULL, 0,
-                        hv_uio_channel_cb, pdata);
+                        hv_uio_channel_cb, new_sc);
        if (ret) {
                dev_err(device, "vmbus_open subchannel failed: %d\n", ret);
                return;
@@ -234,7 +203,7 @@ hv_uio_probe(struct hv_device *dev,
 
        ret = vmbus_open(dev->channel, HV_RING_SIZE * PAGE_SIZE,
                         HV_RING_SIZE * PAGE_SIZE, NULL, 0,
-                        hv_uio_channel_cb, pdata);
+                        hv_uio_channel_cb, dev->channel);
        if (ret)
                goto fail;
 
@@ -326,6 +295,11 @@ hv_uio_probe(struct hv_device *dev,
        vmbus_set_chn_rescind_callback(dev->channel, hv_uio_rescind);
        vmbus_set_sc_create_callback(dev->channel, hv_uio_new_channel);
 
+       ret = sysfs_create_bin_file(&dev->channel->kobj, &ring_buffer_bin_attr);
+       if (ret)
+               dev_notice(&dev->device,
+                          "sysfs create ring bin file failed; %d\n", ret);
+
        hv_set_drvdata(dev, pdata);
 
        return 0;
index 75f7fb1..987fc5b 100644 (file)
@@ -207,5 +207,6 @@ config USB_ULPI_BUS
 
 config USB_ROLE_SWITCH
        tristate
+       select USB_COMMON
 
 endif # USB_SUPPORT
index c821b4b..7b5cb28 100644 (file)
@@ -191,7 +191,9 @@ static const unsigned short full_speed_maxpacket_maxes[4] = {
 static const unsigned short high_speed_maxpacket_maxes[4] = {
        [USB_ENDPOINT_XFER_CONTROL] = 64,
        [USB_ENDPOINT_XFER_ISOC] = 1024,
-       [USB_ENDPOINT_XFER_BULK] = 512,
+
+       /* Bulk should be 512, but some devices use 1024: we will warn below */
+       [USB_ENDPOINT_XFER_BULK] = 1024,
        [USB_ENDPOINT_XFER_INT] = 1024,
 };
 static const unsigned short super_speed_maxpacket_maxes[4] = {
index 777036a..0a42c5d 100644 (file)
@@ -2262,7 +2262,8 @@ int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg)
                hcd->state = HC_STATE_SUSPENDED;
 
                if (!PMSG_IS_AUTO(msg))
-                       usb_phy_roothub_power_off(hcd->phy_roothub);
+                       usb_phy_roothub_suspend(hcd->self.sysdev,
+                                               hcd->phy_roothub);
 
                /* Did we race with a root-hub wakeup event? */
                if (rhdev->do_remote_wakeup) {
@@ -2302,7 +2303,8 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
        }
 
        if (!PMSG_IS_AUTO(msg)) {
-               status = usb_phy_roothub_power_on(hcd->phy_roothub);
+               status = usb_phy_roothub_resume(hcd->self.sysdev,
+                                               hcd->phy_roothub);
                if (status)
                        return status;
        }
@@ -2344,7 +2346,7 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
                }
        } else {
                hcd->state = old_state;
-               usb_phy_roothub_power_off(hcd->phy_roothub);
+               usb_phy_roothub_suspend(hcd->self.sysdev, hcd->phy_roothub);
                dev_dbg(&rhdev->dev, "bus %s fail, err %d\n",
                                "resume", status);
                if (status != -ESHUTDOWN)
@@ -2377,6 +2379,7 @@ void usb_hcd_resume_root_hub (struct usb_hcd *hcd)
 
        spin_lock_irqsave (&hcd_root_hub_lock, flags);
        if (hcd->rh_registered) {
+               pm_wakeup_event(&hcd->self.root_hub->dev, 0);
                set_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
                queue_work(pm_wq, &hcd->wakeup_work);
        }
@@ -2758,12 +2761,16 @@ int usb_add_hcd(struct usb_hcd *hcd,
        }
 
        if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) {
-               hcd->phy_roothub = usb_phy_roothub_init(hcd->self.sysdev);
+               hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
                if (IS_ERR(hcd->phy_roothub)) {
                        retval = PTR_ERR(hcd->phy_roothub);
-                       goto err_phy_roothub_init;
+                       goto err_phy_roothub_alloc;
                }
 
+               retval = usb_phy_roothub_init(hcd->phy_roothub);
+               if (retval)
+                       goto err_phy_roothub_alloc;
+
                retval = usb_phy_roothub_power_on(hcd->phy_roothub);
                if (retval)
                        goto err_usb_phy_roothub_power_on;
@@ -2936,7 +2943,7 @@ err_create_buf:
        usb_phy_roothub_power_off(hcd->phy_roothub);
 err_usb_phy_roothub_power_on:
        usb_phy_roothub_exit(hcd->phy_roothub);
-err_phy_roothub_init:
+err_phy_roothub_alloc:
        if (hcd->remove_phy && hcd->usb_phy) {
                usb_phy_shutdown(hcd->usb_phy);
                usb_put_phy(hcd->usb_phy);
index f6ea16e..aa9968d 100644 (file)
@@ -653,12 +653,17 @@ void usb_wakeup_notification(struct usb_device *hdev,
                unsigned int portnum)
 {
        struct usb_hub *hub;
+       struct usb_port *port_dev;
 
        if (!hdev)
                return;
 
        hub = usb_hub_to_struct_hub(hdev);
        if (hub) {
+               port_dev = hub->ports[portnum - 1];
+               if (port_dev && port_dev->child)
+                       pm_wakeup_event(&port_dev->child->dev, 0);
+
                set_bit(portnum, hub->wakeup_bits);
                kick_hub_wq(hub);
        }
@@ -3434,8 +3439,11 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
 
        /* Skip the initial Clear-Suspend step for a remote wakeup */
        status = hub_port_status(hub, port1, &portstatus, &portchange);
-       if (status == 0 && !port_is_suspended(hub, portstatus))
+       if (status == 0 && !port_is_suspended(hub, portstatus)) {
+               if (portchange & USB_PORT_STAT_C_SUSPEND)
+                       pm_wakeup_event(&udev->dev, 0);
                goto SuspendCleared;
+       }
 
        /* see 7.1.7.7; affects power usage, but not budgeting */
        if (hub_is_superspeed(hub->hdev))
index 09b7c43..9879767 100644 (file)
@@ -19,19 +19,6 @@ struct usb_phy_roothub {
        struct list_head        list;
 };
 
-static struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev)
-{
-       struct usb_phy_roothub *roothub_entry;
-
-       roothub_entry = devm_kzalloc(dev, sizeof(*roothub_entry), GFP_KERNEL);
-       if (!roothub_entry)
-               return ERR_PTR(-ENOMEM);
-
-       INIT_LIST_HEAD(&roothub_entry->list);
-
-       return roothub_entry;
-}
-
 static int usb_phy_roothub_add_phy(struct device *dev, int index,
                                   struct list_head *list)
 {
@@ -45,9 +32,11 @@ static int usb_phy_roothub_add_phy(struct device *dev, int index,
                        return PTR_ERR(phy);
        }
 
-       roothub_entry = usb_phy_roothub_alloc(dev);
-       if (IS_ERR(roothub_entry))
-               return PTR_ERR(roothub_entry);
+       roothub_entry = devm_kzalloc(dev, sizeof(*roothub_entry), GFP_KERNEL);
+       if (!roothub_entry)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&roothub_entry->list);
 
        roothub_entry->phy = phy;
 
@@ -56,28 +45,44 @@ static int usb_phy_roothub_add_phy(struct device *dev, int index,
        return 0;
 }
 
-struct usb_phy_roothub *usb_phy_roothub_init(struct device *dev)
+struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev)
 {
        struct usb_phy_roothub *phy_roothub;
-       struct usb_phy_roothub *roothub_entry;
-       struct list_head *head;
        int i, num_phys, err;
 
+       if (!IS_ENABLED(CONFIG_GENERIC_PHY))
+               return NULL;
+
        num_phys = of_count_phandle_with_args(dev->of_node, "phys",
                                              "#phy-cells");
        if (num_phys <= 0)
                return NULL;
 
-       phy_roothub = usb_phy_roothub_alloc(dev);
-       if (IS_ERR(phy_roothub))
-               return phy_roothub;
+       phy_roothub = devm_kzalloc(dev, sizeof(*phy_roothub), GFP_KERNEL);
+       if (!phy_roothub)
+               return ERR_PTR(-ENOMEM);
+
+       INIT_LIST_HEAD(&phy_roothub->list);
 
        for (i = 0; i < num_phys; i++) {
                err = usb_phy_roothub_add_phy(dev, i, &phy_roothub->list);
                if (err)
-                       goto err_out;
+                       return ERR_PTR(err);
        }
 
+       return phy_roothub;
+}
+EXPORT_SYMBOL_GPL(usb_phy_roothub_alloc);
+
+int usb_phy_roothub_init(struct usb_phy_roothub *phy_roothub)
+{
+       struct usb_phy_roothub *roothub_entry;
+       struct list_head *head;
+       int err;
+
+       if (!phy_roothub)
+               return 0;
+
        head = &phy_roothub->list;
 
        list_for_each_entry(roothub_entry, head, list) {
@@ -86,14 +91,13 @@ struct usb_phy_roothub *usb_phy_roothub_init(struct device *dev)
                        goto err_exit_phys;
        }
 
-       return phy_roothub;
+       return 0;
 
 err_exit_phys:
        list_for_each_entry_continue_reverse(roothub_entry, head, list)
                phy_exit(roothub_entry->phy);
 
-err_out:
-       return ERR_PTR(err);
+       return err;
 }
 EXPORT_SYMBOL_GPL(usb_phy_roothub_init);
 
@@ -111,7 +115,7 @@ int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub)
        list_for_each_entry(roothub_entry, head, list) {
                err = phy_exit(roothub_entry->phy);
                if (err)
-                       ret = ret;
+                       ret = err;
        }
 
        return ret;
@@ -156,3 +160,38 @@ void usb_phy_roothub_power_off(struct usb_phy_roothub *phy_roothub)
                phy_power_off(roothub_entry->phy);
 }
 EXPORT_SYMBOL_GPL(usb_phy_roothub_power_off);
+
+int usb_phy_roothub_suspend(struct device *controller_dev,
+                           struct usb_phy_roothub *phy_roothub)
+{
+       usb_phy_roothub_power_off(phy_roothub);
+
+       /* keep the PHYs initialized so the device can wake up the system */
+       if (device_may_wakeup(controller_dev))
+               return 0;
+
+       return usb_phy_roothub_exit(phy_roothub);
+}
+EXPORT_SYMBOL_GPL(usb_phy_roothub_suspend);
+
+int usb_phy_roothub_resume(struct device *controller_dev,
+                          struct usb_phy_roothub *phy_roothub)
+{
+       int err;
+
+       /* if the device can't wake up the system _exit was called */
+       if (!device_may_wakeup(controller_dev)) {
+               err = usb_phy_roothub_init(phy_roothub);
+               if (err)
+                       return err;
+       }
+
+       err = usb_phy_roothub_power_on(phy_roothub);
+
+       /* undo _init if _power_on failed */
+       if (err && !device_may_wakeup(controller_dev))
+               usb_phy_roothub_exit(phy_roothub);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(usb_phy_roothub_resume);
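
The new suspend/resume pair keeps one invariant: the PHYs are always powered off across suspend, but only fully exited when the controller cannot wake the system, and resume must mirror exactly that decision. A minimal userspace sketch with device_may_wakeup() modelled as a plain flag and all names assumed:

#include <stdbool.h>
#include <stdio.h>

static bool initialized, powered;

static int  phy_init(void)      { initialized = true;  return 0; }
static int  phy_exit(void)      { initialized = false; return 0; }
static int  phy_power_on(void)  { powered = true;      return 0; }
static void phy_power_off(void) { powered = false; }

static int roothub_suspend(bool may_wakeup)
{
        phy_power_off();                /* always drop power */
        if (may_wakeup)
                return 0;               /* keep the PHY initialized for wakeup */
        return phy_exit();
}

static int roothub_resume(bool may_wakeup)
{
        int err = 0;

        if (!may_wakeup)
                err = phy_init();       /* undo the exit done on suspend */
        if (!err)
                err = phy_power_on();
        return err;
}

int main(void)
{
        roothub_suspend(false);
        roothub_resume(false);
        printf("initialized=%d powered=%d\n", initialized, powered);
        return 0;
}
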
index 6fde59b..88a3c03 100644 (file)
@@ -1,7 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * USB roothub wrapper
+ *
+ * Copyright (C) 2018 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ */
+
+#ifndef __USB_CORE_PHY_H_
+#define __USB_CORE_PHY_H_
+
+struct device;
 struct usb_phy_roothub;
 
-struct usb_phy_roothub *usb_phy_roothub_init(struct device *dev);
+struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev);
+
+int usb_phy_roothub_init(struct usb_phy_roothub *phy_roothub);
 int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub);
 
 int usb_phy_roothub_power_on(struct usb_phy_roothub *phy_roothub);
 void usb_phy_roothub_power_off(struct usb_phy_roothub *phy_roothub);
+
+int usb_phy_roothub_suspend(struct device *controller_dev,
+                           struct usb_phy_roothub *phy_roothub);
+int usb_phy_roothub_resume(struct device *controller_dev,
+                          struct usb_phy_roothub *phy_roothub);
+
+#endif /* __USB_CORE_PHY_H_ */
index 920f48a..c55def2 100644 (file)
@@ -186,6 +186,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x03f0, 0x0701), .driver_info =
                        USB_QUIRK_STRING_FETCH_255 },
 
+       /* HP v222w 16GB Mini USB Drive */
+       { USB_DEVICE(0x03f0, 0x3f40), .driver_info = USB_QUIRK_DELAY_INIT },
+
        /* Creative SB Audigy 2 NX */
        { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
 
index d83be56..a666e07 100644 (file)
@@ -985,6 +985,7 @@ struct dwc2_hsotg {
 
        /* DWC OTG HW Release versions */
 #define DWC2_CORE_REV_2_71a    0x4f54271a
+#define DWC2_CORE_REV_2_72a     0x4f54272a
 #define DWC2_CORE_REV_2_80a    0x4f54280a
 #define DWC2_CORE_REV_2_90a    0x4f54290a
 #define DWC2_CORE_REV_2_91a    0x4f54291a
@@ -992,6 +993,7 @@ struct dwc2_hsotg {
 #define DWC2_CORE_REV_2_94a    0x4f54294a
 #define DWC2_CORE_REV_3_00a    0x4f54300a
 #define DWC2_CORE_REV_3_10a    0x4f54310a
+#define DWC2_CORE_REV_4_00a    0x4f54400a
 #define DWC2_FS_IOT_REV_1_00a  0x5531100a
 #define DWC2_HS_IOT_REV_1_00a  0x5532100a
 
index 6c32bf2..83cb557 100644 (file)
@@ -3928,6 +3928,27 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
        if (index && !hs_ep->isochronous)
                epctrl |= DXEPCTL_SETD0PID;
 
+       /* Workaround for full-speed ISOC IN in DDMA mode.
+        * By clearing the NAK status of the EP, the core will send a ZLP
+        * in response to an IN token and assert the NAK interrupt relying
+        * on TxFIFO status only.
+        */
+
+       if (hsotg->gadget.speed == USB_SPEED_FULL &&
+           hs_ep->isochronous && dir_in) {
+               /* The WA applies only to core versions from 2.72a
+                * to 4.00a (including both). Also for FS_IOT_1.00a
+                * and HS_IOT_1.00a.
+                */
+               u32 gsnpsid = dwc2_readl(hsotg->regs + GSNPSID);
+
+               if ((gsnpsid >= DWC2_CORE_REV_2_72a &&
+                    gsnpsid <= DWC2_CORE_REV_4_00a) ||
+                    gsnpsid == DWC2_FS_IOT_REV_1_00a ||
+                    gsnpsid == DWC2_HS_IOT_REV_1_00a)
+                       epctrl |= DXEPCTL_CNAK;
+       }
+
        dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
                __func__, epctrl);
 
index 190f959..c51b73b 100644 (file)
@@ -358,9 +358,14 @@ static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
 
 static int dwc2_vbus_supply_init(struct dwc2_hsotg *hsotg)
 {
+       int ret;
+
        hsotg->vbus_supply = devm_regulator_get_optional(hsotg->dev, "vbus");
-       if (IS_ERR(hsotg->vbus_supply))
-               return 0;
+       if (IS_ERR(hsotg->vbus_supply)) {
+               ret = PTR_ERR(hsotg->vbus_supply);
+               hsotg->vbus_supply = NULL;
+               return ret == -ENODEV ? 0 : ret;
+       }
 
        return regulator_enable(hsotg->vbus_supply);
 }
@@ -4342,9 +4347,7 @@ static int _dwc2_hcd_start(struct usb_hcd *hcd)
 
        spin_unlock_irqrestore(&hsotg->lock, flags);
 
-       dwc2_vbus_supply_init(hsotg);
-
-       return 0;
+       return dwc2_vbus_supply_init(hsotg);
 }
 
 /*
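
dwc2_vbus_supply_init() above treats a missing vbus regulator (-ENODEV) as fine and propagates anything else, typically -EPROBE_DEFER, up through _dwc2_hcd_start(). A userspace stand-in for that split; EPROBE_DEFER's numeric value is an assumption for the demo:

#include <errno.h>
#include <stdio.h>

#define EPROBE_DEFER 517   /* kernel-internal errno value, assumed here */

static int vbus_supply_init(int get_result, const char **supply)
{
        if (get_result < 0) {
                *supply = NULL;
                return get_result == -ENODEV ? 0 : get_result;
        }
        *supply = "vbus";   /* fake regulator handle; would be enabled here */
        return 0;
}

int main(void)
{
        const char *supply;

        printf("missing:  %d\n", vbus_supply_init(-ENODEV, &supply));        /*    0 */
        printf("deferred: %d\n", vbus_supply_init(-EPROBE_DEFER, &supply));  /* -517 */
        printf("present:  %d\n", vbus_supply_init(0, &supply));              /*    0 */
        return 0;
}
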
index 7f21747..bea2e8e 100644 (file)
@@ -141,8 +141,10 @@ static int dwc2_pci_probe(struct pci_dev *pci,
                goto err;
 
        glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
-       if (!glue)
+       if (!glue) {
+               ret = -ENOMEM;
                goto err;
+       }
 
        ret = platform_device_add(dwc2);
        if (ret) {
index 8796a5e..0dedf8a 100644 (file)
@@ -166,7 +166,7 @@ static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
        dwc3_ep_inc_trb(&dep->trb_dequeue);
 }
 
-void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
+static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
                struct dwc3_request *req, int status)
 {
        struct dwc3                     *dwc = dep->dwc;
@@ -1424,7 +1424,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
                                        dwc->lock);
 
                        if (!r->trb)
-                               goto out1;
+                               goto out0;
 
                        if (r->num_pending_sgs) {
                                struct dwc3_trb *trb;
index 7889bcc..8b72b19 100644 (file)
@@ -221,7 +221,7 @@ static void pn_tx_complete(struct usb_ep *ep, struct usb_request *req)
        netif_wake_queue(dev);
 }
 
-static int pn_net_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t pn_net_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct phonet_port *port = netdev_priv(dev);
        struct f_phonet *fp;
index 4c6c08b..21307d8 100644 (file)
@@ -73,9 +73,10 @@ static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
        if (!qh)
                goto done;
        qh->hw = (struct ehci_qh_hw *)
-               dma_pool_zalloc(ehci->qh_pool, flags, &dma);
+               dma_pool_alloc(ehci->qh_pool, flags, &dma);
        if (!qh->hw)
                goto fail;
+       memset(qh->hw, 0, sizeof *qh->hw);
        qh->qh_dma = dma;
        // INIT_LIST_HEAD (&qh->qh_list);
        INIT_LIST_HEAD (&qh->qtd_list);
index 28e2a33..e56db44 100644 (file)
@@ -1287,7 +1287,7 @@ itd_urb_transaction(
                } else {
  alloc_itd:
                        spin_unlock_irqrestore(&ehci->lock, flags);
-                       itd = dma_pool_zalloc(ehci->itd_pool, mem_flags,
+                       itd = dma_pool_alloc(ehci->itd_pool, mem_flags,
                                        &itd_dma);
                        spin_lock_irqsave(&ehci->lock, flags);
                        if (!itd) {
@@ -1297,6 +1297,7 @@ itd_urb_transaction(
                        }
                }
 
+               memset(itd, 0, sizeof(*itd));
                itd->itd_dma = itd_dma;
                itd->frame = NO_FRAME;
                list_add(&itd->itd_list, &sched->td_list);
@@ -2080,7 +2081,7 @@ sitd_urb_transaction(
                } else {
  alloc_sitd:
                        spin_unlock_irqrestore(&ehci->lock, flags);
-                       sitd = dma_pool_zalloc(ehci->sitd_pool, mem_flags,
+                       sitd = dma_pool_alloc(ehci->sitd_pool, mem_flags,
                                        &sitd_dma);
                        spin_lock_irqsave(&ehci->lock, flags);
                        if (!sitd) {
@@ -2090,6 +2091,7 @@ sitd_urb_transaction(
                        }
                }
 
+               memset(sitd, 0, sizeof(*sitd));
                sitd->sitd_dma = sitd_dma;
                sitd->frame = NO_FRAME;
                list_add(&sitd->sitd_list, &iso_sched->td_list);
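
The itd/sitd hunks above deliberately place a single memset() after the if/else, so descriptors recycled from the free list (the alloc_itd:/alloc_sitd: else branches) are cleared by the same statement as freshly allocated ones. A schematic, hypothetical version of that shape in plain C:

        #include <stdlib.h>
        #include <string.h>

        struct desc { int frame; /* ... */ };

        /*
         * Hypothetical: take a descriptor either from a recycled free entry or
         * from the allocator, then clear it in one place covering both paths.
         */
        static struct desc *get_desc(struct desc **free_entry)
        {
                struct desc *d;

                if (*free_entry) {
                        d = *free_entry;        /* recycled entry, contents stale */
                        *free_entry = NULL;     /* (single-entry list for brevity) */
                } else {
                        d = malloc(sizeof(*d));
                        if (!d)
                                return NULL;
                }

                memset(d, 0, sizeof(*d));       /* one clearing point for both paths */
                return d;
        }
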
index 48779c4..eb494ec 100644 (file)
@@ -320,9 +320,11 @@ int xhci_dbc_tty_register_driver(struct xhci_hcd *xhci)
 
 void xhci_dbc_tty_unregister_driver(void)
 {
-       tty_unregister_driver(dbc_tty_driver);
-       put_tty_driver(dbc_tty_driver);
-       dbc_tty_driver = NULL;
+       if (dbc_tty_driver) {
+               tty_unregister_driver(dbc_tty_driver);
+               put_tty_driver(dbc_tty_driver);
+               dbc_tty_driver = NULL;
+       }
 }
 
 static void dbc_rx_push(unsigned long _port)
index f17b7ea..85ffda8 100644 (file)
@@ -126,7 +126,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
        if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
                xhci->quirks |= XHCI_AMD_PLL_FIX;
 
-       if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x43bb)
+       if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+               (pdev->device == 0x15e0 ||
+                pdev->device == 0x15e1 ||
+                pdev->device == 0x43bb))
                xhci->quirks |= XHCI_SUSPEND_DELAY;
 
        if (pdev->vendor == PCI_VENDOR_ID_AMD)
index df327dc..c1b22fc 100644 (file)
@@ -157,6 +157,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
        struct resource         *res;
        struct usb_hcd          *hcd;
        struct clk              *clk;
+       struct clk              *reg_clk;
        int                     ret;
        int                     irq;
 
@@ -226,17 +227,27 @@ static int xhci_plat_probe(struct platform_device *pdev)
        hcd->rsrc_len = resource_size(res);
 
        /*
-        * Not all platforms have a clk so it is not an error if the
-        * clock does not exists.
+        * Not all platforms have clks so it is not an error if the
+        * clocks do not exist.
         */
+       reg_clk = devm_clk_get(&pdev->dev, "reg");
+       if (!IS_ERR(reg_clk)) {
+               ret = clk_prepare_enable(reg_clk);
+               if (ret)
+                       goto put_hcd;
+       } else if (PTR_ERR(reg_clk) == -EPROBE_DEFER) {
+               ret = -EPROBE_DEFER;
+               goto put_hcd;
+       }
+
        clk = devm_clk_get(&pdev->dev, NULL);
        if (!IS_ERR(clk)) {
                ret = clk_prepare_enable(clk);
                if (ret)
-                       goto put_hcd;
+                       goto disable_reg_clk;
        } else if (PTR_ERR(clk) == -EPROBE_DEFER) {
                ret = -EPROBE_DEFER;
-               goto put_hcd;
+               goto disable_reg_clk;
        }
 
        xhci = hcd_to_xhci(hcd);
@@ -252,6 +263,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
        device_wakeup_enable(hcd->self.controller);
 
        xhci->clk = clk;
+       xhci->reg_clk = reg_clk;
        xhci->main_hcd = hcd;
        xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev,
                        dev_name(&pdev->dev), hcd);
@@ -320,8 +332,10 @@ put_usb3_hcd:
        usb_put_hcd(xhci->shared_hcd);
 
 disable_clk:
-       if (!IS_ERR(clk))
-               clk_disable_unprepare(clk);
+       clk_disable_unprepare(clk);
+
+disable_reg_clk:
+       clk_disable_unprepare(reg_clk);
 
 put_hcd:
        usb_put_hcd(hcd);
@@ -338,6 +352,7 @@ static int xhci_plat_remove(struct platform_device *dev)
        struct usb_hcd  *hcd = platform_get_drvdata(dev);
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct clk *clk = xhci->clk;
+       struct clk *reg_clk = xhci->reg_clk;
 
        xhci->xhc_state |= XHCI_STATE_REMOVING;
 
@@ -347,8 +362,8 @@ static int xhci_plat_remove(struct platform_device *dev)
        usb_remove_hcd(hcd);
        usb_put_hcd(xhci->shared_hcd);
 
-       if (!IS_ERR(clk))
-               clk_disable_unprepare(clk);
+       clk_disable_unprepare(clk);
+       clk_disable_unprepare(reg_clk);
        usb_put_hcd(hcd);
 
        pm_runtime_set_suspended(&dev->dev);
@@ -420,7 +435,6 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
 static struct platform_driver usb_xhci_driver = {
        .probe  = xhci_plat_probe,
        .remove = xhci_plat_remove,
-       .shutdown       = usb_hcd_platform_shutdown,
        .driver = {
                .name = "xhci-hcd",
                .pm = &xhci_plat_pm_ops,
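
The "reg" clock added to xhci_plat_probe() above follows the usual optional-clock pattern: a missing clock is tolerated, probe deferral is still propagated, and the error path unwinds in reverse order. A hedged, stand-alone sketch of that pattern (hypothetical helper, not the driver's code):

        #include <linux/clk.h>
        #include <linux/device.h>
        #include <linux/err.h>
        #include <linux/errno.h>

        /* Acquire and enable an optional clock; store NULL if it is absent. */
        static int sketch_get_optional_clk(struct device *dev, const char *id,
                                           struct clk **out)
        {
                struct clk *clk = devm_clk_get(dev, id);
                int ret;

                if (IS_ERR(clk)) {
                        if (PTR_ERR(clk) == -EPROBE_DEFER)
                                return -EPROBE_DEFER;
                        *out = NULL;    /* clock not wired up: not an error */
                        return 0;
                }

                ret = clk_prepare_enable(clk);
                if (ret)
                        return ret;

                *out = clk;
                return 0;
        }

The common clock framework ignores a NULL (or error) cookie in clk_disable_unprepare(), which appears to be why the IS_ERR() checks around the disable calls could be dropped in the hunks above; the disable_reg_clk/disable_clk labels then simply unwind in reverse order of acquisition.
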
index 9b27798..711da33 100644 (file)
@@ -3621,6 +3621,7 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
                del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
        }
        xhci_debugfs_remove_slot(xhci, udev->slot_id);
+       virt_dev->udev = NULL;
        ret = xhci_disable_slot(xhci, udev->slot_id);
        if (ret)
                xhci_free_virt_device(xhci, udev->slot_id);
index 05c909b..6dfc486 100644 (file)
@@ -1729,8 +1729,9 @@ struct xhci_hcd {
        int             page_shift;
        /* msi-x vectors */
        int             msix_count;
-       /* optional clock */
+       /* optional clocks */
        struct clk              *clk;
+       struct clk              *reg_clk;
        /* data structures */
        struct xhci_device_context_array *dcbaa;
        struct xhci_ring        *cmd_ring;
index 05a679d..6a60bc0 100644 (file)
@@ -451,7 +451,6 @@ static int dsps_musb_init(struct musb *musb)
        if (!rev)
                return -ENODEV;
 
-       usb_phy_init(musb->xceiv);
        if (IS_ERR(musb->phy))  {
                musb->phy = NULL;
        } else {
@@ -501,7 +500,6 @@ static int dsps_musb_exit(struct musb *musb)
        struct dsps_glue *glue = dev_get_drvdata(dev->parent);
 
        del_timer_sync(&musb->dev_timer);
-       usb_phy_shutdown(musb->xceiv);
        phy_power_off(musb->phy);
        phy_exit(musb->phy);
        debugfs_remove_recursive(glue->dbgfs_root);
index e564695..71c5835 100644 (file)
@@ -417,7 +417,6 @@ void musb_g_tx(struct musb *musb, u8 epnum)
        req = next_request(musb_ep);
        request = &req->request;
 
-       trace_musb_req_tx(req);
        csr = musb_readw(epio, MUSB_TXCSR);
        musb_dbg(musb, "<== %s, txcsr %04x", musb_ep->end_point.name, csr);
 
@@ -456,6 +455,8 @@ void musb_g_tx(struct musb *musb, u8 epnum)
                u8      is_dma = 0;
                bool    short_packet = false;
 
+               trace_musb_req_tx(req);
+
                if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
                        is_dma = 1;
                        csr |= MUSB_TXCSR_P_WZC_BITS;
index 3a8451a..e7f99d5 100644 (file)
@@ -990,7 +990,9 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
                        /* set tx_reinit and schedule the next qh */
                        ep->tx_reinit = 1;
                }
-               musb_start_urb(musb, is_in, next_qh);
+
+               if (next_qh)
+                       musb_start_urb(musb, is_in, next_qh);
        }
 }
 
@@ -2754,6 +2756,7 @@ int musb_host_setup(struct musb *musb, int power_budget)
        hcd->self.otg_port = 1;
        musb->xceiv->otg->host = &hcd->self;
        hcd->power_budget = 2 * (power_budget ? : 250);
+       hcd->skip_phy_initialization = 1;
 
        ret = usb_add_hcd(hcd, 0, 0);
        if (ret < 0)
index a646820..533f127 100644 (file)
@@ -62,6 +62,7 @@ config USB_SERIAL_SIMPLE
                - Fundamental Software dongle.
                - Google USB serial devices
                - HP4x calculators
+               - Libtransistor USB console
                - a number of Motorola phones
                - Motorola Tetra devices
                - Novatel Wireless GPS receivers
index de1e759..eb6c26c 100644 (file)
@@ -214,6 +214,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
        { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
        { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
+       { USB_DEVICE(0x3923, 0x7A0B) }, /* National Instruments USB Serial Console */
        { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
        { } /* Terminating Entry */
 };
index 87202ad..7ea221d 100644 (file)
@@ -1898,7 +1898,8 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
                return ftdi_jtag_probe(serial);
 
        if (udev->product &&
-               (!strcmp(udev->product, "BeagleBone/XDS100V2") ||
+               (!strcmp(udev->product, "Arrow USB Blaster") ||
+                !strcmp(udev->product, "BeagleBone/XDS100V2") ||
                 !strcmp(udev->product, "SNAP Connect E10")))
                return ftdi_jtag_probe(serial);
 
index c3f2522..2058852 100644 (file)
@@ -233,6 +233,8 @@ static void option_instat_callback(struct urb *urb);
 /* These Quectel products use Qualcomm's vendor ID */
 #define QUECTEL_PRODUCT_UC20                   0x9003
 #define QUECTEL_PRODUCT_UC15                   0x9090
+/* These u-blox products use Qualcomm's vendor ID */
+#define UBLOX_PRODUCT_R410M                    0x90b2
 /* These Yuga products use Qualcomm's vendor ID */
 #define YUGA_PRODUCT_CLM920_NC5                        0x9625
 
@@ -1065,6 +1067,9 @@ static const struct usb_device_id option_ids[] = {
        /* Yuga products use Qualcomm vendor ID */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5),
          .driver_info = RSVD(1) | RSVD(4) },
+       /* u-blox products using Qualcomm vendor ID */
+       { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
+         .driver_info = RSVD(1) | RSVD(3) },
        /* Quectel products using Quectel vendor ID */
        { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
          .driver_info = RSVD(4) },
index 4ef79e2..40864c2 100644 (file)
@@ -63,6 +63,11 @@ DEVICE(flashloader, FLASHLOADER_IDS);
                                        0x01) }
 DEVICE(google, GOOGLE_IDS);
 
+/* Libtransistor USB console */
+#define LIBTRANSISTOR_IDS()                    \
+       { USB_DEVICE(0x1209, 0x8b00) }
+DEVICE(libtransistor, LIBTRANSISTOR_IDS);
+
 /* ViVOpay USB Serial Driver */
 #define VIVOPAY_IDS()                  \
        { USB_DEVICE(0x1d5f, 0x1004) }  /* ViVOpay 8800 */
@@ -110,6 +115,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
        &funsoft_device,
        &flashloader_device,
        &google_device,
+       &libtransistor_device,
        &vivopay_device,
        &moto_modem_device,
        &motorola_tetra_device,
@@ -126,6 +132,7 @@ static const struct usb_device_id id_table[] = {
        FUNSOFT_IDS(),
        FLASHLOADER_IDS(),
        GOOGLE_IDS(),
+       LIBTRANSISTOR_IDS(),
        VIVOPAY_IDS(),
        MOTO_IDS(),
        MOTOROLA_TETRA_IDS(),
index f5373ed..8ddbecc 100644 (file)
@@ -335,47 +335,48 @@ static int palm_os_3_probe(struct usb_serial *serial,
                goto exit;
        }
 
-       if (retval == sizeof(*connection_info)) {
-                       connection_info = (struct visor_connection_info *)
-                                                       transfer_buffer;
-
-               num_ports = le16_to_cpu(connection_info->num_ports);
-               for (i = 0; i < num_ports; ++i) {
-                       switch (
-                          connection_info->connections[i].port_function_id) {
-                       case VISOR_FUNCTION_GENERIC:
-                               string = "Generic";
-                               break;
-                       case VISOR_FUNCTION_DEBUGGER:
-                               string = "Debugger";
-                               break;
-                       case VISOR_FUNCTION_HOTSYNC:
-                               string = "HotSync";
-                               break;
-                       case VISOR_FUNCTION_CONSOLE:
-                               string = "Console";
-                               break;
-                       case VISOR_FUNCTION_REMOTE_FILE_SYS:
-                               string = "Remote File System";
-                               break;
-                       default:
-                               string = "unknown";
-                               break;
-                       }
-                       dev_info(dev, "%s: port %d, is for %s use\n",
-                               serial->type->description,
-                               connection_info->connections[i].port, string);
-               }
+       if (retval != sizeof(*connection_info)) {
+               dev_err(dev, "Invalid connection information received from device\n");
+               retval = -ENODEV;
+               goto exit;
        }
-       /*
-       * Handle devices that report invalid stuff here.
-       */
+
+       connection_info = (struct visor_connection_info *)transfer_buffer;
+
+       num_ports = le16_to_cpu(connection_info->num_ports);
+
+       /* Handle devices that report invalid stuff here. */
        if (num_ports == 0 || num_ports > 2) {
                dev_warn(dev, "%s: No valid connect info available\n",
                        serial->type->description);
                num_ports = 2;
        }
 
+       for (i = 0; i < num_ports; ++i) {
+               switch (connection_info->connections[i].port_function_id) {
+               case VISOR_FUNCTION_GENERIC:
+                       string = "Generic";
+                       break;
+               case VISOR_FUNCTION_DEBUGGER:
+                       string = "Debugger";
+                       break;
+               case VISOR_FUNCTION_HOTSYNC:
+                       string = "HotSync";
+                       break;
+               case VISOR_FUNCTION_CONSOLE:
+                       string = "Console";
+                       break;
+               case VISOR_FUNCTION_REMOTE_FILE_SYS:
+                       string = "Remote File System";
+                       break;
+               default:
+                       string = "unknown";
+                       break;
+               }
+               dev_info(dev, "%s: port %d, is for %s use\n",
+                       serial->type->description,
+                       connection_info->connections[i].port, string);
+       }
        dev_info(dev, "%s: Number of ports: %d\n", serial->type->description,
                num_ports);
 
index 677d121..ded49e3 100644 (file)
@@ -3725,6 +3725,7 @@ void tcpm_unregister_port(struct tcpm_port *port)
        for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
                typec_unregister_altmode(port->port_altmode[i]);
        typec_unregister_port(port->typec_port);
+       usb_role_switch_put(port->role_sw);
        tcpm_debugfs_exit(port);
        destroy_workqueue(port->wq);
 }
index 8b84068..4b4c8d2 100644 (file)
@@ -73,6 +73,7 @@ struct tps6598x {
        struct device *dev;
        struct regmap *regmap;
        struct mutex lock; /* device lock */
+       u8 i2c_protocol:1;
 
        struct typec_port *port;
        struct typec_partner *partner;
@@ -80,19 +81,39 @@ struct tps6598x {
        struct typec_capability typec_cap;
 };
 
+static int
+tps6598x_block_read(struct tps6598x *tps, u8 reg, void *val, size_t len)
+{
+       u8 data[len + 1];
+       int ret;
+
+       if (!tps->i2c_protocol)
+               return regmap_raw_read(tps->regmap, reg, val, len);
+
+       ret = regmap_raw_read(tps->regmap, reg, data, sizeof(data));
+       if (ret)
+               return ret;
+
+       if (data[0] < len)
+               return -EIO;
+
+       memcpy(val, &data[1], len);
+       return 0;
+}
+
 static inline int tps6598x_read16(struct tps6598x *tps, u8 reg, u16 *val)
 {
-       return regmap_raw_read(tps->regmap, reg, val, sizeof(u16));
+       return tps6598x_block_read(tps, reg, val, sizeof(u16));
 }
 
 static inline int tps6598x_read32(struct tps6598x *tps, u8 reg, u32 *val)
 {
-       return regmap_raw_read(tps->regmap, reg, val, sizeof(u32));
+       return tps6598x_block_read(tps, reg, val, sizeof(u32));
 }
 
 static inline int tps6598x_read64(struct tps6598x *tps, u8 reg, u64 *val)
 {
-       return regmap_raw_read(tps->regmap, reg, val, sizeof(u64));
+       return tps6598x_block_read(tps, reg, val, sizeof(u64));
 }
 
 static inline int tps6598x_write16(struct tps6598x *tps, u8 reg, u16 val)
@@ -121,8 +142,8 @@ static int tps6598x_read_partner_identity(struct tps6598x *tps)
        struct tps6598x_rx_identity_reg id;
        int ret;
 
-       ret = regmap_raw_read(tps->regmap, TPS_REG_RX_IDENTITY_SOP,
-                             &id, sizeof(id));
+       ret = tps6598x_block_read(tps, TPS_REG_RX_IDENTITY_SOP,
+                                 &id, sizeof(id));
        if (ret)
                return ret;
 
@@ -224,13 +245,13 @@ static int tps6598x_exec_cmd(struct tps6598x *tps, const char *cmd,
        } while (val);
 
        if (out_len) {
-               ret = regmap_raw_read(tps->regmap, TPS_REG_DATA1,
-                                     out_data, out_len);
+               ret = tps6598x_block_read(tps, TPS_REG_DATA1,
+                                         out_data, out_len);
                if (ret)
                        return ret;
                val = out_data[0];
        } else {
-               ret = regmap_read(tps->regmap, TPS_REG_DATA1, &val);
+               ret = tps6598x_block_read(tps, TPS_REG_DATA1, &val, sizeof(u8));
                if (ret)
                        return ret;
        }
@@ -385,6 +406,16 @@ static int tps6598x_probe(struct i2c_client *client)
        if (!vid)
                return -ENODEV;
 
+       /*
+        * Check whether the adapter can handle the SMBus protocol. If it
+        * cannot, the driver needs to take care of block reads separately.
+        *
+        * FIXME: Testing with I2C_FUNC_I2C. regmap-i2c uses I2C protocol
+        * unconditionally if the adapter has I2C_FUNC_I2C set.
+        */
+       if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+               tps->i2c_protocol = true;
+
        ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
        if (ret < 0)
                return ret;
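
tps6598x_block_read() above assumes SMBus-block-style framing on the wire: the first byte carries the number of valid bytes and the payload starts at offset 1. A hypothetical unpack helper illustrating just that assumption (plain C, not part of the driver):

        #include <errno.h>
        #include <stddef.h>
        #include <string.h>

        /*
         * Reject a block whose advertised length does not cover the caller's
         * request, then copy the payload that follows the count byte.
         */
        static int unpack_block_read(const unsigned char *raw, size_t raw_len,
                                     void *val, size_t len)
        {
                if (raw_len <= len || raw[0] < len)
                        return -EIO;
                memcpy(val, &raw[1], len);
                return 0;
        }
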
index b57891c..7afbea5 100644 (file)
@@ -5,6 +5,6 @@ obj-$(CONFIG_TYPEC_UCSI)        += typec_ucsi.o
 
 typec_ucsi-y                   := ucsi.o
 
-typec_ucsi-$(CONFIG_FTRACE)    += trace.o
+typec_ucsi-$(CONFIG_TRACING)   += trace.o
 
 obj-$(CONFIG_UCSI_ACPI)                += ucsi_acpi.o
index bf0977f..bd5cca5 100644 (file)
@@ -28,7 +28,7 @@
  * difficult to estimate the time it takes for the system to process the command
  * before it is actually passed to the PPM.
  */
-#define UCSI_TIMEOUT_MS                1000
+#define UCSI_TIMEOUT_MS                5000
 
 /*
  * UCSI_SWAP_TIMEOUT_MS - Timeout for role swap requests
index c31c840..d41d0cd 100644 (file)
@@ -186,7 +186,12 @@ static ssize_t rebind_store(struct device_driver *dev, const char *buf,
        if (!bid)
                return -ENODEV;
 
+       /* device_attach() callers should hold parent lock for USB */
+       if (bid->udev->dev.parent)
+               device_lock(bid->udev->dev.parent);
        ret = device_attach(&bid->udev->dev);
+       if (bid->udev->dev.parent)
+               device_unlock(bid->udev->dev.parent);
        if (ret < 0) {
                dev_err(&bid->udev->dev, "rebind failed\n");
                return ret;
index 473fb8a..bf8afe9 100644 (file)
@@ -243,7 +243,7 @@ enum usbip_side {
 #define        VUDC_EVENT_ERROR_USB    (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
 #define        VUDC_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
 
-#define        VDEV_EVENT_REMOVED      (USBIP_EH_SHUTDOWN | USBIP_EH_BYE)
+#define        VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_RESET | USBIP_EH_BYE)
 #define        VDEV_EVENT_DOWN         (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
 #define        VDEV_EVENT_ERROR_TCP    (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
 #define        VDEV_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
index 5b4c086..5d88917 100644 (file)
@@ -91,10 +91,6 @@ static void event_handler(struct work_struct *work)
                        unset_event(ud, USBIP_EH_UNUSABLE);
                }
 
-               /* Stop the error handler. */
-               if (ud->event & USBIP_EH_BYE)
-                       usbip_dbg_eh("removed %p\n", ud);
-
                wake_up(&ud->eh_waitq);
        }
 }
index 20e3d46..d11f3f8 100644 (file)
@@ -354,6 +354,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                usbip_dbg_vhci_rh(" ClearHubFeature\n");
                break;
        case ClearPortFeature:
+               if (rhport < 0)
+                       goto error;
                switch (wValue) {
                case USB_PORT_FEAT_SUSPEND:
                        if (hcd->speed == HCD_USB3) {
@@ -511,11 +513,16 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                                goto error;
                        }
 
+                       if (rhport < 0)
+                               goto error;
+
                        vhci_hcd->port_status[rhport] |= USB_PORT_STAT_SUSPEND;
                        break;
                case USB_PORT_FEAT_POWER:
                        usbip_dbg_vhci_rh(
                                " SetPortFeature: USB_PORT_FEAT_POWER\n");
+                       if (rhport < 0)
+                               goto error;
                        if (hcd->speed == HCD_USB3)
                                vhci_hcd->port_status[rhport] |= USB_SS_PORT_STAT_POWER;
                        else
@@ -524,6 +531,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                case USB_PORT_FEAT_BH_PORT_RESET:
                        usbip_dbg_vhci_rh(
                                " SetPortFeature: USB_PORT_FEAT_BH_PORT_RESET\n");
+                       if (rhport < 0)
+                               goto error;
                        /* Applicable only for USB3.0 hub */
                        if (hcd->speed != HCD_USB3) {
                                pr_err("USB_PORT_FEAT_BH_PORT_RESET req not "
@@ -534,6 +543,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                case USB_PORT_FEAT_RESET:
                        usbip_dbg_vhci_rh(
                                " SetPortFeature: USB_PORT_FEAT_RESET\n");
+                       if (rhport < 0)
+                               goto error;
                        /* if it's already enabled, disable */
                        if (hcd->speed == HCD_USB3) {
                                vhci_hcd->port_status[rhport] = 0;
@@ -554,6 +565,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                default:
                        usbip_dbg_vhci_rh(" SetPortFeature: default %d\n",
                                          wValue);
+                       if (rhport < 0)
+                               goto error;
                        if (hcd->speed == HCD_USB3) {
                                if ((vhci_hcd->port_status[rhport] &
                                     USB_SS_PORT_STAT_POWER) != 0) {
index 190dbf8..2f3856a 100644 (file)
@@ -114,7 +114,7 @@ static void vbg_guest_mappings_init(struct vbg_dev *gdev)
        }
 
 out:
-       kfree(req);
+       vbg_req_free(req, sizeof(*req));
        kfree(pages);
 }
 
@@ -144,7 +144,7 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
 
        rc = vbg_req_perform(gdev, req);
 
-       kfree(req);
+       vbg_req_free(req, sizeof(*req));
 
        if (rc < 0) {
                vbg_err("%s error: %d\n", __func__, rc);
@@ -214,8 +214,8 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
        ret = vbg_status_code_to_errno(rc);
 
 out_free:
-       kfree(req2);
-       kfree(req1);
+       vbg_req_free(req2, sizeof(*req2));
+       vbg_req_free(req1, sizeof(*req1));
        return ret;
 }
 
@@ -245,7 +245,7 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
        if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
                rc = VINF_SUCCESS;
 
-       kfree(req);
+       vbg_req_free(req, sizeof(*req));
 
        return vbg_status_code_to_errno(rc);
 }
@@ -431,7 +431,7 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
        rc = vbg_req_perform(gdev, req);
        do_div(req->interval_ns, 1000000); /* ns -> ms */
        gdev->heartbeat_interval_ms = req->interval_ns;
-       kfree(req);
+       vbg_req_free(req, sizeof(*req));
 
        return vbg_status_code_to_errno(rc);
 }
@@ -454,12 +454,6 @@ static int vbg_heartbeat_init(struct vbg_dev *gdev)
        if (ret < 0)
                return ret;
 
-       /*
-        * Preallocate the request to use it from the timer callback because:
-        *    1) on Windows vbg_req_alloc must be called at IRQL <= APC_LEVEL
-        *       and the timer callback runs at DISPATCH_LEVEL;
-        *    2) avoid repeated allocations.
-        */
        gdev->guest_heartbeat_req = vbg_req_alloc(
                                        sizeof(*gdev->guest_heartbeat_req),
                                        VMMDEVREQ_GUEST_HEARTBEAT);
@@ -481,8 +475,8 @@ static void vbg_heartbeat_exit(struct vbg_dev *gdev)
 {
        del_timer_sync(&gdev->heartbeat_timer);
        vbg_heartbeat_host_config(gdev, false);
-       kfree(gdev->guest_heartbeat_req);
-
+       vbg_req_free(gdev->guest_heartbeat_req,
+                    sizeof(*gdev->guest_heartbeat_req));
 }
 
 /**
@@ -543,7 +537,7 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
        if (rc < 0)
                vbg_err("%s error, rc: %d\n", __func__, rc);
 
-       kfree(req);
+       vbg_req_free(req, sizeof(*req));
        return vbg_status_code_to_errno(rc);
 }
 
@@ -617,7 +611,7 @@ static int vbg_set_session_event_filter(struct vbg_dev *gdev,
 
 out:
        mutex_unlock(&gdev->session_mutex);
-       kfree(req);
+       vbg_req_free(req, sizeof(*req));
 
        return ret;
 }
@@ -642,7 +636,7 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
        if (rc < 0)
                vbg_err("%s error, rc: %d\n", __func__, rc);
 
-       kfree(req);
+       vbg_req_free(req, sizeof(*req));
        return vbg_status_code_to_errno(rc);
 }
 
@@ -712,7 +706,7 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
 
 out:
        mutex_unlock(&gdev->session_mutex);
-       kfree(req);
+       vbg_req_free(req, sizeof(*req));
 
        return ret;
 }
@@ -733,8 +727,10 @@ static int vbg_query_host_version(struct vbg_dev *gdev)
 
        rc = vbg_req_perform(gdev, req);
        ret = vbg_status_code_to_errno(rc);
-       if (ret)
+       if (ret) {
+               vbg_err("%s error: %d\n", __func__, rc);
                goto out;
+       }
 
        snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
                 req->major, req->minor, req->build, req->revision);
@@ -749,7 +745,7 @@ static int vbg_query_host_version(struct vbg_dev *gdev)
        }
 
 out:
-       kfree(req);
+       vbg_req_free(req, sizeof(*req));
        return ret;
 }
 
@@ -847,11 +843,16 @@ int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
        return 0;
 
 err_free_reqs:
-       kfree(gdev->mouse_status_req);
-       kfree(gdev->ack_events_req);
-       kfree(gdev->cancel_req);
-       kfree(gdev->mem_balloon.change_req);
-       kfree(gdev->mem_balloon.get_req);
+       vbg_req_free(gdev->mouse_status_req,
+                    sizeof(*gdev->mouse_status_req));
+       vbg_req_free(gdev->ack_events_req,
+                    sizeof(*gdev->ack_events_req));
+       vbg_req_free(gdev->cancel_req,
+                    sizeof(*gdev->cancel_req));
+       vbg_req_free(gdev->mem_balloon.change_req,
+                    sizeof(*gdev->mem_balloon.change_req));
+       vbg_req_free(gdev->mem_balloon.get_req,
+                    sizeof(*gdev->mem_balloon.get_req));
        return ret;
 }
 
@@ -872,11 +873,16 @@ void vbg_core_exit(struct vbg_dev *gdev)
        vbg_reset_host_capabilities(gdev);
        vbg_core_set_mouse_status(gdev, 0);
 
-       kfree(gdev->mouse_status_req);
-       kfree(gdev->ack_events_req);
-       kfree(gdev->cancel_req);
-       kfree(gdev->mem_balloon.change_req);
-       kfree(gdev->mem_balloon.get_req);
+       vbg_req_free(gdev->mouse_status_req,
+                    sizeof(*gdev->mouse_status_req));
+       vbg_req_free(gdev->ack_events_req,
+                    sizeof(*gdev->ack_events_req));
+       vbg_req_free(gdev->cancel_req,
+                    sizeof(*gdev->cancel_req));
+       vbg_req_free(gdev->mem_balloon.change_req,
+                    sizeof(*gdev->mem_balloon.change_req));
+       vbg_req_free(gdev->mem_balloon.get_req,
+                    sizeof(*gdev->mem_balloon.get_req));
 }
 
 /**
@@ -1415,7 +1421,7 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
        req->flags = dump->u.in.flags;
        dump->hdr.rc = vbg_req_perform(gdev, req);
 
-       kfree(req);
+       vbg_req_free(req, sizeof(*req));
        return 0;
 }
 
@@ -1513,7 +1519,7 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
        if (rc < 0)
                vbg_err("%s error, rc: %d\n", __func__, rc);
 
-       kfree(req);
+       vbg_req_free(req, sizeof(*req));
        return vbg_status_code_to_errno(rc);
 }
 
index 6c784bf..7ad9ec4 100644 (file)
@@ -171,4 +171,13 @@ irqreturn_t vbg_core_isr(int irq, void *dev_id);
 
 void vbg_linux_mouse_event(struct vbg_dev *gdev);
 
+/* Private (non-exported) functions from vboxguest_utils.c */
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
+void vbg_req_free(void *req, size_t len);
+int vbg_req_perform(struct vbg_dev *gdev, void *req);
+int vbg_hgcm_call32(
+       struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
+       struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
+       int *vbox_status);
+
 #endif
index 82e280d..398d226 100644 (file)
@@ -87,6 +87,7 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
        struct vbg_session *session = filp->private_data;
        size_t returned_size, size;
        struct vbg_ioctl_hdr hdr;
+       bool is_vmmdev_req;
        int ret = 0;
        void *buf;
 
@@ -106,8 +107,17 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
        if (size > SZ_16M)
                return -E2BIG;
 
-       /* __GFP_DMA32 because IOCTL_VMMDEV_REQUEST passes this to the host */
-       buf = kmalloc(size, GFP_KERNEL | __GFP_DMA32);
+       /*
+        * IOCTL_VMMDEV_REQUEST needs the buffer to be below 4G to avoid
+        * the need for a bounce-buffer and another copy later on.
+        */
+       is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) ||
+                        req == VBG_IOCTL_VMMDEV_REQUEST_BIG;
+
+       if (is_vmmdev_req)
+               buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT);
+       else
+               buf = kmalloc(size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
 
@@ -132,7 +142,10 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
                ret = -EFAULT;
 
 out:
-       kfree(buf);
+       if (is_vmmdev_req)
+               vbg_req_free(buf, size);
+       else
+               kfree(buf);
 
        return ret;
 }
index 0f0dab8..bf44742 100644 (file)
@@ -65,8 +65,9 @@ VBG_LOG(vbg_debug, pr_debug);
 void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
 {
        struct vmmdev_request_header *req;
+       int order = get_order(PAGE_ALIGN(len));
 
-       req = kmalloc(len, GFP_KERNEL | __GFP_DMA32);
+       req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
        if (!req)
                return NULL;
 
@@ -82,6 +83,14 @@ void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
        return req;
 }
 
+void vbg_req_free(void *req, size_t len)
+{
+       if (!req)
+               return;
+
+       free_pages((unsigned long)req, get_order(PAGE_ALIGN(len)));
+}
+
 /* Note this function returns a VBox status code, not a negative errno!! */
 int vbg_req_perform(struct vbg_dev *gdev, void *req)
 {
@@ -137,7 +146,7 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
                rc = hgcm_connect->header.result;
        }
 
-       kfree(hgcm_connect);
+       vbg_req_free(hgcm_connect, sizeof(*hgcm_connect));
 
        *vbox_status = rc;
        return 0;
@@ -166,7 +175,7 @@ int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status)
        if (rc >= 0)
                rc = hgcm_disconnect->header.result;
 
-       kfree(hgcm_disconnect);
+       vbg_req_free(hgcm_disconnect, sizeof(*hgcm_disconnect));
 
        *vbox_status = rc;
        return 0;
@@ -623,7 +632,7 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
        }
 
        if (!leak_it)
-               kfree(call);
+               vbg_req_free(call, size);
 
 free_bounce_bufs:
        if (bounce_bufs) {
index 3bedfed..7587fb6 100644 (file)
@@ -121,7 +121,7 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
        p = text;
        do {
                struct sockaddr_rxrpc *srx = &alist->addrs[alist->nr_addrs];
-               char tdelim = delim;
+               const char *q, *stop;
 
                if (*p == delim) {
                        p++;
@@ -130,28 +130,33 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
 
                if (*p == '[') {
                        p++;
-                       tdelim = ']';
+                       q = memchr(p, ']', end - p);
+               } else {
+                       for (q = p; q < end; q++)
+                               if (*q == '+' || *q == delim)
+                                       break;
                }
 
-               if (in4_pton(p, end - p,
+               if (in4_pton(p, q - p,
                             (u8 *)&srx->transport.sin6.sin6_addr.s6_addr32[3],
-                            tdelim, &p)) {
+                            -1, &stop)) {
                        srx->transport.sin6.sin6_addr.s6_addr32[0] = 0;
                        srx->transport.sin6.sin6_addr.s6_addr32[1] = 0;
                        srx->transport.sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
-               } else if (in6_pton(p, end - p,
+               } else if (in6_pton(p, q - p,
                                    srx->transport.sin6.sin6_addr.s6_addr,
-                                   tdelim, &p)) {
+                                   -1, &stop)) {
                        /* Nothing to do */
                } else {
                        goto bad_address;
                }
 
-               if (tdelim == ']') {
-                       if (p == end || *p != ']')
-                               goto bad_address;
+               if (stop != q)
+                       goto bad_address;
+
+               p = q;
+               if (q < end && *q == ']')
                        p++;
-               }
 
                if (p < end) {
                        if (*p == '+') {
index abd9a84..571437d 100644 (file)
 /*
  * Set up an interest-in-callbacks record for a volume on a server and
  * register it with the server.
- * - Called with volume->server_sem held.
+ * - Called with vnode->io_lock held.
  */
 int afs_register_server_cb_interest(struct afs_vnode *vnode,
-                                   struct afs_server_entry *entry)
+                                   struct afs_server_list *slist,
+                                   unsigned int index)
 {
-       struct afs_cb_interest *cbi = entry->cb_interest, *vcbi, *new, *x;
+       struct afs_server_entry *entry = &slist->servers[index];
+       struct afs_cb_interest *cbi, *vcbi, *new, *old;
        struct afs_server *server = entry->server;
 
 again:
+       if (vnode->cb_interest &&
+           likely(vnode->cb_interest == entry->cb_interest))
+               return 0;
+
+       read_lock(&slist->lock);
+       cbi = afs_get_cb_interest(entry->cb_interest);
+       read_unlock(&slist->lock);
+
        vcbi = vnode->cb_interest;
        if (vcbi) {
-               if (vcbi == cbi)
+               if (vcbi == cbi) {
+                       afs_put_cb_interest(afs_v2net(vnode), cbi);
                        return 0;
+               }
 
+               /* Use a new interest in the server list for the same server
+                * rather than an old one that's still attached to a vnode.
+                */
                if (cbi && vcbi->server == cbi->server) {
                        write_seqlock(&vnode->cb_lock);
-                       vnode->cb_interest = afs_get_cb_interest(cbi);
+                       old = vnode->cb_interest;
+                       vnode->cb_interest = cbi;
                        write_sequnlock(&vnode->cb_lock);
-                       afs_put_cb_interest(afs_v2net(vnode), cbi);
+                       afs_put_cb_interest(afs_v2net(vnode), old);
                        return 0;
                }
 
+               /* Re-use the one attached to the vnode. */
                if (!cbi && vcbi->server == server) {
-                       afs_get_cb_interest(vcbi);
-                       x = cmpxchg(&entry->cb_interest, cbi, vcbi);
-                       if (x != cbi) {
-                               cbi = x;
-                               afs_put_cb_interest(afs_v2net(vnode), vcbi);
+                       write_lock(&slist->lock);
+                       if (entry->cb_interest) {
+                               write_unlock(&slist->lock);
+                               afs_put_cb_interest(afs_v2net(vnode), cbi);
                                goto again;
                        }
+
+                       entry->cb_interest = cbi;
+                       write_unlock(&slist->lock);
                        return 0;
                }
        }
@@ -72,13 +91,16 @@ again:
                list_add_tail(&new->cb_link, &server->cb_interests);
                write_unlock(&server->cb_break_lock);
 
-               x = cmpxchg(&entry->cb_interest, cbi, new);
-               if (x == cbi) {
+               write_lock(&slist->lock);
+               if (!entry->cb_interest) {
+                       entry->cb_interest = afs_get_cb_interest(new);
                        cbi = new;
+                       new = NULL;
                } else {
-                       cbi = x;
-                       afs_put_cb_interest(afs_v2net(vnode), new);
+                       cbi = afs_get_cb_interest(entry->cb_interest);
                }
+               write_unlock(&slist->lock);
+               afs_put_cb_interest(afs_v2net(vnode), new);
        }
 
        ASSERT(cbi);
@@ -88,11 +110,14 @@ again:
         */
        write_seqlock(&vnode->cb_lock);
 
-       vnode->cb_interest = afs_get_cb_interest(cbi);
+       old = vnode->cb_interest;
+       vnode->cb_interest = cbi;
        vnode->cb_s_break = cbi->server->cb_s_break;
+       vnode->cb_v_break = vnode->volume->cb_v_break;
        clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
 
        write_sequnlock(&vnode->cb_lock);
+       afs_put_cb_interest(afs_v2net(vnode), old);
        return 0;
 }
 
@@ -171,13 +196,24 @@ static void afs_break_one_callback(struct afs_server *server,
                if (cbi->vid != fid->vid)
                        continue;
 
-               data.volume = NULL;
-               data.fid = *fid;
-               inode = ilookup5_nowait(cbi->sb, fid->vnode, afs_iget5_test, &data);
-               if (inode) {
-                       vnode = AFS_FS_I(inode);
-                       afs_break_callback(vnode);
-                       iput(inode);
+               if (fid->vnode == 0 && fid->unique == 0) {
+                       /* The callback break applies to an entire volume. */
+                       struct afs_super_info *as = AFS_FS_S(cbi->sb);
+                       struct afs_volume *volume = as->volume;
+
+                       write_lock(&volume->cb_break_lock);
+                       volume->cb_v_break++;
+                       write_unlock(&volume->cb_break_lock);
+               } else {
+                       data.volume = NULL;
+                       data.fid = *fid;
+                       inode = ilookup5_nowait(cbi->sb, fid->vnode,
+                                               afs_iget5_test, &data);
+                       if (inode) {
+                               vnode = AFS_FS_I(inode);
+                               afs_break_callback(vnode);
+                               iput(inode);
+                       }
                }
        }
 
@@ -195,6 +231,8 @@ void afs_break_callbacks(struct afs_server *server, size_t count,
        ASSERT(server != NULL);
        ASSERTCMP(count, <=, AFSCBMAX);
 
+       /* TODO: Sort the callback break list by volume ID */
+
        for (; count > 0; callbacks++, count--) {
                _debug("- Fid { vl=%08x n=%u u=%u }  CB { v=%u x=%u t=%u }",
                       callbacks->fid.vid,
index 357de90..c332c95 100644 (file)
@@ -133,21 +133,10 @@ bool afs_cm_incoming_call(struct afs_call *call)
 }
 
 /*
- * clean up a cache manager call
+ * Clean up a cache manager call.
  */
 static void afs_cm_destructor(struct afs_call *call)
 {
-       _enter("");
-
-       /* Break the callbacks here so that we do it after the final ACK is
-        * received.  The step number here must match the final number in
-        * afs_deliver_cb_callback().
-        */
-       if (call->unmarshall == 5) {
-               ASSERT(call->cm_server && call->count && call->request);
-               afs_break_callbacks(call->cm_server, call->count, call->request);
-       }
-
        kfree(call->buffer);
        call->buffer = NULL;
 }
@@ -161,14 +150,14 @@ static void SRXAFSCB_CallBack(struct work_struct *work)
 
        _enter("");
 
-       /* be sure to send the reply *before* attempting to spam the AFS server
-        * with FSFetchStatus requests on the vnodes with broken callbacks lest
-        * the AFS server get into a vicious cycle of trying to break further
-        * callbacks because it hadn't received completion of the CBCallBack op
-        * yet */
-       afs_send_empty_reply(call);
+       /* We need to break the callbacks before sending the reply as the
+        * server holds up change visibility till it receives our reply so as
+        * to maintain cache coherency.
+        */
+       if (call->cm_server)
+               afs_break_callbacks(call->cm_server, call->count, call->request);
 
-       afs_break_callbacks(call->cm_server, call->count, call->request);
+       afs_send_empty_reply(call);
        afs_put_call(call);
        _leave("");
 }
@@ -180,7 +169,6 @@ static int afs_deliver_cb_callback(struct afs_call *call)
 {
        struct afs_callback_break *cb;
        struct sockaddr_rxrpc srx;
-       struct afs_server *server;
        __be32 *bp;
        int ret, loop;
 
@@ -267,15 +255,6 @@ static int afs_deliver_cb_callback(struct afs_call *call)
 
                call->offset = 0;
                call->unmarshall++;
-
-               /* Record that the message was unmarshalled successfully so
-                * that the call destructor can know do the callback breaking
-                * work, even if the final ACK isn't received.
-                *
-                * If the step number changes, then afs_cm_destructor() must be
-                * updated also.
-                */
-               call->unmarshall++;
        case 5:
                break;
        }
@@ -286,10 +265,9 @@ static int afs_deliver_cb_callback(struct afs_call *call)
        /* we'll need the file server record as that tells us which set of
         * vnodes to operate upon */
        rxrpc_kernel_get_peer(call->net->socket, call->rxcall, &srx);
-       server = afs_find_server(call->net, &srx);
-       if (!server)
-               return -ENOTCONN;
-       call->cm_server = server;
+       call->cm_server = afs_find_server(call->net, &srx);
+       if (!call->cm_server)
+               trace_afs_cm_no_server(call, &srx);
 
        return afs_queue_call_work(call);
 }
@@ -303,7 +281,8 @@ static void SRXAFSCB_InitCallBackState(struct work_struct *work)
 
        _enter("{%p}", call->cm_server);
 
-       afs_init_callback_state(call->cm_server);
+       if (call->cm_server)
+               afs_init_callback_state(call->cm_server);
        afs_send_empty_reply(call);
        afs_put_call(call);
        _leave("");
@@ -315,7 +294,6 @@ static void SRXAFSCB_InitCallBackState(struct work_struct *work)
 static int afs_deliver_cb_init_call_back_state(struct afs_call *call)
 {
        struct sockaddr_rxrpc srx;
-       struct afs_server *server;
        int ret;
 
        _enter("");
@@ -328,10 +306,9 @@ static int afs_deliver_cb_init_call_back_state(struct afs_call *call)
 
        /* we'll need the file server record as that tells us which set of
         * vnodes to operate upon */
-       server = afs_find_server(call->net, &srx);
-       if (!server)
-               return -ENOTCONN;
-       call->cm_server = server;
+       call->cm_server = afs_find_server(call->net, &srx);
+       if (!call->cm_server)
+               trace_afs_cm_no_server(call, &srx);
 
        return afs_queue_call_work(call);
 }
@@ -341,8 +318,6 @@ static int afs_deliver_cb_init_call_back_state(struct afs_call *call)
  */
 static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
 {
-       struct sockaddr_rxrpc srx;
-       struct afs_server *server;
        struct afs_uuid *r;
        unsigned loop;
        __be32 *b;
@@ -398,11 +373,11 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
 
        /* we'll need the file server record as that tells us which set of
         * vnodes to operate upon */
-       rxrpc_kernel_get_peer(call->net->socket, call->rxcall, &srx);
-       server = afs_find_server(call->net, &srx);
-       if (!server)
-               return -ENOTCONN;
-       call->cm_server = server;
+       rcu_read_lock();
+       call->cm_server = afs_find_server_by_uuid(call->net, call->request);
+       rcu_read_unlock();
+       if (!call->cm_server)
+               trace_afs_cm_no_server_u(call, call->request);
 
        return afs_queue_call_work(call);
 }
index 5889f70..7d62300 100644 (file)
@@ -180,6 +180,7 @@ static int afs_dir_open(struct inode *inode, struct file *file)
  * get reclaimed during the iteration.
  */
 static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key)
+       __acquires(&dvnode->validate_lock)
 {
        struct afs_read *req;
        loff_t i_size;
@@ -261,18 +262,21 @@ retry:
        /* If we're going to reload, we need to lock all the pages to prevent
         * races.
         */
-       if (!test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) {
-               ret = -ERESTARTSYS;
-               for (i = 0; i < req->nr_pages; i++)
-                       if (lock_page_killable(req->pages[i]) < 0)
-                               goto error_unlock;
+       ret = -ERESTARTSYS;
+       if (down_read_killable(&dvnode->validate_lock) < 0)
+               goto error;
 
-               if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
-                       goto success;
+       if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
+               goto success;
+
+       up_read(&dvnode->validate_lock);
+       if (down_write_killable(&dvnode->validate_lock) < 0)
+               goto error;
 
+       if (!test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) {
                ret = afs_fetch_data(dvnode, key, req);
                if (ret < 0)
-                       goto error_unlock_all;
+                       goto error_unlock;
 
                task_io_account_read(PAGE_SIZE * req->nr_pages);
 
@@ -284,33 +288,26 @@ retry:
                for (i = 0; i < req->nr_pages; i++)
                        if (!afs_dir_check_page(dvnode, req->pages[i],
                                                req->actual_len))
-                               goto error_unlock_all;
+                               goto error_unlock;
 
                // TODO: Trim excess pages
 
                set_bit(AFS_VNODE_DIR_VALID, &dvnode->flags);
        }
 
+       downgrade_write(&dvnode->validate_lock);
 success:
-       i = req->nr_pages;
-       while (i > 0)
-               unlock_page(req->pages[--i]);
        return req;
 
-error_unlock_all:
-       i = req->nr_pages;
 error_unlock:
-       while (i > 0)
-               unlock_page(req->pages[--i]);
+       up_write(&dvnode->validate_lock);
 error:
        afs_put_read(req);
        _leave(" = %d", ret);
        return ERR_PTR(ret);
 
 content_has_grown:
-       i = req->nr_pages;
-       while (i > 0)
-               unlock_page(req->pages[--i]);
+       up_write(&dvnode->validate_lock);
        afs_put_read(req);
        goto retry;
 }
@@ -473,6 +470,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
        }
 
 out:
+       up_read(&dvnode->validate_lock);
        afs_put_read(req);
        _leave(" = %d", ret);
        return ret;
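
Condensed from the +/- lines above, the new validate_lock flow is a shared-lock fast path with an upgrade, re-check and downgrade when the directory must be reloaded. A simplified sketch reusing the hunk's names (not compilable on its own, details elided):

        static int validate_and_read(struct afs_vnode *dvnode)
        {
                if (down_read_killable(&dvnode->validate_lock) < 0)
                        return -ERESTARTSYS;

                if (!test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) {
                        /* Upgrade: drop the shared lock, take it exclusively. */
                        up_read(&dvnode->validate_lock);
                        if (down_write_killable(&dvnode->validate_lock) < 0)
                                return -ERESTARTSYS;

                        /* Re-check under the exclusive lock before reloading. */
                        if (!test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) {
                                /* ... fetch and verify the directory pages;
                                 * on failure, up_write() and return the error ... */
                                set_bit(AFS_VNODE_DIR_VALID, &dvnode->flags);
                        }
                        downgrade_write(&dvnode->validate_lock);
                }

                /* ... walk the contents under the shared lock ... */
                up_read(&dvnode->validate_lock);
                return 0;
        }
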
@@ -1143,7 +1141,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        ret = -ERESTARTSYS;
        if (afs_begin_vnode_operation(&fc, dvnode, key)) {
                while (afs_select_fileserver(&fc)) {
-                       fc.cb_break = dvnode->cb_break + dvnode->cb_s_break;
+                       fc.cb_break = afs_calc_vnode_cb_break(dvnode);
                        afs_fs_create(&fc, dentry->d_name.name, mode, data_version,
                                      &newfid, &newstatus, &newcb);
                }
@@ -1213,7 +1211,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
        ret = -ERESTARTSYS;
        if (afs_begin_vnode_operation(&fc, dvnode, key)) {
                while (afs_select_fileserver(&fc)) {
-                       fc.cb_break = dvnode->cb_break + dvnode->cb_s_break;
+                       fc.cb_break = afs_calc_vnode_cb_break(dvnode);
                        afs_fs_remove(&fc, dentry->d_name.name, true,
                                      data_version);
                }
@@ -1316,7 +1314,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
        ret = -ERESTARTSYS;
        if (afs_begin_vnode_operation(&fc, dvnode, key)) {
                while (afs_select_fileserver(&fc)) {
-                       fc.cb_break = dvnode->cb_break + dvnode->cb_s_break;
+                       fc.cb_break = afs_calc_vnode_cb_break(dvnode);
                        afs_fs_remove(&fc, dentry->d_name.name, false,
                                      data_version);
                }
@@ -1373,7 +1371,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
        ret = -ERESTARTSYS;
        if (afs_begin_vnode_operation(&fc, dvnode, key)) {
                while (afs_select_fileserver(&fc)) {
-                       fc.cb_break = dvnode->cb_break + dvnode->cb_s_break;
+                       fc.cb_break = afs_calc_vnode_cb_break(dvnode);
                        afs_fs_create(&fc, dentry->d_name.name, mode, data_version,
                                      &newfid, &newstatus, &newcb);
                }
@@ -1443,8 +1441,8 @@ static int afs_link(struct dentry *from, struct inode *dir,
                }
 
                while (afs_select_fileserver(&fc)) {
-                       fc.cb_break = dvnode->cb_break + dvnode->cb_s_break;
-                       fc.cb_break_2 = vnode->cb_break + vnode->cb_s_break;
+                       fc.cb_break = afs_calc_vnode_cb_break(dvnode);
+                       fc.cb_break_2 = afs_calc_vnode_cb_break(vnode);
                        afs_fs_link(&fc, vnode, dentry->d_name.name, data_version);
                }
 
@@ -1512,7 +1510,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
        ret = -ERESTARTSYS;
        if (afs_begin_vnode_operation(&fc, dvnode, key)) {
                while (afs_select_fileserver(&fc)) {
-                       fc.cb_break = dvnode->cb_break + dvnode->cb_s_break;
+                       fc.cb_break = afs_calc_vnode_cb_break(dvnode);
                        afs_fs_symlink(&fc, dentry->d_name.name,
                                       content, data_version,
                                       &newfid, &newstatus);
@@ -1588,8 +1586,8 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        }
                }
                while (afs_select_fileserver(&fc)) {
-                       fc.cb_break = orig_dvnode->cb_break + orig_dvnode->cb_s_break;
-                       fc.cb_break_2 = new_dvnode->cb_break + new_dvnode->cb_s_break;
+                       fc.cb_break = afs_calc_vnode_cb_break(orig_dvnode);
+                       fc.cb_break_2 = afs_calc_vnode_cb_break(new_dvnode);
                        afs_fs_rename(&fc, old_dentry->d_name.name,
                                      new_dvnode, new_dentry->d_name.name,
                                      orig_data_version, new_data_version);
index c24c080..7d4f261 100644 (file)
@@ -238,7 +238,7 @@ int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *de
        ret = -ERESTARTSYS;
        if (afs_begin_vnode_operation(&fc, vnode, key)) {
                while (afs_select_fileserver(&fc)) {
-                       fc.cb_break = vnode->cb_break + vnode->cb_s_break;
+                       fc.cb_break = afs_calc_vnode_cb_break(vnode);
                        afs_fs_fetch_data(&fc, desc);
                }
 
index 7a0e017..dc62d15 100644 (file)
@@ -86,7 +86,7 @@ static int afs_set_lock(struct afs_vnode *vnode, struct key *key,
        ret = -ERESTARTSYS;
        if (afs_begin_vnode_operation(&fc, vnode, key)) {
                while (afs_select_fileserver(&fc)) {
-                       fc.cb_break = vnode->cb_break + vnode->cb_s_break;
+                       fc.cb_break = afs_calc_vnode_cb_break(vnode);
                        afs_fs_set_lock(&fc, type);
                }
 
@@ -117,7 +117,7 @@ static int afs_extend_lock(struct afs_vnode *vnode, struct key *key)
        ret = -ERESTARTSYS;
        if (afs_begin_vnode_operation(&fc, vnode, key)) {
                while (afs_select_current_fileserver(&fc)) {
-                       fc.cb_break = vnode->cb_break + vnode->cb_s_break;
+                       fc.cb_break = afs_calc_vnode_cb_break(vnode);
                        afs_fs_extend_lock(&fc);
                }
 
@@ -148,7 +148,7 @@ static int afs_release_lock(struct afs_vnode *vnode, struct key *key)
        ret = -ERESTARTSYS;
        if (afs_begin_vnode_operation(&fc, vnode, key)) {
                while (afs_select_current_fileserver(&fc)) {
-                       fc.cb_break = vnode->cb_break + vnode->cb_s_break;
+                       fc.cb_break = afs_calc_vnode_cb_break(vnode);
                        afs_fs_release_lock(&fc);
                }
 
index efacdb7..b273e1d 100644 (file)
@@ -134,6 +134,7 @@ static int xdr_decode_AFSFetchStatus(struct afs_call *call,
                                     struct afs_read *read_req)
 {
        const struct afs_xdr_AFSFetchStatus *xdr = (const void *)*_bp;
+       bool inline_error = (call->operation_ID == afs_FS_InlineBulkStatus);
        u64 data_version, size;
        u32 type, abort_code;
        u8 flags = 0;
@@ -142,13 +143,32 @@ static int xdr_decode_AFSFetchStatus(struct afs_call *call,
        if (vnode)
                write_seqlock(&vnode->cb_lock);
 
+       abort_code = ntohl(xdr->abort_code);
+
        if (xdr->if_version != htonl(AFS_FSTATUS_VERSION)) {
+               if (xdr->if_version == htonl(0) &&
+                   abort_code != 0 &&
+                   inline_error) {
+                       /* The OpenAFS fileserver has a bug in FS.InlineBulkStatus
+                        * whereby it doesn't set the interface version in the error
+                        * case.
+                        */
+                       status->abort_code = abort_code;
+                       ret = 0;
+                       goto out;
+               }
+
                pr_warn("Unknown AFSFetchStatus version %u\n", ntohl(xdr->if_version));
                goto bad;
        }
 
+       if (abort_code != 0 && inline_error) {
+               status->abort_code = abort_code;
+               ret = 0;
+               goto out;
+       }
+
        type = ntohl(xdr->type);
-       abort_code = ntohl(xdr->abort_code);
        switch (type) {
        case AFS_FTYPE_FILE:
        case AFS_FTYPE_DIR:
@@ -165,13 +185,6 @@ static int xdr_decode_AFSFetchStatus(struct afs_call *call,
                }
                status->type = type;
                break;
-       case AFS_FTYPE_INVALID:
-               if (abort_code != 0) {
-                       status->abort_code = abort_code;
-                       ret = 0;
-                       goto out;
-               }
-               /* Fall through */
        default:
                goto bad;
        }
@@ -248,7 +261,7 @@ static void xdr_decode_AFSCallBack(struct afs_call *call,
 
        write_seqlock(&vnode->cb_lock);
 
-       if (call->cb_break == (vnode->cb_break + cbi->server->cb_s_break)) {
+       if (call->cb_break == afs_cb_break_sum(vnode, cbi)) {
                vnode->cb_version       = ntohl(*bp++);
                cb_expiry               = ntohl(*bp++);
                vnode->cb_type          = ntohl(*bp++);
index 06194cf..479b7fd 100644 (file)
@@ -108,7 +108,7 @@ int afs_fetch_status(struct afs_vnode *vnode, struct key *key, bool new_inode)
        ret = -ERESTARTSYS;
        if (afs_begin_vnode_operation(&fc, vnode, key)) {
                while (afs_select_fileserver(&fc)) {
-                       fc.cb_break = vnode->cb_break + vnode->cb_s_break;
+                       fc.cb_break = afs_calc_vnode_cb_break(vnode);
                        afs_fs_fetch_file_status(&fc, NULL, new_inode);
                }
 
@@ -393,15 +393,18 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
        read_seqlock_excl(&vnode->cb_lock);
 
        if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
-               if (vnode->cb_s_break != vnode->cb_interest->server->cb_s_break) {
+               if (vnode->cb_s_break != vnode->cb_interest->server->cb_s_break ||
+                   vnode->cb_v_break != vnode->volume->cb_v_break) {
                        vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
+                       vnode->cb_v_break = vnode->volume->cb_v_break;
+                       valid = false;
                } else if (vnode->status.type == AFS_FTYPE_DIR &&
                           test_bit(AFS_VNODE_DIR_VALID, &vnode->flags) &&
                           vnode->cb_expires_at - 10 > now) {
-                               valid = true;
+                       valid = true;
                } else if (!test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) &&
                           vnode->cb_expires_at - 10 > now) {
-                               valid = true;
+                       valid = true;
                }
        } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
                valid = true;
@@ -415,7 +418,7 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
        if (valid)
                goto valid;
 
-       mutex_lock(&vnode->validate_lock);
+       down_write(&vnode->validate_lock);
 
        /* if the promise has expired, we need to check the server again to get
         * a new promise - note that if the (parent) directory's metadata was
@@ -444,13 +447,13 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
         * different */
        if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags))
                afs_zap_data(vnode);
-       mutex_unlock(&vnode->validate_lock);
+       up_write(&vnode->validate_lock);
 valid:
        _leave(" = 0");
        return 0;
 
 error_unlock:
-       mutex_unlock(&vnode->validate_lock);
+       up_write(&vnode->validate_lock);
        _leave(" = %d", ret);
        return ret;
 }
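
Only the exclusive side of the new lock is visible in this hunk; the point of moving validate_lock from a mutex to an rw-semaphore is presumably that callers which merely need to confirm the vnode is still valid can hold it shared, while a full revalidation (as here) still takes it exclusively. A rough userspace analogue with POSIX rwlocks (illustrative names, not the AFS code; build with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t validate_lock = PTHREAD_RWLOCK_INITIALIZER;
static bool cb_promised;        /* stand-in for the vnode's validity state */

/* Readers: many callers can check validity at the same time. */
static bool vnode_still_valid(void)
{
        bool valid;

        pthread_rwlock_rdlock(&validate_lock);
        valid = cb_promised;
        pthread_rwlock_unlock(&validate_lock);
        return valid;
}

/* Writer: revalidation against the server is exclusive. */
static void revalidate_vnode(void)
{
        pthread_rwlock_wrlock(&validate_lock);
        /* ...fetch status from the server, zap cached data if needed... */
        cb_promised = true;
        pthread_rwlock_unlock(&validate_lock);
}

int main(void)
{
        if (!vnode_still_valid())
                revalidate_vnode();
        printf("valid: %d\n", vnode_still_valid());
        return 0;
}
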
@@ -574,7 +577,7 @@ int afs_setattr(struct dentry *dentry, struct iattr *attr)
        ret = -ERESTARTSYS;
        if (afs_begin_vnode_operation(&fc, vnode, key)) {
                while (afs_select_fileserver(&fc)) {
-                       fc.cb_break = vnode->cb_break + vnode->cb_s_break;
+                       fc.cb_break = afs_calc_vnode_cb_break(vnode);
                        afs_fs_setattr(&fc, attr);
                }
 
index f8086ec..e3f8a46 100644 (file)
@@ -396,6 +396,7 @@ struct afs_server {
 #define AFS_SERVER_FL_PROBED   5               /* The fileserver has been probed */
 #define AFS_SERVER_FL_PROBING  6               /* Fileserver is being probed */
 #define AFS_SERVER_FL_NO_IBULK 7               /* Fileserver doesn't support FS.InlineBulkStatus */
+#define AFS_SERVER_FL_MAY_HAVE_CB 8            /* May have callbacks on this fileserver */
        atomic_t                usage;
        u32                     addr_version;   /* Address list version */
 
@@ -433,6 +434,7 @@ struct afs_server_list {
        unsigned short          index;          /* Server currently in use */
        unsigned short          vnovol_mask;    /* Servers to be skipped due to VNOVOL */
        unsigned int            seq;            /* Set to ->servers_seq when installed */
+       rwlock_t                lock;
        struct afs_server_entry servers[];
 };
 
@@ -459,6 +461,9 @@ struct afs_volume {
        rwlock_t                servers_lock;   /* Lock for ->servers */
        unsigned int            servers_seq;    /* Incremented each time ->servers changes */
 
+       unsigned                cb_v_break;     /* Break-everything counter. */
+       rwlock_t                cb_break_lock;
+
        afs_voltype_t           type;           /* type of volume */
        short                   error;
        char                    type_force;     /* force volume type (suppress R/O -> R/W) */
@@ -494,7 +499,7 @@ struct afs_vnode {
 #endif
        struct afs_permits __rcu *permit_cache; /* cache of permits so far obtained */
        struct mutex            io_lock;        /* Lock for serialising I/O on this mutex */
-       struct mutex            validate_lock;  /* lock for validating this vnode */
+       struct rw_semaphore     validate_lock;  /* lock for validating this vnode */
        spinlock_t              wb_lock;        /* lock for wb_keys */
        spinlock_t              lock;           /* waitqueue/flags lock */
        unsigned long           flags;
@@ -519,6 +524,7 @@ struct afs_vnode {
        /* outstanding callback notification on this file */
        struct afs_cb_interest  *cb_interest;   /* Server on which this resides */
        unsigned int            cb_s_break;     /* Mass break counter on ->server */
+       unsigned int            cb_v_break;     /* Mass break counter on ->volume */
        unsigned int            cb_break;       /* Break counter on vnode */
        seqlock_t               cb_lock;        /* Lock for ->cb_interest, ->status, ->cb_*break */
 
@@ -648,16 +654,29 @@ extern void afs_init_callback_state(struct afs_server *);
 extern void afs_break_callback(struct afs_vnode *);
 extern void afs_break_callbacks(struct afs_server *, size_t, struct afs_callback_break*);
 
-extern int afs_register_server_cb_interest(struct afs_vnode *, struct afs_server_entry *);
+extern int afs_register_server_cb_interest(struct afs_vnode *,
+                                          struct afs_server_list *, unsigned int);
 extern void afs_put_cb_interest(struct afs_net *, struct afs_cb_interest *);
 extern void afs_clear_callback_interests(struct afs_net *, struct afs_server_list *);
 
 static inline struct afs_cb_interest *afs_get_cb_interest(struct afs_cb_interest *cbi)
 {
-       refcount_inc(&cbi->usage);
+       if (cbi)
+               refcount_inc(&cbi->usage);
        return cbi;
 }
 
+static inline unsigned int afs_calc_vnode_cb_break(struct afs_vnode *vnode)
+{
+       return vnode->cb_break + vnode->cb_s_break + vnode->cb_v_break;
+}
+
+static inline unsigned int afs_cb_break_sum(struct afs_vnode *vnode,
+                                           struct afs_cb_interest *cbi)
+{
+       return vnode->cb_break + cbi->server->cb_s_break + vnode->volume->cb_v_break;
+}
+
 /*
  * cell.c
  */
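
The helpers above fold the per-vnode, per-server and new per-volume break counters into a single value: the cursor snapshots the sum before issuing an RPC and compares it again when the reply is processed, so any callback break delivered in between changes the sum and the stale promise is not installed. A minimal userspace sketch of that pattern (the struct and names are illustrative, not the kernel types):

#include <stdio.h>

/* Illustrative stand-ins for the kernel's vnode/server/volume counters. */
struct obj {
        unsigned int cb_break;   /* broken on this object only */
        unsigned int cb_s_break; /* mass break on its server */
        unsigned int cb_v_break; /* mass break on its volume */
};

/* Sum of all break counters; wrap-around is fine, only equality matters. */
static unsigned int calc_cb_break(const struct obj *o)
{
        return o->cb_break + o->cb_s_break + o->cb_v_break;
}

int main(void)
{
        struct obj o = { 0, 0, 0 };

        unsigned int before = calc_cb_break(&o);  /* snapshot before the RPC */
        o.cb_v_break++;                 /* volume-wide break arrives mid-flight */

        if (calc_cb_break(&o) != before)
                printf("callback promise is stale, don't install it\n");
        else
                printf("promise still valid\n");
        return 0;
}
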
index ac0feac..e065bc0 100644 (file)
@@ -179,7 +179,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
                         */
                        if (fc->flags & AFS_FS_CURSOR_VNOVOL) {
                                fc->ac.error = -EREMOTEIO;
-                               goto failed;
+                               goto next_server;
                        }
 
                        write_lock(&vnode->volume->servers_lock);
@@ -201,7 +201,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
                         */
                        if (vnode->volume->servers == fc->server_list) {
                                fc->ac.error = -EREMOTEIO;
-                               goto failed;
+                               goto next_server;
                        }
 
                        /* Try again */
@@ -350,8 +350,8 @@ use_server:
         * break request before we've finished decoding the reply and
         * installing the vnode.
         */
-       fc->ac.error = afs_register_server_cb_interest(
-               vnode, &fc->server_list->servers[fc->index]);
+       fc->ac.error = afs_register_server_cb_interest(vnode, fc->server_list,
+                                                      fc->index);
        if (fc->ac.error < 0)
                goto failed;
 
@@ -369,8 +369,16 @@ use_server:
        if (!test_bit(AFS_SERVER_FL_PROBED, &server->flags)) {
                fc->ac.alist = afs_get_addrlist(alist);
 
-               if (!afs_probe_fileserver(fc))
-                       goto failed;
+               if (!afs_probe_fileserver(fc)) {
+                       switch (fc->ac.error) {
+                       case -ENOMEM:
+                       case -ERESTARTSYS:
+                       case -EINTR:
+                               goto failed;
+                       default:
+                               goto next_server;
+                       }
+               }
        }
 
        if (!fc->ac.alist)
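
A failed probe or an unresolvable VNOVOL now moves the cursor on to the next server instead of failing the whole operation; only errors that are fatal to the caller (allocation failure, interruption) still abort. A small standalone sketch of that classification (hypothetical helper, not the kernel function):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Decide whether an error should abort the whole operation or whether the
 * cursor should simply rotate to the next server in the list. */
static bool error_is_fatal(int err)
{
        switch (err) {
        case ENOMEM:
        case EINTR:     /* the kernel also treats -ERESTARTSYS this way */
                return true;    /* give up: failed */
        default:
                return false;   /* try the next server */
        }
}

int main(void)
{
        int probe_errors[] = { ECONNREFUSED, ETIMEDOUT, ENOMEM };

        for (unsigned i = 0; i < sizeof(probe_errors) / sizeof(probe_errors[0]); i++)
                printf("error %d -> %s\n", probe_errors[i],
                       error_is_fatal(probe_errors[i]) ? "failed" : "next server");
        return 0;
}
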
index 5c62639..0873594 100644 (file)
@@ -41,6 +41,7 @@ int afs_open_socket(struct afs_net *net)
 {
        struct sockaddr_rxrpc srx;
        struct socket *socket;
+       unsigned int min_level;
        int ret;
 
        _enter("");
@@ -60,6 +61,12 @@ int afs_open_socket(struct afs_net *net)
        srx.transport.sin6.sin6_family  = AF_INET6;
        srx.transport.sin6.sin6_port    = htons(AFS_CM_PORT);
 
+       min_level = RXRPC_SECURITY_ENCRYPT;
+       ret = kernel_setsockopt(socket, SOL_RXRPC, RXRPC_MIN_SECURITY_LEVEL,
+                               (void *)&min_level, sizeof(min_level));
+       if (ret < 0)
+               goto error_2;
+
        ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
        if (ret == -EADDRINUSE) {
                srx.transport.sin6.sin6_port = 0;
@@ -482,8 +489,12 @@ static void afs_deliver_to_call(struct afs_call *call)
                state = READ_ONCE(call->state);
                switch (ret) {
                case 0:
-                       if (state == AFS_CALL_CL_PROC_REPLY)
+                       if (state == AFS_CALL_CL_PROC_REPLY) {
+                               if (call->cbi)
+                                       set_bit(AFS_SERVER_FL_MAY_HAVE_CB,
+                                               &call->cbi->server->flags);
                                goto call_complete;
+                       }
                        ASSERTCMP(state, >, AFS_CALL_CL_PROC_REPLY);
                        goto done;
                case -EINPROGRESS:
@@ -493,11 +504,6 @@ static void afs_deliver_to_call(struct afs_call *call)
                case -ECONNABORTED:
                        ASSERTCMP(state, ==, AFS_CALL_COMPLETE);
                        goto done;
-               case -ENOTCONN:
-                       abort_code = RX_CALL_DEAD;
-                       rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
-                                               abort_code, ret, "KNC");
-                       goto local_abort;
                case -ENOTSUPP:
                        abort_code = RXGEN_OPCODE;
                        rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
index cea2fff..1992b0f 100644 (file)
@@ -147,8 +147,7 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
                                        break;
                                }
 
-                               if (cb_break != (vnode->cb_break +
-                                                vnode->cb_interest->server->cb_s_break)) {
+                               if (cb_break != afs_cb_break_sum(vnode, vnode->cb_interest)) {
                                        changed = true;
                                        break;
                                }
@@ -178,7 +177,7 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
                }
        }
 
-       if (cb_break != (vnode->cb_break + vnode->cb_interest->server->cb_s_break))
+       if (cb_break != afs_cb_break_sum(vnode, vnode->cb_interest))
                goto someone_else_changed_it;
 
        /* We need a ref on any permits list we want to copy as we'll have to
@@ -257,7 +256,7 @@ found:
 
        spin_lock(&vnode->lock);
        zap = rcu_access_pointer(vnode->permit_cache);
-       if (cb_break == (vnode->cb_break + vnode->cb_interest->server->cb_s_break) &&
+       if (cb_break == afs_cb_break_sum(vnode, vnode->cb_interest) &&
            zap == permits)
                rcu_assign_pointer(vnode->permit_cache, replacement);
        else
index 629c749..3af4625 100644 (file)
@@ -67,12 +67,6 @@ struct afs_server *afs_find_server(struct afs_net *net,
                                                              sizeof(struct in6_addr));
                                        if (diff == 0)
                                                goto found;
-                                       if (diff < 0) {
-                                               // TODO: Sort the list
-                                               //if (i == alist->nr_ipv4)
-                                               //      goto not_found;
-                                               break;
-                                       }
                                }
                        }
                } else {
@@ -87,17 +81,10 @@ struct afs_server *afs_find_server(struct afs_net *net,
                                                        (u32 __force)b->sin6_addr.s6_addr32[3]);
                                        if (diff == 0)
                                                goto found;
-                                       if (diff < 0) {
-                                               // TODO: Sort the list
-                                               //if (i == 0)
-                                               //      goto not_found;
-                                               break;
-                                       }
                                }
                        }
                }
 
-       //not_found:
                server = NULL;
        found:
                if (server && !atomic_inc_not_zero(&server->usage))
@@ -395,14 +382,16 @@ static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
        struct afs_addr_list *alist = rcu_access_pointer(server->addresses);
        struct afs_addr_cursor ac = {
                .alist  = alist,
-               .addr   = &alist->addrs[0],
                .start  = alist->index,
-               .index  = alist->index,
+               .index  = 0,
+               .addr   = &alist->addrs[alist->index],
                .error  = 0,
        };
        _enter("%p", server);
 
-       afs_fs_give_up_all_callbacks(net, server, &ac, NULL);
+       if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
+               afs_fs_give_up_all_callbacks(net, server, &ac, NULL);
+
        call_rcu(&server->rcu, afs_server_rcu);
        afs_dec_servers_outstanding(net);
 }
index 0f8dc4c..8a5760a 100644 (file)
@@ -49,6 +49,7 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell,
                goto error;
 
        refcount_set(&slist->usage, 1);
+       rwlock_init(&slist->lock);
 
        /* Make sure a records exists for each server in the list. */
        for (i = 0; i < vldb->nr_servers; i++) {
@@ -64,9 +65,11 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell,
                        goto error_2;
                }
 
-               /* Insertion-sort by server pointer */
+               /* Insertion-sort by UUID */
                for (j = 0; j < slist->nr_servers; j++)
-                       if (slist->servers[j].server >= server)
+                       if (memcmp(&slist->servers[j].server->uuid,
+                                  &server->uuid,
+                                  sizeof(server->uuid)) >= 0)
                                break;
                if (j < slist->nr_servers) {
                        if (slist->servers[j].server == server) {
index 65081ec..9e5d796 100644 (file)
@@ -590,7 +590,7 @@ static void afs_i_init_once(void *_vnode)
        memset(vnode, 0, sizeof(*vnode));
        inode_init_once(&vnode->vfs_inode);
        mutex_init(&vnode->io_lock);
-       mutex_init(&vnode->validate_lock);
+       init_rwsem(&vnode->validate_lock);
        spin_lock_init(&vnode->wb_lock);
        spin_lock_init(&vnode->lock);
        INIT_LIST_HEAD(&vnode->wb_keys);
@@ -688,7 +688,7 @@ static int afs_statfs(struct dentry *dentry, struct kstatfs *buf)
        if (afs_begin_vnode_operation(&fc, vnode, key)) {
                fc.flags |= AFS_FS_CURSOR_NO_VSLEEP;
                while (afs_select_fileserver(&fc)) {
-                       fc.cb_break = vnode->cb_break + vnode->cb_s_break;
+                       fc.cb_break = afs_calc_vnode_cb_break(vnode);
                        afs_fs_get_volume_status(&fc, &vs);
                }
 
index c164698..8b39e6e 100644 (file)
@@ -351,7 +351,7 @@ found_key:
        ret = -ERESTARTSYS;
        if (afs_begin_vnode_operation(&fc, vnode, wbk->key)) {
                while (afs_select_fileserver(&fc)) {
-                       fc.cb_break = vnode->cb_break + vnode->cb_s_break;
+                       fc.cb_break = afs_calc_vnode_cb_break(vnode);
                        afs_fs_store_data(&fc, mapping, first, last, offset, to);
                }
 
index e2f16b6..51b5e2d 100644 (file)
@@ -3142,7 +3142,11 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
        struct rb_node *node;
        int ret = 0;
 
+       spin_lock(&root->fs_info->trans_lock);
        cur_trans = root->fs_info->running_transaction;
+       if (cur_trans)
+               refcount_inc(&cur_trans->use_count);
+       spin_unlock(&root->fs_info->trans_lock);
        if (!cur_trans)
                return 0;
 
@@ -3151,6 +3155,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
        head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
        if (!head) {
                spin_unlock(&delayed_refs->lock);
+               btrfs_put_transaction(cur_trans);
                return 0;
        }
 
@@ -3167,6 +3172,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
                mutex_lock(&head->mutex);
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref_head(head);
+               btrfs_put_transaction(cur_trans);
                return -EAGAIN;
        }
        spin_unlock(&delayed_refs->lock);
@@ -3199,6 +3205,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
        }
        spin_unlock(&head->lock);
        mutex_unlock(&head->mutex);
+       btrfs_put_transaction(cur_trans);
        return ret;
 }
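
The running transaction can be freed by another thread once trans_lock is dropped, so the pointer is only safe to use if a reference is taken while the lock is still held, and every early return has to drop that reference again. A minimal userspace model of the pattern using a mutex and a plain refcount (illustrative only; build with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct transaction {
        int use_count;
};

static pthread_mutex_t trans_lock = PTHREAD_MUTEX_INITIALIZER;
static struct transaction *running_transaction;

/* Grab the running transaction and pin it while the lock is held, so it
 * cannot go away between "look at the pointer" and "use the object". */
static struct transaction *get_running_transaction(void)
{
        struct transaction *t;

        pthread_mutex_lock(&trans_lock);
        t = running_transaction;
        if (t)
                t->use_count++;
        pthread_mutex_unlock(&trans_lock);
        return t;
}

static void put_transaction(struct transaction *t)
{
        pthread_mutex_lock(&trans_lock);
        if (--t->use_count == 0) {
                if (running_transaction == t)
                        running_transaction = NULL;
                pthread_mutex_unlock(&trans_lock);
                free(t);
                return;
        }
        pthread_mutex_unlock(&trans_lock);
}

int main(void)
{
        running_transaction = calloc(1, sizeof(*running_transaction));
        running_transaction->use_count = 1;     /* owned by the "commit" side */

        struct transaction *t = get_running_transaction();
        if (t) {
                /* ... inspect delayed refs here ... */
                put_transaction(t);             /* drop our pin on every path */
        }
        put_transaction(running_transaction);   /* the owner's reference */
        printf("done\n");
        return 0;
}
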
 
index 00b7d32..b041b94 100644 (file)
@@ -1841,7 +1841,7 @@ again:
                old_bytenr = btrfs_node_blockptr(parent, slot);
                blocksize = fs_info->nodesize;
                old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
-               btrfs_node_key_to_cpu(parent, &key, slot);
+               btrfs_node_key_to_cpu(parent, &first_key, slot);
 
                if (level <= max_level) {
                        eb = path->nodes[level];
index 221e5cd..c0074d2 100644 (file)
@@ -5236,6 +5236,10 @@ static int send_write_or_clone(struct send_ctx *sctx,
                len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
        }
 
+       if (offset >= sctx->cur_inode_size) {
+               ret = 0;
+               goto out;
+       }
        if (offset + len > sctx->cur_inode_size)
                len = sctx->cur_inode_size - offset;
        if (len == 0) {
index f85040d..cf0e45b 100644 (file)
@@ -70,69 +70,104 @@ static __le32 ceph_flags_sys2wire(u32 flags)
  */
 
 /*
- * Calculate the length sum of direct io vectors that can
- * be combined into one page vector.
+ * How many pages to get in one call to iov_iter_get_pages().  This
+ * determines the size of the on-stack array used as a buffer.
  */
-static size_t dio_get_pagev_size(const struct iov_iter *it)
+#define ITER_GET_BVECS_PAGES   64
+
+static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
+                               struct bio_vec *bvecs)
 {
-    const struct iovec *iov = it->iov;
-    const struct iovec *iovend = iov + it->nr_segs;
-    size_t size;
-
-    size = iov->iov_len - it->iov_offset;
-    /*
-     * An iov can be page vectored when both the current tail
-     * and the next base are page aligned.
-     */
-    while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) &&
-           (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) {
-        size += iov->iov_len;
-    }
-    dout("dio_get_pagevlen len = %zu\n", size);
-    return size;
+       size_t size = 0;
+       int bvec_idx = 0;
+
+       if (maxsize > iov_iter_count(iter))
+               maxsize = iov_iter_count(iter);
+
+       while (size < maxsize) {
+               struct page *pages[ITER_GET_BVECS_PAGES];
+               ssize_t bytes;
+               size_t start;
+               int idx = 0;
+
+               bytes = iov_iter_get_pages(iter, pages, maxsize - size,
+                                          ITER_GET_BVECS_PAGES, &start);
+               if (bytes < 0)
+                       return size ?: bytes;
+
+               iov_iter_advance(iter, bytes);
+               size += bytes;
+
+               for ( ; bytes; idx++, bvec_idx++) {
+                       struct bio_vec bv = {
+                               .bv_page = pages[idx],
+                               .bv_len = min_t(int, bytes, PAGE_SIZE - start),
+                               .bv_offset = start,
+                       };
+
+                       bvecs[bvec_idx] = bv;
+                       bytes -= bv.bv_len;
+                       start = 0;
+               }
+       }
+
+       return size;
 }
 
 /*
- * Allocate a page vector based on (@it, @nbytes).
- * The return value is the tuple describing a page vector,
- * that is (@pages, @page_align, @num_pages).
+ * iov_iter_get_pages() only considers one iov_iter segment, no matter
+ * what maxsize or maxpages are given.  For ITER_BVEC that is a single
+ * page.
+ *
+ * Attempt to get up to @maxsize bytes worth of pages from @iter.
+ * Return the number of bytes in the created bio_vec array, or an error.
  */
-static struct page **
-dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes,
-                   size_t *page_align, int *num_pages)
+static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
+                                   struct bio_vec **bvecs, int *num_bvecs)
 {
-       struct iov_iter tmp_it = *it;
-       size_t align;
-       struct page **pages;
-       int ret = 0, idx, npages;
+       struct bio_vec *bv;
+       size_t orig_count = iov_iter_count(iter);
+       ssize_t bytes;
+       int npages;
 
-       align = (unsigned long)(it->iov->iov_base + it->iov_offset) &
-               (PAGE_SIZE - 1);
-       npages = calc_pages_for(align, nbytes);
-       pages = kvmalloc(sizeof(*pages) * npages, GFP_KERNEL);
-       if (!pages)
-               return ERR_PTR(-ENOMEM);
+       iov_iter_truncate(iter, maxsize);
+       npages = iov_iter_npages(iter, INT_MAX);
+       iov_iter_reexpand(iter, orig_count);
 
-       for (idx = 0; idx < npages; ) {
-               size_t start;
-               ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes,
-                                        npages - idx, &start);
-               if (ret < 0)
-                       goto fail;
+       /*
+        * __iter_get_bvecs() may populate only part of the array -- zero it
+        * out.
+        */
+       bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
+       if (!bv)
+               return -ENOMEM;
 
-               iov_iter_advance(&tmp_it, ret);
-               nbytes -= ret;
-               idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE;
+       bytes = __iter_get_bvecs(iter, maxsize, bv);
+       if (bytes < 0) {
+               /*
+                * No pages were pinned -- just free the array.
+                */
+               kvfree(bv);
+               return bytes;
        }
 
-       BUG_ON(nbytes != 0);
-       *num_pages = npages;
-       *page_align = align;
-       dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align);
-       return pages;
-fail:
-       ceph_put_page_vector(pages, idx, false);
-       return ERR_PTR(ret);
+       *bvecs = bv;
+       *num_bvecs = npages;
+       return bytes;
+}
+
+static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
+{
+       int i;
+
+       for (i = 0; i < num_bvecs; i++) {
+               if (bvecs[i].bv_page) {
+                       if (should_dirty)
+                               set_page_dirty_lock(bvecs[i].bv_page);
+                       put_page(bvecs[i].bv_page);
+               }
+       }
+       kvfree(bvecs);
 }
 
 /*
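
The inner loop of __iter_get_bvecs turns "bytes starting at offset start within a run of pages" into per-page (page, offset, length) triples: the first segment is capped at PAGE_SIZE - start and every later one starts at offset zero. Just that arithmetic, as a standalone sketch with no kernel types:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Split `bytes` bytes that begin at `start` within the first page into
 * page-sized segments, the way the bvec-filling loop does. */
static int split_into_segments(size_t bytes, size_t start,
                               size_t *seg_len, int max_segs)
{
        int n = 0;

        while (bytes && n < max_segs) {
                size_t len = PAGE_SIZE - start;

                if (len > bytes)
                        len = bytes;
                seg_len[n++] = len;
                bytes -= len;
                start = 0;      /* only the first segment is misaligned */
        }
        return n;
}

int main(void)
{
        size_t segs[8];
        int n = split_into_segments(10000, 100, segs, 8);

        /* Expect 3996 + 4096 + 1908 = 10000. */
        for (int i = 0; i < n; i++)
                printf("segment %d: %zu bytes\n", i, segs[i]);
        return 0;
}
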
@@ -746,11 +781,12 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
        struct inode *inode = req->r_inode;
        struct ceph_aio_request *aio_req = req->r_priv;
        struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
-       int num_pages = calc_pages_for((u64)osd_data->alignment,
-                                      osd_data->length);
 
-       dout("ceph_aio_complete_req %p rc %d bytes %llu\n",
-            inode, rc, osd_data->length);
+       BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
+       BUG_ON(!osd_data->num_bvecs);
+
+       dout("ceph_aio_complete_req %p rc %d bytes %u\n",
+            inode, rc, osd_data->bvec_pos.iter.bi_size);
 
        if (rc == -EOLDSNAPC) {
                struct ceph_aio_work *aio_work;
@@ -768,9 +804,10 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
        } else if (!aio_req->write) {
                if (rc == -ENOENT)
                        rc = 0;
-               if (rc >= 0 && osd_data->length > rc) {
-                       int zoff = osd_data->alignment + rc;
-                       int zlen = osd_data->length - rc;
+               if (rc >= 0 && osd_data->bvec_pos.iter.bi_size > rc) {
+                       struct iov_iter i;
+                       int zlen = osd_data->bvec_pos.iter.bi_size - rc;
+
                        /*
                         * If read is satisfied by single OSD request,
                         * it can pass EOF. Otherwise read is within
@@ -785,13 +822,16 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
                                aio_req->total_len = rc + zlen;
                        }
 
-                       if (zlen > 0)
-                               ceph_zero_page_vector_range(zoff, zlen,
-                                                           osd_data->pages);
+                       iov_iter_bvec(&i, ITER_BVEC, osd_data->bvec_pos.bvecs,
+                                     osd_data->num_bvecs,
+                                     osd_data->bvec_pos.iter.bi_size);
+                       iov_iter_advance(&i, rc);
+                       iov_iter_zero(zlen, &i);
                }
        }
 
-       ceph_put_page_vector(osd_data->pages, num_pages, aio_req->should_dirty);
+       put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
+                 aio_req->should_dirty);
        ceph_osdc_put_request(req);
 
        if (rc < 0)
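
Short reads that crossed EOF are now zero-filled through an iterator: advance rc bytes into the bvec array, then zero the remaining zlen, instead of zeroing a page vector by offset and length. A hedged userspace analogue in which the bvec/iov_iter machinery is stood in by an array of (pointer, length) segments:

#include <stdio.h>
#include <string.h>

struct seg {
        unsigned char *ptr;
        size_t len;
};

/* Skip `advance` bytes into the segment array, then zero `zlen` bytes,
 * mirroring iov_iter_advance() + iov_iter_zero() over a bvec iterator. */
static void zero_tail(struct seg *segs, int nsegs, size_t advance, size_t zlen)
{
        for (int i = 0; i < nsegs && zlen; i++) {
                size_t skip = advance < segs[i].len ? advance : segs[i].len;
                size_t n = segs[i].len - skip;

                advance -= skip;
                if (n > zlen)
                        n = zlen;
                memset(segs[i].ptr + skip, 0, n);
                zlen -= n;
        }
}

int main(void)
{
        unsigned char a[8], b[8];

        memset(a, 0xff, sizeof(a));
        memset(b, 0xff, sizeof(b));

        struct seg segs[] = { { a, sizeof(a) }, { b, sizeof(b) } };

        /* A "read" returned 5 of the 16 requested bytes: zero the other 11. */
        zero_tail(segs, 2, 5, 16 - 5);

        printf("a[4]=%02x a[5]=%02x b[7]=%02x\n", a[4], a[5], b[7]);
        return 0;
}
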
@@ -879,7 +919,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_vino vino;
        struct ceph_osd_request *req;
-       struct page **pages;
+       struct bio_vec *bvecs;
        struct ceph_aio_request *aio_req = NULL;
        int num_pages = 0;
        int flags;
@@ -914,10 +954,14 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
        }
 
        while (iov_iter_count(iter) > 0) {
-               u64 size = dio_get_pagev_size(iter);
-               size_t start = 0;
+               u64 size = iov_iter_count(iter);
                ssize_t len;
 
+               if (write)
+                       size = min_t(u64, size, fsc->mount_options->wsize);
+               else
+                       size = min_t(u64, size, fsc->mount_options->rsize);
+
                vino = ceph_vino(inode);
                req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
                                            vino, pos, &size, 0,
@@ -933,18 +977,14 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
                        break;
                }
 
-               if (write)
-                       size = min_t(u64, size, fsc->mount_options->wsize);
-               else
-                       size = min_t(u64, size, fsc->mount_options->rsize);
-
-               len = size;
-               pages = dio_get_pages_alloc(iter, len, &start, &num_pages);
-               if (IS_ERR(pages)) {
+               len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
+               if (len < 0) {
                        ceph_osdc_put_request(req);
-                       ret = PTR_ERR(pages);
+                       ret = len;
                        break;
                }
+               if (len != size)
+                       osd_req_op_extent_update(req, 0, len);
 
                /*
                 * To simplify error handling, allow AIO when IO within i_size
@@ -977,8 +1017,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
                        req->r_mtime = mtime;
                }
 
-               osd_req_op_extent_osd_data_pages(req, 0, pages, len, start,
-                                                false, false);
+               osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
 
                if (aio_req) {
                        aio_req->total_len += len;
@@ -991,7 +1030,6 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
                        list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);
 
                        pos += len;
-                       iov_iter_advance(iter, len);
                        continue;
                }
 
@@ -1004,25 +1042,26 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
                        if (ret == -ENOENT)
                                ret = 0;
                        if (ret >= 0 && ret < len && pos + ret < size) {
+                               struct iov_iter i;
                                int zlen = min_t(size_t, len - ret,
                                                 size - pos - ret);
-                               ceph_zero_page_vector_range(start + ret, zlen,
-                                                           pages);
+
+                               iov_iter_bvec(&i, ITER_BVEC, bvecs, num_pages,
+                                             len);
+                               iov_iter_advance(&i, ret);
+                               iov_iter_zero(zlen, &i);
                                ret += zlen;
                        }
                        if (ret >= 0)
                                len = ret;
                }
 
-               ceph_put_page_vector(pages, num_pages, should_dirty);
-
+               put_bvecs(bvecs, num_pages, should_dirty);
                ceph_osdc_put_request(req);
                if (ret < 0)
                        break;
 
                pos += len;
-               iov_iter_advance(iter, len);
-
                if (!write && pos >= size)
                        break;
 
index 7e72348..315f7e6 100644 (file)
@@ -228,7 +228,15 @@ static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
 
 static bool ceph_vxattrcb_quota_exists(struct ceph_inode_info *ci)
 {
-       return (ci->i_max_files || ci->i_max_bytes);
+       bool ret = false;
+       spin_lock(&ci->i_ceph_lock);
+       if ((ci->i_max_files || ci->i_max_bytes) &&
+           ci->i_vino.snap == CEPH_NOSNAP &&
+           ci->i_snap_realm &&
+           ci->i_snap_realm->ino == ci->i_vino.ino)
+               ret = true;
+       spin_unlock(&ci->i_ceph_lock);
+       return ret;
 }
 
 static size_t ceph_vxattrcb_quota(struct ceph_inode_info *ci, char *val,
@@ -1008,14 +1016,19 @@ int __ceph_setxattr(struct inode *inode, const char *name,
        char *newval = NULL;
        struct ceph_inode_xattr *xattr = NULL;
        int required_blob_size;
+       bool check_realm = false;
        bool lock_snap_rwsem = false;
 
        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EROFS;
 
        vxattr = ceph_match_vxattr(inode, name);
-       if (vxattr && vxattr->readonly)
-               return -EOPNOTSUPP;
+       if (vxattr) {
+               if (vxattr->readonly)
+                       return -EOPNOTSUPP;
+               if (value && !strncmp(vxattr->name, "ceph.quota", 10))
+                       check_realm = true;
+       }
 
        /* pass any unhandled ceph.* xattrs through to the MDS */
        if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
@@ -1109,6 +1122,15 @@ do_sync_unlocked:
                err = -EBUSY;
        } else {
                err = ceph_sync_setxattr(inode, name, value, size, flags);
+               if (err >= 0 && check_realm) {
+                       /* check if snaprealm was created for quota inode */
+                       spin_lock(&ci->i_ceph_lock);
+                       if ((ci->i_max_files || ci->i_max_bytes) &&
+                           !(ci->i_snap_realm &&
+                             ci->i_snap_realm->ino == ci->i_vino.ino))
+                               err = -EOPNOTSUPP;
+                       spin_unlock(&ci->i_ceph_lock);
+               }
        }
 out:
        ceph_free_cap_flush(prealloc_cf);
index 741749a..5f132d5 100644 (file)
@@ -197,7 +197,7 @@ config CIFS_SMB311
 
 config CIFS_SMB_DIRECT
        bool "SMB Direct support (Experimental)"
-       depends on CIFS=m && INFINIBAND || CIFS=y && INFINIBAND=y
+       depends on CIFS=m && INFINIBAND && INFINIBAND_ADDR_TRANS || CIFS=y && INFINIBAND=y && INFINIBAND_ADDR_TRANS=y
        help
          Enables SMB Direct experimental support for SMB 3.0, 3.02 and 3.1.1.
          SMB Direct allows transferring SMB packets over RDMA. If unsure,
index f715609..5a5a015 100644 (file)
@@ -1047,6 +1047,18 @@ out:
        return rc;
 }
 
+/*
+ * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
+ * is a dummy operation.
+ */
+static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+{
+       cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
+                file, datasync);
+
+       return 0;
+}
+
 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
                                struct file *dst_file, loff_t destoff,
                                size_t len, unsigned int flags)
@@ -1181,6 +1193,7 @@ const struct file_operations cifs_dir_ops = {
        .copy_file_range = cifs_copy_file_range,
        .clone_file_range = cifs_clone_file_range,
        .llseek = generic_file_llseek,
+       .fsync = cifs_dir_fsync,
 };
 
 static void
index 6d3e40d..1529a08 100644 (file)
@@ -455,6 +455,9 @@ cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required)
                server->sign = true;
        }
 
+       if (cifs_rdma_enabled(server) && server->sign)
+               cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled");
+
        return 0;
 }
 
index e8830f0..7a10a5d 100644 (file)
@@ -1977,14 +1977,6 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                goto cifs_parse_mount_err;
        }
 
-#ifdef CONFIG_CIFS_SMB_DIRECT
-       if (vol->rdma && vol->sign) {
-               cifs_dbg(VFS, "Currently SMB direct doesn't support signing."
-                       " This is being fixed\n");
-               goto cifs_parse_mount_err;
-       }
-#endif
-
 #ifndef CONFIG_KEYS
        /* Muliuser mounts require CONFIG_KEYS support */
        if (vol->multiuser) {
@@ -2959,6 +2951,22 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
                }
        }
 
+       if (volume_info->seal) {
+               if (ses->server->vals->protocol_id == 0) {
+                       cifs_dbg(VFS,
+                                "SMB3 or later required for encryption\n");
+                       rc = -EOPNOTSUPP;
+                       goto out_fail;
+               } else if (tcon->ses->server->capabilities &
+                                       SMB2_GLOBAL_CAP_ENCRYPTION)
+                       tcon->seal = true;
+               else {
+                       cifs_dbg(VFS, "Encryption is not supported on share\n");
+                       rc = -EOPNOTSUPP;
+                       goto out_fail;
+               }
+       }
+
        /*
         * BB Do we need to wrap session_mutex around this TCon call and Unix
         * SetFS as we do on SessSetup and reconnect?
@@ -3007,22 +3015,6 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
                tcon->use_resilient = true;
        }
 
-       if (volume_info->seal) {
-               if (ses->server->vals->protocol_id == 0) {
-                       cifs_dbg(VFS,
-                                "SMB3 or later required for encryption\n");
-                       rc = -EOPNOTSUPP;
-                       goto out_fail;
-               } else if (tcon->ses->server->capabilities &
-                                       SMB2_GLOBAL_CAP_ENCRYPTION)
-                       tcon->seal = true;
-               else {
-                       cifs_dbg(VFS, "Encryption is not supported on share\n");
-                       rc = -EOPNOTSUPP;
-                       goto out_fail;
-               }
-       }
-
        /*
         * We can have only one retry value for a connection to a share so for
         * resources mounted more than once to the same server share the last
index 38ebf3f..9c6d95f 100644 (file)
@@ -252,9 +252,14 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
        wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
        wsize = min_t(unsigned int, wsize, server->max_write);
 #ifdef CONFIG_CIFS_SMB_DIRECT
-       if (server->rdma)
-               wsize = min_t(unsigned int,
+       if (server->rdma) {
+               if (server->sign)
+                       wsize = min_t(unsigned int,
+                               wsize, server->smbd_conn->max_fragmented_send_size);
+               else
+                       wsize = min_t(unsigned int,
                                wsize, server->smbd_conn->max_readwrite_size);
+       }
 #endif
        if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
                wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
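
With signing enabled on an SMB Direct connection the RDMA read/write path is not used (see the !server->sign checks added to smb2_new_read_req and smb2_async_writev further down), so the transfer size is bounded by the fragmented send/receive size rather than max_readwrite_size. A tiny sketch of how the competing limits combine (the numbers in main are made up):

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

/* Pick the write size for an SMB Direct connection: signed traffic cannot
 * use RDMA read/write, so it is bounded by the fragmented send size. */
static unsigned int negotiated_wsize(unsigned int requested,
                                     unsigned int max_write,
                                     int rdma, int sign,
                                     unsigned int max_fragmented_send,
                                     unsigned int max_readwrite)
{
        unsigned int wsize = min_u(requested, max_write);

        if (rdma)
                wsize = min_u(wsize, sign ? max_fragmented_send : max_readwrite);
        return wsize;
}

int main(void)
{
        /* Hypothetical limits, just to show which bound wins. */
        printf("unsigned: %u\n",
               negotiated_wsize(4 << 20, 8 << 20, 1, 0, 128 << 10, 1 << 20));
        printf("signed:   %u\n",
               negotiated_wsize(4 << 20, 8 << 20, 1, 1, 128 << 10, 1 << 20));
        return 0;
}
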
@@ -272,9 +277,14 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
        rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
        rsize = min_t(unsigned int, rsize, server->max_read);
 #ifdef CONFIG_CIFS_SMB_DIRECT
-       if (server->rdma)
-               rsize = min_t(unsigned int,
+       if (server->rdma) {
+               if (server->sign)
+                       rsize = min_t(unsigned int,
+                               rsize, server->smbd_conn->max_fragmented_recv_size);
+               else
+                       rsize = min_t(unsigned int,
                                rsize, server->smbd_conn->max_readwrite_size);
+       }
 #endif
 
        if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
@@ -579,9 +589,15 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
 
        SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
 
+       /*
+        * If ea_name is NULL (listxattr) and there are no EAs, return 0 as it's
+        * not an error. Otherwise, the specified ea_name was not found.
+        */
        if (!rc)
                rc = move_smb2_ea_to_cifs(ea_data, buf_size, smb2_data,
                                          SMB2_MAX_EA_BUF, ea_name);
+       else if (!ea_name && rc == -ENODATA)
+               rc = 0;
 
        kfree(smb2_data);
        return rc;
index 0f044c4..0f48741 100644 (file)
@@ -383,10 +383,10 @@ static void
 build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
 {
        pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES;
-       pneg_ctxt->DataLength = cpu_to_le16(6);
-       pneg_ctxt->CipherCount = cpu_to_le16(2);
-       pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
-       pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES128_CCM;
+       pneg_ctxt->DataLength = cpu_to_le16(4); /* Cipher Count + le16 cipher */
+       pneg_ctxt->CipherCount = cpu_to_le16(1);
+/* pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;*/ /* not supported yet */
+       pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_CCM;
 }
 
 static void
@@ -444,6 +444,7 @@ static int decode_encrypt_ctx(struct TCP_Server_Info *server,
                return -EINVAL;
        }
        server->cipher_type = ctxt->Ciphers[0];
+       server->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
        return 0;
 }
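
DataLength counts only the payload after the context header: two bytes for CipherCount plus two bytes per advertised cipher, which is why a single cipher gives 4 where two ciphers gave 6. A quick standalone check of that arithmetic (a simplified structure, not the on-the-wire definition):

#include <stdint.h>
#include <stdio.h>

/* Simplified model of the negotiate context body: a cipher count followed
 * by the cipher IDs themselves (the header fields are not counted). */
struct enc_ctx_body {
        uint16_t cipher_count;
        uint16_t ciphers[1];
};

static uint16_t enc_ctx_data_length(uint16_t ncipher)
{
        return (uint16_t)(sizeof(uint16_t) + ncipher * sizeof(uint16_t));
}

int main(void)
{
        /* One cipher (AES-128-CCM only) -> 2 + 2 = 4 bytes. */
        printf("one cipher:  DataLength = %u\n", enc_ctx_data_length(1));
        /* Two ciphers (CCM + GCM)       -> 2 + 4 = 6 bytes. */
        printf("two ciphers: DataLength = %u\n", enc_ctx_data_length(2));
        printf("sizeof(body with one cipher) = %zu\n", sizeof(struct enc_ctx_body));
        return 0;
}
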
 
@@ -729,19 +730,14 @@ neg_exit:
 
 int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
 {
-       int rc = 0;
-       struct validate_negotiate_info_req vneg_inbuf;
+       int rc;
+       struct validate_negotiate_info_req *pneg_inbuf;
        struct validate_negotiate_info_rsp *pneg_rsp = NULL;
        u32 rsplen;
        u32 inbuflen; /* max of 4 dialects */
 
        cifs_dbg(FYI, "validate negotiate\n");
 
-#ifdef CONFIG_CIFS_SMB_DIRECT
-       if (tcon->ses->server->rdma)
-               return 0;
-#endif
-
        /* In SMB3.11 preauth integrity supersedes validate negotiate */
        if (tcon->ses->server->dialect == SMB311_PROT_ID)
                return 0;
@@ -764,63 +760,69 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
        if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
                cifs_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");
 
-       vneg_inbuf.Capabilities =
+       pneg_inbuf = kmalloc(sizeof(*pneg_inbuf), GFP_NOFS);
+       if (!pneg_inbuf)
+               return -ENOMEM;
+
+       pneg_inbuf->Capabilities =
                        cpu_to_le32(tcon->ses->server->vals->req_capabilities);
-       memcpy(vneg_inbuf.Guid, tcon->ses->server->client_guid,
+       memcpy(pneg_inbuf->Guid, tcon->ses->server->client_guid,
                                        SMB2_CLIENT_GUID_SIZE);
 
        if (tcon->ses->sign)
-               vneg_inbuf.SecurityMode =
+               pneg_inbuf->SecurityMode =
                        cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
        else if (global_secflags & CIFSSEC_MAY_SIGN)
-               vneg_inbuf.SecurityMode =
+               pneg_inbuf->SecurityMode =
                        cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
        else
-               vneg_inbuf.SecurityMode = 0;
+               pneg_inbuf->SecurityMode = 0;
 
 
        if (strcmp(tcon->ses->server->vals->version_string,
                SMB3ANY_VERSION_STRING) == 0) {
-               vneg_inbuf.Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
-               vneg_inbuf.Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
-               vneg_inbuf.DialectCount = cpu_to_le16(2);
+               pneg_inbuf->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
+               pneg_inbuf->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
+               pneg_inbuf->DialectCount = cpu_to_le16(2);
                /* structure is big enough for 3 dialects, sending only 2 */
-               inbuflen = sizeof(struct validate_negotiate_info_req) - 2;
+               inbuflen = sizeof(*pneg_inbuf) -
+                               sizeof(pneg_inbuf->Dialects[0]);
        } else if (strcmp(tcon->ses->server->vals->version_string,
                SMBDEFAULT_VERSION_STRING) == 0) {
-               vneg_inbuf.Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
-               vneg_inbuf.Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
-               vneg_inbuf.Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
-               vneg_inbuf.DialectCount = cpu_to_le16(3);
+               pneg_inbuf->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
+               pneg_inbuf->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
+               pneg_inbuf->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
+               pneg_inbuf->DialectCount = cpu_to_le16(3);
                /* structure is big enough for 3 dialects */
-               inbuflen = sizeof(struct validate_negotiate_info_req);
+               inbuflen = sizeof(*pneg_inbuf);
        } else {
                /* otherwise specific dialect was requested */
-               vneg_inbuf.Dialects[0] =
+               pneg_inbuf->Dialects[0] =
                        cpu_to_le16(tcon->ses->server->vals->protocol_id);
-               vneg_inbuf.DialectCount = cpu_to_le16(1);
+               pneg_inbuf->DialectCount = cpu_to_le16(1);
                /* structure is big enough for 3 dialects, sending only 1 */
-               inbuflen = sizeof(struct validate_negotiate_info_req) - 4;
+               inbuflen = sizeof(*pneg_inbuf) -
+                               sizeof(pneg_inbuf->Dialects[0]) * 2;
        }
 
        rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
                FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
-               (char *)&vneg_inbuf, sizeof(struct validate_negotiate_info_req),
-               (char **)&pneg_rsp, &rsplen);
+               (char *)pneg_inbuf, inbuflen, (char **)&pneg_rsp, &rsplen);
 
        if (rc != 0) {
                cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc);
-               return -EIO;
+               rc = -EIO;
+               goto out_free_inbuf;
        }
 
-       if (rsplen != sizeof(struct validate_negotiate_info_rsp)) {
+       rc = -EIO;
+       if (rsplen != sizeof(*pneg_rsp)) {
                cifs_dbg(VFS, "invalid protocol negotiate response size: %d\n",
                         rsplen);
 
                /* relax check since Mac returns max bufsize allowed on ioctl */
-               if ((rsplen > CIFSMaxBufSize)
-                    || (rsplen < sizeof(struct validate_negotiate_info_rsp)))
-                       goto err_rsp_free;
+               if (rsplen > CIFSMaxBufSize || rsplen < sizeof(*pneg_rsp))
+                       goto out_free_rsp;
        }
 
        /* check validate negotiate info response matches what we got earlier */
@@ -837,15 +839,17 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
                goto vneg_out;
 
        /* validate negotiate successful */
+       rc = 0;
        cifs_dbg(FYI, "validate negotiate info successful\n");
-       kfree(pneg_rsp);
-       return 0;
+       goto out_free_rsp;
 
 vneg_out:
        cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n");
-err_rsp_free:
+out_free_rsp:
        kfree(pneg_rsp);
-       return -EIO;
+out_free_inbuf:
+       kfree(pneg_inbuf);
+       return rc;
 }
 
 enum securityEnum
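
Besides moving the request off the stack onto the heap, the ioctl now sends the trimmed inbuflen that matches the advertised dialect count instead of always sending the full three-dialect structure, and both error paths free the allocation. A small sketch of the length computation (simplified structure, illustrative field names):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in: room for up to three 16-bit dialect IDs. */
struct vneg_req {
        uint32_t capabilities;
        uint8_t  guid[16];
        uint16_t security_mode;
        uint16_t dialect_count;
        uint16_t dialects[3];
};

/* Bytes actually sent: the fixed part plus one slot per advertised dialect. */
static size_t vneg_req_len(unsigned ndialects)
{
        return sizeof(struct vneg_req) - (3 - ndialects) * sizeof(uint16_t);
}

int main(void)
{
        printf("full struct         : %zu bytes\n", sizeof(struct vneg_req));
        printf("3 dialects (default): %zu bytes\n", vneg_req_len(3));
        printf("2 dialects (smb3)   : %zu bytes\n", vneg_req_len(2));
        printf("1 dialect  (forced) : %zu bytes\n", vneg_req_len(1));
        return 0;
}
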
@@ -2590,7 +2594,7 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
         * If we want to do a RDMA write, fill in and append
         * smbd_buffer_descriptor_v1 to the end of read request
         */
-       if (server->rdma && rdata &&
+       if (server->rdma && rdata && !server->sign &&
                rdata->bytes >= server->smbd_conn->rdma_readwrite_threshold) {
 
                struct smbd_buffer_descriptor_v1 *v1;
@@ -2968,7 +2972,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
         * If we want to do a server RDMA read, fill in and append
         * smbd_buffer_descriptor_v1 to the end of write request
         */
-       if (server->rdma && wdata->bytes >=
+       if (server->rdma && !server->sign && wdata->bytes >=
                server->smbd_conn->rdma_readwrite_threshold) {
 
                struct smbd_buffer_descriptor_v1 *v1;
index 6093e51..d28f358 100644 (file)
@@ -297,7 +297,7 @@ struct smb2_encryption_neg_context {
        __le16  DataLength;
        __le32  Reserved;
        __le16  CipherCount; /* AES-128-GCM and AES-128-CCM */
-       __le16  Ciphers[2]; /* Ciphers[0] since only one used now */
+       __le16  Ciphers[1]; /* Ciphers[0] since only one used now */
 } __packed;
 
 struct smb2_negotiate_rsp {
index 87817dd..c62f7c9 100644 (file)
@@ -2086,7 +2086,7 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
        int start, i, j;
        int max_iov_size =
                info->max_send_size - sizeof(struct smbd_data_transfer);
-       struct kvec iov[SMBDIRECT_MAX_SGE];
+       struct kvec *iov;
        int rc;
 
        info->smbd_send_pending++;
@@ -2096,32 +2096,20 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
        }
 
        /*
-        * This usually means a configuration error
-        * We use RDMA read/write for packet size > rdma_readwrite_threshold
-        * as long as it's properly configured we should never get into this
-        * situation
-        */
-       if (rqst->rq_nvec + rqst->rq_npages > SMBDIRECT_MAX_SGE) {
-               log_write(ERR, "maximum send segment %x exceeding %x\n",
-                        rqst->rq_nvec + rqst->rq_npages, SMBDIRECT_MAX_SGE);
-               rc = -EINVAL;
-               goto done;
-       }
-
-       /*
-        * Remove the RFC1002 length defined in MS-SMB2 section 2.1
-        * It is used only for TCP transport
+        * Skip the RFC1002 length defined in MS-SMB2 section 2.1
+        * It is used only for TCP transport in the iov[0]
         * In future we may want to add a transport layer under protocol
         * layer so this will only be issued to TCP transport
         */
-       iov[0].iov_base = (char *)rqst->rq_iov[0].iov_base + 4;
-       iov[0].iov_len = rqst->rq_iov[0].iov_len - 4;
-       buflen += iov[0].iov_len;
+
+       if (rqst->rq_iov[0].iov_len != 4) {
+               log_write(ERR, "expected the pdu length in 1st iov, but got %zu\n", rqst->rq_iov[0].iov_len);
+               return -EINVAL;
+       }
+       iov = &rqst->rq_iov[1];
 
        /* total up iov array first */
-       for (i = 1; i < rqst->rq_nvec; i++) {
-               iov[i].iov_base = rqst->rq_iov[i].iov_base;
-               iov[i].iov_len = rqst->rq_iov[i].iov_len;
+       for (i = 0; i < rqst->rq_nvec-1; i++) {
                buflen += iov[i].iov_len;
        }
 
@@ -2198,14 +2186,14 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
                                                goto done;
                                }
                                i++;
-                               if (i == rqst->rq_nvec)
+                               if (i == rqst->rq_nvec-1)
                                        break;
                        }
                        start = i;
                        buflen = 0;
                } else {
                        i++;
-                       if (i == rqst->rq_nvec) {
+                       if (i == rqst->rq_nvec-1) {
                                /* send out all remaining vecs */
                                remaining_data_length -= buflen;
                                log_write(INFO,
index 8f6f259..927226a 100644 (file)
@@ -753,7 +753,7 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
                goto out;
 
 #ifdef CONFIG_CIFS_SMB311
-       if (ses->status == CifsNew)
+       if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
                smb311_update_preauth_hash(ses, rqst->rq_iov+1,
                                           rqst->rq_nvec-1);
 #endif
@@ -798,7 +798,7 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
                *resp_buf_type = CIFS_SMALL_BUFFER;
 
 #ifdef CONFIG_CIFS_SMB311
-       if (ses->status == CifsNew) {
+       if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
                struct kvec iov = {
                        .iov_base = buf + 4,
                        .iov_len = get_rfc1002_length(buf)
@@ -834,8 +834,11 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
        if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
                new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1),
                                  GFP_KERNEL);
-               if (!new_iov)
+               if (!new_iov) {
+                       /* otherwise cifs_send_recv below sets resp_buf_type */
+                       *resp_buf_type = CIFS_NO_BUFFER;
                        return -ENOMEM;
+               }
        } else
                new_iov = s_iov;
 
index a33d8fb..508b905 100644 (file)
@@ -321,6 +321,7 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_grpblk_t offset;
        ext4_grpblk_t next_zero_bit;
+       ext4_grpblk_t max_bit = EXT4_CLUSTERS_PER_GROUP(sb);
        ext4_fsblk_t blk;
        ext4_fsblk_t group_first_block;
 
@@ -338,7 +339,7 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
        /* check whether block bitmap block number is set */
        blk = ext4_block_bitmap(sb, desc);
        offset = blk - group_first_block;
-       if (offset < 0 || EXT4_B2C(sbi, offset) >= sb->s_blocksize ||
+       if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
            !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
                /* bad block bitmap */
                return blk;
@@ -346,7 +347,7 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
        /* check whether the inode bitmap block number is set */
        blk = ext4_inode_bitmap(sb, desc);
        offset = blk - group_first_block;
-       if (offset < 0 || EXT4_B2C(sbi, offset) >= sb->s_blocksize ||
+       if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
            !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
                /* bad block bitmap */
                return blk;
@@ -354,8 +355,8 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
        /* check whether the inode table block number is set */
        blk = ext4_inode_table(sb, desc);
        offset = blk - group_first_block;
-       if (offset < 0 || EXT4_B2C(sbi, offset) >= sb->s_blocksize ||
-           EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= sb->s_blocksize)
+       if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
+           EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= max_bit)
                return blk;
        next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
                        EXT4_B2C(sbi, offset + sbi->s_itb_per_group),
index 0a73159..c969275 100644 (file)
@@ -5329,8 +5329,9 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
        stop = le32_to_cpu(extent->ee_block);
 
        /*
-        * In case of left shift, Don't start shifting extents until we make
-        * sure the hole is big enough to accommodate the shift.
+       * For left shifts, make sure the hole on the left is big enough to
+       * accommodate the shift.  For right shifts, make sure the last extent
+       * won't be shifted beyond EXT_MAX_BLOCKS.
        */
        if (SHIFT == SHIFT_LEFT) {
                path = ext4_find_extent(inode, start - 1, &path,
@@ -5350,9 +5351,14 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
 
                if ((start == ex_start && shift > ex_start) ||
                    (shift > start - ex_end)) {
-                       ext4_ext_drop_refs(path);
-                       kfree(path);
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto out;
+               }
+       } else {
+               if (shift > EXT_MAX_BLOCKS -
+                   (stop + ext4_ext_get_actual_len(extent))) {
+                       ret = -EINVAL;
+                       goto out;
                }
        }
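
The new right-shift guard is phrased as "shift > EXT_MAX_BLOCKS - (last_block + len)" rather than adding the shift to the block number, presumably so the comparison itself cannot wrap a 32-bit logical block number. A tiny worked example of the same guard (values are hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXT_MAX_BLOCKS 0xffffffffu      /* logical blocks are 32-bit in ext4 */

/* Reject a right shift that would push the end of the last extent past
 * EXT_MAX_BLOCKS.  Computing the headroom first avoids 32-bit overflow. */
static bool right_shift_ok(uint32_t last_block, uint32_t len, uint32_t shift)
{
        return shift <= EXT_MAX_BLOCKS - (last_block + len);
}

int main(void)
{
        /* Last extent covers blocks [0xfffff000, 0xfffff100). */
        uint32_t last = 0xfffff000u, len = 0x100;

        printf("shift by 0x100   -> %s\n",
               right_shift_ok(last, len, 0x100) ? "ok" : "rejected");
        printf("shift by 0x10000 -> %s\n",
               right_shift_ok(last, len, 0x10000) ? "ok" : "rejected");
        return 0;
}
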
 
index 185f7e6..eb104e8 100644 (file)
@@ -5886,5 +5886,6 @@ static void __exit ext4_exit_fs(void)
 MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
 MODULE_DESCRIPTION("Fourth Extended Filesystem");
 MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: crc32c");
 module_init(ext4_init_fs)
 module_exit(ext4_exit_fs)
index 47d7c15..471d863 100644 (file)
@@ -1961,7 +1961,7 @@ void wb_workfn(struct work_struct *work)
        }
 
        if (!list_empty(&wb->work_list))
-               mod_delayed_work(bdi_wq, &wb->dwork, 0);
+               wb_wakeup(wb);
        else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
                wb_wakeup_delayed(wb);
 
index ac31103..8aa4537 100644 (file)
@@ -532,6 +532,7 @@ int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
         */
        ret = start_this_handle(journal, handle, GFP_NOFS);
        if (ret < 0) {
+               handle->h_journal = journal;
                jbd2_journal_free_reserved(handle);
                return ret;
        }
index 01c6b38..7869622 100644 (file)
@@ -4250,10 +4250,11 @@ out:
 static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
                         struct dentry *new_dentry, bool preserve)
 {
-       int error;
+       int error, had_lock;
        struct inode *inode = d_inode(old_dentry);
        struct buffer_head *old_bh = NULL;
        struct inode *new_orphan_inode = NULL;
+       struct ocfs2_lock_holder oh;
 
        if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
                return -EOPNOTSUPP;
@@ -4295,6 +4296,14 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
                goto out;
        }
 
+       had_lock = ocfs2_inode_lock_tracker(new_orphan_inode, NULL, 1,
+                                           &oh);
+       if (had_lock < 0) {
+               error = had_lock;
+               mlog_errno(error);
+               goto out;
+       }
+
        /* If the security isn't preserved, we need to re-initialize them. */
        if (!preserve) {
                error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
@@ -4302,14 +4311,15 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
                if (error)
                        mlog_errno(error);
        }
-out:
        if (!error) {
                error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
                                                       new_dentry);
                if (error)
                        mlog_errno(error);
        }
+       ocfs2_inode_unlock_tracker(new_orphan_inode, 1, &oh, had_lock);
 
+out:
        if (new_orphan_inode) {
                /*
                 * We need to open_unlock the inode no matter whether we
index 1b2ede6..1a76d75 100644 (file)
@@ -261,7 +261,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
         * Inherently racy -- command line shares address space
         * with code and data.
         */
-       rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0);
+       rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_ANON);
        if (rv <= 0)
                goto out_free_page;
 
@@ -279,7 +279,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
                        int nr_read;
 
                        _count = min3(count, len, PAGE_SIZE);
-                       nr_read = access_remote_vm(mm, p, page, _count, 0);
+                       nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
                        if (nr_read < 0)
                                rv = nr_read;
                        if (nr_read <= 0)
@@ -325,7 +325,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
                                bool final;
 
                                _count = min3(count, len, PAGE_SIZE);
-                               nr_read = access_remote_vm(mm, p, page, _count, 0);
+                               nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
                                if (nr_read < 0)
                                        rv = nr_read;
                                if (nr_read <= 0)
@@ -946,7 +946,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
                max_len = min_t(size_t, PAGE_SIZE, count);
                this_len = min(max_len, this_len);
 
-               retval = access_remote_vm(mm, (env_start + src), page, this_len, 0);
+               retval = access_remote_vm(mm, (env_start + src), page, this_len, FOLL_ANON);
 
                if (retval <= 0) {
                        ret = retval;
index d1e8276..e64ecb9 100644 (file)
@@ -209,25 +209,34 @@ kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
 {
        struct list_head *head = (struct list_head *)arg;
        struct kcore_list *ent;
+       struct page *p;
+
+       if (!pfn_valid(pfn))
+               return 1;
+
+       p = pfn_to_page(pfn);
+       if (!memmap_valid_within(pfn, p, page_zone(p)))
+               return 1;
 
        ent = kmalloc(sizeof(*ent), GFP_KERNEL);
        if (!ent)
                return -ENOMEM;
-       ent->addr = (unsigned long)__va((pfn << PAGE_SHIFT));
+       ent->addr = (unsigned long)page_to_virt(p);
        ent->size = nr_pages << PAGE_SHIFT;
 
-       /* Sanity check: Can happen in 32bit arch...maybe */
-       if (ent->addr < (unsigned long) __va(0))
+       if (!virt_addr_valid(ent->addr))
                goto free_out;
 
        /* cut not-mapped area. ....from ppc-32 code. */
        if (ULONG_MAX - ent->addr < ent->size)
                ent->size = ULONG_MAX - ent->addr;
 
-       /* cut when vmalloc() area is higher than direct-map area */
-       if (VMALLOC_START > (unsigned long)__va(0)) {
-               if (ent->addr > VMALLOC_START)
-                       goto free_out;
+       /*
+        * We've already checked virt_addr_valid so we know this address
+        * is a valid pointer, so we can check against it to determine
+        * whether we need to trim.
+        */
+       if (VMALLOC_START > ent->addr) {
                if (VMALLOC_START - ent->addr < ent->size)
                        ent->size = VMALLOC_START - ent->addr;
        }
index ce4a34a..35a1244 100644 (file)
@@ -511,7 +511,14 @@ xfs_attr_shortform_addname(xfs_da_args_t *args)
                if (args->flags & ATTR_CREATE)
                        return retval;
                retval = xfs_attr_shortform_remove(args);
-               ASSERT(retval == 0);
+               if (retval)
+                       return retval;
+               /*
+                * Since we have removed the old attr, clear ATTR_REPLACE so
+                * that the leaf format add routine won't trip over the attr
+                * not being around.
+                */
+               args->flags &= ~ATTR_REPLACE;
        }
 
        if (args->namelen >= XFS_ATTR_SF_ENTSIZE_MAX ||
index 6a7c2f0..040eeda 100644 (file)
@@ -725,12 +725,16 @@ xfs_bmap_extents_to_btree(
        *logflagsp = 0;
        if ((error = xfs_alloc_vextent(&args))) {
                xfs_iroot_realloc(ip, -1, whichfork);
+               ASSERT(ifp->if_broot == NULL);
+               XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
                xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
                return error;
        }
 
        if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
                xfs_iroot_realloc(ip, -1, whichfork);
+               ASSERT(ifp->if_broot == NULL);
+               XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
                xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
                return -ENOSPC;
        }
index ef68b1d..1201107 100644 (file)
@@ -466,6 +466,8 @@ xfs_dinode_verify(
                                return __this_address;
                        if (di_size > XFS_DFORK_DSIZE(dip, mp))
                                return __this_address;
+                       if (dip->di_nextents)
+                               return __this_address;
                        /* fall through */
                case XFS_DINODE_FMT_EXTENTS:
                case XFS_DINODE_FMT_BTREE:
@@ -484,12 +486,31 @@ xfs_dinode_verify(
        if (XFS_DFORK_Q(dip)) {
                switch (dip->di_aformat) {
                case XFS_DINODE_FMT_LOCAL:
+                       if (dip->di_anextents)
+                               return __this_address;
+               /* fall through */
                case XFS_DINODE_FMT_EXTENTS:
                case XFS_DINODE_FMT_BTREE:
                        break;
                default:
                        return __this_address;
                }
+       } else {
+               /*
+                * If there is no fork offset, this may be a freshly-made inode
+                * in a new disk cluster, in which case di_aformat is zeroed.
+                * Otherwise, such an inode must be in EXTENTS format; this goes
+                * for freed inodes as well.
+                */
+               switch (dip->di_aformat) {
+               case 0:
+               case XFS_DINODE_FMT_EXTENTS:
+                       break;
+               default:
+                       return __this_address;
+               }
+               if (dip->di_anextents)
+                       return __this_address;
        }
 
        /* only version 3 or greater inodes are extensively verified here */
index 299aee4..e70fb8c 100644 (file)
@@ -778,22 +778,26 @@ xfs_file_fallocate(
                if (error)
                        goto out_unlock;
        } else if (mode & FALLOC_FL_INSERT_RANGE) {
-               unsigned int blksize_mask = i_blocksize(inode) - 1;
+               unsigned int    blksize_mask = i_blocksize(inode) - 1;
+               loff_t          isize = i_size_read(inode);
 
-               new_size = i_size_read(inode) + len;
                if (offset & blksize_mask || len & blksize_mask) {
                        error = -EINVAL;
                        goto out_unlock;
                }
 
-               /* check the new inode size does not wrap through zero */
-               if (new_size > inode->i_sb->s_maxbytes) {
+               /*
+                * New inode size must not exceed ->s_maxbytes, accounting for
+                * possible signed overflow.
+                */
+               if (inode->i_sb->s_maxbytes - isize < len) {
                        error = -EFBIG;
                        goto out_unlock;
                }
+               new_size = isize + len;
 
                /* Offset should be less than i_size */
-               if (offset >= i_size_read(inode)) {
+               if (offset >= isize) {
                        error = -EINVAL;
                        goto out_unlock;
                }
@@ -876,8 +880,18 @@ xfs_file_dedupe_range(
        struct file     *dst_file,
        u64             dst_loff)
 {
+       struct inode    *srci = file_inode(src_file);
+       u64             max_dedupe;
        int             error;
 
+       /*
+        * Since we have to read all these pages in to compare them, cut
+        * it off at MAX_RW_COUNT/2 rounded down to the nearest block.
+        * That means we won't do more than MAX_RW_COUNT IO per request.
+        */
+       max_dedupe = (MAX_RW_COUNT >> 1) & ~(i_blocksize(srci) - 1);
+       if (len > max_dedupe)
+               len = max_dedupe;
        error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff,
                                     len, true);
        if (error)
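
The FALLOC_FL_INSERT_RANGE hunk above rearranges the size check so the addition can never overflow. A standalone sketch of why the order matters, with illustrative values (loff_t is a signed 64-bit type, so isize + len can wrap):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t maxbytes = INT64_MAX;       /* inode->i_sb->s_maxbytes on many filesystems */
	int64_t isize    = INT64_MAX - 10;  /* current i_size, already near the limit */
	int64_t len      = 4096;            /* requested insert length */

	/* The old form, isize + len > maxbytes, overflows a signed 64-bit
	 * value here (undefined behaviour) and can wrap negative, letting
	 * the oversized request through.  The rearranged form cannot wrap: */
	if (maxbytes - isize < len)
		puts("-EFBIG: new size would exceed s_maxbytes");
	return 0;
}
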
index 278841c..af24057 100644 (file)
 #endif
 
 #ifdef CONFIG_SERIAL_EARLYCON
-#define EARLYCON_TABLE() STRUCT_ALIGN();                       \
+#define EARLYCON_TABLE() . = ALIGN(8);                         \
                         VMLINUX_SYMBOL(__earlycon_table) = .;  \
                         KEEP(*(__earlycon_table))              \
                         VMLINUX_SYMBOL(__earlycon_table_end) = .;
index 86e3ec6..90ec780 100644 (file)
@@ -76,7 +76,7 @@
 #define I2C6           63
 #define USART1         64
 #define RTCAPB         65
-#define TZC            66
+#define TZC1           66
 #define TZPC           67
 #define IWDG1          68
 #define BSEC           69
 #define CRC1           110
 #define USBH           111
 #define ETHSTP         112
+#define TZC2           113
 
 /* Kernel clocks */
 #define SDMMC1_K       118
 #define CK_MCO2                212
 
 /* TRACE & DEBUG clocks */
-#define DBG            213
 #define CK_DBG         214
 #define CK_TRACE       215
 
index e518e4e..4b15481 100644 (file)
@@ -37,10 +37,15 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
         * Our PSCI implementation stays the same across versions from
         * v0.2 onward, only adding the few mandatory functions (such
         * as FEATURES with 1.0) that are required by newer
-        * revisions. It is thus safe to return the latest.
+        * revisions. It is thus safe to return the latest, unless
+        * userspace has instructed us otherwise.
         */
-       if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
+       if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) {
+               if (vcpu->kvm->arch.psci_version)
+                       return vcpu->kvm->arch.psci_version;
+
                return KVM_ARM_PSCI_LATEST;
+       }
 
        return KVM_ARM_PSCI_0_1;
 }
@@ -48,4 +53,11 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
 
 int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
 
+struct kvm_one_reg;
+
+int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu);
+int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
+int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
+
 #endif /* __KVM_ARM_PSCI_H__ */
index 24f0394..e7efe12 100644 (file)
@@ -131,6 +131,7 @@ struct vgic_irq {
                u32 mpidr;                      /* GICv3 target VCPU */
        };
        u8 source;                      /* GICv2 SGIs only */
+       u8 active_source;               /* GICv2 SGIs only */
        u8 priority;
        enum vgic_irq_config config;    /* Level or edge */
 
index e3986f4..ebc34a5 100644 (file)
@@ -9,6 +9,9 @@
 struct blk_mq_tags;
 struct blk_flush_queue;
 
+/**
+ * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware block device
+ */
 struct blk_mq_hw_ctx {
        struct {
                spinlock_t              lock;
index 9af3e0f..5c4eee0 100644 (file)
@@ -605,6 +605,11 @@ struct request_queue {
         * initialized by the low level device driver (e.g. scsi/sd.c).
         * Stacking drivers (device mappers) may or may not initialize
         * these fields.
+        *
+        * Reads of this information must be protected with blk_queue_enter() /
+        * blk_queue_exit(). Modifying this information is only allowed while
+        * no requests are being processed. See also blk_mq_freeze_queue() and
+        * blk_mq_unfreeze_queue().
         */
        unsigned int            nr_zones;
        unsigned long           *seq_zones_bitmap;
@@ -737,6 +742,7 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
 #define blk_queue_quiesced(q)  test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
 #define blk_queue_preempt_only(q)                              \
        test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)
+#define blk_queue_fua(q)       test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
 
 extern int blk_set_preempt_only(struct request_queue *q);
 extern void blk_clear_preempt_only(struct request_queue *q);
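
A sketch of what the new nr_zones locking comment asks of readers; the helper name read_nr_zones() is made up for illustration and error handling is kept minimal:

#include <linux/blkdev.h>

static unsigned int read_nr_zones(struct request_queue *q)
{
	unsigned int nr;

	if (blk_queue_enter(q, 0))	/* fails if the queue is going away */
		return 0;
	nr = q->nr_zones;		/* stable: no freeze can complete while we hold the ref */
	blk_queue_exit(q);
	return nr;
}
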
index 95a7abd..469b20e 100644 (file)
@@ -31,6 +31,7 @@ struct bpf_map_ops {
        void (*map_release)(struct bpf_map *map, struct file *map_file);
        void (*map_free)(struct bpf_map *map);
        int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
+       void (*map_release_uref)(struct bpf_map *map);
 
        /* funcs callable from userspace and from eBPF programs */
        void *(*map_lookup_elem)(struct bpf_map *map, void *key);
@@ -339,8 +340,8 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
 void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
                                struct bpf_prog *old_prog);
 int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
-                            __u32 __user *prog_ids, u32 request_cnt,
-                            __u32 __user *prog_cnt);
+                            u32 *prog_ids, u32 request_cnt,
+                            u32 *prog_cnt);
 int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
                        struct bpf_prog *exclude_prog,
                        struct bpf_prog *include_prog,
@@ -351,6 +352,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
                struct bpf_prog **_prog, *__prog;       \
                struct bpf_prog_array *_array;          \
                u32 _ret = 1;                           \
+               preempt_disable();                      \
                rcu_read_lock();                        \
                _array = rcu_dereference(array);        \
                if (unlikely(check_non_null && !_array))\
@@ -362,6 +364,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
                }                                       \
 _out:                                                  \
                rcu_read_unlock();                      \
+               preempt_enable_no_resched();            \
                _ret;                                   \
         })
 
@@ -434,7 +437,6 @@ int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
 int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
                                 void *key, void *value, u64 map_flags);
 int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
-void bpf_fd_array_map_clear(struct bpf_map *map);
 int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
                                void *key, void *value, u64 map_flags);
 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
index d3339dd..b324e01 100644 (file)
@@ -25,6 +25,7 @@
 #define PHY_ID_BCM54612E               0x03625e60
 #define PHY_ID_BCM54616S               0x03625d10
 #define PHY_ID_BCM57780                        0x03625d90
+#define PHY_ID_BCM89610                        0x03625cd0
 
 #define PHY_ID_BCM7250                 0xae025280
 #define PHY_ID_BCM7260                 0xae025190
index 528ccc9..96bb322 100644 (file)
@@ -77,7 +77,10 @@ struct ceph_osd_data {
                        u32                     bio_length;
                };
 #endif /* CONFIG_BLOCK */
-               struct ceph_bvec_iter   bvec_pos;
+               struct {
+                       struct ceph_bvec_iter   bvec_pos;
+                       u32                     num_bvecs;
+               };
        };
 };
 
@@ -412,6 +415,10 @@ void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
                                    struct ceph_bio_iter *bio_pos,
                                    u32 bio_length);
 #endif /* CONFIG_BLOCK */
+void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
+                                     unsigned int which,
+                                     struct bio_vec *bvecs, u32 num_bvecs,
+                                     u32 bytes);
 void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
                                         unsigned int which,
                                         struct ceph_bvec_iter *bvec_pos);
@@ -426,7 +433,8 @@ extern void osd_req_op_cls_request_data_pages(struct ceph_osd_request *,
                                        bool own_pages);
 void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
                                       unsigned int which,
-                                      struct bio_vec *bvecs, u32 bytes);
+                                      struct bio_vec *bvecs, u32 num_bvecs,
+                                      u32 bytes);
 extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *,
                                        unsigned int which,
                                        struct page **pages, u64 length,
index 210a890..1d25e14 100644 (file)
@@ -765,6 +765,9 @@ int __clk_mux_determine_rate(struct clk_hw *hw,
 int __clk_determine_rate(struct clk_hw *core, struct clk_rate_request *req);
 int __clk_mux_determine_rate_closest(struct clk_hw *hw,
                                     struct clk_rate_request *req);
+int clk_mux_determine_rate_flags(struct clk_hw *hw,
+                                struct clk_rate_request *req,
+                                unsigned long flags);
 void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent);
 void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
                           unsigned long max_rate);
index 0059b99..4779569 100644 (file)
@@ -256,7 +256,9 @@ enum probe_type {
  *             automatically.
  * @pm:                Power management operations of the device which matched
  *             this driver.
- * @coredump:  Called through sysfs to initiate a device coredump.
+ * @coredump:  Called when sysfs entry is written to. The device driver
+ *             is expected to call the dev_coredump API resulting in a
+ *             uevent.
  * @p:         Driver core's private data, no one other than the driver
  *             core can touch this.
  *
@@ -288,7 +290,7 @@ struct device_driver {
        const struct attribute_group **groups;
 
        const struct dev_pm_ops *pm;
-       int (*coredump) (struct device *dev);
+       void (*coredump) (struct device *dev);
 
        struct driver_private *p;
 };
index ebe4181..b32cd20 100644 (file)
@@ -310,6 +310,8 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
  *     fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS
  *     instead of the latter), any change to them will be overwritten
  *     by kernel. Returns a negative error code or zero.
+ * @get_fecparam: Get the network device Forward Error Correction parameters.
+ * @set_fecparam: Set the network device Forward Error Correction parameters.
  *
  * All operations are optional (i.e. the function pointer may be set
  * to %NULL) and callers must take this into account.  Callers must
index e0c95c9..e64c029 100644 (file)
@@ -217,12 +217,10 @@ struct fsnotify_mark_connector {
        union { /* Object pointer [lock] */
                struct inode *inode;
                struct vfsmount *mnt;
-       };
-       union {
-               struct hlist_head list;
                /* Used listing heads to free after srcu period expires */
                struct fsnotify_mark_connector *destroy_next;
        };
+       struct hlist_head list;
 };
 
 /*
index c826b0b..6cb8a57 100644 (file)
@@ -368,7 +368,9 @@ static inline void free_part_stats(struct hd_struct *part)
        part_stat_add(cpu, gendiskp, field, -subnd)
 
 void part_in_flight(struct request_queue *q, struct hd_struct *part,
-                       unsigned int inflight[2]);
+                   unsigned int inflight[2]);
+void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
+                      unsigned int inflight[2]);
 void part_dec_in_flight(struct request_queue *q, struct hd_struct *part,
                        int rw);
 void part_inc_in_flight(struct request_queue *q, struct hd_struct *part,
index a2656c3..3892e9c 100644 (file)
@@ -161,9 +161,11 @@ struct hrtimer_clock_base {
 enum  hrtimer_base_type {
        HRTIMER_BASE_MONOTONIC,
        HRTIMER_BASE_REALTIME,
+       HRTIMER_BASE_BOOTTIME,
        HRTIMER_BASE_TAI,
        HRTIMER_BASE_MONOTONIC_SOFT,
        HRTIMER_BASE_REALTIME_SOFT,
+       HRTIMER_BASE_BOOTTIME_SOFT,
        HRTIMER_BASE_TAI_SOFT,
        HRTIMER_MAX_CLOCK_BASES,
 };
index c196176..2803264 100644 (file)
@@ -62,6 +62,7 @@ void *kthread_probe_data(struct task_struct *k);
 int kthread_park(struct task_struct *k);
 void kthread_unpark(struct task_struct *k);
 void kthread_parkme(void);
+void kthread_park_complete(struct task_struct *k);
 
 int kthreadd(void *unused);
 extern struct task_struct *kthreadd_task;
index 767d193..2a156c5 100644 (file)
@@ -1284,25 +1284,19 @@ enum {
 };
 
 static inline const struct cpumask *
-mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector)
+mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
 {
-       const struct cpumask *mask;
        struct irq_desc *desc;
        unsigned int irq;
        int eqn;
        int err;
 
-       err = mlx5_vector2eqn(dev, MLX5_EQ_VEC_COMP_BASE + vector, &eqn, &irq);
+       err = mlx5_vector2eqn(dev, vector, &eqn, &irq);
        if (err)
                return NULL;
 
        desc = irq_to_desc(irq);
-#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
-       mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
-#else
-       mask = desc->irq_common_data.affinity;
-#endif
-       return mask;
+       return desc->affinity_hint;
 }
 
 #endif /* MLX5_DRIVER_H */
index 1ac1f06..c080af5 100644 (file)
@@ -2493,6 +2493,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
 #define FOLL_MLOCK     0x1000  /* lock present pages */
 #define FOLL_REMOTE    0x2000  /* we are working on non-current tsk/mm */
 #define FOLL_COW       0x4000  /* internal GUP flag */
+#define FOLL_ANON      0x8000  /* don't do file mappings */
 
 static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
 {
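
FOLL_ANON is consumed inside the GUP path; the sketch below illustrates the semantics it implies (reject file-backed mappings), not the actual gup.c change, which is outside this header hunk:

#include <linux/mm.h>

/* Illustrative only: refuse file-backed VMAs when the caller asked for
 * anonymous memory via FOLL_ANON. */
static int check_anon_only(struct vm_area_struct *vma, unsigned int gup_flags)
{
	if ((gup_flags & FOLL_ANON) && !vma_is_anonymous(vma))
		return -EFAULT;
	return 0;
}
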
index b63fa45..3529683 100644 (file)
@@ -85,6 +85,7 @@ struct flchip {
        unsigned int write_suspended:1;
        unsigned int erase_suspended:1;
        unsigned long in_progress_block_addr;
+       unsigned long in_progress_block_mask;
 
        struct mutex mutex;
        wait_queue_head_t wq; /* Wait on here when we're waiting for the chip
index 5bad038..6adac11 100644 (file)
@@ -95,6 +95,8 @@ static inline int check_stable_address_space(struct mm_struct *mm)
        return 0;
 }
 
+void __oom_reap_task_mm(struct mm_struct *mm);
+
 extern unsigned long oom_badness(struct task_struct *p,
                struct mem_cgroup *memcg, const nodemask_t *nodemask,
                unsigned long totalpages);
index 6bfd2b5..af8a61b 100644 (file)
@@ -26,6 +26,7 @@
 
 #include <linux/compiler.h>
 #include <linux/rbtree.h>
+#include <linux/rcupdate.h>
 
 /*
  * Please note - only struct rb_augment_callbacks and the prototypes for
index ece43e8..7d012fa 100644 (file)
@@ -35,6 +35,7 @@
 
 #include <linux/rbtree.h>
 #include <linux/seqlock.h>
+#include <linux/rcupdate.h>
 
 struct latch_tree_node {
        struct rb_node node[2];
index d09a9c7..dfdaede 100644 (file)
@@ -569,7 +569,7 @@ static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev)
 void rproc_add_subdev(struct rproc *rproc,
                      struct rproc_subdev *subdev,
                      int (*probe)(struct rproc_subdev *subdev),
-                     void (*remove)(struct rproc_subdev *subdev, bool graceful));
+                     void (*remove)(struct rproc_subdev *subdev, bool crashed));
 
 void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev);
 
index b3d697f..c241370 100644 (file)
@@ -112,17 +112,36 @@ struct task_group;
 
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 
+/*
+ * Special states are those that do not use the normal wait-loop pattern. See
+ * the comment with set_special_state().
+ */
+#define is_special_task_state(state)                           \
+       ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD))
+
 #define __set_current_state(state_value)                       \
        do {                                                    \
+               WARN_ON_ONCE(is_special_task_state(state_value));\
                current->task_state_change = _THIS_IP_;         \
                current->state = (state_value);                 \
        } while (0)
+
 #define set_current_state(state_value)                         \
        do {                                                    \
+               WARN_ON_ONCE(is_special_task_state(state_value));\
                current->task_state_change = _THIS_IP_;         \
                smp_store_mb(current->state, (state_value));    \
        } while (0)
 
+#define set_special_state(state_value)                                 \
+       do {                                                            \
+               unsigned long flags; /* may shadow */                   \
+               WARN_ON_ONCE(!is_special_task_state(state_value));      \
+               raw_spin_lock_irqsave(&current->pi_lock, flags);        \
+               current->task_state_change = _THIS_IP_;                 \
+               current->state = (state_value);                         \
+               raw_spin_unlock_irqrestore(&current->pi_lock, flags);   \
+       } while (0)
 #else
 /*
  * set_current_state() includes a barrier so that the write of current->state
@@ -144,8 +163,8 @@ struct task_group;
  *
  * The above is typically ordered against the wakeup, which does:
  *
- *     need_sleep = false;
- *     wake_up_state(p, TASK_UNINTERRUPTIBLE);
+ *   need_sleep = false;
+ *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
  *
  * Where wake_up_state() (and all other wakeup primitives) imply enough
  * barriers to order the store of the variable against wakeup.
@@ -154,12 +173,33 @@ struct task_group;
  * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
  * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
  *
- * This is obviously fine, since they both store the exact same value.
+ * However, with slightly different timing the wakeup TASK_RUNNING store can
+ * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
+ * a problem either because that will result in one extra go around the loop
+ * and our @cond test will save the day.
  *
  * Also see the comments of try_to_wake_up().
  */
-#define __set_current_state(state_value) do { current->state = (state_value); } while (0)
-#define set_current_state(state_value)  smp_store_mb(current->state, (state_value))
+#define __set_current_state(state_value)                               \
+       current->state = (state_value)
+
+#define set_current_state(state_value)                                 \
+       smp_store_mb(current->state, (state_value))
+
+/*
+ * set_special_state() should be used for those states when the blocking task
+ * cannot use the regular condition-based wait-loop. In that case we must
+ * serialize against wakeups such that any possible in-flight TASK_RUNNING stores
+ * will not collide with our state change.
+ */
+#define set_special_state(state_value)                                 \
+       do {                                                            \
+               unsigned long flags; /* may shadow */                   \
+               raw_spin_lock_irqsave(&current->pi_lock, flags);        \
+               current->state = (state_value);                         \
+               raw_spin_unlock_irqrestore(&current->pi_lock, flags);   \
+       } while (0)
+
 #endif
 
 /* Task command name length: */
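
For contrast with the special states above, the regular pattern that set_current_state() serves is a condition-based wait loop, roughly as sketched below (this mirrors the pattern the surrounding comments describe; wait_until_ready() and need_sleep are illustrative names). Special states such as TASK_STOPPED have no condition to re-test, which is why they now go through set_special_state() under pi_lock:

#include <linux/sched.h>

static void wait_until_ready(bool *need_sleep)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!*need_sleep)		/* the @cond the comments refer to */
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}
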
index a7ce74c..113d1ad 100644 (file)
@@ -280,7 +280,7 @@ static inline void kernel_signal_stop(void)
 {
        spin_lock_irq(&current->sighand->siglock);
        if (current->jobctl & JOBCTL_STOP_DEQUEUED)
-               __set_current_state(TASK_STOPPED);
+               set_special_state(TASK_STOPPED);
        spin_unlock_irq(&current->sighand->siglock);
 
        schedule();
index 1d35610..b4c9fda 100644 (file)
@@ -351,10 +351,10 @@ struct earlycon_id {
        char    name[16];
        char    compatible[128];
        int     (*setup)(struct earlycon_device *, const char *options);
-} __aligned(32);
+};
 
-extern const struct earlycon_id __earlycon_table[];
-extern const struct earlycon_id __earlycon_table_end[];
+extern const struct earlycon_id *__earlycon_table[];
+extern const struct earlycon_id *__earlycon_table_end[];
 
 #if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE)
 #define EARLYCON_USED_OR_UNUSED        __used
@@ -362,12 +362,19 @@ extern const struct earlycon_id __earlycon_table_end[];
 #define EARLYCON_USED_OR_UNUSED        __maybe_unused
 #endif
 
-#define OF_EARLYCON_DECLARE(_name, compat, fn)                         \
-       static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \
-            EARLYCON_USED_OR_UNUSED __section(__earlycon_table)        \
+#define _OF_EARLYCON_DECLARE(_name, compat, fn, unique_id)             \
+       static const struct earlycon_id unique_id                       \
+            EARLYCON_USED_OR_UNUSED __initconst                        \
                = { .name = __stringify(_name),                         \
                    .compatible = compat,                               \
-                   .setup = fn  }
+                   .setup = fn  };                                     \
+       static const struct earlycon_id EARLYCON_USED_OR_UNUSED         \
+               __section(__earlycon_table)                             \
+               * const __PASTE(__p, unique_id) = &unique_id
+
+#define OF_EARLYCON_DECLARE(_name, compat, fn)                         \
+       _OF_EARLYCON_DECLARE(_name, compat, fn,                         \
+                            __UNIQUE_ID(__earlycon_##_name))
 
 #define EARLYCON_DECLARE(_name, fn)    OF_EARLYCON_DECLARE(_name, "", fn)
 
index e8f0f85..c0c5c5b 100644 (file)
@@ -50,9 +50,9 @@ partial_name_hash(unsigned long c, unsigned long prevhash)
  * losing bits).  This also has the property (wanted by the dcache)
  * that the msbits make a good hash table index.
  */
-static inline unsigned long end_name_hash(unsigned long hash)
+static inline unsigned int end_name_hash(unsigned long hash)
 {
-       return __hash_32((unsigned int)hash);
+       return hash_long(hash, 32);
 }
 
 /*
index 45bc6b3..53604b0 100644 (file)
@@ -60,6 +60,81 @@ struct ti_emif_pm_functions {
        u32 abort_sr;
 } __packed __aligned(8);
 
+static inline void ti_emif_asm_offsets(void)
+{
+       DEFINE(EMIF_SDCFG_VAL_OFFSET,
+              offsetof(struct emif_regs_amx3, emif_sdcfg_val));
+       DEFINE(EMIF_TIMING1_VAL_OFFSET,
+              offsetof(struct emif_regs_amx3, emif_timing1_val));
+       DEFINE(EMIF_TIMING2_VAL_OFFSET,
+              offsetof(struct emif_regs_amx3, emif_timing2_val));
+       DEFINE(EMIF_TIMING3_VAL_OFFSET,
+              offsetof(struct emif_regs_amx3, emif_timing3_val));
+       DEFINE(EMIF_REF_CTRL_VAL_OFFSET,
+              offsetof(struct emif_regs_amx3, emif_ref_ctrl_val));
+       DEFINE(EMIF_ZQCFG_VAL_OFFSET,
+              offsetof(struct emif_regs_amx3, emif_zqcfg_val));
+       DEFINE(EMIF_PMCR_VAL_OFFSET,
+              offsetof(struct emif_regs_amx3, emif_pmcr_val));
+       DEFINE(EMIF_PMCR_SHDW_VAL_OFFSET,
+              offsetof(struct emif_regs_amx3, emif_pmcr_shdw_val));
+       DEFINE(EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET,
+              offsetof(struct emif_regs_amx3, emif_rd_wr_level_ramp_ctrl));
+       DEFINE(EMIF_RD_WR_EXEC_THRESH_OFFSET,
+              offsetof(struct emif_regs_amx3, emif_rd_wr_exec_thresh));
+       DEFINE(EMIF_COS_CONFIG_OFFSET,
+              offsetof(struct emif_regs_amx3, emif_cos_config));
+       DEFINE(EMIF_PRIORITY_TO_COS_MAPPING_OFFSET,
+              offsetof(struct emif_regs_amx3, emif_priority_to_cos_mapping));
+       DEFINE(EMIF_CONNECT_ID_SERV_1_MAP_OFFSET,
+              offsetof(struct emif_regs_amx3, emif_connect_id_serv_1_map));
+       DEFINE(EMIF_CONNECT_ID_SERV_2_MAP_OFFSET,
+              offsetof(struct emif_regs_amx3, emif_connect_id_serv_2_map));
+       DEFINE(EMIF_OCP_CONFIG_VAL_OFFSET,
+              offsetof(struct emif_regs_amx3, emif_ocp_config_val));
+       DEFINE(EMIF_LPDDR2_NVM_TIM_OFFSET,
+              offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim));
+       DEFINE(EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET,
+              offsetof(struct emif_regs_amx3, emif_lpddr2_nvm_tim_shdw));
+       DEFINE(EMIF_DLL_CALIB_CTRL_VAL_OFFSET,
+              offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val));
+       DEFINE(EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET,
+              offsetof(struct emif_regs_amx3, emif_dll_calib_ctrl_val_shdw));
+       DEFINE(EMIF_DDR_PHY_CTLR_1_OFFSET,
+              offsetof(struct emif_regs_amx3, emif_ddr_phy_ctlr_1));
+       DEFINE(EMIF_EXT_PHY_CTRL_VALS_OFFSET,
+              offsetof(struct emif_regs_amx3, emif_ext_phy_ctrl_vals));
+       DEFINE(EMIF_REGS_AMX3_SIZE, sizeof(struct emif_regs_amx3));
+
+       BLANK();
+
+       DEFINE(EMIF_PM_BASE_ADDR_VIRT_OFFSET,
+              offsetof(struct ti_emif_pm_data, ti_emif_base_addr_virt));
+       DEFINE(EMIF_PM_BASE_ADDR_PHYS_OFFSET,
+              offsetof(struct ti_emif_pm_data, ti_emif_base_addr_phys));
+       DEFINE(EMIF_PM_CONFIG_OFFSET,
+              offsetof(struct ti_emif_pm_data, ti_emif_sram_config));
+       DEFINE(EMIF_PM_REGS_VIRT_OFFSET,
+              offsetof(struct ti_emif_pm_data, regs_virt));
+       DEFINE(EMIF_PM_REGS_PHYS_OFFSET,
+              offsetof(struct ti_emif_pm_data, regs_phys));
+       DEFINE(EMIF_PM_DATA_SIZE, sizeof(struct ti_emif_pm_data));
+
+       BLANK();
+
+       DEFINE(EMIF_PM_SAVE_CONTEXT_OFFSET,
+              offsetof(struct ti_emif_pm_functions, save_context));
+       DEFINE(EMIF_PM_RESTORE_CONTEXT_OFFSET,
+              offsetof(struct ti_emif_pm_functions, restore_context));
+       DEFINE(EMIF_PM_ENTER_SR_OFFSET,
+              offsetof(struct ti_emif_pm_functions, enter_sr));
+       DEFINE(EMIF_PM_EXIT_SR_OFFSET,
+              offsetof(struct ti_emif_pm_functions, exit_sr));
+       DEFINE(EMIF_PM_ABORT_SR_OFFSET,
+              offsetof(struct ti_emif_pm_functions, abort_sr));
+       DEFINE(EMIF_PM_FUNCTIONS_SIZE, sizeof(struct ti_emif_pm_functions));
+}
+
 struct gen_pool;
 
 int ti_emif_copy_pm_function_table(struct gen_pool *sram_pool, void *dst);
index 4b3dca1..7acb953 100644 (file)
@@ -52,7 +52,6 @@ struct tk_read_base {
  * @offs_real:         Offset clock monotonic -> clock realtime
  * @offs_boot:         Offset clock monotonic -> clock boottime
  * @offs_tai:          Offset clock monotonic -> clock tai
- * @time_suspended:    Accumulated suspend time
  * @tai_offset:                The current UTC to TAI offset in seconds
  * @clock_was_set_seq: The sequence number of clock was set events
  * @cs_was_changed_seq:        The sequence number of clocksource change events
@@ -95,7 +94,6 @@ struct timekeeper {
        ktime_t                 offs_real;
        ktime_t                 offs_boot;
        ktime_t                 offs_tai;
-       ktime_t                 time_suspended;
        s32                     tai_offset;
        unsigned int            clock_was_set_seq;
        u8                      cs_was_changed_seq;
index 9737fbe..588a0e4 100644 (file)
@@ -33,25 +33,20 @@ extern void ktime_get_ts64(struct timespec64 *ts);
 extern time64_t ktime_get_seconds(void);
 extern time64_t __ktime_get_real_seconds(void);
 extern time64_t ktime_get_real_seconds(void);
-extern void ktime_get_active_ts64(struct timespec64 *ts);
 
 extern int __getnstimeofday64(struct timespec64 *tv);
 extern void getnstimeofday64(struct timespec64 *tv);
 extern void getboottime64(struct timespec64 *ts);
 
-#define ktime_get_real_ts64(ts)                getnstimeofday64(ts)
-
-/* Clock BOOTTIME compatibility wrappers */
-static inline void get_monotonic_boottime64(struct timespec64 *ts)
-{
-       ktime_get_ts64(ts);
-}
+#define ktime_get_real_ts64(ts)        getnstimeofday64(ts)
 
 /*
  * ktime_t based interfaces
  */
+
 enum tk_offsets {
        TK_OFFS_REAL,
+       TK_OFFS_BOOT,
        TK_OFFS_TAI,
        TK_OFFS_MAX,
 };
@@ -62,10 +57,6 @@ extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs);
 extern ktime_t ktime_get_raw(void);
 extern u32 ktime_get_resolution_ns(void);
 
-/* Clock BOOTTIME compatibility wrappers */
-static inline ktime_t ktime_get_boottime(void) { return ktime_get(); }
-static inline u64 ktime_get_boot_ns(void) { return ktime_get(); }
-
 /**
  * ktime_get_real - get the real (wall-) time in ktime_t format
  */
@@ -74,6 +65,17 @@ static inline ktime_t ktime_get_real(void)
        return ktime_get_with_offset(TK_OFFS_REAL);
 }
 
+/**
+ * ktime_get_boottime - Returns monotonic time since boot in ktime_t format
+ *
+ * This is similar to CLOCK_MONOTONIC/ktime_get, but also includes the
+ * time spent in suspend.
+ */
+static inline ktime_t ktime_get_boottime(void)
+{
+       return ktime_get_with_offset(TK_OFFS_BOOT);
+}
+
 /**
  * ktime_get_clocktai - Returns the TAI time of day in ktime_t format
  */
@@ -100,6 +102,11 @@ static inline u64 ktime_get_real_ns(void)
        return ktime_to_ns(ktime_get_real());
 }
 
+static inline u64 ktime_get_boot_ns(void)
+{
+       return ktime_to_ns(ktime_get_boottime());
+}
+
 static inline u64 ktime_get_tai_ns(void)
 {
        return ktime_to_ns(ktime_get_clocktai());
@@ -112,11 +119,17 @@ static inline u64 ktime_get_raw_ns(void)
 
 extern u64 ktime_get_mono_fast_ns(void);
 extern u64 ktime_get_raw_fast_ns(void);
+extern u64 ktime_get_boot_fast_ns(void);
 extern u64 ktime_get_real_fast_ns(void);
 
 /*
  * timespec64 interfaces utilizing the ktime based ones
  */
+static inline void get_monotonic_boottime64(struct timespec64 *ts)
+{
+       *ts = ktime_to_timespec64(ktime_get_boottime());
+}
+
 static inline void timekeeping_clocktai64(struct timespec64 *ts)
 {
        *ts = ktime_to_timespec64(ktime_get_clocktai());
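
A small usage sketch of the boottime accessors reintroduced above as real offset-based clocks (stamp_event() is an illustrative name):

#include <linux/timekeeping.h>

/* Timestamp an event on a clock that keeps counting across suspend,
 * unlike CLOCK_MONOTONIC. */
static u64 stamp_event(void)
{
	ktime_t now = ktime_get_boottime();

	return ktime_to_ns(now);	/* equivalent to ktime_get_boot_ns() */
}
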
index 47f8af2..1dd587b 100644 (file)
@@ -701,7 +701,7 @@ extern int tty_unregister_ldisc(int disc);
 extern int tty_set_ldisc(struct tty_struct *tty, int disc);
 extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty);
 extern void tty_ldisc_release(struct tty_struct *tty);
-extern void tty_ldisc_init(struct tty_struct *tty);
+extern int __must_check tty_ldisc_init(struct tty_struct *tty);
 extern void tty_ldisc_deinit(struct tty_struct *tty);
 extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
                                 char *f, int count);
index 4b6b928..8675e14 100644 (file)
@@ -52,7 +52,7 @@
 #define USB_GADGET_DELAYED_STATUS       0x7fff /* Impossibly large value */
 
 /* big enough to hold our biggest descriptor */
-#define USB_COMP_EP0_BUFSIZ    1024
+#define USB_COMP_EP0_BUFSIZ    4096
 
 /* OS feature descriptor length <= 4kB */
 #define USB_COMP_EP0_OS_DESC_BUFSIZ    4096
index c71def6..a240ed2 100644 (file)
@@ -24,24 +24,6 @@ __printf(1, 2) void vbg_debug(const char *fmt, ...);
 #define vbg_debug pr_debug
 #endif
 
-/**
- * Allocate memory for generic request and initialize the request header.
- *
- * Return: the allocated memory
- * @len:               Size of memory block required for the request.
- * @req_type:          The generic request type.
- */
-void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
-
-/**
- * Perform a generic request.
- *
- * Return: VBox status code
- * @gdev:              The Guest extension device.
- * @req:               Pointer to the request structure.
- */
-int vbg_req_perform(struct vbg_dev *gdev, void *req);
-
 int vbg_hgcm_connect(struct vbg_dev *gdev,
                     struct vmmdev_hgcm_service_location *loc,
                     u32 *client_id, int *vbox_status);
@@ -52,11 +34,6 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
                  u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
                  u32 parm_count, int *vbox_status);
 
-int vbg_hgcm_call32(
-       struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
-       struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
-       int *vbox_status);
-
 /**
  * Convert a VirtualBox status code to a standard Linux kernel return value.
  * Return: 0 or negative errno value.
index 988c735..fa1b5da 100644 (file)
@@ -157,6 +157,9 @@ int virtio_device_freeze(struct virtio_device *dev);
 int virtio_device_restore(struct virtio_device *dev);
 #endif
 
+#define virtio_device_for_each_vq(vdev, vq) \
+       list_for_each_entry(vq, &vdev->vqs, list)
+
 /**
  * virtio_driver - operations for a virtio I/O driver
  * @driver: underlying device driver (populate name and owner).
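
A sketch of how a driver might use the new iterator; log_queue_sizes() and the message text are illustrative, not from the patch:

#include <linux/virtio.h>

static void log_queue_sizes(struct virtio_device *vdev)
{
	struct virtqueue *vq;

	virtio_device_for_each_vq(vdev, vq)
		dev_info(&vdev->dev, "vq %u has %u entries\n",
			 vq->index, virtqueue_get_vring_size(vq));
}
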
index 9318b21..2b0072f 100644 (file)
@@ -305,4 +305,21 @@ do {                                                                       \
        __ret;                                                          \
 })
 
+/**
+ * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
+ *
+ * @bit: the bit of the word being waited on
+ * @word: the word being waited on, a kernel virtual address
+ *
+ * You can use this helper if bitflags are manipulated atomically rather than
+ * non-atomically under a lock.
+ */
+static inline void clear_and_wake_up_bit(int bit, void *word)
+{
+       clear_bit_unlock(bit, word);
+       /* See wake_up_bit() for which memory barrier you need to use. */
+       smp_mb__after_atomic();
+       wake_up_bit(word, bit);
+}
+
 #endif /* _LINUX_WAIT_BIT_H */
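
A sketch of the pairing the new helper is meant for; FLAG_BUSY and my_flags are illustrative names, not part of the patch:

#include <linux/wait_bit.h>

#define FLAG_BUSY	0

static unsigned long my_flags;

static void finish_work(void)
{
	/* atomic clear + the barrier wake_up_bit() needs + wake any waiters */
	clear_and_wake_up_bit(FLAG_BUSY, &my_flags);
}

static int wait_for_work(void)
{
	return wait_on_bit(&my_flags, FLAG_BUSY, TASK_UNINTERRUPTIBLE);
}
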
index 5ee007c..cb213c1 100644 (file)
@@ -5,7 +5,7 @@
  * Author: Santiago Nunez-Corrales <santiago.nunez@ridgerun.com>
  *
  * This code is partially based upon the TVP5150 driver
- * written by Mauro Carvalho Chehab (mchehab@infradead.org),
+ * written by Mauro Carvalho Chehab <mchehab@kernel.org>,
  * the TVP514x driver written by Vaibhav Hiremath <hvaibhav@ti.com>
  * and the TVP7002 driver in the TI LSP 2.10.00.14
  *
index 0bda0ad..60a664f 100644 (file)
@@ -1,11 +1,11 @@
 /*
  * generic helper functions for handling video4linux capture buffers
  *
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
  *
  * Highly based on video-buf written originally by:
  * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
- * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
  * (c) 2006 Ted Walther and John Sokol
  *
  * This program is free software; you can redistribute it and/or modify
index d8b2785..01bd142 100644 (file)
@@ -6,11 +6,11 @@
  * into PAGE_SIZE chunks).  They also assume the driver does not need
  * to touch the video data.
  *
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
  *
  * Highly based on video-buf written originally by:
  * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
- * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
  * (c) 2006 Ted Walther and John Sokol
  *
  * This program is free software; you can redistribute it and/or modify
index 486a97e..36c6a4a 100644 (file)
@@ -6,7 +6,7 @@
  * into PAGE_SIZE chunks).  They also assume the driver does not need
  * to touch the video data.
  *
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
+ * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index f801fc9..b522351 100644 (file)
@@ -198,6 +198,7 @@ struct bonding {
        struct   slave __rcu *primary_slave;
        struct   bond_up_slave __rcu *slave_arr; /* Array of usable slaves */
        bool     force_primary;
+       u32      nest_level;
        s32      slave_cnt; /* never change this value outside the attach/detach wrappers */
        int     (*recv_probe)(const struct sk_buff *, struct bonding *,
                              struct slave *);
index 9a07477..d1fcf24 100644 (file)
@@ -251,7 +251,7 @@ extern struct flow_dissector flow_keys_buf_dissector;
  * This structure is used to hold a digest of the full flow keys. This is a
  * larger "hash" of a flow to allow definitively matching specific flows where
  * the 32 bit skb->hash is not large enough. The size is limited to 16 bytes so
- * that it can by used in CB of skb (see sch_choke for an example).
+ * that it can be used in CB of skb (see sch_choke for an example).
  */
 #define FLOW_KEYS_DIGEST_LEN   16
 struct flow_keys_digest {
index 44b9c00..e117617 100644 (file)
@@ -12,7 +12,8 @@
 void *ife_encode(struct sk_buff *skb, u16 metalen);
 void *ife_decode(struct sk_buff *skb, u16 *metalen);
 
-void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen, u16 *totlen);
+void *ife_tlv_meta_decode(void *skbdata, const void *ifehdr_end, u16 *attrtype,
+                         u16 *dlen, u16 *totlen);
 int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
                        const void *dval);
 
index 5c40f11..df528a6 100644 (file)
@@ -97,6 +97,7 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
 
 struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
                          struct proto *prot, int kern);
+void llc_sk_stop_all_timers(struct sock *sk, bool sync);
 void llc_sk_free(struct sock *sk);
 
 void llc_sk_reset(struct sock *sk);
index d2279b2..b2f3a0c 100644 (file)
@@ -2080,7 +2080,7 @@ struct ieee80211_txq {
  *     virtual interface might not be given air time for the transmission of
  *     the frame, as it is not synced with the AP/P2P GO yet, and thus the
  *     deauthentication frame might not be transmitted.
- >
+ *
  * @IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP: The driver (or firmware) doesn't
  *     support QoS NDP for AP probing - that's most likely a driver bug.
  *
index 3da8e13..b400d0b 100644 (file)
@@ -148,6 +148,7 @@ struct tls_context {
        struct scatterlist *partially_sent_record;
        u16 partially_sent_offset;
        unsigned long flags;
+       bool in_tcp_sendpages;
 
        u16 pending_open_record_frags;
        int (*push_pending_record)(struct sock *sk, int flags);
index a872379..45e75c3 100644 (file)
@@ -375,6 +375,7 @@ struct xfrm_input_afinfo {
 int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo);
 int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo);
 
+void xfrm_flush_gc(void);
 void xfrm_state_delete_tunnel(struct xfrm_state *x);
 
 struct xfrm_type {
index 04e0679..e03bd9d 100644 (file)
@@ -11,8 +11,6 @@ struct scsi_sense_hdr;
 extern void scsi_print_command(struct scsi_cmnd *);
 extern size_t __scsi_format_command(char *, size_t,
                                   const unsigned char *, size_t);
-extern void scsi_show_extd_sense(const struct scsi_device *, const char *,
-                                unsigned char, unsigned char);
 extern void scsi_print_sense_hdr(const struct scsi_device *, const char *,
                                 const struct scsi_sense_hdr *);
 extern void scsi_print_sense(const struct scsi_cmnd *);
index 50df5b2..8ee8991 100644 (file)
@@ -143,13 +143,13 @@ struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node);
 static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag,
                                        void *data, size_t len)
 {
-       return 0;
+       return -ENOSYS;
 }
 
 static inline int rpi_firmware_property_list(struct rpi_firmware *fw,
                                             void *data, size_t tag_size)
 {
-       return 0;
+       return -ENOSYS;
 }
 
 static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
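
The stubs now fail loudly when the firmware driver is not built in. A sketch of a caller that benefits; check_fw_revision() is an illustrative name, while the tag and the property call come from the real API:

#include <soc/bcm2835/raspberrypi-firmware.h>

static int check_fw_revision(struct rpi_firmware *fw)
{
	u32 rev = 0;
	int ret;

	ret = rpi_firmware_property(fw, RPI_FIRMWARE_GET_FIRMWARE_REVISION,
				    &rev, sizeof(rev));
	if (ret)		/* now -ENOSYS instead of a silent "success" */
		return ret;

	pr_info("firmware revision %u\n", rev);
	return 0;
}
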
index f082055..d0a341b 100644 (file)
@@ -575,6 +575,48 @@ TRACE_EVENT(afs_protocol_error,
                      __entry->call, __entry->error, __entry->where)
            );
 
+TRACE_EVENT(afs_cm_no_server,
+           TP_PROTO(struct afs_call *call, struct sockaddr_rxrpc *srx),
+
+           TP_ARGS(call, srx),
+
+           TP_STRUCT__entry(
+                   __field(unsigned int,                       call    )
+                   __field(unsigned int,                       op_id   )
+                   __field_struct(struct sockaddr_rxrpc,       srx     )
+                            ),
+
+           TP_fast_assign(
+                   __entry->call = call->debug_id;
+                   __entry->op_id = call->operation_ID;
+                   memcpy(&__entry->srx, srx, sizeof(__entry->srx));
+                          ),
+
+           TP_printk("c=%08x op=%u %pISpc",
+                     __entry->call, __entry->op_id, &__entry->srx.transport)
+           );
+
+TRACE_EVENT(afs_cm_no_server_u,
+           TP_PROTO(struct afs_call *call, const uuid_t *uuid),
+
+           TP_ARGS(call, uuid),
+
+           TP_STRUCT__entry(
+                   __field(unsigned int,                       call    )
+                   __field(unsigned int,                       op_id   )
+                   __field_struct(uuid_t,                      uuid    )
+                            ),
+
+           TP_fast_assign(
+                   __entry->call = call->debug_id;
+                   __entry->op_id = call->operation_ID;
+                   memcpy(&__entry->uuid, uuid, sizeof(__entry->uuid));
+                          ),
+
+           TP_printk("c=%08x op=%u %pU",
+                     __entry->call, __entry->op_id, &__entry->uuid)
+           );
+
 #endif /* _TRACE_AFS_H */
 
 /* This part must be outside protection */
index 8d6cf10..eb903c3 100644 (file)
@@ -31,7 +31,11 @@ TRACE_EVENT(initcall_start,
        TP_ARGS(func),
 
        TP_STRUCT__entry(
-               __field(initcall_t, func)
+               /*
+                * Use field_struct to avoid is_signed_type()
+                * comparison of a function pointer
+                */
+               __field_struct(initcall_t, func)
        ),
 
        TP_fast_assign(
@@ -48,8 +52,12 @@ TRACE_EVENT(initcall_finish,
        TP_ARGS(func, ret),
 
        TP_STRUCT__entry(
-               __field(initcall_t,     func)
-               __field(int,            ret)
+               /*
+                * Use field_struct to avoid is_signed_type()
+                * comparison of a function pointer
+                */
+               __field_struct(initcall_t,      func)
+               __field(int,                    ret)
        ),
 
        TP_fast_assign(
index 9e96c2f..077e664 100644 (file)
@@ -15,6 +15,7 @@
 #define _TRACE_RXRPC_H
 
 #include <linux/tracepoint.h>
+#include <linux/errqueue.h>
 
 /*
  * Define enums for tracing information.
@@ -210,6 +211,20 @@ enum rxrpc_congest_change {
        rxrpc_cong_saw_nack,
 };
 
+enum rxrpc_tx_fail_trace {
+       rxrpc_tx_fail_call_abort,
+       rxrpc_tx_fail_call_ack,
+       rxrpc_tx_fail_call_data_frag,
+       rxrpc_tx_fail_call_data_nofrag,
+       rxrpc_tx_fail_call_final_resend,
+       rxrpc_tx_fail_conn_abort,
+       rxrpc_tx_fail_conn_challenge,
+       rxrpc_tx_fail_conn_response,
+       rxrpc_tx_fail_reject,
+       rxrpc_tx_fail_version_keepalive,
+       rxrpc_tx_fail_version_reply,
+};
+
 #endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */
 
 /*
@@ -437,6 +452,19 @@ enum rxrpc_congest_change {
        EM(RXRPC_CALL_LOCAL_ERROR,              "LocalError") \
        E_(RXRPC_CALL_NETWORK_ERROR,            "NetError")
 
+#define rxrpc_tx_fail_traces \
+       EM(rxrpc_tx_fail_call_abort,            "CallAbort") \
+       EM(rxrpc_tx_fail_call_ack,              "CallAck") \
+       EM(rxrpc_tx_fail_call_data_frag,        "CallDataFrag") \
+       EM(rxrpc_tx_fail_call_data_nofrag,      "CallDataNofrag") \
+       EM(rxrpc_tx_fail_call_final_resend,     "CallFinalResend") \
+       EM(rxrpc_tx_fail_conn_abort,            "ConnAbort") \
+       EM(rxrpc_tx_fail_conn_challenge,        "ConnChall") \
+       EM(rxrpc_tx_fail_conn_response,         "ConnResp") \
+       EM(rxrpc_tx_fail_reject,                "Reject") \
+       EM(rxrpc_tx_fail_version_keepalive,     "VerKeepalive") \
+       E_(rxrpc_tx_fail_version_reply,         "VerReply")
+
 /*
  * Export enum symbols via userspace.
  */
@@ -460,6 +488,7 @@ rxrpc_propose_ack_traces;
 rxrpc_propose_ack_outcomes;
 rxrpc_congest_modes;
 rxrpc_congest_changes;
+rxrpc_tx_fail_traces;
 
 /*
  * Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -1374,6 +1403,62 @@ TRACE_EVENT(rxrpc_resend,
                      __entry->anno)
            );
 
+TRACE_EVENT(rxrpc_rx_icmp,
+           TP_PROTO(struct rxrpc_peer *peer, struct sock_extended_err *ee,
+                    struct sockaddr_rxrpc *srx),
+
+           TP_ARGS(peer, ee, srx),
+
+           TP_STRUCT__entry(
+                   __field(unsigned int,                       peer    )
+                   __field_struct(struct sock_extended_err,    ee      )
+                   __field_struct(struct sockaddr_rxrpc,       srx     )
+                            ),
+
+           TP_fast_assign(
+                   __entry->peer = peer->debug_id;
+                   memcpy(&__entry->ee, ee, sizeof(__entry->ee));
+                   memcpy(&__entry->srx, srx, sizeof(__entry->srx));
+                          ),
+
+           TP_printk("P=%08x o=%u t=%u c=%u i=%u d=%u e=%d %pISp",
+                     __entry->peer,
+                     __entry->ee.ee_origin,
+                     __entry->ee.ee_type,
+                     __entry->ee.ee_code,
+                     __entry->ee.ee_info,
+                     __entry->ee.ee_data,
+                     __entry->ee.ee_errno,
+                     &__entry->srx.transport)
+           );
+
+TRACE_EVENT(rxrpc_tx_fail,
+           TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial, int ret,
+                    enum rxrpc_tx_fail_trace what),
+
+           TP_ARGS(debug_id, serial, ret, what),
+
+           TP_STRUCT__entry(
+                   __field(unsigned int,               debug_id        )
+                   __field(rxrpc_serial_t,             serial          )
+                   __field(int,                        ret             )
+                   __field(enum rxrpc_tx_fail_trace,   what            )
+                            ),
+
+           TP_fast_assign(
+                   __entry->debug_id = debug_id;
+                   __entry->serial = serial;
+                   __entry->ret = ret;
+                   __entry->what = what;
+                          ),
+
+           TP_printk("c=%08x r=%x ret=%d %s",
+                     __entry->debug_id,
+                     __entry->serial,
+                     __entry->ret,
+                     __print_symbolic(__entry->what, rxrpc_tx_fail_traces))
+           );
+
 #endif /* _TRACE_RXRPC_H */
 
 /* This part must be outside protection */
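For the new string tables above, note how this header consumes the EM()/E_() pairs: they are first expanded for TRACE_DEFINE_ENUM() export, then redefined into initializer entries so the whole list can be handed to __print_symbolic() in TP_printk(). A simplified sketch of that second expansion, following the pattern already used in this file:

    #undef EM
    #undef E_
    #define EM(a, b)        { a, b },
    #define E_(a, b)        { a, b }

    /* rxrpc_tx_fail_traces now expands to
     *   { rxrpc_tx_fail_call_abort, "CallAbort" }, ... { ..., "VerReply" }
     * which is the table that __print_symbolic(__entry->what,
     * rxrpc_tx_fail_traces) looks the value up in. */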
index 335d872..bbb08a3 100644 (file)
@@ -224,6 +224,8 @@ TRACE_EVENT(rpc_stats_latency,
        TP_ARGS(task, backlog, rtt, execute),
 
        TP_STRUCT__entry(
+               __field(unsigned int, task_id)
+               __field(unsigned int, client_id)
                __field(u32, xid)
                __field(int, version)
                __string(progname, task->tk_client->cl_program->name)
@@ -231,13 +233,11 @@ TRACE_EVENT(rpc_stats_latency,
                __field(unsigned long, backlog)
                __field(unsigned long, rtt)
                __field(unsigned long, execute)
-               __string(addr,
-                        task->tk_xprt->address_strings[RPC_DISPLAY_ADDR])
-               __string(port,
-                        task->tk_xprt->address_strings[RPC_DISPLAY_PORT])
        ),
 
        TP_fast_assign(
+               __entry->client_id = task->tk_client->cl_clid;
+               __entry->task_id = task->tk_pid;
                __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid);
                __entry->version = task->tk_client->cl_vers;
                __assign_str(progname, task->tk_client->cl_program->name)
@@ -245,14 +245,10 @@ TRACE_EVENT(rpc_stats_latency,
                __entry->backlog = ktime_to_us(backlog);
                __entry->rtt = ktime_to_us(rtt);
                __entry->execute = ktime_to_us(execute);
-               __assign_str(addr,
-                            task->tk_xprt->address_strings[RPC_DISPLAY_ADDR]);
-               __assign_str(port,
-                            task->tk_xprt->address_strings[RPC_DISPLAY_PORT]);
        ),
 
-       TP_printk("peer=[%s]:%s xid=0x%08x %sv%d %s backlog=%lu rtt=%lu execute=%lu",
-               __get_str(addr), __get_str(port), __entry->xid,
+       TP_printk("task:%u@%d xid=0x%08x %sv%d %s backlog=%lu rtt=%lu execute=%lu",
+               __entry->task_id, __entry->client_id, __entry->xid,
                __get_str(progname), __entry->version, __get_str(procname),
                __entry->backlog, __entry->rtt, __entry->execute)
 );
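The hunk above keys the latency event on the task and client IDs rather than the transport address strings, which may not be usable at the point the event fires. A purely hypothetical sample of the resulting output, with made-up values:

    task:1204@4 xid=0x2c8a7f31 nfsv3 LOOKUP backlog=15 rtt=187 execute=210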
index bf6f826..f8260e5 100644 (file)
@@ -257,6 +257,33 @@ TRACE_EVENT(ufshcd_command,
        )
 );
 
+TRACE_EVENT(ufshcd_upiu,
+       TP_PROTO(const char *dev_name, const char *str, void *hdr, void *tsf),
+
+       TP_ARGS(dev_name, str, hdr, tsf),
+
+       TP_STRUCT__entry(
+               __string(dev_name, dev_name)
+               __string(str, str)
+               __array(unsigned char, hdr, 12)
+               __array(unsigned char, tsf, 16)
+       ),
+
+       TP_fast_assign(
+               __assign_str(dev_name, dev_name);
+               __assign_str(str, str);
+               memcpy(__entry->hdr, hdr, sizeof(__entry->hdr));
+               memcpy(__entry->tsf, tsf, sizeof(__entry->tsf));
+       ),
+
+       TP_printk(
+               "%s: %s: HDR:%s, CDB:%s",
+               __get_str(str), __get_str(dev_name),
+               __print_hex(__entry->hdr, sizeof(__entry->hdr)),
+               __print_hex(__entry->tsf, sizeof(__entry->tsf))
+       )
+);
+
 #endif /* if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) */
 
 /* This part must be outside protection */
index 2f057a4..9a761bc 100644 (file)
@@ -25,6 +25,8 @@ DECLARE_EVENT_CLASS(workqueue_work,
        TP_printk("work struct %p", __entry->work)
 );
 
+struct pool_workqueue;
+
 /**
  * workqueue_queue_work - called when a work gets queued
  * @req_cpu:   the requested cpu
index 7dd8f34..fdcf88b 100644 (file)
@@ -352,22 +352,6 @@ DECLARE_EVENT_CLASS(xen_mmu_pgd,
 DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
 DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);
 
-TRACE_EVENT(xen_mmu_flush_tlb_all,
-           TP_PROTO(int x),
-           TP_ARGS(x),
-           TP_STRUCT__entry(__array(char, x, 0)),
-           TP_fast_assign((void)x),
-           TP_printk("%s", "")
-       );
-
-TRACE_EVENT(xen_mmu_flush_tlb,
-           TP_PROTO(int x),
-           TP_ARGS(x),
-           TP_STRUCT__entry(__array(char, x, 0)),
-           TP_fast_assign((void)x),
-           TP_printk("%s", "")
-       );
-
 TRACE_EVENT(xen_mmu_flush_tlb_one_user,
            TP_PROTO(unsigned long addr),
            TP_ARGS(addr),
index 050b92d..0fc33bf 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
 /*
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 1065006..b02c41e 100644 (file)
@@ -676,6 +676,13 @@ struct kvm_ioeventfd {
        __u8  pad[36];
 };
 
+#define KVM_X86_DISABLE_EXITS_MWAIT          (1 << 0)
+#define KVM_X86_DISABLE_EXITS_HTL            (1 << 1)
+#define KVM_X86_DISABLE_EXITS_PAUSE          (1 << 2)
+#define KVM_X86_DISABLE_VALID_EXITS          (KVM_X86_DISABLE_EXITS_MWAIT | \
+                                              KVM_X86_DISABLE_EXITS_HTL | \
+                                              KVM_X86_DISABLE_EXITS_PAUSE)
+
 /* for KVM_ENABLE_CAP */
 struct kvm_enable_cap {
        /* in */
index 15daf5e..9c36301 100644 (file)
@@ -2698,6 +2698,8 @@ enum nl80211_attrs {
 #define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
 #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
 
+#define NL80211_WIPHY_NAME_MAXLEN              128
+
 #define NL80211_MAX_SUPP_RATES                 32
 #define NL80211_MAX_SUPP_HT_RATES              77
 #define NL80211_MAX_SUPP_REG_RULES             64
index a66b213..20c6bd0 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
 /*
  * Copyright (c) 2008 Oracle.  All rights reserved.
  *
index 0f27281..6b58371 100644 (file)
@@ -780,24 +780,6 @@ enum {
        NET_BRIDGE_NF_FILTER_PPPOE_TAGGED = 5,
 };
 
-/* proc/sys/net/irda */
-enum {
-       NET_IRDA_DISCOVERY=1,
-       NET_IRDA_DEVNAME=2,
-       NET_IRDA_DEBUG=3,
-       NET_IRDA_FAST_POLL=4,
-       NET_IRDA_DISCOVERY_SLOTS=5,
-       NET_IRDA_DISCOVERY_TIMEOUT=6,
-       NET_IRDA_SLOT_TIMEOUT=7,
-       NET_IRDA_MAX_BAUD_RATE=8,
-       NET_IRDA_MIN_TX_TURN_TIME=9,
-       NET_IRDA_MAX_TX_DATA_SIZE=10,
-       NET_IRDA_MAX_TX_WINDOW=11,
-       NET_IRDA_MAX_NOREPLY_TIME=12,
-       NET_IRDA_WARN_NOREPLY_TIME=13,
-       NET_IRDA_LAP_KEEPALIVE_TIME=14,
-};
-
 
 /* CTL_FS names: */
 enum
index 16a2966..4c0338e 100644 (file)
@@ -73,7 +73,6 @@ struct __kernel_old_timeval {
  */
 #define CLOCK_SGI_CYCLE                        10
 #define CLOCK_TAI                      11
-#define CLOCK_MONOTONIC_ACTIVE         12
 
 #define MAX_CLOCKS                     16
 #define CLOCKS_MASK                    (CLOCK_REALTIME | CLOCK_MONOTONIC)
index c6633e9..ff02287 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
 /*
  * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
  *
index 40297a3..13b8cb5 100644 (file)
@@ -57,6 +57,21 @@ struct virtio_balloon_config {
 #define VIRTIO_BALLOON_S_HTLB_PGFAIL   9  /* Hugetlb page allocation failures */
 #define VIRTIO_BALLOON_S_NR       10
 
+#define VIRTIO_BALLOON_S_NAMES_WITH_PREFIX(VIRTIO_BALLOON_S_NAMES_prefix) { \
+       VIRTIO_BALLOON_S_NAMES_prefix "swap-in", \
+       VIRTIO_BALLOON_S_NAMES_prefix "swap-out", \
+       VIRTIO_BALLOON_S_NAMES_prefix "major-faults", \
+       VIRTIO_BALLOON_S_NAMES_prefix "minor-faults", \
+       VIRTIO_BALLOON_S_NAMES_prefix "free-memory", \
+       VIRTIO_BALLOON_S_NAMES_prefix "total-memory", \
+       VIRTIO_BALLOON_S_NAMES_prefix "available-memory", \
+       VIRTIO_BALLOON_S_NAMES_prefix "disk-caches", \
+       VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-allocations", \
+       VIRTIO_BALLOON_S_NAMES_prefix "hugetlb-failures" \
+}
+
+#define VIRTIO_BALLOON_S_NAMES VIRTIO_BALLOON_S_NAMES_WITH_PREFIX("")
+
 /*
  * Memory statistics structure.
  * Driver fills an array of these structures and passes to device.
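As a quick illustration of the prefix form added above (the array name and prefix here are hypothetical; the macro is the one from the hunk):

    /* Expands to { "bln-swap-in", "bln-swap-out", ..., "bln-hugetlb-failures" };
     * VIRTIO_BALLOON_S_NAMES is the same table with an empty prefix. */
    static const char *const balloon_stat_names[] =
            VIRTIO_BALLOON_S_NAMES_WITH_PREFIX("bln-");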
index 9acb4b7..85aed67 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
 /*
  * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
  *
index 1fefd01..a159ba8 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
 /*
  * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
  *
index 7092c8d..78613b6 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
 /*
  * Copyright (c) 2016 Hisilicon Limited.
  *
index 4a8f956..e2709bb 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
 /*
  * Copyright (c) 2005 Topspin Communications.  All rights reserved.
  * Copyright (c) 2005 Intel Corporation.  All rights reserved.
index 04e46ea..625545d 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
 /*
  * Copyright (c) 2017-2018, Mellanox Technologies inc.  All rights reserved.
  *
index ef92118..90c0cf2 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
 /*
  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
  * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
index 0d2607f..435155d 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
 /*
  * Copyright (c) 2005 Intel Corporation.  All rights reserved.
  *
index 9be0739..6aeb033 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
 /*
  * Copyright (c) 2005 Topspin Communications.  All rights reserved.
  * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
index 04f64bc..f745575 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
 /*
  * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
  * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
index cb4a02c..fdaf00e 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
 /*
  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
  *
index ac756cd..91b12e1 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
 /*
  * Copyright (c) 2005 Topspin Communications.  All rights reserved.
  * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
index 35bfd40..f80495b 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
 /*
  * Copyright (c) 2006 - 2011 Intel Corporation.  All rights reserved.
  * Copyright (c) 2005 Topspin Communications.  All rights reserved.
index 8ba0989..24c658b 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
 /* QLogic qedr NIC Driver
  * Copyright (c) 2015-2016  QLogic Corporation
  *
index e126902..0d1e78e 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
 /*
  * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
  *
index d223f41..d92d272 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
 /*
  * Copyright (c) 2016 Mellanox Technologies, LTD. All rights reserved.
  *
index 1f8a9e7..44ef6a3 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
 /*
  * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
  *
index b795aa3..fd37315 100644 (file)
@@ -423,7 +423,7 @@ static noinline void __ref rest_init(void)
 
        /*
         * Enable might_sleep() and smp_processor_id() checks.
-        * They cannot be enabled earlier because with CONFIG_PRREMPT=y
+        * They cannot be enabled earlier because with CONFIG_PREEMPT=y
         * kernel_thread() would trigger might_sleep() splats. With
         * CONFIG_PREEMPT_VOLUNTARY=y the init task might have scheduled
         * already, but it's stuck on the kthreadd_done completion.
@@ -1034,6 +1034,13 @@ __setup("rodata=", set_debug_rodata);
 static void mark_readonly(void)
 {
        if (rodata_enabled) {
+               /*
+                * load_module() results in W+X mappings, which are cleaned up
+                * with call_rcu_sched().  Let's make sure that queued work is
+                * flushed so that we don't hit false positives looking for
+                * insecure pages which are W+X.
+                */
+               rcu_barrier_sched();
                mark_rodata_ro();
                rodata_test();
        } else
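In sketch form, the ordering the new comment (and the matching module-loader comment later in this series) relies on, with an illustrative timeline only:

    /*
     *  load_module():                       mark_readonly():
     *    creates W+X module mappings
     *    call_rcu_sched(.., do_free_init)     rcu_barrier_sched()
     *      cleanup queued, maybe pending        waits for queued sched-RCU work
     *                                         mark_rodata_ro()
     *                                         rodata_test()  - no stale W+X left
     */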
index 14750e7..027107f 100644 (file)
@@ -476,7 +476,7 @@ static u32 prog_fd_array_sys_lookup_elem(void *ptr)
 }
 
 /* decrement refcnt of all bpf_progs that are stored in this map */
-void bpf_fd_array_map_clear(struct bpf_map *map)
+static void bpf_fd_array_map_clear(struct bpf_map *map)
 {
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        int i;
@@ -495,6 +495,7 @@ const struct bpf_map_ops prog_array_map_ops = {
        .map_fd_get_ptr = prog_fd_array_get_ptr,
        .map_fd_put_ptr = prog_fd_array_put_ptr,
        .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
+       .map_release_uref = bpf_fd_array_map_clear,
 };
 
 static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
index d315b39..ba03ec3 100644 (file)
@@ -1572,13 +1572,32 @@ int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
        return cnt;
 }
 
+static bool bpf_prog_array_copy_core(struct bpf_prog **prog,
+                                    u32 *prog_ids,
+                                    u32 request_cnt)
+{
+       int i = 0;
+
+       for (; *prog; prog++) {
+               if (*prog == &dummy_bpf_prog.prog)
+                       continue;
+               prog_ids[i] = (*prog)->aux->id;
+               if (++i == request_cnt) {
+                       prog++;
+                       break;
+               }
+       }
+
+       return !!(*prog);
+}
+
 int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
                                __u32 __user *prog_ids, u32 cnt)
 {
        struct bpf_prog **prog;
        unsigned long err = 0;
-       u32 i = 0, *ids;
        bool nospc;
+       u32 *ids;
 
        /* users of this function are doing:
         * cnt = bpf_prog_array_length();
@@ -1595,16 +1614,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
                return -ENOMEM;
        rcu_read_lock();
        prog = rcu_dereference(progs)->progs;
-       for (; *prog; prog++) {
-               if (*prog == &dummy_bpf_prog.prog)
-                       continue;
-               ids[i] = (*prog)->aux->id;
-               if (++i == cnt) {
-                       prog++;
-                       break;
-               }
-       }
-       nospc = !!(*prog);
+       nospc = bpf_prog_array_copy_core(prog, ids, cnt);
        rcu_read_unlock();
        err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
        kfree(ids);
@@ -1683,22 +1693,25 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
 }
 
 int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
-                            __u32 __user *prog_ids, u32 request_cnt,
-                            __u32 __user *prog_cnt)
+                            u32 *prog_ids, u32 request_cnt,
+                            u32 *prog_cnt)
 {
+       struct bpf_prog **prog;
        u32 cnt = 0;
 
        if (array)
                cnt = bpf_prog_array_length(array);
 
-       if (copy_to_user(prog_cnt, &cnt, sizeof(cnt)))
-               return -EFAULT;
+       *prog_cnt = cnt;
 
        /* return early if user requested only program count or nothing to copy */
        if (!request_cnt || !cnt)
                return 0;
 
-       return bpf_prog_array_copy_to_user(array, prog_ids, request_cnt);
+       /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
+       prog = rcu_dereference_check(array, 1)->progs;
+       return bpf_prog_array_copy_core(prog, prog_ids, request_cnt) ? -ENOSPC
+                                                                    : 0;
 }
 
 static void bpf_prog_free_deferred(struct work_struct *work)
index 8dd9210..098eca5 100644 (file)
@@ -43,6 +43,7 @@
 #include <net/tcp.h>
 #include <linux/ptr_ring.h>
 #include <net/inet_common.h>
+#include <linux/sched/signal.h>
 
 #define SOCK_CREATE_FLAG_MASK \
        (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
@@ -325,6 +326,9 @@ retry:
                        if (ret > 0) {
                                if (apply)
                                        apply_bytes -= ret;
+
+                               sg->offset += ret;
+                               sg->length -= ret;
                                size -= ret;
                                offset += ret;
                                if (uncharge)
@@ -332,8 +336,6 @@ retry:
                                goto retry;
                        }
 
-                       sg->length = size;
-                       sg->offset = offset;
                        return ret;
                }
 
@@ -391,7 +393,8 @@ static void return_mem_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
        } while (i != md->sg_end);
 }
 
-static void free_bytes_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
+static void free_bytes_sg(struct sock *sk, int bytes,
+                         struct sk_msg_buff *md, bool charge)
 {
        struct scatterlist *sg = md->sg_data;
        int i = md->sg_start, free;
@@ -401,11 +404,13 @@ static void free_bytes_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
                if (bytes < free) {
                        sg[i].length -= bytes;
                        sg[i].offset += bytes;
-                       sk_mem_uncharge(sk, bytes);
+                       if (charge)
+                               sk_mem_uncharge(sk, bytes);
                        break;
                }
 
-               sk_mem_uncharge(sk, sg[i].length);
+               if (charge)
+                       sk_mem_uncharge(sk, sg[i].length);
                put_page(sg_page(&sg[i]));
                bytes -= sg[i].length;
                sg[i].length = 0;
@@ -416,6 +421,7 @@ static void free_bytes_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
                if (i == MAX_SKB_FRAGS)
                        i = 0;
        }
+       md->sg_start = i;
 }
 
 static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
@@ -523,8 +529,6 @@ static int bpf_tcp_ingress(struct sock *sk, int apply_bytes,
        i = md->sg_start;
 
        do {
-               r->sg_data[i] = md->sg_data[i];
-
                size = (apply && apply_bytes < md->sg_data[i].length) ?
                        apply_bytes : md->sg_data[i].length;
 
@@ -535,6 +539,7 @@ static int bpf_tcp_ingress(struct sock *sk, int apply_bytes,
                }
 
                sk_mem_charge(sk, size);
+               r->sg_data[i] = md->sg_data[i];
                r->sg_data[i].length = size;
                md->sg_data[i].length -= size;
                md->sg_data[i].offset += size;
@@ -575,10 +580,10 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
                                       struct sk_msg_buff *md,
                                       int flags)
 {
+       bool ingress = !!(md->flags & BPF_F_INGRESS);
        struct smap_psock *psock;
        struct scatterlist *sg;
-       int i, err, free = 0;
-       bool ingress = !!(md->flags & BPF_F_INGRESS);
+       int err = 0;
 
        sg = md->sg_data;
 
@@ -606,16 +611,8 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
 out_rcu:
        rcu_read_unlock();
 out:
-       i = md->sg_start;
-       while (sg[i].length) {
-               free += sg[i].length;
-               put_page(sg_page(&sg[i]));
-               sg[i].length = 0;
-               i++;
-               if (i == MAX_SKB_FRAGS)
-                       i = 0;
-       }
-       return free;
+       free_bytes_sg(NULL, send, md, false);
+       return err;
 }
 
 static inline void bpf_md_init(struct smap_psock *psock)
@@ -700,19 +697,26 @@ more_data:
                err = bpf_tcp_sendmsg_do_redirect(redir, send, m, flags);
                lock_sock(sk);
 
+               if (unlikely(err < 0)) {
+                       free_start_sg(sk, m);
+                       psock->sg_size = 0;
+                       if (!cork)
+                               *copied -= send;
+               } else {
+                       psock->sg_size -= send;
+               }
+
                if (cork) {
                        free_start_sg(sk, m);
+                       psock->sg_size = 0;
                        kfree(m);
                        m = NULL;
+                       err = 0;
                }
-               if (unlikely(err))
-                       *copied -= err;
-               else
-                       psock->sg_size -= send;
                break;
        case __SK_DROP:
        default:
-               free_bytes_sg(sk, send, m);
+               free_bytes_sg(sk, send, m, true);
                apply_bytes_dec(psock, send);
                *copied -= send;
                psock->sg_size -= send;
@@ -732,6 +736,26 @@ out_err:
        return err;
 }
 
+static int bpf_wait_data(struct sock *sk,
+                        struct smap_psock *psk, int flags,
+                        long timeo, int *err)
+{
+       int rc;
+
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
+
+       add_wait_queue(sk_sleep(sk), &wait);
+       sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+       rc = sk_wait_event(sk, &timeo,
+                          !list_empty(&psk->ingress) ||
+                          !skb_queue_empty(&sk->sk_receive_queue),
+                          &wait);
+       sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+       remove_wait_queue(sk_sleep(sk), &wait);
+
+       return rc;
+}
+
 static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                           int nonblock, int flags, int *addr_len)
 {
@@ -755,6 +779,7 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
 
        lock_sock(sk);
+bytes_ready:
        while (copied != len) {
                struct scatterlist *sg;
                struct sk_msg_buff *md;
@@ -809,6 +834,28 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                }
        }
 
+       if (!copied) {
+               long timeo;
+               int data;
+               int err = 0;
+
+               timeo = sock_rcvtimeo(sk, nonblock);
+               data = bpf_wait_data(sk, psock, flags, timeo, &err);
+
+               if (data) {
+                       if (!skb_queue_empty(&sk->sk_receive_queue)) {
+                               release_sock(sk);
+                               smap_release_sock(psock, sk);
+                               copied = tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
+                               return copied;
+                       }
+                       goto bytes_ready;
+               }
+
+               if (err)
+                       copied = err;
+       }
+
        release_sock(sk);
        smap_release_sock(psock, sk);
        return copied;
@@ -1442,9 +1489,6 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
            attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
                return ERR_PTR(-EINVAL);
 
-       if (attr->value_size > KMALLOC_MAX_SIZE)
-               return ERR_PTR(-E2BIG);
-
        err = bpf_tcp_ulp_register();
        if (err && err != -EEXIST)
                return ERR_PTR(err);
@@ -1834,7 +1878,7 @@ static int sock_map_update_elem(struct bpf_map *map,
        return err;
 }
 
-static void sock_map_release(struct bpf_map *map, struct file *map_file)
+static void sock_map_release(struct bpf_map *map)
 {
        struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
        struct bpf_prog *orig;
@@ -1858,7 +1902,7 @@ const struct bpf_map_ops sock_map_ops = {
        .map_get_next_key = sock_map_get_next_key,
        .map_update_elem = sock_map_update_elem,
        .map_delete_elem = sock_map_delete_elem,
-       .map_release = sock_map_release,
+       .map_release_uref = sock_map_release,
 };
 
 BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
index 4ca46df..016ef90 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/cred.h>
 #include <linux/timekeeping.h>
 #include <linux/ctype.h>
+#include <linux/nospec.h>
 
 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
                           (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
@@ -102,12 +103,14 @@ const struct bpf_map_ops bpf_map_offload_ops = {
 static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
 {
        const struct bpf_map_ops *ops;
+       u32 type = attr->map_type;
        struct bpf_map *map;
        int err;
 
-       if (attr->map_type >= ARRAY_SIZE(bpf_map_types))
+       if (type >= ARRAY_SIZE(bpf_map_types))
                return ERR_PTR(-EINVAL);
-       ops = bpf_map_types[attr->map_type];
+       type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
+       ops = bpf_map_types[type];
        if (!ops)
                return ERR_PTR(-EINVAL);
 
@@ -122,7 +125,7 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
        if (IS_ERR(map))
                return map;
        map->ops = ops;
-       map->map_type = attr->map_type;
+       map->map_type = type;
        return map;
 }
 
@@ -257,8 +260,8 @@ static void bpf_map_free_deferred(struct work_struct *work)
 static void bpf_map_put_uref(struct bpf_map *map)
 {
        if (atomic_dec_and_test(&map->usercnt)) {
-               if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
-                       bpf_fd_array_map_clear(map);
+               if (map->ops->map_release_uref)
+                       map->ops->map_release_uref(map);
        }
 }
 
@@ -871,11 +874,17 @@ static const struct bpf_prog_ops * const bpf_prog_types[] = {
 
 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
 {
-       if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
+       const struct bpf_prog_ops *ops;
+
+       if (type >= ARRAY_SIZE(bpf_prog_types))
+               return -EINVAL;
+       type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
+       ops = bpf_prog_types[type];
+       if (!ops)
                return -EINVAL;
 
        if (!bpf_prog_is_dev_bound(prog->aux))
-               prog->aux->ops = bpf_prog_types[type];
+               prog->aux->ops = ops;
        else
                prog->aux->ops = &bpf_offload_prog_ops;
        prog->type = type;
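Both call sites changed above apply the same Spectre-v1 hardening: bounds-check first, then clamp the index under speculation with array_index_nospec() before it indexes the ops table. A stand-alone sketch of that pattern (names here are illustrative, not from the patch):

    #include <linux/kernel.h>
    #include <linux/nospec.h>

    static const void *table[16];

    static const void *lookup(u32 idx)
    {
            if (idx >= ARRAY_SIZE(table))
                    return NULL;
            /* Clamp idx so a mispredicted bounds check cannot be used to
             * speculatively read past the end of table[]. */
            idx = array_index_nospec(idx, ARRAY_SIZE(table));
            return table[idx];
    }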
index 6d21894..92d8c98 100644 (file)
@@ -34,6 +34,7 @@ int compat_get_timex(struct timex *txc, const struct compat_timex __user *utp)
 {
        struct compat_timex tx32;
 
+       memset(txc, 0, sizeof(struct timex));
        if (copy_from_user(&tx32, utp, sizeof(struct compat_timex)))
                return -EFAULT;
 
index 6c6b3c4..1d8ca9e 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/circ_buf.h>
 #include <linux/poll.h>
+#include <linux/nospec.h>
 
 #include "internal.h"
 
@@ -867,8 +868,10 @@ perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
                        return NULL;
 
                /* AUX space */
-               if (pgoff >= rb->aux_pgoff)
-                       return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]);
+               if (pgoff >= rb->aux_pgoff) {
+                       int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
+                       return virt_to_page(rb->aux_pages[aux_pgoff]);
+               }
        }
 
        return __perf_mmap_to_page(rb, pgoff);
index ce6848e..1725b90 100644 (file)
@@ -491,7 +491,7 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
        if (!uprobe)
                return NULL;
 
-       uprobe->inode = igrab(inode);
+       uprobe->inode = inode;
        uprobe->offset = offset;
        init_rwsem(&uprobe->register_rwsem);
        init_rwsem(&uprobe->consumer_rwsem);
@@ -502,7 +502,6 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
        if (cur_uprobe) {
                kfree(uprobe);
                uprobe = cur_uprobe;
-               iput(inode);
        }
 
        return uprobe;
@@ -701,7 +700,6 @@ static void delete_uprobe(struct uprobe *uprobe)
        rb_erase(&uprobe->rb_node, &uprobes_tree);
        spin_unlock(&uprobes_treelock);
        RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
-       iput(uprobe->inode);
        put_uprobe(uprobe);
 }
 
@@ -873,7 +871,8 @@ static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *u
  * tuple).  Creation refcount stops uprobe_unregister from freeing the
  * @uprobe even before the register operation is complete. Creation
  * refcount is released when the last @uc for the @uprobe
- * unregisters.
+ * unregisters. Caller of uprobe_register() is required to keep @inode
+ * (and the containing mount) referenced.
  *
  * Return errno if it cannot successfully install probes
  * else return 0 (success)
index 102160f..ea61902 100644 (file)
@@ -2428,7 +2428,7 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
        struct kprobe_blacklist_entry *ent =
                list_entry(v, struct kprobe_blacklist_entry, list);
 
-       seq_printf(m, "0x%p-0x%p\t%ps\n", (void *)ent->start_addr,
+       seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
                   (void *)ent->end_addr, (void *)ent->start_addr);
        return 0;
 }
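The %p to %px switch above matters because plain %p has printed a hashed value since the pointer-hashing work, while the blacklist file is root-only debug output that needs real addresses. As a reminder of the conventions this relies on (summarised, not taken from the patch):

    #include <linux/printk.h>

    static void show_range(void *start, void *end)
    {
            /* %p  - hashed pointer value, safe for general logs
             * %pK - honours kptr_restrict (may print zeros)
             * %px - raw address; reserved for root-only debug interfaces
             *       such as the kprobe blacklist above */
            pr_info("range 0x%px-0x%px\n", start, end);
    }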
index cd50e99..2017a39 100644 (file)
@@ -55,7 +55,6 @@ enum KTHREAD_BITS {
        KTHREAD_IS_PER_CPU = 0,
        KTHREAD_SHOULD_STOP,
        KTHREAD_SHOULD_PARK,
-       KTHREAD_IS_PARKED,
 };
 
 static inline void set_kthread_struct(void *kthread)
@@ -177,14 +176,12 @@ void *kthread_probe_data(struct task_struct *task)
 
 static void __kthread_parkme(struct kthread *self)
 {
-       __set_current_state(TASK_PARKED);
-       while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
-               if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
-                       complete(&self->parked);
+       for (;;) {
+               set_current_state(TASK_PARKED);
+               if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
+                       break;
                schedule();
-               __set_current_state(TASK_PARKED);
        }
-       clear_bit(KTHREAD_IS_PARKED, &self->flags);
        __set_current_state(TASK_RUNNING);
 }
 
@@ -194,6 +191,11 @@ void kthread_parkme(void)
 }
 EXPORT_SYMBOL_GPL(kthread_parkme);
 
+void kthread_park_complete(struct task_struct *k)
+{
+       complete(&to_kthread(k)->parked);
+}
+
 static int kthread(void *_create)
 {
        /* Copy data: it's on kthread's stack */
@@ -450,22 +452,15 @@ void kthread_unpark(struct task_struct *k)
 {
        struct kthread *kthread = to_kthread(k);
 
-       clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
        /*
-        * We clear the IS_PARKED bit here as we don't wait
-        * until the task has left the park code. So if we'd
-        * park before that happens we'd see the IS_PARKED bit
-        * which might be about to be cleared.
+        * Newly created kthread was parked when the CPU was offline.
+        * The binding was lost and we need to set it again.
         */
-       if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
-               /*
-                * Newly created kthread was parked when the CPU was offline.
-                * The binding was lost and we need to set it again.
-                */
-               if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
-                       __kthread_bind(k, kthread->cpu, TASK_PARKED);
-               wake_up_state(k, TASK_PARKED);
-       }
+       if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
+               __kthread_bind(k, kthread->cpu, TASK_PARKED);
+
+       clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+       wake_up_state(k, TASK_PARKED);
 }
 EXPORT_SYMBOL_GPL(kthread_unpark);
 
@@ -488,12 +483,13 @@ int kthread_park(struct task_struct *k)
        if (WARN_ON(k->flags & PF_EXITING))
                return -ENOSYS;
 
-       if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
-               set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
-               if (k != current) {
-                       wake_up_process(k);
-                       wait_for_completion(&kthread->parked);
-               }
+       if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
+               return -EBUSY;
+
+       set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+       if (k != current) {
+               wake_up_process(k);
+               wait_for_completion(&kthread->parked);
        }
 
        return 0;
index a6e43a5..c9bea7f 100644 (file)
@@ -1472,7 +1472,8 @@ static ssize_t module_sect_show(struct module_attribute *mattr,
 {
        struct module_sect_attr *sattr =
                container_of(mattr, struct module_sect_attr, mattr);
-       return sprintf(buf, "0x%pK\n", (void *)sattr->address);
+       return sprintf(buf, "0x%px\n", kptr_restrict < 2 ?
+                      (void *)sattr->address : NULL);
 }
 
 static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
@@ -3516,6 +3517,11 @@ static noinline int do_init_module(struct module *mod)
         * walking this with preempt disabled.  In all the failure paths, we
         * call synchronize_sched(), but we don't want to slow down the success
         * path, so use actual RCU here.
+        * Note that module_alloc() on most architectures creates W+X page
+        * mappings which won't be cleaned up until do_free_init() runs.  Any
+        * code such as mark_rodata_ro() which depends on those mappings to
+        * be cleaned up needs to sync with the queued work - ie
+        * rcu_barrier_sched()
         */
        call_rcu_sched(&freeinit->rcu, do_free_init);
        mutex_unlock(&module_mutex);
index 6be6c57..2d4ff53 100644 (file)
@@ -2,6 +2,7 @@
 /*
  * Auto-group scheduling implementation:
  */
+#include <linux/nospec.h>
 #include "sched.h"
 
 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
@@ -209,7 +210,7 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
        static unsigned long next = INITIAL_JIFFIES;
        struct autogroup *ag;
        unsigned long shares;
-       int err;
+       int err, idx;
 
        if (nice < MIN_NICE || nice > MAX_NICE)
                return -EINVAL;
@@ -227,7 +228,9 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
 
        next = HZ / 10 + jiffies;
        ag = autogroup_task_get(p);
-       shares = scale_load(sched_prio_to_weight[nice + 20]);
+
+       idx = array_index_nospec(nice + 20, 40);
+       shares = scale_load(sched_prio_to_weight[idx]);
 
        down_write(&ag->lock);
        err = sched_group_set_shares(ag->tg, shares);
index 5e10aae..092f7c4 100644 (file)
@@ -7,6 +7,9 @@
  */
 #include "sched.h"
 
+#include <linux/kthread.h>
+#include <linux/nospec.h>
+
 #include <asm/switch_to.h>
 #include <asm/tlb.h>
 
@@ -2718,20 +2721,28 @@ static struct rq *finish_task_switch(struct task_struct *prev)
                membarrier_mm_sync_core_before_usermode(mm);
                mmdrop(mm);
        }
-       if (unlikely(prev_state == TASK_DEAD)) {
-               if (prev->sched_class->task_dead)
-                       prev->sched_class->task_dead(prev);
+       if (unlikely(prev_state & (TASK_DEAD|TASK_PARKED))) {
+               switch (prev_state) {
+               case TASK_DEAD:
+                       if (prev->sched_class->task_dead)
+                               prev->sched_class->task_dead(prev);
 
-               /*
-                * Remove function-return probe instances associated with this
-                * task and put them back on the free list.
-                */
-               kprobe_flush_task(prev);
+                       /*
+                        * Remove function-return probe instances associated with this
+                        * task and put them back on the free list.
+                        */
+                       kprobe_flush_task(prev);
+
+                       /* Task is done with its stack. */
+                       put_task_stack(prev);
 
-               /* Task is done with its stack. */
-               put_task_stack(prev);
+                       put_task_struct(prev);
+                       break;
 
-               put_task_struct(prev);
+               case TASK_PARKED:
+                       kthread_park_complete(prev);
+                       break;
+               }
        }
 
        tick_nohz_task_switch();
@@ -3498,23 +3509,8 @@ static void __sched notrace __schedule(bool preempt)
 
 void __noreturn do_task_dead(void)
 {
-       /*
-        * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
-        * when the following two conditions become true.
-        *   - There is race condition of mmap_sem (It is acquired by
-        *     exit_mm()), and
-        *   - SMI occurs before setting TASK_RUNINNG.
-        *     (or hypervisor of virtual machine switches to other guest)
-        *  As a result, we may become TASK_RUNNING after becoming TASK_DEAD
-        *
-        * To avoid it, we have to wait for releasing tsk->pi_lock which
-        * is held by try_to_wake_up()
-        */
-       raw_spin_lock_irq(&current->pi_lock);
-       raw_spin_unlock_irq(&current->pi_lock);
-
        /* Causes final put_task_struct in finish_task_switch(): */
-       __set_current_state(TASK_DEAD);
+       set_special_state(TASK_DEAD);
 
        /* Tell freezer to ignore us: */
        current->flags |= PF_NOFREEZE;
@@ -6928,11 +6924,15 @@ static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
                                     struct cftype *cft, s64 nice)
 {
        unsigned long weight;
+       int idx;
 
        if (nice < MIN_NICE || nice > MAX_NICE)
                return -ERANGE;
 
-       weight = sched_prio_to_weight[NICE_TO_PRIO(nice) - MAX_RT_PRIO];
+       idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
+       idx = array_index_nospec(idx, 40);
+       weight = sched_prio_to_weight[idx];
+
        return sched_group_set_shares(css_tg(css), scale_load(weight));
 }
 #endif
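Several hunks in this series (TASK_DEAD above, TASK_STOPPED/TASK_TRACED in the signal changes further down) move to set_special_state(). Simplified from the helper the series adds to <linux/sched.h>, it writes the state under ->pi_lock so a concurrent try_to_wake_up() cannot overwrite it with TASK_RUNNING, which is why the explicit pi_lock lock/unlock dance in do_task_dead() can be dropped:

    #define set_special_state(state_value)                                  \
            do {                                                            \
                    unsigned long flags;    /* may shadow */                \
                    raw_spin_lock_irqsave(&current->pi_lock, flags);        \
                    current->state = (state_value);                         \
                    raw_spin_unlock_irqrestore(&current->pi_lock, flags);   \
            } while (0)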
index d2c6083..e13df95 100644 (file)
@@ -305,7 +305,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
         * Do not reduce the frequency if the CPU has not been idle
         * recently, as the reduction is likely to be premature then.
         */
-       if (busy && next_f < sg_policy->next_freq) {
+       if (busy && next_f < sg_policy->next_freq &&
+           sg_policy->next_freq != UINT_MAX) {
                next_f = sg_policy->next_freq;
 
                /* Reset cached freq as next_freq has changed */
@@ -396,19 +397,6 @@ static void sugov_irq_work(struct irq_work *irq_work)
 
        sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
 
-       /*
-        * For RT tasks, the schedutil governor shoots the frequency to maximum.
-        * Special care must be taken to ensure that this kthread doesn't result
-        * in the same behavior.
-        *
-        * This is (mostly) guaranteed by the work_in_progress flag. The flag is
-        * updated only at the end of the sugov_work() function and before that
-        * the schedutil governor rejects all other frequency scaling requests.
-        *
-        * There is a very rare case though, where the RT thread yields right
-        * after the work_in_progress flag is cleared. The effects of that are
-        * neglected for now.
-        */
        kthread_queue_work(&sg_policy->worker, &sg_policy->work);
 }
 
index 54dc31e..79f574d 100644 (file)
@@ -1854,7 +1854,6 @@ static int task_numa_migrate(struct task_struct *p)
 static void numa_migrate_preferred(struct task_struct *p)
 {
        unsigned long interval = HZ;
-       unsigned long numa_migrate_retry;
 
        /* This task has no NUMA fault statistics yet */
        if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
@@ -1862,18 +1861,7 @@ static void numa_migrate_preferred(struct task_struct *p)
 
        /* Periodically retry migrating the task to the preferred node */
        interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
-       numa_migrate_retry = jiffies + interval;
-
-       /*
-        * Check that the new retry threshold is after the current one. If
-        * the retry is in the future, it implies that wake_affine has
-        * temporarily asked NUMA balancing to backoff from placement.
-        */
-       if (numa_migrate_retry > p->numa_migrate_retry)
-               return;
-
-       /* Safe to try placing the task on the preferred node */
-       p->numa_migrate_retry = numa_migrate_retry;
+       p->numa_migrate_retry = jiffies + interval;
 
        /* Success if task is already running on preferred CPU */
        if (task_node(p) == p->numa_preferred_nid)
@@ -5922,48 +5910,6 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
        return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits;
 }
 
-#ifdef CONFIG_NUMA_BALANCING
-static void
-update_wa_numa_placement(struct task_struct *p, int prev_cpu, int target)
-{
-       unsigned long interval;
-
-       if (!static_branch_likely(&sched_numa_balancing))
-               return;
-
-       /* If balancing has no preference then continue gathering data */
-       if (p->numa_preferred_nid == -1)
-               return;
-
-       /*
-        * If the wakeup is not affecting locality then it is neutral from
-        * the perspective of NUMA balacing so continue gathering data.
-        */
-       if (cpu_to_node(prev_cpu) == cpu_to_node(target))
-               return;
-
-       /*
-        * Temporarily prevent NUMA balancing trying to place waker/wakee after
-        * wakee has been moved by wake_affine. This will potentially allow
-        * related tasks to converge and update their data placement. The
-        * 4 * numa_scan_period is to allow the two-pass filter to migrate
-        * hot data to the wakers node.
-        */
-       interval = max(sysctl_numa_balancing_scan_delay,
-                        p->numa_scan_period << 2);
-       p->numa_migrate_retry = jiffies + msecs_to_jiffies(interval);
-
-       interval = max(sysctl_numa_balancing_scan_delay,
-                        current->numa_scan_period << 2);
-       current->numa_migrate_retry = jiffies + msecs_to_jiffies(interval);
-}
-#else
-static void
-update_wa_numa_placement(struct task_struct *p, int prev_cpu, int target)
-{
-}
-#endif
-
 static int wake_affine(struct sched_domain *sd, struct task_struct *p,
                       int this_cpu, int prev_cpu, int sync)
 {
@@ -5979,7 +5925,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
        if (target == nr_cpumask_bits)
                return prev_cpu;
 
-       update_wa_numa_placement(p, prev_cpu, target);
        schedstat_inc(sd->ttwu_move_affine);
        schedstat_inc(p->se.statistics.nr_wakeups_affine);
        return target;
@@ -9847,6 +9792,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
        if (curr_cost > this_rq->max_idle_balance_cost)
                this_rq->max_idle_balance_cost = curr_cost;
 
+out:
        /*
         * While browsing the domains, we released the rq lock, a task could
         * have been enqueued in the meantime. Since we're not going idle,
@@ -9855,7 +9801,6 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
        if (this_rq->cfs.h_nr_running && !pulled_task)
                pulled_task = 1;
 
-out:
        /* Move the next balance forward */
        if (time_after(this_rq->next_balance, next_balance))
                this_rq->next_balance = next_balance;
index d4ccea5..9c33163 100644 (file)
@@ -1961,14 +1961,27 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
                        return;
        }
 
+       set_special_state(TASK_TRACED);
+
        /*
         * We're committing to trapping.  TRACED should be visible before
         * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
         * Also, transition to TRACED and updates to ->jobctl should be
         * atomic with respect to siglock and should be done after the arch
         * hook as siglock is released and regrabbed across it.
+        *
+        *     TRACER                               TRACEE
+        *
+        *     ptrace_attach()
+        * [L]   wait_on_bit(JOBCTL_TRAPPING)   [S] set_special_state(TRACED)
+        *     do_wait()
+        *       set_current_state()                smp_wmb();
+        *       ptrace_do_wait()
+        *         wait_task_stopped()
+        *           task_stopped_code()
+        * [L]         task_is_traced()         [S] task_clear_jobctl_trapping();
         */
-       set_current_state(TASK_TRACED);
+       smp_wmb();
 
        current->last_siginfo = info;
        current->exit_code = exit_code;
@@ -2176,7 +2189,7 @@ static bool do_signal_stop(int signr)
                if (task_participate_group_stop(current))
                        notify = CLD_STOPPED;
 
-               __set_current_state(TASK_STOPPED);
+               set_special_state(TASK_STOPPED);
                spin_unlock_irq(&current->sighand->siglock);
 
                /*
index b759126..64c0291 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/smpboot.h>
 #include <linux/atomic.h>
 #include <linux/nmi.h>
+#include <linux/sched/wake_q.h>
 
 /*
  * Structure to determine completion condition and record errors.  May
@@ -65,27 +66,31 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done)
 }
 
 static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
-                                       struct cpu_stop_work *work)
+                                       struct cpu_stop_work *work,
+                                       struct wake_q_head *wakeq)
 {
        list_add_tail(&work->list, &stopper->works);
-       wake_up_process(stopper->thread);
+       wake_q_add(wakeq, stopper->thread);
 }
 
 /* queue @work to @stopper.  if offline, @work is completed immediately */
 static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
 {
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+       DEFINE_WAKE_Q(wakeq);
        unsigned long flags;
        bool enabled;
 
        spin_lock_irqsave(&stopper->lock, flags);
        enabled = stopper->enabled;
        if (enabled)
-               __cpu_stop_queue_work(stopper, work);
+               __cpu_stop_queue_work(stopper, work, &wakeq);
        else if (work->done)
                cpu_stop_signal_done(work->done);
        spin_unlock_irqrestore(&stopper->lock, flags);
 
+       wake_up_q(&wakeq);
+
        return enabled;
 }
 
@@ -229,6 +234,7 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
 {
        struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
        struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
+       DEFINE_WAKE_Q(wakeq);
        int err;
 retry:
        spin_lock_irq(&stopper1->lock);
@@ -252,8 +258,8 @@ retry:
                        goto unlock;
 
        err = 0;
-       __cpu_stop_queue_work(stopper1, work1);
-       __cpu_stop_queue_work(stopper2, work2);
+       __cpu_stop_queue_work(stopper1, work1, &wakeq);
+       __cpu_stop_queue_work(stopper2, work2, &wakeq);
 unlock:
        spin_unlock(&stopper2->lock);
        spin_unlock_irq(&stopper1->lock);
@@ -263,6 +269,9 @@ unlock:
                        cpu_relax();
                goto retry;
        }
+
+       wake_up_q(&wakeq);
+
        return err;
 }
 /**
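Both queueing paths above now follow the usual wake_q pattern: record the wakeup while the stopper lock is held and only issue it after the lock is released, so the stopper thread is never woken from under that lock. The generic shape, with illustrative names:

    #include <linux/list.h>
    #include <linux/sched/wake_q.h>
    #include <linux/spinlock.h>

    static void queue_and_wake(spinlock_t *lock, struct list_head *works,
                               struct list_head *item, struct task_struct *thread)
    {
            DEFINE_WAKE_Q(wakeq);
            unsigned long flags;

            spin_lock_irqsave(lock, flags);
            list_add_tail(item, works);
            wake_q_add(&wakeq, thread);     /* defers: only records the task */
            spin_unlock_irqrestore(lock, flags);

            wake_up_q(&wakeq);              /* the real wake_up_process() happens here */
    }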
index e8c0dab..07148b4 100644 (file)
@@ -704,24 +704,6 @@ static const struct bin_table bin_net_netfilter_table[] = {
        {}
 };
 
-static const struct bin_table bin_net_irda_table[] = {
-       { CTL_INT,      NET_IRDA_DISCOVERY,             "discovery" },
-       { CTL_STR,      NET_IRDA_DEVNAME,               "devname" },
-       { CTL_INT,      NET_IRDA_DEBUG,                 "debug" },
-       { CTL_INT,      NET_IRDA_FAST_POLL,             "fast_poll_increase" },
-       { CTL_INT,      NET_IRDA_DISCOVERY_SLOTS,       "discovery_slots" },
-       { CTL_INT,      NET_IRDA_DISCOVERY_TIMEOUT,     "discovery_timeout" },
-       { CTL_INT,      NET_IRDA_SLOT_TIMEOUT,          "slot_timeout" },
-       { CTL_INT,      NET_IRDA_MAX_BAUD_RATE,         "max_baud_rate" },
-       { CTL_INT,      NET_IRDA_MIN_TX_TURN_TIME,      "min_tx_turn_time" },
-       { CTL_INT,      NET_IRDA_MAX_TX_DATA_SIZE,      "max_tx_data_size" },
-       { CTL_INT,      NET_IRDA_MAX_TX_WINDOW,         "max_tx_window" },
-       { CTL_INT,      NET_IRDA_MAX_NOREPLY_TIME,      "max_noreply_time" },
-       { CTL_INT,      NET_IRDA_WARN_NOREPLY_TIME,     "warn_noreply_time" },
-       { CTL_INT,      NET_IRDA_LAP_KEEPALIVE_TIME,    "lap_keepalive_time" },
-       {}
-};
-
 static const struct bin_table bin_net_table[] = {
        { CTL_DIR,      NET_CORE,               "core",         bin_net_core_table },
        /* NET_ETHER not used */
@@ -743,7 +725,7 @@ static const struct bin_table bin_net_table[] = {
        { CTL_DIR,      NET_LLC,                "llc",          bin_net_llc_table },
        { CTL_DIR,      NET_NETFILTER,          "netfilter",    bin_net_netfilter_table },
        /* NET_DCCP "dccp" no longer used */
-       { CTL_DIR,      NET_IRDA,               "irda",         bin_net_irda_table },
+       /* NET_IRDA "irda" no longer used */
        { CTL_INT,      2089,                   "nf_conntrack_max" },
        {}
 };
index 0e974cf..84f3742 100644 (file)
@@ -119,6 +119,16 @@ static DEFINE_SPINLOCK(watchdog_lock);
 static int watchdog_running;
 static atomic_t watchdog_reset_pending;
 
+static void inline clocksource_watchdog_lock(unsigned long *flags)
+{
+       spin_lock_irqsave(&watchdog_lock, *flags);
+}
+
+static void inline clocksource_watchdog_unlock(unsigned long *flags)
+{
+       spin_unlock_irqrestore(&watchdog_lock, *flags);
+}
+
 static int clocksource_watchdog_kthread(void *data);
 static void __clocksource_change_rating(struct clocksource *cs, int rating);
 
@@ -142,9 +152,19 @@ static void __clocksource_unstable(struct clocksource *cs)
        cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
        cs->flags |= CLOCK_SOURCE_UNSTABLE;
 
+       /*
+        * If the clocksource is registered clocksource_watchdog_kthread() will
+        * re-rate and re-select.
+        */
+       if (list_empty(&cs->list)) {
+               cs->rating = 0;
+               return;
+       }
+
        if (cs->mark_unstable)
                cs->mark_unstable(cs);
 
+       /* kick clocksource_watchdog_kthread() */
        if (finished_booting)
                schedule_work(&watchdog_work);
 }
@@ -153,10 +173,8 @@ static void __clocksource_unstable(struct clocksource *cs)
  * clocksource_mark_unstable - mark clocksource unstable via watchdog
  * @cs:                clocksource to be marked unstable
  *
- * This function is called instead of clocksource_change_rating from
- * cpu hotplug code to avoid a deadlock between the clocksource mutex
- * and the cpu hotplug mutex. It defers the update of the clocksource
- * to the watchdog thread.
+ * This function is called by the x86 TSC code to mark clocksources as unstable;
+ * it defers demotion and re-selection to a kthread.
  */
 void clocksource_mark_unstable(struct clocksource *cs)
 {
@@ -164,7 +182,7 @@ void clocksource_mark_unstable(struct clocksource *cs)
 
        spin_lock_irqsave(&watchdog_lock, flags);
        if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
-               if (list_empty(&cs->wd_list))
+               if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
                        list_add(&cs->wd_list, &watchdog_list);
                __clocksource_unstable(cs);
        }
@@ -319,9 +337,8 @@ static void clocksource_resume_watchdog(void)
 
 static void clocksource_enqueue_watchdog(struct clocksource *cs)
 {
-       unsigned long flags;
+       INIT_LIST_HEAD(&cs->wd_list);
 
-       spin_lock_irqsave(&watchdog_lock, flags);
        if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
                /* cs is a clocksource to be watched. */
                list_add(&cs->wd_list, &watchdog_list);
@@ -331,7 +348,6 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
                if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
                        cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
        }
-       spin_unlock_irqrestore(&watchdog_lock, flags);
 }
 
 static void clocksource_select_watchdog(bool fallback)
@@ -373,9 +389,6 @@ static void clocksource_select_watchdog(bool fallback)
 
 static void clocksource_dequeue_watchdog(struct clocksource *cs)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&watchdog_lock, flags);
        if (cs != watchdog) {
                if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
                        /* cs is a watched clocksource. */
@@ -384,21 +397,19 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
                        clocksource_stop_watchdog();
                }
        }
-       spin_unlock_irqrestore(&watchdog_lock, flags);
 }
 
 static int __clocksource_watchdog_kthread(void)
 {
        struct clocksource *cs, *tmp;
        unsigned long flags;
-       LIST_HEAD(unstable);
        int select = 0;
 
        spin_lock_irqsave(&watchdog_lock, flags);
        list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
                if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
                        list_del_init(&cs->wd_list);
-                       list_add(&cs->wd_list, &unstable);
+                       __clocksource_change_rating(cs, 0);
                        select = 1;
                }
                if (cs->flags & CLOCK_SOURCE_RESELECT) {
@@ -410,11 +421,6 @@ static int __clocksource_watchdog_kthread(void)
        clocksource_stop_watchdog();
        spin_unlock_irqrestore(&watchdog_lock, flags);
 
-       /* Needs to be done outside of watchdog lock */
-       list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
-               list_del_init(&cs->wd_list);
-               __clocksource_change_rating(cs, 0);
-       }
        return select;
 }
 
@@ -447,6 +453,9 @@ static inline int __clocksource_watchdog_kthread(void) { return 0; }
 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
 void clocksource_mark_unstable(struct clocksource *cs) { }
 
+static void inline clocksource_watchdog_lock(unsigned long *flags) { }
+static void inline clocksource_watchdog_unlock(unsigned long *flags) { }
+
 #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
 
 /**
@@ -779,14 +788,19 @@ EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
  */
 int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
 {
+       unsigned long flags;
 
        /* Initialize mult/shift and max_idle_ns */
        __clocksource_update_freq_scale(cs, scale, freq);
 
        /* Add clocksource to the clocksource list */
        mutex_lock(&clocksource_mutex);
+
+       clocksource_watchdog_lock(&flags);
        clocksource_enqueue(cs);
        clocksource_enqueue_watchdog(cs);
+       clocksource_watchdog_unlock(&flags);
+
        clocksource_select();
        clocksource_select_watchdog(false);
        mutex_unlock(&clocksource_mutex);
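
Taken together, the hunks above establish a fixed lock nesting: the watchdog spinlock is only ever taken inside clocksource_mutex, and the enqueue/dequeue helpers no longer lock on their own. A condensed sketch of the resulting registration path, assembled from this diff with the ordering spelled out as comments (not a literal copy of the file):

        /*
         * clocksource_mutex      - sleeping lock, taken first
         *   -> watchdog_lock     - irqsave spinlock, via the new helpers
         */
        mutex_lock(&clocksource_mutex);

        clocksource_watchdog_lock(&flags);
        clocksource_enqueue(cs);
        clocksource_enqueue_watchdog(cs);       /* pure list manipulation now */
        clocksource_watchdog_unlock(&flags);

        /* (re)selection stays outside the spinlock */
        clocksource_select();
        clocksource_select_watchdog(false);

        mutex_unlock(&clocksource_mutex);
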
@@ -808,8 +822,13 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating)
  */
 void clocksource_change_rating(struct clocksource *cs, int rating)
 {
+       unsigned long flags;
+
        mutex_lock(&clocksource_mutex);
+       clocksource_watchdog_lock(&flags);
        __clocksource_change_rating(cs, rating);
+       clocksource_watchdog_unlock(&flags);
+
        clocksource_select();
        clocksource_select_watchdog(false);
        mutex_unlock(&clocksource_mutex);
@@ -821,6 +840,8 @@ EXPORT_SYMBOL(clocksource_change_rating);
  */
 static int clocksource_unbind(struct clocksource *cs)
 {
+       unsigned long flags;
+
        if (clocksource_is_watchdog(cs)) {
                /* Select and try to install a replacement watchdog. */
                clocksource_select_watchdog(true);
@@ -834,8 +855,12 @@ static int clocksource_unbind(struct clocksource *cs)
                if (curr_clocksource == cs)
                        return -EBUSY;
        }
+
+       clocksource_watchdog_lock(&flags);
        clocksource_dequeue_watchdog(cs);
        list_del_init(&cs->list);
+       clocksource_watchdog_unlock(&flags);
+
        return 0;
 }
 
index eda1210..14e8587 100644 (file)
@@ -90,6 +90,11 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
                        .clockid = CLOCK_REALTIME,
                        .get_time = &ktime_get_real,
                },
+               {
+                       .index = HRTIMER_BASE_BOOTTIME,
+                       .clockid = CLOCK_BOOTTIME,
+                       .get_time = &ktime_get_boottime,
+               },
                {
                        .index = HRTIMER_BASE_TAI,
                        .clockid = CLOCK_TAI,
@@ -105,6 +110,11 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
                        .clockid = CLOCK_REALTIME,
                        .get_time = &ktime_get_real,
                },
+               {
+                       .index = HRTIMER_BASE_BOOTTIME_SOFT,
+                       .clockid = CLOCK_BOOTTIME,
+                       .get_time = &ktime_get_boottime,
+               },
                {
                        .index = HRTIMER_BASE_TAI_SOFT,
                        .clockid = CLOCK_TAI,
@@ -119,7 +129,7 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
 
        [CLOCK_REALTIME]        = HRTIMER_BASE_REALTIME,
        [CLOCK_MONOTONIC]       = HRTIMER_BASE_MONOTONIC,
-       [CLOCK_BOOTTIME]        = HRTIMER_BASE_MONOTONIC,
+       [CLOCK_BOOTTIME]        = HRTIMER_BASE_BOOTTIME,
        [CLOCK_TAI]             = HRTIMER_BASE_TAI,
 };
 
@@ -571,12 +581,14 @@ __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_
 static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
 {
        ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
+       ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
        ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
 
        ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq,
-                                                  offs_real, offs_tai);
+                                           offs_real, offs_boot, offs_tai);
 
        base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real;
+       base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot;
        base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai;
 
        return now;
index e0dbae9..69a937c 100644 (file)
@@ -83,8 +83,6 @@ int do_clock_gettime(clockid_t which_clock, struct timespec64 *tp)
        case CLOCK_BOOTTIME:
                get_monotonic_boottime64(tp);
                break;
-       case CLOCK_MONOTONIC_ACTIVE:
-               ktime_get_active_ts64(tp);
        default:
                return -EINVAL;
        }
index b6899b5..10b7186 100644 (file)
@@ -252,16 +252,15 @@ static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *
        return 0;
 }
 
-static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp)
+static int posix_get_boottime(const clockid_t which_clock, struct timespec64 *tp)
 {
-       timekeeping_clocktai64(tp);
+       get_monotonic_boottime64(tp);
        return 0;
 }
 
-static int posix_get_monotonic_active(clockid_t which_clock,
-                                     struct timespec64 *tp)
+static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp)
 {
-       ktime_get_active_ts64(tp);
+       timekeeping_clocktai64(tp);
        return 0;
 }
 
@@ -1317,9 +1316,19 @@ static const struct k_clock clock_tai = {
        .timer_arm              = common_hrtimer_arm,
 };
 
-static const struct k_clock clock_monotonic_active = {
+static const struct k_clock clock_boottime = {
        .clock_getres           = posix_get_hrtimer_res,
-       .clock_get              = posix_get_monotonic_active,
+       .clock_get              = posix_get_boottime,
+       .nsleep                 = common_nsleep,
+       .timer_create           = common_timer_create,
+       .timer_set              = common_timer_set,
+       .timer_get              = common_timer_get,
+       .timer_del              = common_timer_del,
+       .timer_rearm            = common_hrtimer_rearm,
+       .timer_forward          = common_hrtimer_forward,
+       .timer_remaining        = common_hrtimer_remaining,
+       .timer_try_to_cancel    = common_hrtimer_try_to_cancel,
+       .timer_arm              = common_hrtimer_arm,
 };
 
 static const struct k_clock * const posix_clocks[] = {
@@ -1330,11 +1339,10 @@ static const struct k_clock * const posix_clocks[] = {
        [CLOCK_MONOTONIC_RAW]           = &clock_monotonic_raw,
        [CLOCK_REALTIME_COARSE]         = &clock_realtime_coarse,
        [CLOCK_MONOTONIC_COARSE]        = &clock_monotonic_coarse,
-       [CLOCK_BOOTTIME]                = &clock_monotonic,
+       [CLOCK_BOOTTIME]                = &clock_boottime,
        [CLOCK_REALTIME_ALARM]          = &alarm_clock,
        [CLOCK_BOOTTIME_ALARM]          = &alarm_clock,
        [CLOCK_TAI]                     = &clock_tai,
-       [CLOCK_MONOTONIC_ACTIVE]        = &clock_monotonic_active,
 };
 
 static const struct k_clock *clockid_to_kclock(const clockid_t id)
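
With CLOCK_BOOTTIME wired to its own k_clock instead of aliasing clock_monotonic, the ordinary userspace calls now operate on the boot clock directly. A minimal userspace sketch (standard POSIX timer API, error handling omitted; link with -lrt on older glibc):

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec ts;
        struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL };
        timer_t t;

        sev.sigev_signo = SIGRTMIN;

        clock_gettime(CLOCK_BOOTTIME, &ts);     /* includes suspended time */
        printf("boottime: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);

        /* timer_create()/clock_nanosleep() on CLOCK_BOOTTIME go through the
         * clock_boottime k_clock added above. */
        if (timer_create(CLOCK_BOOTTIME, &sev, &t) == 0)
                timer_delete(t);
        return 0;
}
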
index 099572c..49edc1c 100644 (file)
@@ -419,19 +419,6 @@ void tick_suspend_local(void)
        clockevents_shutdown(td->evtdev);
 }
 
-static void tick_forward_next_period(void)
-{
-       ktime_t delta, now = ktime_get();
-       u64 n;
-
-       delta = ktime_sub(now, tick_next_period);
-       n = ktime_divns(delta, tick_period);
-       tick_next_period += n * tick_period;
-       if (tick_next_period < now)
-               tick_next_period += tick_period;
-       tick_sched_forward_next_period();
-}
-
 /**
  * tick_resume_local - Resume the local tick device
  *
@@ -444,8 +431,6 @@ void tick_resume_local(void)
        struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
        bool broadcast = tick_resume_check_broadcast();
 
-       tick_forward_next_period();
-
        clockevents_tick_resume(td->evtdev);
        if (!broadcast) {
                if (td->mode == TICKDEV_MODE_PERIODIC)
index 21efab7..e277284 100644 (file)
@@ -141,12 +141,6 @@ static inline void tick_check_oneshot_broadcast_this_cpu(void) { }
 static inline bool tick_broadcast_oneshot_available(void) { return tick_oneshot_possible(); }
 #endif /* !(BROADCAST && ONESHOT) */
 
-#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
-extern void tick_sched_forward_next_period(void);
-#else
-static inline void tick_sched_forward_next_period(void) { }
-#endif
-
 /* NO_HZ_FULL internal */
 #ifdef CONFIG_NO_HZ_FULL
 extern void tick_nohz_init(void);
index 646645e..da9455a 100644 (file)
@@ -51,15 +51,6 @@ struct tick_sched *tick_get_tick_sched(int cpu)
  */
 static ktime_t last_jiffies_update;
 
-/*
- * Called after resume. Make sure that jiffies are not fast forwarded due to
- * clock monotonic being forwarded by the suspended time.
- */
-void tick_sched_forward_next_period(void)
-{
-       last_jiffies_update = tick_next_period;
-}
-
 /*
  * Must be called with interrupts disabled !
  */
@@ -804,12 +795,12 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
                return;
        }
 
-       hrtimer_set_expires(&ts->sched_timer, tick);
-
-       if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
-               hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
-       else
+       if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
+               hrtimer_start(&ts->sched_timer, tick, HRTIMER_MODE_ABS_PINNED);
+       } else {
+               hrtimer_set_expires(&ts->sched_timer, tick);
                tick_program_event(tick, 1);
+       }
 }
 
 static void tick_nohz_retain_tick(struct tick_sched *ts)
index dcf7f20..49cbcee 100644 (file)
@@ -138,12 +138,7 @@ static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
 
 static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
 {
-       /* Update both bases so mono and raw stay coupled. */
-       tk->tkr_mono.base += delta;
-       tk->tkr_raw.base += delta;
-
-       /* Accumulate time spent in suspend */
-       tk->time_suspended += delta;
+       tk->offs_boot = ktime_add(tk->offs_boot, delta);
 }
 
 /*
@@ -473,6 +468,36 @@ u64 ktime_get_raw_fast_ns(void)
 }
 EXPORT_SYMBOL_GPL(ktime_get_raw_fast_ns);
 
+/**
+ * ktime_get_boot_fast_ns - NMI safe and fast access to boot clock.
+ *
+ * Since this is called from tracing/NMI context, we do not use a separate
+ * timekeeper whose monotonic clock and boot offset updates are protected by
+ * seqlocks. This has the following minor side effects:
+ *
+ * (1) It's possible that a timestamp is taken after the boot offset is updated
+ * but before the timekeeper is updated. If this happens, the new boot offset
+ * is added to the old timekeeping, making the clock appear to update slightly
+ * earlier:
+ *    CPU 0                                        CPU 1
+ *    timekeeping_inject_sleeptime64()
+ *    __timekeeping_inject_sleeptime(tk, delta);
+ *                                                 timestamp();
+ *    timekeeping_update(tk, TK_CLEAR_NTP...);
+ *
+ * (2) On 32-bit systems, the 64-bit boot offset (tk->offs_boot) may be
+ * partially updated.  Since tk->offs_boot updates are rare, such a torn read
+ * should be rare enough for postprocessing to handle.
+ */
+u64 notrace ktime_get_boot_fast_ns(void)
+{
+       struct timekeeper *tk = &tk_core.timekeeper;
+
+       return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot));
+}
+EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
+
+
 /*
  * See comment for __ktime_get_fast_ns() vs. timestamp ordering
  */
@@ -764,6 +789,7 @@ EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);
 
 static ktime_t *offsets[TK_OFFS_MAX] = {
        [TK_OFFS_REAL]  = &tk_core.timekeeper.offs_real,
+       [TK_OFFS_BOOT]  = &tk_core.timekeeper.offs_boot,
        [TK_OFFS_TAI]   = &tk_core.timekeeper.offs_tai,
 };
 
@@ -860,39 +886,6 @@ void ktime_get_ts64(struct timespec64 *ts)
 }
 EXPORT_SYMBOL_GPL(ktime_get_ts64);
 
-/**
- * ktime_get_active_ts64 - Get the active non-suspended monotonic clock
- * @ts:                pointer to timespec variable
- *
- * The function calculates the monotonic clock from the realtime clock and
- * the wall_to_monotonic offset, subtracts the accumulated suspend time and
- * stores the result in normalized timespec64 format in the variable
- * pointed to by @ts.
- */
-void ktime_get_active_ts64(struct timespec64 *ts)
-{
-       struct timekeeper *tk = &tk_core.timekeeper;
-       struct timespec64 tomono, tsusp;
-       u64 nsec, nssusp;
-       unsigned int seq;
-
-       WARN_ON(timekeeping_suspended);
-
-       do {
-               seq = read_seqcount_begin(&tk_core.seq);
-               ts->tv_sec = tk->xtime_sec;
-               nsec = timekeeping_get_ns(&tk->tkr_mono);
-               tomono = tk->wall_to_monotonic;
-               nssusp = tk->time_suspended;
-       } while (read_seqcount_retry(&tk_core.seq, seq));
-
-       ts->tv_sec += tomono.tv_sec;
-       ts->tv_nsec = 0;
-       timespec64_add_ns(ts, nsec + tomono.tv_nsec);
-       tsusp = ns_to_timespec64(nssusp);
-       *ts = timespec64_sub(*ts, tsusp);
-}
-
 /**
  * ktime_get_seconds - Get the seconds portion of CLOCK_MONOTONIC
  *
@@ -1593,6 +1586,7 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
                return;
        }
        tk_xtime_add(tk, delta);
+       tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
        tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
        tk_debug_account_sleep_time(delta);
 }
@@ -2125,7 +2119,7 @@ out:
 void getboottime64(struct timespec64 *ts)
 {
        struct timekeeper *tk = &tk_core.timekeeper;
-       ktime_t t = ktime_sub(tk->offs_real, tk->time_suspended);
+       ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
 
        *ts = ktime_to_timespec64(t);
 }
@@ -2188,6 +2182,7 @@ void do_timer(unsigned long ticks)
  * ktime_get_update_offsets_now - hrtimer helper
  * @cwsseq:    pointer to check and store the clock was set sequence number
  * @offs_real: pointer to storage for monotonic -> realtime offset
+ * @offs_boot: pointer to storage for monotonic -> boottime offset
  * @offs_tai:  pointer to storage for monotonic -> clock tai offset
  *
  * Returns current monotonic time and updates the offsets if the
@@ -2197,7 +2192,7 @@ void do_timer(unsigned long ticks)
  * Called from hrtimer_interrupt() or retrigger_next_event()
  */
 ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
-                                    ktime_t *offs_tai)
+                                    ktime_t *offs_boot, ktime_t *offs_tai)
 {
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
@@ -2214,6 +2209,7 @@ ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
                if (*cwsseq != tk->clock_was_set_seq) {
                        *cwsseq = tk->clock_was_set_seq;
                        *offs_real = tk->offs_real;
+                       *offs_boot = tk->offs_boot;
                        *offs_tai = tk->offs_tai;
                }
 
index 79b67f5..7a9b4eb 100644 (file)
@@ -6,6 +6,7 @@
  */
 extern ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq,
                                            ktime_t *offs_real,
+                                           ktime_t *offs_boot,
                                            ktime_t *offs_tai);
 
 extern int timekeeping_valid_for_hres(void);
index d88e96d..56ba0f2 100644 (file)
@@ -977,6 +977,7 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
 {
        struct perf_event_query_bpf __user *uquery = info;
        struct perf_event_query_bpf query = {};
+       u32 *ids, prog_cnt, ids_len;
        int ret;
 
        if (!capable(CAP_SYS_ADMIN))
@@ -985,16 +986,32 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
                return -EINVAL;
        if (copy_from_user(&query, uquery, sizeof(query)))
                return -EFAULT;
-       if (query.ids_len > BPF_TRACE_MAX_PROGS)
+
+       ids_len = query.ids_len;
+       if (ids_len > BPF_TRACE_MAX_PROGS)
                return -E2BIG;
+       ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
+       if (!ids)
+               return -ENOMEM;
+       /*
+        * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
+        * is required when the user only wants to check uquery->prog_cnt.
+        * There is no need to check for it since the case is handled
+        * gracefully in bpf_prog_array_copy_info.
+        */
 
        mutex_lock(&bpf_event_mutex);
        ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
-                                      uquery->ids,
-                                      query.ids_len,
-                                      &uquery->prog_cnt);
+                                      ids,
+                                      ids_len,
+                                      &prog_cnt);
        mutex_unlock(&bpf_event_mutex);
 
+       if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
+           copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
+               ret = -EFAULT;
+
+       kfree(ids);
        return ret;
 }
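
The shape of the fix generalizes: validate the user-supplied length, bounce the result through a kernel buffer, and only touch user memory with copy_to_user() after the mutex is dropped, so a page fault during the copy can never happen under the lock. A stripped-down sketch of that pattern (demo_mutex and fill_ids_locked() are invented stand-ins, not kernel APIs):

static DEFINE_MUTEX(demo_mutex);

static int query_ids(u32 __user *uids, u32 __user *ucnt, u32 ids_len)
{
        u32 *ids, cnt;
        int ret;

        if (ids_len > 64)                       /* arbitrary cap for the sketch */
                return -E2BIG;

        /* kcalloc() returns ZERO_SIZE_PTR for ids_len == 0; that is fine,
         * the caller may only want the count. */
        ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
        if (!ids)
                return -ENOMEM;

        mutex_lock(&demo_mutex);
        ret = fill_ids_locked(ids, ids_len, &cnt);      /* hypothetical helper */
        mutex_unlock(&demo_mutex);

        if (!ret && (copy_to_user(ucnt, &cnt, sizeof(cnt)) ||
                     copy_to_user(uids, ids, ids_len * sizeof(u32))))
                ret = -EFAULT;

        kfree(ids);
        return ret;
}
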
 
index 16bbf06..8d83bcf 100644 (file)
@@ -5514,10 +5514,10 @@ static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
        ftrace_create_filter_files(&global_ops, d_tracer);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       trace_create_file("set_graph_function", 0444, d_tracer,
+       trace_create_file("set_graph_function", 0644, d_tracer,
                                    NULL,
                                    &ftrace_graph_fops);
-       trace_create_file("set_graph_notrace", 0444, d_tracer,
+       trace_create_file("set_graph_notrace", 0644, d_tracer,
                                    NULL,
                                    &ftrace_graph_notrace_fops);
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
index dfbcf9e..414d721 100644 (file)
@@ -1165,7 +1165,7 @@ static struct {
        { trace_clock,                  "perf",         1 },
        { ktime_get_mono_fast_ns,       "mono",         1 },
        { ktime_get_raw_fast_ns,        "mono_raw",     1 },
-       { ktime_get_mono_fast_ns,       "boot",         1 },
+       { ktime_get_boot_fast_ns,       "boot",         1 },
        ARCH_TRACE_CLOCKS
 };
 
index e954ae3..e3a658b 100644 (file)
@@ -356,7 +356,7 @@ FTRACE_ENTRY(hwlat, hwlat_entry,
                __field(        unsigned int,           seqnum          )
        ),
 
-       F_printk("cnt:%u\tts:%010llu.%010lu\tinner:%llu\touter:%llunmi-ts:%llu\tnmi-count:%u\n",
+       F_printk("cnt:%u\tts:%010llu.%010lu\tinner:%llu\touter:%llu\tnmi-ts:%llu\tnmi-count:%u\n",
                 __entry->seqnum,
                 __entry->tv_sec,
                 __entry->tv_nsec,
index 9b4716b..7d306b7 100644 (file)
@@ -762,6 +762,9 @@ static int regex_match_full(char *str, struct regex *r, int len)
 
 static int regex_match_front(char *str, struct regex *r, int len)
 {
+       if (len < r->len)
+               return 0;
+
        if (strncmp(str, r->pattern, r->len) == 0)
                return 1;
        return 0;
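
The added guard matters because the string being filtered can be shorter than the pattern; once len < r->len, strncmp() must not be reached. A self-contained userspace illustration of the same check (struct and values are made up for the example):

#include <stdio.h>
#include <string.h>

struct regex { const char *pattern; int len; };

static int regex_match_front(const char *str, const struct regex *r, int len)
{
        if (len < r->len)               /* the fix: never compare past 'len' */
                return 0;
        return strncmp(str, r->pattern, r->len) == 0;
}

int main(void)
{
        struct regex r = { .pattern = "abc", .len = 3 };

        printf("%d\n", regex_match_front("abcdef", &r, 6));    /* 1 */
        printf("%d\n", regex_match_front("ab", &r, 2));         /* 0, guarded */
        return 0;
}
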
@@ -1499,14 +1502,14 @@ static int process_preds(struct trace_event_call *call,
                return ret;
        }
 
-       if (!nr_preds) {
-               prog = NULL;
-       } else {
-               prog = predicate_parse(filter_string, nr_parens, nr_preds,
+       if (!nr_preds)
+               return -EINVAL;
+
+       prog = predicate_parse(filter_string, nr_parens, nr_preds,
                               parse_pred, call, pe);
-               if (IS_ERR(prog))
-                       return PTR_ERR(prog);
-       }
+       if (IS_ERR(prog))
+               return PTR_ERR(prog);
+
        rcu_assign_pointer(filter->prog, prog);
        return 0;
 }
index 0d7b3ff..b9061ed 100644 (file)
@@ -2466,6 +2466,7 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
                else if (strcmp(modifier, "usecs") == 0)
                        *flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
                else {
+                       hist_err("Invalid field modifier: ", modifier);
                        field = ERR_PTR(-EINVAL);
                        goto out;
                }
@@ -2481,6 +2482,7 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
        else {
                field = trace_find_event_field(file->event_call, field_name);
                if (!field || !field->size) {
+                       hist_err("Couldn't find field: ", field_name);
                        field = ERR_PTR(-EINVAL);
                        goto out;
                }
@@ -4913,6 +4915,16 @@ static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
                seq_printf(m, "%s", field_name);
        } else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP)
                seq_puts(m, "common_timestamp");
+
+       if (hist_field->flags) {
+               if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) &&
+                   !(hist_field->flags & HIST_FIELD_FL_EXPR)) {
+                       const char *flags = get_hist_field_flags(hist_field);
+
+                       if (flags)
+                               seq_printf(m, ".%s", flags);
+               }
+       }
 }
 
 static int event_hist_trigger_print(struct seq_file *m,
index 3c7bfc4..4237eba 100644 (file)
@@ -472,7 +472,7 @@ static __init int stack_trace_init(void)
                        NULL, &stack_trace_fops);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-       trace_create_file("stack_trace_filter", 0444, d_tracer,
+       trace_create_file("stack_trace_filter", 0644, d_tracer,
                          &trace_ops, &stack_trace_filter_fops);
 #endif
 
index 34fd0e0..ac89287 100644 (file)
@@ -55,6 +55,7 @@ struct trace_uprobe {
        struct list_head                list;
        struct trace_uprobe_filter      filter;
        struct uprobe_consumer          consumer;
+       struct path                     path;
        struct inode                    *inode;
        char                            *filename;
        unsigned long                   offset;
@@ -289,7 +290,7 @@ static void free_trace_uprobe(struct trace_uprobe *tu)
        for (i = 0; i < tu->tp.nr_args; i++)
                traceprobe_free_probe_arg(&tu->tp.args[i]);
 
-       iput(tu->inode);
+       path_put(&tu->path);
        kfree(tu->tp.call.class->system);
        kfree(tu->tp.call.name);
        kfree(tu->filename);
@@ -363,7 +364,6 @@ end:
 static int create_trace_uprobe(int argc, char **argv)
 {
        struct trace_uprobe *tu;
-       struct inode *inode;
        char *arg, *event, *group, *filename;
        char buf[MAX_EVENT_NAME_LEN];
        struct path path;
@@ -371,7 +371,6 @@ static int create_trace_uprobe(int argc, char **argv)
        bool is_delete, is_return;
        int i, ret;
 
-       inode = NULL;
        ret = 0;
        is_delete = false;
        is_return = false;
@@ -437,21 +436,16 @@ static int create_trace_uprobe(int argc, char **argv)
        }
        /* Find the last occurrence, in case the path contains ':' too. */
        arg = strrchr(argv[1], ':');
-       if (!arg) {
-               ret = -EINVAL;
-               goto fail_address_parse;
-       }
+       if (!arg)
+               return -EINVAL;
 
        *arg++ = '\0';
        filename = argv[1];
        ret = kern_path(filename, LOOKUP_FOLLOW, &path);
        if (ret)
-               goto fail_address_parse;
-
-       inode = igrab(d_real_inode(path.dentry));
-       path_put(&path);
+               return ret;
 
-       if (!inode || !S_ISREG(inode->i_mode)) {
+       if (!d_is_reg(path.dentry)) {
                ret = -EINVAL;
                goto fail_address_parse;
        }
@@ -490,7 +484,7 @@ static int create_trace_uprobe(int argc, char **argv)
                goto fail_address_parse;
        }
        tu->offset = offset;
-       tu->inode = inode;
+       tu->path = path;
        tu->filename = kstrdup(filename, GFP_KERNEL);
 
        if (!tu->filename) {
@@ -558,7 +552,7 @@ error:
        return ret;
 
 fail_address_parse:
-       iput(inode);
+       path_put(&path);
 
        pr_info("Failed to parse address or file.\n");
 
@@ -922,6 +916,7 @@ probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
                goto err_flags;
 
        tu->consumer.filter = filter;
+       tu->inode = d_real_inode(tu->path.dentry);
        ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
        if (ret)
                goto err_buffer;
@@ -967,6 +962,7 @@ probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
        WARN_ON(!uprobe_filter_is_empty(&tu->filter));
 
        uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
+       tu->inode = NULL;
        tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;
 
        uprobe_buffer_disable();
@@ -1337,7 +1333,6 @@ struct trace_event_call *
 create_local_trace_uprobe(char *name, unsigned long offs, bool is_return)
 {
        struct trace_uprobe *tu;
-       struct inode *inode;
        struct path path;
        int ret;
 
@@ -1345,11 +1340,8 @@ create_local_trace_uprobe(char *name, unsigned long offs, bool is_return)
        if (ret)
                return ERR_PTR(ret);
 
-       inode = igrab(d_inode(path.dentry));
-       path_put(&path);
-
-       if (!inode || !S_ISREG(inode->i_mode)) {
-               iput(inode);
+       if (!d_is_reg(path.dentry)) {
+               path_put(&path);
                return ERR_PTR(-EINVAL);
        }
 
@@ -1364,11 +1356,12 @@ create_local_trace_uprobe(char *name, unsigned long offs, bool is_return)
        if (IS_ERR(tu)) {
                pr_info("Failed to allocate trace_uprobe.(%d)\n",
                        (int)PTR_ERR(tu));
+               path_put(&path);
                return ERR_CAST(tu);
        }
 
        tu->offset = offs;
-       tu->inode = inode;
+       tu->path = path;
        tu->filename = kstrdup(name, GFP_KERNEL);
        init_trace_event_call(tu, &tu->tp.call);
 
index 671b134..1e37da2 100644 (file)
@@ -207,7 +207,7 @@ static int tracepoint_add_func(struct tracepoint *tp,
                        lockdep_is_held(&tracepoints_mutex));
        old = func_add(&tp_funcs, func, prio);
        if (IS_ERR(old)) {
-               WARN_ON_ONCE(1);
+               WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
                return PTR_ERR(old);
        }
 
@@ -239,7 +239,7 @@ static int tracepoint_remove_func(struct tracepoint *tp,
                        lockdep_is_held(&tracepoints_mutex));
        old = func_remove(&tp_funcs, func);
        if (IS_ERR(old)) {
-               WARN_ON_ONCE(1);
+               WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
                return PTR_ERR(old);
        }
 
index c0bba30..bbfb229 100644 (file)
@@ -84,7 +84,8 @@ again:
                __free_pages(page, page_order);
                page = NULL;
 
-               if (dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
+               if (IS_ENABLED(CONFIG_ZONE_DMA) &&
+                   dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
                    !(gfp & GFP_DMA)) {
                        gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                        goto again;
index df78241..81f9e33 100644 (file)
@@ -111,27 +111,22 @@ EXPORT_SYMBOL(errseq_set);
  * errseq_sample() - Grab current errseq_t value.
  * @eseq: Pointer to errseq_t to be sampled.
  *
- * This function allows callers to sample an errseq_t value, marking it as
- * "seen" if required.
+ * This function allows callers to initialise their errseq_t variable.
+ * If the error has been "seen", new callers will not see an old error.
+ * If there is an unseen error in @eseq, the caller of this function will
+ * see it the next time it checks for an error.
  *
+ * Context: Any context.
  * Return: The current errseq value.
  */
 errseq_t errseq_sample(errseq_t *eseq)
 {
        errseq_t old = READ_ONCE(*eseq);
-       errseq_t new = old;
 
-       /*
-        * For the common case of no errors ever having been set, we can skip
-        * marking the SEEN bit. Once an error has been set, the value will
-        * never go back to zero.
-        */
-       if (old != 0) {
-               new |= ERRSEQ_SEEN;
-               if (old != new)
-                       cmpxchg(eseq, old, new);
-       }
-       return new;
+       /* If nobody has seen this error yet, then we can be the first. */
+       if (!(old & ERRSEQ_SEEN))
+               old = 0;
+       return old;
 }
 EXPORT_SYMBOL(errseq_sample);
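
The new sampling semantics are easiest to see from the caller's side: a cursor taken at open time skips errors that were already marked seen, but still reports an unseen one on the first later check. A sketch of that pattern against the existing errseq helpers (the demo_* names are invented for illustration):

static errseq_t demo_wb_err;            /* e.g. what lives in an address_space */

static void demo_open(errseq_t *cursor)
{
        /* Already-seen errors are skipped; an unseen one stays visible to us. */
        *cursor = errseq_sample(&demo_wb_err);
}

static void demo_write_failed(void)
{
        errseq_set(&demo_wb_err, -EIO);
}

static int demo_fsync(errseq_t *cursor)
{
        /* Reports a pending error once, then advances our cursor past it. */
        return errseq_check_and_advance(&demo_wb_err, cursor);
}
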
 
index 5985a25..5367ffa 100644 (file)
@@ -132,7 +132,12 @@ static int __init find_bit_test(void)
        test_find_next_bit(bitmap, BITMAP_LEN);
        test_find_next_zero_bit(bitmap, BITMAP_LEN);
        test_find_last_bit(bitmap, BITMAP_LEN);
-       test_find_first_bit(bitmap, BITMAP_LEN);
+
+       /*
+        * test_find_first_bit() may take some time, so
+        * traverse only part of the bitmap to avoid soft lockup.
+        */
+       test_find_first_bit(bitmap, BITMAP_LEN / 10);
        test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);
 
        pr_err("\nStart testing find_bit() with sparse bitmap\n");
index e1d1f29..18989b5 100644 (file)
@@ -233,13 +233,12 @@ static int kobject_add_internal(struct kobject *kobj)
 
                /* be noisy on error issues */
                if (error == -EEXIST)
-                       WARN(1,
-                            "%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n",
-                            __func__, kobject_name(kobj));
+                       pr_err("%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n",
+                              __func__, kobject_name(kobj));
                else
-                       WARN(1, "%s failed for %s (error: %d parent: %s)\n",
-                            __func__, kobject_name(kobj), error,
-                            parent ? kobject_name(parent) : "'none'");
+                       pr_err("%s failed for %s (error: %d parent: %s)\n",
+                              __func__, kobject_name(kobj), error,
+                              parent ? kobject_name(parent) : "'none'");
        } else
                kobj->state_in_sysfs = 1;
 
index fece575..cc64058 100644 (file)
@@ -714,7 +714,7 @@ swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
 
        phys_addr = swiotlb_tbl_map_single(dev,
                        __phys_to_dma(dev, io_tlb_start),
-                       0, size, DMA_FROM_DEVICE, 0);
+                       0, size, DMA_FROM_DEVICE, attrs);
        if (phys_addr == SWIOTLB_MAP_ERROR)
                goto out_warn;
 
@@ -737,7 +737,7 @@ out_unmap:
        swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
                        DMA_ATTR_SKIP_CPU_SYNC);
 out_warn:
-       if ((attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) {
+       if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) {
                dev_warn(dev,
                        "swiotlb: coherent allocation failed, size=%zu\n",
                        size);
index 30c0cb8..23920c5 100644 (file)
@@ -1669,19 +1669,22 @@ char *pointer_string(char *buf, char *end, const void *ptr,
        return number(buf, end, (unsigned long int)ptr, spec);
 }
 
-static bool have_filled_random_ptr_key __read_mostly;
+static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
 static siphash_key_t ptr_key __read_mostly;
 
-static void fill_random_ptr_key(struct random_ready_callback *unused)
+static void enable_ptr_key_workfn(struct work_struct *work)
 {
        get_random_bytes(&ptr_key, sizeof(ptr_key));
-       /*
-        * have_filled_random_ptr_key==true is dependent on get_random_bytes().
-        * ptr_to_id() needs to see have_filled_random_ptr_key==true
-        * after get_random_bytes() returns.
-        */
-       smp_mb();
-       WRITE_ONCE(have_filled_random_ptr_key, true);
+       /* Needs to run from preemptible context */
+       static_branch_disable(&not_filled_random_ptr_key);
+}
+
+static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
+
+static void fill_random_ptr_key(struct random_ready_callback *unused)
+{
+       /* This may be in an interrupt handler. */
+       queue_work(system_unbound_wq, &enable_ptr_key_work);
 }
 
 static struct random_ready_callback random_ready = {
@@ -1695,7 +1698,8 @@ static int __init initialize_ptr_random(void)
        if (!ret) {
                return 0;
        } else if (ret == -EALREADY) {
-               fill_random_ptr_key(&random_ready);
+               /* This is in preemptible context */
+               enable_ptr_key_workfn(&enable_ptr_key_work);
                return 0;
        }
 
@@ -1709,7 +1713,7 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
        unsigned long hashval;
        const int default_width = 2 * sizeof(ptr);
 
-       if (unlikely(!have_filled_random_ptr_key)) {
+       if (static_branch_unlikely(&not_filled_random_ptr_key)) {
                spec.field_width = default_width;
                /* string length must be less than default_width */
                return string(buf, end, "(ptrval)", spec);
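
The same deferral applies whenever a static key has to be flipped from a context that must not sleep: static_branch_disable() ends up in jump-label and cpu-hotplug locking, so the interrupt-time notifier only queues a work item and the key is switched from the workqueue. A generic sketch of the pattern (names are illustrative, not part of this patch):

#include <linux/jump_label.h>
#include <linux/workqueue.h>

static DEFINE_STATIC_KEY_TRUE(feature_not_ready);

static void enable_feature_workfn(struct work_struct *work)
{
        /* Runs in preemptible context, so flipping the key is safe here. */
        static_branch_disable(&feature_not_ready);
}
static DECLARE_WORK(enable_feature_work, enable_feature_workfn);

static void feature_ready_notifier(void)        /* may be called from IRQ context */
{
        queue_work(system_unbound_wq, &enable_feature_work);
}

static bool feature_is_ready(void)
{
        return !static_branch_unlikely(&feature_not_ready);
}
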
index 023190c..7441bd9 100644 (file)
@@ -115,6 +115,7 @@ static int bdi_debug_register(struct backing_dev_info *bdi, const char *name)
                                               bdi, &bdi_debug_stats_fops);
        if (!bdi->debug_stats) {
                debugfs_remove(bdi->debug_dir);
+               bdi->debug_dir = NULL;
                return -ENOMEM;
        }
 
@@ -383,7 +384,7 @@ static void wb_shutdown(struct bdi_writeback *wb)
         * the barrier provided by test_and_clear_bit() above.
         */
        smp_wmb();
-       clear_bit(WB_shutting_down, &wb->state);
+       clear_and_wake_up_bit(WB_shutting_down, &wb->state);
 }
 
 static void wb_exit(struct bdi_writeback *wb)
index 76af4cf..541904a 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -544,6 +544,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
        if (vm_flags & (VM_IO | VM_PFNMAP))
                return -EFAULT;
 
+       if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
+               return -EFAULT;
+
        if (write) {
                if (!(vm_flags & VM_WRITE)) {
                        if (!(gup_flags & FOLL_FORCE))
index 5684330..8c0af0f 100644 (file)
@@ -528,14 +528,12 @@ int migrate_page_move_mapping(struct address_space *mapping,
                int i;
                int index = page_index(page);
 
-               for (i = 0; i < HPAGE_PMD_NR; i++) {
+               for (i = 1; i < HPAGE_PMD_NR; i++) {
                        pslot = radix_tree_lookup_slot(&mapping->i_pages,
                                                       index + i);
                        radix_tree_replace_slot(&mapping->i_pages, pslot,
                                                newpage + i);
                }
-       } else {
-               radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);
        }
 
        /*
index 188f195..78e14fa 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -100,11 +100,20 @@ pgprot_t protection_map[16] __ro_after_init = {
        __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
 };
 
+#ifndef CONFIG_ARCH_HAS_FILTER_PGPROT
+static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
+{
+       return prot;
+}
+#endif
+
 pgprot_t vm_get_page_prot(unsigned long vm_flags)
 {
-       return __pgprot(pgprot_val(protection_map[vm_flags &
+       pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
                                (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
                        pgprot_val(arch_vm_get_page_prot(vm_flags)));
+
+       return arch_filter_pgprot(ret);
 }
 EXPORT_SYMBOL(vm_get_page_prot);
 
@@ -1315,6 +1324,35 @@ static inline int mlock_future_check(struct mm_struct *mm,
        return 0;
 }
 
+static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
+{
+       if (S_ISREG(inode->i_mode))
+               return inode->i_sb->s_maxbytes;
+
+       if (S_ISBLK(inode->i_mode))
+               return MAX_LFS_FILESIZE;
+
+       /* Special "we do even unsigned file positions" case */
+       if (file->f_mode & FMODE_UNSIGNED_OFFSET)
+               return 0;
+
+       /* Yes, random drivers might want more. But I'm tired of buggy drivers */
+       return ULONG_MAX;
+}
+
+static inline bool file_mmap_ok(struct file *file, struct inode *inode,
+                               unsigned long pgoff, unsigned long len)
+{
+       u64 maxsize = file_mmap_size_max(file, inode);
+
+       if (maxsize && len > maxsize)
+               return false;
+       maxsize -= len;
+       if (pgoff > maxsize >> PAGE_SHIFT)
+               return false;
+       return true;
+}
+
 /*
  * The caller must hold down_write(&current->mm->mmap_sem).
  */
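
A quick userspace check of the overflow arithmetic in file_mmap_ok(), assuming 4 KiB pages and an invented 8 TiB s_maxbytes (the numbers are only for the example):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static int file_mmap_ok(uint64_t maxsize, uint64_t pgoff, uint64_t len)
{
        if (maxsize && len > maxsize)
                return 0;
        maxsize -= len;
        return pgoff <= (maxsize >> PAGE_SHIFT);
}

int main(void)
{
        uint64_t maxbytes = 1ULL << 43;         /* pretend s_maxbytes = 8 TiB */

        /* Small offset: fine. */
        printf("%d\n", file_mmap_ok(maxbytes, 1, 4096));                        /* 1 */
        /* pgoff alone already reaches s_maxbytes: rejected as -EOVERFLOW. */
        printf("%d\n", file_mmap_ok(maxbytes, maxbytes >> PAGE_SHIFT, 4096));   /* 0 */
        return 0;
}
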
@@ -1400,6 +1438,9 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
                struct inode *inode = file_inode(file);
                unsigned long flags_mask;
 
+               if (!file_mmap_ok(file, inode, pgoff, len))
+                       return -EOVERFLOW;
+
                flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags;
 
                switch (flags & MAP_TYPE) {
@@ -3015,6 +3056,32 @@ void exit_mmap(struct mm_struct *mm)
        /* mm's last user has gone, and it's about to be pulled down */
        mmu_notifier_release(mm);
 
+       if (unlikely(mm_is_oom_victim(mm))) {
+               /*
+                * Manually reap the mm to free as much memory as possible.
+                * Then, as the oom reaper does, set MMF_OOM_SKIP to disregard
+                * this mm from further consideration.  Taking mm->mmap_sem for
+                * write after setting MMF_OOM_SKIP will guarantee that the oom
+                * reaper will not run on this mm again after mmap_sem is
+                * dropped.
+                *
+                * Nothing can be holding mm->mmap_sem here and the above call
+                * to mmu_notifier_release(mm) ensures mmu notifier callbacks in
+                * __oom_reap_task_mm() will not block.
+                *
+                * This needs to be done before calling munlock_vma_pages_all(),
+                * which clears VM_LOCKED, otherwise the oom reaper cannot
+                * reliably test it.
+                */
+               mutex_lock(&oom_lock);
+               __oom_reap_task_mm(mm);
+               mutex_unlock(&oom_lock);
+
+               set_bit(MMF_OOM_SKIP, &mm->flags);
+               down_write(&mm->mmap_sem);
+               up_write(&mm->mmap_sem);
+       }
+
        if (mm->locked_vm) {
                vma = mm->mmap;
                while (vma) {
@@ -3036,24 +3103,6 @@ void exit_mmap(struct mm_struct *mm)
        /* update_hiwater_rss(mm) here? but nobody should be looking */
        /* Use -1 here to ensure all VMAs in the mm are unmapped */
        unmap_vmas(&tlb, vma, 0, -1);
-
-       if (unlikely(mm_is_oom_victim(mm))) {
-               /*
-                * Wait for oom_reap_task() to stop working on this
-                * mm. Because MMF_OOM_SKIP is already set before
-                * calling down_read(), oom_reap_task() will not run
-                * on this "mm" post up_write().
-                *
-                * mm_is_oom_victim() cannot be set from under us
-                * either because victim->mm is already set to NULL
-                * under task_lock before calling mmput and oom_mm is
-                * set not NULL by the OOM killer only if victim->mm
-                * is found not NULL while holding the task_lock.
-                */
-               set_bit(MMF_OOM_SKIP, &mm->flags);
-               down_write(&mm->mmap_sem);
-               up_write(&mm->mmap_sem);
-       }
        free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
        tlb_finish_mmu(&tlb, 0, -1);
 
index ff992fa..8ba6cb8 100644 (file)
@@ -469,7 +469,6 @@ bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
        return false;
 }
 
-
 #ifdef CONFIG_MMU
 /*
  * OOM Reaper kernel thread which tries to reap the memory used by the OOM
@@ -480,16 +479,54 @@ static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
 static struct task_struct *oom_reaper_list;
 static DEFINE_SPINLOCK(oom_reaper_lock);
 
-static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
+void __oom_reap_task_mm(struct mm_struct *mm)
 {
-       struct mmu_gather tlb;
        struct vm_area_struct *vma;
+
+       /*
+        * Tell all users of get_user/copy_from_user etc... that the content
+        * is no longer stable. No barriers really needed because unmapping
+        * should imply barriers already and the reader would hit a page fault
+        * if it stumbled over a reaped memory.
+        */
+       set_bit(MMF_UNSTABLE, &mm->flags);
+
+       for (vma = mm->mmap ; vma; vma = vma->vm_next) {
+               if (!can_madv_dontneed_vma(vma))
+                       continue;
+
+               /*
+                * Only anonymous pages have a good chance to be dropped
+                * without additional steps which we cannot afford as we
+                * are OOM already.
+                *
+                * We do not even care about fs backed pages because all
+                * which are reclaimable have already been reclaimed and
+                * we do not want to block exit_mmap by keeping mm ref
+                * count elevated without a good reason.
+                */
+               if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
+                       const unsigned long start = vma->vm_start;
+                       const unsigned long end = vma->vm_end;
+                       struct mmu_gather tlb;
+
+                       tlb_gather_mmu(&tlb, mm, start, end);
+                       mmu_notifier_invalidate_range_start(mm, start, end);
+                       unmap_page_range(&tlb, vma, start, end, NULL);
+                       mmu_notifier_invalidate_range_end(mm, start, end);
+                       tlb_finish_mmu(&tlb, start, end);
+               }
+       }
+}
+
+static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
+{
        bool ret = true;
 
        /*
         * We have to make sure to not race with the victim exit path
         * and cause premature new oom victim selection:
-        * __oom_reap_task_mm           exit_mm
+        * oom_reap_task_mm             exit_mm
         *   mmget_not_zero
         *                                mmput
         *                                  atomic_dec_and_test
@@ -534,39 +571,8 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
 
        trace_start_task_reaping(tsk->pid);
 
-       /*
-        * Tell all users of get_user/copy_from_user etc... that the content
-        * is no longer stable. No barriers really needed because unmapping
-        * should imply barriers already and the reader would hit a page fault
-        * if it stumbled over a reaped memory.
-        */
-       set_bit(MMF_UNSTABLE, &mm->flags);
-
-       for (vma = mm->mmap ; vma; vma = vma->vm_next) {
-               if (!can_madv_dontneed_vma(vma))
-                       continue;
+       __oom_reap_task_mm(mm);
 
-               /*
-                * Only anonymous pages have a good chance to be dropped
-                * without additional steps which we cannot afford as we
-                * are OOM already.
-                *
-                * We do not even care about fs backed pages because all
-                * which are reclaimable have already been reclaimed and
-                * we do not want to block exit_mmap by keeping mm ref
-                * count elevated without a good reason.
-                */
-               if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
-                       const unsigned long start = vma->vm_start;
-                       const unsigned long end = vma->vm_end;
-
-                       tlb_gather_mmu(&tlb, mm, start, end);
-                       mmu_notifier_invalidate_range_start(mm, start, end);
-                       unmap_page_range(&tlb, vma, start, end, NULL);
-                       mmu_notifier_invalidate_range_end(mm, start, end);
-                       tlb_finish_mmu(&tlb, start, end);
-               }
-       }
        pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
                        task_pid_nr(tsk), tsk->comm,
                        K(get_mm_counter(mm, MM_ANONPAGES)),
@@ -587,14 +593,13 @@ static void oom_reap_task(struct task_struct *tsk)
        struct mm_struct *mm = tsk->signal->oom_mm;
 
        /* Retry the down_read_trylock(mmap_sem) a few times */
-       while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task_mm(tsk, mm))
+       while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
                schedule_timeout_idle(HZ/10);
 
        if (attempts <= MAX_OOM_REAP_RETRIES ||
            test_bit(MMF_OOM_SKIP, &mm->flags))
                goto done;
 
-
        pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
                task_pid_nr(tsk), tsk->comm);
        debug_show_all_locks();
index 62eef26..73dc2fc 100644 (file)
@@ -629,7 +629,7 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
        unsigned long pfn;
 
        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
-               unsigned long section_nr = pfn_to_section_nr(start_pfn);
+               unsigned long section_nr = pfn_to_section_nr(pfn);
                struct mem_section *ms;
 
                /*
index 536332e..a2b9518 100644 (file)
@@ -1161,7 +1161,7 @@ const char * const vmstat_text[] = {
        "nr_vmscan_immediate_reclaim",
        "nr_dirtied",
        "nr_written",
-       "nr_indirectly_reclaimable",
+       "", /* nr_indirectly_reclaimable */
 
        /* enum writeback_stat_item counters */
        "nr_dirty_threshold",
@@ -1740,6 +1740,10 @@ static int vmstat_show(struct seq_file *m, void *arg)
        unsigned long *l = arg;
        unsigned long off = l - (unsigned long *)m->private;
 
+       /* Skip hidden vmstat items. */
+       if (*vmstat_text[off] == '\0')
+               return 0;
+
        seq_puts(m, vmstat_text[off]);
        seq_put_decimal_ull(m, " ", *l);
        seq_putc(m, '\n');
index c0bca61..4b366d1 100644 (file)
@@ -144,7 +144,8 @@ enum z3fold_page_flags {
        PAGE_HEADLESS = 0,
        MIDDLE_CHUNK_MAPPED,
        NEEDS_COMPACTING,
-       PAGE_STALE
+       PAGE_STALE,
+       UNDER_RECLAIM
 };
 
 /*****************
@@ -173,6 +174,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page,
        clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
        clear_bit(NEEDS_COMPACTING, &page->private);
        clear_bit(PAGE_STALE, &page->private);
+       clear_bit(UNDER_RECLAIM, &page->private);
 
        spin_lock_init(&zhdr->page_lock);
        kref_init(&zhdr->refcount);
@@ -756,6 +758,10 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
                atomic64_dec(&pool->pages_nr);
                return;
        }
+       if (test_bit(UNDER_RECLAIM, &page->private)) {
+               z3fold_page_unlock(zhdr);
+               return;
+       }
        if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
                z3fold_page_unlock(zhdr);
                return;
@@ -840,6 +846,8 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
                        kref_get(&zhdr->refcount);
                        list_del_init(&zhdr->buddy);
                        zhdr->cpu = -1;
+                       set_bit(UNDER_RECLAIM, &page->private);
+                       break;
                }
 
                list_del_init(&page->lru);
@@ -887,25 +895,35 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
                                goto next;
                }
 next:
-               spin_lock(&pool->lock);
                if (test_bit(PAGE_HEADLESS, &page->private)) {
                        if (ret == 0) {
-                               spin_unlock(&pool->lock);
                                free_z3fold_page(page);
                                return 0;
                        }
-               } else if (kref_put(&zhdr->refcount, release_z3fold_page)) {
-                       atomic64_dec(&pool->pages_nr);
+                       spin_lock(&pool->lock);
+                       list_add(&page->lru, &pool->lru);
+                       spin_unlock(&pool->lock);
+               } else {
+                       z3fold_page_lock(zhdr);
+                       clear_bit(UNDER_RECLAIM, &page->private);
+                       if (kref_put(&zhdr->refcount,
+                                       release_z3fold_page_locked)) {
+                               atomic64_dec(&pool->pages_nr);
+                               return 0;
+                       }
+                       /*
+                        * If we are here, the page is still not completely
+                        * free. Take the global pool lock so that we can
+                        * add it back to the lru list.
+                        */
+                       spin_lock(&pool->lock);
+                       list_add(&page->lru, &pool->lru);
                        spin_unlock(&pool->lock);
-                       return 0;
+                       z3fold_page_unlock(zhdr);
                }
 
-               /*
-                * Add to the beginning of LRU.
-                * Pool lock has to be kept here to ensure the page has
-                * not already been released
-                */
-               list_add(&page->lru, &pool->lru);
+               /* We started off locked so we need to lock the pool back */
+               spin_lock(&pool->lock);
        }
        spin_unlock(&pool->lock);
        return -EAGAIN;
index 38aa634..b718db2 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/module.h>
 
 /**
- *  p9_release_req_pages - Release pages after the transaction.
+ *  p9_release_pages - Release pages after the transaction.
  */
 void p9_release_pages(struct page **pages, int nr_pages)
 {
index 0cfba91..848969f 100644 (file)
@@ -1092,8 +1092,8 @@ static struct p9_trans_module p9_fd_trans = {
 };
 
 /**
- * p9_poll_proc - poll worker thread
- * @a: thread state and arguments
+ * p9_poll_workfn - poll worker thread
+ * @work: work queue
  *
  * polls all v9fs transports for new events and queues the appropriate
  * work to the work queue
index 6d8e303..3d414ac 100644 (file)
@@ -68,8 +68,6 @@
  * @pd: Protection Domain pointer
  * @qp: Queue Pair pointer
  * @cq: Completion Queue pointer
- * @dm_mr: DMA Memory Region pointer
- * @lkey: The local access only memory region key
  * @timeout: Number of uSecs to wait for connection management events
  * @privport: Whether a privileged port may be used
  * @port: The port to use
@@ -632,7 +630,7 @@ static int p9_rdma_bind_privport(struct p9_trans_rdma *rdma)
 }
 
 /**
- * trans_create_rdma - Transport method for creating atransport instance
+ * rdma_create_trans - Transport method for creating a transport instance
  * @client: client instance
  * @addr: IP address string
  * @args: Mount options string
index 3aa5a93..4d03722 100644 (file)
@@ -60,7 +60,6 @@ static atomic_t vp_pinned = ATOMIC_INIT(0);
 
 /**
  * struct virtio_chan - per-instance transport information
- * @initialized: whether the channel is initialized
  * @inuse: whether the channel is in use
  * @lock: protects multiple elements within this structure
  * @client: client instance
@@ -385,8 +384,8 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
 * @uidata: user buffer that should be used for zero copy read
 * @uodata: user buffer that should be used for zero copy write
  * @inlen: read buffer size
- * @olen: write buffer size
- * @hdrlen: reader header size, This is the size of response protocol data
+ * @outlen: write buffer size
+ * @in_hdr_len: reader header size; this is the size of the response protocol data
  *
  */
 static int
index 086a4ab..0f19960 100644 (file)
@@ -485,7 +485,7 @@ static int xen_9pfs_front_probe(struct xenbus_device *dev,
 
 static int xen_9pfs_front_resume(struct xenbus_device *dev)
 {
-       dev_warn(&dev->dev, "suspsend/resume unsupported\n");
+       dev_warn(&dev->dev, "suspend/resume unsupported\n");
        return 0;
 }
 
index 01d5d20..3138a86 100644 (file)
@@ -41,6 +41,9 @@ static unsigned char bridge_ula_lec[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };
 #include <linux/module.h>
 #include <linux/init.h>
 
+/* Hardening for Spectre-v1 */
+#include <linux/nospec.h>
+
 #include "lec.h"
 #include "lec_arpc.h"
 #include "resources.h"
@@ -687,8 +690,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
        bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc));
        if (bytes_left != 0)
                pr_info("copy from user failed for %d bytes\n", bytes_left);
-       if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF ||
-           !dev_lec[ioc_data.dev_num])
+       if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF)
+               return -EINVAL;
+       ioc_data.dev_num = array_index_nospec(ioc_data.dev_num, MAX_LEC_ITF);
+       if (!dev_lec[ioc_data.dev_num])
                return -EINVAL;
        vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
        if (!vpriv)
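
array_index_nospec() clamps the index after the bounds check so that a mis-speculated branch cannot use an attacker-controlled value to address dev_lec[]. The hardening pattern in general form (hypothetical table, kernel context assumed):

#include <linux/nospec.h>

static void *demo_lookup(int idx, void **table, int nr_entries)
{
        if (idx < 0 || idx >= nr_entries)
                return NULL;
        /* After this, idx stays within 0..nr_entries-1 even under speculation. */
        idx = array_index_nospec(idx, nr_entries);
        return table[idx];
}
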
index 82c1a6f..5bb6681 100644 (file)
@@ -518,8 +518,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
                return -ELOOP;
        }
 
-       /* Device is already being bridged */
-       if (br_port_exists(dev))
+       /* Device has master upper dev */
+       if (netdev_master_upper_dev_get(dev))
                return -EBUSY;
 
        /* No bridging devices that dislike that (e.g. wireless) */
index 032e0fe..28a4c34 100644 (file)
@@ -1825,13 +1825,14 @@ static int compat_table_info(const struct ebt_table_info *info,
 {
        unsigned int size = info->entries_size;
        const void *entries = info->entries;
-       int ret;
 
        newinfo->entries_size = size;
-
-       ret = xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
-       if (ret)
-               return ret;
+       if (info->nentries) {
+               int ret = xt_compat_init_offsets(NFPROTO_BRIDGE,
+                                                info->nentries);
+               if (ret)
+                       return ret;
+       }
 
        return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
                                                        entries, newinfo);
index fcb40c1..3b3d33e 100644 (file)
@@ -2569,6 +2569,11 @@ static int try_write(struct ceph_connection *con)
        int ret = 1;
 
        dout("try_write start %p state %lu\n", con, con->state);
+       if (con->state != CON_STATE_PREOPEN &&
+           con->state != CON_STATE_CONNECTING &&
+           con->state != CON_STATE_NEGOTIATING &&
+           con->state != CON_STATE_OPEN)
+               return 0;
 
 more:
        dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
@@ -2594,6 +2599,8 @@ more:
        }
 
 more_kvec:
+       BUG_ON(!con->sock);
+
        /* kvec data queued? */
        if (con->out_kvec_left) {
                ret = write_partial_kvec(con);
index b3dac24..21ac6e3 100644 (file)
@@ -209,6 +209,14 @@ static void reopen_session(struct ceph_mon_client *monc)
        __open_session(monc);
 }
 
+static void un_backoff(struct ceph_mon_client *monc)
+{
+       monc->hunt_mult /= 2; /* reduce by 50% */
+       if (monc->hunt_mult < 1)
+               monc->hunt_mult = 1;
+       dout("%s hunt_mult now %d\n", __func__, monc->hunt_mult);
+}
+
 /*
  * Reschedule delayed work timer.
  */
@@ -963,6 +971,7 @@ static void delayed_work(struct work_struct *work)
                if (!monc->hunting) {
                        ceph_con_keepalive(&monc->con);
                        __validate_auth(monc);
+                       un_backoff(monc);
                }
 
                if (is_auth &&
@@ -1123,9 +1132,8 @@ static void finish_hunting(struct ceph_mon_client *monc)
                dout("%s found mon%d\n", __func__, monc->cur_mon);
                monc->hunting = false;
                monc->had_a_connection = true;
-               monc->hunt_mult /= 2; /* reduce by 50% */
-               if (monc->hunt_mult < 1)
-                       monc->hunt_mult = 1;
+               un_backoff(monc);
+               __schedule_delayed(monc);
        }
 }
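
Factoring the hunt_mult handling into un_backoff() gives both call sites the same decay rule: halve the multiplier and never let it drop below 1. Calling it from delayed_work() when a keepalive goes out on a healthy (non-hunting) session, in addition to finish_hunting(), lets the reconnect backoff relax gradually. A standalone sketch of the decay rule, with a hypothetical stand-in for the monc field:

#include <stdio.h>

static int hunt_mult = 8;       /* hypothetical stand-in for monc->hunt_mult */

static void un_backoff(void)
{
        hunt_mult /= 2;         /* reduce by 50% */
        if (hunt_mult < 1)
                hunt_mult = 1;  /* floor at the base interval */
}

int main(void)
{
        for (int i = 0; i < 5; i++) {
                un_backoff();
                printf("hunt_mult = %d\n", hunt_mult);  /* prints 4 2 1 1 1 */
        }
        return 0;
}
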
 
index ea2a6c9..d2667e5 100644 (file)
@@ -157,10 +157,12 @@ static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
 #endif /* CONFIG_BLOCK */
 
 static void ceph_osd_data_bvecs_init(struct ceph_osd_data *osd_data,
-                                    struct ceph_bvec_iter *bvec_pos)
+                                    struct ceph_bvec_iter *bvec_pos,
+                                    u32 num_bvecs)
 {
        osd_data->type = CEPH_OSD_DATA_TYPE_BVECS;
        osd_data->bvec_pos = *bvec_pos;
+       osd_data->num_bvecs = num_bvecs;
 }
 
 #define osd_req_op_data(oreq, whch, typ, fld)                          \
@@ -237,6 +239,22 @@ void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
 EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
 #endif /* CONFIG_BLOCK */
 
+void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
+                                     unsigned int which,
+                                     struct bio_vec *bvecs, u32 num_bvecs,
+                                     u32 bytes)
+{
+       struct ceph_osd_data *osd_data;
+       struct ceph_bvec_iter it = {
+               .bvecs = bvecs,
+               .iter = { .bi_size = bytes },
+       };
+
+       osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
+       ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
+}
+EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvecs);
+
 void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
                                         unsigned int which,
                                         struct ceph_bvec_iter *bvec_pos)
@@ -244,7 +262,7 @@ void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
        struct ceph_osd_data *osd_data;
 
        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
-       ceph_osd_data_bvecs_init(osd_data, bvec_pos);
+       ceph_osd_data_bvecs_init(osd_data, bvec_pos, 0);
 }
 EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvec_pos);
 
@@ -287,7 +305,8 @@ EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);
 
 void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
                                       unsigned int which,
-                                      struct bio_vec *bvecs, u32 bytes)
+                                      struct bio_vec *bvecs, u32 num_bvecs,
+                                      u32 bytes)
 {
        struct ceph_osd_data *osd_data;
        struct ceph_bvec_iter it = {
@@ -296,7 +315,7 @@ void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
        };
 
        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
-       ceph_osd_data_bvecs_init(osd_data, &it);
+       ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
        osd_req->r_ops[which].cls.indata_len += bytes;
        osd_req->r_ops[which].indata_len += bytes;
 }
index 5ae7437..7242cce 100644 (file)
@@ -377,7 +377,8 @@ static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
            optname == SO_ATTACH_REUSEPORT_CBPF)
                return do_set_attach_filter(sock, level, optname,
                                            optval, optlen);
-       if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
+       if (!COMPAT_USE_64BIT_TIME &&
+           (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
                return do_set_sock_timeout(sock, level, optname, optval, optlen);
 
        return sock_setsockopt(sock, level, optname, optval, optlen);
@@ -448,7 +449,8 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
 static int compat_sock_getsockopt(struct socket *sock, int level, int optname,
                                char __user *optval, int __user *optlen)
 {
-       if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
+       if (!COMPAT_USE_64BIT_TIME &&
+           (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
                return do_get_sock_timeout(sock, level, optname, optval, optlen);
        return sock_getsockopt(sock, level, optname, optval, optlen);
 }
index 03416e6..ba02f0d 100644 (file)
@@ -1032,6 +1032,11 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
                info_size = sizeof(info);
                if (copy_from_user(&info, useraddr, info_size))
                        return -EFAULT;
+               /* Since malicious users may modify the original data,
+                * we need to check whether FLOW_RSS is still requested.
+                */
+               if (!(info.flow_type & FLOW_RSS))
+                       return -EINVAL;
        }
 
        if (info.cmd == ETHTOOL_GRXCLSRLALL) {
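
The ethtool_get_rxnfc() fix closes a double-fetch window: the copy size is chosen from an earlier read of the user buffer, and because userspace can rewrite the buffer before the full copy shown here, FLOW_RSS has to be re-checked against the data that was actually copied. A hedged sketch of that rule follows; the handler name is illustrative, while copy_from_user() and FLOW_RSS are as in the hunk.

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/uaccess.h>

/* Illustrative handler: any decision made from an earlier, partial copy of a
 * user buffer must be re-validated against the final, full copy. */
static int demo_get_rxnfc(void __user *useraddr)
{
        struct ethtool_rxnfc info;

        if (copy_from_user(&info, useraddr, sizeof(info)))
                return -EFAULT;
        /* userspace may have cleared the flag since the first fetch */
        if (!(info.flow_type & FLOW_RSS))
                return -EINVAL;
        return 0;
}
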
index d31aff9..e77c30c 100644 (file)
@@ -3240,6 +3240,7 @@ BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
        skb_dst_set(skb, (struct dst_entry *) md);
 
        info = &md->u.tun_info;
+       memset(info, 0, sizeof(*info));
        info->mode = IP_TUNNEL_INFO_TX;
 
        info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
index 92d016e..385f153 100644 (file)
@@ -126,6 +126,16 @@ static void ccid2_change_l_seq_window(struct sock *sk, u64 val)
                                                  DCCPF_SEQ_WMAX));
 }
 
+static void dccp_tasklet_schedule(struct sock *sk)
+{
+       struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet;
+
+       if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
+               sock_hold(sk);
+               __tasklet_schedule(t);
+       }
+}
+
 static void ccid2_hc_tx_rto_expire(struct timer_list *t)
 {
        struct ccid2_hc_tx_sock *hc = from_timer(hc, t, tx_rtotimer);
@@ -166,7 +176,7 @@ static void ccid2_hc_tx_rto_expire(struct timer_list *t)
 
        /* if we were blocked before, we may now send cwnd=1 packet */
        if (sender_was_blocked)
-               tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+               dccp_tasklet_schedule(sk);
        /* restart backed-off timer */
        sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
 out:
@@ -706,7 +716,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 done:
        /* check if incoming Acks allow pending packets to be sent */
        if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
-               tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+               dccp_tasklet_schedule(sk);
        dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
 }
 
index b50a873..1501a20 100644 (file)
@@ -232,6 +232,7 @@ static void dccp_write_xmitlet(unsigned long data)
        else
                dccp_write_xmit(sk);
        bh_unlock_sock(sk);
+       sock_put(sk);
 }
 
 static void dccp_write_xmit_timer(struct timer_list *t)
@@ -240,7 +241,6 @@ static void dccp_write_xmit_timer(struct timer_list *t)
        struct sock *sk = &dp->dccps_inet_connection.icsk_inet.sk;
 
        dccp_write_xmitlet((unsigned long)sk);
-       sock_put(sk);
 }
 
 void dccp_init_xmit_timers(struct sock *sk)
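
Taken together, the two dccp hunks pair every successful tasklet schedule with a socket reference: the hold is taken only when test_and_set_bit() shows this call is the one that queued the tasklet, and the matching sock_put() moves into the tasklet body, so the reference lives exactly as long as a run is pending. A hedged recap of the shape, with hypothetical names but the same helpers as the hunks:

#include <linux/interrupt.h>
#include <net/sock.h>

/* Take a reference only when this call actually queues the work, and drop it
 * at the end of the deferred handler so the socket outlives every run. */
static void demo_schedule(struct sock *sk, struct tasklet_struct *t)
{
        if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                sock_hold(sk);          /* pin sk while the tasklet is pending */
                __tasklet_schedule(t);
        }
}

static void demo_tasklet_body(unsigned long data)
{
        struct sock *sk = (struct sock *)data;

        /* ... actual transmit work elided ... */
        sock_put(sk);                   /* balance the sock_hold() above */
}
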
index b8d95cb..44a7e16 100644 (file)
@@ -20,8 +20,8 @@ typedef unsigned __bitwise lowpan_rx_result;
 struct frag_lowpan_compare_key {
        u16 tag;
        u16 d_size;
-       const struct ieee802154_addr src;
-       const struct ieee802154_addr dst;
+       struct ieee802154_addr src;
+       struct ieee802154_addr dst;
 };
 
 /* Equivalent of ipv4 struct ipq
index 1790b65..2cc2241 100644 (file)
@@ -75,14 +75,14 @@ fq_find(struct net *net, const struct lowpan_802154_cb *cb,
 {
        struct netns_ieee802154_lowpan *ieee802154_lowpan =
                net_ieee802154_lowpan(net);
-       struct frag_lowpan_compare_key key = {
-               .tag = cb->d_tag,
-               .d_size = cb->d_size,
-               .src = *src,
-               .dst = *dst,
-       };
+       struct frag_lowpan_compare_key key = {};
        struct inet_frag_queue *q;
 
+       key.tag = cb->d_tag;
+       key.d_size = cb->d_size;
+       key.src = *src;
+       key.dst = *dst;
+
        q = inet_frag_find(&ieee802154_lowpan->frags, &key);
        if (!q)
                return NULL;
@@ -372,7 +372,7 @@ int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type)
        struct lowpan_frag_queue *fq;
        struct net *net = dev_net(skb->dev);
        struct lowpan_802154_cb *cb = lowpan_802154_cb(skb);
-       struct ieee802154_hdr hdr;
+       struct ieee802154_hdr hdr = {};
        int err;
 
        if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
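
Dropping const from frag_lowpan_compare_key and filling the key (and the ieee802154_hdr on the receive path) from zero-initialized objects matters because these structures are hashed and compared as raw bytes: uninitialized padding or fields would make otherwise-equal keys look different to the hash. A small sketch of the rule, zero the whole object before filling the fields that feed a byte-wise hash (the types below are hypothetical):

#include <string.h>

/* Hypothetical lookup key with implicit padding between members. */
struct demo_key {
        unsigned short tag;     /* 2 bytes, then padding ...            */
        unsigned int   size;    /* ... depending on the ABI             */
};

static void demo_key_fill(struct demo_key *key, unsigned short tag,
                          unsigned int size)
{
        memset(key, 0, sizeof(*key));   /* padding is now deterministic */
        key->tag = tag;
        key->size = size;
}
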
index 7d1ec76..13bbf8c 100644 (file)
@@ -69,6 +69,9 @@ void *ife_decode(struct sk_buff *skb, u16 *metalen)
        int total_pull;
        u16 ifehdrln;
 
+       if (!pskb_may_pull(skb, skb->dev->hard_header_len + IFE_METAHDRLEN))
+               return NULL;
+
        ifehdr = (struct ifeheadr *) (skb->data + skb->dev->hard_header_len);
        ifehdrln = ntohs(ifehdr->metalen);
        total_pull = skb->dev->hard_header_len + ifehdrln;
@@ -92,12 +95,43 @@ struct meta_tlvhdr {
        __be16 len;
 };
 
+static bool __ife_tlv_meta_valid(const unsigned char *skbdata,
+                                const unsigned char *ifehdr_end)
+{
+       const struct meta_tlvhdr *tlv;
+       u16 tlvlen;
+
+       if (unlikely(skbdata + sizeof(*tlv) > ifehdr_end))
+               return false;
+
+       tlv = (const struct meta_tlvhdr *)skbdata;
+       tlvlen = ntohs(tlv->len);
+
+       /* tlv length field is inc header, check on minimum */
+       if (tlvlen < NLA_HDRLEN)
+               return false;
+
+       /* overflow by NLA_ALIGN check */
+       if (NLA_ALIGN(tlvlen) < tlvlen)
+               return false;
+
+       if (unlikely(skbdata + NLA_ALIGN(tlvlen) > ifehdr_end))
+               return false;
+
+       return true;
+}
+
 /* Caller takes care of presenting data in network order
  */
-void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen, u16 *totlen)
+void *ife_tlv_meta_decode(void *skbdata, const void *ifehdr_end, u16 *attrtype,
+                         u16 *dlen, u16 *totlen)
 {
-       struct meta_tlvhdr *tlv = (struct meta_tlvhdr *) skbdata;
+       struct meta_tlvhdr *tlv;
+
+       if (!__ife_tlv_meta_valid(skbdata, ifehdr_end))
+               return NULL;
 
+       tlv = (struct meta_tlvhdr *)skbdata;
        *dlen = ntohs(tlv->len) - NLA_HDRLEN;
        *attrtype = ntohs(tlv->type);
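
__ife_tlv_meta_valid() applies the usual TLV parsing checklist before any field is dereferenced: the header must fit before ifehdr_end, the length must cover at least the header, NLA_ALIGN() must not wrap, and the aligned record must still fit in the buffer; ife_tlv_meta_decode() now returns NULL rather than read past the metadata area. A compact standalone sketch of the same bounds discipline (plain C, no kernel helpers, byte-order handling elided):

#include <stddef.h>
#include <stdint.h>

struct demo_tlv {
        uint16_t type;
        uint16_t len;   /* length including this header */
};

/* Return nonzero when a TLV starting at p can be parsed without reading past
 * end; mirrors the checks in __ife_tlv_meta_valid(). Assumes p <= end. */
static int demo_tlv_ok(const unsigned char *p, const unsigned char *end)
{
        const struct demo_tlv *tlv;
        size_t len, aligned;

        if ((size_t)(end - p) < sizeof(*tlv))   /* header must fit */
                return 0;
        tlv = (const struct demo_tlv *)p;
        len = tlv->len;
        if (len < sizeof(*tlv))                 /* must cover its own header */
                return 0;
        aligned = (len + 3) & ~(size_t)3;       /* 4-byte align, like NLA_ALIGN */
        if (aligned < len)                      /* wrap check, as in the hunk */
                return 0;
        return (size_t)(end - p) >= aligned;    /* aligned record must fit */
}
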
 
index 05e47d7..56a0106 100644 (file)
@@ -775,8 +775,10 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        ipc.addr = faddr = daddr;
 
        if (ipc.opt && ipc.opt->opt.srr) {
-               if (!daddr)
-                       return -EINVAL;
+               if (!daddr) {
+                       err = -EINVAL;
+                       goto out_free;
+               }
                faddr = ipc.opt->opt.faddr;
        }
        tos = get_rttos(&ipc, inet);
@@ -842,6 +844,7 @@ back_from_confirm:
 
 out:
        ip_rt_put(rt);
+out_free:
        if (free)
                kfree(ipc.opt);
        if (!err) {
index ccb25d8..29268ef 100644 (file)
@@ -709,7 +709,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
                fnhe->fnhe_gw = gw;
                fnhe->fnhe_pmtu = pmtu;
                fnhe->fnhe_mtu_locked = lock;
-               fnhe->fnhe_expires = expires;
+               fnhe->fnhe_expires = max(1UL, expires);
 
                /* Exception created; mark the cached routes for the nexthop
                 * stale, so anyone caching it rechecks if this exception
@@ -1297,6 +1297,36 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
        return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
 }
 
+static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
+{
+       struct fnhe_hash_bucket *hash;
+       struct fib_nh_exception *fnhe, __rcu **fnhe_p;
+       u32 hval = fnhe_hashfun(daddr);
+
+       spin_lock_bh(&fnhe_lock);
+
+       hash = rcu_dereference_protected(nh->nh_exceptions,
+                                        lockdep_is_held(&fnhe_lock));
+       hash += hval;
+
+       fnhe_p = &hash->chain;
+       fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
+       while (fnhe) {
+               if (fnhe->fnhe_daddr == daddr) {
+                       rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
+                               fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
+                       fnhe_flush_routes(fnhe);
+                       kfree_rcu(fnhe, rcu);
+                       break;
+               }
+               fnhe_p = &fnhe->fnhe_next;
+               fnhe = rcu_dereference_protected(fnhe->fnhe_next,
+                                                lockdep_is_held(&fnhe_lock));
+       }
+
+       spin_unlock_bh(&fnhe_lock);
+}
+
 static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
 {
        struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions);
@@ -1310,8 +1340,14 @@ static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
 
        for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
             fnhe = rcu_dereference(fnhe->fnhe_next)) {
-               if (fnhe->fnhe_daddr == daddr)
+               if (fnhe->fnhe_daddr == daddr) {
+                       if (fnhe->fnhe_expires &&
+                           time_after(jiffies, fnhe->fnhe_expires)) {
+                               ip_del_fnhe(nh, daddr);
+                               break;
+                       }
                        return fnhe;
+               }
        }
        return NULL;
 }
@@ -1339,6 +1375,7 @@ static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
                        fnhe->fnhe_gw = 0;
                        fnhe->fnhe_pmtu = 0;
                        fnhe->fnhe_expires = 0;
+                       fnhe->fnhe_mtu_locked = false;
                        fnhe_flush_routes(fnhe);
                        orig = NULL;
                }
@@ -1636,36 +1673,6 @@ static void ip_handle_martian_source(struct net_device *dev,
 #endif
 }
 
-static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
-{
-       struct fnhe_hash_bucket *hash;
-       struct fib_nh_exception *fnhe, __rcu **fnhe_p;
-       u32 hval = fnhe_hashfun(daddr);
-
-       spin_lock_bh(&fnhe_lock);
-
-       hash = rcu_dereference_protected(nh->nh_exceptions,
-                                        lockdep_is_held(&fnhe_lock));
-       hash += hval;
-
-       fnhe_p = &hash->chain;
-       fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
-       while (fnhe) {
-               if (fnhe->fnhe_daddr == daddr) {
-                       rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
-                               fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
-                       fnhe_flush_routes(fnhe);
-                       kfree_rcu(fnhe, rcu);
-                       break;
-               }
-               fnhe_p = &fnhe->fnhe_next;
-               fnhe = rcu_dereference_protected(fnhe->fnhe_next,
-                                                lockdep_is_held(&fnhe_lock));
-       }
-
-       spin_unlock_bh(&fnhe_lock);
-}
-
 /* called in rcu_read_lock() section */
 static int __mkroute_input(struct sk_buff *skb,
                           const struct fib_result *res,
@@ -1719,20 +1726,10 @@ static int __mkroute_input(struct sk_buff *skb,
 
        fnhe = find_exception(&FIB_RES_NH(*res), daddr);
        if (do_cache) {
-               if (fnhe) {
+               if (fnhe)
                        rth = rcu_dereference(fnhe->fnhe_rth_input);
-                       if (rth && rth->dst.expires &&
-                           time_after(jiffies, rth->dst.expires)) {
-                               ip_del_fnhe(&FIB_RES_NH(*res), daddr);
-                               fnhe = NULL;
-                       } else {
-                               goto rt_cache;
-                       }
-               }
-
-               rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
-
-rt_cache:
+               else
+                       rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
                if (rt_cache_valid(rth)) {
                        skb_dst_set_noref(skb, &rth->dst);
                        goto out;
@@ -2216,39 +2213,31 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
                 * the loopback interface and the IP_PKTINFO ipi_ifindex will
                 * be set to the loopback interface as well.
                 */
-               fi = NULL;
+               do_cache = false;
        }
 
        fnhe = NULL;
        do_cache &= fi != NULL;
-       if (do_cache) {
+       if (fi) {
                struct rtable __rcu **prth;
                struct fib_nh *nh = &FIB_RES_NH(*res);
 
                fnhe = find_exception(nh, fl4->daddr);
+               if (!do_cache)
+                       goto add;
                if (fnhe) {
                        prth = &fnhe->fnhe_rth_output;
-                       rth = rcu_dereference(*prth);
-                       if (rth && rth->dst.expires &&
-                           time_after(jiffies, rth->dst.expires)) {
-                               ip_del_fnhe(nh, fl4->daddr);
-                               fnhe = NULL;
-                       } else {
-                               goto rt_cache;
+               } else {
+                       if (unlikely(fl4->flowi4_flags &
+                                    FLOWI_FLAG_KNOWN_NH &&
+                                    !(nh->nh_gw &&
+                                      nh->nh_scope == RT_SCOPE_LINK))) {
+                               do_cache = false;
+                               goto add;
                        }
+                       prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
                }
-
-               if (unlikely(fl4->flowi4_flags &
-                            FLOWI_FLAG_KNOWN_NH &&
-                            !(nh->nh_gw &&
-                              nh->nh_scope == RT_SCOPE_LINK))) {
-                       do_cache = false;
-                       goto add;
-               }
-               prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
                rth = rcu_dereference(*prth);
-
-rt_cache:
                if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
                        return rth;
        }
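
The route.c rework moves the expiry test into find_exception() itself: a cached fib_nh_exception whose fnhe_expires lies in the past is deleted during lookup, so the input and output paths no longer open-code the rth->dst.expires check, and storing max(1UL, expires) in update_or_create_fnhe() keeps 0 free to mean "no expiry". A standalone sketch of that lazy-expiry-on-lookup idea, with hypothetical types:

#include <stddef.h>

/* Hypothetical cache entry with an optional expiry time (0 = never). */
struct demo_entry {
        unsigned long expires;
        int value;
};

/* Lazy expiry: a lookup that finds a stale entry drops it on the spot, so
 * every caller sees expired entries disappear without separate checks. */
static struct demo_entry *demo_lookup(struct demo_entry *e, unsigned long now)
{
        if (!e)
                return NULL;
        if (e->expires && (long)(now - e->expires) > 0) {  /* like time_after() */
                /* unlink e from its hash chain here (elided) */
                return NULL;
        }
        return e;
}
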
index 9ce1c72..c9d00ef 100644 (file)
@@ -697,7 +697,7 @@ static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
 {
        return skb->len < size_goal &&
               sock_net(sk)->ipv4.sysctl_tcp_autocorking &&
-              skb != tcp_write_queue_head(sk) &&
+              !tcp_rtx_queue_empty(sk) &&
               refcount_read(&sk->sk_wmem_alloc) > skb->truesize;
 }
 
@@ -1204,7 +1204,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
                        uarg->zerocopy = 0;
        }
 
-       if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect)) {
+       if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) &&
+           !tp->repair) {
                err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
                if (err == -EINPROGRESS && copied_syn > 0)
                        goto out;
@@ -2673,7 +2674,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
        case TCP_REPAIR_QUEUE:
                if (!tp->repair)
                        err = -EPERM;
-               else if (val < TCP_QUEUES_NR)
+               else if ((unsigned int)val < TCP_QUEUES_NR)
                        tp->repair_queue = val;
                else
                        err = -EINVAL;
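
The TCP_REPAIR_QUEUE check casts val to unsigned int before comparing it with TCP_QUEUES_NR, so a negative setsockopt() value can no longer slip past a signed less-than test and land in tp->repair_queue. The effect is easy to demonstrate in plain C (the constant is redefined locally so the demo stands alone):

#include <stdio.h>

#define TCP_QUEUES_NR 3         /* local stand-in for the demo */

int main(void)
{
        int val = -1;

        /* signed compare: -1 < 3 is true, so the bogus value is accepted */
        printf("signed:   %s\n",
               (val < TCP_QUEUES_NR) ? "accepted" : "rejected");
        /* unsigned compare: (unsigned int)-1 is huge, so it is rejected */
        printf("unsigned: %s\n",
               ((unsigned int)val < TCP_QUEUES_NR) ? "accepted" : "rejected");
        return 0;
}
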
index 158d105..58e2f47 100644 (file)
@@ -806,7 +806,9 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
                        }
                }
        }
-       bbr->idle_restart = 0;
+       /* Restart after idle ends only once we process a new S/ACK for data */
+       if (rs->delivered > 0)
+               bbr->idle_restart = 0;
 }
 
 static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
index 367def6..e51c644 100644 (file)
@@ -3868,11 +3868,8 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
        int length = (th->doff << 2) - sizeof(*th);
        const u8 *ptr = (const u8 *)(th + 1);
 
-       /* If the TCP option is too short, we can short cut */
-       if (length < TCPOLEN_MD5SIG)
-               return NULL;
-
-       while (length > 0) {
+       /* If not enough data remaining, we can short cut */
+       while (length >= TCPOLEN_MD5SIG) {
                int opcode = *ptr++;
                int opsize;
 
index 24b5c59..b61a770 100644 (file)
@@ -401,9 +401,9 @@ static int compute_score(struct sock *sk, struct net *net,
                bool dev_match = (sk->sk_bound_dev_if == dif ||
                                  sk->sk_bound_dev_if == sdif);
 
-               if (exact_dif && !dev_match)
+               if (!dev_match)
                        return -1;
-               if (sk->sk_bound_dev_if && dev_match)
+               if (sk->sk_bound_dev_if)
                        score += 4;
        }
 
@@ -952,8 +952,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);
 
        if (ipc.opt && ipc.opt->opt.srr) {
-               if (!daddr)
-                       return -EINVAL;
+               if (!daddr) {
+                       err = -EINVAL;
+                       goto out_free;
+               }
                faddr = ipc.opt->opt.faddr;
                connected = 0;
        }
@@ -1074,6 +1076,7 @@ do_append_data:
 
 out:
        ip_rt_put(rt);
+out_free:
        if (free)
                kfree(ipc.opt);
        if (!err)
index 6794ddf..11e4e80 100644 (file)
@@ -34,16 +34,15 @@ config IPV6_ROUTE_INFO
        bool "IPv6: Route Information (RFC 4191) support"
        depends on IPV6_ROUTER_PREF
        ---help---
-         This is experimental support of Route Information.
+         Support of Route Information.
 
          If unsure, say N.
 
 config IPV6_OPTIMISTIC_DAD
        bool "IPv6: Enable RFC 4429 Optimistic DAD"
        ---help---
-         This is experimental support for optimistic Duplicate
-         Address Detection.  It allows for autoconfigured addresses
-         to be used more quickly.
+         Support for optimistic Duplicate Address Detection. It allows for
+         autoconfigured addresses to be used more quickly.
 
          If unsure, say N.
 
@@ -280,7 +279,7 @@ config IPV6_MROUTE
        depends on IPV6
        select IP_MROUTE_COMMON
        ---help---
-         Experimental support for IPv6 multicast forwarding.
+         Support for IPv6 multicast forwarding.
          If unsure, say N.
 
 config IPV6_MROUTE_MULTIPLE_TABLES
index c214ffe..ca957dd 100644 (file)
@@ -669,7 +669,7 @@ static void vti6_link_config(struct ip6_tnl *t, bool keep_mtu)
        else
                mtu = ETH_DATA_LEN - LL_MAX_HEADER - sizeof(struct ipv6hdr);
 
-       dev->mtu = max_t(int, mtu, IPV6_MIN_MTU);
+       dev->mtu = max_t(int, mtu, IPV4_MIN_MTU);
 }
 
 /**
@@ -881,7 +881,7 @@ static void vti6_dev_setup(struct net_device *dev)
        dev->priv_destructor = vti6_dev_free;
 
        dev->type = ARPHRD_TUNNEL6;
-       dev->min_mtu = IPV6_MIN_MTU;
+       dev->min_mtu = IPV4_MIN_MTU;
        dev->max_mtu = IP_MAX_MTU - sizeof(struct ipv6hdr);
        dev->flags |= IFF_NOARP;
        dev->addr_len = sizeof(struct in6_addr);
index ccbfa83..ce77bcc 100644 (file)
@@ -48,6 +48,34 @@ config NFT_CHAIN_ROUTE_IPV6
          fields such as the source, destination, flowlabel, hop-limit and
          the packet mark.
 
+if NF_NAT_IPV6
+
+config NFT_CHAIN_NAT_IPV6
+       tristate "IPv6 nf_tables nat chain support"
+       help
+         This option enables the "nat" chain for IPv6 in nf_tables. This
+         chain type is used to perform Network Address Translation (NAT)
+         packet transformations such as the source, destination address and
+         source and destination ports.
+
+config NFT_MASQ_IPV6
+       tristate "IPv6 masquerade support for nf_tables"
+       depends on NFT_MASQ
+       select NF_NAT_MASQUERADE_IPV6
+       help
+         This is the expression that provides IPv4 masquerading support for
+         nf_tables.
+
+config NFT_REDIR_IPV6
+       tristate "IPv6 redirect support for nf_tables"
+       depends on NFT_REDIR
+       select NF_NAT_REDIRECT
+       help
+         This is the expression that provides IPv4 redirect support for
+         nf_tables.
+
+endif # NF_NAT_IPV6
+
 config NFT_REJECT_IPV6
        select NF_REJECT_IPV6
        default NFT_REJECT
@@ -107,39 +135,12 @@ config NF_NAT_IPV6
 
 if NF_NAT_IPV6
 
-config NFT_CHAIN_NAT_IPV6
-       depends on NF_TABLES_IPV6
-       tristate "IPv6 nf_tables nat chain support"
-       help
-         This option enables the "nat" chain for IPv6 in nf_tables. This
-         chain type is used to perform Network Address Translation (NAT)
-         packet transformations such as the source, destination address and
-         source and destination ports.
-
 config NF_NAT_MASQUERADE_IPV6
        tristate "IPv6 masquerade support"
        help
          This is the kernel functionality to provide NAT in the masquerade
          flavour (automatic source address selection) for IPv6.
 
-config NFT_MASQ_IPV6
-       tristate "IPv6 masquerade support for nf_tables"
-       depends on NF_TABLES_IPV6
-       depends on NFT_MASQ
-       select NF_NAT_MASQUERADE_IPV6
-       help
-         This is the expression that provides IPv4 masquerading support for
-         nf_tables.
-
-config NFT_REDIR_IPV6
-       tristate "IPv6 redirect support for nf_tables"
-       depends on NF_TABLES_IPV6
-       depends on NFT_REDIR
-       select NF_NAT_REDIRECT
-       help
-         This is the expression that provides IPv4 redirect support for
-         nf_tables.
-
 endif # NF_NAT_IPV6
 
 config IP6_NF_IPTABLES
index 49b954d..f4d6173 100644 (file)
@@ -1835,11 +1835,16 @@ static void ip6_multipath_l3_keys(const struct sk_buff *skb,
        const struct ipv6hdr *inner_iph;
        const struct icmp6hdr *icmph;
        struct ipv6hdr _inner_iph;
+       struct icmp6hdr _icmph;
 
        if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
                goto out;
 
-       icmph = icmp6_hdr(skb);
+       icmph = skb_header_pointer(skb, skb_transport_offset(skb),
+                                  sizeof(_icmph), &_icmph);
+       if (!icmph)
+               goto out;
+
        if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
            icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
            icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
@@ -3975,6 +3980,7 @@ void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
 
 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
        [RTA_GATEWAY]           = { .len = sizeof(struct in6_addr) },
+       [RTA_PREFSRC]           = { .len = sizeof(struct in6_addr) },
        [RTA_OIF]               = { .type = NLA_U32 },
        [RTA_IIF]               = { .type = NLA_U32 },
        [RTA_PRIORITY]          = { .type = NLA_U32 },
@@ -3986,6 +3992,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
        [RTA_EXPIRES]           = { .type = NLA_U32 },
        [RTA_UID]               = { .type = NLA_U32 },
        [RTA_MARK]              = { .type = NLA_U32 },
+       [RTA_TABLE]             = { .type = NLA_U32 },
 };
 
 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
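
ip6_multipath_l3_keys() previously trusted icmp6_hdr(skb), which assumes the ICMPv6 header already sits in the linear area; skb_header_pointer() instead copies the header into the on-stack _icmph when needed and returns NULL if the packet is too short, which is the safe way to peek at headers on a path where the skb has not been linearized. (The added RTA_PREFSRC and RTA_TABLE policy entries in the same file give those attributes explicit validation rules.) A thin recap of the helper's calling convention, grounded in the hunk but wrapped in a hypothetical function:

#include <linux/icmpv6.h>
#include <linux/skbuff.h>

/* skb_header_pointer() returns a pointer to the requested bytes, copying them
 * into the supplied buffer when the skb is not linear, or NULL when the packet
 * is too short; the caller must check for NULL before touching any field. */
static const struct icmp6hdr *demo_peek_icmp6(const struct sk_buff *skb,
                                              struct icmp6hdr *buf)
{
        return skb_header_pointer(skb, skb_transport_offset(skb),
                                  sizeof(*buf), buf);
}
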
index f343e6f..5fe1394 100644 (file)
@@ -136,7 +136,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
        isrh->nexthdr = proto;
 
        hdr->daddr = isrh->segments[isrh->first_segment];
-       set_tun_src(net, ip6_dst_idev(dst)->dev, &hdr->daddr, &hdr->saddr);
+       set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr);
 
 #ifdef CONFIG_IPV6_SEG6_HMAC
        if (sr_has_hmac(isrh)) {
index 4ec76a8..ea07300 100644 (file)
@@ -148,9 +148,9 @@ static int compute_score(struct sock *sk, struct net *net,
                bool dev_match = (sk->sk_bound_dev_if == dif ||
                                  sk->sk_bound_dev_if == sdif);
 
-               if (exact_dif && !dev_match)
+               if (!dev_match)
                        return -1;
-               if (sk->sk_bound_dev_if && dev_match)
+               if (sk->sk_bound_dev_if)
                        score++;
        }
 
index f85f0d7..4a46df8 100644 (file)
@@ -341,6 +341,9 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net)
        struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
        unsigned int i;
 
+       xfrm_state_flush(net, IPSEC_PROTO_ANY, false);
+       xfrm_flush_gc();
+
        for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
                WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i]));
 
index 7e2e718..e62e52e 100644 (file)
@@ -437,6 +437,24 @@ static int verify_address_len(const void *p)
        return 0;
 }
 
+static inline int sadb_key_len(const struct sadb_key *key)
+{
+       int key_bytes = DIV_ROUND_UP(key->sadb_key_bits, 8);
+
+       return DIV_ROUND_UP(sizeof(struct sadb_key) + key_bytes,
+                           sizeof(uint64_t));
+}
+
+static int verify_key_len(const void *p)
+{
+       const struct sadb_key *key = p;
+
+       if (sadb_key_len(key) > key->sadb_key_len)
+               return -EINVAL;
+
+       return 0;
+}
+
 static inline int pfkey_sec_ctx_len(const struct sadb_x_sec_ctx *sec_ctx)
 {
        return DIV_ROUND_UP(sizeof(struct sadb_x_sec_ctx) +
@@ -533,16 +551,25 @@ static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void *
                                return -EINVAL;
                        if (ext_hdrs[ext_type-1] != NULL)
                                return -EINVAL;
-                       if (ext_type == SADB_EXT_ADDRESS_SRC ||
-                           ext_type == SADB_EXT_ADDRESS_DST ||
-                           ext_type == SADB_EXT_ADDRESS_PROXY ||
-                           ext_type == SADB_X_EXT_NAT_T_OA) {
+                       switch (ext_type) {
+                       case SADB_EXT_ADDRESS_SRC:
+                       case SADB_EXT_ADDRESS_DST:
+                       case SADB_EXT_ADDRESS_PROXY:
+                       case SADB_X_EXT_NAT_T_OA:
                                if (verify_address_len(p))
                                        return -EINVAL;
-                       }
-                       if (ext_type == SADB_X_EXT_SEC_CTX) {
+                               break;
+                       case SADB_X_EXT_SEC_CTX:
                                if (verify_sec_ctx_len(p))
                                        return -EINVAL;
+                               break;
+                       case SADB_EXT_KEY_AUTH:
+                       case SADB_EXT_KEY_ENCRYPT:
+                               if (verify_key_len(p))
+                                       return -EINVAL;
+                               break;
+                       default:
+                               break;
                        }
                        ext_hdrs[ext_type-1] = (void *) p;
                }
@@ -1104,14 +1131,12 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
        key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
        if (key != NULL &&
            sa->sadb_sa_auth != SADB_X_AALG_NULL &&
-           ((key->sadb_key_bits+7) / 8 == 0 ||
-            (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t)))
+           key->sadb_key_bits == 0)
                return ERR_PTR(-EINVAL);
        key = ext_hdrs[SADB_EXT_KEY_ENCRYPT-1];
        if (key != NULL &&
            sa->sadb_sa_encrypt != SADB_EALG_NULL &&
-           ((key->sadb_key_bits+7) / 8 == 0 ||
-            (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t)))
+           key->sadb_key_bits == 0)
                return ERR_PTR(-EINVAL);
 
        x = xfrm_state_alloc(net);
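
verify_key_len() centralizes the length check: sadb_key_len is expressed in 64-bit words, so the 8-byte header plus DIV_ROUND_UP(sadb_key_bits, 8) key bytes are rounded up to 64-bit units and compared against the advertised length once in parse_exthdrs(), after which the SA setup code only needs to reject zero-bit keys. A standalone sketch of the arithmetic, with the struct reduced to the two fields that matter:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Reduced stand-in for struct sadb_key; the real header is 8 bytes. */
struct demo_sadb_key {
        uint16_t sadb_key_len;  /* total extension length, in 8-byte units */
        uint16_t sadb_key_bits; /* key material length, in bits            */
};

static int demo_key_words(uint16_t key_bits)
{
        int key_bytes = DIV_ROUND_UP(key_bits, 8);

        /* header (8 bytes) + key bytes, rounded up to 8-byte units */
        return DIV_ROUND_UP(8 + key_bytes, 8);
}

int main(void)
{
        struct demo_sadb_key key = { .sadb_key_len = 3, .sadb_key_bits = 128 };

        /* 128 bits -> 16 bytes -> 8 + 16 = 24 bytes -> 3 words: valid */
        printf("%s\n", demo_key_words(key.sadb_key_bits) > key.sadb_key_len ?
               "reject" : "accept");
        return 0;
}
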
index b8f9d45..7f1e842 100644 (file)
@@ -106,8 +106,11 @@ static void l2tp_dfs_seq_stop(struct seq_file *p, void *v)
                return;
 
        /* Drop reference taken by last invocation of l2tp_dfs_next_tunnel() */
-       if (pd->tunnel)
+       if (pd->tunnel) {
                l2tp_tunnel_dec_refcount(pd->tunnel);
+               pd->tunnel = NULL;
+               pd->session = NULL;
+       }
 }
 
 static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
index 7d0c963..1fd9e14 100644 (file)
@@ -619,6 +619,13 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
        lock_sock(sk);
 
        error = -EINVAL;
+
+       if (sockaddr_len != sizeof(struct sockaddr_pppol2tp) &&
+           sockaddr_len != sizeof(struct sockaddr_pppol2tpv3) &&
+           sockaddr_len != sizeof(struct sockaddr_pppol2tpin6) &&
+           sockaddr_len != sizeof(struct sockaddr_pppol2tpv3in6))
+               goto end;
+
        if (sp->sa_protocol != PX_PROTO_OL2TP)
                goto end;
 
@@ -1618,8 +1625,11 @@ static void pppol2tp_seq_stop(struct seq_file *p, void *v)
                return;
 
        /* Drop reference taken by last invocation of pppol2tp_next_tunnel() */
-       if (pd->tunnel)
+       if (pd->tunnel) {
                l2tp_tunnel_dec_refcount(pd->tunnel);
+               pd->tunnel = NULL;
+               pd->session = NULL;
+       }
 }
 
 static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
index 6d29b2b..1beeea9 100644 (file)
@@ -189,7 +189,6 @@ static int llc_ui_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
        struct llc_sock *llc;
-       struct llc_sap *sap;
 
        if (unlikely(sk == NULL))
                goto out;
@@ -200,15 +199,19 @@ static int llc_ui_release(struct socket *sock)
                llc->laddr.lsap, llc->daddr.lsap);
        if (!llc_send_disc(sk))
                llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
-       sap = llc->sap;
-       /* Hold this for release_sock(), so that llc_backlog_rcv() could still
-        * use it.
-        */
-       llc_sap_hold(sap);
-       if (!sock_flag(sk, SOCK_ZAPPED))
+       if (!sock_flag(sk, SOCK_ZAPPED)) {
+               struct llc_sap *sap = llc->sap;
+
+               /* Hold this for release_sock(), so that llc_backlog_rcv()
+                * could still use it.
+                */
+               llc_sap_hold(sap);
                llc_sap_remove_socket(llc->sap, sk);
-       release_sock(sk);
-       llc_sap_put(sap);
+               release_sock(sk);
+               llc_sap_put(sap);
+       } else {
+               release_sock(sk);
+       }
        if (llc->dev)
                dev_put(llc->dev);
        sock_put(sk);
@@ -927,6 +930,9 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
        if (size > llc->dev->mtu)
                size = llc->dev->mtu;
        copied = size - hdrlen;
+       rc = -EINVAL;
+       if (copied < 0)
+               goto release;
        release_sock(sk);
        skb = sock_alloc_send_skb(sk, size, noblock, &rc);
        lock_sock(sk);
index 1631211..4d78375 100644 (file)
@@ -1099,14 +1099,7 @@ int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb)
 
 int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb)
 {
-       struct llc_sock *llc = llc_sk(sk);
-
-       del_timer(&llc->pf_cycle_timer.timer);
-       del_timer(&llc->ack_timer.timer);
-       del_timer(&llc->rej_sent_timer.timer);
-       del_timer(&llc->busy_state_timer.timer);
-       llc->ack_must_be_send = 0;
-       llc->ack_pf = 0;
+       llc_sk_stop_all_timers(sk, false);
        return 0;
 }
 
index 110e32b..c0ac522 100644 (file)
@@ -961,6 +961,26 @@ out:
        return sk;
 }
 
+void llc_sk_stop_all_timers(struct sock *sk, bool sync)
+{
+       struct llc_sock *llc = llc_sk(sk);
+
+       if (sync) {
+               del_timer_sync(&llc->pf_cycle_timer.timer);
+               del_timer_sync(&llc->ack_timer.timer);
+               del_timer_sync(&llc->rej_sent_timer.timer);
+               del_timer_sync(&llc->busy_state_timer.timer);
+       } else {
+               del_timer(&llc->pf_cycle_timer.timer);
+               del_timer(&llc->ack_timer.timer);
+               del_timer(&llc->rej_sent_timer.timer);
+               del_timer(&llc->busy_state_timer.timer);
+       }
+
+       llc->ack_must_be_send = 0;
+       llc->ack_pf = 0;
+}
+
 /**
  *     llc_sk_free - Frees a LLC socket
  *     @sk - socket to free
@@ -973,7 +993,7 @@ void llc_sk_free(struct sock *sk)
 
        llc->state = LLC_CONN_OUT_OF_SVC;
        /* Stop all (possibly) running timers */
-       llc_conn_ac_stop_all_timers(sk, NULL);
+       llc_sk_stop_all_timers(sk, true);
 #ifdef DEBUG_LLC_CONN_ALLOC
        printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__,
                skb_queue_len(&llc->pdu_unack_q),
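
llc_sk_stop_all_timers() makes the distinction explicit: the state-machine action keeps plain del_timer(), while llc_sk_free() now passes sync=true and gets del_timer_sync(), which also waits for a handler still running on another CPU before the socket is torn down. A hedged sketch of the general rule, not the llc code itself:

#include <linux/timer.h>
#include <linux/types.h>

/* del_timer() only removes a pending timer; del_timer_sync() additionally
 * waits for a concurrently running handler, so it is the right call before
 * freeing the timer's data, but it must not be called from that handler's
 * own context. */
static void demo_stop_timer(struct timer_list *t, bool sync)
{
        if (sync)
                del_timer_sync(t);      /* teardown path: wait for the handler */
        else
                del_timer(t);           /* state-machine path: just dequeue */
}
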
index 595c662..ac42952 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
  * Copyright(c) 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -970,6 +971,9 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
 
                sta->ampdu_mlme.addba_req_num[tid] = 0;
 
+               tid_tx->timeout =
+                       le16_to_cpu(mgmt->u.action.u.addba_resp.timeout);
+
                if (tid_tx->timeout) {
                        mod_timer(&tid_tx->session_timer,
                                  TU_TO_EXP_TIME(tid_tx->timeout));
index 69449db..2330687 100644 (file)
@@ -36,6 +36,7 @@
 #define IEEE80211_AUTH_TIMEOUT         (HZ / 5)
 #define IEEE80211_AUTH_TIMEOUT_LONG    (HZ / 2)
 #define IEEE80211_AUTH_TIMEOUT_SHORT   (HZ / 10)
+#define IEEE80211_AUTH_TIMEOUT_SAE     (HZ * 2)
 #define IEEE80211_AUTH_MAX_TRIES       3
 #define IEEE80211_AUTH_WAIT_ASSOC      (HZ * 5)
 #define IEEE80211_ASSOC_TIMEOUT                (HZ / 5)
@@ -1787,7 +1788,7 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
                params[ac].acm = acm;
                params[ac].uapsd = uapsd;
 
-               if (params->cw_min == 0 ||
+               if (params[ac].cw_min == 0 ||
                    params[ac].cw_min > params[ac].cw_max) {
                        sdata_info(sdata,
                                   "AP has invalid WMM params (CWmin/max=%d/%d for ACI %d), using defaults\n",
@@ -3814,16 +3815,19 @@ static int ieee80211_auth(struct ieee80211_sub_if_data *sdata)
                            tx_flags);
 
        if (tx_flags == 0) {
-               auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
-               auth_data->timeout_started = true;
-               run_again(sdata, auth_data->timeout);
+               if (auth_data->algorithm == WLAN_AUTH_SAE)
+                       auth_data->timeout = jiffies +
+                               IEEE80211_AUTH_TIMEOUT_SAE;
+               else
+                       auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
        } else {
                auth_data->timeout =
                        round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG);
-               auth_data->timeout_started = true;
-               run_again(sdata, auth_data->timeout);
        }
 
+       auth_data->timeout_started = true;
+       run_again(sdata, auth_data->timeout);
+
        return 0;
 }
 
@@ -3894,8 +3898,15 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
                ifmgd->status_received = false;
                if (ifmgd->auth_data && ieee80211_is_auth(fc)) {
                        if (status_acked) {
-                               ifmgd->auth_data->timeout =
-                                       jiffies + IEEE80211_AUTH_TIMEOUT_SHORT;
+                               if (ifmgd->auth_data->algorithm ==
+                                   WLAN_AUTH_SAE)
+                                       ifmgd->auth_data->timeout =
+                                               jiffies +
+                                               IEEE80211_AUTH_TIMEOUT_SAE;
+                               else
+                                       ifmgd->auth_data->timeout =
+                                               jiffies +
+                                               IEEE80211_AUTH_TIMEOUT_SHORT;
                                run_again(sdata, ifmgd->auth_data->timeout);
                        } else {
                                ifmgd->auth_data->timeout = jiffies - 1;
index 535de31..05a265c 100644 (file)
@@ -4,6 +4,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007      Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
+ * Copyright (C) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -1135,7 +1136,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
        }
 
        /* reset session timer */
-       if (reset_agg_timer && tid_tx->timeout)
+       if (reset_agg_timer)
                tid_tx->last_tx = jiffies;
 
        return queued;
index 704b383..44d8a55 100644 (file)
@@ -594,6 +594,7 @@ config NFT_QUOTA
 config NFT_REJECT
        default m if NETFILTER_ADVANCED=n
        tristate "Netfilter nf_tables reject support"
+       depends on !NF_TABLES_INET || (IPV6!=m || m)
        help
          This option adds the "reject" expression that you can use to
          explicitly deny and notify via TCP reset/ICMP informational errors
index 5ebde4b..f360988 100644 (file)
@@ -2384,11 +2384,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
                        strlcpy(cfg.mcast_ifn, dm->mcast_ifn,
                                sizeof(cfg.mcast_ifn));
                        cfg.syncid = dm->syncid;
-                       rtnl_lock();
-                       mutex_lock(&ipvs->sync_mutex);
                        ret = start_sync_thread(ipvs, &cfg, dm->state);
-                       mutex_unlock(&ipvs->sync_mutex);
-                       rtnl_unlock();
                } else {
                        mutex_lock(&ipvs->sync_mutex);
                        ret = stop_sync_thread(ipvs, dm->state);
@@ -3481,12 +3477,8 @@ static int ip_vs_genl_new_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs)
        if (ipvs->mixed_address_family_dests > 0)
                return -EINVAL;
 
-       rtnl_lock();
-       mutex_lock(&ipvs->sync_mutex);
        ret = start_sync_thread(ipvs, &c,
                                nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
-       mutex_unlock(&ipvs->sync_mutex);
-       rtnl_unlock();
        return ret;
 }
 
index fbaf3bd..001501e 100644 (file)
@@ -49,6 +49,7 @@
 #include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/kernel.h>
+#include <linux/sched/signal.h>
 
 #include <asm/unaligned.h>             /* Used for ntoh_seq and hton_seq */
 
@@ -1360,15 +1361,9 @@ static void set_mcast_pmtudisc(struct sock *sk, int val)
 /*
  *      Specifiy default interface for outgoing multicasts
  */
-static int set_mcast_if(struct sock *sk, char *ifname)
+static int set_mcast_if(struct sock *sk, struct net_device *dev)
 {
-       struct net_device *dev;
        struct inet_sock *inet = inet_sk(sk);
-       struct net *net = sock_net(sk);
-
-       dev = __dev_get_by_name(net, ifname);
-       if (!dev)
-               return -ENODEV;
 
        if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
                return -EINVAL;
@@ -1396,19 +1391,14 @@ static int set_mcast_if(struct sock *sk, char *ifname)
  *      in the in_addr structure passed in as a parameter.
  */
 static int
-join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
+join_mcast_group(struct sock *sk, struct in_addr *addr, struct net_device *dev)
 {
-       struct net *net = sock_net(sk);
        struct ip_mreqn mreq;
-       struct net_device *dev;
        int ret;
 
        memset(&mreq, 0, sizeof(mreq));
        memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr));
 
-       dev = __dev_get_by_name(net, ifname);
-       if (!dev)
-               return -ENODEV;
        if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
                return -EINVAL;
 
@@ -1423,15 +1413,10 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
 
 #ifdef CONFIG_IP_VS_IPV6
 static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
-                            char *ifname)
+                            struct net_device *dev)
 {
-       struct net *net = sock_net(sk);
-       struct net_device *dev;
        int ret;
 
-       dev = __dev_get_by_name(net, ifname);
-       if (!dev)
-               return -ENODEV;
        if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
                return -EINVAL;
 
@@ -1443,24 +1428,18 @@ static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
 }
 #endif
 
-static int bind_mcastif_addr(struct socket *sock, char *ifname)
+static int bind_mcastif_addr(struct socket *sock, struct net_device *dev)
 {
-       struct net *net = sock_net(sock->sk);
-       struct net_device *dev;
        __be32 addr;
        struct sockaddr_in sin;
 
-       dev = __dev_get_by_name(net, ifname);
-       if (!dev)
-               return -ENODEV;
-
        addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
        if (!addr)
                pr_err("You probably need to specify IP address on "
                       "multicast interface.\n");
 
        IP_VS_DBG(7, "binding socket with (%s) %pI4\n",
-                 ifname, &addr);
+                 dev->name, &addr);
 
        /* Now bind the socket with the address of multicast interface */
        sin.sin_family       = AF_INET;
@@ -1493,7 +1472,8 @@ static void get_mcast_sockaddr(union ipvs_sockaddr *sa, int *salen,
 /*
  *      Set up sending multicast socket over UDP
  */
-static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
+static int make_send_sock(struct netns_ipvs *ipvs, int id,
+                         struct net_device *dev, struct socket **sock_ret)
 {
        /* multicast addr */
        union ipvs_sockaddr mcast_addr;
@@ -1505,9 +1485,10 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
                                  IPPROTO_UDP, &sock);
        if (result < 0) {
                pr_err("Error during creation of socket; terminating\n");
-               return ERR_PTR(result);
+               goto error;
        }
-       result = set_mcast_if(sock->sk, ipvs->mcfg.mcast_ifn);
+       *sock_ret = sock;
+       result = set_mcast_if(sock->sk, dev);
        if (result < 0) {
                pr_err("Error setting outbound mcast interface\n");
                goto error;
@@ -1522,7 +1503,7 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
                set_sock_size(sock->sk, 1, result);
 
        if (AF_INET == ipvs->mcfg.mcast_af)
-               result = bind_mcastif_addr(sock, ipvs->mcfg.mcast_ifn);
+               result = bind_mcastif_addr(sock, dev);
        else
                result = 0;
        if (result < 0) {
@@ -1538,19 +1519,18 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
                goto error;
        }
 
-       return sock;
+       return 0;
 
 error:
-       sock_release(sock);
-       return ERR_PTR(result);
+       return result;
 }
 
 
 /*
  *      Set up receiving multicast socket over UDP
  */
-static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
-                                       int ifindex)
+static int make_receive_sock(struct netns_ipvs *ipvs, int id,
+                            struct net_device *dev, struct socket **sock_ret)
 {
        /* multicast addr */
        union ipvs_sockaddr mcast_addr;
@@ -1562,8 +1542,9 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
                                  IPPROTO_UDP, &sock);
        if (result < 0) {
                pr_err("Error during creation of socket; terminating\n");
-               return ERR_PTR(result);
+               goto error;
        }
+       *sock_ret = sock;
        /* it is equivalent to the REUSEADDR option in user-space */
        sock->sk->sk_reuse = SK_CAN_REUSE;
        result = sysctl_sync_sock_size(ipvs);
@@ -1571,7 +1552,7 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
                set_sock_size(sock->sk, 0, result);
 
        get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
-       sock->sk->sk_bound_dev_if = ifindex;
+       sock->sk->sk_bound_dev_if = dev->ifindex;
        result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
        if (result < 0) {
                pr_err("Error binding to the multicast addr\n");
@@ -1582,21 +1563,20 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
 #ifdef CONFIG_IP_VS_IPV6
        if (ipvs->bcfg.mcast_af == AF_INET6)
                result = join_mcast_group6(sock->sk, &mcast_addr.in6.sin6_addr,
-                                          ipvs->bcfg.mcast_ifn);
+                                          dev);
        else
 #endif
                result = join_mcast_group(sock->sk, &mcast_addr.in.sin_addr,
-                                         ipvs->bcfg.mcast_ifn);
+                                         dev);
        if (result < 0) {
                pr_err("Error joining to the multicast group\n");
                goto error;
        }
 
-       return sock;
+       return 0;
 
 error:
-       sock_release(sock);
-       return ERR_PTR(result);
+       return result;
 }
 
 
@@ -1778,13 +1758,12 @@ static int sync_thread_backup(void *data)
 int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
                      int state)
 {
-       struct ip_vs_sync_thread_data *tinfo;
+       struct ip_vs_sync_thread_data *tinfo = NULL;
        struct task_struct **array = NULL, *task;
-       struct socket *sock;
        struct net_device *dev;
        char *name;
        int (*threadfn)(void *data);
-       int id, count, hlen;
+       int id = 0, count, hlen;
        int result = -ENOMEM;
        u16 mtu, min_mtu;
 
@@ -1792,6 +1771,18 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
        IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %zd bytes\n",
                  sizeof(struct ip_vs_sync_conn_v0));
 
+       /* Do not hold one mutex and then to block on another */
+       for (;;) {
+               rtnl_lock();
+               if (mutex_trylock(&ipvs->sync_mutex))
+                       break;
+               rtnl_unlock();
+               mutex_lock(&ipvs->sync_mutex);
+               if (rtnl_trylock())
+                       break;
+               mutex_unlock(&ipvs->sync_mutex);
+       }
+
        if (!ipvs->sync_state) {
                count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX);
                ipvs->threads_mask = count - 1;
@@ -1810,7 +1801,8 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
        dev = __dev_get_by_name(ipvs->net, c->mcast_ifn);
        if (!dev) {
                pr_err("Unknown mcast interface: %s\n", c->mcast_ifn);
-               return -ENODEV;
+               result = -ENODEV;
+               goto out_early;
        }
        hlen = (AF_INET6 == c->mcast_af) ?
               sizeof(struct ipv6hdr) + sizeof(struct udphdr) :
@@ -1827,26 +1819,30 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
                c->sync_maxlen = mtu - hlen;
 
        if (state == IP_VS_STATE_MASTER) {
+               result = -EEXIST;
                if (ipvs->ms)
-                       return -EEXIST;
+                       goto out_early;
 
                ipvs->mcfg = *c;
                name = "ipvs-m:%d:%d";
                threadfn = sync_thread_master;
        } else if (state == IP_VS_STATE_BACKUP) {
+               result = -EEXIST;
                if (ipvs->backup_threads)
-                       return -EEXIST;
+                       goto out_early;
 
                ipvs->bcfg = *c;
                name = "ipvs-b:%d:%d";
                threadfn = sync_thread_backup;
        } else {
-               return -EINVAL;
+               result = -EINVAL;
+               goto out_early;
        }
 
        if (state == IP_VS_STATE_MASTER) {
                struct ipvs_master_sync_state *ms;
 
+               result = -ENOMEM;
                ipvs->ms = kcalloc(count, sizeof(ipvs->ms[0]), GFP_KERNEL);
                if (!ipvs->ms)
                        goto out;
@@ -1862,39 +1858,38 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
        } else {
                array = kcalloc(count, sizeof(struct task_struct *),
                                GFP_KERNEL);
+               result = -ENOMEM;
                if (!array)
                        goto out;
        }
 
-       tinfo = NULL;
        for (id = 0; id < count; id++) {
-               if (state == IP_VS_STATE_MASTER)
-                       sock = make_send_sock(ipvs, id);
-               else
-                       sock = make_receive_sock(ipvs, id, dev->ifindex);
-               if (IS_ERR(sock)) {
-                       result = PTR_ERR(sock);
-                       goto outtinfo;
-               }
+               result = -ENOMEM;
                tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
                if (!tinfo)
-                       goto outsocket;
+                       goto out;
                tinfo->ipvs = ipvs;
-               tinfo->sock = sock;
+               tinfo->sock = NULL;
                if (state == IP_VS_STATE_BACKUP) {
                        tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
                                             GFP_KERNEL);
                        if (!tinfo->buf)
-                               goto outtinfo;
+                               goto out;
                } else {
                        tinfo->buf = NULL;
                }
                tinfo->id = id;
+               if (state == IP_VS_STATE_MASTER)
+                       result = make_send_sock(ipvs, id, dev, &tinfo->sock);
+               else
+                       result = make_receive_sock(ipvs, id, dev, &tinfo->sock);
+               if (result < 0)
+                       goto out;
 
                task = kthread_run(threadfn, tinfo, name, ipvs->gen, id);
                if (IS_ERR(task)) {
                        result = PTR_ERR(task);
-                       goto outtinfo;
+                       goto out;
                }
                tinfo = NULL;
                if (state == IP_VS_STATE_MASTER)
@@ -1911,20 +1906,20 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
        ipvs->sync_state |= state;
        spin_unlock_bh(&ipvs->sync_buff_lock);
 
+       mutex_unlock(&ipvs->sync_mutex);
+       rtnl_unlock();
+
        /* increase the module use count */
        ip_vs_use_count_inc();
 
        return 0;
 
-outsocket:
-       sock_release(sock);
-
-outtinfo:
-       if (tinfo) {
-               sock_release(tinfo->sock);
-               kfree(tinfo->buf);
-               kfree(tinfo);
-       }
+out:
+       /* We do not need RTNL lock anymore, release it here so that
+        * sock_release below and in the kthreads can use rtnl_lock
+        * to leave the mcast group.
+        */
+       rtnl_unlock();
        count = id;
        while (count-- > 0) {
                if (state == IP_VS_STATE_MASTER)
@@ -1932,13 +1927,23 @@ outtinfo:
                else
                        kthread_stop(array[count]);
        }
-       kfree(array);
-
-out:
        if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
                kfree(ipvs->ms);
                ipvs->ms = NULL;
        }
+       mutex_unlock(&ipvs->sync_mutex);
+       if (tinfo) {
+               if (tinfo->sock)
+                       sock_release(tinfo->sock);
+               kfree(tinfo->buf);
+               kfree(tinfo);
+       }
+       kfree(array);
+       return result;
+
+out_early:
+       mutex_unlock(&ipvs->sync_mutex);
+       rtnl_unlock();
        return result;
 }
 
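The start_sync_thread() rework above funnels every failure through common exits: "result" is assigned before each step that can fail, early failures take out_early (which only drops sync_mutex and the RTNL lock), and later failures go through out:, which releases the RTNL lock first so that sock_release() in the cleanup and in the kthreads can take it to leave the mcast group. A minimal user-space sketch of that single-exit pattern, with hypothetical names rather than the actual IPVS code:

#include <stdlib.h>

struct ctx {
        char **bufs;
        int nbufs;
};

static int ctx_start(struct ctx *c, int count)
{
        int result, id;

        c->nbufs = 0;
        result = -12;                           /* stands in for -ENOMEM */
        c->bufs = calloc(count, sizeof(*c->bufs));
        if (!c->bufs)
                goto out;

        for (id = 0; id < count; id++) {
                result = -12;                   /* set before each fallible step */
                c->bufs[id] = malloc(64);
                if (!c->bufs[id])
                        goto out;
                c->nbufs++;
        }
        return 0;

out:
        while (c->nbufs-- > 0)                  /* unwind the partial setup */
                free(c->bufs[c->nbufs]);
        free(c->bufs);
        c->bufs = NULL;
        c->nbufs = 0;
        return result;
}
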
index 8ef21d9..4b2b3d5 100644 (file)
@@ -252,7 +252,7 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
 static inline int expect_matches(const struct nf_conntrack_expect *a,
                                 const struct nf_conntrack_expect *b)
 {
-       return a->master == b->master && a->class == b->class &&
+       return a->master == b->master &&
               nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
               nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
               net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
@@ -421,6 +421,9 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
        h = nf_ct_expect_dst_hash(net, &expect->tuple);
        hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
                if (expect_matches(i, expect)) {
+                       if (i->class != expect->class)
+                               return -EALREADY;
+
                        if (nf_ct_remove_expect(i))
                                break;
                } else if (expect_clash(i, expect)) {
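The two hunks above change duplicate detection for expectations: the tuple alone decides whether two expectations refer to the same flow, and a match with a different class is reported as -EALREADY instead of replacing the existing entry; the SIP helper further below then treats -EALREADY as success. A rough stand-alone sketch of that policy, with made-up types:

#include <errno.h>
#include <stddef.h>

struct expect {
        unsigned int tuple;             /* stands in for the conntrack tuple */
        unsigned int class;
        struct expect *next;
};

/* Returns 0 when the candidate can be installed (possibly after evicting an
 * identical entry), -EALREADY when an entry for the same tuple but a
 * different class already exists.
 */
static int expect_check(struct expect *head, const struct expect *cand)
{
        struct expect *i;

        for (i = head; i; i = i->next) {
                if (i->tuple != cand->tuple)
                        continue;
                if (i->class != cand->class)
                        return -EALREADY;
                /* same tuple and class: the old entry would be removed here */
                break;
        }
        return 0;
}
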
index 9fe0ddc..277bbfe 100644 (file)
@@ -9,6 +9,7 @@
  *      2 of the License, or (at your option) any later version.
  */
 #include <linux/kernel.h>
+#include <linux/kmemleak.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/rcupdate.h>
@@ -71,6 +72,7 @@ void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
        rcu_read_unlock();
 
        alloc = max(newlen, NF_CT_EXT_PREALLOC);
+       kmemleak_not_leak(old);
        new = __krealloc(old, alloc, gfp);
        if (!new)
                return NULL;
index 4dbb5ba..908e51e 100644 (file)
@@ -938,11 +938,19 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
                                       datalen, rtp_exp, rtcp_exp,
                                       mediaoff, medialen, daddr);
        else {
-               if (nf_ct_expect_related(rtp_exp) == 0) {
-                       if (nf_ct_expect_related(rtcp_exp) != 0)
-                               nf_ct_unexpect_related(rtp_exp);
-                       else
+               /* -EALREADY handling works around end-points that send
+                * SDP messages with identical port but different media type,
+                * we pretend expectation was set up.
+                */
+               int errp = nf_ct_expect_related(rtp_exp);
+
+               if (errp == 0 || errp == -EALREADY) {
+                       int errcp = nf_ct_expect_related(rtcp_exp);
+
+                       if (errcp == 0 || errcp == -EALREADY)
                                ret = NF_ACCEPT;
+                       else if (errp == 0)
+                               nf_ct_unexpect_related(rtp_exp);
                }
        }
        nf_ct_expect_put(rtcp_exp);
index 9134cc4..04d4e37 100644 (file)
@@ -2361,41 +2361,46 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
        }
 
        if (nlh->nlmsg_flags & NLM_F_REPLACE) {
-               if (nft_is_active_next(net, old_rule)) {
-                       trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
-                                                  old_rule);
-                       if (trans == NULL) {
-                               err = -ENOMEM;
-                               goto err2;
-                       }
-                       nft_deactivate_next(net, old_rule);
-                       chain->use--;
-                       list_add_tail_rcu(&rule->list, &old_rule->list);
-               } else {
+               if (!nft_is_active_next(net, old_rule)) {
                        err = -ENOENT;
                        goto err2;
                }
-       } else if (nlh->nlmsg_flags & NLM_F_APPEND)
-               if (old_rule)
-                       list_add_rcu(&rule->list, &old_rule->list);
-               else
-                       list_add_tail_rcu(&rule->list, &chain->rules);
-       else {
-               if (old_rule)
-                       list_add_tail_rcu(&rule->list, &old_rule->list);
-               else
-                       list_add_rcu(&rule->list, &chain->rules);
-       }
+               trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
+                                          old_rule);
+               if (trans == NULL) {
+                       err = -ENOMEM;
+                       goto err2;
+               }
+               nft_deactivate_next(net, old_rule);
+               chain->use--;
 
-       if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
-               err = -ENOMEM;
-               goto err3;
+               if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
+                       err = -ENOMEM;
+                       goto err2;
+               }
+
+               list_add_tail_rcu(&rule->list, &old_rule->list);
+       } else {
+               if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
+                       err = -ENOMEM;
+                       goto err2;
+               }
+
+               if (nlh->nlmsg_flags & NLM_F_APPEND) {
+                       if (old_rule)
+                               list_add_rcu(&rule->list, &old_rule->list);
+                       else
+                               list_add_tail_rcu(&rule->list, &chain->rules);
+                } else {
+                       if (old_rule)
+                               list_add_tail_rcu(&rule->list, &old_rule->list);
+                       else
+                               list_add_rcu(&rule->list, &chain->rules);
+               }
        }
        chain->use++;
        return 0;
 
-err3:
-       list_del_rcu(&rule->list);
 err2:
        nf_tables_rule_destroy(&ctx, rule);
 err1:
@@ -3207,18 +3212,20 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
 
        err = ops->init(set, &desc, nla);
        if (err < 0)
-               goto err2;
+               goto err3;
 
        err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
        if (err < 0)
-               goto err3;
+               goto err4;
 
        list_add_tail_rcu(&set->list, &table->sets);
        table->use++;
        return 0;
 
-err3:
+err4:
        ops->destroy(set);
+err3:
+       kfree(set->name);
 err2:
        kvfree(set);
 err1:
@@ -5738,7 +5745,7 @@ static void nft_chain_commit_update(struct nft_trans *trans)
        struct nft_base_chain *basechain;
 
        if (nft_trans_chain_name(trans))
-               strcpy(trans->ctx.chain->name, nft_trans_chain_name(trans));
+               swap(trans->ctx.chain->name, nft_trans_chain_name(trans));
 
        if (!nft_is_base_chain(trans->ctx.chain))
                return;
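The last hunk above replaces strcpy() with swap() when committing a chain rename: the destination buffer was allocated for the old name, so copying a longer new name could overflow it, whereas exchanging the pointers lets the normal transaction teardown free the superseded string. A tiny sketch of that idea (hypothetical structures, not the nf_tables code itself):

#include <stdlib.h>

struct chain { char *name; };
struct rename_trans { char *new_name; };        /* freed when the transaction is destroyed */

static void swap_str(char **a, char **b)
{
        char *tmp = *a;

        *a = *b;
        *b = tmp;
}

static void commit_rename(struct chain *c, struct rename_trans *t)
{
        if (t->new_name)
                swap_str(&c->name, &t->new_name);
        /* t->new_name now points at the old buffer, so the usual transaction
         * cleanup (free(t->new_name)) releases it; no copy into a possibly
         * smaller buffer is needed.
         */
}
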
index 773da82..94df000 100644 (file)
@@ -36,11 +36,10 @@ MODULE_ALIAS("ipt_connmark");
 MODULE_ALIAS("ip6t_connmark");
 
 static unsigned int
-connmark_tg_shift(struct sk_buff *skb,
-               const struct xt_connmark_tginfo1 *info,
-               u8 shift_bits, u8 shift_dir)
+connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info)
 {
        enum ip_conntrack_info ctinfo;
+       u_int32_t new_targetmark;
        struct nf_conn *ct;
        u_int32_t newmark;
 
@@ -51,34 +50,39 @@ connmark_tg_shift(struct sk_buff *skb,
        switch (info->mode) {
        case XT_CONNMARK_SET:
                newmark = (ct->mark & ~info->ctmask) ^ info->ctmark;
-               if (shift_dir == D_SHIFT_RIGHT)
-                       newmark >>= shift_bits;
+               if (info->shift_dir == D_SHIFT_RIGHT)
+                       newmark >>= info->shift_bits;
                else
-                       newmark <<= shift_bits;
+                       newmark <<= info->shift_bits;
+
                if (ct->mark != newmark) {
                        ct->mark = newmark;
                        nf_conntrack_event_cache(IPCT_MARK, ct);
                }
                break;
        case XT_CONNMARK_SAVE:
-               newmark = (ct->mark & ~info->ctmask) ^
-                         (skb->mark & info->nfmask);
-               if (shift_dir == D_SHIFT_RIGHT)
-                       newmark >>= shift_bits;
+               new_targetmark = (skb->mark & info->nfmask);
+               if (info->shift_dir == D_SHIFT_RIGHT)
+                       new_targetmark >>= info->shift_bits;
                else
-                       newmark <<= shift_bits;
+                       new_targetmark <<= info->shift_bits;
+
+               newmark = (ct->mark & ~info->ctmask) ^
+                         new_targetmark;
                if (ct->mark != newmark) {
                        ct->mark = newmark;
                        nf_conntrack_event_cache(IPCT_MARK, ct);
                }
                break;
        case XT_CONNMARK_RESTORE:
-               newmark = (skb->mark & ~info->nfmask) ^
-                         (ct->mark & info->ctmask);
-               if (shift_dir == D_SHIFT_RIGHT)
-                       newmark >>= shift_bits;
+               new_targetmark = (ct->mark & info->ctmask);
+               if (info->shift_dir == D_SHIFT_RIGHT)
+                       new_targetmark >>= info->shift_bits;
                else
-                       newmark <<= shift_bits;
+                       new_targetmark <<= info->shift_bits;
+
+               newmark = (skb->mark & ~info->nfmask) ^
+                         new_targetmark;
                skb->mark = newmark;
                break;
        }
@@ -89,8 +93,14 @@ static unsigned int
 connmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
 {
        const struct xt_connmark_tginfo1 *info = par->targinfo;
-
-       return connmark_tg_shift(skb, info, 0, 0);
+       const struct xt_connmark_tginfo2 info2 = {
+               .ctmark = info->ctmark,
+               .ctmask = info->ctmask,
+               .nfmask = info->nfmask,
+               .mode   = info->mode,
+       };
+
+       return connmark_tg_shift(skb, &info2);
 }
 
 static unsigned int
@@ -98,8 +108,7 @@ connmark_tg_v2(struct sk_buff *skb, const struct xt_action_param *par)
 {
        const struct xt_connmark_tginfo2 *info = par->targinfo;
 
-       return connmark_tg_shift(skb, (const struct xt_connmark_tginfo1 *)info,
-                                info->shift_bits, info->shift_dir);
+       return connmark_tg_shift(skb, info);
 }
 
 static int connmark_tg_check(const struct xt_tgchk_param *par)
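The hunks above make connmark_tg_shift() take the revision-2 option struct directly and apply the shift to the value being folded in rather than to the final mark; the revision-1 entry point now builds a temporary tginfo2 with the shift fields left at zero, so shifting becomes a no-op for old users. A compact sketch of that old-to-new adaptation pattern (made-up struct names, SET mode only, nonzero shift_dir meaning "shift right" in this sketch):

#include <stdint.h>

struct tginfo1 { uint32_t ctmark, ctmask, nfmask; uint8_t mode; };
struct tginfo2 { uint32_t ctmark, ctmask, nfmask; uint8_t mode, shift_dir, shift_bits; };

static uint32_t apply_v2(const struct tginfo2 *info, uint32_t mark)
{
        uint32_t newmark = (mark & ~info->ctmask) ^ info->ctmark;

        return info->shift_dir ? newmark >> info->shift_bits
                               : newmark << info->shift_bits;
}

static uint32_t apply_v1(const struct tginfo1 *info, uint32_t mark)
{
        const struct tginfo2 info2 = {
                .ctmark = info->ctmark,
                .ctmask = info->ctmask,
                .nfmask = info->nfmask,
                .mode   = info->mode,
                /* shift_dir/shift_bits stay zero: shifting by 0 changes nothing */
        };

        return apply_v2(&info2, mark);
}
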
index 55342c4..2e2dd88 100644 (file)
@@ -2606,13 +2606,13 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
 {
        if (v == SEQ_START_TOKEN) {
                seq_puts(seq,
-                        "sk       Eth Pid    Groups   "
-                        "Rmem     Wmem     Dump     Locks     Drops     Inode\n");
+                        "sk               Eth Pid        Groups   "
+                        "Rmem     Wmem     Dump  Locks    Drops    Inode\n");
        } else {
                struct sock *s = v;
                struct netlink_sock *nlk = nlk_sk(s);
 
-               seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
+               seq_printf(seq, "%pK %-3d %-10u %08x %-8d %-8d %-5d %-8d %-8d %-8lu\n",
                           s,
                           s->sk_protocol,
                           nlk->portid,
index d7da99a..9696ef9 100644 (file)
@@ -57,6 +57,8 @@ int nsh_pop(struct sk_buff *skb)
                return -ENOMEM;
        nh = (struct nshhdr *)(skb->data);
        length = nsh_hdr_len(nh);
+       if (length < NSH_BASE_HDR_LEN)
+               return -EINVAL;
        inner_proto = tun_p_to_eth_p(nh->np);
        if (!pskb_may_pull(skb, length))
                return -ENOMEM;
@@ -90,6 +92,8 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
        if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN)))
                goto out;
        nsh_len = nsh_hdr_len(nsh_hdr(skb));
+       if (nsh_len < NSH_BASE_HDR_LEN)
+               goto out;
        if (unlikely(!pskb_may_pull(skb, nsh_len)))
                goto out;
 
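Both hunks above distrust the length field read from the packet: a header length smaller than the base header would make the subsequent pulls nonsensical, so it is rejected before use. A generic sketch of the check (hypothetical parser and field layout, not the actual NSH encoding):

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#define BASE_HDR_LEN 8

static int parse_hdr_len(const uint8_t *data, size_t avail, size_t *hdr_len)
{
        size_t len;

        if (avail < BASE_HDR_LEN)
                return -ENOMEM;                 /* base header not in the buffer yet */

        len = (size_t)(data[1] & 0x3f) * 4;     /* length field in 4-byte words (illustrative) */
        if (len < BASE_HDR_LEN)
                return -EINVAL;                 /* malformed: shorter than the base header */
        if (len > avail)
                return -ENOMEM;                 /* need more data before trusting it */

        *hdr_len = len;
        return 0;
}
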
index 7322aa1..492ab0c 100644 (file)
@@ -1712,13 +1712,10 @@ static void nlattr_set(struct nlattr *attr, u8 val,
 
        /* The nlattr stream should already have been validated */
        nla_for_each_nested(nla, attr, rem) {
-               if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) {
-                       if (tbl[nla_type(nla)].next)
-                               tbl = tbl[nla_type(nla)].next;
-                       nlattr_set(nla, val, tbl);
-               } else {
+               if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED)
+                       nlattr_set(nla, val, tbl[nla_type(nla)].next ? : tbl);
+               else
                        memset(nla_data(nla), val, nla_len(nla));
-               }
 
                if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
                        *(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
index c31b068..01f3515 100644 (file)
@@ -329,11 +329,11 @@ static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
        skb_set_queue_mapping(skb, queue_index);
 }
 
-/* register_prot_hook must be invoked with the po->bind_lock held,
+/* __register_prot_hook must be invoked through register_prot_hook
  * or from a context in which asynchronous accesses to the packet
  * socket is not possible (packet_create()).
  */
-static void register_prot_hook(struct sock *sk)
+static void __register_prot_hook(struct sock *sk)
 {
        struct packet_sock *po = pkt_sk(sk);
 
@@ -348,8 +348,13 @@ static void register_prot_hook(struct sock *sk)
        }
 }
 
-/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
- * held.   If the sync parameter is true, we will temporarily drop
+static void register_prot_hook(struct sock *sk)
+{
+       lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
+       __register_prot_hook(sk);
+}
+
+/* If the sync parameter is true, we will temporarily drop
  * the po->bind_lock and do a synchronize_net to make sure no
  * asynchronous packet processing paths still refer to the elements
  * of po->prot_hook.  If the sync parameter is false, it is the
@@ -359,6 +364,8 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
 {
        struct packet_sock *po = pkt_sk(sk);
 
+       lockdep_assert_held_once(&po->bind_lock);
+
        po->running = 0;
 
        if (po->fanout)
@@ -3252,7 +3259,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
 
        if (proto) {
                po->prot_hook.type = proto;
-               register_prot_hook(sk);
+               __register_prot_hook(sk);
        }
 
        mutex_lock(&net->packet.sklist_lock);
@@ -3732,12 +3739,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 
                if (optlen != sizeof(val))
                        return -EINVAL;
-               if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
-                       return -EBUSY;
                if (copy_from_user(&val, optval, sizeof(val)))
                        return -EFAULT;
-               po->tp_loss = !!val;
-               return 0;
+
+               lock_sock(sk);
+               if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+                       ret = -EBUSY;
+               } else {
+                       po->tp_loss = !!val;
+                       ret = 0;
+               }
+               release_sock(sk);
+               return ret;
        }
        case PACKET_AUXDATA:
        {
@@ -3748,7 +3761,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
                if (copy_from_user(&val, optval, sizeof(val)))
                        return -EFAULT;
 
+               lock_sock(sk);
                po->auxdata = !!val;
+               release_sock(sk);
                return 0;
        }
        case PACKET_ORIGDEV:
@@ -3760,7 +3775,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
                if (copy_from_user(&val, optval, sizeof(val)))
                        return -EFAULT;
 
+               lock_sock(sk);
                po->origdev = !!val;
+               release_sock(sk);
                return 0;
        }
        case PACKET_VNET_HDR:
@@ -3769,15 +3786,20 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 
                if (sock->type != SOCK_RAW)
                        return -EINVAL;
-               if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
-                       return -EBUSY;
                if (optlen < sizeof(val))
                        return -EINVAL;
                if (copy_from_user(&val, optval, sizeof(val)))
                        return -EFAULT;
 
-               po->has_vnet_hdr = !!val;
-               return 0;
+               lock_sock(sk);
+               if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+                       ret = -EBUSY;
+               } else {
+                       po->has_vnet_hdr = !!val;
+                       ret = 0;
+               }
+               release_sock(sk);
+               return ret;
        }
        case PACKET_TIMESTAMP:
        {
@@ -3815,11 +3837,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 
                if (optlen != sizeof(val))
                        return -EINVAL;
-               if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
-                       return -EBUSY;
                if (copy_from_user(&val, optval, sizeof(val)))
                        return -EFAULT;
-               po->tp_tx_has_off = !!val;
+
+               lock_sock(sk);
+               if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+                       ret = -EBUSY;
+               } else {
+                       po->tp_tx_has_off = !!val;
+                       ret = 0;
+               }
+               release_sock(sk);
                return 0;
        }
        case PACKET_QDISC_BYPASS:
index a1d2b23..3bb7c5f 100644 (file)
@@ -112,10 +112,12 @@ struct packet_sock {
        int                     copy_thresh;
        spinlock_t              bind_lock;
        struct mutex            pg_vec_lock;
-       unsigned int            running:1,      /* prot_hook is attached*/
-                               auxdata:1,
+       unsigned int            running;        /* bind_lock must be held */
+       unsigned int            auxdata:1,      /* writer must hold sock lock */
                                origdev:1,
-                               has_vnet_hdr:1;
+                               has_vnet_hdr:1,
+                               tp_loss:1,
+                               tp_tx_has_off:1;
        int                     pressure;
        int                     ifindex;        /* bound device         */
        __be16                  num;
@@ -125,8 +127,6 @@ struct packet_sock {
        enum tpacket_versions   tp_version;
        unsigned int            tp_hdrlen;
        unsigned int            tp_reserve;
-       unsigned int            tp_loss:1;
-       unsigned int            tp_tx_has_off:1;
        unsigned int            tp_tstamp;
        struct net_device __rcu *cached_dev;
        int                     (*xmit)(struct sk_buff *skb);
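The af_packet changes above sort the flag bits by the lock that protects them: running moves into its own word because it is written under bind_lock, while the remaining bitfields share one word and are now only written under the socket lock. That matters because bitfields packed into the same word are updated with a read-modify-write of that word, so writers under different locks can silently undo each other's update. A small user-space analogue of the rule "one lock per bitfield word" (illustrative only):

#include <pthread.h>

struct flags {
        unsigned int auxdata:1,
                     origdev:1,
                     has_vnet_hdr:1;
};

static pthread_mutex_t flags_lock = PTHREAD_MUTEX_INITIALIZER;
static struct flags po;

static void set_auxdata(int val)
{
        pthread_mutex_lock(&flags_lock);        /* one lock covers the whole word */
        po.auxdata = !!val;
        pthread_mutex_unlock(&flags_lock);
}

static void set_origdev(int val)
{
        pthread_mutex_lock(&flags_lock);        /* same lock as every other bit writer */
        po.origdev = !!val;
        pthread_mutex_unlock(&flags_lock);
}
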
index eea1d86..13b38ad 100644 (file)
@@ -547,7 +547,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
        rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd,
                 ic->i_send_cq, ic->i_recv_cq);
 
-       return ret;
+       goto out;
 
 sends_out:
        vfree(ic->i_sends);
@@ -572,6 +572,7 @@ send_cq_out:
                ic->i_send_cq = NULL;
 rds_ibdev_out:
        rds_ib_remove_conn(rds_ibdev, conn);
+out:
        rds_ib_dev_put(rds_ibdev);
 
        return ret;
index de50e21..dc67458 100644 (file)
@@ -558,6 +558,7 @@ static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
                struct rds_cmsg_rx_trace t;
                int i, j;
 
+               memset(&t, 0, sizeof(t));
                inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock();
                t.rx_traces =  rs->rs_rx_traces;
                for (i = 0; i < rs->rs_rx_traces; i++) {
index 41bd496..00192a9 100644 (file)
@@ -137,13 +137,18 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
 
        ret = rfkill_register(rfkill->rfkill_dev);
        if (ret < 0)
-               return ret;
+               goto err_destroy;
 
        platform_set_drvdata(pdev, rfkill);
 
        dev_info(&pdev->dev, "%s device registered.\n", rfkill->name);
 
        return 0;
+
+err_destroy:
+       rfkill_destroy(rfkill->rfkill_dev);
+
+       return ret;
 }
 
 static int rfkill_gpio_remove(struct platform_device *pdev)
index 9a2c8e7..2b46304 100644 (file)
@@ -313,7 +313,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
        memset(&cp, 0, sizeof(cp));
        cp.local                = rx->local;
        cp.key                  = key;
-       cp.security_level       = 0;
+       cp.security_level       = rx->min_sec_level;
        cp.exclusive            = false;
        cp.upgrade              = upgrade;
        cp.service_id           = srx->srx_service;
index 90d7079..19975d2 100644 (file)
@@ -476,6 +476,7 @@ enum rxrpc_call_flag {
        RXRPC_CALL_SEND_PING,           /* A ping will need to be sent */
        RXRPC_CALL_PINGING,             /* Ping in process */
        RXRPC_CALL_RETRANS_TIMEOUT,     /* Retransmission due to timeout occurred */
+       RXRPC_CALL_BEGAN_RX_TIMER,      /* We began the expect_rx_by timer */
 };
 
 /*
index c717152..1350f1b 100644 (file)
@@ -40,7 +40,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
        } __attribute__((packed)) pkt;
        struct rxrpc_ackinfo ack_info;
        size_t len;
-       int ioc;
+       int ret, ioc;
        u32 serial, mtu, call_id, padding;
 
        _enter("%d", conn->debug_id);
@@ -135,10 +135,13 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
                break;
        }
 
-       kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len);
+       ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len);
        conn->params.peer->last_tx_at = ktime_get_real();
+       if (ret < 0)
+               trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
+                                   rxrpc_tx_fail_call_final_resend);
+
        _leave("");
-       return;
 }
 
 /*
@@ -236,6 +239,8 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
 
        ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
        if (ret < 0) {
+               trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
+                                   rxrpc_tx_fail_conn_abort);
                _debug("sendmsg failed: %d", ret);
                return -EAGAIN;
        }
index 0410d22..b5fd638 100644 (file)
@@ -971,7 +971,7 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
        if (timo) {
                unsigned long now = jiffies, expect_rx_by;
 
-               expect_rx_by = jiffies + timo;
+               expect_rx_by = now + timo;
                WRITE_ONCE(call->expect_rx_by, expect_rx_by);
                rxrpc_reduce_call_timer(call, expect_rx_by, now,
                                        rxrpc_timer_set_for_normal);
index 93b5d91..8325f1b 100644 (file)
@@ -71,7 +71,8 @@ static void rxrpc_send_version_request(struct rxrpc_local *local,
 
        ret = kernel_sendmsg(local->socket, &msg, iov, 2, len);
        if (ret < 0)
-               _debug("sendmsg failed: %d", ret);
+               trace_rxrpc_tx_fail(local->debug_id, 0, ret,
+                                   rxrpc_tx_fail_version_reply);
 
        _leave("");
 }
index 8b54e95..b493e6b 100644 (file)
@@ -134,22 +134,49 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
                }
        }
 
-       /* we want to receive ICMP errors */
-       opt = 1;
-       ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
-                               (char *) &opt, sizeof(opt));
-       if (ret < 0) {
-               _debug("setsockopt failed");
-               goto error;
-       }
+       switch (local->srx.transport.family) {
+       case AF_INET:
+               /* we want to receive ICMP errors */
+               opt = 1;
+               ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
+                                       (char *) &opt, sizeof(opt));
+               if (ret < 0) {
+                       _debug("setsockopt failed");
+                       goto error;
+               }
 
-       /* we want to set the don't fragment bit */
-       opt = IP_PMTUDISC_DO;
-       ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
-                               (char *) &opt, sizeof(opt));
-       if (ret < 0) {
-               _debug("setsockopt failed");
-               goto error;
+               /* we want to set the don't fragment bit */
+               opt = IP_PMTUDISC_DO;
+               ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
+                                       (char *) &opt, sizeof(opt));
+               if (ret < 0) {
+                       _debug("setsockopt failed");
+                       goto error;
+               }
+               break;
+
+       case AF_INET6:
+               /* we want to receive ICMP errors */
+               opt = 1;
+               ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
+                                       (char *) &opt, sizeof(opt));
+               if (ret < 0) {
+                       _debug("setsockopt failed");
+                       goto error;
+               }
+
+               /* we want to set the don't fragment bit */
+               opt = IPV6_PMTUDISC_DO;
+               ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
+                                       (char *) &opt, sizeof(opt));
+               if (ret < 0) {
+                       _debug("setsockopt failed");
+                       goto error;
+               }
+               break;
+
+       default:
+               BUG();
        }
 
        /* set the socket up */
index 7f1fc04..f03de1c 100644 (file)
@@ -210,6 +210,9 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
        if (ping)
                call->ping_time = now;
        conn->params.peer->last_tx_at = ktime_get_real();
+       if (ret < 0)
+               trace_rxrpc_tx_fail(call->debug_id, serial, ret,
+                                   rxrpc_tx_fail_call_ack);
 
        if (call->state < RXRPC_CALL_COMPLETE) {
                if (ret < 0) {
@@ -294,6 +297,10 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
        ret = kernel_sendmsg(conn->params.local->socket,
                             &msg, iov, 1, sizeof(pkt));
        conn->params.peer->last_tx_at = ktime_get_real();
+       if (ret < 0)
+               trace_rxrpc_tx_fail(call->debug_id, serial, ret,
+                                   rxrpc_tx_fail_call_abort);
+
 
        rxrpc_put_connection(conn);
        return ret;
@@ -387,6 +394,9 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
        conn->params.peer->last_tx_at = ktime_get_real();
 
        up_read(&conn->params.local->defrag_sem);
+       if (ret < 0)
+               trace_rxrpc_tx_fail(call->debug_id, serial, ret,
+                                   rxrpc_tx_fail_call_data_nofrag);
        if (ret == -EMSGSIZE)
                goto send_fragmentable;
 
@@ -414,6 +424,17 @@ done:
                                                        rxrpc_timer_set_for_lost_ack);
                        }
                }
+
+               if (sp->hdr.seq == 1 &&
+                   !test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER,
+                                     &call->flags)) {
+                       unsigned long nowj = jiffies, expect_rx_by;
+
+                       expect_rx_by = nowj + call->next_rx_timo;
+                       WRITE_ONCE(call->expect_rx_by, expect_rx_by);
+                       rxrpc_reduce_call_timer(call, expect_rx_by, nowj,
+                                               rxrpc_timer_set_for_normal);
+               }
        }
 
        rxrpc_set_keepalive(call);
@@ -465,6 +486,10 @@ send_fragmentable:
 #endif
        }
 
+       if (ret < 0)
+               trace_rxrpc_tx_fail(call->debug_id, serial, ret,
+                                   rxrpc_tx_fail_call_data_frag);
+
        up_write(&conn->params.local->defrag_sem);
        goto done;
 }
@@ -482,6 +507,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
        struct kvec iov[2];
        size_t size;
        __be32 code;
+       int ret;
 
        _enter("%d", local->debug_id);
 
@@ -516,7 +542,10 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
                        whdr.flags      ^= RXRPC_CLIENT_INITIATED;
                        whdr.flags      &= RXRPC_CLIENT_INITIATED;
 
-                       kernel_sendmsg(local->socket, &msg, iov, 2, size);
+                       ret = kernel_sendmsg(local->socket, &msg, iov, 2, size);
+                       if (ret < 0)
+                               trace_rxrpc_tx_fail(local->debug_id, 0, ret,
+                                                   rxrpc_tx_fail_reject);
                }
 
                rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
@@ -567,7 +596,8 @@ void rxrpc_send_keepalive(struct rxrpc_peer *peer)
 
        ret = kernel_sendmsg(peer->local->socket, &msg, iov, 2, len);
        if (ret < 0)
-               _debug("sendmsg failed: %d", ret);
+               trace_rxrpc_tx_fail(peer->debug_id, 0, ret,
+                                   rxrpc_tx_fail_version_keepalive);
 
        peer->last_tx_at = ktime_get_real();
        _leave("");
index 78c2f95..0ed8b65 100644 (file)
@@ -28,39 +28,39 @@ static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
  * Find the peer associated with an ICMP packet.
  */
 static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
-                                                    const struct sk_buff *skb)
+                                                    const struct sk_buff *skb,
+                                                    struct sockaddr_rxrpc *srx)
 {
        struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
-       struct sockaddr_rxrpc srx;
 
        _enter("");
 
-       memset(&srx, 0, sizeof(srx));
-       srx.transport_type = local->srx.transport_type;
-       srx.transport_len = local->srx.transport_len;
-       srx.transport.family = local->srx.transport.family;
+       memset(srx, 0, sizeof(*srx));
+       srx->transport_type = local->srx.transport_type;
+       srx->transport_len = local->srx.transport_len;
+       srx->transport.family = local->srx.transport.family;
 
        /* Can we see an ICMP4 packet on an ICMP6 listening socket?  and vice
         * versa?
         */
-       switch (srx.transport.family) {
+       switch (srx->transport.family) {
        case AF_INET:
-               srx.transport.sin.sin_port = serr->port;
+               srx->transport.sin.sin_port = serr->port;
                switch (serr->ee.ee_origin) {
                case SO_EE_ORIGIN_ICMP:
                        _net("Rx ICMP");
-                       memcpy(&srx.transport.sin.sin_addr,
+                       memcpy(&srx->transport.sin.sin_addr,
                               skb_network_header(skb) + serr->addr_offset,
                               sizeof(struct in_addr));
                        break;
                case SO_EE_ORIGIN_ICMP6:
                        _net("Rx ICMP6 on v4 sock");
-                       memcpy(&srx.transport.sin.sin_addr,
+                       memcpy(&srx->transport.sin.sin_addr,
                               skb_network_header(skb) + serr->addr_offset + 12,
                               sizeof(struct in_addr));
                        break;
                default:
-                       memcpy(&srx.transport.sin.sin_addr, &ip_hdr(skb)->saddr,
+                       memcpy(&srx->transport.sin.sin_addr, &ip_hdr(skb)->saddr,
                               sizeof(struct in_addr));
                        break;
                }
@@ -68,25 +68,25 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
 
 #ifdef CONFIG_AF_RXRPC_IPV6
        case AF_INET6:
-               srx.transport.sin6.sin6_port = serr->port;
+               srx->transport.sin6.sin6_port = serr->port;
                switch (serr->ee.ee_origin) {
                case SO_EE_ORIGIN_ICMP6:
                        _net("Rx ICMP6");
-                       memcpy(&srx.transport.sin6.sin6_addr,
+                       memcpy(&srx->transport.sin6.sin6_addr,
                               skb_network_header(skb) + serr->addr_offset,
                               sizeof(struct in6_addr));
                        break;
                case SO_EE_ORIGIN_ICMP:
                        _net("Rx ICMP on v6 sock");
-                       srx.transport.sin6.sin6_addr.s6_addr32[0] = 0;
-                       srx.transport.sin6.sin6_addr.s6_addr32[1] = 0;
-                       srx.transport.sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
-                       memcpy(srx.transport.sin6.sin6_addr.s6_addr + 12,
+                       srx->transport.sin6.sin6_addr.s6_addr32[0] = 0;
+                       srx->transport.sin6.sin6_addr.s6_addr32[1] = 0;
+                       srx->transport.sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
+                       memcpy(srx->transport.sin6.sin6_addr.s6_addr + 12,
                               skb_network_header(skb) + serr->addr_offset,
                               sizeof(struct in_addr));
                        break;
                default:
-                       memcpy(&srx.transport.sin6.sin6_addr,
+                       memcpy(&srx->transport.sin6.sin6_addr,
                               &ipv6_hdr(skb)->saddr,
                               sizeof(struct in6_addr));
                        break;
@@ -98,7 +98,7 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
                BUG();
        }
 
-       return rxrpc_lookup_peer_rcu(local, &srx);
+       return rxrpc_lookup_peer_rcu(local, srx);
 }
 
 /*
@@ -146,6 +146,7 @@ static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, struct sock_exterr_skb *se
 void rxrpc_error_report(struct sock *sk)
 {
        struct sock_exterr_skb *serr;
+       struct sockaddr_rxrpc srx;
        struct rxrpc_local *local = sk->sk_user_data;
        struct rxrpc_peer *peer;
        struct sk_buff *skb;
@@ -166,7 +167,7 @@ void rxrpc_error_report(struct sock *sk)
        }
 
        rcu_read_lock();
-       peer = rxrpc_lookup_peer_icmp_rcu(local, skb);
+       peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx);
        if (peer && !rxrpc_get_peer_maybe(peer))
                peer = NULL;
        if (!peer) {
@@ -176,6 +177,8 @@ void rxrpc_error_report(struct sock *sk)
                return;
        }
 
+       trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);
+
        if ((serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
             serr->ee.ee_type == ICMP_DEST_UNREACH &&
             serr->ee.ee_code == ICMP_FRAG_NEEDED)) {
@@ -209,9 +212,6 @@ static void rxrpc_store_error(struct rxrpc_peer *peer,
 
        ee = &serr->ee;
 
-       _net("Rx Error o=%d t=%d c=%d e=%d",
-            ee->ee_origin, ee->ee_type, ee->ee_code, ee->ee_errno);
-
        err = ee->ee_errno;
 
        switch (ee->ee_origin) {
index 588fea0..6c0ae27 100644 (file)
@@ -664,7 +664,8 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
 
        ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
        if (ret < 0) {
-               _debug("sendmsg failed: %d", ret);
+               trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
+                                   rxrpc_tx_fail_conn_challenge);
                return -EAGAIN;
        }
 
@@ -719,7 +720,8 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
 
        ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 3, len);
        if (ret < 0) {
-               _debug("sendmsg failed: %d", ret);
+               trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
+                                   rxrpc_tx_fail_conn_response);
                return -EAGAIN;
        }
 
index 206e802..be01f9c 100644 (file)
@@ -223,6 +223,15 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
 
        ret = rxrpc_send_data_packet(call, skb, false);
        if (ret < 0) {
+               switch (ret) {
+               case -ENETUNREACH:
+               case -EHOSTUNREACH:
+               case -ECONNREFUSED:
+                       rxrpc_set_call_completion(call,
+                                                 RXRPC_CALL_LOCAL_ERROR,
+                                                 0, ret);
+                       goto out;
+               }
                _debug("need instant resend %d", ret);
                rxrpc_instant_resend(call, ix);
        } else {
@@ -241,6 +250,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
                                        rxrpc_timer_set_for_send);
        }
 
+out:
        rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
        _leave("");
 }
index a5994cf..8527cfd 100644 (file)
@@ -652,7 +652,7 @@ static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
                }
        }
 
-       return 0;
+       return -ENOENT;
 }
 
 static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
@@ -682,7 +682,12 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
                u16 mtype;
                u16 dlen;
 
-               curr_data = ife_tlv_meta_decode(tlv_data, &mtype, &dlen, NULL);
+               curr_data = ife_tlv_meta_decode(tlv_data, ifehdr_end, &mtype,
+                                               &dlen, NULL);
+               if (!curr_data) {
+                       qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
+                       return TC_ACT_SHOT;
+               }
 
                if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) {
                        /* abuse overlimits to count when we receive metadata
index ddf69fc..6138d1d 100644 (file)
@@ -121,7 +121,8 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
                return 0;
 
        if (!flags) {
-               tcf_idr_release(*a, bind);
+               if (exists)
+                       tcf_idr_release(*a, bind);
                return -EINVAL;
        }
 
index bbcbdce..ad050d7 100644 (file)
@@ -131,8 +131,11 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
        if (exists && bind)
                return 0;
 
-       if (!lflags)
+       if (!lflags) {
+               if (exists)
+                       tcf_idr_release(*a, bind);
                return -EINVAL;
+       }
 
        if (!exists) {
                ret = tcf_idr_create(tn, parm->index, est, a,
index b66754f..963e4bf 100644 (file)
@@ -152,8 +152,8 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
                        NL_SET_ERR_MSG(extack, "TC classifier not found");
                        err = -ENOENT;
                }
-               goto errout;
 #endif
+               goto errout;
        }
        tp->classify = tp->ops->classify;
        tp->protocol = protocol;
index a366e4c..4808713 100644 (file)
@@ -128,6 +128,28 @@ static bool fq_flow_is_detached(const struct fq_flow *f)
        return f->next == &detached;
 }
 
+static bool fq_flow_is_throttled(const struct fq_flow *f)
+{
+       return f->next == &throttled;
+}
+
+static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
+{
+       if (head->first)
+               head->last->next = flow;
+       else
+               head->first = flow;
+       head->last = flow;
+       flow->next = NULL;
+}
+
+static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
+{
+       rb_erase(&f->rate_node, &q->delayed);
+       q->throttled_flows--;
+       fq_flow_add_tail(&q->old_flows, f);
+}
+
 static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
 {
        struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
@@ -155,15 +177,6 @@ static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
 
 static struct kmem_cache *fq_flow_cachep __read_mostly;
 
-static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
-{
-       if (head->first)
-               head->last->next = flow;
-       else
-               head->first = flow;
-       head->last = flow;
-       flow->next = NULL;
-}
 
 /* limit number of collected flows per round */
 #define FQ_GC_MAX 8
@@ -267,6 +280,8 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
                                     f->socket_hash != sk->sk_hash)) {
                                f->credit = q->initial_quantum;
                                f->socket_hash = sk->sk_hash;
+                               if (fq_flow_is_throttled(f))
+                                       fq_flow_unset_throttled(q, f);
                                f->time_next_packet = 0ULL;
                        }
                        return f;
@@ -438,9 +453,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
                        q->time_next_delayed_flow = f->time_next_packet;
                        break;
                }
-               rb_erase(p, &q->delayed);
-               q->throttled_flows--;
-               fq_flow_add_tail(&q->old_flows, f);
+               fq_flow_unset_throttled(q, f);
        }
 }
 
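fq encodes a flow's parked state in its ->next pointer, which points at a dedicated sentinel while the flow sits in the detached pool or the throttled rb-tree; the new fq_flow_unset_throttled() pairs with fq_flow_set_throttled() so that a flow whose socket hash changed can be pulled straight out of the throttled tree and requeued. A minimal sketch of the sentinel trick (simplified stand-alone code, not the qdisc itself):

#include <stdbool.h>
#include <stddef.h>

struct flow { struct flow *next; };

static struct flow throttled_sentinel;          /* never enqueued, only pointed at */

static bool flow_is_throttled(const struct flow *f)
{
        return f->next == &throttled_sentinel;  /* membership test is one compare */
}

static void flow_set_throttled(struct flow *f)
{
        f->next = &throttled_sentinel;          /* parked: not on any run queue */
}

static void flow_unset_throttled(struct flow *f, struct flow **old_head)
{
        /* in fq this also removes the node from the delayed rb-tree and
         * decrements the throttled counter; here we just requeue it
         */
        f->next = *old_head;
        *old_head = f;
}
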
index 837806d..a47179d 100644 (file)
@@ -1024,8 +1024,9 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
        struct sctp_endpoint *ep;
        struct sctp_chunk *chunk;
        struct sctp_inq *inqueue;
-       int state;
+       int first_time = 1;     /* is this the first time through the loop */
        int error = 0;
+       int state;
 
        /* The association should be held so we should be safe. */
        ep = asoc->ep;
@@ -1036,6 +1037,30 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
                state = asoc->state;
                subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
 
+               /* If the first chunk in the packet is AUTH, do special
+                * processing specified in Section 6.3 of SCTP-AUTH spec
+                */
+               if (first_time && subtype.chunk == SCTP_CID_AUTH) {
+                       struct sctp_chunkhdr *next_hdr;
+
+                       next_hdr = sctp_inq_peek(inqueue);
+                       if (!next_hdr)
+                               goto normal;
+
+                       /* If the next chunk is COOKIE-ECHO, skip the AUTH
+                        * chunk while saving a pointer to it so we can do
+                        * Authentication later (during cookie-echo
+                        * processing).
+                        */
+                       if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
+                               chunk->auth_chunk = skb_clone(chunk->skb,
+                                                             GFP_ATOMIC);
+                               chunk->auth = 1;
+                               continue;
+                       }
+               }
+
+normal:
                /* SCTP-AUTH, Section 6.3:
                 *    The receiver has a list of chunk types which it expects
                 *    to be received only after an AUTH-chunk.  This list has
@@ -1074,6 +1099,9 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
                /* If there is an error on chunk, discard this packet. */
                if (error && chunk)
                        chunk->pdiscard = 1;
+
+               if (first_time)
+                       first_time = 0;
        }
        sctp_association_put(asoc);
 }
index 23ebc53..eb93ffe 100644 (file)
@@ -217,7 +217,7 @@ new_skb:
        skb_pull(chunk->skb, sizeof(*ch));
        chunk->subh.v = NULL; /* Subheader is no longer valid.  */
 
-       if (chunk->chunk_end + sizeof(*ch) < skb_tail_pointer(chunk->skb)) {
+       if (chunk->chunk_end + sizeof(*ch) <= skb_tail_pointer(chunk->skb)) {
                /* This is not a singleton */
                chunk->singleton = 0;
        } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
index 2e3f7b7..4224711 100644 (file)
@@ -895,6 +895,9 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
        if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
                return 1;
 
+       if (addr1->sa.sa_family == AF_INET && addr2->sa.sa_family == AF_INET)
+               return addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr;
+
        return __sctp_v6_cmp_addr(addr1, addr2);
 }
 
index 5a4fb1d..e62addb 100644 (file)
@@ -1152,7 +1152,7 @@ struct sctp_chunk *sctp_make_violation_max_retrans(
                                        const struct sctp_association *asoc,
                                        const struct sctp_chunk *chunk)
 {
-       static const char error[] = "Association exceeded its max_retans count";
+       static const char error[] = "Association exceeded its max_retrans count";
        size_t payload_len = sizeof(error) + sizeof(struct sctp_errhdr);
        struct sctp_chunk *retval;
 
index dd0594a..c9ae340 100644 (file)
@@ -153,10 +153,7 @@ static enum sctp_disposition sctp_sf_violation_chunk(
                                        struct sctp_cmd_seq *commands);
 
 static enum sctp_ierror sctp_sf_authenticate(
-                                       struct net *net,
-                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
-                                       const union sctp_subtype type,
                                        struct sctp_chunk *chunk);
 
 static enum sctp_disposition __sctp_sf_do_9_1_abort(
@@ -626,6 +623,38 @@ enum sctp_disposition sctp_sf_do_5_1C_ack(struct net *net,
        return SCTP_DISPOSITION_CONSUME;
 }
 
+static bool sctp_auth_chunk_verify(struct net *net, struct sctp_chunk *chunk,
+                                  const struct sctp_association *asoc)
+{
+       struct sctp_chunk auth;
+
+       if (!chunk->auth_chunk)
+               return true;
+
+       /* SCTP-AUTH:  auth_chunk pointer is only set when the cookie-echo
+        * is supposed to be authenticated and we have to do delayed
+        * authentication.  We've just recreated the association using
+        * the information in the cookie and now it's much easier to
+        * do the authentication.
+        */
+
+       /* Make sure that we and the peer are AUTH capable */
+       if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
+               return false;
+
+       /* set-up our fake chunk so that we can process it */
+       auth.skb = chunk->auth_chunk;
+       auth.asoc = chunk->asoc;
+       auth.sctp_hdr = chunk->sctp_hdr;
+       auth.chunk_hdr = (struct sctp_chunkhdr *)
+                               skb_push(chunk->auth_chunk,
+                                        sizeof(struct sctp_chunkhdr));
+       skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr));
+       auth.transport = chunk->transport;
+
+       return sctp_sf_authenticate(asoc, &auth) == SCTP_IERROR_NO_ERROR;
+}
+
 /*
  * Respond to a normal COOKIE ECHO chunk.
  * We are the side that is being asked for an association.
@@ -763,37 +792,9 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
        if (error)
                goto nomem_init;
 
-       /* SCTP-AUTH:  auth_chunk pointer is only set when the cookie-echo
-        * is supposed to be authenticated and we have to do delayed
-        * authentication.  We've just recreated the association using
-        * the information in the cookie and now it's much easier to
-        * do the authentication.
-        */
-       if (chunk->auth_chunk) {
-               struct sctp_chunk auth;
-               enum sctp_ierror ret;
-
-               /* Make sure that we and the peer are AUTH capable */
-               if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
-                       sctp_association_free(new_asoc);
-                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
-               }
-
-               /* set-up our fake chunk so that we can process it */
-               auth.skb = chunk->auth_chunk;
-               auth.asoc = chunk->asoc;
-               auth.sctp_hdr = chunk->sctp_hdr;
-               auth.chunk_hdr = (struct sctp_chunkhdr *)
-                                       skb_push(chunk->auth_chunk,
-                                                sizeof(struct sctp_chunkhdr));
-               skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr));
-               auth.transport = chunk->transport;
-
-               ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
-               if (ret != SCTP_IERROR_NO_ERROR) {
-                       sctp_association_free(new_asoc);
-                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
-               }
+       if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) {
+               sctp_association_free(new_asoc);
+               return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
        repl = sctp_make_cookie_ack(new_asoc, chunk);
@@ -1794,13 +1795,18 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
                               GFP_ATOMIC))
                goto nomem;
 
+       if (sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC))
+               goto nomem;
+
+       if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
+               return SCTP_DISPOSITION_DISCARD;
+
        /* Make sure no new addresses are being added during the
         * restart.  Though this is a pretty complicated attack
         * since you'd have to get inside the cookie.
         */
-       if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) {
+       if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands))
                return SCTP_DISPOSITION_CONSUME;
-       }
 
        /* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes
         * the peer has restarted (Action A), it MUST NOT setup a new
@@ -1906,6 +1912,12 @@ static enum sctp_disposition sctp_sf_do_dupcook_b(
                               GFP_ATOMIC))
                goto nomem;
 
+       if (sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC))
+               goto nomem;
+
+       if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
+               return SCTP_DISPOSITION_DISCARD;
+
        /* Update the content of current association.  */
        sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
        sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
@@ -2003,6 +2015,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_d(
         * a COOKIE ACK.
         */
 
+       if (!sctp_auth_chunk_verify(net, chunk, asoc))
+               return SCTP_DISPOSITION_DISCARD;
+
        /* Don't accidentally move back into established state. */
        if (asoc->state < SCTP_STATE_ESTABLISHED) {
                sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
@@ -2050,7 +2065,7 @@ static enum sctp_disposition sctp_sf_do_dupcook_d(
                }
        }
 
-       repl = sctp_make_cookie_ack(new_asoc, chunk);
+       repl = sctp_make_cookie_ack(asoc, chunk);
        if (!repl)
                goto nomem;
 
@@ -4165,10 +4180,7 @@ gen_shutdown:
  * The return value is the disposition of the chunk.
  */
 static enum sctp_ierror sctp_sf_authenticate(
-                                       struct net *net,
-                                       const struct sctp_endpoint *ep,
                                        const struct sctp_association *asoc,
-                                       const union sctp_subtype type,
                                        struct sctp_chunk *chunk)
 {
        struct sctp_shared_key *sh_key = NULL;
@@ -4269,7 +4281,7 @@ enum sctp_disposition sctp_sf_eat_auth(struct net *net,
                                                  commands);
 
        auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
-       error = sctp_sf_authenticate(net, ep, asoc, type, chunk);
+       error = sctp_sf_authenticate(asoc, chunk);
        switch (error) {
        case SCTP_IERROR_AUTH_BAD_HMAC:
                /* Generate the ERROR chunk and discard the rest
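The refactor above folds the delayed cookie-echo authentication into one boolean helper, so each duplicate-cookie path can discard the packet with a single check instead of open-coding the fake-chunk setup. A condensed sketch of that helper shape (invented names, not the SCTP state machine itself):

#include <stdbool.h>
#include <stddef.h>

struct chunk { const void *auth_blob; };

static bool auth_blob_ok(const void *blob)      /* stand-in for the HMAC check */
{
        (void)blob;
        return true;
}

static bool chunk_auth_verify(const struct chunk *c, bool peer_auth_capable)
{
        if (!c->auth_blob)
                return true;            /* nothing was deferred, accept as-is */
        if (!peer_auth_capable)
                return false;           /* deferred auth but the peer cannot do AUTH */
        return auth_blob_ok(c->auth_blob);
}
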
index f799043..f1f1d1b 100644 (file)
@@ -240,6 +240,8 @@ void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new)
 
        new->out = NULL;
        new->in  = NULL;
+       new->outcnt = 0;
+       new->incnt  = 0;
 }
 
 static int sctp_send_reconf(struct sctp_association *asoc,
index 84207ad..8cb7d98 100644 (file)
@@ -715,7 +715,6 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
        return event;
 
 fail_mark:
-       sctp_chunk_put(chunk);
        kfree_skb(skb);
 fail:
        return NULL;
index f5d4b69..544bab4 100644 (file)
@@ -292,6 +292,17 @@ static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
        smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
 }
 
+/* register a new rmb */
+static int smc_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc)
+{
+       /* register memory region for new rmb */
+       if (smc_wr_reg_send(link, rmb_desc->mr_rx[SMC_SINGLE_LINK])) {
+               rmb_desc->regerr = 1;
+               return -EFAULT;
+       }
+       return 0;
+}
+
 static int smc_clnt_conf_first_link(struct smc_sock *smc)
 {
        struct smc_link_group *lgr = smc->conn.lgr;
@@ -321,9 +332,7 @@ static int smc_clnt_conf_first_link(struct smc_sock *smc)
 
        smc_wr_remember_qp_attr(link);
 
-       rc = smc_wr_reg_send(link,
-                            smc->conn.rmb_desc->mr_rx[SMC_SINGLE_LINK]);
-       if (rc)
+       if (smc_reg_rmb(link, smc->conn.rmb_desc))
                return SMC_CLC_DECL_INTERR;
 
        /* send CONFIRM LINK response over RoCE fabric */
@@ -473,13 +482,8 @@ static int smc_connect_rdma(struct smc_sock *smc)
                        goto decline_rdma_unlock;
                }
        } else {
-               struct smc_buf_desc *buf_desc = smc->conn.rmb_desc;
-
-               if (!buf_desc->reused) {
-                       /* register memory region for new rmb */
-                       rc = smc_wr_reg_send(link,
-                                            buf_desc->mr_rx[SMC_SINGLE_LINK]);
-                       if (rc) {
+               if (!smc->conn.rmb_desc->reused) {
+                       if (smc_reg_rmb(link, smc->conn.rmb_desc)) {
                                reason_code = SMC_CLC_DECL_INTERR;
                                goto decline_rdma_unlock;
                        }
@@ -719,9 +723,7 @@ static int smc_serv_conf_first_link(struct smc_sock *smc)
 
        link = &lgr->lnk[SMC_SINGLE_LINK];
 
-       rc = smc_wr_reg_send(link,
-                            smc->conn.rmb_desc->mr_rx[SMC_SINGLE_LINK]);
-       if (rc)
+       if (smc_reg_rmb(link, smc->conn.rmb_desc))
                return SMC_CLC_DECL_INTERR;
 
        /* send CONFIRM LINK request to client over the RoCE fabric */
@@ -854,13 +856,8 @@ static void smc_listen_work(struct work_struct *work)
        smc_rx_init(new_smc);
 
        if (local_contact != SMC_FIRST_CONTACT) {
-               struct smc_buf_desc *buf_desc = new_smc->conn.rmb_desc;
-
-               if (!buf_desc->reused) {
-                       /* register memory region for new rmb */
-                       rc = smc_wr_reg_send(link,
-                                            buf_desc->mr_rx[SMC_SINGLE_LINK]);
-                       if (rc) {
+               if (!new_smc->conn.rmb_desc->reused) {
+                       if (smc_reg_rmb(link, new_smc->conn.rmb_desc)) {
                                reason_code = SMC_CLC_DECL_INTERR;
                                goto decline_rdma_unlock;
                        }
@@ -978,10 +975,6 @@ static void smc_tcp_listen_work(struct work_struct *work)
        }
 
 out:
-       if (lsmc->clcsock) {
-               sock_release(lsmc->clcsock);
-               lsmc->clcsock = NULL;
-       }
        release_sock(lsk);
        sock_put(&lsmc->sk); /* sock_hold in smc_listen */
 }
@@ -1170,13 +1163,15 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
                /* delegate to CLC child sock */
                release_sock(sk);
                mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
-               /* if non-blocking connect finished ... */
                lock_sock(sk);
-               if ((sk->sk_state == SMC_INIT) && (mask & EPOLLOUT)) {
-                       sk->sk_err = smc->clcsock->sk->sk_err;
-                       if (sk->sk_err) {
-                               mask |= EPOLLERR;
-                       } else {
+               sk->sk_err = smc->clcsock->sk->sk_err;
+               if (sk->sk_err) {
+                       mask |= EPOLLERR;
+               } else {
+                       /* if non-blocking connect finished ... */
+                       if (sk->sk_state == SMC_INIT &&
+                           mask & EPOLLOUT &&
+                           smc->clcsock->sk->sk_state != TCP_CLOSE) {
                                rc = smc_connect_rdma(smc);
                                if (rc < 0)
                                        mask |= EPOLLERR;
@@ -1320,8 +1315,11 @@ static ssize_t smc_sendpage(struct socket *sock, struct page *page,
 
        smc = smc_sk(sk);
        lock_sock(sk);
-       if (sk->sk_state != SMC_ACTIVE)
+       if (sk->sk_state != SMC_ACTIVE) {
+               release_sock(sk);
                goto out;
+       }
+       release_sock(sk);
        if (smc->use_fallback)
                rc = kernel_sendpage(smc->clcsock, page, offset,
                                     size, flags);
@@ -1329,7 +1327,6 @@ static ssize_t smc_sendpage(struct socket *sock, struct page *page,
                rc = sock_no_sendpage(sock, page, offset, size, flags);
 
 out:
-       release_sock(sk);
        return rc;
 }
 
index f44f680..d4bd01b 100644 (file)
@@ -32,6 +32,9 @@
 
 static u32 smc_lgr_num;                        /* unique link group number */
 
+static void smc_buf_free(struct smc_buf_desc *buf_desc, struct smc_link *lnk,
+                        bool is_rmb);
+
 static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
 {
        /* client link group creation always follows the server link group
@@ -234,9 +237,22 @@ static void smc_buf_unuse(struct smc_connection *conn)
                conn->sndbuf_size = 0;
        }
        if (conn->rmb_desc) {
-               conn->rmb_desc->reused = true;
-               conn->rmb_desc->used = 0;
-               conn->rmbe_size = 0;
+               if (!conn->rmb_desc->regerr) {
+                       conn->rmb_desc->reused = 1;
+                       conn->rmb_desc->used = 0;
+                       conn->rmbe_size = 0;
+               } else {
+                       /* buf registration failed, reuse not possible */
+                       struct smc_link_group *lgr = conn->lgr;
+                       struct smc_link *lnk;
+
+                       write_lock_bh(&lgr->rmbs_lock);
+                       list_del(&conn->rmb_desc->list);
+                       write_unlock_bh(&lgr->rmbs_lock);
+
+                       lnk = &lgr->lnk[SMC_SINGLE_LINK];
+                       smc_buf_free(conn->rmb_desc, lnk, true);
+               }
        }
 }
 
index 07e2a39..5dfcb15 100644 (file)
@@ -123,7 +123,8 @@ struct smc_buf_desc {
                                                 */
        u32                     order;          /* allocation order */
        u32                     used;           /* currently used / unused */
-       bool                    reused;         /* new created / reused */
+       u8                      reused  : 1;    /* new created / reused */
+       u8                      regerr  : 1;    /* err during registration */
 };
 
 struct smc_rtoken {                            /* address/key of remote RMB */
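The smc_core.h hunk above turns the old boolean into two single-bit flags: reused marks a buffer recycled from an earlier connection, while regerr records that registering the memory region on the link failed, in which case smc_buf_unuse() frees the buffer instead of queuing it for reuse. The following standalone sketch illustrates that keep-or-free decision; buf_desc, buf_unuse and the plain free() call are simplified stand-ins, not the SMC API.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for struct smc_buf_desc: two 1-bit flags. */
struct buf_desc {
	unsigned char reused : 1;	/* recycled from an earlier connection */
	unsigned char regerr : 1;	/* registering the buffer failed */
};

/* Keep-or-free decision as in smc_buf_unuse(): a buffer whose
 * registration failed must not be handed to the next connection. */
static void buf_unuse(struct buf_desc **descp)
{
	struct buf_desc *desc = *descp;

	if (!desc->regerr) {
		desc->reused = 1;	/* eligible for reuse */
	} else {
		free(desc);		/* reuse not possible */
		*descp = NULL;
	}
}

int main(void)
{
	struct buf_desc *d = calloc(1, sizeof(*d));

	if (!d)
		return 1;
	d->regerr = 1;			/* pretend smc_reg_rmb() failed */
	buf_unuse(&d);
	printf("buffer %s\n", d ? "kept for reuse" : "freed");
	return 0;
}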
index 805b139..092bebc 100644 (file)
@@ -67,7 +67,7 @@ static void strp_abort_strp(struct strparser *strp, int err)
 
 static void strp_start_timer(struct strparser *strp, long timeo)
 {
-       if (timeo)
+       if (timeo && timeo != LONG_MAX)
                mod_delayed_work(strp_wq, &strp->msg_timer_work, timeo);
 }
 
index 5cc68a8..f2f6395 100644 (file)
@@ -72,6 +72,7 @@ fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
        if (IS_ERR(mr->fmr.fm_mr))
                goto out_fmr_err;
 
+       INIT_LIST_HEAD(&mr->mr_list);
        return 0;
 
 out_fmr_err:
@@ -102,10 +103,6 @@ fmr_op_release_mr(struct rpcrdma_mr *mr)
        LIST_HEAD(unmap_list);
        int rc;
 
-       /* Ensure MW is not on any rl_registered list */
-       if (!list_empty(&mr->mr_list))
-               list_del(&mr->mr_list);
-
        kfree(mr->fmr.fm_physaddrs);
        kfree(mr->mr_sg);
 
index c5743a0..c59c5c7 100644 (file)
@@ -110,6 +110,7 @@ frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
        if (!mr->mr_sg)
                goto out_list_err;
 
+       INIT_LIST_HEAD(&mr->mr_list);
        sg_init_table(mr->mr_sg, depth);
        init_completion(&frwr->fr_linv_done);
        return 0;
@@ -133,10 +134,6 @@ frwr_op_release_mr(struct rpcrdma_mr *mr)
 {
        int rc;
 
-       /* Ensure MR is not on any rl_registered list */
-       if (!list_empty(&mr->mr_list))
-               list_del(&mr->mr_list);
-
        rc = ib_dereg_mr(mr->frwr.fr_mr);
        if (rc)
                pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
@@ -195,7 +192,7 @@ frwr_op_recover_mr(struct rpcrdma_mr *mr)
        return;
 
 out_release:
-       pr_err("rpcrdma: FRWR reset failed %d, %p release\n", rc, mr);
+       pr_err("rpcrdma: FRWR reset failed %d, %p released\n", rc, mr);
        r_xprt->rx_stats.mrs_orphaned++;
 
        spin_lock(&r_xprt->rx_buf.rb_mrlock);
@@ -476,7 +473,7 @@ frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
 
        list_for_each_entry(mr, mrs, mr_list)
                if (mr->mr_handle == rep->rr_inv_rkey) {
-                       list_del(&mr->mr_list);
+                       list_del_init(&mr->mr_list);
                        trace_xprtrdma_remoteinv(mr);
                        mr->frwr.fr_state = FRWR_IS_INVALID;
                        rpcrdma_mr_unmap_and_put(mr);
index fe5eaca..c345d36 100644 (file)
@@ -1254,6 +1254,11 @@ rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
                list_del(&mr->mr_all);
 
                spin_unlock(&buf->rb_mrlock);
+
+               /* Ensure MW is not on any rl_registered list */
+               if (!list_empty(&mr->mr_list))
+                       list_del(&mr->mr_list);
+
                ia->ri_ops->ro_release_mr(mr);
                count++;
                spin_lock(&buf->rb_mrlock);
index 3d3b423..cb41b12 100644 (file)
@@ -380,7 +380,7 @@ rpcrdma_mr_pop(struct list_head *list)
        struct rpcrdma_mr *mr;
 
        mr = list_first_entry(list, struct rpcrdma_mr, mr_list);
-       list_del(&mr->mr_list);
+       list_del_init(&mr->mr_list);
        return mr;
 }
 
index 6f98b56..f29549d 100644 (file)
@@ -1950,6 +1950,7 @@ out:
 int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
 {
        struct net *net = genl_info_net(info);
+       struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
        struct tipc_nl_msg msg;
        char *name;
        int err;
@@ -1957,9 +1958,19 @@ int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
        msg.portid = info->snd_portid;
        msg.seq = info->snd_seq;
 
-       if (!info->attrs[TIPC_NLA_LINK_NAME])
+       if (!info->attrs[TIPC_NLA_LINK])
                return -EINVAL;
-       name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
+
+       err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
+                              info->attrs[TIPC_NLA_LINK],
+                              tipc_nl_link_policy, info->extack);
+       if (err)
+               return err;
+
+       if (!attrs[TIPC_NLA_LINK_NAME])
+               return -EINVAL;
+
+       name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
 
        msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!msg.skb)
@@ -2244,7 +2255,7 @@ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
 
        rtnl_lock();
        for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
-               err = __tipc_nl_add_monitor(net, &msg, prev_bearer);
+               err = __tipc_nl_add_monitor(net, &msg, bearer_id);
                if (err)
                        break;
        }
index 252a52a..6be2157 100644 (file)
@@ -1516,10 +1516,10 @@ static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
 
        srcaddr->sock.family = AF_TIPC;
        srcaddr->sock.addrtype = TIPC_ADDR_ID;
+       srcaddr->sock.scope = 0;
        srcaddr->sock.addr.id.ref = msg_origport(hdr);
        srcaddr->sock.addr.id.node = msg_orignode(hdr);
        srcaddr->sock.addr.name.domain = 0;
-       srcaddr->sock.scope = 0;
        m->msg_namelen = sizeof(struct sockaddr_tipc);
 
        if (!msg_in_group(hdr))
@@ -1528,6 +1528,7 @@ static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
        /* Group message users may also want to know sending member's id */
        srcaddr->member.family = AF_TIPC;
        srcaddr->member.addrtype = TIPC_ADDR_NAME;
+       srcaddr->member.scope = 0;
        srcaddr->member.addr.name.name.type = msg_nametype(hdr);
        srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
        srcaddr->member.addr.name.domain = 0;
index 0d37997..20cd93b 100644 (file)
@@ -114,6 +114,7 @@ int tls_push_sg(struct sock *sk,
        size = sg->length - offset;
        offset += sg->offset;
 
+       ctx->in_tcp_sendpages = true;
        while (1) {
                if (sg_is_last(sg))
                        sendpage_flags = flags;
@@ -134,6 +135,7 @@ retry:
                        offset -= sg->offset;
                        ctx->partially_sent_offset = offset;
                        ctx->partially_sent_record = (void *)sg;
+                       ctx->in_tcp_sendpages = false;
                        return ret;
                }
 
@@ -148,6 +150,8 @@ retry:
        }
 
        clear_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
+       ctx->in_tcp_sendpages = false;
+       ctx->sk_write_space(sk);
 
        return 0;
 }
@@ -217,6 +221,10 @@ static void tls_write_space(struct sock *sk)
 {
        struct tls_context *ctx = tls_get_ctx(sk);
 
+       /* We are already sending pages, ignore notification */
+       if (ctx->in_tcp_sendpages)
+               return;
+
        if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
                gfp_t sk_allocation = sk->sk_allocation;
                int rc;
@@ -241,16 +249,13 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
        struct tls_context *ctx = tls_get_ctx(sk);
        long timeo = sock_sndtimeo(sk, 0);
        void (*sk_proto_close)(struct sock *sk, long timeout);
+       bool free_ctx = false;
 
        lock_sock(sk);
        sk_proto_close = ctx->sk_proto_close;
 
-       if (ctx->conf == TLS_HW_RECORD)
-               goto skip_tx_cleanup;
-
-       if (ctx->conf == TLS_BASE) {
-               kfree(ctx);
-               ctx = NULL;
+       if (ctx->conf == TLS_BASE || ctx->conf == TLS_HW_RECORD) {
+               free_ctx = true;
                goto skip_tx_cleanup;
        }
 
@@ -287,7 +292,7 @@ skip_tx_cleanup:
        /* free ctx for TLS_HW_RECORD, used by tcp_set_state
         * for sk->sk_prot->unhash [tls_hw_unhash]
         */
-       if (ctx && ctx->conf == TLS_HW_RECORD)
+       if (free_ctx)
                kfree(ctx);
 }
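The tls_main.c changes above add an in_tcp_sendpages flag so that tls_write_space(), which the TCP transmit path can invoke while tls_push_sg() is still looping over do_tcp_sendpages(), bails out instead of re-entering the record push; the flag is cleared and sk_write_space() driven again once the loop finishes. Below is a minimal, self-contained sketch of that reentrancy-guard pattern with made-up names (ctx, push_records, write_space); it illustrates the idea and is not the kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Assumed stand-in for the TLS context; in_send mirrors in_tcp_sendpages. */
struct ctx {
	bool in_send;
};

static void write_space(struct ctx *c);	/* may be invoked mid-transmit */

static void push_records(struct ctx *c)
{
	c->in_send = true;
	/* ... transmit loop; the stack can call write_space() from here ... */
	write_space(c);			/* simulate a nested notification */
	c->in_send = false;
	write_space(c);			/* notify again once the send is done */
}

static void write_space(struct ctx *c)
{
	if (c->in_send) {		/* already transmitting: ignore */
		puts("write_space: ignored (re-entered during send)");
		return;
	}
	puts("write_space: handled");
}

int main(void)
{
	struct ctx c = { .in_send = false };

	push_records(&c);
	return 0;
}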
 
index a6f3cac..c0fd8a8 100644 (file)
@@ -95,6 +95,9 @@ static int cfg80211_dev_check_name(struct cfg80211_registered_device *rdev,
 
        ASSERT_RTNL();
 
+       if (strlen(newname) > NL80211_WIPHY_NAME_MAXLEN)
+               return -EINVAL;
+
        /* prohibit calling the thing phy%d when %d is not its number */
        sscanf(newname, PHY_NAME "%d%n", &wiphy_idx, &taken);
        if (taken == strlen(newname) && wiphy_idx != rdev->wiphy_idx) {
index ff28f8f..a052693 100644 (file)
@@ -9214,6 +9214,7 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
 
        if (nla_get_flag(info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT])) {
                if (!info->attrs[NL80211_ATTR_SOCKET_OWNER]) {
+                       kzfree(connkeys);
                        GENL_SET_ERR_MSG(info,
                                         "external auth requires connection ownership");
                        return -EINVAL;
index 16c7e4e..ac3e12c 100644 (file)
@@ -1026,6 +1026,7 @@ static int regdb_query_country(const struct fwdb_header *db,
 
                        if (!tmp_rd) {
                                kfree(regdom);
+                               kfree(wmm_ptrs);
                                return -ENOMEM;
                        }
                        regdom = tmp_rd;
index f9d2f22..6c177ae 100644 (file)
@@ -2175,6 +2175,12 @@ struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
        return afinfo;
 }
 
+void xfrm_flush_gc(void)
+{
+       flush_work(&xfrm_state_gc_work);
+}
+EXPORT_SYMBOL(xfrm_flush_gc);
+
 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
 void xfrm_state_delete_tunnel(struct xfrm_state *x)
 {
index 9bf2881..fa53f4d 100644 (file)
@@ -65,11 +65,14 @@ $(src)/*.c: verify_target_bpf
 # asm/sysreg.h - inline assembly used by it is incompatible with llvm.
 # But, there is no easy way to fix it, so just exclude it since it is
 # useless for BPF samples.
+#
+# The -target bpf option is required with SK_MSG programs; it ensures that
+#  reads of the 'void *' data and data_end fields are __u64 reads.
 $(obj)/%.o: $(src)/%.c
        $(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) -I$(obj) \
                -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
                -Wno-compare-distinct-pointer-types \
                -Wno-gnu-variable-sized-type-not-at-end \
                -Wno-address-of-packed-member -Wno-tautological-compare \
-               -Wno-unknown-warning-option \
-               -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
+               -Wno-unknown-warning-option -O2 -target bpf \
+               -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
index b2a95af..7f5c862 100644 (file)
@@ -14,7 +14,7 @@ ifdef CONFIG_GCC_PLUGINS
   endif
 
   ifdef CONFIG_GCC_PLUGIN_SANCOV
-    ifeq ($(CFLAGS_KCOV),)
+    ifeq ($(strip $(CFLAGS_KCOV)),)
       # It is needed because of the gcc-plugin.sh and gcc version checks.
       gcc-plugin-$(CONFIG_GCC_PLUGIN_SANCOV)           += sancov_plugin.so
 
index 07d0740..5af34a2 100644 (file)
@@ -196,7 +196,7 @@ $(obj)/%.tab.c: $(src)/%.y FORCE
        $(call if_changed,bison)
 
 quiet_cmd_bison_h = YACC    $@
-      cmd_bison_h = bison -o/dev/null --defines=$@ -t -l $<
+      cmd_bison_h = $(YACC) -o/dev/null --defines=$@ -t -l $<
 
 $(obj)/%.tab.h: $(src)/%.y FORCE
        $(call if_changed,bison_h)
index c07ba4d..815eaf1 100644 (file)
@@ -787,10 +787,9 @@ static void check_pci_bridge(struct check *c, struct dt_info *dti, struct node *
                FAIL(c, dti, node, "incorrect #size-cells for PCI bridge");
 
        prop = get_property(node, "bus-range");
-       if (!prop) {
-               FAIL(c, dti, node, "missing bus-range for PCI bridge");
+       if (!prop)
                return;
-       }
+
        if (prop->val.len != (sizeof(cell_t) * 2)) {
                FAIL_PROP(c, dti, node, prop, "value must be 2 cells");
                return;
index 61d9b25..a1c51b7 100755 (executable)
@@ -1,6 +1,6 @@
 #!/usr/bin/env perl
 
-# Copyright (c) Mauro Carvalho Chehab <mchehab@infradead.org>
+# Copyright (c) Mauro Carvalho Chehab <mchehab@kernel.org>
 # Released under GPLv2
 #
 # In order to use, you need to:
index 9e5735a..1876a74 100755 (executable)
@@ -170,7 +170,10 @@ __faddr2line() {
                echo "$file_lines" | while read -r line
                do
                        echo $line
-                       eval $(echo $line | awk -F "[ :]" '{printf("n1=%d;n2=%d;f=%s",$NF-5, $NF+5, $(NF-1))}')
+                       n=$(echo $line | sed 's/.*:\([0-9]\+\).*/\1/g')
+                       n1=$[$n-5]
+                       n2=$[$n+5]
+                       f=$(echo $line | sed 's/.*at \(.\+\):.*/\1/g')
                        awk 'NR>=strtonum("'$n1'") && NR<=strtonum("'$n2'") {printf("%d\t%s\n", NR, $0)}' $f
                done
 
index ef0287e..03b7ce9 100644 (file)
@@ -14,14 +14,14 @@ genksyms-objs       := genksyms.o parse.tab.o lex.lex.o
 # so that 'bison: not found' will be displayed if it is missing.
 ifeq ($(findstring 1,$(KBUILD_ENABLE_EXTRA_GCC_CHECKS)),)
 
-quiet_cmd_bison_no_warn = $(quet_cmd_bison)
+quiet_cmd_bison_no_warn = $(quiet_cmd_bison)
       cmd_bison_no_warn = $(YACC) --version >/dev/null; \
                          $(cmd_bison) 2>/dev/null
 
 $(obj)/parse.tab.c: $(src)/parse.y FORCE
        $(call if_changed,bison_no_warn)
 
-quiet_cmd_bison_h_no_warn = $(quet_cmd_bison_h)
+quiet_cmd_bison_h_no_warn = $(quiet_cmd_bison_h)
       cmd_bison_h_no_warn = $(YACC) --version >/dev/null; \
                            $(cmd_bison_h) 2>/dev/null
 
index 944418d..0f6dcb4 100644 (file)
@@ -330,14 +330,7 @@ static int parse_source_files(const char *objfile, struct md4_ctx *md)
                goto out;
        }
 
-       /* There will be a line like so:
-               deps_drivers/net/dummy.o := \
-                 drivers/net/dummy.c \
-                   $(wildcard include/config/net/fastroute.h) \
-                 include/linux/module.h \
-
-          Sum all files in the same dir or subdirs.
-       */
+       /* Sum all files in the same dir or subdirs. */
        while ((line = get_next_line(&pos, file, flen)) != NULL) {
                char* p = line;
 
index bfe16cb..c3db607 100755 (executable)
@@ -1,7 +1,7 @@
 #!/usr/bin/perl
 # SPDX-License-Identifier: GPL-2.0
 #
-# Author: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+# Author: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
 #
 # Produce manpages from kernel-doc.
 # See Documentation/doc-guide/kernel-doc.rst for instructions
index 48620c9..1ce701f 100644 (file)
@@ -449,6 +449,8 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
                                magic |= VFS_CAP_FLAGS_EFFECTIVE;
                        memcpy(&cap->data, &nscap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
                        cap->magic_etc = cpu_to_le32(magic);
+               } else {
+                       size = -ENOMEM;
                }
        }
        kfree(tmpbuf);
index 4cafe6a..be5817d 100644 (file)
@@ -4576,6 +4576,7 @@ static int selinux_socket_post_create(struct socket *sock, int family,
 static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen)
 {
        struct sock *sk = sock->sk;
+       struct sk_security_struct *sksec = sk->sk_security;
        u16 family;
        int err;
 
@@ -4587,11 +4588,11 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
        family = sk->sk_family;
        if (family == PF_INET || family == PF_INET6) {
                char *addrp;
-               struct sk_security_struct *sksec = sk->sk_security;
                struct common_audit_data ad;
                struct lsm_network_audit net = {0,};
                struct sockaddr_in *addr4 = NULL;
                struct sockaddr_in6 *addr6 = NULL;
+               u16 family_sa = address->sa_family;
                unsigned short snum;
                u32 sid, node_perm;
 
@@ -4601,11 +4602,20 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
                 * need to check address->sa_family as it is possible to have
                 * sk->sk_family = PF_INET6 with addr->sa_family = AF_INET.
                 */
-               switch (address->sa_family) {
+               switch (family_sa) {
+               case AF_UNSPEC:
                case AF_INET:
                        if (addrlen < sizeof(struct sockaddr_in))
                                return -EINVAL;
                        addr4 = (struct sockaddr_in *)address;
+                       if (family_sa == AF_UNSPEC) {
+                               /* see __inet_bind(), we only want to allow
+                                * AF_UNSPEC if the address is INADDR_ANY
+                                */
+                               if (addr4->sin_addr.s_addr != htonl(INADDR_ANY))
+                                       goto err_af;
+                               family_sa = AF_INET;
+                       }
                        snum = ntohs(addr4->sin_port);
                        addrp = (char *)&addr4->sin_addr.s_addr;
                        break;
@@ -4617,15 +4627,14 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
                        addrp = (char *)&addr6->sin6_addr.s6_addr;
                        break;
                default:
-                       /* Note that SCTP services expect -EINVAL, whereas
-                        * others expect -EAFNOSUPPORT.
-                        */
-                       if (sksec->sclass == SECCLASS_SCTP_SOCKET)
-                               return -EINVAL;
-                       else
-                               return -EAFNOSUPPORT;
+                       goto err_af;
                }
 
+               ad.type = LSM_AUDIT_DATA_NET;
+               ad.u.net = &net;
+               ad.u.net->sport = htons(snum);
+               ad.u.net->family = family_sa;
+
                if (snum) {
                        int low, high;
 
@@ -4637,10 +4646,6 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
                                                      snum, &sid);
                                if (err)
                                        goto out;
-                               ad.type = LSM_AUDIT_DATA_NET;
-                               ad.u.net = &net;
-                               ad.u.net->sport = htons(snum);
-                               ad.u.net->family = family;
                                err = avc_has_perm(&selinux_state,
                                                   sksec->sid, sid,
                                                   sksec->sclass,
@@ -4672,16 +4677,11 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
                        break;
                }
 
-               err = sel_netnode_sid(addrp, family, &sid);
+               err = sel_netnode_sid(addrp, family_sa, &sid);
                if (err)
                        goto out;
 
-               ad.type = LSM_AUDIT_DATA_NET;
-               ad.u.net = &net;
-               ad.u.net->sport = htons(snum);
-               ad.u.net->family = family;
-
-               if (address->sa_family == AF_INET)
+               if (family_sa == AF_INET)
                        ad.u.net->v4info.saddr = addr4->sin_addr.s_addr;
                else
                        ad.u.net->v6info.saddr = addr6->sin6_addr;
@@ -4694,6 +4694,11 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
        }
 out:
        return err;
+err_af:
+       /* Note that SCTP services expect -EINVAL, others -EAFNOSUPPORT. */
+       if (sksec->sclass == SECCLASS_SCTP_SOCKET)
+               return -EINVAL;
+       return -EAFNOSUPPORT;
 }
 
 /* This supports connect(2) and SCTP connect services such as sctp_connectx(3)
@@ -4771,7 +4776,7 @@ static int selinux_socket_connect_helper(struct socket *sock,
                ad.type = LSM_AUDIT_DATA_NET;
                ad.u.net = &net;
                ad.u.net->dport = htons(snum);
-               ad.u.net->family = sk->sk_family;
+               ad.u.net->family = address->sa_family;
                err = avc_has_perm(&selinux_state,
                                   sksec->sid, sid, sksec->sclass, perm, &ad);
                if (err)
@@ -5272,6 +5277,7 @@ static int selinux_sctp_bind_connect(struct sock *sk, int optname,
        while (walk_size < addrlen) {
                addr = addr_buf;
                switch (addr->sa_family) {
+               case AF_UNSPEC:
                case AF_INET:
                        len = sizeof(struct sockaddr_in);
                        break;
@@ -5279,7 +5285,7 @@ static int selinux_sctp_bind_connect(struct sock *sk, int optname,
                        len = sizeof(struct sockaddr_in6);
                        break;
                default:
-                       return -EAFNOSUPPORT;
+                       return -EINVAL;
                }
 
                err = -EINVAL;
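With the hunks above, selinux_socket_bind() follows __inet_bind(): a bind with sa_family == AF_UNSPEC is treated as AF_INET, but only when the address is the INADDR_ANY wildcard; anything else takes the new err_af exit. A userspace call that exercises the accepted case could look roughly like this (hypothetical minimal program, subject to the usual SELinux policy and socket permissions):

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in sin;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_UNSPEC;		/* allowed only with... */
	sin.sin_addr.s_addr = htonl(INADDR_ANY);/* ...the wildcard address */
	sin.sin_port = 0;			/* any ephemeral port */

	if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0)
		perror("bind");			/* a non-ANY address would fail */
	else
		puts("AF_UNSPEC + INADDR_ANY bind accepted");

	close(fd);
	return 0;
}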
index 2ba95d6..caae484 100644 (file)
@@ -195,6 +195,12 @@ struct kvm_arch_memory_slot {
 #define KVM_REG_ARM_VFP_FPINST         0x1009
 #define KVM_REG_ARM_VFP_FPINST2                0x100A
 
+/* KVM-as-firmware specific pseudo-registers */
+#define KVM_REG_ARM_FW                 (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_FW_REG(r)          (KVM_REG_ARM | KVM_REG_SIZE_U64 | \
+                                        KVM_REG_ARM_FW | ((r) & 0xffff))
+#define KVM_REG_ARM_PSCI_VERSION       KVM_REG_ARM_FW_REG(0)
+
 /* Device Control API: ARM VGIC */
 #define KVM_DEV_ARM_VGIC_GRP_ADDR      0
 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
index 9abbf30..04b3256 100644 (file)
@@ -206,6 +206,12 @@ struct kvm_arch_memory_slot {
 #define KVM_REG_ARM_TIMER_CNT          ARM64_SYS_REG(3, 3, 14, 3, 2)
 #define KVM_REG_ARM_TIMER_CVAL         ARM64_SYS_REG(3, 3, 14, 0, 2)
 
+/* KVM-as-firmware specific pseudo-registers */
+#define KVM_REG_ARM_FW                 (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_FW_REG(r)          (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
+                                        KVM_REG_ARM_FW | ((r) & 0xffff))
+#define KVM_REG_ARM_PSCI_VERSION       KVM_REG_ARM_FW_REG(0)
+
 /* Device Control API: ARM VGIC */
 #define KVM_DEV_ARM_VGIC_GRP_ADDR      0
 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
index d554c11..578793e 100644 (file)
 #define X86_FEATURE_AVX512_VPOPCNTDQ   (16*32+14) /* POPCNT for vectors of DW/QW */
 #define X86_FEATURE_LA57               (16*32+16) /* 5-level page tables */
 #define X86_FEATURE_RDPID              (16*32+22) /* RDPID instruction */
+#define X86_FEATURE_CLDEMOTE           (16*32+25) /* CLDEMOTE instruction */
 
 /* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
 #define X86_FEATURE_OVERFLOW_RECOV     (17*32+ 0) /* MCA overflow recovery support */
index 1ea5459..53b60ad 100644 (file)
@@ -76,6 +76,8 @@ $(OUTPUT)bpf_asm: $(OUTPUT)bpf_asm.o $(OUTPUT)bpf_exp.yacc.o $(OUTPUT)bpf_exp.le
        $(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $^
 
 $(OUTPUT)bpf_exp.lex.c: $(OUTPUT)bpf_exp.yacc.c
+$(OUTPUT)bpf_exp.yacc.o: $(OUTPUT)bpf_exp.yacc.c
+$(OUTPUT)bpf_exp.lex.o: $(OUTPUT)bpf_exp.lex.c
 
 clean: bpftool_clean
        $(call QUIET_CLEAN, bpf-progs)
index 4f254bc..61b9aa5 100644 (file)
@@ -1063,7 +1063,7 @@ static int cmd_load_pcap(char *file)
 
 static int cmd_load(char *arg)
 {
-       char *subcmd, *cont, *tmp = strdup(arg);
+       char *subcmd, *cont = NULL, *tmp = strdup(arg);
        int ret = CMD_OK;
 
        subcmd = strtok_r(tmp, " ", &cont);
@@ -1073,7 +1073,10 @@ static int cmd_load(char *arg)
                bpf_reset();
                bpf_reset_breakpoints();
 
-               ret = cmd_load_bpf(cont);
+               if (!cont)
+                       ret = CMD_ERR;
+               else
+                       ret = cmd_load_bpf(cont);
        } else if (matches(subcmd, "pcap") == 0) {
                ret = cmd_load_pcap(cont);
        } else {
index 1065006..b02c41e 100644 (file)
@@ -676,6 +676,13 @@ struct kvm_ioeventfd {
        __u8  pad[36];
 };
 
+#define KVM_X86_DISABLE_EXITS_MWAIT          (1 << 0)
+#define KVM_X86_DISABLE_EXITS_HTL            (1 << 1)
+#define KVM_X86_DISABLE_EXITS_PAUSE          (1 << 2)
+#define KVM_X86_DISABLE_VALID_EXITS          (KVM_X86_DISABLE_EXITS_MWAIT | \
+                                              KVM_X86_DISABLE_EXITS_HTL | \
+                                              KVM_X86_DISABLE_EXITS_PAUSE)
+
 /* for KVM_ENABLE_CAP */
 struct kvm_enable_cap {
        /* in */
index 8806ed5..f8d2167 100644 (file)
@@ -28,29 +28,46 @@ OPTIONS
 <command>...::
        Any command you can specify in a shell.
 
+-i::
+--input=<file>::
+       Input file name.
+
 -f::
 --force::
        Don't do ownership validation
 
 -t::
---type=::
+--type=<type>::
        Select the memory operation type: load or store (default: load,store)
 
 -D::
---dump-raw-samples=::
+--dump-raw-samples::
        Dump the raw decoded samples on the screen in a format that is easy to parse with
        one sample per line.
 
 -x::
---field-separator::
+--field-separator=<separator>::
        Specify the field separator used when dumping raw samples (-D option). By default,
        the separator is the space character.
 
 -C::
---cpu-list::
-       Restrict dump of raw samples to those provided via this option. Note that the same
-       option can be passed in record mode. It will be interpreted the same way as perf
-       record.
+--cpu=<cpu>::
+       Monitor only on the list of CPUs provided. Multiple CPUs can be provided as a
+        comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-2. Default
+        is to monitor all CPUs.
+-U::
+--hide-unresolved::
+       Only display entries resolved to a symbol.
+
+-p::
+--phys-data::
+       Record/Report sample physical addresses
+
+RECORD OPTIONS
+--------------
+-e::
+--event <event>::
+       Event selector. Use 'perf mem record -e list' to list available events.
 
 -K::
 --all-kernel::
@@ -60,12 +77,12 @@ OPTIONS
 --all-user::
        Configure all used events to run in user space.
 
---ldload::
-       Specify desired latency for loads event.
+-v::
+--verbose::
+       Be more verbose (show counter open errors, etc)
 
--p::
---phys-data::
-       Record/Report sample physical addresses
+--ldlat <n>::
+       Specify desired latency for loads event.
 
 In addition, for report all perf report options are valid, and for record
 all perf record options.
index 6cb48e4..3afe825 100644 (file)
@@ -87,6 +87,7 @@ struct auxtrace_record *auxtrace_record__init(struct perf_evlist *evlist,
        struct perf_evsel *pos;
        int diagnose = 0;
 
+       *err = 0;
        if (evlist->nr_entries == 0)
                return NULL;
 
index a4c30f1..163b92f 100644 (file)
@@ -146,21 +146,3 @@ char *get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
                zfree(&buf);
        return buf;
 }
-
-/*
- * Compare the cpuid string returned by get_cpuid() function
- * with the name generated by the jevents file read from
- * pmu-events/arch/s390/mapfile.csv.
- *
- * Parameter mapcpuid is the cpuid as stored in the
- * pmu-events/arch/s390/mapfile.csv. This is just the type number.
- * Parameter cpuid is the cpuid returned by function get_cpuid().
- */
-int strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
-{
-       char *cp = strchr(cpuid, ',');
-
-       if (cp == NULL)
-               return -1;
-       return strncmp(cp + 1, mapcpuid, strlen(mapcpuid));
-}
index 944070e..63eb490 100644 (file)
@@ -175,7 +175,7 @@ static const struct option options[] = {
        OPT_UINTEGER('s', "nr_secs"     , &p0.nr_secs,          "max number of seconds to run (default: 5 secs)"),
        OPT_UINTEGER('u', "usleep"      , &p0.sleep_usecs,      "usecs to sleep per loop iteration"),
 
-       OPT_BOOLEAN('R', "data_reads"   , &p0.data_reads,       "access the data via writes (can be mixed with -W)"),
+       OPT_BOOLEAN('R', "data_reads"   , &p0.data_reads,       "access the data via reads (can be mixed with -W)"),
        OPT_BOOLEAN('W', "data_writes"  , &p0.data_writes,      "access the data via writes (can be mixed with -R)"),
        OPT_BOOLEAN('B', "data_backwards", &p0.data_backwards,  "access the data backwards as well"),
        OPT_BOOLEAN('Z', "data_zero_memset", &p0.data_zero_memset,"access the data via glibc bzero only"),
index 147a27e..f17dc60 100644 (file)
@@ -172,6 +172,7 @@ static bool                 interval_count;
 static const char              *output_name;
 static int                     output_fd;
 static int                     print_free_counters_hint;
+static int                     print_mixed_hw_group_error;
 
 struct perf_stat {
        bool                     record;
@@ -1126,6 +1127,30 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
                fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
 }
 
+static bool is_mixed_hw_group(struct perf_evsel *counter)
+{
+       struct perf_evlist *evlist = counter->evlist;
+       u32 pmu_type = counter->attr.type;
+       struct perf_evsel *pos;
+
+       if (counter->nr_members < 2)
+               return false;
+
+       evlist__for_each_entry(evlist, pos) {
+               /* software events can be part of any hardware group */
+               if (pos->attr.type == PERF_TYPE_SOFTWARE)
+                       continue;
+               if (pmu_type == PERF_TYPE_SOFTWARE) {
+                       pmu_type = pos->attr.type;
+                       continue;
+               }
+               if (pmu_type != pos->attr.type)
+                       return true;
+       }
+
+       return false;
+}
+
 static void printout(int id, int nr, struct perf_evsel *counter, double uval,
                     char *prefix, u64 run, u64 ena, double noise,
                     struct runtime_stat *st)
@@ -1178,8 +1203,11 @@ static void printout(int id, int nr, struct perf_evsel *counter, double uval,
                        counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
                        csv_sep);
 
-               if (counter->supported)
+               if (counter->supported) {
                        print_free_counters_hint = 1;
+                       if (is_mixed_hw_group(counter))
+                               print_mixed_hw_group_error = 1;
+               }
 
                fprintf(stat_config.output, "%-*s%s",
                        csv_output ? 0 : unit_width,
@@ -1256,7 +1284,8 @@ static void uniquify_event_name(struct perf_evsel *counter)
        char *new_name;
        char *config;
 
-       if (!counter->pmu_name || !strncmp(counter->name, counter->pmu_name,
+       if (counter->uniquified_name ||
+           !counter->pmu_name || !strncmp(counter->name, counter->pmu_name,
                                           strlen(counter->pmu_name)))
                return;
 
@@ -1274,6 +1303,8 @@ static void uniquify_event_name(struct perf_evsel *counter)
                        counter->name = new_name;
                }
        }
+
+       counter->uniquified_name = true;
 }
 
 static void collect_all_aliases(struct perf_evsel *counter,
@@ -1757,6 +1788,11 @@ static void print_footer(void)
 "      echo 0 > /proc/sys/kernel/nmi_watchdog\n"
 "      perf stat ...\n"
 "      echo 1 > /proc/sys/kernel/nmi_watchdog\n");
+
+       if (print_mixed_hw_group_error)
+               fprintf(output,
+                       "The events in group usually have to be from "
+                       "the same PMU. Try reorganizing the group.\n");
 }
 
 static void print_counters(struct timespec *ts, int argc, const char **argv)
index ca76827..78bcf7f 100644 (file)
@@ -1,6 +1,6 @@
 Family-model,Version,Filename,EventType
-209[78],1,cf_z10,core
-281[78],1,cf_z196,core
-282[78],1,cf_zec12,core
-296[45],1,cf_z13,core
-3906,3,cf_z14,core
+^IBM.209[78].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z10,core
+^IBM.281[78].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z196,core
+^IBM.282[78].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_zec12,core
+^IBM.296[45].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z13,core
+^IBM.390[67].*[13]\.[1-5].[[:xdigit:]]+$,3,cf_z14,core
index 93656f2..7e3cce3 100644 (file)
@@ -29,7 +29,6 @@ GenuineIntel-6-4D,v13,silvermont,core
 GenuineIntel-6-4C,v13,silvermont,core
 GenuineIntel-6-2A,v15,sandybridge,core
 GenuineIntel-6-2C,v2,westmereep-dp,core
-GenuineIntel-6-2C,v2,westmereep-dp,core
 GenuineIntel-6-25,v2,westmereep-sp,core
 GenuineIntel-6-2F,v2,westmereex,core
 GenuineIntel-6-55,v1,skylakex,core
index f906b79..8a33ca4 100644 (file)
@@ -35,3 +35,6 @@ inherit=0
 # sampling disabled
 sample_freq=0
 sample_period=0
+freq=0
+write_backward=0
+sample_id_all=0
index 1ecc1f0..016882d 100755 (executable)
@@ -19,12 +19,10 @@ trace_libc_inet_pton_backtrace() {
        expected[1]=".*inet_pton[[:space:]]\($libc\)$"
        case "$(uname -m)" in
        s390x)
-               eventattr='call-graph=dwarf'
+               eventattr='call-graph=dwarf,max-stack=4'
                expected[2]="gaih_inet.*[[:space:]]\($libc|inlined\)$"
-               expected[3]="__GI_getaddrinfo[[:space:]]\($libc|inlined\)$"
+               expected[3]="(__GI_)?getaddrinfo[[:space:]]\($libc|inlined\)$"
                expected[4]="main[[:space:]]\(.*/bin/ping.*\)$"
-               expected[5]="__libc_start_main[[:space:]]\($libc\)$"
-               expected[6]="_start[[:space:]]\(.*/bin/ping.*\)$"
                ;;
        *)
                eventattr='max-stack=3'
index 3e87486..4cd2cf9 100644 (file)
@@ -930,8 +930,11 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
         * than leader in case leader 'leads' the sampling.
         */
        if ((leader != evsel) && leader->sample_read) {
-               attr->sample_freq   = 0;
-               attr->sample_period = 0;
+               attr->freq           = 0;
+               attr->sample_freq    = 0;
+               attr->sample_period  = 0;
+               attr->write_backward = 0;
+               attr->sample_id_all  = 0;
        }
 
        if (opts->no_samples)
@@ -1922,7 +1925,8 @@ try_fallback:
                goto fallback_missing_features;
        } else if (!perf_missing_features.group_read &&
                    evsel->attr.inherit &&
-                  (evsel->attr.read_format & PERF_FORMAT_GROUP)) {
+                  (evsel->attr.read_format & PERF_FORMAT_GROUP) &&
+                  perf_evsel__is_group_leader(evsel)) {
                perf_missing_features.group_read = true;
                pr_debug2("switching off group read\n");
                goto fallback_missing_features;
@@ -2754,8 +2758,14 @@ bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
                   (paranoid = perf_event_paranoid()) > 1) {
                const char *name = perf_evsel__name(evsel);
                char *new_name;
+               const char *sep = ":";
 
-               if (asprintf(&new_name, "%s%su", name, strchr(name, ':') ? "" : ":") < 0)
+               /* Is there already the separator in the name. */
+               if (strchr(name, '/') ||
+                   strchr(name, ':'))
+                       sep = "";
+
+               if (asprintf(&new_name, "%s%su", name, sep) < 0)
                        return false;
 
                if (evsel->name)
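The evsel.c hunk above widens the "leader leads the sampling" rule: when the group leader carries PERF_SAMPLE_READ, the members must not sample on their own, so freq, sample_freq/sample_period, write_backward and sample_id_all are all cleared on them. A rough userspace equivalent of that attribute setup follows, using the documented perf_event_open(2) syscall; the event choices and the minimal error handling are illustrative only.

#include <linux/perf_event.h>
#include <asm/unistd.h>
#include <stdio.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
			    int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr leader = { 0 }, member = { 0 };
	int lfd, mfd;

	/* Leader does the sampling and reads the whole group. */
	leader.type = PERF_TYPE_HARDWARE;
	leader.size = sizeof(leader);
	leader.config = PERF_COUNT_HW_CPU_CYCLES;
	leader.freq = 1;
	leader.sample_freq = 4000;
	leader.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_READ;
	leader.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID;
	leader.disabled = 1;

	/* Member values are read through the leader, so it must not sample
	 * by itself: this mirrors the fields cleared in perf_evsel__config(). */
	member.type = PERF_TYPE_HARDWARE;
	member.size = sizeof(member);
	member.config = PERF_COUNT_HW_INSTRUCTIONS;
	member.freq = 0;
	member.sample_period = 0;	/* union with sample_freq */
	member.write_backward = 0;
	member.sample_id_all = 0;

	lfd = perf_event_open(&leader, 0, -1, -1, 0);
	if (lfd < 0) {
		perror("perf_event_open leader");
		return 1;
	}
	mfd = perf_event_open(&member, 0, -1, lfd, 0);
	if (mfd < 0) {
		perror("perf_event_open member");
		return 1;
	}
	puts("group opened; the leader leads the sampling");
	close(mfd);
	close(lfd);
	return 0;
}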
index d3ee3af..92ec009 100644 (file)
@@ -115,6 +115,7 @@ struct perf_evsel {
        unsigned int            sample_size;
        int                     id_pos;
        int                     is_pos;
+       bool                    uniquified_name;
        bool                    snapshot;
        bool                    supported;
        bool                    needs_swap;
index 2eca847..32d5049 100644 (file)
@@ -1019,13 +1019,6 @@ int machine__load_vmlinux_path(struct machine *machine, enum map_type type)
        return ret;
 }
 
-static void map_groups__fixup_end(struct map_groups *mg)
-{
-       int i;
-       for (i = 0; i < MAP__NR_TYPES; ++i)
-               __map_groups__fixup_end(mg, i);
-}
-
 static char *get_kernel_version(const char *root_dir)
 {
        char version[PATH_MAX];
@@ -1233,6 +1226,7 @@ int machine__create_kernel_maps(struct machine *machine)
 {
        struct dso *kernel = machine__get_kernel(machine);
        const char *name = NULL;
+       struct map *map;
        u64 addr = 0;
        int ret;
 
@@ -1259,13 +1253,25 @@ int machine__create_kernel_maps(struct machine *machine)
                        machine__destroy_kernel_maps(machine);
                        return -1;
                }
-               machine__set_kernel_mmap(machine, addr, 0);
+
+               /* we have a real start address now, so re-order the kmaps */
+               map = machine__kernel_map(machine);
+
+               map__get(map);
+               map_groups__remove(&machine->kmaps, map);
+
+               /* assume it's the last in the kmaps */
+               machine__set_kernel_mmap(machine, addr, ~0ULL);
+
+               map_groups__insert(&machine->kmaps, map);
+               map__put(map);
        }
 
-       /*
-        * Now that we have all the maps created, just set the ->end of them:
-        */
-       map_groups__fixup_end(&machine->kmaps);
+       /* update end address of the kernel map using adjacent module address */
+       map = map__next(machine__kernel_map(machine));
+       if (map)
+               machine__set_kernel_mmap(machine, addr, map->start);
+
        return 0;
 }
 
index 61a5e50..d2fb597 100644 (file)
@@ -539,9 +539,10 @@ static bool pmu_is_uncore(const char *name)
 
 /*
  *  PMU CORE devices have different name other than cpu in sysfs on some
- *  platforms. looking for possible sysfs files to identify as core device.
+ *  platforms.
+ *  Looking for possible sysfs files to identify the arm core device.
  */
-static int is_pmu_core(const char *name)
+static int is_arm_pmu_core(const char *name)
 {
        struct stat st;
        char path[PATH_MAX];
@@ -550,12 +551,6 @@ static int is_pmu_core(const char *name)
        if (!sysfs)
                return 0;
 
-       /* Look for cpu sysfs (x86 and others) */
-       scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/cpu", sysfs);
-       if ((stat(path, &st) == 0) &&
-                       (strncmp(name, "cpu", strlen("cpu")) == 0))
-               return 1;
-
        /* Look for cpu sysfs (specific to arm) */
        scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s/cpus",
                                sysfs, name);
@@ -586,7 +581,7 @@ char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
  * cpuid string generated on this platform.
  * Otherwise return non-zero.
  */
-int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
+int strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
 {
        regex_t re;
        regmatch_t pmatch[1];
@@ -668,6 +663,7 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
        struct pmu_events_map *map;
        struct pmu_event *pe;
        const char *name = pmu->name;
+       const char *pname;
 
        map = perf_pmu__find_map(pmu);
        if (!map)
@@ -686,11 +682,9 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
                        break;
                }
 
-               if (!is_pmu_core(name)) {
-                       /* check for uncore devices */
-                       if (pe->pmu == NULL)
-                               continue;
-                       if (strncmp(pe->pmu, name, strlen(pe->pmu)))
+               if (!is_arm_pmu_core(name)) {
+                       pname = pe->pmu ? pe->pmu : "cpu";
+                       if (strncmp(pname, name, strlen(pname)))
                                continue;
                }
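Dropping the __weak here means s390 now goes through the same regex-based strcmp_cpuid_str() as every other architecture, which is why the s390 mapfile.csv above switched from bare machine types to full extended regular expressions. The sketch below replays that matching logic as a standalone program; the cpuid string is a made-up example of what get_cpuid() might return on a z13, used purely for illustration.

#include <regex.h>
#include <stdio.h>

/* Compare a mapfile regex against a cpuid string the way the generic
 * strcmp_cpuid_str() does: 0 means "matches". */
static int strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
{
	regmatch_t pmatch[1];
	regex_t re;
	int match;

	if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0)
		return -1;		/* treat a bad regex as a mismatch */
	match = !regexec(&re, cpuid, 1, pmatch, 0);
	regfree(&re);
	return match ? 0 : -1;
}

int main(void)
{
	/* Pattern taken from the updated mapfile.csv; the cpuid string is a
	 * hypothetical example for illustration only. */
	const char *pattern = "^IBM.296[45].*[13]\\.[1-5].[[:xdigit:]]+$";
	const char *cpuid = "IBM,2964,704,N96,3.5,002f";

	printf("cpuid %s the cf_z13 entry\n",
	       strcmp_cpuid_str(pattern, cpuid) == 0 ? "matches" : "does not match");
	return 0;
}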
 
index 2cccbba..f304be7 100644 (file)
@@ -56,6 +56,7 @@ INSTALL_SCRIPT = ${INSTALL_PROGRAM}
 # to compile vs uClibc, that can be done here as well.
 CROSS = #/usr/i386-linux-uclibc/usr/bin/i386-uclibc-
 CROSS_COMPILE ?= $(CROSS)
+LD = $(CC)
 HOSTCC = gcc
 
 # check if compiler option is supported
index 9cf83f8..5e1ab2f 100644 (file)
@@ -12,3 +12,6 @@ test_tcpbpf_user
 test_verifier_log
 feature
 test_libbpf_open
+test_sock
+test_sock_addr
+urandom_read
index faadbe2..4123d0a 100644 (file)
@@ -1108,7 +1108,7 @@ static void test_stacktrace_build_id(void)
 
        assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
               == 0);
-       assert(system("./urandom_read if=/dev/urandom of=/dev/zero count=4 2> /dev/null") == 0);
+       assert(system("./urandom_read") == 0);
        /* disable stack trace collection */
        key = 0;
        val = 1;
@@ -1158,7 +1158,7 @@ static void test_stacktrace_build_id(void)
        } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
 
        CHECK(build_id_matches < 1, "build id match",
-             "Didn't find expected build ID from the map");
+             "Didn't find expected build ID from the map\n");
 
 disable_pmu:
        ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
index 73bb20c..f4d99fa 100644 (file)
@@ -13,6 +13,7 @@
 #include <bpf/bpf.h>
 
 #include "cgroup_helpers.h"
+#include "bpf_rlimit.h"
 
 #ifndef ARRAY_SIZE
 # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
index d488f20..2950f80 100644 (file)
@@ -15,6 +15,7 @@
 #include <bpf/libbpf.h>
 
 #include "cgroup_helpers.h"
+#include "bpf_rlimit.h"
 
 #define CG_PATH        "/foo"
 #define CONNECT4_PROG_PATH     "./connect4_prog.o"
index c6e1dcf..9832a87 100755 (executable)
@@ -4,7 +4,7 @@ set -eu
 
 ping_once()
 {
-       ping -q -c 1 -W 1 ${1%%/*} >/dev/null 2>&1
+       ping -${1} -q -c 1 -W 1 ${2%%/*} >/dev/null 2>&1
 }
 
 wait_for_ip()
@@ -13,7 +13,7 @@ wait_for_ip()
        echo -n "Wait for testing IPv4/IPv6 to become available "
        for _i in $(seq ${MAX_PING_TRIES}); do
                echo -n "."
-               if ping_once ${TEST_IPv4} && ping_once ${TEST_IPv6}; then
+               if ping_once 4 ${TEST_IPv4} && ping_once 6 ${TEST_IPv6}; then
                        echo " OK"
                        return
                fi
index 826f38d..261c81f 100644 (file)
@@ -4,6 +4,7 @@
 all:
 
 TEST_PROGS := fw_run_tests.sh
+TEST_FILES := fw_fallback.sh fw_filesystem.sh fw_lib.sh
 
 include ../lib.mk
 
index 9ea31b5..962d7f4 100755 (executable)
@@ -154,11 +154,13 @@ test_finish()
        if [ "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
                echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout
        fi
-       if [ "$OLD_FWPATH" = "" ]; then
-               OLD_FWPATH=" "
-       fi
        if [ "$TEST_REQS_FW_SET_CUSTOM_PATH" = "yes" ]; then
-               echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path
+               if [ "$OLD_FWPATH" = "" ]; then
+                       # A zero-length write won't work; write a null byte
+                       printf '\000' >/sys/module/firmware_class/parameters/path
+               else
+                       echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path
+               fi
        fi
        if [ -f $FW ]; then
                rm -f "$FW"
index 06d638e..cffdd4e 100755 (executable)
@@ -66,5 +66,5 @@ if [ -f $FW_FORCE_SYSFS_FALLBACK ]; then
        run_test_config_0003
 else
        echo "Running basic kernel configuration, working with your config"
-       run_test
+       run_tests
 fi
index 786dce7..2aabab3 100644 (file)
@@ -29,7 +29,7 @@ do_reset
 
 echo "Test extended error support"
 echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="ping"' > events/sched/sched_wakeup/trigger
-echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="ping"' >> events/sched/sched_wakeup/trigger &>/dev/null
+! echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="ping"' >> events/sched/sched_wakeup/trigger 2> /dev/null
 if ! grep -q "ERROR:" events/sched/sched_wakeup/hist; then
     fail "Failed to generate extended error in histogram"
 fi
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc
new file mode 100644 (file)
index 0000000..c193dce
--- /dev/null
@@ -0,0 +1,44 @@
+#!/bin/sh
+# description: event trigger - test multiple actions on hist trigger
+
+
+do_reset() {
+    reset_trigger
+    echo > set_event
+    clear_trace
+}
+
+fail() { #msg
+    do_reset
+    echo $1
+    exit_fail
+}
+
+if [ ! -f set_event ]; then
+    echo "event tracing is not supported"
+    exit_unsupported
+fi
+
+if [ ! -f synthetic_events ]; then
+    echo "synthetic event is not supported"
+    exit_unsupported
+fi
+
+clear_synthetic_events
+reset_tracer
+do_reset
+
+echo "Test multiple actions on hist trigger"
+echo 'wakeup_latency u64 lat; pid_t pid' >> synthetic_events
+TRIGGER1=events/sched/sched_wakeup/trigger
+TRIGGER2=events/sched/sched_switch/trigger
+
+echo 'hist:keys=pid:ts0=common_timestamp.usecs if comm=="cyclictest"' > $TRIGGER1
+echo 'hist:keys=next_pid:wakeup_lat=common_timestamp.usecs-$ts0 if next_comm=="cyclictest"' >> $TRIGGER2
+echo 'hist:keys=next_pid:onmatch(sched.sched_wakeup).wakeup_latency(sched.sched_switch.$wakeup_lat,next_pid) if next_comm=="cyclictest"' >> $TRIGGER2
+echo 'hist:keys=next_pid:onmatch(sched.sched_wakeup).wakeup_latency(sched.sched_switch.$wakeup_lat,prev_pid) if next_comm=="cyclictest"' >> $TRIGGER2
+echo 'hist:keys=next_pid if next_comm=="cyclictest"' >> $TRIGGER2
+
+do_reset
+
+exit 0
index 195e9d4..c1b1a4d 100644 (file)
@@ -20,10 +20,10 @@ all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
 
 .ONESHELL:
 define RUN_TESTS
-       @export KSFT_TAP_LEVEL=`echo 1`;
-       @test_num=`echo 0`;
-       @echo "TAP version 13";
-       @for TEST in $(1); do                           \
+       @export KSFT_TAP_LEVEL=`echo 1`;                \
+       test_num=`echo 0`;                              \
+       echo "TAP version 13";                          \
+       for TEST in $(1); do                            \
                BASENAME_TEST=`basename $$TEST`;        \
                test_num=`echo $$test_num+1 | bc`;      \
                echo "selftests: $$BASENAME_TEST";      \
index 8f1e13d..3ff81a4 100644 (file)
@@ -5,7 +5,8 @@ CFLAGS =  -Wall -Wl,--no-as-needed -O2 -g
 CFLAGS += -I../../../../usr/include/
 
 TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh
-TEST_PROGS += fib_tests.sh fib-onlink-tests.sh in_netns.sh pmtu.sh
+TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh
+TEST_PROGS_EXTENDED := in_netns.sh
 TEST_GEN_FILES =  socket
 TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy
 TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
index 5b012f4..6f289a4 100644 (file)
@@ -66,7 +66,7 @@
         "cmdUnderTest": "$TC action add action bpf object-file _b.o index 667",
         "expExitCode": "0",
         "verifyCmd": "$TC action get action bpf index 667",
-        "matchPattern": "action order [0-9]*: bpf _b.o:\\[action\\] id [0-9]* tag 3b185187f1855c4c default-action pipe.*index 667 ref",
+        "matchPattern": "action order [0-9]*: bpf _b.o:\\[action\\] id [0-9]* tag 3b185187f1855c4c( jited)? default-action pipe.*index 667 ref",
         "matchCount": "1",
         "teardown": [
             "$TC action flush action bpf",
         "cmdUnderTest": "$TC action add action bpf object-file _c.o index 667",
         "expExitCode": "255",
         "verifyCmd": "$TC action get action bpf index 667",
-        "matchPattern": "action order [0-9]*: bpf _b.o:\\[action\\] id [0-9].*index 667 ref",
+        "matchPattern": "action order [0-9]*: bpf _c.o:\\[action\\] id [0-9].*index 667 ref",
         "matchCount": "0",
         "teardown": [
-            "$TC action flush action bpf",
+            [
+                "$TC action flush action bpf",
+                0,
+                1,
+                255
+            ],
             "rm -f _c.o"
         ]
     },
index 4037035..c9c3281 100644 (file)
@@ -100,12 +100,19 @@ asm (
        "       shl     $32, %r8\n"
        "       orq     $0x7f7f7f7f, %r8\n"
        "       movq    %r8, %r9\n"
-       "       movq    %r8, %r10\n"
-       "       movq    %r8, %r11\n"
-       "       movq    %r8, %r12\n"
-       "       movq    %r8, %r13\n"
-       "       movq    %r8, %r14\n"
-       "       movq    %r8, %r15\n"
+       "       incq    %r9\n"
+       "       movq    %r9, %r10\n"
+       "       incq    %r10\n"
+       "       movq    %r10, %r11\n"
+       "       incq    %r11\n"
+       "       movq    %r11, %r12\n"
+       "       incq    %r12\n"
+       "       movq    %r12, %r13\n"
+       "       incq    %r13\n"
+       "       movq    %r13, %r14\n"
+       "       incq    %r14\n"
+       "       movq    %r14, %r15\n"
+       "       incq    %r15\n"
        "       ret\n"
        "       .code32\n"
        "       .popsection\n"
@@ -128,12 +135,13 @@ int check_regs64(void)
        int err = 0;
        int num = 8;
        uint64_t *r64 = &regs64.r8;
+       uint64_t expected = 0x7f7f7f7f7f7f7f7fULL;
 
        if (!kernel_is_64bit)
                return 0;
 
        do {
-               if (*r64 == 0x7f7f7f7f7f7f7f7fULL)
+               if (*r64 == expected++)
                        continue; /* register did not change */
                if (syscall_addr != (long)&int80) {
                        /*
@@ -147,18 +155,17 @@ int check_regs64(void)
                                continue;
                        }
                } else {
-                       /* INT80 syscall entrypoint can be used by
+                       /*
+                        * INT80 syscall entrypoint can be used by
                         * 64-bit programs too, unlike SYSCALL/SYSENTER.
                         * Therefore it must preserve R12+
                         * (they are callee-saved registers in 64-bit C ABI).
                         *
-                        * This was probably historically not intended,
-                        * but R8..11 are clobbered (cleared to 0).
-                        * IOW: they are the only registers which aren't
-                        * preserved across INT80 syscall.
+                        * Starting in Linux 4.17 (and any kernel that
+                        * backports the change), R8..11 are preserved.
+                        * Historically (and probably unintentionally), they
+                        * were clobbered or zeroed.
                         */
-                       if (*r64 == 0 && num <= 11)
-                               continue;
                }
                printf("[FAIL]\tR%d has changed:%016llx\n", num, *r64);
                err++;
index dba629c..a4c1b76 100644 (file)
@@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
 static u32 kvm_next_vmid;
 static unsigned int kvm_vmid_bits __read_mostly;
-static DEFINE_SPINLOCK(kvm_vmid_lock);
+static DEFINE_RWLOCK(kvm_vmid_lock);
 
 static bool vgic_present;
 
@@ -473,11 +473,16 @@ static void update_vttbr(struct kvm *kvm)
 {
        phys_addr_t pgd_phys;
        u64 vmid;
+       bool new_gen;
 
-       if (!need_new_vmid_gen(kvm))
+       read_lock(&kvm_vmid_lock);
+       new_gen = need_new_vmid_gen(kvm);
+       read_unlock(&kvm_vmid_lock);
+
+       if (!new_gen)
                return;
 
-       spin_lock(&kvm_vmid_lock);
+       write_lock(&kvm_vmid_lock);
 
        /*
         * We need to re-check the vmid_gen here to ensure that if another vcpu
@@ -485,7 +490,7 @@ static void update_vttbr(struct kvm *kvm)
         * use the same vmid.
         */
        if (!need_new_vmid_gen(kvm)) {
-               spin_unlock(&kvm_vmid_lock);
+               write_unlock(&kvm_vmid_lock);
                return;
        }
 
@@ -519,7 +524,7 @@ static void update_vttbr(struct kvm *kvm)
        vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
        kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
 
-       spin_unlock(&kvm_vmid_lock);
+       write_unlock(&kvm_vmid_lock);
 }
 
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
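
The hunk above turns kvm_vmid_lock into a reader/writer lock: every entry takes only the read lock to test need_new_vmid_gen(), and the slow path takes the write lock and re-checks the condition, since another vCPU of the same VM may already have updated it. A minimal userspace analogue of that double-checked pattern, assuming a shared per-VM generation field; the sketch rolls the global generation unconditionally and leaves out the kernel's VMID-exhaustion handling:

/*
 * Hedged sketch (userspace, pthreads) of the read-mostly,
 * double-checked locking pattern used by update_vttbr() above.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static pthread_rwlock_t gen_lock = PTHREAD_RWLOCK_INITIALIZER;
static atomic_ulong current_gen = 1;

struct vm {
        unsigned long vmid_gen;     /* shared by all vCPU threads of one VM */
};

static bool needs_new_gen(struct vm *vm)
{
        return vm->vmid_gen != atomic_load(&current_gen);
}

static void update_gen(struct vm *vm)
{
        bool new_gen;

        /* Fast path: readers only, taken on every guest entry. */
        pthread_rwlock_rdlock(&gen_lock);
        new_gen = needs_new_gen(vm);
        pthread_rwlock_unlock(&gen_lock);

        if (!new_gen)
                return;

        /*
         * Slow path: exclusive lock, then re-check before acting, in
         * case another thread of the same VM won the race meanwhile.
         */
        pthread_rwlock_wrlock(&gen_lock);
        if (!needs_new_gen(vm)) {
                pthread_rwlock_unlock(&gen_lock);
                return;
        }
        atomic_fetch_add(&current_gen, 1);
        vm->vmid_gen = atomic_load(&current_gen);
        pthread_rwlock_unlock(&gen_lock);
}

int main(void)
{
        struct vm vm = { .vmid_gen = 0 };

        update_gen(&vm);        /* first call rolls the generation */
        update_gen(&vm);        /* second call takes only the fast path */
        return 0;
}
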
index 6919352..c4762be 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/arm-smccc.h>
 #include <linux/preempt.h>
 #include <linux/kvm_host.h>
+#include <linux/uaccess.h>
 #include <linux/wait.h>
 
 #include <asm/cputype.h>
@@ -427,3 +428,62 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
        smccc_set_retval(vcpu, val, 0, 0, 0);
        return 1;
 }
+
+int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
+{
+       return 1;               /* PSCI version */
+}
+
+int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
+{
+       if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices))
+               return -EFAULT;
+
+       return 0;
+}
+
+int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+       if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
+               void __user *uaddr = (void __user *)(long)reg->addr;
+               u64 val;
+
+               val = kvm_psci_version(vcpu, vcpu->kvm);
+               if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
+                       return -EFAULT;
+
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
+int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+{
+       if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
+               void __user *uaddr = (void __user *)(long)reg->addr;
+               bool wants_02;
+               u64 val;
+
+               if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
+                       return -EFAULT;
+
+               wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);
+
+               switch (val) {
+               case KVM_ARM_PSCI_0_1:
+                       if (wants_02)
+                               return -EINVAL;
+                       vcpu->kvm->arch.psci_version = val;
+                       return 0;
+               case KVM_ARM_PSCI_0_2:
+               case KVM_ARM_PSCI_1_0:
+                       if (!wants_02)
+                               return -EINVAL;
+                       vcpu->kvm->arch.psci_version = val;
+                       return 0;
+               }
+       }
+
+       return -EINVAL;
+}
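
The new handlers above expose the PSCI version through KVM's standard ONE_REG interface. A hedged sketch of how a VMM might read and write that register from userspace; it assumes an already-created vCPU fd, that KVM_REG_ARM_PSCI_VERSION is provided by the arch headers pulled in via <linux/kvm.h>, and the (major << 16) | minor version encoding documented in the new psci.txt:

/*
 * Hedged sketch: driving the PSCI "firmware register" from userspace.
 * Error handling is minimal; vcpu_fd comes from an existing VMM setup.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int get_psci_version(int vcpu_fd, uint64_t *ver)
{
        struct kvm_one_reg reg = {
                .id   = KVM_REG_ARM_PSCI_VERSION,
                .addr = (uint64_t)(uintptr_t)ver,
        };

        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

static int set_psci_version(int vcpu_fd, uint64_t ver)
{
        struct kvm_one_reg reg = {
                .id   = KVM_REG_ARM_PSCI_VERSION,
                .addr = (uint64_t)(uintptr_t)&ver,
        };

        return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

int limit_guest_to_psci_0_2(int vcpu_fd)
{
        const uint64_t psci_0_2 = (0ULL << 16) | 2;   /* PSCI_VERSION(0, 2) */
        uint64_t cur;

        if (get_psci_version(vcpu_fd, &cur))
                perror("KVM_GET_ONE_REG(PSCI_VERSION)");

        if (set_psci_version(vcpu_fd, psci_0_2)) {
                perror("KVM_SET_ONE_REG(PSCI_VERSION)");
                return -1;
        }
        return 0;
}
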
index 68378fe..e07156c 100644 (file)
@@ -423,7 +423,7 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data)
         * We cannot rely on the vgic maintenance interrupt to be
         * delivered synchronously. This means we can only use it to
         * exit the VM, and we perform the handling of EOIed
-        * interrupts on the exit path (see vgic_process_maintenance).
+        * interrupts on the exit path (see vgic_fold_lr_state).
         */
        return IRQ_HANDLED;
 }
index e21e2f4..ffc587b 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/irqchip/arm-gic.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
+#include <linux/nospec.h>
+
 #include <kvm/iodev.h>
 #include <kvm/arm_vgic.h>
 
@@ -324,6 +326,9 @@ static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
 
                if (n > vgic_v3_max_apr_idx(vcpu))
                        return 0;
+
+               n = array_index_nospec(n, 4);
+
                /* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
                return vgicv3->vgic_ap1r[n];
        }
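
This hunk and the vgic_get_irq() one later in the series follow the usual Spectre-v1 pattern: an architectural bounds check, then a branch-free clamp of the index before the array access. A self-contained userspace illustration that open-codes a generic mask purely for demonstration; kernel code should keep using array_index_nospec() from <linux/nospec.h>:

/*
 * Hedged sketch of the bounds-check-then-clamp pattern used above.
 * The mask computation mirrors the generic kernel fallback.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* ~0UL if index < size, 0 otherwise, computed without a branch. */
static inline unsigned long index_mask_nospec(unsigned long index,
                                              unsigned long size)
{
        return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
}

static uint32_t ap1r[4] = { 0x11, 0x22, 0x33, 0x44 };

static uint32_t read_ap1r(unsigned long n)
{
        if (n >= 4)                       /* architectural bounds check */
                return 0;

        n &= index_mask_nospec(n, 4);     /* clamp under misspeculation too */
        return ap1r[n];
}

int main(void)
{
        printf("%#x %#x\n", read_ap1r(2), read_ap1r(100));   /* 0x33 0 */
        return 0;
}
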
index dbe99d6..ff9655c 100644 (file)
@@ -289,10 +289,16 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
               irq->vcpu->cpu != -1) /* VCPU thread is running */
                cond_resched_lock(&irq->irq_lock);
 
-       if (irq->hw)
+       if (irq->hw) {
                vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
-       else
+       } else {
+               u32 model = vcpu->kvm->arch.vgic.vgic_model;
+
                irq->active = active;
+               if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
+                   active && vgic_irq_is_sgi(irq->intid))
+                       irq->active_source = requester_vcpu->vcpu_id;
+       }
 
        if (irq->active)
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
index 45aa433..a5f2e44 100644 (file)
@@ -37,13 +37,6 @@ void vgic_v2_init_lrs(void)
                vgic_v2_write_lr(i, 0);
 }
 
-void vgic_v2_set_npie(struct kvm_vcpu *vcpu)
-{
-       struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
-
-       cpuif->vgic_hcr |= GICH_HCR_NPIE;
-}
-
 void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
 {
        struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
@@ -71,13 +64,18 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
        int lr;
        unsigned long flags;
 
-       cpuif->vgic_hcr &= ~(GICH_HCR_UIE | GICH_HCR_NPIE);
+       cpuif->vgic_hcr &= ~GICH_HCR_UIE;
 
        for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
                u32 val = cpuif->vgic_lr[lr];
-               u32 intid = val & GICH_LR_VIRTUALID;
+               u32 cpuid, intid = val & GICH_LR_VIRTUALID;
                struct vgic_irq *irq;
 
+               /* Extract the source vCPU id from the LR */
+               cpuid = val & GICH_LR_PHYSID_CPUID;
+               cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
+               cpuid &= 7;
+
                /* Notify fds when the guest EOI'ed a level-triggered SPI */
                if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
                        kvm_notify_acked_irq(vcpu->kvm, 0,
@@ -90,17 +88,16 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
                /* Always preserve the active bit */
                irq->active = !!(val & GICH_LR_ACTIVE_BIT);
 
+               if (irq->active && vgic_irq_is_sgi(intid))
+                       irq->active_source = cpuid;
+
                /* Edge is the only case where we preserve the pending bit */
                if (irq->config == VGIC_CONFIG_EDGE &&
                    (val & GICH_LR_PENDING_BIT)) {
                        irq->pending_latch = true;
 
-                       if (vgic_irq_is_sgi(intid)) {
-                               u32 cpuid = val & GICH_LR_PHYSID_CPUID;
-
-                               cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
+                       if (vgic_irq_is_sgi(intid))
                                irq->source |= (1 << cpuid);
-                       }
                }
 
                /*
@@ -152,8 +149,15 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
        u32 val = irq->intid;
        bool allow_pending = true;
 
-       if (irq->active)
+       if (irq->active) {
                val |= GICH_LR_ACTIVE_BIT;
+               if (vgic_irq_is_sgi(irq->intid))
+                       val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
+               if (vgic_irq_is_multi_sgi(irq)) {
+                       allow_pending = false;
+                       val |= GICH_LR_EOI;
+               }
+       }
 
        if (irq->hw) {
                val |= GICH_LR_HW;
@@ -190,8 +194,10 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
                        BUG_ON(!src);
                        val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
                        irq->source &= ~(1 << (src - 1));
-                       if (irq->source)
+                       if (irq->source) {
                                irq->pending_latch = true;
+                               val |= GICH_LR_EOI;
+                       }
                }
        }
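
The vgic-v2 hunks above do two related things: they record the source CPU of an active SGI in the new active_source field when folding LR state, and when repopulating they inject that source back into the LR and request an EOI maintenance interrupt (GICH_LR_EOI) whenever further SGI instances remain to be delivered. A hedged, standalone sketch of how pending sources are drained one list register at a time; the bit positions mirror the GICH_LR_* layout, but the struct is a simplified stand-in for the kernel's vgic_irq:

/*
 * Hedged sketch: draining a multi-source GICv2 SGI, one LR per source,
 * as in vgic_v2_populate_lr() above. Simplified types, no locking.
 */
#include <stdint.h>
#include <stdio.h>
#include <strings.h>            /* ffs() */

#define LR_PENDING      (1u << 28)
#define LR_EOI          (1u << 19)
#define CPUID_SHIFT     10

struct sgi {
        uint32_t intid;
        uint8_t  source;        /* bitmap of requesting CPUs 0..7 */
        int      pending_latch;
};

static uint32_t populate_lr(struct sgi *irq)
{
        uint32_t val = irq->intid | LR_PENDING;
        int src = ffs(irq->source);             /* lowest source, 1-based */

        val |= (uint32_t)(src - 1) << CPUID_SHIFT;
        irq->source &= ~(1u << (src - 1));
        if (irq->source) {
                /*
                 * More sources left: keep the interrupt latched and ask
                 * for a maintenance interrupt on EOI so the next source
                 * can be injected.
                 */
                irq->pending_latch = 1;
                val |= LR_EOI;
        }
        return val;
}

int main(void)
{
        struct sgi sgi = { .intid = 1, .source = 0x05 };   /* CPUs 0 and 2 */

        while (sgi.source)
                printf("LR = %#x\n", populate_lr(&sgi));
        return 0;
}
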
 
index 8195f52..c7423f3 100644 (file)
@@ -27,13 +27,6 @@ static bool group1_trap;
 static bool common_trap;
 static bool gicv4_enable;
 
-void vgic_v3_set_npie(struct kvm_vcpu *vcpu)
-{
-       struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
-
-       cpuif->vgic_hcr |= ICH_HCR_NPIE;
-}
-
 void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
 {
        struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
@@ -55,17 +48,23 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
        int lr;
        unsigned long flags;
 
-       cpuif->vgic_hcr &= ~(ICH_HCR_UIE | ICH_HCR_NPIE);
+       cpuif->vgic_hcr &= ~ICH_HCR_UIE;
 
        for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
                u64 val = cpuif->vgic_lr[lr];
-               u32 intid;
+               u32 intid, cpuid;
                struct vgic_irq *irq;
+               bool is_v2_sgi = false;
 
-               if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
+               cpuid = val & GICH_LR_PHYSID_CPUID;
+               cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
+
+               if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
                        intid = val & ICH_LR_VIRTUAL_ID_MASK;
-               else
+               } else {
                        intid = val & GICH_LR_VIRTUALID;
+                       is_v2_sgi = vgic_irq_is_sgi(intid);
+               }
 
                /* Notify fds when the guest EOI'ed a level-triggered IRQ */
                if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
@@ -81,18 +80,16 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
                /* Always preserve the active bit */
                irq->active = !!(val & ICH_LR_ACTIVE_BIT);
 
+               if (irq->active && is_v2_sgi)
+                       irq->active_source = cpuid;
+
                /* Edge is the only case where we preserve the pending bit */
                if (irq->config == VGIC_CONFIG_EDGE &&
                    (val & ICH_LR_PENDING_BIT)) {
                        irq->pending_latch = true;
 
-                       if (vgic_irq_is_sgi(intid) &&
-                           model == KVM_DEV_TYPE_ARM_VGIC_V2) {
-                               u32 cpuid = val & GICH_LR_PHYSID_CPUID;
-
-                               cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
+                       if (is_v2_sgi)
                                irq->source |= (1 << cpuid);
-                       }
                }
 
                /*
@@ -133,10 +130,20 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 {
        u32 model = vcpu->kvm->arch.vgic.vgic_model;
        u64 val = irq->intid;
-       bool allow_pending = true;
+       bool allow_pending = true, is_v2_sgi;
 
-       if (irq->active)
+       is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
+                    model == KVM_DEV_TYPE_ARM_VGIC_V2);
+
+       if (irq->active) {
                val |= ICH_LR_ACTIVE_BIT;
+               if (is_v2_sgi)
+                       val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
+               if (vgic_irq_is_multi_sgi(irq)) {
+                       allow_pending = false;
+                       val |= ICH_LR_EOI;
+               }
+       }
 
        if (irq->hw) {
                val |= ICH_LR_HW;
@@ -174,8 +181,10 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
                        BUG_ON(!src);
                        val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
                        irq->source &= ~(1 << (src - 1));
-                       if (irq->source)
+                       if (irq->source) {
                                irq->pending_latch = true;
+                               val |= ICH_LR_EOI;
+                       }
                }
        }
 
index e74baec..97bfba8 100644 (file)
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/list_sort.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
+#include <linux/nospec.h>
+
 #include <asm/kvm_hyp.h>
 
 #include "vgic.h"
@@ -101,12 +103,16 @@ struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
                              u32 intid)
 {
        /* SGIs and PPIs */
-       if (intid <= VGIC_MAX_PRIVATE)
+       if (intid <= VGIC_MAX_PRIVATE) {
+               intid = array_index_nospec(intid, VGIC_MAX_PRIVATE);
                return &vcpu->arch.vgic_cpu.private_irqs[intid];
+       }
 
        /* SPIs */
-       if (intid <= VGIC_MAX_SPI)
+       if (intid <= VGIC_MAX_SPI) {
+               intid = array_index_nospec(intid, VGIC_MAX_SPI);
                return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
+       }
 
        /* LPIs */
        if (intid >= VGIC_MIN_LPI)
@@ -594,6 +600,7 @@ retry:
 
        list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
                struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
+               bool target_vcpu_needs_kick = false;
 
                spin_lock(&irq->irq_lock);
 
@@ -664,11 +671,18 @@ retry:
                        list_del(&irq->ap_list);
                        irq->vcpu = target_vcpu;
                        list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
+                       target_vcpu_needs_kick = true;
                }
 
                spin_unlock(&irq->irq_lock);
                spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
                spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags);
+
+               if (target_vcpu_needs_kick) {
+                       kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
+                       kvm_vcpu_kick(target_vcpu);
+               }
+
                goto retry;
        }
 
@@ -711,14 +725,6 @@ static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
                vgic_v3_set_underflow(vcpu);
 }
 
-static inline void vgic_set_npie(struct kvm_vcpu *vcpu)
-{
-       if (kvm_vgic_global_state.type == VGIC_V2)
-               vgic_v2_set_npie(vcpu);
-       else
-               vgic_v3_set_npie(vcpu);
-}
-
 /* Requires the ap_list_lock to be held. */
 static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
                                 bool *multi_sgi)
@@ -732,17 +738,15 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
 
        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
+               int w;
+
                spin_lock(&irq->irq_lock);
                /* GICv2 SGIs can count for more than one... */
-               if (vgic_irq_is_sgi(irq->intid) && irq->source) {
-                       int w = hweight8(irq->source);
-
-                       count += w;
-                       *multi_sgi |= (w > 1);
-               } else {
-                       count++;
-               }
+               w = vgic_irq_get_lr_count(irq);
                spin_unlock(&irq->irq_lock);
+
+               count += w;
+               *multi_sgi |= (w > 1);
        }
        return count;
 }
@@ -753,7 +757,6 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
        int count;
-       bool npie = false;
        bool multi_sgi;
        u8 prio = 0xff;
 
@@ -783,10 +786,8 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
                if (likely(vgic_target_oracle(irq) == vcpu)) {
                        vgic_populate_lr(vcpu, irq, count++);
 
-                       if (irq->source) {
-                               npie = true;
+                       if (irq->source)
                                prio = irq->priority;
-                       }
                }
 
                spin_unlock(&irq->irq_lock);
@@ -799,9 +800,6 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
                }
        }
 
-       if (npie)
-               vgic_set_npie(vcpu);
-
        vcpu->arch.vgic_cpu.used_lrs = count;
 
        /* Nuke remaining LRs */
index 830e815..32c25d4 100644 (file)
@@ -110,6 +110,20 @@ static inline bool vgic_irq_is_mapped_level(struct vgic_irq *irq)
        return irq->config == VGIC_CONFIG_LEVEL && irq->hw;
 }
 
+static inline int vgic_irq_get_lr_count(struct vgic_irq *irq)
+{
+       /* Account for the active state as an interrupt */
+       if (vgic_irq_is_sgi(irq->intid) && irq->source)
+               return hweight8(irq->source) + irq->active;
+
+       return irq_is_pending(irq) || irq->active;
+}
+
+static inline bool vgic_irq_is_multi_sgi(struct vgic_irq *irq)
+{
+       return vgic_irq_get_lr_count(irq) > 1;
+}
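+
+A short worked example of the accounting above, with simplified stand-in types: a GICv2 SGI that is currently active on one CPU and still pending from two other sources occupies three LR slots, so it counts as a multi-SGI and the flush path reserves LRs (and EOI maintenance) accordingly:
+
+/*
+ * Hedged sketch mirroring vgic_irq_get_lr_count(); the struct is a
+ * stand-in, not the kernel's vgic_irq.
+ */
+#include <stdbool.h>
+#include <stdio.h>
+
+struct irq_sketch {
+        bool is_sgi;
+        bool active;
+        bool pending;
+        unsigned char source;   /* bitmap of pending source CPUs */
+};
+
+static int lr_count(const struct irq_sketch *irq)
+{
+        /* Account for the active state as an extra slot. */
+        if (irq->is_sgi && irq->source)
+                return __builtin_popcount(irq->source) + irq->active;
+
+        return irq->pending || irq->active;
+}
+
+int main(void)
+{
+        struct irq_sketch sgi = {
+                .is_sgi = true, .active = true, .source = 0x06, /* CPUs 1,2 */
+        };
+
+        printf("LRs needed: %d, multi: %s\n", lr_count(&sgi),
+               lr_count(&sgi) > 1 ? "yes" : "no");
+        return 0;       /* prints: LRs needed: 3, multi: yes */
+}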
+
 /*
  * This struct provides an intermediate representation of the fields contained
  * in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC