Merge tag 'driver-core-5.11-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Linus Torvalds <torvalds@linux-foundation.org>
Sun, 24 Jan 2021 19:05:48 +0000 (11:05 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 24 Jan 2021 19:05:48 +0000 (11:05 -0800)
Pull driver core fixes from Greg KH:
 "Here are some small driver core fixes for 5.11-rc5 that resolve some
  reported problems:

   - revert of a -rc1 patch that was causing problems with some machines

   - device link device name collision problem fix (busses only have to
     name devices unique to their bus, not unique to all busses)

   - kernfs splice bugfixes to resolve firmware loading problems for
     Qualcomm systems.

   - other tiny driver core fixes for minor issues reported.

  All of these have been in linux-next with no reported problems"

* tag 'driver-core-5.11-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core:
  driver core: Fix device link device name collision
  driver core: Extend device_is_dependent()
  kernfs: wire up ->splice_read and ->splice_write
  kernfs: implement ->write_iter
  kernfs: implement ->read_iter
  Revert "driver core: Reorder devices on successful probe"
  Driver core: platform: Add extra error check in devm_platform_get_irqs_affinity()
  drivers core: Free dma_range_map when driver probe failed

1008 files changed:
.mailmap
CREDITS
Documentation/ABI/testing/sysfs-driver-ufs
Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst
Documentation/RCU/Design/Requirements/Requirements.rst
Documentation/admin-guide/binfmt-misc.rst
Documentation/admin-guide/bootconfig.rst
Documentation/admin-guide/device-mapper/dm-integrity.rst
Documentation/admin-guide/kernel-parameters.rst
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/mm/concepts.rst
Documentation/core-api/index.rst
Documentation/dev-tools/kunit/usage.rst
Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml
Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml
Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
Documentation/devicetree/bindings/iio/accel/bosch,bma255.yaml
Documentation/devicetree/bindings/net/renesas,etheravb.yaml
Documentation/devicetree/bindings/net/snps,dwmac.yaml
Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
Documentation/devicetree/bindings/regulator/qcom,rpmh-regulator.txt
Documentation/devicetree/bindings/sound/ti,j721e-cpb-audio.yaml
Documentation/devicetree/bindings/sound/ti,j721e-cpb-ivi-audio.yaml
Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
Documentation/doc-guide/sphinx.rst
Documentation/firmware-guide/acpi/apei/einj.rst
Documentation/hwmon/sbtsi_temp.rst
Documentation/kbuild/makefiles.rst
Documentation/kernel-hacking/locking.rst
Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst
Documentation/networking/netdev-FAQ.rst
Documentation/networking/netdevices.rst
Documentation/networking/packet_mmap.rst
Documentation/networking/tls-offload.rst
Documentation/process/4.Coding.rst
Documentation/sound/alsa-configuration.rst
Documentation/sound/kernel-api/writing-an-alsa-driver.rst
Documentation/virt/kvm/api.rst
MAINTAINERS
Makefile
arch/Kconfig
arch/arc/Makefile
arch/arc/boot/Makefile
arch/arc/include/asm/page.h
arch/arc/kernel/entry.S
arch/arc/plat-hsdk/Kconfig
arch/arm/boot/dts/omap3-n950-n9.dtsi
arch/arm/boot/dts/picoxcell-pc3x2.dtsi
arch/arm/boot/dts/ste-ux500-samsung-golden.dts
arch/arm/configs/omap2plus_defconfig
arch/arm/crypto/chacha-glue.c
arch/arm/mach-omap2/omap_device.c
arch/arm/mach-omap2/pmic-cpcap.c
arch/arm/xen/enlighten.c
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/boot/dts/bitmain/bm1880.dtsi
arch/arm64/include/asm/atomic.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/uaccess.h
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/entry-ftrace.S
arch/arm64/kernel/entry.S
arch/arm64/kernel/perf_event.c
arch/arm64/kernel/probes/kprobes.c
arch/arm64/kernel/probes/kprobes_trampoline.S
arch/arm64/kernel/signal.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/syscall.c
arch/arm64/kernel/traps.c
arch/arm64/kernel/vdso/Makefile
arch/arm64/kernel/vdso/vdso.lds.S
arch/arm64/kvm/Kconfig
arch/arm64/kvm/Makefile
arch/arm64/kvm/arch_timer.c
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
arch/arm64/kvm/hyp/nvhe/hyp-main.c
arch/arm64/kvm/hyp/nvhe/hyp-smp.c
arch/arm64/kvm/hyp/nvhe/psci-relay.c
arch/arm64/kvm/pmu-emul.c
arch/arm64/kvm/sys_regs.c
arch/arm64/kvm/va_layout.c
arch/arm64/kvm/vgic/vgic-init.c
arch/arm64/kvm/vgic/vgic-v2.c
arch/arm64/kvm/vgic/vgic-v3.c
arch/arm64/mm/fault.c
arch/arm64/mm/init.c
arch/arm64/mm/proc.S
arch/ia64/include/asm/sparsemem.h
arch/mips/boot/compressed/decompress.c
arch/mips/cavium-octeon/octeon-irq.c
arch/mips/kernel/binfmt_elfn32.c
arch/mips/kernel/binfmt_elfo32.c
arch/mips/kernel/relocate.c
arch/openrisc/include/asm/io.h
arch/openrisc/mm/ioremap.c
arch/powerpc/include/asm/exception-64s.h
arch/powerpc/include/asm/feature-fixups.h
arch/powerpc/include/asm/vdso/gettimeofday.h
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/head_book3s_32.S
arch/powerpc/kernel/vmlinux.lds.S
arch/powerpc/lib/feature-fixups.c
arch/riscv/Kconfig
arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
arch/riscv/configs/defconfig
arch/riscv/include/asm/pgtable.h
arch/riscv/include/asm/vdso.h
arch/riscv/kernel/cacheinfo.c
arch/riscv/kernel/entry.S
arch/riscv/kernel/setup.c
arch/riscv/kernel/stacktrace.c
arch/riscv/kernel/time.c
arch/riscv/kernel/vdso.c
arch/riscv/mm/init.c
arch/riscv/mm/kasan_init.c
arch/x86/Kconfig
arch/x86/entry/common.c
arch/x86/hyperv/hv_init.c
arch/x86/hyperv/mmu.c
arch/x86/include/asm/fpu/api.h
arch/x86/include/asm/intel-family.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/mshyperv.h
arch/x86/include/asm/msr.h
arch/x86/include/asm/topology.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/mce/core.c
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/cpu/mtrr/generic.c
arch/x86/kernel/cpu/resctrl/rdtgroup.c
arch/x86/kernel/cpu/topology.c
arch/x86/kernel/fpu/core.c
arch/x86/kernel/sev-es-shared.c
arch/x86/kernel/sev-es.c
arch/x86/kernel/smpboot.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/mmu/tdp_mmu.h
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/lib/mmx_32.c
arch/x86/mm/pgtable.c
arch/x86/xen/enlighten_hvm.c
arch/x86/xen/smp_hvm.c
block/bfq-iosched.c
block/blk-iocost.c
block/blk-mq-debugfs.c
block/genhd.c
crypto/asymmetric_keys/asym_tpm.c
crypto/asymmetric_keys/public_key.c
crypto/ecdh.c
crypto/xor.c
drivers/acpi/Kconfig
drivers/acpi/internal.h
drivers/acpi/scan.c
drivers/acpi/x86/s2idle.c
drivers/atm/idt77252.c
drivers/base/core.c
drivers/base/regmap/regmap-debugfs.c
drivers/block/Kconfig
drivers/block/rnbd/Kconfig
drivers/block/rnbd/README
drivers/block/rnbd/rnbd-clt.c
drivers/block/rnbd/rnbd-srv.c
drivers/clk/tegra/clk-tegra30.c
drivers/counter/ti-eqep.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/powernow-k8.c
drivers/crypto/Kconfig
drivers/dma-buf/dma-buf.c
drivers/dma-buf/heaps/cma_heap.c
drivers/dma/dw-edma/dw-edma-core.c
drivers/dma/idxd/sysfs.c
drivers/dma/mediatek/mtk-hsdma.c
drivers/dma/milbeaut-xdmac.c
drivers/dma/qcom/bam_dma.c
drivers/dma/qcom/gpi.c
drivers/dma/stm32-mdma.c
drivers/dma/ti/k3-udma.c
drivers/dma/xilinx/xilinx_dma.c
drivers/gpio/Kconfig
drivers/gpio/gpio-mvebu.c
drivers/gpio/gpiolib-cdev.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdkfd/kfd_crat.c
drivers/gpu/drm/amd/display/Kconfig
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
drivers/gpu/drm/amd/display/dc/calcs/Makefile
drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/dcn10/Makefile
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
drivers/gpu/drm/amd/display/dc/dcn20/Makefile
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn21/Makefile
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
drivers/gpu/drm/amd/display/dc/dcn30/Makefile
drivers/gpu/drm/amd/display/dc/dcn301/Makefile
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
drivers/gpu/drm/amd/display/dc/dcn302/Makefile
drivers/gpu/drm/amd/display/dc/dml/Makefile
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
drivers/gpu/drm/amd/display/dc/dsc/Makefile
drivers/gpu/drm/amd/display/dc/os_types.h
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_gem_vram_helper.c
drivers/gpu/drm/drm_plane.c
drivers/gpu/drm/drm_syncobj.c
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/display/icl_dsi.c
drivers/gpu/drm/i915/display/intel_ddi.c
drivers/gpu/drm/i915/display/intel_display_types.h
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp.h
drivers/gpu/drm/i915/display/intel_hdcp.c
drivers/gpu/drm/i915/display/intel_panel.c
drivers/gpu/drm/i915/display/vlv_dsi.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gt/gen7_renderclear.c
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/gt/intel_ring_submission.c
drivers/gpu/drm/i915/gt/intel_timeline.c
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
drivers/gpu/drm/i915/gvt/display.c
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_cmd_parser.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_mitigations.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_mitigations.h [new file with mode: 0644]
drivers/gpu/drm/i915/i915_pmu.c
drivers/gpu/drm/i915/i915_request.h
drivers/gpu/drm/msm/adreno/a2xx_gpu.c
drivers/gpu/drm/msm/adreno/a3xx_gpu.c
drivers/gpu/drm/msm/adreno/a4xx_gpu.c
drivers/gpu/drm/msm/adreno/adreno_device.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/adreno/adreno_gpu.h
drivers/gpu/drm/msm/dp/dp_display.c
drivers/gpu/drm/msm/dp/dp_panel.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/nouveau/dispnv50/Kbuild
drivers/gpu/drm/nouveau/dispnv50/core.c
drivers/gpu/drm/nouveau/dispnv50/curs.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/dispnv50/disp.h
drivers/gpu/drm/nouveau/dispnv50/wimm.c
drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
drivers/gpu/drm/nouveau/dispnv50/wndw.c
drivers/gpu/drm/nouveau/dispnv50/wndw.h
drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvif/cl0080.h
drivers/gpu/drm/nouveau/include/nvif/class.h
drivers/gpu/drm/nouveau/include/nvkm/core/device.h
drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
drivers/gpu/drm/nouveau/nouveau_backlight.c
drivers/gpu/drm/nouveau/nvif/disp.c
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/rootga102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowramin.c
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/gpio/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/ttm/ttm_pool.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/hid/Kconfig
drivers/hid/amd-sfh-hid/amd_sfh_client.c
drivers/hid/amd-sfh-hid/amd_sfh_hid.h
drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-logitech-dj.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-uclogic-params.c
drivers/hid/hid-wiimote-core.c
drivers/hid/wacom_sys.c
drivers/hv/vmbus_drv.c
drivers/hwmon/amd_energy.c
drivers/hwmon/pwm-fan.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-mt65xx.c
drivers/i2c/busses/i2c-octeon-core.c
drivers/i2c/busses/i2c-sprd.c
drivers/i2c/busses/i2c-tegra-bpmp.c
drivers/i2c/busses/i2c-tegra.c
drivers/iio/adc/ti_am335x_adc.c
drivers/iio/common/st_sensors/st_sensors_trigger.c
drivers/iio/dac/ad5504.c
drivers/iio/proximity/sx9310.c
drivers/iio/temperature/mlx90632.c
drivers/infiniband/core/cma_configfs.c
drivers/infiniband/core/restrack.c
drivers/infiniband/core/ucma.c
drivers/infiniband/core/umem.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
drivers/infiniband/hw/usnic/usnic_ib_verbs.c
drivers/interconnect/imx/imx.c
drivers/interconnect/imx/imx8mq.c
drivers/interconnect/qcom/Kconfig
drivers/iommu/amd/init.c
drivers/iommu/amd/iommu.c
drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
drivers/iommu/dma-iommu.c
drivers/iommu/intel/dmar.c
drivers/iommu/intel/iommu.c
drivers/iommu/intel/irq_remapping.c
drivers/iommu/intel/svm.c
drivers/iommu/iova.c
drivers/irqchip/Kconfig
drivers/irqchip/irq-bcm2836.c
drivers/irqchip/irq-loongson-liointc.c
drivers/irqchip/irq-mips-cpu.c
drivers/irqchip/irq-sl28cpld.c
drivers/isdn/mISDN/Kconfig
drivers/lightnvm/Kconfig
drivers/md/Kconfig
drivers/md/bcache/features.c
drivers/md/bcache/features.h
drivers/md/bcache/super.c
drivers/md/dm-bufio.c
drivers/md/dm-crypt.c
drivers/md/dm-integrity.c
drivers/md/dm-raid.c
drivers/md/dm-snap.c
drivers/md/dm-table.c
drivers/md/dm.c
drivers/misc/habanalabs/common/command_submission.c
drivers/misc/habanalabs/common/device.c
drivers/misc/habanalabs/common/firmware_if.c
drivers/misc/habanalabs/common/habanalabs.h
drivers/misc/habanalabs/common/habanalabs_drv.c
drivers/misc/habanalabs/common/habanalabs_ioctl.c
drivers/misc/habanalabs/common/hw_queue.c
drivers/misc/habanalabs/common/pci.c
drivers/misc/habanalabs/gaudi/gaudi.c
drivers/misc/habanalabs/gaudi/gaudiP.h
drivers/misc/habanalabs/gaudi/gaudi_coresight.c
drivers/misc/habanalabs/goya/goya.c
drivers/misc/habanalabs/include/common/hl_boot_if.h
drivers/misc/pvpanic.c
drivers/mmc/core/queue.c
drivers/mmc/host/sdhci-brcmstb.c
drivers/mmc/host/sdhci-of-dwcmshc.c
drivers/mmc/host/sdhci-xenon.c
drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/raw/intel-nand-controller.c
drivers/mtd/nand/raw/nandsim.c
drivers/mtd/nand/raw/omap2.c
drivers/mtd/nand/spi/core.c
drivers/net/bareudp.c
drivers/net/can/Kconfig
drivers/net/can/dev.c
drivers/net/can/m_can/m_can.c
drivers/net/can/m_can/tcan4x5x.c
drivers/net/can/rcar/Kconfig
drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
drivers/net/can/usb/peak_usb/pcan_usb_fd.c
drivers/net/can/vxcan.c
drivers/net/dsa/b53/b53_common.c
drivers/net/dsa/hirschmann/Kconfig
drivers/net/dsa/lantiq_gswip.c
drivers/net/dsa/mv88e6xxx/global1_vtu.c
drivers/net/ethernet/aquantia/Kconfig
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
drivers/net/ethernet/ethoc.c
drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
drivers/net/ethernet/freescale/fs_enet/mii-fec.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/freescale/ucc_geth.h
drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
drivers/net/ethernet/marvell/octeontx2/af/cgx.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
drivers/net/ethernet/mellanox/mlx5/core/lag.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/rdma.c
drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/mscc/ocelot_net.c
drivers/net/ethernet/natsemi/macsonic.c
drivers/net/ethernet/natsemi/xtsonic.c
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
drivers/net/ethernet/qlogic/Kconfig
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qede/qede_fp.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
drivers/net/ethernet/stmicro/stmmac/dwmac5.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
drivers/net/ethernet/ti/cpts.c
drivers/net/ipa/gsi.c
drivers/net/ipa/ipa_clock.c
drivers/net/ipa/ipa_modem.c
drivers/net/mdio/mdio-bitbang.c
drivers/net/phy/smsc.c
drivers/net/ppp/ppp_generic.c
drivers/net/tun.c
drivers/net/usb/Kconfig
drivers/net/usb/cdc_ether.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/usb/r8153_ecm.c
drivers/net/usb/rndis_host.c
drivers/net/virtio_net.c
drivers/net/wan/Kconfig
drivers/net/wan/hdlc_ppp.c
drivers/net/wireless/ath/ath11k/core.c
drivers/net/wireless/ath/ath11k/dp_rx.c
drivers/net/wireless/ath/ath11k/mac.c
drivers/net/wireless/ath/ath11k/pci.c
drivers/net/wireless/ath/ath11k/pci.h
drivers/net/wireless/ath/ath11k/peer.c
drivers/net/wireless/ath/ath11k/peer.h
drivers/net/wireless/ath/ath11k/qmi.c
drivers/net/wireless/ath/ath11k/qmi.h
drivers/net/wireless/ath/ath11k/wmi.c
drivers/net/wireless/ath/wil6210/Kconfig
drivers/net/wireless/mediatek/mt76/mt7915/init.c
drivers/net/wireless/mediatek/mt76/sdio.c
drivers/net/wireless/mediatek/mt76/usb.c
drivers/net/wireless/realtek/rtlwifi/core.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/tcp.c
drivers/nvme/target/fcloop.c
drivers/nvme/target/rdma.c
drivers/perf/arm_pmu.c
drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
drivers/pinctrl/nomadik/pinctrl-nomadik.c
drivers/pinctrl/pinctrl-ingenic.c
drivers/pinctrl/qcom/pinctrl-msm.c
drivers/pinctrl/qcom/pinctrl-msm.h
drivers/platform/surface/Kconfig
drivers/platform/surface/surface_gpe.c
drivers/platform/x86/amd-pmc.c
drivers/platform/x86/hp-wmi.c
drivers/platform/x86/i2c-multi-instantiate.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/intel-vbtn.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/touchscreen_dmi.c
drivers/ptp/Kconfig
drivers/regulator/Kconfig
drivers/regulator/bd718x7-regulator.c
drivers/regulator/pf8x00-regulator.c
drivers/regulator/qcom-rpmh-regulator.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/fnic/vnic_dev.c
drivers/scsi/hisi_sas/hisi_sas.h
drivers/scsi/hisi_sas/hisi_sas_main.c
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/libfc/fc_exch.c
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/megaraid/megaraid_sas_fusion.c
drivers/scsi/mpt3sas/Kconfig
drivers/scsi/qedi/qedi_main.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_transport_srp.c
drivers/scsi/sd.c
drivers/scsi/ufs/Kconfig
drivers/scsi/ufs/ufshcd.c
drivers/soc/litex/litex_soc_ctrl.c
drivers/spi/spi-altera.c
drivers/spi/spi-cadence.c
drivers/spi/spi-fsl-spi.c
drivers/spi/spi-geni-qcom.c
drivers/spi/spi-stm32.c
drivers/spi/spi.c
drivers/staging/comedi/comedi_fops.c
drivers/staging/hikey9xx/hisi-spmi-controller.c
drivers/staging/media/atomisp/pci/atomisp_subdev.c
drivers/staging/mt7621-dma/mtk-hsdma.c
drivers/target/target_core_user.c
drivers/target/target_core_xcopy.c
drivers/target/target_core_xcopy.h
drivers/thunderbolt/icm.c
drivers/tty/Kconfig
drivers/tty/Makefile
drivers/tty/serial/mvebu-uart.c
drivers/tty/serial/sifive.c
drivers/tty/tty_io.c
drivers/tty/ttynull.c
drivers/usb/cdns3/cdns3-imx.c
drivers/usb/chipidea/ci_hdrc_imx.c
drivers/usb/class/cdc-acm.c
drivers/usb/class/cdc-wdm.c
drivers/usb/class/usblp.c
drivers/usb/core/hcd.c
drivers/usb/dwc3/core.h
drivers/usb/dwc3/dwc3-meson-g12a.c
drivers/usb/dwc3/gadget.c
drivers/usb/dwc3/ulpi.c
drivers/usb/gadget/Kconfig
drivers/usb/gadget/composite.c
drivers/usb/gadget/configfs.c
drivers/usb/gadget/function/f_printer.c
drivers/usb/gadget/function/f_uac2.c
drivers/usb/gadget/function/u_ether.c
drivers/usb/gadget/legacy/acm_ms.c
drivers/usb/gadget/udc/Kconfig
drivers/usb/gadget/udc/Makefile
drivers/usb/gadget/udc/aspeed-vhub/epn.c
drivers/usb/gadget/udc/bdc/Kconfig
drivers/usb/gadget/udc/core.c
drivers/usb/gadget/udc/dummy_hcd.c
drivers/usb/gadget/udc/fsl_mxc_udc.c [deleted file]
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-hub.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci-tegra.c
drivers/usb/host/xhci.c
drivers/usb/misc/yurex.c
drivers/usb/serial/iuu_phoenix.c
drivers/usb/serial/option.c
drivers/usb/storage/unusual_uas.h
drivers/usb/typec/altmodes/Kconfig
drivers/usb/typec/class.c
drivers/usb/typec/mux/intel_pmc_mux.c
drivers/usb/usbip/vhci_hcd.c
drivers/vhost/net.c
drivers/vhost/vsock.c
drivers/xen/events/events_base.c
drivers/xen/platform-pci.c
drivers/xen/privcmd.c
drivers/xen/xenbus/xenbus.h
drivers/xen/xenbus/xenbus_comms.c
drivers/xen/xenbus/xenbus_probe.c
fs/afs/dir.c
fs/afs/dir_edit.c
fs/afs/xdr_fs.h
fs/block_dev.c
fs/btrfs/backref.c
fs/btrfs/block-group.c
fs/btrfs/btrfs_inode.h
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/dev-replace.c
fs/btrfs/discard.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/file-item.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/print-tree.c
fs/btrfs/print-tree.h
fs/btrfs/qgroup.c
fs/btrfs/reflink.c
fs/btrfs/relocation.c
fs/btrfs/send.c
fs/btrfs/space-info.c
fs/btrfs/super.c
fs/btrfs/tests/btrfs-tests.c
fs/btrfs/tests/inode-tests.c
fs/btrfs/transaction.c
fs/btrfs/tree-checker.c
fs/btrfs/volumes.c
fs/cachefiles/rdwr.c
fs/ceph/mds_client.c
fs/cifs/connect.c
fs/cifs/dfs_cache.c
fs/cifs/fs_context.c
fs/cifs/smb2pdu.c
fs/cifs/smb2pdu.h
fs/cifs/transport.c
fs/ext4/ext4_jbd2.c
fs/ext4/ext4_jbd2.h
fs/ext4/fast_commit.c
fs/ext4/file.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/namei.c
fs/ext4/resize.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/fs-writeback.c
fs/io_uring.c
fs/namespace.c
fs/nfs/delegation.c
fs/nfs/internal.h
fs/nfs/nfs4proc.c
fs/nfs/nfs4super.c
fs/nfs/pnfs.c
fs/nfs/pnfs.h
fs/nfs/pnfs_nfs.c
fs/nfsd/nfs3xdr.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfs4xdr.c
fs/nfsd/nfssvc.c
fs/nfsd/xdr4.h
fs/notify/fanotify/fanotify_user.c
fs/proc/task_mmu.c
fs/select.c
fs/udf/super.c
fs/zonefs/Kconfig
include/asm-generic/bitops/atomic.h
include/kvm/arm_pmu.h
include/linux/acpi.h
include/linux/compiler-gcc.h
include/linux/compiler_attributes.h
include/linux/compiler_types.h
include/linux/console.h
include/linux/dm-bufio.h
include/linux/intel-iommu.h
include/linux/kasan.h
include/linux/kcov.h
include/linux/kthread.h
include/linux/ktime.h
include/linux/mdio-bitbang.h
include/linux/memcontrol.h
include/linux/mlx5/mlx5_ifc.h
include/linux/perf/arm_pmu.h
include/linux/rcupdate.h
include/linux/skbuff.h
include/linux/syscalls.h
include/linux/timekeeping32.h [deleted file]
include/linux/usb/usbnet.h
include/net/cfg80211.h
include/net/inet_connection_sock.h
include/net/mac80211.h
include/net/red.h
include/net/sock.h
include/net/xdp_sock.h
include/net/xsk_buff_pool.h
include/soc/nps/common.h [deleted file]
include/soc/nps/mtm.h [deleted file]
include/trace/events/afs.h
include/trace/events/sched.h
include/trace/events/sunrpc.h
include/uapi/linux/bcache.h
include/uapi/linux/if_link.h
include/uapi/linux/kvm.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/linux/ppp-ioctl.h
include/uapi/misc/habanalabs.h
include/xen/xenbus.h
init/main.c
kernel/bpf/bpf_inode_storage.c
kernel/bpf/bpf_task_storage.c
kernel/bpf/btf.c
kernel/bpf/cgroup.c
kernel/bpf/hashtab.c
kernel/bpf/helpers.c
kernel/bpf/syscall.c
kernel/bpf/task_iter.c
kernel/bpf/verifier.c
kernel/configs/android-recommended.config
kernel/fork.c
kernel/irq/manage.c
kernel/irq/msi.c
kernel/kthread.c
kernel/locking/lockdep.c
kernel/printk/printk.c
kernel/printk/printk_ringbuffer.c
kernel/rcu/tasks.h
kernel/sched/core.c
kernel/sched/sched.h
kernel/signal.c
kernel/smpboot.c
kernel/time/ntp.c
kernel/time/timekeeping.c
kernel/trace/Kconfig
kernel/trace/trace_kprobe.c
kernel/workqueue.c
lib/Kconfig.debug
lib/fonts/font_ter16x32.c
lib/iov_iter.c
lib/raid6/Makefile
mm/hugetlb.c
mm/kasan/init.c
mm/memblock.c
mm/memory-failure.c
mm/mempolicy.c
mm/page-writeback.c
mm/page_alloc.c
mm/process_vm_access.c
mm/slub.c
mm/vmalloc.c
mm/vmscan.c
net/8021q/vlan.c
net/bpf/test_run.c
net/can/isotp.c
net/ceph/auth_x.c
net/ceph/crypto.c
net/ceph/messenger_v1.c
net/ceph/messenger_v2.c
net/ceph/mon_client.c
net/ceph/osd_client.c
net/core/dev.c
net/core/devlink.c
net/core/gen_estimator.c
net/core/neighbour.c
net/core/net-sysfs.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock_reuseport.c
net/dcb/dcbnl.c
net/dsa/dsa2.c
net/dsa/master.c
net/ipv4/esp4.c
net/ipv4/fib_frontend.c
net/ipv4/gre_demux.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_output.c
net/ipv4/ip_tunnel.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_rpfilter.c
net/ipv4/nexthop.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/esp6.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_output.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/sit.c
net/lapb/lapb_iface.c
net/mac80211/debugfs.c
net/mac80211/rx.c
net/mac80211/tx.c
net/mptcp/protocol.c
net/ncsi/ncsi-rsp.c
net/netfilter/ipset/ip_set_hash_gen.h
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_nat_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_dynset.c
net/netfilter/xt_RATEEST.c
net/nfc/nci/core.c
net/packet/af_packet.c
net/qrtr/ns.c
net/qrtr/qrtr.c
net/qrtr/qrtr.h
net/rxrpc/input.c
net/rxrpc/key.c
net/sched/cls_flower.c
net/sched/cls_tcindex.c
net/sched/sch_api.c
net/sched/sch_choke.c
net/sched/sch_gred.c
net/sched/sch_red.c
net/sched/sch_sfq.c
net/sched/sch_taprio.c
net/smc/smc_core.c
net/smc/smc_ib.c
net/smc/smc_ism.c
net/sunrpc/addr.c
net/sunrpc/svc_xprt.c
net/sunrpc/svcsock.c
net/tipc/link.c
net/tipc/node.c
net/wireless/Kconfig
net/wireless/reg.c
net/xdp/xsk.c
net/xdp/xsk_buff_pool.c
net/xdp/xsk_queue.h
scripts/config
scripts/gcc-plugins/Makefile
scripts/kconfig/Makefile
scripts/kconfig/mconf-cfg.sh
security/lsm_audit.c
sound/core/seq/oss/seq_oss_synth.c
sound/firewire/fireface/ff-transaction.c
sound/firewire/tascam/tascam-transaction.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_tegra.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_via.c
sound/soc/amd/raven/pci-acp3x.c
sound/soc/amd/renoir/rn-pci-acp3x.c
sound/soc/atmel/Kconfig
sound/soc/codecs/Kconfig
sound/soc/codecs/hdmi-codec.c
sound/soc/codecs/max98373-i2c.c
sound/soc/codecs/max98373-sdw.c
sound/soc/codecs/max98373.c
sound/soc/codecs/max98373.h
sound/soc/codecs/rt711.c
sound/soc/fsl/imx-hdmi.c
sound/soc/intel/boards/haswell.c
sound/soc/intel/skylake/cnl-sst.c
sound/soc/meson/axg-tdm-interface.c
sound/soc/meson/axg-tdmin.c
sound/soc/qcom/lpass-cpu.c
sound/soc/qcom/lpass-platform.c
sound/soc/sh/rcar/adg.c
sound/soc/soc-dapm.c
sound/soc/sof/Kconfig
sound/soc/sof/intel/hda-codec.c
sound/soc/sof/intel/hda-dsp.c
sound/soc/sof/intel/hda.h
sound/usb/card.c
sound/usb/card.h
sound/usb/clock.c
sound/usb/endpoint.c
sound/usb/endpoint.h
sound/usb/implicit.c
sound/usb/midi.c
sound/usb/pcm.c
sound/usb/quirks-table.h
sound/usb/quirks.c
sound/usb/usbaudio.h
tools/bootconfig/scripts/bconf2ftrace.sh
tools/bootconfig/scripts/ftrace2bconf.sh
tools/bpf/bpftool/net.c
tools/bpf/resolve_btfids/main.c
tools/gpio/gpio-event-mon.c
tools/gpio/gpio-watch.c
tools/include/linux/build_bug.h
tools/include/uapi/linux/kvm.h
tools/lib/bpf/btf.c
tools/lib/perf/evlist.c
tools/lib/perf/tests/test-cpumap.c
tools/lib/perf/tests/test-evlist.c
tools/lib/perf/tests/test-evsel.c
tools/lib/perf/tests/test-threadmap.c
tools/objtool/check.c
tools/objtool/elf.c
tools/perf/builtin-script.c
tools/perf/examples/bpf/5sec.c
tools/perf/tests/shell/stat+shadow_stat.sh
tools/perf/util/header.c
tools/perf/util/machine.c
tools/perf/util/metricgroup.c
tools/perf/util/session.c
tools/perf/util/stat-shadow.c
tools/power/x86/intel-speed-select/isst-config.c
tools/testing/kunit/kunit.py
tools/testing/kunit/kunit_config.py
tools/testing/kunit/kunit_json.py
tools/testing/kunit/kunit_kernel.py
tools/testing/kunit/kunit_parser.py
tools/testing/selftests/Makefile
tools/testing/selftests/arm64/fp/fpsimd-test.S
tools/testing/selftests/arm64/fp/sve-test.S
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/prog_tests/test_local_storage.c
tools/testing/selftests/bpf/progs/bprm_opts.c
tools/testing/selftests/bpf/progs/local_storage.c
tools/testing/selftests/bpf/test_maps.c
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/bpf/verifier/spill_fill.c
tools/testing/selftests/bpf/xdpxceiver.c
tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/demand_paging_test.c
tools/testing/selftests/kvm/dirty_log_perf_test.c
tools/testing/selftests/kvm/dirty_log_test.c
tools/testing/selftests/kvm/include/guest_modes.h [new file with mode: 0644]
tools/testing/selftests/kvm/include/kvm_util.h
tools/testing/selftests/kvm/include/perf_test_util.h
tools/testing/selftests/kvm/lib/guest_modes.c [new file with mode: 0644]
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/lib/perf_test_util.c [new file with mode: 0644]
tools/testing/selftests/net/fib_nexthops.sh
tools/testing/selftests/net/fib_tests.sh
tools/testing/selftests/net/pmtu.sh
tools/testing/selftests/net/tls.c
tools/testing/selftests/net/udpgro.sh
tools/testing/selftests/netfilter/Makefile
tools/testing/selftests/netfilter/ipip-conntrack-mtu.sh [new file with mode: 0755]
tools/testing/selftests/netfilter/nft_conntrack_helper.sh
tools/testing/selftests/powerpc/alignment/alignment_handler.c
tools/testing/selftests/powerpc/mm/pkey_exec_prot.c
tools/testing/selftests/powerpc/mm/pkey_siginfo.c
tools/testing/selftests/vDSO/.gitignore
tools/testing/selftests/vDSO/vdso_test_correctness.c
tools/testing/selftests/wireguard/qemu/debug.config
virt/kvm/kvm_main.c

index 632700c..b1ab012 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -55,6 +55,8 @@ Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
 Ben Gardner <bgardner@wabtec.com>
 Ben M Cahill <ben.m.cahill@intel.com>
 Björn Steinbrink <B.Steinbrink@gmx.de>
+Björn Töpel <bjorn@kernel.org> <bjorn.topel@gmail.com>
+Björn Töpel <bjorn@kernel.org> <bjorn.topel@intel.com>
 Boris Brezillon <bbrezillon@kernel.org> <b.brezillon.dev@gmail.com>
 Boris Brezillon <bbrezillon@kernel.org> <b.brezillon@overkiz.com>
 Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@bootlin.com>
diff --git a/CREDITS b/CREDITS
index 090ed4b..9add7e6 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -710,6 +710,10 @@ S: Las Cuevas 2385 - Bo Guemes
 S: Las Heras, Mendoza CP 5539
 S: Argentina
 
+N: Jay Cliburn
+E: jcliburn@gmail.com
+D: ATLX Ethernet drivers
+
 N: Steven P. Cole
 E: scole@lanl.gov
 E: elenstev@mesatop.com
@@ -1284,6 +1288,10 @@ D: Major kbuild rework during the 2.5 cycle
 D: ISDN Maintainer
 S: USA
 
+N: Gerrit Renker
+E: gerrit@erg.abdn.ac.uk
+D: DCCP protocol support.
+
 N: Philip Gladstone
 E: philip@gladstonefamily.net
 D: Kernel / timekeeping stuff
@@ -2138,6 +2146,10 @@ E: seasons@falcon.sch.bme.hu
 E: seasons@makosteszta.sote.hu
 D: Original author of software suspend
 
+N: Alexey Kuznetsov
+E: kuznet@ms2.inr.ac.ru
+D: Author and maintainer of large parts of the networking stack
+
 N: Jaroslav Kysela
 E: perex@perex.cz
 W: https://www.perex.cz
@@ -2696,6 +2708,10 @@ N: Wolfgang Muees
 E: wolfgang@iksw-muees.de
 D: Auerswald USB driver
 
+N: Shrijeet Mukherjee
+E: shrijeet@gmail.com
+D: Network routing domains (VRF).
+
 N: Paul Mundt
 E: paul.mundt@gmail.com
 D: SuperH maintainer
@@ -4110,6 +4126,10 @@ S: B-1206 Jingmao Guojigongyu
 S: 16 Baliqiao Nanjie, Beijing 101100
 S: People's Repulic of China
 
+N: Aviad Yehezkel
+E: aviadye@nvidia.com
+D: Kernel TLS implementation and offload support.
+
 N: Victor Yodaiken
 E: yodaiken@fsmlabs.com
 D: RTLinux (RealTime Linux)
@@ -4167,6 +4187,10 @@ S: 1507 145th Place SE #B5
 S: Bellevue, Washington 98007
 S: USA
 
+N: Wensong Zhang
+E: wensong@linux-vs.org
+D: IP virtual server (IPVS).
+
 N: Haojian Zhuang
 E: haojian.zhuang@gmail.com
 D: MMP support
index adc0d0e..75ccc5c 100644 (file)
@@ -916,21 +916,25 @@ Date:             September 2014
 Contact:       Subhash Jadavani <subhashj@codeaurora.org>
 Description:   This entry could be used to set or show the UFS device
                runtime power management level. The current driver
-               implementation supports 6 levels with next target states:
+               implementation supports 7 levels with next target states:
 
                ==  ====================================================
-               0   an UFS device will stay active, an UIC link will
+               0   UFS device will stay active, UIC link will
                    stay active
-               1   an UFS device will stay active, an UIC link will
+               1   UFS device will stay active, UIC link will
                    hibernate
-               2   an UFS device will moved to sleep, an UIC link will
+               2   UFS device will be moved to sleep, UIC link will
                    stay active
-               3   an UFS device will moved to sleep, an UIC link will
+               3   UFS device will be moved to sleep, UIC link will
                    hibernate
-               4   an UFS device will be powered off, an UIC link will
+               4   UFS device will be powered off, UIC link will
                    hibernate
-               5   an UFS device will be powered off, an UIC link will
+               5   UFS device will be powered off, UIC link will
                    be powered off
+               6   UFS device will be moved to deep sleep, UIC link
+                   will be powered off. Note, deep sleep might not be
+                   supported in which case this value will not be
+                   accepted
                ==  ====================================================
 
 What:          /sys/bus/platform/drivers/ufshcd/*/rpm_target_dev_state
@@ -954,21 +958,25 @@ Date:             September 2014
 Contact:       Subhash Jadavani <subhashj@codeaurora.org>
 Description:   This entry could be used to set or show the UFS device
                system power management level. The current driver
-               implementation supports 6 levels with next target states:
+               implementation supports 7 levels with next target states:
 
                ==  ====================================================
-               0   an UFS device will stay active, an UIC link will
+               0   UFS device will stay active, UIC link will
                    stay active
-               1   an UFS device will stay active, an UIC link will
+               1   UFS device will stay active, UIC link will
                    hibernate
-               2   an UFS device will moved to sleep, an UIC link will
+               2   UFS device will be moved to sleep, UIC link will
                    stay active
-               3   an UFS device will moved to sleep, an UIC link will
+               3   UFS device will be moved to sleep, UIC link will
                    hibernate
-               4   an UFS device will be powered off, an UIC link will
+               4   UFS device will be powered off, UIC link will
                    hibernate
-               5   an UFS device will be powered off, an UIC link will
+               5   UFS device will be powered off, UIC link will
                    be powered off
+               6   UFS device will be moved to deep sleep, UIC link
+                   will be powered off. Note, deep sleep might not be
+                   supported in which case this value will not be
+                   accepted
                ==  ====================================================
 
 What:          /sys/bus/platform/drivers/ufshcd/*/spm_target_dev_state
index 83ae3b7..a648b42 100644 (file)
@@ -473,7 +473,7 @@ read-side critical sections that follow the idle period (the oval near
 the bottom of the diagram above).
 
 Plumbing this into the full grace-period execution is described
-`below <#Forcing%20Quiescent%20States>`__.
+`below <Forcing Quiescent States_>`__.
 
 CPU-Hotplug Interface
 ^^^^^^^^^^^^^^^^^^^^^
@@ -494,7 +494,7 @@ mask to detect CPUs having gone offline since the beginning of this
 grace period.
 
 Plumbing this into the full grace-period execution is described
-`below <#Forcing%20Quiescent%20States>`__.
+`below <Forcing Quiescent States_>`__.
 
 Forcing Quiescent States
 ^^^^^^^^^^^^^^^^^^^^^^^^
@@ -532,7 +532,7 @@ from other CPUs.
 | RCU. But this diagram is complex enough as it is, so simplicity       |
 | overrode accuracy. You can think of it as poetic license, or you can  |
 | think of it as misdirection that is resolved in the                   |
-| `stitched-together diagram <#Putting%20It%20All%20Together>`__.       |
+| `stitched-together diagram <Putting It All Together_>`__.             |
 +-----------------------------------------------------------------------+
 
 Grace-Period Cleanup
@@ -596,7 +596,7 @@ maintain ordering. For example, if the callback function wakes up a task
 that runs on some other CPU, proper ordering must in place in both the
 callback function and the task being awakened. To see why this is
 important, consider the top half of the `grace-period
-cleanup <#Grace-Period%20Cleanup>`__ diagram. The callback might be
+cleanup`_ diagram. The callback might be
 running on a CPU corresponding to the leftmost leaf ``rcu_node``
 structure, and awaken a task that is to run on a CPU corresponding to
 the rightmost leaf ``rcu_node`` structure, and the grace-period kernel
index e8c84fc..d4c9a01 100644 (file)
@@ -45,7 +45,7 @@ requirements:
 #. `Other RCU Flavors`_
 #. `Possible Future Changes`_
 
-This is followed by a `summary <#Summary>`__, however, the answers to
+This is followed by a summary_, however, the answers to
 each quick quiz immediately follows the quiz. Select the big white space
 with your mouse to see the answer.
 
@@ -1096,7 +1096,7 @@ memory barriers.
 | case, voluntary context switch) within an RCU read-side critical      |
 | section. However, sleeping locks may be used within userspace RCU     |
 | read-side critical sections, and also within Linux-kernel sleepable   |
-| RCU `(SRCU) <#Sleepable%20RCU>`__ read-side critical sections. In     |
+| RCU `(SRCU) <Sleepable RCU_>`__ read-side critical sections. In       |
 | addition, the -rt patchset turns spinlocks into a sleeping locks so   |
 | that the corresponding critical sections can be preempted, which also |
 | means that these sleeplockified spinlocks (but not other sleeping     |
@@ -1186,7 +1186,7 @@ non-preemptible (``CONFIG_PREEMPT=n``) kernels, and thus `tiny
 RCU <https://lkml.kernel.org/g/20090113221724.GA15307@linux.vnet.ibm.com>`__
 was born. Josh Triplett has since taken over the small-memory banner
 with his `Linux kernel tinification <https://tiny.wiki.kernel.org/>`__
-project, which resulted in `SRCU <#Sleepable%20RCU>`__ becoming optional
+project, which resulted in `SRCU <Sleepable RCU_>`__ becoming optional
 for those kernels not needing it.
 
 The remaining performance requirements are, for the most part,
@@ -1457,8 +1457,8 @@ will vary as the value of ``HZ`` varies, and can also be changed using
 the relevant Kconfig options and kernel boot parameters. RCU currently
 does not do much sanity checking of these parameters, so please use
 caution when changing them. Note that these forward-progress measures
-are provided only for RCU, not for `SRCU <#Sleepable%20RCU>`__ or `Tasks
-RCU <#Tasks%20RCU>`__.
+are provided only for RCU, not for `SRCU <Sleepable RCU_>`__ or `Tasks
+RCU`_.
 
 RCU takes the following steps in ``call_rcu()`` to encourage timely
 invocation of callbacks when any given non-\ ``rcu_nocbs`` CPU has
@@ -1477,8 +1477,8 @@ encouragement was provided:
 
 Again, these are default values when running at ``HZ=1000``, and can be
 overridden. Again, these forward-progress measures are provided only for
-RCU, not for `SRCU <#Sleepable%20RCU>`__ or `Tasks
-RCU <#Tasks%20RCU>`__. Even for RCU, callback-invocation forward
+RCU, not for `SRCU <Sleepable RCU_>`__ or `Tasks
+RCU`_. Even for RCU, callback-invocation forward
 progress for ``rcu_nocbs`` CPUs is much less well-developed, in part
 because workloads benefiting from ``rcu_nocbs`` CPUs tend to invoke
 ``call_rcu()`` relatively infrequently. If workloads emerge that need
@@ -1920,7 +1920,7 @@ Hotplug CPU
 
 The Linux kernel supports CPU hotplug, which means that CPUs can come
 and go. It is of course illegal to use any RCU API member from an
-offline CPU, with the exception of `SRCU <#Sleepable%20RCU>`__ read-side
+offline CPU, with the exception of `SRCU <Sleepable RCU_>`__ read-side
 critical sections. This requirement was present from day one in
 DYNIX/ptx, but on the other hand, the Linux kernel's CPU-hotplug
 implementation is “interesting.”
@@ -2177,7 +2177,7 @@ handles these states differently:
 However, RCU must be reliably informed as to whether any given CPU is
 currently in the idle loop, and, for ``NO_HZ_FULL``, also whether that
 CPU is executing in usermode, as discussed
-`earlier <#Energy%20Efficiency>`__. It also requires that the
+`earlier <Energy Efficiency_>`__. It also requires that the
 scheduling-clock interrupt be enabled when RCU needs it to be:
 
 #. If a CPU is either idle or executing in usermode, and RCU believes it
@@ -2294,7 +2294,7 @@ Performance, Scalability, Response Time, and Reliability
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Expanding on the `earlier
-discussion <#Performance%20and%20Scalability>`__, RCU is used heavily by
+discussion <Performance and Scalability_>`__, RCU is used heavily by
 hot code paths in performance-critical portions of the Linux kernel's
 networking, security, virtualization, and scheduling code paths. RCU
 must therefore use efficient implementations, especially in its
index 7a86413..59cd902 100644 (file)
@@ -23,7 +23,7 @@ Here is what the fields mean:
 
 - ``name``
    is an identifier string. A new /proc file will be created with this
-   ``name below /proc/sys/fs/binfmt_misc``; cannot contain slashes ``/`` for
+   name below ``/proc/sys/fs/binfmt_misc``; cannot contain slashes ``/`` for
    obvious reasons.
 - ``type``
    is the type of recognition. Give ``M`` for magic and ``E`` for extension.
@@ -83,7 +83,7 @@ Here is what the fields mean:
       ``F`` - fix binary
             The usual behaviour of binfmt_misc is to spawn the
            binary lazily when the misc format file is invoked.  However,
-           this doesn``t work very well in the face of mount namespaces and
+           this doesn't work very well in the face of mount namespaces and
            changeroots, so the ``F`` mode opens the binary as soon as the
            emulation is installed and uses the opened image to spawn the
            emulator, meaning it is always available once installed,
index 9b90efc..452b7dc 100644 (file)
@@ -154,7 +154,7 @@ get the boot configuration data.
 Because of this "piggyback" method, there is no need to change or
 update the boot loader and the kernel image itself as long as the boot
 loader passes the correct initrd file size. If by any chance, the boot
-loader passes a longer size, the kernel feils to find the bootconfig data.
+loader passes a longer size, the kernel fails to find the bootconfig data.
 
 To do this operation, Linux kernel provides "bootconfig" command under
 tools/bootconfig, which allows admin to apply or delete the config file
index 4e6f504..2cc5488 100644 (file)
@@ -177,14 +177,20 @@ bitmap_flush_interval:number
        The bitmap flush interval in milliseconds. The metadata buffers
        are synchronized when this interval expires.
 
+allow_discards
+       Allow block discard requests (a.k.a. TRIM) for the integrity device.
+       Discards are only allowed to devices using internal hash.
+
 fix_padding
        Use a smaller padding of the tag area that is more
        space-efficient. If this option is not present, large padding is
        used - that is for compatibility with older kernels.
 
-allow_discards
-       Allow block discard requests (a.k.a. TRIM) for the integrity device.
-       Discards are only allowed to devices using internal hash.
+legacy_recalculate
+       Allow recalculating of volumes with HMAC keys. This is disabled by
+       default for security reasons - an attacker could modify the volume,
+       set recalc_sector to zero, and the kernel would not detect the
+       modification.
 
 The journal mode (D/J), buffer_sectors, journal_watermark, commit_time and
 allow_discards can be changed when reloading the target (load an inactive
index 06fb1b4..682ab28 100644 (file)
@@ -3,8 +3,8 @@
 The kernel's command-line parameters
 ====================================
 
-The following is a consolidated list of the kernel parameters as
-implemented by the __setup(), core_param() and module_param() macros
+The following is a consolidated list of the kernel parameters as implemented
+by the __setup(), early_param(), core_param() and module_param() macros
 and sorted into English Dictionary order (defined as ignoring all
 punctuation and sorting digits before letters in a case insensitive
 manner), and with descriptions where known.
index c722ec1..a10b545 100644 (file)
 
        ftrace_filter=[function-list]
                        [FTRACE] Limit the functions traced by the function
-                       tracer at boot up. function-list is a comma separated
+                       tracer at boot up. function-list is a comma-separated
                        list of functions. This list can be changed at run
                        time by the set_ftrace_filter file in the debugfs
                        tracing directory.
        ftrace_graph_filter=[function-list]
                        [FTRACE] Limit the top level callers functions traced
                        by the function graph tracer at boot up.
-                       function-list is a comma separated list of functions
+                       function-list is a comma-separated list of functions
                        that can be changed at run time by the
                        set_graph_function file in the debugfs tracing directory.
 
        ftrace_graph_notrace=[function-list]
                        [FTRACE] Do not trace from the functions specified in
-                       function-list.  This list is a comma separated list of
+                       function-list.  This list is a comma-separated list of
                        functions that can be changed at run time by the
                        set_graph_notrace file in the debugfs tracing directory.
 
                        when set.
                        Format: <int>
 
-       libata.force=   [LIBATA] Force configurations.  The format is comma
+       libata.force=   [LIBATA] Force configurations.  The format is comma-
                        separated list of "[ID:]VAL" where ID is
                        PORT[.DEVICE].  PORT and DEVICE are decimal numbers
                        matching port, link or device.  Basically, it matches
 
        stacktrace_filter=[function-list]
                        [FTRACE] Limit the functions that the stack tracer
-                       will trace at boot up. function-list is a comma separated
+                       will trace at boot up. function-list is a comma-separated
                        list of functions. This list can be changed at run
                        time by the stack_trace_filter file in the debugfs
                        tracing directory. Note, this enables stack tracing
        trace_event=[event-list]
                        [FTRACE] Set and start specified trace events in order
                        to facilitate early boot debugging. The event-list is a
-                       comma separated list of trace events to enable. See
+                       comma-separated list of trace events to enable. See
                        also Documentation/trace/events.rst
 
        trace_options=[option-list]
                        This option is obsoleted by the "nopv" option, which
                        has equivalent effect for XEN platform.
 
+       xen_no_vector_callback
+                       [KNL,X86,XEN] Disable the vector callback for Xen
+                       event channel interrupts.
+
        xen_scrub_pages=        [XEN]
                        Boolean option to control scrubbing pages before giving them back
                        to Xen, for use by other domains. Can be also changed at runtime
index fa0974f..b966fcf 100644 (file)
@@ -184,7 +184,7 @@ pages either asynchronously or synchronously, depending on the state
 of the system. When the system is not loaded, most of the memory is free
 and allocation requests will be satisfied immediately from the free
 pages supply. As the load increases, the amount of the free pages goes
-down and when it reaches a certain threshold (high watermark), an
+down and when it reaches a certain threshold (low watermark), an
 allocation request will awaken the ``kswapd`` daemon. It will
 asynchronously scan memory pages and either just free them if the data
 they contain is available elsewhere, or evict to the backing storage
index 69171b1..f1c9d20 100644 (file)
@@ -53,7 +53,6 @@ How Linux keeps everything from happening at the same time.  See
 .. toctree::
    :maxdepth: 1
 
-   atomic_ops
    refcount-vs-atomic
    irq/index
    local_ops
index d9fdc14..650f995 100644 (file)
@@ -522,6 +522,63 @@ There's more boilerplate involved, but it can:
   * E.g. if we wanted to also test ``sha256sum``, we could add a ``sha256``
     field and reuse ``cases``.
 
+* be converted to a "parameterized test", see below.
+
+Parameterized Testing
+~~~~~~~~~~~~~~~~~~~~~
+
+The table-driven testing pattern is common enough that KUnit has special
+support for it.
+
+Reusing the same ``cases`` array from above, we can write the test as a
+"parameterized test" with the following.
+
+.. code-block:: c
+
+       // This is copy-pasted from above.
+       struct sha1_test_case {
+               const char *str;
+               const char *sha1;
+       };
+       struct sha1_test_case cases[] = {
+               {
+                       .str = "hello world",
+                       .sha1 = "2aae6c35c94fcfb415dbe95f408b9ce91ee846ed",
+               },
+               {
+                       .str = "hello world!",
+                       .sha1 = "430ce34d020724ed75a196dfc2ad67c77772d169",
+               },
+       };
+
+       // Need a helper function to generate a name for each test case.
+       static void case_to_desc(const struct sha1_test_case *t, char *desc)
+       {
+               strcpy(desc, t->str);
+       }
+       // Creates `sha1_gen_params()` to iterate over `cases`.
+       KUNIT_ARRAY_PARAM(sha1, cases, case_to_desc);
+
+       // Looks no different from a normal test.
+       static void sha1_test(struct kunit *test)
+       {
+               // This function can just contain the body of the for-loop.
+               // The former `cases[i]` is accessible under test->param_value.
+               char out[40];
+               struct sha1_test_case *test_param = (struct sha1_test_case *)(test->param_value);
+
+               sha1sum(test_param->str, out);
+               KUNIT_EXPECT_STREQ_MSG(test, (char *)out, test_param->sha1,
+                                     "sha1sum(%s)", test_param->str);
+       }
+
+       // Instead of KUNIT_CASE, we use KUNIT_CASE_PARAM and pass in the
+       // function declared by KUNIT_ARRAY_PARAM.
+       static struct kunit_case sha1_test_cases[] = {
+               KUNIT_CASE_PARAM(sha1_test, sha1_gen_params),
+               {}
+       };
+
 .. _kunit-on-non-uml:
 
 KUnit on non-UML architectures
index b15f68c..df29d59 100644 (file)
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2020 Texas Instruments Incorporated
+# Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 %YAML 1.2
 ---
 $id: http://devicetree.org/schemas/dma/ti/k3-bcdma.yaml#
@@ -7,7 +9,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Texas Instruments K3 DMSS BCDMA Device Tree Bindings
 
 maintainers:
-  - Peter Ujfalusi <peter.ujfalusi@ti.com>
+  - Peter Ujfalusi <peter.ujfalusi@gmail.com>
 
 description: |
   The Block Copy DMA (BCDMA) is intended to perform similar functions as the TR
index b13ab60..ea19d12 100644 (file)
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2020 Texas Instruments Incorporated
+# Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 %YAML 1.2
 ---
 $id: http://devicetree.org/schemas/dma/ti/k3-pktdma.yaml#
@@ -7,7 +9,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Texas Instruments K3 DMSS PKTDMA Device Tree Bindings
 
 maintainers:
-  - Peter Ujfalusi <peter.ujfalusi@ti.com>
+  - Peter Ujfalusi <peter.ujfalusi@gmail.com>
 
 description: |
   The Packet DMA (PKTDMA) is intended to perform similar functions as the packet
index 9a87fd9..6a09bbf 100644 (file)
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2019 Texas Instruments Incorporated
+# Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 %YAML 1.2
 ---
 $id: http://devicetree.org/schemas/dma/ti/k3-udma.yaml#
@@ -7,7 +9,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Texas Instruments K3 NAVSS Unified DMA Device Tree Bindings
 
 maintainers:
-  - Peter Ujfalusi <peter.ujfalusi@ti.com>
+  - Peter Ujfalusi <peter.ujfalusi@gmail.com>
 
 description: |
   The UDMA-P is intended to perform similar (but significantly upgraded)
index 6eef348..c2efbb8 100644 (file)
@@ -16,8 +16,8 @@ description:
 properties:
   compatible:
     enum:
-      - bosch,bmc150
-      - bosch,bmi055
+      - bosch,bmc150_accel
+      - bosch,bmi055_accel
       - bosch,bma255
       - bosch,bma250e
       - bosch,bma222
index 244befb..de9dd57 100644 (file)
@@ -163,6 +163,7 @@ allOf:
             enum:
               - renesas,etheravb-r8a774a1
               - renesas,etheravb-r8a774b1
+              - renesas,etheravb-r8a774e1
               - renesas,etheravb-r8a7795
               - renesas,etheravb-r8a7796
               - renesas,etheravb-r8a77961
index b2f6083..dfbf5fe 100644 (file)
@@ -161,7 +161,8 @@ properties:
             * snps,route-dcbcp, DCB Control Packets
             * snps,route-up, Untagged Packets
             * snps,route-multi-broad, Multicast & Broadcast Packets
-          * snps,priority, RX queue priority (Range 0x0 to 0xF)
+          * snps,priority, bitmask of the tagged frames priorities assigned to
+            the queue
 
   snps,mtl-tx-config:
     $ref: /schemas/types.yaml#/definitions/phandle
@@ -188,7 +189,10 @@ properties:
             * snps,idle_slope, unlock on WoL
             * snps,high_credit, max write outstanding req. limit
             * snps,low_credit, max read outstanding req. limit
-          * snps,priority, TX queue priority (Range 0x0 to 0xF)
+          * snps,priority, bitmask of the priorities assigned to the queue.
+            When a PFC frame is received with priorities matching the bitmask,
+            the queue is blocked from transmitting for the pause time specified
+            in the PFC frame.
 
   snps,reset-gpio:
     deprecated: true
index a6c259c..956156f 100644 (file)
@@ -19,7 +19,9 @@ description: |
 properties:
   compatible:
     enum:
-      - nxp,pf8x00
+      - nxp,pf8100
+      - nxp,pf8121a
+      - nxp,pf8200
 
   reg:
     maxItems: 1
@@ -118,7 +120,7 @@ examples:
         #size-cells = <0>;
 
         pmic@8 {
-            compatible = "nxp,pf8x00";
+            compatible = "nxp,pf8100";
             reg = <0x08>;
 
             regulators {
index b8f0b78..7d462b8 100644 (file)
@@ -44,6 +44,7 @@ First Level Nodes - PMIC
        Definition: Must be one of below:
                    "qcom,pm8005-rpmh-regulators"
                    "qcom,pm8009-rpmh-regulators"
+                   "qcom,pm8009-1-rpmh-regulators"
                    "qcom,pm8150-rpmh-regulators"
                    "qcom,pm8150l-rpmh-regulators"
                    "qcom,pm8350-rpmh-regulators"
index 805da4d..ec06789 100644 (file)
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2020 Texas Instruments Incorporated
+# Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 %YAML 1.2
 ---
 $id: http://devicetree.org/schemas/sound/ti,j721e-cpb-audio.yaml#
@@ -7,7 +9,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Texas Instruments J721e Common Processor Board Audio Support
 
 maintainers:
-  - Peter Ujfalusi <peter.ujfalusi@ti.com>
+  - Peter Ujfalusi <peter.ujfalusi@gmail.com>
 
 description: |
   The audio support on the board is using pcm3168a codec connected to McASP10
index bb780f6..ee9f960 100644 (file)
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2020 Texas Instruments Incorporated
+# Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 %YAML 1.2
 ---
 $id: http://devicetree.org/schemas/sound/ti,j721e-cpb-ivi-audio.yaml#
@@ -7,7 +9,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Texas Instruments J721e Common Processor Board Audio Support
 
 maintainers:
-  - Peter Ujfalusi <peter.ujfalusi@ti.com>
+  - Peter Ujfalusi <peter.ujfalusi@gmail.com>
 
 description: |
   The Infotainment board plugs into the Common Processor Board, the support of the
index 388245b..148b3fb 100644 (file)
@@ -11,8 +11,12 @@ maintainers:
 
 properties:
   compatible:
-    items:
+    oneOf:
       - const: ti,j721e-usb
+      - const: ti,am64-usb
+      - items:
+          - const: ti,j721e-usb
+          - const: ti,am64-usb
 
   reg:
     description: module registers
index 2fb2ff2..36ac216 100644 (file)
@@ -48,12 +48,12 @@ or ``virtualenv``, depending on how your distribution packaged Python 3.
       those versions, you should run ``pip install 'docutils==0.12'``.
 
    #) It is recommended to use the RTD theme for html output. Depending
-      on the Sphinx version, it should be installed  in separate,
+      on the Sphinx version, it should be installed separately,
       with ``pip install sphinx_rtd_theme``.
 
-   #) Some ReST pages contain math expressions. Due to the way Sphinx work,
+   #) Some ReST pages contain math expressions. Due to the way Sphinx works,
       those expressions are written using LaTeX notation. It needs texlive
-      installed with amdfonts and amsmath in order to evaluate them.
+      installed with amsfonts and amsmath in order to evaluate them.
 
 In summary, if you want to install Sphinx version 1.7.9, you should do::
 
@@ -128,7 +128,7 @@ Sphinx Build
 ============
 
 The usual way to generate the documentation is to run ``make htmldocs`` or
-``make pdfdocs``. There are also other formats available, see the documentation
+``make pdfdocs``. There are also other formats available: see the documentation
 section of ``make help``. The generated documentation is placed in
 format-specific subdirectories under ``Documentation/output``.
 
@@ -303,17 +303,17 @@ and *targets* (e.g. a ref to ``:ref:`last row <last row>``` / :ref:`last row
         - head col 3
         - head col 4
 
-      * - column 1
+      * - row 1
         - field 1.1
         - field 1.2 with autospan
 
-      * - column 2
+      * - row 2
         - field 2.1
         - :rspan:`1` :cspan:`1` field 2.2 - 3.3
 
       * .. _`last row`:
 
-        - column 3
+        - row 3
 
 Rendered as:
 
@@ -325,17 +325,17 @@ Rendered as:
         - head col 3
         - head col 4
 
-      * - column 1
+      * - row 1
         - field 1.1
         - field 1.2 with autospan
 
-      * - column 2
+      * - row 2
         - field 2.1
         - :rspan:`1` :cspan:`1` field 2.2 - 3.3
 
       * .. _`last row`:
 
-        - column 3
+        - row 3
 
 Cross-referencing
 -----------------
@@ -361,7 +361,7 @@ Figures & Images
 
 If you want to add an image, you should use the ``kernel-figure`` and
 ``kernel-image`` directives. E.g. to insert a figure with a scalable
-image format use SVG (:ref:`svg_image_example`)::
+image format, use SVG (:ref:`svg_image_example`)::
 
     .. kernel-figure::  svg_image.svg
        :alt:    simple SVG image
@@ -375,7 +375,7 @@ image format use SVG (:ref:`svg_image_example`)::
 
    SVG image example
 
-The kernel figure (and image) directive support **DOT** formatted files, see
+The kernel figure (and image) directive supports **DOT** formatted files, see
 
 * DOT: http://graphviz.org/pdf/dotguide.pdf
 * Graphviz: http://www.graphviz.org/content/dot-language
@@ -394,7 +394,7 @@ A simple example (:ref:`hello_dot_file`)::
 
    DOT's hello world example
 
-Embed *render* markups (or languages) like Graphviz's **DOT** is provided by the
+Embedded *render* markups (or languages) like Graphviz's **DOT** are provided by the
 ``kernel-render`` directives.::
 
   .. kernel-render:: DOT
@@ -406,7 +406,7 @@ Embed *render* markups (or languages) like Graphviz's **DOT** is provided by the
      }
 
 How this will be rendered depends on the installed tools. If Graphviz is
-installed, you will see an vector image. If not the raw markup is inserted as
+installed, you will see a vector image. If not, the raw markup is inserted as
 *literal-block* (:ref:`hello_dot_render`).
 
 .. _hello_dot_render:
@@ -421,8 +421,8 @@ installed, you will see an vector image. If not the raw markup is inserted as
 
 The *render* directive has all the options known from the *figure* directive,
 plus option ``caption``.  If ``caption`` has a value, a *figure* node is
-inserted. If not, a *image* node is inserted. A ``caption`` is also needed, if
-you want to refer it (:ref:`hello_svg_render`).
+inserted. If not, an *image* node is inserted. A ``caption`` is also needed, if
+you want to refer to it (:ref:`hello_svg_render`).
 
 Embedded **SVG**::
 
index e588bcc..c042176 100644 (file)
@@ -50,8 +50,8 @@ The following files belong to it:
   0x00000010        Memory Uncorrectable non-fatal
   0x00000020        Memory Uncorrectable fatal
   0x00000040        PCI Express Correctable
-  0x00000080        PCI Express Uncorrectable fatal
-  0x00000100        PCI Express Uncorrectable non-fatal
+  0x00000080        PCI Express Uncorrectable non-fatal
+  0x00000100        PCI Express Uncorrectable fatal
   0x00000200        Platform Correctable
   0x00000400        Platform Uncorrectable non-fatal
   0x00000800        Platform Uncorrectable fatal
index 922b3c8..749f518 100644 (file)
@@ -1,7 +1,7 @@
 .. SPDX-License-Identifier: GPL-2.0-or-later
 
 Kernel driver sbtsi_temp
-==================
+========================
 
 Supported hardware:
 
index d36768c..9f6a118 100644 (file)
@@ -598,7 +598,7 @@ more details, with real examples.
        explicitly added to $(targets).
 
        Assignments to $(targets) are without $(obj)/ prefix. if_changed may be
-       used in conjunction with custom rules as defined in "3.9 Custom Rules".
+       used in conjunction with custom rules as defined in "3.11 Custom Rules".
 
        Note: It is a typical mistake to forget the FORCE prerequisite.
        Another common pitfall is that whitespace is sometimes significant; for
index 6ed806e..c344892 100644 (file)
@@ -118,11 +118,11 @@ spinlock, but you may block holding a mutex. If you can't lock a mutex,
 your task will suspend itself, and be woken up when the mutex is
 released. This means the CPU can do something else while you are
 waiting. There are many cases when you simply can't sleep (see
-`What Functions Are Safe To Call From Interrupts? <#sleeping-things>`__),
+`What Functions Are Safe To Call From Interrupts?`_),
 and so have to use a spinlock instead.
 
 Neither type of lock is recursive: see
-`Deadlock: Simple and Advanced <#deadlock>`__.
+`Deadlock: Simple and Advanced`_.
 
 Locks and Uniprocessor Kernels
 ------------------------------
@@ -179,7 +179,7 @@ perfect world).
 
 Note that you can also use spin_lock_irq() or
 spin_lock_irqsave() here, which stop hardware interrupts
-as well: see `Hard IRQ Context <#hard-irq-context>`__.
+as well: see `Hard IRQ Context`_.
 
 This works perfectly for UP as well: the spin lock vanishes, and this
 macro simply becomes local_bh_disable()
@@ -230,7 +230,7 @@ The Same Softirq
 ~~~~~~~~~~~~~~~~
 
 The same softirq can run on the other CPUs: you can use a per-CPU array
-(see `Per-CPU Data <#per-cpu-data>`__) for better performance. If you're
+(see `Per-CPU Data`_) for better performance. If you're
 going so far as to use a softirq, you probably care about scalable
 performance enough to justify the extra complexity.
 
index d3fcf53..61e8504 100644 (file)
@@ -164,46 +164,56 @@ Devlink health reporters
 
 NPA Reporters
 -------------
-The NPA reporters are responsible for reporting and recovering the following group of errors
+The NPA reporters are responsible for reporting and recovering the following group of errors:
+
 1. GENERAL events
+
    - Error due to operation of unmapped PF.
    - Error due to disabled alloc/free for other HW blocks (NIX, SSO, TIM, DPI and AURA).
+
 2. ERROR events
+
    - Fault due to NPA_AQ_INST_S read or NPA_AQ_RES_S write.
    - AQ Doorbell Error.
+
 3. RAS events
+
    - RAS Error Reporting for NPA_AQ_INST_S/NPA_AQ_RES_S.
+
 4. RVU events
+
    - Error due to unmapped slot.
 
-Sample Output
--------------
-~# devlink health
-pci/0002:01:00.0:
-  reporter hw_npa_intr
-      state healthy error 2872 recover 2872 last_dump_date 2020-12-10 last_dump_time 09:39:09 grace_period 0 auto_recover true auto_dump true
-  reporter hw_npa_gen
-      state healthy error 2872 recover 2872 last_dump_date 2020-12-11 last_dump_time 04:43:04 grace_period 0 auto_recover true auto_dump true
-  reporter hw_npa_err
-      state healthy error 2871 recover 2871 last_dump_date 2020-12-10 last_dump_time 09:39:17 grace_period 0 auto_recover true auto_dump true
-   reporter hw_npa_ras
-      state healthy error 0 recover 0 last_dump_date 2020-12-10 last_dump_time 09:32:40 grace_period 0 auto_recover true auto_dump true
+Sample Output::
+
+       ~# devlink health
+       pci/0002:01:00.0:
+         reporter hw_npa_intr
+             state healthy error 2872 recover 2872 last_dump_date 2020-12-10 last_dump_time 09:39:09 grace_period 0 auto_recover true auto_dump true
+         reporter hw_npa_gen
+             state healthy error 2872 recover 2872 last_dump_date 2020-12-11 last_dump_time 04:43:04 grace_period 0 auto_recover true auto_dump true
+         reporter hw_npa_err
+             state healthy error 2871 recover 2871 last_dump_date 2020-12-10 last_dump_time 09:39:17 grace_period 0 auto_recover true auto_dump true
+          reporter hw_npa_ras
+             state healthy error 0 recover 0 last_dump_date 2020-12-10 last_dump_time 09:32:40 grace_period 0 auto_recover true auto_dump true
 
 Each reporter dumps the
+
  - Error Type
  - Error Register value
  - Reason in words
 
-For eg:
-~# devlink health dump show  pci/0002:01:00.0 reporter hw_npa_gen
- NPA_AF_GENERAL:
-         NPA General Interrupt Reg : 1
-         NIX0: free disabled RX
-~# devlink health dump show  pci/0002:01:00.0 reporter hw_npa_intr
- NPA_AF_RVU:
-         NPA RVU Interrupt Reg : 1
-         Unmap Slot Error
-~# devlink health dump show  pci/0002:01:00.0 reporter hw_npa_err
- NPA_AF_ERR:
-        NPA Error Interrupt Reg : 4096
-        AQ Doorbell Error
+For example::
+
+       ~# devlink health dump show  pci/0002:01:00.0 reporter hw_npa_gen
+        NPA_AF_GENERAL:
+                NPA General Interrupt Reg : 1
+                NIX0: free disabled RX
+       ~# devlink health dump show  pci/0002:01:00.0 reporter hw_npa_intr
+        NPA_AF_RVU:
+                NPA RVU Interrupt Reg : 1
+                Unmap Slot Error
+       ~# devlink health dump show  pci/0002:01:00.0 reporter hw_npa_err
+        NPA_AF_ERR:
+               NPA Error Interrupt Reg : 4096
+               AQ Doorbell Error
index 4b9ed58..ae2ae37 100644 (file)
@@ -6,9 +6,9 @@
 netdev FAQ
 ==========
 
-Q: What is netdev?
-------------------
-A: It is a mailing list for all network-related Linux stuff.  This
+What is netdev?
+---------------
+It is a mailing list for all network-related Linux stuff.  This
 includes anything found under net/ (i.e. core code like IPv6) and
 drivers/net (i.e. hardware specific drivers) in the Linux source tree.
 
@@ -25,9 +25,9 @@ Aside from subsystems like that mentioned above, all network-related
 Linux development (i.e. RFC, review, comments, etc.) takes place on
 netdev.
 
-Q: How do the changes posted to netdev make their way into Linux?
------------------------------------------------------------------
-A: There are always two trees (git repositories) in play.  Both are
+How do the changes posted to netdev make their way into Linux?
+--------------------------------------------------------------
+There are always two trees (git repositories) in play.  Both are
 driven by David Miller, the main network maintainer.  There is the
 ``net`` tree, and the ``net-next`` tree.  As you can probably guess from
 the names, the ``net`` tree is for fixes to existing code already in the
@@ -37,9 +37,9 @@ for the future release.  You can find the trees here:
 - https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
 - https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
 
-Q: How often do changes from these trees make it to the mainline Linus tree?
-----------------------------------------------------------------------------
-A: To understand this, you need to know a bit of background information on
+How often do changes from these trees make it to the mainline Linus tree?
+-------------------------------------------------------------------------
+To understand this, you need to know a bit of background information on
 the cadence of Linux development.  Each new release starts off with a
 two week "merge window" where the main maintainers feed their new stuff
 to Linus for merging into the mainline tree.  After the two weeks, the
@@ -81,7 +81,8 @@ focus for ``net`` is on stabilization and bug fixes.
 
 Finally, the vX.Y gets released, and the whole cycle starts over.
 
-Q: So where are we now in this cycle?
+So where are we now in this cycle?
+----------------------------------
 
 Load the mainline (Linus) page here:
 
@@ -91,9 +92,9 @@ and note the top of the "tags" section.  If it is rc1, it is early in
 the dev cycle.  If it was tagged rc7 a week ago, then a release is
 probably imminent.
 
-Q: How do I indicate which tree (net vs. net-next) my patch should be in?
--------------------------------------------------------------------------
-A: Firstly, think whether you have a bug fix or new "next-like" content.
+How do I indicate which tree (net vs. net-next) my patch should be in?
+----------------------------------------------------------------------
+Firstly, think whether you have a bug fix or new "next-like" content.
 Then once decided, assuming that you use git, use the prefix flag, i.e.
 ::
 
@@ -105,48 +106,45 @@ in the above is just the subject text of the outgoing e-mail, and you
 can manually change it yourself with whatever MUA you are comfortable
 with.
 
-Q: I sent a patch and I'm wondering what happened to it?
---------------------------------------------------------
-Q: How can I tell whether it got merged?
-A: Start by looking at the main patchworks queue for netdev:
+I sent a patch and I'm wondering what happened to it - how can I tell whether it got merged?
+--------------------------------------------------------------------------------------------
+Start by looking at the main patchworks queue for netdev:
 
   https://patchwork.kernel.org/project/netdevbpf/list/
 
 The "State" field will tell you exactly where things are at with your
 patch.
 
-Q: The above only says "Under Review".  How can I find out more?
-----------------------------------------------------------------
-A: Generally speaking, the patches get triaged quickly (in less than
+The above only says "Under Review".  How can I find out more?
+-------------------------------------------------------------
+Generally speaking, the patches get triaged quickly (in less than
 48h).  So be patient.  Asking the maintainer for status updates on your
 patch is a good way to ensure your patch is ignored or pushed to the
 bottom of the priority list.
 
-Q: I submitted multiple versions of the patch series
-----------------------------------------------------
-Q: should I directly update patchwork for the previous versions of these
-patch series?
-A: No, please don't interfere with the patch status on patchwork, leave
+I submitted multiple versions of the patch series. Should I directly update patchwork for the previous versions of these patch series?
+--------------------------------------------------------------------------------------------------------------------------------------
+No, please don't interfere with the patch status on patchwork, leave
 it to the maintainer to figure out what is the most recent and current
 version that should be applied. If there is any doubt, the maintainer
 will reply and ask what should be done.
 
-Q: I made changes to only a few patches in a patch series should I resend only those changed?
----------------------------------------------------------------------------------------------
-A: No, please resend the entire patch series and make sure you do number your
+I made changes to only a few patches in a patch series should I resend only those changed?
+------------------------------------------------------------------------------------------
+No, please resend the entire patch series and make sure you do number your
 patches such that it is clear this is the latest and greatest set of patches
 that can be applied.
 
-Q: I submitted multiple versions of a patch series and it looks like a version other than the last one has been accepted, what should I do?
--------------------------------------------------------------------------------------------------------------------------------------------
-A: There is no revert possible, once it is pushed out, it stays like that.
+I submitted multiple versions of a patch series and it looks like a version other than the last one has been accepted, what should I do?
+----------------------------------------------------------------------------------------------------------------------------------------
+There is no revert possible, once it is pushed out, it stays like that.
 Please send incremental versions on top of what has been merged in order to fix
 the patches the way they would look like if your latest patch series was to be
 merged.
 
-Q: How can I tell what patches are queued up for backporting to the various stable releases?
---------------------------------------------------------------------------------------------
-A: Normally Greg Kroah-Hartman collects stable commits himself, but for
+How can I tell what patches are queued up for backporting to the various stable releases?
+-----------------------------------------------------------------------------------------
+Normally Greg Kroah-Hartman collects stable commits himself, but for
 networking, Dave collects up patches he deems critical for the
 networking subsystem, and then hands them off to Greg.
 
@@ -169,11 +167,9 @@ simply clone the repo, and then git grep the mainline commit ID, e.g.
   releases/3.9.8/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
   stable/stable-queue$
 
-Q: I see a network patch and I think it should be backported to stable.
------------------------------------------------------------------------
-Q: Should I request it via stable@vger.kernel.org like the references in
-the kernel's Documentation/process/stable-kernel-rules.rst file say?
-A: No, not for networking.  Check the stable queues as per above first
+I see a network patch and I think it should be backported to stable. Should I request it via stable@vger.kernel.org like the references in the kernel's Documentation/process/stable-kernel-rules.rst file say?
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+No, not for networking.  Check the stable queues as per above first
 to see if it is already queued.  If not, then send a mail to netdev,
 listing the upstream commit ID and why you think it should be a stable
 candidate.
@@ -190,11 +186,9 @@ mainline, the better the odds that it is an OK candidate for stable.  So
 scrambling to request a commit be added the day after it appears should
 be avoided.
 
-Q: I have created a network patch and I think it should be backported to stable.
---------------------------------------------------------------------------------
-Q: Should I add a Cc: stable@vger.kernel.org like the references in the
-kernel's Documentation/ directory say?
-A: No.  See above answer.  In short, if you think it really belongs in
+I have created a network patch and I think it should be backported to stable. Should I add a Cc: stable@vger.kernel.org like the references in the kernel's Documentation/ directory say?
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+No.  See above answer.  In short, if you think it really belongs in
 stable, then ensure you write a decent commit log that describes who
 gets impacted by the bug fix and how it manifests itself, and when the
 bug was introduced.  If you do that properly, then the commit will get
@@ -207,18 +201,18 @@ marker line as described in
 :ref:`Documentation/process/submitting-patches.rst <the_canonical_patch_format>`
 to temporarily embed that information into the patch that you send.
 
-Q: Are all networking bug fixes backported to all stable releases?
-------------------------------------------------------------------
-A: Due to capacity, Dave could only take care of the backports for the
+Are all networking bug fixes backported to all stable releases?
+---------------------------------------------------------------
+Due to capacity, Dave could only take care of the backports for the
 last two stable releases. For earlier stable releases, each stable
 branch maintainer is supposed to take care of them. If you find any
 patch is missing from an earlier stable branch, please notify
 stable@vger.kernel.org with either a commit ID or a formal patch
 backported, and CC Dave and other relevant networking developers.
 
-Q: Is the comment style convention different for the networking content?
-------------------------------------------------------------------------
-A: Yes, in a largely trivial way.  Instead of this::
+Is the comment style convention different for the networking content?
+---------------------------------------------------------------------
+Yes, in a largely trivial way.  Instead of this::
 
   /*
    * foobar blah blah blah
@@ -231,32 +225,30 @@ it is requested that you make it look like this::
    * another line of text
    */
 
-Q: I am working in existing code that has the former comment style and not the latter.
---------------------------------------------------------------------------------------
-Q: Should I submit new code in the former style or the latter?
-A: Make it the latter style, so that eventually all code in the domain
+I am working in existing code that has the former comment style and not the latter. Should I submit new code in the former style or the latter?
+-----------------------------------------------------------------------------------------------------------------------------------------------
+Make it the latter style, so that eventually all code in the domain
 of netdev is of this format.
 
-Q: I found a bug that might have possible security implications or similar.
----------------------------------------------------------------------------
-Q: Should I mail the main netdev maintainer off-list?**
-A: No. The current netdev maintainer has consistently requested that
+I found a bug that might have possible security implications or similar. Should I mail the main netdev maintainer off-list?
+---------------------------------------------------------------------------------------------------------------------------
+No. The current netdev maintainer has consistently requested that
 people use the mailing lists and not reach out directly.  If you aren't
 OK with that, then perhaps consider mailing security@kernel.org or
 reading about http://oss-security.openwall.org/wiki/mailing-lists/distros
 as possible alternative mechanisms.
 
-Q: What level of testing is expected before I submit my change?
----------------------------------------------------------------
-A: If your changes are against ``net-next``, the expectation is that you
+What level of testing is expected before I submit my change?
+------------------------------------------------------------
+If your changes are against ``net-next``, the expectation is that you
 have tested by layering your changes on top of ``net-next``.  Ideally
 you will have done run-time testing specific to your change, but at a
 minimum, your changes should survive an ``allyesconfig`` and an
 ``allmodconfig`` build without new warnings or failures.
 
-Q: How do I post corresponding changes to user space components?
-----------------------------------------------------------------
-A: User space code exercising kernel features should be posted
+How do I post corresponding changes to user space components?
+-------------------------------------------------------------
+User space code exercising kernel features should be posted
 alongside kernel patches. This gives reviewers a chance to see
 how any new interface is used and how well it works.
 
@@ -280,9 +272,9 @@ to the mailing list, e.g.::
 Posting as one thread is discouraged because it confuses patchwork
 (as of patchwork 2.2.2).
 
-Q: Any other tips to help ensure my net/net-next patch gets OK'd?
------------------------------------------------------------------
-A: Attention to detail.  Re-read your own work as if you were the
+Any other tips to help ensure my net/net-next patch gets OK'd?
+--------------------------------------------------------------
+Attention to detail.  Re-read your own work as if you were the
 reviewer.  You can start with using ``checkpatch.pl``, perhaps even with
 the ``--strict`` flag.  But do not be mindlessly robotic in doing so.
 If your change is a bug fix, make sure your commit log indicates the
index 5a85fcc..17bdcb7 100644 (file)
@@ -10,18 +10,177 @@ Introduction
 The following is a random collection of documentation regarding
 network devices.
 
-struct net_device allocation rules
-==================================
+struct net_device lifetime rules
+================================
 Network device structures need to persist even after module is unloaded and
 must be allocated with alloc_netdev_mqs() and friends.
 If device has registered successfully, it will be freed on last use
-by free_netdev(). This is required to handle the pathologic case cleanly
-(example: rmmod mydriver </sys/class/net/myeth/mtu )
+by free_netdev(). This is required to handle the pathological case cleanly
+(example: ``rmmod mydriver </sys/class/net/myeth/mtu``)
 
-alloc_netdev_mqs()/alloc_netdev() reserve extra space for driver
+alloc_netdev_mqs() / alloc_netdev() reserve extra space for driver
 private data which gets freed when the network device is freed. If
 separately allocated data is attached to the network device
-(netdev_priv(dev)) then it is up to the module exit handler to free that.
+(netdev_priv()) then it is up to the module exit handler to free that.
+
+There are two groups of APIs for registering struct net_device.
+First group can be used in normal contexts where ``rtnl_lock`` is not already
+held: register_netdev(), unregister_netdev().
+Second group can be used when ``rtnl_lock`` is already held:
+register_netdevice(), unregister_netdevice(), free_netdev().
+
+Simple drivers
+--------------
+
+Most drivers (especially device drivers) handle lifetime of struct net_device
+in context where ``rtnl_lock`` is not held (e.g. driver probe and remove paths).
+
+In that case the struct net_device registration is done using
+the register_netdev(), and unregister_netdev() functions:
+
+.. code-block:: c
+
+  int probe()
+  {
+    struct my_device_priv *priv;
+    int err;
+
+    dev = alloc_netdev_mqs(...);
+    if (!dev)
+      return -ENOMEM;
+    priv = netdev_priv(dev);
+
+    /* ... do all device setup before calling register_netdev() ...
+     */
+
+    err = register_netdev(dev);
+    if (err)
+      goto err_undo;
+
+    /* net_device is visible to the user! */
+
+  err_undo:
+    /* ... undo the device setup ... */
+    free_netdev(dev);
+    return err;
+  }
+
+  void remove()
+  {
+    unregister_netdev(dev);
+    free_netdev(dev);
+  }
+
+Note that after calling register_netdev() the device is visible in the system.
+Users can open it and start sending / receiving traffic immediately,
+or run any other callback, so all initialization must be done prior to
+registration.
+
+unregister_netdev() closes the device and waits for all users to be done
+with it. The memory of struct net_device itself may still be referenced
+by sysfs but all operations on that device will fail.
+
+free_netdev() can be called after unregister_netdev() returns or when
+register_netdev() failed.
+
+Device management under RTNL
+----------------------------
+
+Registering struct net_device while in context which already holds
+the ``rtnl_lock`` requires extra care. In those scenarios most drivers
+will want to make use of struct net_device's ``needs_free_netdev``
+and ``priv_destructor`` members for freeing of state.
+
+Example flow of netdev handling under ``rtnl_lock``:
+
+.. code-block:: c
+
+  static void my_setup(struct net_device *dev)
+  {
+    dev->needs_free_netdev = true;
+  }
+
+  static void my_destructor(struct net_device *dev)
+  {
+    some_obj_destroy(priv->obj);
+    some_uninit(priv);
+  }
+
+  int create_link()
+  {
+    struct my_device_priv *priv;
+    int err;
+
+    ASSERT_RTNL();
+
+    dev = alloc_netdev(sizeof(*priv), "net%d", NET_NAME_UNKNOWN, my_setup);
+    if (!dev)
+      return -ENOMEM;
+    priv = netdev_priv(dev);
+
+    /* Implicit constructor */
+    err = some_init(priv);
+    if (err)
+      goto err_free_dev;
+
+    priv->obj = some_obj_create();
+    if (!priv->obj) {
+      err = -ENOMEM;
+      goto err_some_uninit;
+    }
+    /* End of constructor, set the destructor: */
+    dev->priv_destructor = my_destructor;
+
+    err = register_netdevice(dev);
+    if (err)
+      /* register_netdevice() calls destructor on failure */
+      goto err_free_dev;
+
+    /* If anything fails now unregister_netdevice() (or unregister_netdev())
+     * will take care of calling my_destructor and free_netdev().
+     */
+
+    return 0;
+
+  err_some_uninit:
+    some_uninit(priv);
+  err_free_dev:
+    free_netdev(dev);
+    return err;
+  }
+
+If struct net_device.priv_destructor is set it will be called by the core
+some time after unregister_netdevice(), it will also be called if
+register_netdevice() fails. The callback may be invoked with or without
+``rtnl_lock`` held.
+
+There is no explicit constructor callback; the driver "constructs" the private
+netdev state after allocating it and before registration.
+
+Setting struct net_device.needs_free_netdev makes core call free_netdev()
+automatically after unregister_netdevice() when all references to the device
+are gone. It only takes effect after a successful call to register_netdevice()
+so if register_netdevice() fails, the driver is responsible for calling
+free_netdev().
+
+free_netdev() is safe to call on error paths right after unregister_netdevice()
+or when register_netdevice() fails. Parts of netdev (de)registration process
+happen after ``rtnl_lock`` is released, therefore in those cases free_netdev()
+will defer some of the processing until ``rtnl_lock`` is released.
+
+Devices spawned from struct rtnl_link_ops should never free the
+struct net_device directly.
+
+.ndo_init and .ndo_uninit
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+``.ndo_init`` and ``.ndo_uninit`` callbacks are called during net_device
+registration and de-registration, under ``rtnl_lock``. Drivers can use
+those e.g. when parts of their init process need to run under ``rtnl_lock``.
+
+``.ndo_init`` runs before device is visible in the system, ``.ndo_uninit``
+runs during de-registering after device is closed but other subsystems
+may still have outstanding references to the netdevice.
 
 MTU
 ===
@@ -64,8 +223,8 @@ ndo_do_ioctl:
        Context: process
 
 ndo_get_stats:
-       Synchronization: dev_base_lock rwlock.
-       Context: nominally process, but don't sleep inside an rwlock
+       Synchronization: rtnl_lock() semaphore, dev_base_lock rwlock, or RCU.
+       Context: atomic (can't sleep under rwlock or RCU)
 
 ndo_start_xmit:
        Synchronization: __netif_tx_lock spinlock.
index 6c009ce..500ef60 100644 (file)
@@ -8,7 +8,7 @@ Abstract
 ========
 
 This file documents the mmap() facility available with the PACKET
-socket interface on 2.4/2.6/3.x kernels. This type of sockets is used for
+socket interface. This type of socket is used for
 
 i) capture network traffic with utilities like tcpdump,
 ii) transmit network traffic, or any other that needs raw
@@ -25,12 +25,12 @@ Please send your comments to
 Why use PACKET_MMAP
 ===================
 
-In Linux 2.4/2.6/3.x if PACKET_MMAP is not enabled, the capture process is very
+A non-PACKET_MMAP capture process (plain AF_PACKET) is very
 inefficient. It uses very limited buffers and requires one system call to
 capture each packet, it requires two if you want to get packet's timestamp
 (like libpcap always does).
 
-In the other hand PACKET_MMAP is very efficient. PACKET_MMAP provides a size
+On the other hand PACKET_MMAP is very efficient. PACKET_MMAP provides a size
 configurable circular buffer mapped in user space that can be used to either
 send or receive packets. This way reading packets just needs to wait for them,
 most of the time there is no need to issue a single system call. Concerning
@@ -252,8 +252,7 @@ PACKET_MMAP setting constraints
 
 In kernel versions prior to 2.4.26 (for the 2.4 branch) and 2.6.5 (2.6 branch),
 the PACKET_MMAP buffer could hold only 32768 frames in a 32 bit architecture or
-16384 in a 64 bit architecture. For information on these kernel versions
-see http://pusa.uv.es/~ulisses/packet_mmap/packet_mmap.pre-2.4.26_2.6.5.txt
+16384 in a 64 bit architecture.
 
 Block size limit
 ----------------
@@ -437,7 +436,7 @@ and the following flags apply:
 Capture process
 ^^^^^^^^^^^^^^^
 
-     from include/linux/if_packet.h
+From include/linux/if_packet.h::
 
      #define TP_STATUS_COPY          (1 << 1)
      #define TP_STATUS_LOSING        (1 << 2)
index 0f55c6d..5f0dea3 100644 (file)
@@ -530,7 +530,10 @@ TLS device feature flags only control adding of new TLS connection
 offloads, old connections will remain active after flags are cleared.
 
 TLS encryption cannot be offloaded to devices without checksum calculation
-offload. Hence, TLS TX device feature flag requires NETIF_F_HW_CSUM being set.
+offload. Hence, TLS TX device feature flag requires TX csum offload to be set.
 Disabling the latter implies clearing the former. Disabling TX checksum offload
 should not affect old connections, and drivers should make sure checksum
 calculation does not break for them.
+Similarly, device-offloaded TLS decryption implies doing RXCSUM. If the user
+does not want to enable RX csum offload, TLS RX device feature is disabled
+as well.
index c27e59d..0825dc4 100644 (file)
@@ -249,10 +249,8 @@ features; most of these are found in the "kernel hacking" submenu.  Several
 of these options should be turned on for any kernel used for development or
 testing purposes.  In particular, you should turn on:
 
- - ENABLE_MUST_CHECK and FRAME_WARN to get an
-   extra set of warnings for problems like the use of deprecated interfaces
-   or ignoring an important return value from a function.  The output
-   generated by these warnings can be verbose, but one need not worry about
+ - FRAME_WARN to get warnings for stack frames larger than a given amount.
+   The output generated can be verbose, but one need not worry about
    warnings from other parts of the kernel.
 
  - DEBUG_OBJECTS will add code to track the lifetime of various objects
index fe52c31..b36af65 100644 (file)
@@ -1501,7 +1501,7 @@ Module for Digigram miXart8 sound cards.
 
 This module supports multiple cards.
 Note: One miXart8 board will be represented as 4 alsa cards.
-See MIXART.txt for details.
+See Documentation/sound/cards/mixart.rst for details.
 
 When the driver is compiled as a module and the hotplug firmware
 is supported, the firmware data is loaded via hotplug automatically.
index 73bbd59..e636583 100644 (file)
@@ -71,7 +71,7 @@ core/oss
 The codes for PCM and mixer OSS emulation modules are stored in this
 directory. The rawmidi OSS emulation is included in the ALSA rawmidi
 code since it's quite small. The sequencer code is stored in
-``core/seq/oss`` directory (see `below <#core-seq-oss>`__).
+``core/seq/oss`` directory (see `below <core/seq/oss_>`__).
 
 core/seq
 ~~~~~~~~
@@ -382,7 +382,7 @@ where ``enable[dev]`` is the module option.
 Each time the ``probe`` callback is called, check the availability of
 the device. If not available, simply increment the device index and
 returns. dev will be incremented also later (`step 7
-<#set-the-pci-driver-data-and-return-zero>`__).
+<7) Set the PCI driver data and return zero._>`__).
 
 2) Create a card instance
 ~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -450,10 +450,10 @@ field contains the information shown in ``/proc/asound/cards``.
 5) Create other components, such as mixer, MIDI, etc.
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-Here you define the basic components such as `PCM <#PCM-Interface>`__,
-mixer (e.g. `AC97 <#API-for-AC97-Codec>`__), MIDI (e.g.
-`MPU-401 <#MIDI-MPU401-UART-Interface>`__), and other interfaces.
-Also, if you want a `proc file <#Proc-Interface>`__, define it here,
+Here you define the basic components such as `PCM <PCM Interface_>`__,
+mixer (e.g. `AC97 <API for AC97 Codec_>`__), MIDI (e.g.
+`MPU-401 <MIDI (MPU401-UART) Interface_>`__), and other interfaces.
+Also, if you want a `proc file <Proc Interface_>`__, define it here,
 too.
 
 6) Register the card instance.
@@ -941,7 +941,7 @@ The allocation of an interrupt source is done like this:
   chip->irq = pci->irq;
 
 where :c:func:`snd_mychip_interrupt()` is the interrupt handler
-defined `later <#pcm-interface-interrupt-handler>`__. Note that
+defined `later <PCM Interrupt Handler_>`__. Note that
 ``chip->irq`` should be defined only when :c:func:`request_irq()`
 succeeded.
 
@@ -3104,7 +3104,7 @@ processing the output stream in the irq handler.
 
 If the MPU-401 interface shares its interrupt with the other logical
 devices on the card, set ``MPU401_INFO_IRQ_HOOK`` (see
-`below <#MIDI-Interrupt-Handler>`__).
+`below <MIDI Interrupt Handler_>`__).
 
 Usually, the port address corresponds to the command port and port + 1
 corresponds to the data port. If not, you may change the ``cport``
index 70254ea..c136e25 100644 (file)
@@ -392,9 +392,14 @@ This ioctl is obsolete and has been removed.
 
 Errors:
 
-  =====      =============================
+  =======    ==============================================================
   EINTR      an unmasked signal is pending
-  =====      =============================
+  ENOEXEC    the vcpu hasn't been initialized or the guest tried to execute
+             instructions from device memory (arm64)
+  ENOSYS     data abort outside memslots with no syndrome info and
+             KVM_CAP_ARM_NISV_TO_USER not enabled (arm64)
+  EPERM      SVE feature set but not finalized (arm64)
+  =======    ==============================================================
 
 This ioctl is used to run a guest virtual cpu.  While there are no
 explicit parameters, there is an implicit parameter block that can be
index 6eff4f7..a625361 100644 (file)
@@ -203,8 +203,8 @@ F:  include/uapi/linux/nl80211.h
 F:     net/wireless/
 
 8169 10/100/1000 GIGABIT ETHERNET DRIVER
-M:     Realtek linux nic maintainers <nic_swsd@realtek.com>
 M:     Heiner Kallweit <hkallweit1@gmail.com>
+M:     nic_swsd@realtek.com
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/realtek/r8169*
@@ -820,7 +820,6 @@ M:  Netanel Belgazal <netanel@amazon.com>
 M:     Arthur Kiyanovski <akiyano@amazon.com>
 R:     Guy Tzalik <gtzalik@amazon.com>
 R:     Saeed Bishara <saeedb@amazon.com>
-R:     Zorik Machulsky <zorik@amazon.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     Documentation/networking/device_drivers/ethernet/amazon/ena.rst
@@ -907,7 +906,7 @@ AMD KFD
 M:     Felix Kuehling <Felix.Kuehling@amd.com>
 L:     amd-gfx@lists.freedesktop.org
 S:     Supported
-T:     git git://people.freedesktop.org/~agd5f/linux
+T:     git https://gitlab.freedesktop.org/agd5f/linux.git
 F:     drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd*.[ch]
 F:     drivers/gpu/drm/amd/amdkfd/
 F:     drivers/gpu/drm/amd/include/cik_structs.h
@@ -2119,7 +2118,7 @@ N:        atmel
 ARM/Microchip Sparx5 SoC support
 M:     Lars Povlsen <lars.povlsen@microchip.com>
 M:     Steen Hegelund <Steen.Hegelund@microchip.com>
-M:     Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
+M:     UNGLinuxDriver@microchip.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Supported
 T:     git git://github.com/microchip-ung/linux-upstream.git
@@ -2942,7 +2941,6 @@ S:        Maintained
 F:     drivers/hwmon/asus_atk0110.c
 
 ATLX ETHERNET DRIVERS
-M:     Jay Cliburn <jcliburn@gmail.com>
 M:     Chris Snook <chris.snook@gmail.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
@@ -3336,7 +3334,7 @@ F:        arch/riscv/net/
 X:     arch/riscv/net/bpf_jit_comp64.c
 
 BPF JIT for RISC-V (64-bit)
-M:     Björn Töpel <bjorn.topel@gmail.com>
+M:     Björn Töpel <bjorn@kernel.org>
 L:     netdev@vger.kernel.org
 L:     bpf@vger.kernel.org
 S:     Maintained
@@ -3556,7 +3554,7 @@ S:        Supported
 F:     drivers/net/ethernet/broadcom/bnxt/
 
 BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
-M:     Arend van Spriel <arend.vanspriel@broadcom.com>
+M:     Arend van Spriel <aspriel@gmail.com>
 M:     Franky Lin <franky.lin@broadcom.com>
 M:     Hante Meuleman <hante.meuleman@broadcom.com>
 M:     Chi-hsien Lin <chi-hsien.lin@infineon.com>
@@ -3881,9 +3879,9 @@ F:        Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
 F:     drivers/mtd/nand/raw/cadence-nand-controller.c
 
 CADENCE USB3 DRD IP DRIVER
-M:     Peter Chen <peter.chen@nxp.com>
+M:     Peter Chen <peter.chen@kernel.org>
 M:     Pawel Laszczak <pawell@cadence.com>
-M:     Roger Quadros <rogerq@ti.com>
+R:     Roger Quadros <rogerq@kernel.org>
 R:     Aswath Govindraju <a-govindraju@ti.com>
 L:     linux-usb@vger.kernel.org
 S:     Maintained
@@ -3961,7 +3959,7 @@ F:        net/can/
 CAN-J1939 NETWORK LAYER
 M:     Robin van der Gracht <robin@protonic.nl>
 M:     Oleksij Rempel <o.rempel@pengutronix.de>
-R:     Pengutronix Kernel Team <kernel@pengutronix.de>
+R:     kernel@pengutronix.de
 L:     linux-can@vger.kernel.org
 S:     Maintained
 F:     Documentation/networking/j1939.rst
@@ -4163,7 +4161,7 @@ S:        Maintained
 F:     Documentation/translations/zh_CN/
 
 CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
-M:     Peter Chen <Peter.Chen@nxp.com>
+M:     Peter Chen <peter.chen@kernel.org>
 L:     linux-usb@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
@@ -4922,9 +4920,8 @@ F:        Documentation/scsi/dc395x.rst
 F:     drivers/scsi/dc395x.*
 
 DCCP PROTOCOL
-M:     Gerrit Renker <gerrit@erg.abdn.ac.uk>
 L:     dccp@vger.kernel.org
-S:     Maintained
+S:     Orphan
 W:     http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp
 F:     include/linux/dccp.h
 F:     include/linux/tfrc.h
@@ -7363,7 +7360,6 @@ L:        linux-hardening@vger.kernel.org
 S:     Maintained
 F:     Documentation/kbuild/gcc-plugins.rst
 F:     scripts/Makefile.gcc-plugins
-F:     scripts/gcc-plugin.sh
 F:     scripts/gcc-plugins/
 
 GCOV BASED KERNEL PROFILING
@@ -9240,7 +9236,7 @@ F:        tools/testing/selftests/sgx/*
 K:     \bSGX_
 
 INTERCONNECT API
-M:     Georgi Djakov <georgi.djakov@linaro.org>
+M:     Georgi Djakov <djakov@kernel.org>
 L:     linux-pm@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/interconnect/
@@ -9273,7 +9269,7 @@ F:        drivers/net/ethernet/sgi/ioc3-eth.c
 
 IOMAP FILESYSTEM LIBRARY
 M:     Christoph Hellwig <hch@infradead.org>
-M:     Darrick J. Wong <darrick.wong@oracle.com>
+M:     Darrick J. Wong <djwong@kernel.org>
 M:     linux-xfs@vger.kernel.org
 M:     linux-fsdevel@vger.kernel.org
 L:     linux-xfs@vger.kernel.org
@@ -9327,7 +9323,6 @@ W:        http://www.adaptec.com/
 F:     drivers/scsi/ips*
 
 IPVS
-M:     Wensong Zhang <wensong@linux-vs.org>
 M:     Simon Horman <horms@verge.net.au>
 M:     Julian Anastasov <ja@ssi.bg>
 L:     netdev@vger.kernel.org
@@ -9776,7 +9771,7 @@ F:        tools/testing/selftests/kvm/s390x/
 
 KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86)
 M:     Paolo Bonzini <pbonzini@redhat.com>
-R:     Sean Christopherson <sean.j.christopherson@intel.com>
+R:     Sean Christopherson <seanjc@google.com>
 R:     Vitaly Kuznetsov <vkuznets@redhat.com>
 R:     Wanpeng Li <wanpengli@tencent.com>
 R:     Jim Mattson <jmattson@google.com>
@@ -10260,7 +10255,6 @@ S:      Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
 F:     Documentation/atomic_bitops.txt
 F:     Documentation/atomic_t.txt
-F:     Documentation/core-api/atomic_ops.rst
 F:     Documentation/core-api/refcount-vs-atomic.rst
 F:     Documentation/litmus-tests/
 F:     Documentation/memory-barriers.txt
@@ -10847,7 +10841,7 @@ F:      drivers/media/radio/radio-maxiradio*
 
 MCAN MMIO DEVICE DRIVER
 M:     Dan Murphy <dmurphy@ti.com>
-M:     Sriram Dash <sriram.dash@samsung.com>
+M:     Pankaj Sharma <pankj.sharma@samsung.com>
 L:     linux-can@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/can/bosch,m_can.yaml
@@ -11667,7 +11661,7 @@ F:      drivers/media/platform/atmel/atmel-isi.h
 
 MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER
 M:     Woojung Huh <woojung.huh@microchip.com>
-M:     Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
+M:     UNGLinuxDriver@microchip.com
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
@@ -11677,7 +11671,7 @@ F:      net/dsa/tag_ksz.c
 
 MICROCHIP LAN743X ETHERNET DRIVER
 M:     Bryan Whitehead <bryan.whitehead@microchip.com>
-M:     Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
+M:     UNGLinuxDriver@microchip.com
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/microchip/lan743x_*
@@ -11771,7 +11765,7 @@ F:      drivers/net/wireless/microchip/wilc1000/
 
 MICROSEMI MIPS SOCS
 M:     Alexandre Belloni <alexandre.belloni@bootlin.com>
-M:     Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
+M:     UNGLinuxDriver@microchip.com
 L:     linux-mips@vger.kernel.org
 S:     Supported
 F:     Documentation/devicetree/bindings/mips/mscc.txt
@@ -12418,7 +12412,6 @@ F:      tools/testing/selftests/net/ipsec.c
 
 NETWORKING [IPv4/IPv6]
 M:     "David S. Miller" <davem@davemloft.net>
-M:     Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 M:     Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
 L:     netdev@vger.kernel.org
 S:     Maintained
@@ -12475,7 +12468,6 @@ F:      net/ipv6/tcp*.c
 
 NETWORKING [TLS]
 M:     Boris Pismenny <borisp@nvidia.com>
-M:     Aviad Yehezkel <aviadye@nvidia.com>
 M:     John Fastabend <john.fastabend@gmail.com>
 M:     Daniel Borkmann <daniel@iogearbox.net>
 M:     Jakub Kicinski <kuba@kernel.org>
@@ -12825,10 +12817,10 @@ F:    tools/objtool/
 F:     include/linux/objtool.h
 
 OCELOT ETHERNET SWITCH DRIVER
-M:     Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
 M:     Vladimir Oltean <vladimir.oltean@nxp.com>
 M:     Claudiu Manoil <claudiu.manoil@nxp.com>
 M:     Alexandre Belloni <alexandre.belloni@bootlin.com>
+M:     UNGLinuxDriver@microchip.com
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/dsa/ocelot/*
@@ -12850,7 +12842,7 @@ F:      include/misc/ocxl*
 F:     include/uapi/misc/ocxl.h
 
 OMAP AUDIO SUPPORT
-M:     Peter Ujfalusi <peter.ujfalusi@ti.com>
+M:     Peter Ujfalusi <peter.ujfalusi@gmail.com>
 M:     Jarkko Nikula <jarkko.nikula@bitmer.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 L:     linux-omap@vger.kernel.org
@@ -13890,7 +13882,7 @@ F:      drivers/platform/x86/peaq-wmi.c
 
 PENSANDO ETHERNET DRIVERS
 M:     Shannon Nelson <snelson@pensando.io>
-M:     Pensando Drivers <drivers@pensando.io>
+M:     drivers@pensando.io
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     Documentation/networking/device_drivers/ethernet/pensando/ionic.rst
@@ -14512,10 +14504,18 @@ S:    Supported
 F:     drivers/crypto/qat/
 
 QCOM AUDIO (ASoC) DRIVERS
-M:     Patrick Lai <plai@codeaurora.org>
+M:     Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 M:     Banajit Goswami <bgoswami@codeaurora.org>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Supported
+F:     sound/soc/codecs/lpass-va-macro.c
+F:     sound/soc/codecs/lpass-wsa-macro.*
+F:     sound/soc/codecs/msm8916-wcd-analog.c
+F:     sound/soc/codecs/msm8916-wcd-digital.c
+F:     sound/soc/codecs/wcd9335.*
+F:     sound/soc/codecs/wcd934x.c
+F:     sound/soc/codecs/wcd-clsh-v2.*
+F:     sound/soc/codecs/wsa881x.c
 F:     sound/soc/qcom/
 
 QCOM IPA DRIVER
@@ -14669,7 +14669,7 @@ T:      git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
 F:     drivers/net/wireless/ath/ath11k/
 
 QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
-M:     QCA ath9k Development <ath9k-devel@qca.qualcomm.com>
+M:     ath9k-devel@qca.qualcomm.com
 L:     linux-wireless@vger.kernel.org
 S:     Supported
 W:     https://wireless.wiki.kernel.org/en/users/Drivers/ath9k
@@ -14820,7 +14820,7 @@ M:      Alex Deucher <alexander.deucher@amd.com>
 M:     Christian König <christian.koenig@amd.com>
 L:     amd-gfx@lists.freedesktop.org
 S:     Supported
-T:     git git://people.freedesktop.org/~agd5f/linux
+T:     git https://gitlab.freedesktop.org/agd5f/linux.git
 F:     drivers/gpu/drm/amd/
 F:     drivers/gpu/drm/radeon/
 F:     include/uapi/drm/amdgpu_drm.h
@@ -16321,6 +16321,7 @@ M:      Pekka Enberg <penberg@kernel.org>
 M:     David Rientjes <rientjes@google.com>
 M:     Joonsoo Kim <iamjoonsoo.kim@lge.com>
 M:     Andrew Morton <akpm@linux-foundation.org>
+M:     Vlastimil Babka <vbabka@suse.cz>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     include/linux/sl?b*.h
@@ -16710,6 +16711,8 @@ M:      Samuel Thibault <samuel.thibault@ens-lyon.org>
 L:     speakup@linux-speakup.org
 S:     Odd Fixes
 W:     http://www.linux-speakup.org/
+W:     https://github.com/linux-speakup/speakup
+B:     https://github.com/linux-speakup/speakup/issues
 F:     drivers/accessibility/speakup/
 
 SPEAR CLOCK FRAMEWORK SUPPORT
@@ -16964,7 +16967,7 @@ M:      Olivier Moysan <olivier.moysan@st.com>
 M:     Arnaud Pouliquen <arnaud.pouliquen@st.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Maintained
-F:     Documentation/devicetree/bindings/sound/st,stm32-*.txt
+F:     Documentation/devicetree/bindings/iio/adc/st,stm32-*.yaml
 F:     sound/soc/stm/
 
 STM32 TIMER/LPTIMER DRIVERS
@@ -17541,7 +17544,7 @@ F:      arch/xtensa/
 F:     drivers/irqchip/irq-xtensa-*
 
 TEXAS INSTRUMENTS ASoC DRIVERS
-M:     Peter Ujfalusi <peter.ujfalusi@ti.com>
+M:     Peter Ujfalusi <peter.ujfalusi@gmail.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Maintained
 F:     sound/soc/ti/
@@ -17553,6 +17556,19 @@ S:     Supported
 F:     Documentation/devicetree/bindings/iio/dac/ti,dac7612.txt
 F:     drivers/iio/dac/ti-dac7612.c
 
+TEXAS INSTRUMENTS DMA DRIVERS
+M:     Peter Ujfalusi <peter.ujfalusi@gmail.com>
+L:     dmaengine@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt
+F:     Documentation/devicetree/bindings/dma/ti-edma.txt
+F:     Documentation/devicetree/bindings/dma/ti/
+F:     drivers/dma/ti/
+X:     drivers/dma/ti/cppi41.c
+F:     include/linux/dma/k3-udma-glue.h
+F:     include/linux/dma/ti-cppi5.h
+F:     include/linux/dma/k3-psil.h
+
 TEXAS INSTRUMENTS' SYSTEM CONTROL INTERFACE (TISCI) PROTOCOL DRIVER
 M:     Nishanth Menon <nm@ti.com>
 M:     Tero Kristo <t-kristo@ti.com>
@@ -17838,7 +17854,7 @@ F:      Documentation/devicetree/bindings/net/nfc/trf7970a.txt
 F:     drivers/nfc/trf7970a.c
 
 TI TWL4030 SERIES SOC CODEC DRIVER
-M:     Peter Ujfalusi <peter.ujfalusi@ti.com>
+M:     Peter Ujfalusi <peter.ujfalusi@gmail.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Maintained
 F:     sound/soc/codecs/twl4030*
@@ -18370,7 +18386,7 @@ F:      include/linux/usb/isp116x.h
 
 USB LAN78XX ETHERNET DRIVER
 M:     Woojung Huh <woojung.huh@microchip.com>
-M:     Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
+M:     UNGLinuxDriver@microchip.com
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/microchip,lan78xx.txt
@@ -18404,7 +18420,7 @@ F:      Documentation/usb/ohci.rst
 F:     drivers/usb/host/ohci*
 
 USB OTG FSM (Finite State Machine)
-M:     Peter Chen <Peter.Chen@nxp.com>
+M:     Peter Chen <peter.chen@kernel.org>
 L:     linux-usb@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
@@ -18484,7 +18500,7 @@ F:      drivers/net/usb/smsc75xx.*
 
 USB SMSC95XX ETHERNET DRIVER
 M:     Steve Glendinning <steve.glendinning@shawell.net>
-M:     Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
+M:     UNGLinuxDriver@microchip.com
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/usb/smsc95xx.*
@@ -19031,7 +19047,7 @@ F:      drivers/input/mouse/vmmouse.h
 
 VMWARE VMXNET3 ETHERNET DRIVER
 M:     Ronak Doshi <doshir@vmware.com>
-M:     "VMware, Inc." <pv-drivers@vmware.com>
+M:     pv-drivers@vmware.com
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/vmxnet3/
@@ -19058,7 +19074,6 @@ K:      regulator_get_optional
 
 VRF
 M:     David Ahern <dsahern@kernel.org>
-M:     Shrijeet Mukherjee <shrijeet@gmail.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/networking/vrf.rst
@@ -19409,7 +19424,7 @@ F:      drivers/net/ethernet/*/*/*xdp*
 K:     (?:\b|_)xdp(?:\b|_)
 
 XDP SOCKETS (AF_XDP)
-M:     Björn Töpel <bjorn.topel@intel.com>
+M:     Björn Töpel <bjorn@kernel.org>
 M:     Magnus Karlsson <magnus.karlsson@intel.com>
 R:     Jonathan Lemon <jonathan.lemon@gmail.com>
 L:     netdev@vger.kernel.org
@@ -19505,7 +19520,7 @@ F:      arch/x86/xen/*swiotlb*
 F:     drivers/xen/*swiotlb*
 
 XFS FILESYSTEM
-M:     Darrick J. Wong <darrick.wong@oracle.com>
+M:     Darrick J. Wong <djwong@kernel.org>
 M:     linux-xfs@vger.kernel.org
 L:     linux-xfs@vger.kernel.org
 S:     Supported
index 8b2c3f8..b0e4767 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
index 78c6f05..24862d1 100644 (file)
@@ -1105,6 +1105,12 @@ config HAVE_ARCH_PFN_VALID
 config ARCH_SUPPORTS_DEBUG_PAGEALLOC
        bool
 
+config ARCH_SPLIT_ARG64
+       bool
+       help
+          If a 32-bit architecture requires 64-bit arguments to be split into
+          pairs of 32-bit arguments, select this option.
+
 source "kernel/gcov/Kconfig"
 
 source "scripts/gcc-plugins/Kconfig"
index 0c6bf0d..578bdbb 100644 (file)
@@ -102,16 +102,22 @@ libs-y            += arch/arc/lib/ $(LIBGCC)
 
 boot           := arch/arc/boot
 
-#default target for make without any arguments.
-KBUILD_IMAGE   := $(boot)/bootpImage
-
-all:   bootpImage
-bootpImage: vmlinux
-
-boot_targets += uImage uImage.bin uImage.gz
+boot_targets := uImage.bin uImage.gz uImage.lzma
 
+PHONY += $(boot_targets)
 $(boot_targets): vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
+uimage-default-y                       := uImage.bin
+uimage-default-$(CONFIG_KERNEL_GZIP)   := uImage.gz
+uimage-default-$(CONFIG_KERNEL_LZMA)   := uImage.lzma
+
+PHONY += uImage
+uImage: $(uimage-default-y)
+       @ln -sf $< $(boot)/uImage
+       @$(kecho) '  Image $(boot)/uImage is ready'
+
+CLEAN_FILES += $(boot)/uImage
+
 archclean:
        $(Q)$(MAKE) $(clean)=$(boot)
index 538b92f..5648748 100644 (file)
@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-targets := vmlinux.bin vmlinux.bin.gz uImage
 
# uImage build relies on mkimage being available on your host for ARC target
 # You will need to build u-boot for ARC, rename mkimage to arc-elf32-mkimage
@@ -7,23 +6,18 @@ targets := vmlinux.bin vmlinux.bin.gz uImage
 
 OBJCOPYFLAGS= -O binary -R .note -R .note.gnu.build-id -R .comment -S
 
-LINUX_START_TEXT = $$(readelf -h vmlinux | \
+LINUX_START_TEXT = $$($(READELF) -h vmlinux | \
                        grep "Entry point address" | grep -o 0x.*)
 
 UIMAGE_LOADADDR    = $(CONFIG_LINUX_LINK_BASE)
 UIMAGE_ENTRYADDR   = $(LINUX_START_TEXT)
 
-suffix-y := bin
-suffix-$(CONFIG_KERNEL_GZIP)   := gz
-suffix-$(CONFIG_KERNEL_LZMA)   := lzma
-
-targets += uImage
+targets += vmlinux.bin
+targets += vmlinux.bin.gz
+targets += vmlinux.bin.lzma
 targets += uImage.bin
 targets += uImage.gz
 targets += uImage.lzma
-extra-y += vmlinux.bin
-extra-y += vmlinux.bin.gz
-extra-y += vmlinux.bin.lzma
 
 $(obj)/vmlinux.bin: vmlinux FORCE
        $(call if_changed,objcopy)
@@ -42,7 +36,3 @@ $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
 
 $(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
        $(call if_changed,uimage,lzma)
-
-$(obj)/uImage: $(obj)/uImage.$(suffix-y)
-       @ln -sf $(notdir $<) $@
-       @echo '  Image $@ is ready'
index 23e41e8..ad9b7fe 100644 (file)
@@ -10,6 +10,7 @@
 #ifndef __ASSEMBLY__
 
 #define clear_page(paddr)              memset((paddr), 0, PAGE_SIZE)
+#define copy_user_page(to, from, vaddr, pg)    copy_page(to, from)
 #define copy_page(to, from)            memcpy((to), (from), PAGE_SIZE)
 
 struct vm_area_struct;
index 1f5308a..1743506 100644 (file)
@@ -307,7 +307,7 @@ resume_user_mode_begin:
        mov r0, sp      ; pt_regs for arg to do_signal()/do_notify_resume()
 
        GET_CURR_THR_INFO_FLAGS   r9
-       and.f  0,  r9, TIF_SIGPENDING|TIF_NOTIFY_SIGNAL
+       and.f  0,  r9, _TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL
        bz .Lchk_notify_resume
 
        ; Normal Trap/IRQ entry only saves Scratch (caller-saved) regs
index 6b5c545..a2d10c2 100644 (file)
@@ -7,6 +7,7 @@ menuconfig ARC_SOC_HSDK
        depends on ISA_ARCV2
        select ARC_HAS_ACCL_REGS
        select ARC_IRQ_NO_AUTOSAVE
+       select ARC_FPU_SAVE_RESTORE
        select CLK_HSDK
        select RESET_CONTROLLER
        select RESET_HSDK
index 11d41e8..7dde9fb 100644 (file)
                clock-names = "sysclk";
        };
 };
+
+&aes1_target {
+       status = "disabled";
+};
+
+&aes2_target {
+       status = "disabled";
+};
index c4c6c7e..5898879 100644 (file)
                emac: gem@30000 {
                        compatible = "cadence,gem";
                        reg = <0x30000 0x10000>;
+                       interrupt-parent = <&vic0>;
                        interrupts = <31>;
                };
 
                dmac1: dmac@40000 {
                        compatible = "snps,dw-dmac";
                        reg = <0x40000 0x10000>;
+                       interrupt-parent = <&vic0>;
                        interrupts = <25>;
                };
 
                dmac2: dmac@50000 {
                        compatible = "snps,dw-dmac";
                        reg = <0x50000 0x10000>;
+                       interrupt-parent = <&vic0>;
                        interrupts = <26>;
                };
 
                axi2pico@c0000000 {
                        compatible = "picochip,axi2pico-pc3x2";
                        reg = <0xc0000000 0x10000>;
+                       interrupt-parent = <&vic0>;
                        interrupts = <13 14 15 16 17 18 19 20 21>;
                };
        };
index 496f9d3..60fe618 100644 (file)
                                panel@0 {
                                        compatible = "samsung,s6e63m0";
                                        reg = <0>;
+                                       max-brightness = <15>;
                                        vdd3-supply = <&panel_reg_3v0>;
                                        vci-supply = <&panel_reg_1v8>;
                                        reset-gpios = <&gpio4 11 GPIO_ACTIVE_LOW>;
index 1c11d15..b515c31 100644 (file)
@@ -279,6 +279,7 @@ CONFIG_SERIAL_OMAP_CONSOLE=y
 CONFIG_SERIAL_DEV_BUS=y
 CONFIG_I2C_CHARDEV=y
 CONFIG_SPI=y
+CONFIG_SPI_GPIO=m
 CONFIG_SPI_OMAP24XX=y
 CONFIG_SPI_TI_QSPI=m
 CONFIG_HSI=m
@@ -296,7 +297,6 @@ CONFIG_GPIO_TWL4030=y
 CONFIG_W1=m
 CONFIG_HDQ_MASTER_OMAP=m
 CONFIG_W1_SLAVE_DS250X=m
-CONFIG_POWER_AVS=y
 CONFIG_POWER_RESET=y
 CONFIG_POWER_RESET_GPIO=y
 CONFIG_BATTERY_BQ27XXX=m
index 7b5cf84..cdde8fd 100644 (file)
@@ -60,6 +60,7 @@ static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
                chacha_block_xor_neon(state, d, s, nrounds);
                if (d != dst)
                        memcpy(dst, buf, bytes);
+               state[12]++;
        }
 }
 
index f319170..56d6814 100644 (file)
@@ -230,10 +230,12 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
                break;
        case BUS_NOTIFY_BIND_DRIVER:
                od = to_omap_device(pdev);
-               if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) &&
-                   pm_runtime_status_suspended(dev)) {
+               if (od) {
                        od->_driver_status = BUS_NOTIFY_BIND_DRIVER;
-                       pm_runtime_set_active(dev);
+                       if (od->_state == OMAP_DEVICE_STATE_ENABLED &&
+                           pm_runtime_status_suspended(dev)) {
+                               pm_runtime_set_active(dev);
+                       }
                }
                break;
        case BUS_NOTIFY_ADD_DEVICE:
index eab281a..09076ad 100644 (file)
@@ -71,7 +71,7 @@ static struct omap_voltdm_pmic omap_cpcap_iva = {
        .vp_vstepmin = OMAP4_VP_VSTEPMIN_VSTEPMIN,
        .vp_vstepmax = OMAP4_VP_VSTEPMAX_VSTEPMAX,
        .vddmin = 900000,
-       .vddmax = 1350000,
+       .vddmax = 1375000,
        .vp_timeout_us = OMAP4_VP_VLIMITTO_TIMEOUT_US,
        .i2c_slave_addr = 0x44,
        .volt_reg_addr = 0x0,
index 60e901c..5a957a9 100644 (file)
@@ -371,7 +371,7 @@ static int __init xen_guest_init(void)
        }
        gnttab_init();
        if (!xen_initial_domain())
-               xenbus_probe(NULL);
+               xenbus_probe();
 
        /*
         * Making sure board specific code will not set up ops for
index 05e1735..f39568b 100644 (file)
@@ -174,8 +174,6 @@ config ARM64
        select HAVE_NMI
        select HAVE_PATA_PLATFORM
        select HAVE_PERF_EVENTS
-       select HAVE_PERF_EVENTS_NMI if ARM64_PSEUDO_NMI && HW_PERF_EVENTS
-       select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
        select HAVE_PERF_REGS
        select HAVE_PERF_USER_STACK_DUMP
        select HAVE_REGS_AND_STACK_ACCESS_API
index 6be9b37..9030920 100644 (file)
@@ -10,7 +10,7 @@
 #
 # Copyright (C) 1995-2001 by Russell King
 
-LDFLAGS_vmlinux        :=--no-undefined -X -z norelro
+LDFLAGS_vmlinux        :=--no-undefined -X
 
 ifeq ($(CONFIG_RELOCATABLE), y)
 # Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
@@ -115,16 +115,20 @@ KBUILD_CPPFLAGS   += -mbig-endian
 CHECKFLAGS     += -D__AARCH64EB__
 # Prefer the baremetal ELF build target, but not all toolchains include
 # it so fall back to the standard linux version if needed.
-KBUILD_LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb)
+KBUILD_LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb -z norelro)
 UTS_MACHINE    := aarch64_be
 else
 KBUILD_CPPFLAGS        += -mlittle-endian
 CHECKFLAGS     += -D__AARCH64EL__
 # Same as above, prefer ELF but fall back to linux target if needed.
-KBUILD_LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux)
+KBUILD_LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux -z norelro)
 UTS_MACHINE    := aarch64
 endif
 
+ifeq ($(CONFIG_LD_IS_LLD), y)
+KBUILD_LDFLAGS += -z norelro
+endif
+
 CHECKFLAGS     += -D__aarch64__
 
 ifeq ($(CONFIG_DYNAMIC_FTRACE_WITH_REGS),y)
index fa6e690..53a9b76 100644 (file)
                                compatible = "snps,dw-apb-gpio-port";
                                gpio-controller;
                                #gpio-cells = <2>;
-                               snps,nr-gpios = <32>;
+                               ngpios = <32>;
                                reg = <0>;
                                interrupt-controller;
                                #interrupt-cells = <2>;
                                compatible = "snps,dw-apb-gpio-port";
                                gpio-controller;
                                #gpio-cells = <2>;
-                               snps,nr-gpios = <32>;
+                               ngpios = <32>;
                                reg = <0>;
                                interrupt-controller;
                                #interrupt-cells = <2>;
                                compatible = "snps,dw-apb-gpio-port";
                                gpio-controller;
                                #gpio-cells = <2>;
-                               snps,nr-gpios = <8>;
+                               ngpios = <8>;
                                reg = <0>;
                                interrupt-controller;
                                #interrupt-cells = <2>;
index 015ddff..b56a4b2 100644 (file)
@@ -17,7 +17,7 @@
 #include <asm/lse.h>
 
 #define ATOMIC_OP(op)                                                  \
-static inline void arch_##op(int i, atomic_t *v)                       \
+static __always_inline void arch_##op(int i, atomic_t *v)              \
 {                                                                      \
        __lse_ll_sc_body(op, i, v);                                     \
 }
@@ -32,7 +32,7 @@ ATOMIC_OP(atomic_sub)
 #undef ATOMIC_OP
 
 #define ATOMIC_FETCH_OP(name, op)                                      \
-static inline int arch_##op##name(int i, atomic_t *v)                  \
+static __always_inline int arch_##op##name(int i, atomic_t *v)         \
 {                                                                      \
        return __lse_ll_sc_body(op##name, i, v);                        \
 }
@@ -56,7 +56,7 @@ ATOMIC_FETCH_OPS(atomic_sub_return)
 #undef ATOMIC_FETCH_OPS
 
 #define ATOMIC64_OP(op)                                                        \
-static inline void arch_##op(long i, atomic64_t *v)                    \
+static __always_inline void arch_##op(long i, atomic64_t *v)           \
 {                                                                      \
        __lse_ll_sc_body(op, i, v);                                     \
 }
@@ -71,7 +71,7 @@ ATOMIC64_OP(atomic64_sub)
 #undef ATOMIC64_OP
 
 #define ATOMIC64_FETCH_OP(name, op)                                    \
-static inline long arch_##op##name(long i, atomic64_t *v)              \
+static __always_inline long arch_##op##name(long i, atomic64_t *v)     \
 {                                                                      \
        return __lse_ll_sc_body(op##name, i, v);                        \
 }
@@ -94,7 +94,7 @@ ATOMIC64_FETCH_OPS(atomic64_sub_return)
 #undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_FETCH_OPS
 
-static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
+static __always_inline long arch_atomic64_dec_if_positive(atomic64_t *v)
 {
        return __lse_ll_sc_body(atomic64_dec_if_positive, v);
 }
index 11beda8..8fcfab0 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/jump_label.h>
 #include <linux/kvm_types.h>
 #include <linux/percpu.h>
+#include <linux/psci.h>
 #include <asm/arch_gicv3.h>
 #include <asm/barrier.h>
 #include <asm/cpufeature.h>
@@ -240,6 +241,28 @@ struct kvm_host_data {
        struct kvm_pmu_events pmu_events;
 };
 
+struct kvm_host_psci_config {
+       /* PSCI version used by host. */
+       u32 version;
+
+       /* Function IDs used by host if version is v0.1. */
+       struct psci_0_1_function_ids function_ids_0_1;
+
+       bool psci_0_1_cpu_suspend_implemented;
+       bool psci_0_1_cpu_on_implemented;
+       bool psci_0_1_cpu_off_implemented;
+       bool psci_0_1_migrate_implemented;
+};
+
+extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
+#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)
+
+extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
+#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)
+
+extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
+#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)
+
 struct vcpu_reset_state {
        unsigned long   pc;
        unsigned long   r0;
index 6f986e0..f0fe0cc 100644 (file)
@@ -176,10 +176,21 @@ static inline void __uaccess_enable_hw_pan(void)
  * The Tag check override (TCO) bit disables temporarily the tag checking
  * preventing the issue.
  */
-static inline void uaccess_disable_privileged(void)
+static inline void __uaccess_disable_tco(void)
 {
        asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(0),
                                 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+}
+
+static inline void __uaccess_enable_tco(void)
+{
+       asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
+                                ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+}
+
+static inline void uaccess_disable_privileged(void)
+{
+       __uaccess_disable_tco();
 
        if (uaccess_ttbr0_disable())
                return;
@@ -189,8 +200,7 @@ static inline void uaccess_disable_privileged(void)
 
 static inline void uaccess_enable_privileged(void)
 {
-       asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
-                                ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+       __uaccess_enable_tco();
 
        if (uaccess_ttbr0_enable())
                return;
index f42fd9e..3017844 100644 (file)
@@ -75,7 +75,7 @@ int main(void)
   DEFINE(S_SDEI_TTBR1,         offsetof(struct pt_regs, sdei_ttbr1));
   DEFINE(S_PMR_SAVE,           offsetof(struct pt_regs, pmr_save));
   DEFINE(S_STACKFRAME,         offsetof(struct pt_regs, stackframe));
-  DEFINE(S_FRAME_SIZE,         sizeof(struct pt_regs));
+  DEFINE(PT_REGS_SIZE,         sizeof(struct pt_regs));
   BLANK();
 #ifdef CONFIG_COMPAT
   DEFINE(COMPAT_SIGFRAME_REGS_OFFSET,          offsetof(struct compat_sigframe, uc.uc_mcontext.arm_r0));
index 7ffb5f1..e99edde 100644 (file)
@@ -2568,7 +2568,7 @@ static void verify_hyp_capabilities(void)
        int parange, ipa_max;
        unsigned int safe_vmid_bits, vmid_bits;
 
-       if (!IS_ENABLED(CONFIG_KVM) || !IS_ENABLED(CONFIG_KVM_ARM_HOST))
+       if (!IS_ENABLED(CONFIG_KVM))
                return;
 
        safe_mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
index a338f40..b3e4f9a 100644 (file)
@@ -35,7 +35,7 @@
  */
        .macro  ftrace_regs_entry, allregs=0
        /* Make room for pt_regs, plus a callee frame */
-       sub     sp, sp, #(S_FRAME_SIZE + 16)
+       sub     sp, sp, #(PT_REGS_SIZE + 16)
 
        /* Save function arguments (and x9 for simplicity) */
        stp     x0, x1, [sp, #S_X0]
        .endif
 
        /* Save the callsite's SP and LR */
-       add     x10, sp, #(S_FRAME_SIZE + 16)
+       add     x10, sp, #(PT_REGS_SIZE + 16)
        stp     x9, x10, [sp, #S_LR]
 
        /* Save the PC after the ftrace callsite */
        str     x30, [sp, #S_PC]
 
        /* Create a frame record for the callsite above pt_regs */
-       stp     x29, x9, [sp, #S_FRAME_SIZE]
-       add     x29, sp, #S_FRAME_SIZE
+       stp     x29, x9, [sp, #PT_REGS_SIZE]
+       add     x29, sp, #PT_REGS_SIZE
 
        /* Create our frame record within pt_regs. */
        stp     x29, x30, [sp, #S_STACKFRAME]
@@ -120,7 +120,7 @@ ftrace_common_return:
        ldr     x9, [sp, #S_PC]
 
        /* Restore the callsite's SP */
-       add     sp, sp, #S_FRAME_SIZE + 16
+       add     sp, sp, #PT_REGS_SIZE + 16
 
        ret     x9
 SYM_CODE_END(ftrace_common)
@@ -130,7 +130,7 @@ SYM_CODE_START(ftrace_graph_caller)
        ldr     x0, [sp, #S_PC]
        sub     x0, x0, #AARCH64_INSN_SIZE      // ip (callsite's BL insn)
        add     x1, sp, #S_LR                   // parent_ip (callsite's LR)
-       ldr     x2, [sp, #S_FRAME_SIZE]         // parent fp (callsite's FP)
+       ldr     x2, [sp, #PT_REGS_SIZE]         // parent fp (callsite's FP)
        bl      prepare_ftrace_return
        b       ftrace_common_return
 SYM_CODE_END(ftrace_graph_caller)
index 2a93fa5..c9bae73 100644 (file)
@@ -75,7 +75,7 @@ alternative_else_nop_endif
        .endif
 #endif
 
-       sub     sp, sp, #S_FRAME_SIZE
+       sub     sp, sp, #PT_REGS_SIZE
 #ifdef CONFIG_VMAP_STACK
        /*
         * Test whether the SP has overflowed, without corrupting a GPR.
@@ -96,7 +96,7 @@ alternative_else_nop_endif
         * userspace, and can clobber EL0 registers to free up GPRs.
         */
 
-       /* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
+       /* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0. */
        msr     tpidr_el0, x0
 
        /* Recover the original x0 value and stash it in tpidrro_el0 */
@@ -182,7 +182,6 @@ alternative_else_nop_endif
        mrs_s   \tmp2, SYS_GCR_EL1
        bfi     \tmp2, \tmp, #0, #16
        msr_s   SYS_GCR_EL1, \tmp2
-       isb
 #endif
        .endm
 
@@ -194,6 +193,7 @@ alternative_else_nop_endif
        ldr_l   \tmp, gcr_kernel_excl
 
        mte_set_gcr \tmp, \tmp2
+       isb
 1:
 #endif
        .endm
@@ -253,7 +253,7 @@ alternative_else_nop_endif
 
        scs_load tsk, x20
        .else
-       add     x21, sp, #S_FRAME_SIZE
+       add     x21, sp, #PT_REGS_SIZE
        get_current_task tsk
        .endif /* \el == 0 */
        mrs     x22, elr_el1
@@ -377,7 +377,7 @@ alternative_else_nop_endif
        ldp     x26, x27, [sp, #16 * 13]
        ldp     x28, x29, [sp, #16 * 14]
        ldr     lr, [sp, #S_LR]
-       add     sp, sp, #S_FRAME_SIZE           // restore sp
+       add     sp, sp, #PT_REGS_SIZE           // restore sp
 
        .if     \el == 0
 alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
@@ -580,12 +580,12 @@ __bad_stack:
 
        /*
         * Store the original GPRs to the new stack. The orginal SP (minus
-        * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
+        * PT_REGS_SIZE) was stashed in tpidr_el0 by kernel_ventry.
         */
-       sub     sp, sp, #S_FRAME_SIZE
+       sub     sp, sp, #PT_REGS_SIZE
        kernel_entry 1
        mrs     x0, tpidr_el0
-       add     x0, x0, #S_FRAME_SIZE
+       add     x0, x0, #PT_REGS_SIZE
        str     x0, [sp, #S_SP]
 
        /* Stash the regs for handle_bad_stack */
index 38bb07e..3605f77 100644 (file)
@@ -23,8 +23,6 @@
 #include <linux/platform_device.h>
 #include <linux/sched_clock.h>
 #include <linux/smp.h>
-#include <linux/nmi.h>
-#include <linux/cpufreq.h>
 
 /* ARMv8 Cortex-A53 specific event types. */
 #define ARMV8_A53_PERFCTR_PREF_LINEFILL                                0xC2
@@ -1250,21 +1248,10 @@ static struct platform_driver armv8_pmu_driver = {
 
 static int __init armv8_pmu_driver_init(void)
 {
-       int ret;
-
        if (acpi_disabled)
-               ret = platform_driver_register(&armv8_pmu_driver);
+               return platform_driver_register(&armv8_pmu_driver);
        else
-               ret = arm_pmu_acpi_probe(armv8_pmuv3_init);
-
-       /*
-        * Try to re-initialize lockup detector after PMU init in
-        * case PMU events are triggered via NMIs.
-        */
-       if (ret == 0 && arm_pmu_irq_is_nmi())
-               lockup_detector_init();
-
-       return ret;
+               return arm_pmu_acpi_probe(armv8_pmuv3_init);
 }
 device_initcall(armv8_pmu_driver_init)
 
@@ -1322,27 +1309,3 @@ void arch_perf_update_userpage(struct perf_event *event,
        userpg->cap_user_time_zero = 1;
        userpg->cap_user_time_short = 1;
 }
-
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
-/*
- * Safe maximum CPU frequency in case a particular platform doesn't implement
- * cpufreq driver. Although, architecture doesn't put any restrictions on
- * maximum frequency but 5 GHz seems to be safe maximum given the available
- * Arm CPUs in the market which are clocked much less than 5 GHz. On the other
- * hand, we can't make it much higher as it would lead to a large hard-lockup
- * detection timeout on parts which are running slower (eg. 1GHz on
- * Developerbox) and doesn't possess a cpufreq driver.
- */
-#define SAFE_MAX_CPU_FREQ      5000000000UL // 5 GHz
-u64 hw_nmi_get_sample_period(int watchdog_thresh)
-{
-       unsigned int cpu = smp_processor_id();
-       unsigned long max_cpu_freq;
-
-       max_cpu_freq = cpufreq_get_hw_max_freq(cpu) * 1000UL;
-       if (!max_cpu_freq)
-               max_cpu_freq = SAFE_MAX_CPU_FREQ;
-
-       return (u64)max_cpu_freq * watchdog_thresh;
-}
-#endif
index 89c64ad..66aac28 100644 (file)
@@ -352,8 +352,8 @@ kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned int esr)
        unsigned long addr = instruction_pointer(regs);
        struct kprobe *cur = kprobe_running();
 
-       if (cur && (kcb->kprobe_status == KPROBE_HIT_SS)
-           && ((unsigned long)&cur->ainsn.api.insn[1] == addr)) {
+       if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
+           ((unsigned long)&cur->ainsn.api.insn[1] == addr)) {
                kprobes_restore_local_irqflag(kcb, regs);
                post_kprobe_handler(cur, kcb, regs);
 
index 890ca72..288a84e 100644 (file)
@@ -25,7 +25,7 @@
        stp x24, x25, [sp, #S_X24]
        stp x26, x27, [sp, #S_X26]
        stp x28, x29, [sp, #S_X28]
-       add x0, sp, #S_FRAME_SIZE
+       add x0, sp, #PT_REGS_SIZE
        stp lr, x0, [sp, #S_LR]
        /*
         * Construct a useful saved PSTATE
@@ -62,7 +62,7 @@
        .endm
 
 SYM_CODE_START(kretprobe_trampoline)
-       sub sp, sp, #S_FRAME_SIZE
+       sub sp, sp, #PT_REGS_SIZE
 
        save_all_base_regs
 
@@ -76,7 +76,7 @@ SYM_CODE_START(kretprobe_trampoline)
 
        restore_all_base_regs
 
-       add sp, sp, #S_FRAME_SIZE
+       add sp, sp, #PT_REGS_SIZE
        ret
 
 SYM_CODE_END(kretprobe_trampoline)
index f71d6ce..6237486 100644 (file)
@@ -914,13 +914,6 @@ static void do_signal(struct pt_regs *regs)
 asmlinkage void do_notify_resume(struct pt_regs *regs,
                                 unsigned long thread_flags)
 {
-       /*
-        * The assembly code enters us with IRQs off, but it hasn't
-        * informed the tracing code of that for efficiency reasons.
-        * Update the trace code with the current status.
-        */
-       trace_hardirqs_off();
-
        do {
                if (thread_flags & _TIF_NEED_RESCHED) {
                        /* Unmask Debug and SError for the next task */
index 6bc3a36..ad00f99 100644 (file)
@@ -434,7 +434,7 @@ static void __init hyp_mode_check(void)
                           "CPU: CPUs started in inconsistent modes");
        else
                pr_info("CPU: All CPU(s) started at EL1\n");
-       if (IS_ENABLED(CONFIG_KVM))
+       if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode())
                kvm_compute_layout();
 }
 
@@ -807,7 +807,6 @@ int arch_show_interrupts(struct seq_file *p, int prec)
        unsigned int cpu, i;
 
        for (i = 0; i < NR_IPI; i++) {
-               unsigned int irq = irq_desc_get_irq(ipi_desc[i]);
                seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
                           prec >= 4 ? " " : "");
                for_each_online_cpu(cpu)
index f61e9d8..c2877c3 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <asm/daifflags.h>
 #include <asm/debug-monitors.h>
+#include <asm/exception.h>
 #include <asm/fpsimd.h>
 #include <asm/syscall.h>
 #include <asm/thread_info.h>
@@ -165,15 +166,8 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
        if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
                local_daif_mask();
                flags = current_thread_info()->flags;
-               if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP)) {
-                       /*
-                        * We're off to userspace, where interrupts are
-                        * always enabled after we restore the flags from
-                        * the SPSR.
-                        */
-                       trace_hardirqs_on();
+               if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP))
                        return;
-               }
                local_daif_restore(DAIF_PROCCTX);
        }
 
index 08156be..6895ce7 100644 (file)
@@ -42,7 +42,6 @@
 #include <asm/smp.h>
 #include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
-#include <asm/exception.h>
 #include <asm/system_misc.h>
 #include <asm/sysreg.h>
 
index a8f8e40..cd9c3fa 100644 (file)
@@ -24,8 +24,7 @@ btildflags-$(CONFIG_ARM64_BTI_KERNEL) += -z force-bti
 # routines, as x86 does (see 6f121e548f83 ("x86, vdso: Reimplement vdso.so
 # preparation in build-time C")).
 ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv       \
-            -Bsymbolic $(call ld-option, --no-eh-frame-hdr) --build-id=sha1 -n \
-            $(btildflags-y) -T
+            -Bsymbolic --build-id=sha1 -n $(btildflags-y) -T
 
 ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
 ccflags-y += -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
index d808ad3..61dbb4c 100644 (file)
@@ -40,9 +40,6 @@ SECTIONS
        PROVIDE (_etext = .);
        PROVIDE (etext = .);
 
-       .eh_frame_hdr   : { *(.eh_frame_hdr) }          :text   :eh_frame_hdr
-       .eh_frame       : { KEEP (*(.eh_frame)) }       :text
-
        .dynamic        : { *(.dynamic) }               :text   :dynamic
 
        .rodata         : { *(.rodata*) }               :text
@@ -54,6 +51,7 @@ SECTIONS
                *(.note.GNU-stack)
                *(.data .data.* .gnu.linkonce.d.* .sdata*)
                *(.bss .sbss .dynbss .dynsbss)
+               *(.eh_frame .eh_frame_hdr)
        }
 }
 
@@ -66,7 +64,6 @@ PHDRS
        text            PT_LOAD         FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
        dynamic         PT_DYNAMIC      FLAGS(4);               /* PF_R */
        note            PT_NOTE         FLAGS(4);               /* PF_R */
-       eh_frame_hdr    PT_GNU_EH_FRAME;
 }
 
 /*
index 043756d..3964acf 100644 (file)
@@ -49,14 +49,6 @@ if KVM
 
 source "virt/kvm/Kconfig"
 
-config KVM_ARM_PMU
-       bool "Virtual Performance Monitoring Unit (PMU) support"
-       depends on HW_PERF_EVENTS
-       default y
-       help
-         Adds support for a virtual Performance Monitoring Unit (PMU) in
-         virtual machines.
-
 endif # KVM
 
 endif # VIRTUALIZATION
index 60fd181..13b0172 100644 (file)
@@ -24,4 +24,4 @@ kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
         vgic/vgic-mmio-v3.o vgic/vgic-kvm-device.o \
         vgic/vgic-its.o vgic/vgic-debug.o
 
-kvm-$(CONFIG_KVM_ARM_PMU)  += pmu-emul.o
+kvm-$(CONFIG_HW_PERF_EVENTS)  += pmu-emul.o
index 32ba6fb..74e0699 100644 (file)
@@ -1129,9 +1129,10 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
        if (!irqchip_in_kernel(vcpu->kvm))
                goto no_vgic;
 
-       if (!vgic_initialized(vcpu->kvm))
-               return -ENODEV;
-
+       /*
+        * At this stage, we have the guarantee that the vgic is both
+        * available and initialized.
+        */
        if (!timer_irqs_are_valid(vcpu)) {
                kvm_debug("incorrectly configured timer irqs\n");
                return -EINVAL;
index 6e637d2..04c4485 100644 (file)
@@ -65,10 +65,6 @@ static bool vgic_present;
 static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
 DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
-extern u64 kvm_nvhe_sym(__cpu_logical_map)[NR_CPUS];
-extern u32 kvm_nvhe_sym(kvm_host_psci_version);
-extern struct psci_0_1_function_ids kvm_nvhe_sym(kvm_host_psci_0_1_function_ids);
-
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
        return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
@@ -584,11 +580,9 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
                 * Map the VGIC hardware resources before running a vcpu the
                 * first time on this VM.
                 */
-               if (unlikely(!vgic_ready(kvm))) {
-                       ret = kvm_vgic_map_resources(kvm);
-                       if (ret)
-                               return ret;
-               }
+               ret = kvm_vgic_map_resources(kvm);
+               if (ret)
+                       return ret;
        } else {
                /*
                 * Tell the rest of the code that there are userspace irqchip
@@ -1574,12 +1568,12 @@ static struct notifier_block hyp_init_cpu_pm_nb = {
        .notifier_call = hyp_init_cpu_pm_notifier,
 };
 
-static void __init hyp_cpu_pm_init(void)
+static void hyp_cpu_pm_init(void)
 {
        if (!is_protected_kvm_enabled())
                cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
 }
-static void __init hyp_cpu_pm_exit(void)
+static void hyp_cpu_pm_exit(void)
 {
        if (!is_protected_kvm_enabled())
                cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
@@ -1604,9 +1598,12 @@ static void init_cpu_logical_map(void)
         * allow any other CPUs from the `possible` set to boot.
         */
        for_each_online_cpu(cpu)
-               kvm_nvhe_sym(__cpu_logical_map)[cpu] = cpu_logical_map(cpu);
+               hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu);
 }
 
+#define init_psci_0_1_impl_state(config, what) \
+       config.psci_0_1_ ## what ## _implemented = psci_ops.what
+
 static bool init_psci_relay(void)
 {
        /*
@@ -1618,8 +1615,15 @@ static bool init_psci_relay(void)
                return false;
        }
 
-       kvm_nvhe_sym(kvm_host_psci_version) = psci_ops.get_version();
-       kvm_nvhe_sym(kvm_host_psci_0_1_function_ids) = get_psci_0_1_function_ids();
+       kvm_host_psci_config.version = psci_ops.get_version();
+
+       if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
+               kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();
+               init_psci_0_1_impl_state(kvm_host_psci_config, cpu_suspend);
+               init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on);
+               init_psci_0_1_impl_state(kvm_host_psci_config, cpu_off);
+               init_psci_0_1_impl_state(kvm_host_psci_config, migrate);
+       }
        return true;
 }
 
index b1f6092..6171635 100644 (file)
@@ -59,4 +59,13 @@ static inline void __adjust_pc(struct kvm_vcpu *vcpu)
        }
 }
 
+/*
+ * Skip an instruction while host sysregs are live.
+ * Assumes host is always 64-bit.
+ */
+static inline void kvm_skip_host_instr(void)
+{
+       write_sysreg_el2(read_sysreg_el2(SYS_ELR) + 4, SYS_ELR);
+}
+
 #endif
index bde658d..a906f9e 100644 (file)
@@ -157,11 +157,6 @@ static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
        __kvm_hyp_host_forward_smc(host_ctxt);
 }
 
-static void skip_host_instruction(void)
-{
-       write_sysreg_el2(read_sysreg_el2(SYS_ELR) + 4, SYS_ELR);
-}
-
 static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
 {
        bool handled;
@@ -170,11 +165,8 @@ static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
        if (!handled)
                default_host_smc_handler(host_ctxt);
 
-       /*
-        * Unlike HVC, the return address of an SMC is the instruction's PC.
-        * Move the return address past the instruction.
-        */
-       skip_host_instruction();
+       /* SMC was trapped, move ELR past the current PC. */
+       kvm_skip_host_instr();
 }
 
 void handle_trap(struct kvm_cpu_context *host_ctxt)
index cbab0c6..2997aa1 100644 (file)
  * Other CPUs should not be allowed to boot because their features were
  * not checked against the finalized system capabilities.
  */
-u64 __ro_after_init __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
+u64 __ro_after_init hyp_cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
 
 u64 cpu_logical_map(unsigned int cpu)
 {
-       if (cpu >= ARRAY_SIZE(__cpu_logical_map))
+       if (cpu >= ARRAY_SIZE(hyp_cpu_logical_map))
                hyp_panic();
 
-       return __cpu_logical_map[cpu];
+       return hyp_cpu_logical_map[cpu];
 }
 
 unsigned long __hyp_per_cpu_offset(unsigned int cpu)
index 08dc9de..e394784 100644 (file)
@@ -7,11 +7,8 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
-#include <kvm/arm_hypercalls.h>
 #include <linux/arm-smccc.h>
 #include <linux/kvm_host.h>
-#include <linux/psci.h>
-#include <kvm/arm_psci.h>
 #include <uapi/linux/psci.h>
 
 #include <nvhe/trap_handler.h>
@@ -22,9 +19,8 @@ void kvm_hyp_cpu_resume(unsigned long r0);
 void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
 
 /* Config options set by the host. */
-__ro_after_init u32 kvm_host_psci_version;
-__ro_after_init struct psci_0_1_function_ids kvm_host_psci_0_1_function_ids;
-__ro_after_init s64 hyp_physvirt_offset;
+struct kvm_host_psci_config __ro_after_init kvm_host_psci_config;
+s64 __ro_after_init hyp_physvirt_offset;
 
 #define __hyp_pa(x) ((phys_addr_t)((x)) + hyp_physvirt_offset)
 
@@ -47,19 +43,16 @@ struct psci_boot_args {
 static DEFINE_PER_CPU(struct psci_boot_args, cpu_on_args) = PSCI_BOOT_ARGS_INIT;
 static DEFINE_PER_CPU(struct psci_boot_args, suspend_args) = PSCI_BOOT_ARGS_INIT;
 
-static u64 get_psci_func_id(struct kvm_cpu_context *host_ctxt)
-{
-       DECLARE_REG(u64, func_id, host_ctxt, 0);
-
-       return func_id;
-}
+#define        is_psci_0_1(what, func_id)                                      \
+       (kvm_host_psci_config.psci_0_1_ ## what ## _implemented &&      \
+        (func_id) == kvm_host_psci_config.function_ids_0_1.what)
 
 static bool is_psci_0_1_call(u64 func_id)
 {
-       return (func_id == kvm_host_psci_0_1_function_ids.cpu_suspend) ||
-              (func_id == kvm_host_psci_0_1_function_ids.cpu_on) ||
-              (func_id == kvm_host_psci_0_1_function_ids.cpu_off) ||
-              (func_id == kvm_host_psci_0_1_function_ids.migrate);
+       return (is_psci_0_1(cpu_suspend, func_id) ||
+               is_psci_0_1(cpu_on, func_id) ||
+               is_psci_0_1(cpu_off, func_id) ||
+               is_psci_0_1(migrate, func_id));
 }
 
 static bool is_psci_0_2_call(u64 func_id)
@@ -69,16 +62,6 @@ static bool is_psci_0_2_call(u64 func_id)
               (PSCI_0_2_FN64(0) <= func_id && func_id <= PSCI_0_2_FN64(31));
 }
 
-static bool is_psci_call(u64 func_id)
-{
-       switch (kvm_host_psci_version) {
-       case PSCI_VERSION(0, 1):
-               return is_psci_0_1_call(func_id);
-       default:
-               return is_psci_0_2_call(func_id);
-       }
-}
-
 static unsigned long psci_call(unsigned long fn, unsigned long arg0,
                               unsigned long arg1, unsigned long arg2)
 {
@@ -248,15 +231,14 @@ asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)
 
 static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
 {
-       if ((func_id == kvm_host_psci_0_1_function_ids.cpu_off) ||
-           (func_id == kvm_host_psci_0_1_function_ids.migrate))
+       if (is_psci_0_1(cpu_off, func_id) || is_psci_0_1(migrate, func_id))
                return psci_forward(host_ctxt);
-       else if (func_id == kvm_host_psci_0_1_function_ids.cpu_on)
+       if (is_psci_0_1(cpu_on, func_id))
                return psci_cpu_on(func_id, host_ctxt);
-       else if (func_id == kvm_host_psci_0_1_function_ids.cpu_suspend)
+       if (is_psci_0_1(cpu_suspend, func_id))
                return psci_cpu_suspend(func_id, host_ctxt);
-       else
-               return PSCI_RET_NOT_SUPPORTED;
+
+       return PSCI_RET_NOT_SUPPORTED;
 }
 
 static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
@@ -298,20 +280,23 @@ static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_
 
 bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)
 {
-       u64 func_id = get_psci_func_id(host_ctxt);
+       DECLARE_REG(u64, func_id, host_ctxt, 0);
        unsigned long ret;
 
-       if (!is_psci_call(func_id))
-               return false;
-
-       switch (kvm_host_psci_version) {
+       switch (kvm_host_psci_config.version) {
        case PSCI_VERSION(0, 1):
+               if (!is_psci_0_1_call(func_id))
+                       return false;
                ret = psci_0_1_handler(func_id, host_ctxt);
                break;
        case PSCI_VERSION(0, 2):
+               if (!is_psci_0_2_call(func_id))
+                       return false;
                ret = psci_0_2_handler(func_id, host_ctxt);
                break;
        default:
+               if (!is_psci_0_2_call(func_id))
+                       return false;
                ret = psci_1_0_handler(func_id, host_ctxt);
                break;
        }
index 398f6df..4ad66a5 100644 (file)
@@ -850,8 +850,6 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
                   return -EINVAL;
        }
 
-       kvm_pmu_vcpu_reset(vcpu);
-
        return 0;
 }
 
index 3313ded..42ccc27 100644 (file)
@@ -594,6 +594,10 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
        u64 pmcr, val;
 
+       /* No PMU available, PMCR_EL0 may UNDEF... */
+       if (!kvm_arm_support_pmu_v3())
+               return;
+
        pmcr = read_sysreg(pmcr_el0);
        /*
         * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
@@ -919,7 +923,7 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 
 #define reg_to_encoding(x)                                             \
        sys_reg((u32)(x)->Op0, (u32)(x)->Op1,                           \
-               (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2);
+               (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
 
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)                                     \
index d8cc51b..70fcd6a 100644 (file)
@@ -34,17 +34,16 @@ static u64 __early_kern_hyp_va(u64 addr)
 }
 
 /*
- * Store a hyp VA <-> PA offset into a hyp-owned variable.
+ * Store a hyp VA <-> PA offset into a EL2-owned variable.
  */
 static void init_hyp_physvirt_offset(void)
 {
-       extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
        u64 kern_va, hyp_va;
 
        /* Compute the offset from the hyp VA and PA of a random symbol. */
-       kern_va = (u64)kvm_ksym_ref(__hyp_text_start);
+       kern_va = (u64)lm_alias(__hyp_text_start);
        hyp_va = __early_kern_hyp_va(kern_va);
-       CHOOSE_NVHE_SYM(hyp_physvirt_offset) = (s64)__pa(kern_va) - (s64)hyp_va;
+       hyp_physvirt_offset = (s64)__pa(kern_va) - (s64)hyp_va;
 }
 
 /*
index 32e32d6..052917d 100644 (file)
@@ -419,7 +419,8 @@ int vgic_lazy_init(struct kvm *kvm)
  * Map the MMIO regions depending on the VGIC model exposed to the guest
  * called on the first VCPU run.
  * Also map the virtual CPU interface into the VM.
- * v2/v3 derivatives call vgic_init if not already done.
+ * v2 calls vgic_init() if not already done.
+ * v3 and derivatives return an error if the VGIC is not initialized.
  * vgic_ready() returns true if this function has succeeded.
  * @kvm: kvm struct pointer
  */
@@ -428,7 +429,13 @@ int kvm_vgic_map_resources(struct kvm *kvm)
        struct vgic_dist *dist = &kvm->arch.vgic;
        int ret = 0;
 
+       if (likely(vgic_ready(kvm)))
+               return 0;
+
        mutex_lock(&kvm->lock);
+       if (vgic_ready(kvm))
+               goto out;
+
        if (!irqchip_in_kernel(kvm))
                goto out;
 
@@ -439,6 +446,8 @@ int kvm_vgic_map_resources(struct kvm *kvm)
 
        if (ret)
                __kvm_vgic_destroy(kvm);
+       else
+               dist->ready = true;
 
 out:
        mutex_unlock(&kvm->lock);
index ebf53a4..11934c2 100644 (file)
@@ -306,20 +306,15 @@ int vgic_v2_map_resources(struct kvm *kvm)
        struct vgic_dist *dist = &kvm->arch.vgic;
        int ret = 0;
 
-       if (vgic_ready(kvm))
-               goto out;
-
        if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
            IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
                kvm_err("Need to set vgic cpu and dist addresses first\n");
-               ret = -ENXIO;
-               goto out;
+               return -ENXIO;
        }
 
        if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
                kvm_err("VGIC CPU and dist frames overlap\n");
-               ret = -EINVAL;
-               goto out;
+               return -EINVAL;
        }
 
        /*
@@ -329,13 +324,13 @@ int vgic_v2_map_resources(struct kvm *kvm)
        ret = vgic_init(kvm);
        if (ret) {
                kvm_err("Unable to initialize VGIC dynamic data structures\n");
-               goto out;
+               return ret;
        }
 
        ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
        if (ret) {
                kvm_err("Unable to register VGIC MMIO regions\n");
-               goto out;
+               return ret;
        }
 
        if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
@@ -344,14 +339,11 @@ int vgic_v2_map_resources(struct kvm *kvm)
                                            KVM_VGIC_V2_CPU_SIZE, true);
                if (ret) {
                        kvm_err("Unable to remap VGIC CPU to VCPU\n");
-                       goto out;
+                       return ret;
                }
        }
 
-       dist->ready = true;
-
-out:
-       return ret;
+       return 0;
 }
 
 DEFINE_STATIC_KEY_FALSE(vgic_v2_cpuif_trap);
index 9cdf39a..52915b3 100644 (file)
@@ -500,29 +500,23 @@ int vgic_v3_map_resources(struct kvm *kvm)
        int ret = 0;
        int c;
 
-       if (vgic_ready(kvm))
-               goto out;
-
        kvm_for_each_vcpu(c, vcpu, kvm) {
                struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 
                if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
                        kvm_debug("vcpu %d redistributor base not set\n", c);
-                       ret = -ENXIO;
-                       goto out;
+                       return -ENXIO;
                }
        }
 
        if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
                kvm_err("Need to set vgic distributor addresses first\n");
-               ret = -ENXIO;
-               goto out;
+               return -ENXIO;
        }
 
        if (!vgic_v3_check_base(kvm)) {
                kvm_err("VGIC redist and dist frames overlap\n");
-               ret = -EINVAL;
-               goto out;
+               return -EINVAL;
        }
 
        /*
@@ -530,22 +524,19 @@ int vgic_v3_map_resources(struct kvm *kvm)
         * the VGIC before we need to use it.
         */
        if (!vgic_initialized(kvm)) {
-               ret = -EBUSY;
-               goto out;
+               return -EBUSY;
        }
 
        ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
        if (ret) {
                kvm_err("Unable to register VGICv3 dist MMIO regions\n");
-               goto out;
+               return ret;
        }
 
        if (kvm_vgic_global_state.has_gicv4_1)
                vgic_v4_configure_vsgis(kvm);
-       dist->ready = true;
 
-out:
-       return ret;
+       return 0;
 }
 
 DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);
index 3c40da4..35d75c6 100644 (file)
@@ -709,10 +709,11 @@ static int do_tag_check_fault(unsigned long far, unsigned int esr,
                              struct pt_regs *regs)
 {
        /*
-        * The architecture specifies that bits 63:60 of FAR_EL1 are UNKNOWN for tag
-        * check faults. Mask them out now so that userspace doesn't see them.
+        * The architecture specifies that bits 63:60 of FAR_EL1 are UNKNOWN
+        * for tag check faults. Set them to corresponding bits in the untagged
+        * address.
         */
-       far &= (1UL << 60) - 1;
+       far = (__untagged_addr(far) & ~MTE_TAG_MASK) | (far & MTE_TAG_MASK);
        do_bad_area(far, esr, regs);
        return 0;
 }
index 75addb3..709d98f 100644 (file)
@@ -53,13 +53,13 @@ s64 memstart_addr __ro_after_init = -1;
 EXPORT_SYMBOL(memstart_addr);
 
 /*
- * We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of
- * memory as some devices, namely the Raspberry Pi 4, have peripherals with
- * this limited view of the memory. ZONE_DMA32 will cover the rest of the 32
- * bit addressable memory area.
+ * If the corresponding config options are enabled, we create both ZONE_DMA
+ * and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory
+ * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
+ * In such case, ZONE_DMA32 covers the rest of the 32-bit addressable memory,
+ * otherwise it is empty.
  */
 phys_addr_t arm64_dma_phys_limit __ro_after_init;
-static phys_addr_t arm64_dma32_phys_limit __ro_after_init;
 
 #ifdef CONFIG_KEXEC_CORE
 /*
@@ -84,7 +84,7 @@ static void __init reserve_crashkernel(void)
 
        if (crash_base == 0) {
                /* Current arm64 boot protocol requires 2MB alignment */
-               crash_base = memblock_find_in_range(0, arm64_dma32_phys_limit,
+               crash_base = memblock_find_in_range(0, arm64_dma_phys_limit,
                                crash_size, SZ_2M);
                if (crash_base == 0) {
                        pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
@@ -196,6 +196,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
        unsigned long max_zone_pfns[MAX_NR_ZONES]  = {0};
        unsigned int __maybe_unused acpi_zone_dma_bits;
        unsigned int __maybe_unused dt_zone_dma_bits;
+       phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);
 
 #ifdef CONFIG_ZONE_DMA
        acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
@@ -205,8 +206,12 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
        max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
 #endif
 #ifdef CONFIG_ZONE_DMA32
-       max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma32_phys_limit);
+       max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
+       if (!arm64_dma_phys_limit)
+               arm64_dma_phys_limit = dma32_phys_limit;
 #endif
+       if (!arm64_dma_phys_limit)
+               arm64_dma_phys_limit = PHYS_MASK + 1;
        max_zone_pfns[ZONE_NORMAL] = max;
 
        free_area_init(max_zone_pfns);
@@ -394,16 +399,9 @@ void __init arm64_memblock_init(void)
 
        early_init_fdt_scan_reserved_mem();
 
-       if (IS_ENABLED(CONFIG_ZONE_DMA32))
-               arm64_dma32_phys_limit = max_zone_phys(32);
-       else
-               arm64_dma32_phys_limit = PHYS_MASK + 1;
-
        reserve_elfcorehdr();
 
        high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
-
-       dma_contiguous_reserve(arm64_dma32_phys_limit);
 }
 
 void __init bootmem_init(void)
@@ -438,6 +436,11 @@ void __init bootmem_init(void)
        sparse_init();
        zone_sizes_init(min, max);
 
+       /*
+        * Reserve the CMA area after arm64_dma_phys_limit was initialised.
+        */
+       dma_contiguous_reserve(arm64_dma_phys_limit);
+
        /*
         * request_standard_resources() depends on crashkernel's memory being
         * reserved, so do it here.
@@ -455,7 +458,7 @@ void __init bootmem_init(void)
 void __init mem_init(void)
 {
        if (swiotlb_force == SWIOTLB_FORCE ||
-           max_pfn > PFN_DOWN(arm64_dma_phys_limit ? : arm64_dma32_phys_limit))
+           max_pfn > PFN_DOWN(arm64_dma_phys_limit))
                swiotlb_init(1);
        else
                swiotlb_force = SWIOTLB_NO_FORCE;
index 37a54b5..1f7ee8c 100644 (file)
@@ -46,7 +46,7 @@
 #endif
 
 #ifdef CONFIG_KASAN_HW_TAGS
-#define TCR_KASAN_HW_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1
+#define TCR_KASAN_HW_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1
 #else
 #define TCR_KASAN_HW_FLAGS 0
 #endif
index dd8c166..42ed524 100644 (file)
@@ -3,6 +3,7 @@
 #define _ASM_IA64_SPARSEMEM_H
 
 #ifdef CONFIG_SPARSEMEM
+#include <asm/page.h>
 /*
  * SECTION_SIZE_BITS            2^N: how big each section will be
  * MAX_PHYSMEM_BITS             2^N: how much memory we can have in that space
index c61c641..e3946b0 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/libfdt.h>
 
 #include <asm/addrspace.h>
+#include <asm/unaligned.h>
 
 /*
  * These two variables specify the free mem region
@@ -117,7 +118,7 @@ void decompress_kernel(unsigned long boot_heap_start)
                dtb_size = fdt_totalsize((void *)&__appended_dtb);
 
                /* last four bytes is always image size in little endian */
-               image_size = le32_to_cpup((void *)&__image_end - 4);
+               image_size = get_unaligned_le32((void *)&__image_end - 4);
 
                /* copy dtb to where the booted kernel will expect it */
                memcpy((void *)VMLINUX_LOAD_ADDRESS_ULL + image_size,
index bd47e15..be5d4af 100644 (file)
@@ -1444,7 +1444,7 @@ static void octeon_irq_setup_secondary_ciu2(void)
 static int __init octeon_irq_init_ciu(
        struct device_node *ciu_node, struct device_node *parent)
 {
-       unsigned int i, r;
+       int i, r;
        struct irq_chip *chip;
        struct irq_chip *chip_edge;
        struct irq_chip *chip_mbox;
index 6ee3f72..c444141 100644 (file)
@@ -103,4 +103,11 @@ jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value)
 #undef ns_to_kernel_old_timeval
 #define ns_to_kernel_old_timeval ns_to_old_timeval32
 
+/*
+ * Some data types as stored in coredump.
+ */
+#define user_long_t             compat_long_t
+#define user_siginfo_t          compat_siginfo_t
+#define copy_siginfo_to_external        copy_siginfo_to_external32
+
 #include "../../../fs/binfmt_elf.c"
index 6dd103d..7b2a23f 100644 (file)
@@ -106,4 +106,11 @@ jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value)
 #undef ns_to_kernel_old_timeval
 #define ns_to_kernel_old_timeval ns_to_old_timeval32
 
+/*
+ * Some data types as stored in coredump.
+ */
+#define user_long_t             compat_long_t
+#define user_siginfo_t          compat_siginfo_t
+#define copy_siginfo_to_external        copy_siginfo_to_external32
+
 #include "../../../fs/binfmt_elf.c"
index 47aeb33..0e365b7 100644 (file)
@@ -187,8 +187,14 @@ static int __init relocate_exception_table(long offset)
 static inline __init unsigned long rotate_xor(unsigned long hash,
                                              const void *area, size_t size)
 {
-       size_t i;
-       unsigned long *ptr = (unsigned long *)area;
+       const typeof(hash) *ptr = PTR_ALIGN(area, sizeof(hash));
+       size_t diff, i;
+
+       diff = (void *)ptr - area;
+       if (unlikely(size < diff + sizeof(hash)))
+               return hash;
+
+       size = ALIGN_DOWN(size - diff, sizeof(hash));
 
        for (i = 0; i < size / sizeof(hash); i++) {
                /* Rotate by odd number of bits and XOR. */
index 7d6b4a7..c298061 100644 (file)
@@ -31,7 +31,7 @@
 void __iomem *ioremap(phys_addr_t offset, unsigned long size);
 
 #define iounmap iounmap
-extern void iounmap(void *addr);
+extern void iounmap(void __iomem *addr);
 
 #include <asm-generic/io.h>
 
index 5aed97a..daae13a 100644 (file)
@@ -77,7 +77,7 @@ void __iomem *__ref ioremap(phys_addr_t addr, unsigned long size)
 }
 EXPORT_SYMBOL(ioremap);
 
-void iounmap(void *addr)
+void iounmap(void __iomem *addr)
 {
        /* If the page is from the fixmap pool then we just clear out
         * the fixmap mapping.
index 1d32b17..c1a8aac 100644 (file)
        nop;                                                            \
        nop;
 
+#define SCV_ENTRY_FLUSH_SLOT                                           \
+       SCV_ENTRY_FLUSH_FIXUP_SECTION;                                  \
+       nop;                                                            \
+       nop;                                                            \
+       nop;
+
 /*
  * r10 must be free to use, r13 must be paca
  */
        STF_ENTRY_BARRIER_SLOT;                                         \
        ENTRY_FLUSH_SLOT
 
+/*
+ * r10, ctr must be free to use, r13 must be paca
+ */
+#define SCV_INTERRUPT_TO_KERNEL                                                \
+       STF_ENTRY_BARRIER_SLOT;                                         \
+       SCV_ENTRY_FLUSH_SLOT
+
 /*
  * Macros for annotating the expected destination of (h)rfid
  *
index f6d2acb..ac605fc 100644 (file)
@@ -240,6 +240,14 @@ label##3:                                          \
        FTR_ENTRY_OFFSET 957b-958b;                     \
        .popsection;
 
+#define SCV_ENTRY_FLUSH_FIXUP_SECTION                  \
+957:                                                   \
+       .pushsection __scv_entry_flush_fixup,"a";       \
+       .align 2;                                       \
+958:                                                   \
+       FTR_ENTRY_OFFSET 957b-958b;                     \
+       .popsection;
+
 #define RFI_FLUSH_FIXUP_SECTION                                \
 951:                                                   \
        .pushsection __rfi_flush_fixup,"a";             \
@@ -273,10 +281,12 @@ label##3:                                         \
 
 extern long stf_barrier_fallback;
 extern long entry_flush_fallback;
+extern long scv_entry_flush_fallback;
 extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
 extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
 extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup;
 extern long __start___entry_flush_fixup, __stop___entry_flush_fixup;
+extern long __start___scv_entry_flush_fixup, __stop___scv_entry_flush_fixup;
 extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
 extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
 extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
index 81671aa..77c635c 100644 (file)
@@ -103,6 +103,8 @@ int gettimeofday_fallback(struct __kernel_old_timeval *_tv, struct timezone *_tz
        return do_syscall_2(__NR_gettimeofday, (unsigned long)_tv, (unsigned long)_tz);
 }
 
+#ifdef __powerpc64__
+
 static __always_inline
 int clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
 {
@@ -115,10 +117,22 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
        return do_syscall_2(__NR_clock_getres, _clkid, (unsigned long)_ts);
 }
 
-#ifdef CONFIG_VDSO32
+#else
 
 #define BUILD_VDSO32           1
 
+static __always_inline
+int clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+       return do_syscall_2(__NR_clock_gettime64, _clkid, (unsigned long)_ts);
+}
+
+static __always_inline
+int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+       return do_syscall_2(__NR_clock_getres_time64, _clkid, (unsigned long)_ts);
+}
+
 static __always_inline
 int clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
 {
index aa1af13..33ddfee 100644 (file)
@@ -75,7 +75,7 @@ BEGIN_FTR_SECTION
        bne     .Ltabort_syscall
 END_FTR_SECTION_IFSET(CPU_FTR_TM)
 #endif
-       INTERRUPT_TO_KERNEL
+       SCV_INTERRUPT_TO_KERNEL
        mr      r10,r1
        ld      r1,PACAKSAVE(r13)
        std     r10,0(r1)
index e02ad6f..6e53f76 100644 (file)
@@ -2993,6 +2993,25 @@ TRAMP_REAL_BEGIN(entry_flush_fallback)
        ld      r11,PACA_EXRFI+EX_R11(r13)
        blr
 
+/*
+ * The SCV entry flush happens with interrupts enabled, so it must disable
+ * to prevent EXRFI being clobbered by NMIs (e.g., soft_nmi_common). r10
+ * (containing LR) does not need to be preserved here because scv entry
+ * puts 0 in the pt_regs, CTR can be clobbered for the same reason.
+ */
+TRAMP_REAL_BEGIN(scv_entry_flush_fallback)
+       li      r10,0
+       mtmsrd  r10,1
+       lbz     r10,PACAIRQHAPPENED(r13)
+       ori     r10,r10,PACA_IRQ_HARD_DIS
+       stb     r10,PACAIRQHAPPENED(r13)
+       std     r11,PACA_EXRFI+EX_R11(r13)
+       L1D_DISPLACEMENT_FLUSH
+       ld      r11,PACA_EXRFI+EX_R11(r13)
+       li      r10,MSR_RI
+       mtmsrd  r10,1
+       blr
+
 TRAMP_REAL_BEGIN(rfi_flush_fallback)
        SET_SCRATCH0(r13);
        GET_PACA(r13);
index 349bf3f..858fbc8 100644 (file)
@@ -260,10 +260,19 @@ __secondary_hold_acknowledge:
 MachineCheck:
        EXCEPTION_PROLOG_0
 #ifdef CONFIG_PPC_CHRP
+#ifdef CONFIG_VMAP_STACK
+       mtspr   SPRN_SPRG_SCRATCH2,r1
+       mfspr   r1, SPRN_SPRG_THREAD
+       lwz     r1, RTAS_SP(r1)
+       cmpwi   cr1, r1, 0
+       bne     cr1, 7f
+       mfspr   r1, SPRN_SPRG_SCRATCH2
+#else
        mfspr   r11, SPRN_SPRG_THREAD
        lwz     r11, RTAS_SP(r11)
        cmpwi   cr1, r11, 0
        bne     cr1, 7f
+#endif
 #endif /* CONFIG_PPC_CHRP */
        EXCEPTION_PROLOG_1 for_rtas=1
 7:     EXCEPTION_PROLOG_2
index 0318ba4..72fa3c0 100644 (file)
@@ -85,7 +85,7 @@ SECTIONS
                ALIGN_FUNCTION();
 #endif
                /* careful! __ftr_alt_* sections need to be close to .text */
-               *(.text.hot TEXT_MAIN .text.fixup .text.unlikely .fixup __ftr_alt_* .ref.text);
+               *(.text.hot .text.hot.* TEXT_MAIN .text.fixup .text.unlikely .text.unlikely.* .fixup __ftr_alt_* .ref.text);
 #ifdef CONFIG_PPC64
                *(.tramp.ftrace.text);
 #endif
@@ -145,6 +145,13 @@ SECTIONS
                __stop___entry_flush_fixup = .;
        }
 
+       . = ALIGN(8);
+       __scv_entry_flush_fixup : AT(ADDR(__scv_entry_flush_fixup) - LOAD_OFFSET) {
+               __start___scv_entry_flush_fixup = .;
+               *(__scv_entry_flush_fixup)
+               __stop___scv_entry_flush_fixup = .;
+       }
+
        . = ALIGN(8);
        __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
                __start___stf_exit_barrier_fixup = .;
@@ -187,6 +194,12 @@ SECTIONS
        .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
                _sinittext = .;
                INIT_TEXT
+
+               /*
+                * .init.text might be RO so we must ensure this section ends on
+                * a page boundary.
+                */
+               . = ALIGN(PAGE_SIZE);
                _einittext = .;
 #ifdef CONFIG_PPC64
                *(.tramp.ftrace.init);
@@ -200,6 +213,8 @@ SECTIONS
                EXIT_TEXT
        }
 
+       . = ALIGN(PAGE_SIZE);
+
        INIT_DATA_SECTION(16)
 
        . = ALIGN(8);
index 4782105..1fd31b4 100644 (file)
@@ -290,9 +290,6 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
        long *start, *end;
        int i;
 
-       start = PTRRELOC(&__start___entry_flush_fixup);
-       end = PTRRELOC(&__stop___entry_flush_fixup);
-
        instrs[0] = 0x60000000; /* nop */
        instrs[1] = 0x60000000; /* nop */
        instrs[2] = 0x60000000; /* nop */
@@ -312,6 +309,8 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
        if (types & L1D_FLUSH_MTTRIG)
                instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
 
+       start = PTRRELOC(&__start___entry_flush_fixup);
+       end = PTRRELOC(&__stop___entry_flush_fixup);
        for (i = 0; start < end; start++, i++) {
                dest = (void *)start + *start;
 
@@ -328,6 +327,25 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
                patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
        }
 
+       start = PTRRELOC(&__start___scv_entry_flush_fixup);
+       end = PTRRELOC(&__stop___scv_entry_flush_fixup);
+       for (; start < end; start++, i++) {
+               dest = (void *)start + *start;
+
+               pr_devel("patching dest %lx\n", (unsigned long)dest);
+
+               patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+
+               if (types == L1D_FLUSH_FALLBACK)
+                       patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&scv_entry_flush_fallback,
+                                    BRANCH_SET_LINK);
+               else
+                       patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
+
+               patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+       }
+
+
        printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i,
                (types == L1D_FLUSH_NONE)       ? "no" :
                (types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
index 81b76d4..e9e2c1f 100644 (file)
@@ -137,7 +137,7 @@ config PA_BITS
 
 config PAGE_OFFSET
        hex
-       default 0xC0000000 if 32BIT && MAXPHYSMEM_2GB
+       default 0xC0000000 if 32BIT && MAXPHYSMEM_1GB
        default 0x80000000 if 64BIT && !MMU
        default 0xffffffff80000000 if 64BIT && MAXPHYSMEM_2GB
        default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB
@@ -247,10 +247,12 @@ config MODULE_SECTIONS
 
 choice
        prompt "Maximum Physical Memory"
-       default MAXPHYSMEM_2GB if 32BIT
+       default MAXPHYSMEM_1GB if 32BIT
        default MAXPHYSMEM_2GB if 64BIT && CMODEL_MEDLOW
        default MAXPHYSMEM_128GB if 64BIT && CMODEL_MEDANY
 
+       config MAXPHYSMEM_1GB
+               bool "1GiB"
        config MAXPHYSMEM_2GB
                bool "2GiB"
        config MAXPHYSMEM_128GB
index 4a2729f..24d75a1 100644 (file)
@@ -88,7 +88,9 @@
        phy-mode = "gmii";
        phy-handle = <&phy0>;
        phy0: ethernet-phy@0 {
+               compatible = "ethernet-phy-id0007.0771";
                reg = <0>;
+               reset-gpios = <&gpio 12 GPIO_ACTIVE_LOW>;
        };
 };
 
index d222d35..8c3d1e4 100644 (file)
@@ -64,6 +64,8 @@ CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_VIRTIO=y
 CONFIG_SPI=y
 CONFIG_SPI_SIFIVE=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SIFIVE=y
 # CONFIG_PTP_1588_CLOCK is not set
 CONFIG_POWER_RESET=y
 CONFIG_DRM=y
index 41a7286..251e1db 100644 (file)
@@ -99,7 +99,6 @@
                                | _PAGE_DIRTY)
 
 #define PAGE_KERNEL            __pgprot(_PAGE_KERNEL)
-#define PAGE_KERNEL_EXEC       __pgprot(_PAGE_KERNEL | _PAGE_EXEC)
 #define PAGE_KERNEL_READ       __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
 #define PAGE_KERNEL_EXEC       __pgprot(_PAGE_KERNEL | _PAGE_EXEC)
 #define PAGE_KERNEL_READ_EXEC  __pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
index 8454f74..1453a2f 100644 (file)
@@ -10,7 +10,7 @@
 
 #include <linux/types.h>
 
-#ifndef GENERIC_TIME_VSYSCALL
+#ifndef CONFIG_GENERIC_TIME_VSYSCALL
 struct vdso_data {
 };
 #endif
index de59dd4..d867813 100644 (file)
@@ -26,7 +26,16 @@ cache_get_priv_group(struct cacheinfo *this_leaf)
 
 static struct cacheinfo *get_cacheinfo(u32 level, enum cache_type type)
 {
-       struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(smp_processor_id());
+       /*
+        * Using raw_smp_processor_id() elides a preemptability check, but this
+        * is really indicative of a larger problem: the cacheinfo UABI assumes
+        * that cores have a homogeneous view of the cache hierarchy.  That
+        * happens to be the case for the current set of RISC-V systems, but
+        * likely won't be true in general.  Since there's no way to provide
+        * correct information for these systems via the current UABI we're
+        * just eliding the check for now.
+        */
+       struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(raw_smp_processor_id());
        struct cacheinfo *this_leaf;
        int index;
 
index 524d918..744f320 100644 (file)
@@ -124,15 +124,15 @@ skip_context_tracking:
        REG_L a1, (a1)
        jr a1
 1:
-#ifdef CONFIG_TRACE_IRQFLAGS
-       call trace_hardirqs_on
-#endif
        /*
         * Exceptions run with interrupts enabled or disabled depending on the
         * state of SR_PIE in m/sstatus.
         */
        andi t0, s1, SR_PIE
        beqz t0, 1f
+#ifdef CONFIG_TRACE_IRQFLAGS
+       call trace_hardirqs_on
+#endif
        csrs CSR_STATUS, SR_IE
 
 1:
@@ -155,6 +155,15 @@ skip_context_tracking:
        tail do_trap_unknown
 
 handle_syscall:
+#ifdef CONFIG_RISCV_M_MODE
+       /*
+        * When running in M-Mode (no MMU config), MPIE does not get set.
+        * As a result, we need to force enable interrupts here because
+        * handle_exception did not set SR_IE as it always sees SR_PIE
+        * being cleared.
+        */
+       csrs CSR_STATUS, SR_IE
+#endif
 #if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
        /* Recover a0 - a7 for system calls */
        REG_L a0, PT_A0(sp)
@@ -186,14 +195,7 @@ check_syscall_nr:
         * Syscall number held in a7.
         * If syscall number is above allowed value, redirect to ni_syscall.
         */
-       bge a7, t0, 1f
-       /*
-        * Check if syscall is rejected by tracer, i.e., a7 == -1.
-        * If yes, we pretend it was executed.
-        */
-       li t1, -1
-       beq a7, t1, ret_from_syscall_rejected
-       blt a7, t1, 1f
+       bgeu a7, t0, 1f
        /* Call syscall */
        la s0, sys_call_table
        slli t0, a7, RISCV_LGPTR
index 1d85e9b..3fa3f26 100644 (file)
@@ -127,7 +127,9 @@ static void __init init_resources(void)
 {
        struct memblock_region *region = NULL;
        struct resource *res = NULL;
-       int ret = 0;
+       struct resource *mem_res = NULL;
+       size_t mem_res_sz = 0;
+       int ret = 0, i = 0;
 
        code_res.start = __pa_symbol(_text);
        code_res.end = __pa_symbol(_etext) - 1;
@@ -145,16 +147,17 @@ static void __init init_resources(void)
        bss_res.end = __pa_symbol(__bss_stop) - 1;
        bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 
+       mem_res_sz = (memblock.memory.cnt + memblock.reserved.cnt) * sizeof(*mem_res);
+       mem_res = memblock_alloc(mem_res_sz, SMP_CACHE_BYTES);
+       if (!mem_res)
+               panic("%s: Failed to allocate %zu bytes\n", __func__, mem_res_sz);
        /*
         * Start by adding the reserved regions, if they overlap
         * with /memory regions, insert_resource later on will take
         * care of it.
         */
        for_each_reserved_mem_region(region) {
-               res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
-               if (!res)
-                       panic("%s: Failed to allocate %zu bytes\n", __func__,
-                             sizeof(struct resource));
+               res = &mem_res[i++];
 
                res->name = "Reserved";
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
@@ -171,8 +174,10 @@ static void __init init_resources(void)
                 * Ignore any other reserved regions within
                 * system memory.
                 */
-               if (memblock_is_memory(res->start))
+               if (memblock_is_memory(res->start)) {
+                       memblock_free((phys_addr_t) res, sizeof(struct resource));
                        continue;
+               }
 
                ret = add_resource(&iomem_resource, res);
                if (ret < 0)
@@ -181,10 +186,7 @@ static void __init init_resources(void)
 
        /* Add /memory regions to the resource tree */
        for_each_mem_region(region) {
-               res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
-               if (!res)
-                       panic("%s: Failed to allocate %zu bytes\n", __func__,
-                             sizeof(struct resource));
+               res = &mem_res[i++];
 
                if (unlikely(memblock_is_nomap(region))) {
                        res->name = "Reserved";
@@ -205,9 +207,9 @@ static void __init init_resources(void)
        return;
 
  error:
-       memblock_free((phys_addr_t) res, sizeof(struct resource));
        /* Better an empty resource tree than an inconsistent one */
        release_child_resources(&iomem_resource);
+       memblock_free((phys_addr_t) mem_res, mem_res_sz);
 }
 
 
index 48b870a..df5d2da 100644 (file)
@@ -14,7 +14,7 @@
 
 #include <asm/stacktrace.h>
 
-register unsigned long sp_in_global __asm__("sp");
+register const unsigned long sp_in_global __asm__("sp");
 
 #ifdef CONFIG_FRAME_POINTER
 
@@ -28,9 +28,8 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
                sp = user_stack_pointer(regs);
                pc = instruction_pointer(regs);
        } else if (task == NULL || task == current) {
-               const register unsigned long current_sp = sp_in_global;
                fp = (unsigned long)__builtin_frame_address(0);
-               sp = current_sp;
+               sp = sp_in_global;
                pc = (unsigned long)walk_stackframe;
        } else {
                /* task blocked in __switch_to */
index 4d3a104..8a5cf99 100644 (file)
@@ -4,6 +4,7 @@
  * Copyright (C) 2017 SiFive
  */
 
+#include <linux/of_clk.h>
 #include <linux/clocksource.h>
 #include <linux/delay.h>
 #include <asm/sbi.h>
@@ -24,6 +25,8 @@ void __init time_init(void)
        riscv_timebase = prop;
 
        lpj_fine = riscv_timebase / HZ;
+
+       of_clk_init(NULL);
        timer_probe();
 }
 
index 6782042..3f1d35e 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/binfmts.h>
 #include <linux/err.h>
 #include <asm/page.h>
-#ifdef GENERIC_TIME_VSYSCALL
+#ifdef CONFIG_GENERIC_TIME_VSYSCALL
 #include <vdso/datapage.h>
 #else
 #include <asm/vdso.h>
index bf53791..7cd4993 100644 (file)
@@ -157,9 +157,10 @@ disable:
 void __init setup_bootmem(void)
 {
        phys_addr_t mem_start = 0;
-       phys_addr_t start, end = 0;
+       phys_addr_t start, dram_end, end = 0;
        phys_addr_t vmlinux_end = __pa_symbol(&_end);
        phys_addr_t vmlinux_start = __pa_symbol(&_start);
+       phys_addr_t max_mapped_addr = __pa(~(ulong)0);
        u64 i;
 
        /* Find the memory region containing the kernel */
@@ -181,7 +182,18 @@ void __init setup_bootmem(void)
        /* Reserve from the start of the kernel to the end of the kernel */
        memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
 
-       max_pfn = PFN_DOWN(memblock_end_of_DRAM());
+       dram_end = memblock_end_of_DRAM();
+
+       /*
+        * memblock allocator is not aware of the fact that last 4K bytes of
+        * the addressable memory can not be mapped because of IS_ERR_VALUE
+        * macro. Make sure that last 4k bytes are not usable by memblock
+        * if end of dram is equal to maximum addressable memory.
+        */
+       if (max_mapped_addr == (dram_end - 1))
+               memblock_set_current_limit(max_mapped_addr - 4096);
+
+       max_pfn = PFN_DOWN(dram_end);
        max_low_pfn = max_pfn;
        dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
        set_max_mapnr(max_low_pfn);
index 12ddd1f..a8a2ffd 100644 (file)
@@ -93,8 +93,8 @@ void __init kasan_init(void)
                                                                VMALLOC_END));
 
        for_each_mem_range(i, &_start, &_end) {
-               void *start = (void *)_start;
-               void *end = (void *)_end;
+               void *start = (void *)__va(_start);
+               void *end = (void *)__va(_end);
 
                if (start >= end)
                        break;
index 7b6dd10..21f8511 100644 (file)
@@ -19,6 +19,7 @@ config X86_32
        select KMAP_LOCAL
        select MODULES_USE_ELF_REL
        select OLD_SIGACTION
+       select ARCH_SPLIT_ARG64
 
 config X86_64
        def_bool y
index 18d8f17..0904f56 100644 (file)
@@ -73,10 +73,8 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs,
                                                  unsigned int nr)
 {
        if (likely(nr < IA32_NR_syscalls)) {
-               instrumentation_begin();
                nr = array_index_nospec(nr, IA32_NR_syscalls);
                regs->ax = ia32_sys_call_table[nr](regs);
-               instrumentation_end();
        }
 }
 
@@ -91,8 +89,11 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
         * or may not be necessary, but it matches the old asm behavior.
         */
        nr = (unsigned int)syscall_enter_from_user_mode(regs, nr);
+       instrumentation_begin();
 
        do_syscall_32_irqs_on(regs, nr);
+
+       instrumentation_end();
        syscall_exit_to_user_mode(regs);
 }
 
@@ -121,11 +122,12 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
                res = get_user(*(u32 *)&regs->bp,
                       (u32 __user __force *)(unsigned long)(u32)regs->sp);
        }
-       instrumentation_end();
 
        if (res) {
                /* User code screwed up. */
                regs->ax = -EFAULT;
+
+               instrumentation_end();
                syscall_exit_to_user_mode(regs);
                return false;
        }
@@ -135,6 +137,8 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
 
        /* Now this is just like a normal syscall. */
        do_syscall_32_irqs_on(regs, nr);
+
+       instrumentation_end();
        syscall_exit_to_user_mode(regs);
        return true;
 }
index e04d90a..6375967 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/hyperv-tlfs.h>
 #include <asm/mshyperv.h>
 #include <asm/idtentry.h>
+#include <linux/kexec.h>
 #include <linux/version.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
@@ -26,6 +27,8 @@
 #include <linux/syscore_ops.h>
 #include <clocksource/hyperv_timer.h>
 
+int hyperv_init_cpuhp;
+
 void *hv_hypercall_pg;
 EXPORT_SYMBOL_GPL(hv_hypercall_pg);
 
@@ -312,6 +315,25 @@ static struct syscore_ops hv_syscore_ops = {
        .resume         = hv_resume,
 };
 
+static void (* __initdata old_setup_percpu_clockev)(void);
+
+static void __init hv_stimer_setup_percpu_clockev(void)
+{
+       /*
+        * Ignore any errors in setting up stimer clockevents
+        * as we can run with the LAPIC timer as a fallback.
+        */
+       (void)hv_stimer_alloc();
+
+       /*
+        * Still register the LAPIC timer, because the direct-mode STIMER is
+        * not supported by old versions of Hyper-V. This also allows users
+        * to switch to LAPIC timer via /sys, if they want to.
+        */
+       if (old_setup_percpu_clockev)
+               old_setup_percpu_clockev();
+}
+
 /*
  * This function is to be invoked early in the boot sequence after the
  * hypervisor has been detected.
@@ -390,10 +412,14 @@ void __init hyperv_init(void)
        wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
 
        /*
-        * Ignore any errors in setting up stimer clockevents
-        * as we can run with the LAPIC timer as a fallback.
+        * hyperv_init() is called before LAPIC is initialized: see
+        * apic_intr_mode_init() -> x86_platform.apic_post_init() and
+        * apic_bsp_setup() -> setup_local_APIC(). The direct-mode STIMER
+        * depends on LAPIC, so hv_stimer_alloc() should be called from
+        * x86_init.timers.setup_percpu_clockev.
         */
-       (void)hv_stimer_alloc();
+       old_setup_percpu_clockev = x86_init.timers.setup_percpu_clockev;
+       x86_init.timers.setup_percpu_clockev = hv_stimer_setup_percpu_clockev;
 
        hv_apic_init();
 
@@ -401,6 +427,7 @@ void __init hyperv_init(void)
 
        register_syscore_ops(&hv_syscore_ops);
 
+       hyperv_init_cpuhp = cpuhp;
        return;
 
 remove_cpuhp_state:
index 5208ba4..2c87350 100644 (file)
@@ -66,11 +66,17 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
        if (!hv_hypercall_pg)
                goto do_native;
 
-       if (cpumask_empty(cpus))
-               return;
-
        local_irq_save(flags);
 
+       /*
+        * Only check the mask _after_ interrupt has been disabled to avoid the
+        * mask changing under our feet.
+        */
+       if (cpumask_empty(cpus)) {
+               local_irq_restore(flags);
+               return;
+       }
+
        flush_pcpu = (struct hv_tlb_flush **)
                     this_cpu_ptr(hyperv_pcpu_input_arg);
 
index a5aba4a..67a4f1c 100644 (file)
  * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
  * disables preemption so be careful if you intend to use it for long periods
  * of time.
- * If you intend to use the FPU in softirq you need to check first with
+ * If you intend to use the FPU in irq/softirq you need to check first with
  * irq_fpu_usable() if it is possible.
  */
-extern void kernel_fpu_begin(void);
+
+/* Kernel FPU states to initialize in kernel_fpu_begin_mask() */
+#define KFPU_387       _BITUL(0)       /* 387 state will be initialized */
+#define KFPU_MXCSR     _BITUL(1)       /* MXCSR will be initialized */
+
+extern void kernel_fpu_begin_mask(unsigned int kfpu_mask);
 extern void kernel_fpu_end(void);
 extern bool irq_fpu_usable(void);
 extern void fpregs_mark_activate(void);
 
+/* Code that is unaware of kernel_fpu_begin_mask() can use this */
+static inline void kernel_fpu_begin(void)
+{
+       kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
+}
+
 /*
  * Use fpregs_lock() while editing CPU's FPU registers or fpu->state.
  * A context switch will (and softirq might) save CPU's FPU registers to
index 5e658ba..9abe842 100644 (file)
@@ -97,6 +97,7 @@
 
 #define        INTEL_FAM6_LAKEFIELD            0x8A
 #define INTEL_FAM6_ALDERLAKE           0x97
+#define INTEL_FAM6_ALDERLAKE_L         0x9A
 
 /* "Small Core" Processors (Atom) */
 
index 3ab7b46..3d6616f 100644 (file)
@@ -1010,9 +1010,21 @@ struct kvm_arch {
         */
        bool tdp_mmu_enabled;
 
-       /* List of struct tdp_mmu_pages being used as roots */
+       /*
+        * List of struct kvmp_mmu_pages being used as roots.
+        * All struct kvm_mmu_pages in the list should have
+        * tdp_mmu_page set.
+        * All struct kvm_mmu_pages in the list should have a positive
+        * root_count except when a thread holds the MMU lock and is removing
+        * an entry from the list.
+        */
        struct list_head tdp_mmu_roots;
-       /* List of struct tdp_mmu_pages not being used as roots */
+
+       /*
+        * List of struct kvmp_mmu_pages not being used as roots.
+        * All struct kvm_mmu_pages in the list should have
+        * tdp_mmu_page set and a root_count of 0.
+        */
        struct list_head tdp_mmu_pages;
 };
 
@@ -1287,6 +1299,8 @@ struct kvm_x86_ops {
        void (*migrate_timers)(struct kvm_vcpu *vcpu);
        void (*msr_filter_changed)(struct kvm_vcpu *vcpu);
        int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);
+
+       void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector);
 };
 
 struct kvm_x86_nested_ops {
@@ -1468,6 +1482,7 @@ int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
 int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
 int kvm_emulate_halt(struct kvm_vcpu *vcpu);
 int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
+int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu);
 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
 
 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
index ffc2899..30f76b9 100644 (file)
@@ -74,6 +74,8 @@ static inline void hv_disable_stimer0_percpu_irq(int irq) {}
 
 
 #if IS_ENABLED(CONFIG_HYPERV)
+extern int hyperv_init_cpuhp;
+
 extern void *hv_hypercall_pg;
 extern void  __percpu  **hyperv_pcpu_input_arg;
 
index 0b4920a..e16cccd 100644 (file)
@@ -86,7 +86,7 @@ static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
  * think of extending them - you will be slapped with a stinking trout or a frozen
  * shark will reach you, wherever you are! You've been warned.
  */
-static inline unsigned long long notrace __rdmsr(unsigned int msr)
+static __always_inline unsigned long long __rdmsr(unsigned int msr)
 {
        DECLARE_ARGS(val, low, high);
 
@@ -98,7 +98,7 @@ static inline unsigned long long notrace __rdmsr(unsigned int msr)
        return EAX_EDX_VAL(val, low, high);
 }
 
-static inline void notrace __wrmsr(unsigned int msr, u32 low, u32 high)
+static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high)
 {
        asm volatile("1: wrmsr\n"
                     "2:\n"
index 488a8e8..9239399 100644 (file)
@@ -110,6 +110,8 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
 #define topology_die_id(cpu)                   (cpu_data(cpu).cpu_die_id)
 #define topology_core_id(cpu)                  (cpu_data(cpu).cpu_core_id)
 
+extern unsigned int __max_die_per_package;
+
 #ifdef CONFIG_SMP
 #define topology_die_cpumask(cpu)              (per_cpu(cpu_die_map, cpu))
 #define topology_core_cpumask(cpu)             (per_cpu(cpu_core_map, cpu))
@@ -118,8 +120,6 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
 extern unsigned int __max_logical_packages;
 #define topology_max_packages()                        (__max_logical_packages)
 
-extern unsigned int __max_die_per_package;
-
 static inline int topology_max_die_per_package(void)
 {
        return __max_die_per_package;
index f8ca66f..347a956 100644 (file)
@@ -542,12 +542,12 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
                u32 ecx;
 
                ecx = cpuid_ecx(0x8000001e);
-               nodes_per_socket = ((ecx >> 8) & 7) + 1;
+               __max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1;
        } else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
                u64 value;
 
                rdmsrl(MSR_FAM10H_NODE_ID, value);
-               nodes_per_socket = ((value >> 3) & 7) + 1;
+               __max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1;
        }
 
        if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
index 13d3f1c..e133ce1 100644 (file)
@@ -1992,10 +1992,9 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
         * that out because it's an indirect call. Annotate it.
         */
        instrumentation_begin();
-       trace_hardirqs_off_finish();
+
        machine_check_vector(regs);
-       if (regs->flags & X86_EFLAGS_IF)
-               trace_hardirqs_on_prepare();
+
        instrumentation_end();
        irqentry_nmi_exit(regs, irq_state);
 }
@@ -2004,7 +2003,9 @@ static __always_inline void exc_machine_check_user(struct pt_regs *regs)
 {
        irqentry_enter_from_user_mode(regs);
        instrumentation_begin();
+
        machine_check_vector(regs);
+
        instrumentation_end();
        irqentry_exit_to_user_mode(regs);
 }
index f628e3d..43b54be 100644 (file)
@@ -135,14 +135,32 @@ static void hv_machine_shutdown(void)
 {
        if (kexec_in_progress && hv_kexec_handler)
                hv_kexec_handler();
+
+       /*
+        * Call hv_cpu_die() on all the CPUs, otherwise later the hypervisor
+        * corrupts the old VP Assist Pages and can crash the kexec kernel.
+        */
+       if (kexec_in_progress && hyperv_init_cpuhp > 0)
+               cpuhp_remove_state(hyperv_init_cpuhp);
+
+       /* The function calls stop_other_cpus(). */
        native_machine_shutdown();
+
+       /* Disable the hypercall page when there is only 1 active CPU. */
+       if (kexec_in_progress)
+               hyperv_cleanup();
 }
 
 static void hv_machine_crash_shutdown(struct pt_regs *regs)
 {
        if (hv_crash_handler)
                hv_crash_handler(regs);
+
+       /* The function calls crash_smp_send_stop(). */
        native_machine_crash_shutdown(regs);
+
+       /* Disable the hypercall page when there is only 1 active CPU. */
+       hyperv_cleanup();
 }
 #endif /* CONFIG_KEXEC_CORE */
 #endif /* CONFIG_HYPERV */
index 23ad8e9..a29997e 100644 (file)
@@ -167,9 +167,6 @@ static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
        *repeat = 0;
        *uniform = 1;
 
-       /* Make end inclusive instead of exclusive */
-       end--;
-
        prev_match = MTRR_TYPE_INVALID;
        for (i = 0; i < num_var_ranges; ++i) {
                unsigned short start_state, end_state, inclusive;
@@ -261,6 +258,9 @@ u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform)
        int repeat;
        u64 partial_end;
 
+       /* Make end inclusive instead of exclusive */
+       end--;
+
        if (!mtrr_state_set)
                return MTRR_TYPE_INVALID;
 
index 29ffb95..460f3e0 100644 (file)
@@ -525,89 +525,70 @@ static void rdtgroup_remove(struct rdtgroup *rdtgrp)
        kfree(rdtgrp);
 }
 
-struct task_move_callback {
-       struct callback_head    work;
-       struct rdtgroup         *rdtgrp;
-};
-
-static void move_myself(struct callback_head *head)
+static void _update_task_closid_rmid(void *task)
 {
-       struct task_move_callback *callback;
-       struct rdtgroup *rdtgrp;
-
-       callback = container_of(head, struct task_move_callback, work);
-       rdtgrp = callback->rdtgrp;
-
        /*
-        * If resource group was deleted before this task work callback
-        * was invoked, then assign the task to root group and free the
-        * resource group.
+        * If the task is still current on this CPU, update PQR_ASSOC MSR.
+        * Otherwise, the MSR is updated when the task is scheduled in.
         */
-       if (atomic_dec_and_test(&rdtgrp->waitcount) &&
-           (rdtgrp->flags & RDT_DELETED)) {
-               current->closid = 0;
-               current->rmid = 0;
-               rdtgroup_remove(rdtgrp);
-       }
-
-       if (unlikely(current->flags & PF_EXITING))
-               goto out;
-
-       preempt_disable();
-       /* update PQR_ASSOC MSR to make resource group go into effect */
-       resctrl_sched_in();
-       preempt_enable();
+       if (task == current)
+               resctrl_sched_in();
+}
 
-out:
-       kfree(callback);
+static void update_task_closid_rmid(struct task_struct *t)
+{
+       if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
+               smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1);
+       else
+               _update_task_closid_rmid(t);
 }
 
 static int __rdtgroup_move_task(struct task_struct *tsk,
                                struct rdtgroup *rdtgrp)
 {
-       struct task_move_callback *callback;
-       int ret;
-
-       callback = kzalloc(sizeof(*callback), GFP_KERNEL);
-       if (!callback)
-               return -ENOMEM;
-       callback->work.func = move_myself;
-       callback->rdtgrp = rdtgrp;
+       /* If the task is already in rdtgrp, no need to move the task. */
+       if ((rdtgrp->type == RDTCTRL_GROUP && tsk->closid == rdtgrp->closid &&
+            tsk->rmid == rdtgrp->mon.rmid) ||
+           (rdtgrp->type == RDTMON_GROUP && tsk->rmid == rdtgrp->mon.rmid &&
+            tsk->closid == rdtgrp->mon.parent->closid))
+               return 0;
 
        /*
-        * Take a refcount, so rdtgrp cannot be freed before the
-        * callback has been invoked.
+        * Set the task's closid/rmid before the PQR_ASSOC MSR can be
+        * updated by them.
+        *
+        * For ctrl_mon groups, move both closid and rmid.
+        * For monitor groups, can move the tasks only from
+        * their parent CTRL group.
         */
-       atomic_inc(&rdtgrp->waitcount);
-       ret = task_work_add(tsk, &callback->work, TWA_RESUME);
-       if (ret) {
-               /*
-                * Task is exiting. Drop the refcount and free the callback.
-                * No need to check the refcount as the group cannot be
-                * deleted before the write function unlocks rdtgroup_mutex.
-                */
-               atomic_dec(&rdtgrp->waitcount);
-               kfree(callback);
-               rdt_last_cmd_puts("Task exited\n");
-       } else {
-               /*
-                * For ctrl_mon groups move both closid and rmid.
-                * For monitor groups, can move the tasks only from
-                * their parent CTRL group.
-                */
-               if (rdtgrp->type == RDTCTRL_GROUP) {
-                       tsk->closid = rdtgrp->closid;
+
+       if (rdtgrp->type == RDTCTRL_GROUP) {
+               tsk->closid = rdtgrp->closid;
+               tsk->rmid = rdtgrp->mon.rmid;
+       } else if (rdtgrp->type == RDTMON_GROUP) {
+               if (rdtgrp->mon.parent->closid == tsk->closid) {
                        tsk->rmid = rdtgrp->mon.rmid;
-               } else if (rdtgrp->type == RDTMON_GROUP) {
-                       if (rdtgrp->mon.parent->closid == tsk->closid) {
-                               tsk->rmid = rdtgrp->mon.rmid;
-                       } else {
-                               rdt_last_cmd_puts("Can't move task to different control group\n");
-                               ret = -EINVAL;
-                       }
+               } else {
+                       rdt_last_cmd_puts("Can't move task to different control group\n");
+                       return -EINVAL;
                }
        }
-       return ret;
+
+       /*
+        * Ensure the task's closid and rmid are written before determining if
+        * the task is current that will decide if it will be interrupted.
+        */
+       barrier();
+
+       /*
+        * By now, the task's closid and rmid are set. If the task is current
+        * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource
+        * group go into effect. If the task is not current, the MSR will be
+        * updated when the task is scheduled in.
+        */
+       update_task_closid_rmid(tsk);
+
+       return 0;
 }
 
 static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
index 1068002..8678864 100644 (file)
 #define BITS_SHIFT_NEXT_LEVEL(eax)     ((eax) & 0x1f)
 #define LEVEL_MAX_SIBLINGS(ebx)                ((ebx) & 0xffff)
 
-#ifdef CONFIG_SMP
 unsigned int __max_die_per_package __read_mostly = 1;
 EXPORT_SYMBOL(__max_die_per_package);
 
+#ifdef CONFIG_SMP
 /*
  * Check if given CPUID extended toplogy "leaf" is implemented
  */
index eb86a2b..571220a 100644 (file)
@@ -121,7 +121,7 @@ int copy_fpregs_to_fpstate(struct fpu *fpu)
 }
 EXPORT_SYMBOL(copy_fpregs_to_fpstate);
 
-void kernel_fpu_begin(void)
+void kernel_fpu_begin_mask(unsigned int kfpu_mask)
 {
        preempt_disable();
 
@@ -141,13 +141,14 @@ void kernel_fpu_begin(void)
        }
        __cpu_invalidate_fpregs_state();
 
-       if (boot_cpu_has(X86_FEATURE_XMM))
+       /* Put sane initial values into the control registers. */
+       if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
                ldmxcsr(MXCSR_DEFAULT);
 
-       if (boot_cpu_has(X86_FEATURE_FPU))
+       if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
                asm volatile ("fninit");
 }
-EXPORT_SYMBOL_GPL(kernel_fpu_begin);
+EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);
 
 void kernel_fpu_end(void)
 {
index 7d04b35..cdc04d0 100644 (file)
@@ -305,14 +305,14 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
        case 0xe4:
        case 0xe5:
                *exitinfo |= IOIO_TYPE_IN;
-               *exitinfo |= (u64)insn->immediate.value << 16;
+               *exitinfo |= (u8)insn->immediate.value << 16;
                break;
 
        /* OUT immediate opcodes */
        case 0xe6:
        case 0xe7:
                *exitinfo |= IOIO_TYPE_OUT;
-               *exitinfo |= (u64)insn->immediate.value << 16;
+               *exitinfo |= (u8)insn->immediate.value << 16;
                break;
 
        /* IN register opcodes */
index 0bd1a0f..84c1821 100644 (file)
@@ -225,7 +225,7 @@ static inline u64 sev_es_rd_ghcb_msr(void)
        return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
 }
 
-static inline void sev_es_wr_ghcb_msr(u64 val)
+static __always_inline void sev_es_wr_ghcb_msr(u64 val)
 {
        u32 low, high;
 
@@ -286,6 +286,12 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
        u16 d2;
        u8  d1;
 
+       /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
+       if (!user_mode(ctxt->regs) && !access_ok(target, size)) {
+               memcpy(dst, buf, size);
+               return ES_OK;
+       }
+
        switch (size) {
        case 1:
                memcpy(&d1, buf, 1);
@@ -335,6 +341,12 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
        u16 d2;
        u8  d1;
 
+       /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
+       if (!user_mode(ctxt->regs) && !access_ok(s, size)) {
+               memcpy(buf, src, size);
+               return ES_OK;
+       }
+
        switch (size) {
        case 1:
                if (get_user(d1, s))
index 8ca66af..117e24f 100644 (file)
@@ -56,6 +56,7 @@
 #include <linux/numa.h>
 #include <linux/pgtable.h>
 #include <linux/overflow.h>
+#include <linux/syscore_ops.h>
 
 #include <asm/acpi.h>
 #include <asm/desc.h>
@@ -2083,6 +2084,23 @@ static void init_counter_refs(void)
        this_cpu_write(arch_prev_mperf, mperf);
 }
 
+#ifdef CONFIG_PM_SLEEP
+static struct syscore_ops freq_invariance_syscore_ops = {
+       .resume = init_counter_refs,
+};
+
+static void register_freq_invariance_syscore_ops(void)
+{
+       /* Bail out if registered already. */
+       if (freq_invariance_syscore_ops.node.prev)
+               return;
+
+       register_syscore_ops(&freq_invariance_syscore_ops);
+}
+#else
+static inline void register_freq_invariance_syscore_ops(void) {}
+#endif
+
 static void init_freq_invariance(bool secondary, bool cppc_ready)
 {
        bool ret = false;
@@ -2109,6 +2127,7 @@ static void init_freq_invariance(bool secondary, bool cppc_ready)
        if (ret) {
                init_counter_refs();
                static_branch_enable(&arch_scale_freq_key);
+               register_freq_invariance_syscore_ops();
                pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n", arch_max_freq_ratio);
        } else {
                pr_debug("Couldn't determine max cpu frequency, necessary for scale-invariant accounting.\n");
index 3136e05..43ccead 100644 (file)
@@ -674,7 +674,7 @@ static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
                           (unsigned long long)vcpu->arch.pv_eoi.msr_val);
                return false;
        }
-       return val & 0x1;
+       return val & KVM_PV_EOI_ENABLED;
 }
 
 static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
@@ -2898,7 +2898,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
                        /* evaluate pending_events before reading the vector */
                        smp_rmb();
                        sipi_vector = apic->sipi_vector;
-                       kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
+                       kvm_x86_ops.vcpu_deliver_sipi_vector(vcpu, sipi_vector);
                        vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
                }
        }
index 9c4a9c8..581925e 100644 (file)
@@ -49,7 +49,7 @@ static inline u64 rsvd_bits(int s, int e)
        if (e < s)
                return 0;
 
-       return ((1ULL << (e - s + 1)) - 1) << s;
+       return ((2ULL << (e - s)) - 1) << s;
 }
 
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask);
index c478904..6d16481 100644 (file)
@@ -3493,26 +3493,25 @@ static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
  * Return the level of the lowest level SPTE added to sptes.
  * That SPTE may be non-present.
  */
-static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes)
+static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level)
 {
        struct kvm_shadow_walk_iterator iterator;
-       int leaf = vcpu->arch.mmu->root_level;
+       int leaf = -1;
        u64 spte;
 
-
        walk_shadow_page_lockless_begin(vcpu);
 
-       for (shadow_walk_init(&iterator, vcpu, addr);
+       for (shadow_walk_init(&iterator, vcpu, addr),
+            *root_level = iterator.level;
             shadow_walk_okay(&iterator);
             __shadow_walk_next(&iterator, spte)) {
                leaf = iterator.level;
                spte = mmu_spte_get_lockless(iterator.sptep);
 
-               sptes[leaf - 1] = spte;
+               sptes[leaf] = spte;
 
                if (!is_shadow_present_pte(spte))
                        break;
-
        }
 
        walk_shadow_page_lockless_end(vcpu);
@@ -3520,14 +3519,12 @@ static int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes)
        return leaf;
 }
 
-/* return true if reserved bit is detected on spte. */
+/* return true if reserved bit(s) are detected on a valid, non-MMIO SPTE. */
 static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 {
-       u64 sptes[PT64_ROOT_MAX_LEVEL];
+       u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
        struct rsvd_bits_validate *rsvd_check;
-       int root = vcpu->arch.mmu->shadow_root_level;
-       int leaf;
-       int level;
+       int root, leaf, level;
        bool reserved = false;
 
        if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) {
@@ -3536,35 +3533,45 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
        }
 
        if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
-               leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes);
+               leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
        else
-               leaf = get_walk(vcpu, addr, sptes);
+               leaf = get_walk(vcpu, addr, sptes, &root);
+
+       if (unlikely(leaf < 0)) {
+               *sptep = 0ull;
+               return reserved;
+       }
+
+       *sptep = sptes[leaf];
+
+       /*
+        * Skip reserved bits checks on the terminal leaf if it's not a valid
+        * SPTE.  Note, this also (intentionally) skips MMIO SPTEs, which, by
+        * design, always have reserved bits set.  The purpose of the checks is
+        * to detect reserved bits on non-MMIO SPTEs. i.e. buggy SPTEs.
+        */
+       if (!is_shadow_present_pte(sptes[leaf]))
+               leaf++;
 
        rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
 
-       for (level = root; level >= leaf; level--) {
-               if (!is_shadow_present_pte(sptes[level - 1]))
-                       break;
+       for (level = root; level >= leaf; level--)
                /*
                 * Use a bitwise-OR instead of a logical-OR to aggregate the
                 * reserved bit and EPT's invalid memtype/XWR checks to avoid
                 * adding a Jcc in the loop.
                 */
-               reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level - 1]) |
-                           __is_rsvd_bits_set(rsvd_check, sptes[level - 1],
-                                              level);
-       }
+               reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level]) |
+                           __is_rsvd_bits_set(rsvd_check, sptes[level], level);
 
        if (reserved) {
                pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
                       __func__, addr);
                for (level = root; level >= leaf; level--)
                        pr_err("------ spte 0x%llx level %d.\n",
-                              sptes[level - 1], level);
+                              sptes[level], level);
        }
 
-       *sptep = sptes[leaf - 1];
-
        return reserved;
 }
 
index 4bd2f1d..2ef8615 100644 (file)
@@ -44,7 +44,48 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
        WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
 }
 
-#define for_each_tdp_mmu_root(_kvm, _root)                         \
+static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
+{
+       if (kvm_mmu_put_root(kvm, root))
+               kvm_tdp_mmu_free_root(kvm, root);
+}
+
+static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
+                                          struct kvm_mmu_page *root)
+{
+       lockdep_assert_held(&kvm->mmu_lock);
+
+       if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
+               return false;
+
+       kvm_mmu_get_root(kvm, root);
+       return true;
+
+}
+
+static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
+                                                    struct kvm_mmu_page *root)
+{
+       struct kvm_mmu_page *next_root;
+
+       next_root = list_next_entry(root, link);
+       tdp_mmu_put_root(kvm, root);
+       return next_root;
+}
+
+/*
+ * Note: this iterator gets and puts references to the roots it iterates over.
+ * This makes it safe to release the MMU lock and yield within the loop, but
+ * if exiting the loop early, the caller must drop the reference to the most
+ * recent root. (Unless keeping a live reference is desirable.)
+ */
+#define for_each_tdp_mmu_root_yield_safe(_kvm, _root)                          \
+       for (_root = list_first_entry(&_kvm->arch.tdp_mmu_roots,        \
+                                     typeof(*_root), link);            \
+            tdp_mmu_next_root_valid(_kvm, _root);                      \
+            _root = tdp_mmu_next_root(_kvm, _root))
+
+#define for_each_tdp_mmu_root(_kvm, _root)                             \
        list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
 
 bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
@@ -447,18 +488,9 @@ bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
        struct kvm_mmu_page *root;
        bool flush = false;
 
-       for_each_tdp_mmu_root(kvm, root) {
-               /*
-                * Take a reference on the root so that it cannot be freed if
-                * this thread releases the MMU lock and yields in this loop.
-                */
-               kvm_mmu_get_root(kvm, root);
-
+       for_each_tdp_mmu_root_yield_safe(kvm, root)
                flush |= zap_gfn_range(kvm, root, start, end, true);
 
-               kvm_mmu_put_root(kvm, root);
-       }
-
        return flush;
 }
 
@@ -619,13 +651,7 @@ static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start,
        int ret = 0;
        int as_id;
 
-       for_each_tdp_mmu_root(kvm, root) {
-               /*
-                * Take a reference on the root so that it cannot be freed if
-                * this thread releases the MMU lock and yields in this loop.
-                */
-               kvm_mmu_get_root(kvm, root);
-
+       for_each_tdp_mmu_root_yield_safe(kvm, root) {
                as_id = kvm_mmu_page_as_id(root);
                slots = __kvm_memslots(kvm, as_id);
                kvm_for_each_memslot(memslot, slots) {
@@ -647,8 +673,6 @@ static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start,
                        ret |= handler(kvm, memslot, root, gfn_start,
                                       gfn_end, data);
                }
-
-               kvm_mmu_put_root(kvm, root);
        }
 
        return ret;
@@ -838,21 +862,13 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
        int root_as_id;
        bool spte_set = false;
 
-       for_each_tdp_mmu_root(kvm, root) {
+       for_each_tdp_mmu_root_yield_safe(kvm, root) {
                root_as_id = kvm_mmu_page_as_id(root);
                if (root_as_id != slot->as_id)
                        continue;
 
-               /*
-                * Take a reference on the root so that it cannot be freed if
-                * this thread releases the MMU lock and yields in this loop.
-                */
-               kvm_mmu_get_root(kvm, root);
-
                spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
                             slot->base_gfn + slot->npages, min_level);
-
-               kvm_mmu_put_root(kvm, root);
        }
 
        return spte_set;
@@ -906,21 +922,13 @@ bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
        int root_as_id;
        bool spte_set = false;
 
-       for_each_tdp_mmu_root(kvm, root) {
+       for_each_tdp_mmu_root_yield_safe(kvm, root) {
                root_as_id = kvm_mmu_page_as_id(root);
                if (root_as_id != slot->as_id)
                        continue;
 
-               /*
-                * Take a reference on the root so that it cannot be freed if
-                * this thread releases the MMU lock and yields in this loop.
-                */
-               kvm_mmu_get_root(kvm, root);
-
                spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
                                slot->base_gfn + slot->npages);
-
-               kvm_mmu_put_root(kvm, root);
        }
 
        return spte_set;
@@ -1029,21 +1037,13 @@ bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
        int root_as_id;
        bool spte_set = false;
 
-       for_each_tdp_mmu_root(kvm, root) {
+       for_each_tdp_mmu_root_yield_safe(kvm, root) {
                root_as_id = kvm_mmu_page_as_id(root);
                if (root_as_id != slot->as_id)
                        continue;
 
-               /*
-                * Take a reference on the root so that it cannot be freed if
-                * this thread releases the MMU lock and yields in this loop.
-                */
-               kvm_mmu_get_root(kvm, root);
-
                spte_set |= set_dirty_gfn_range(kvm, root, slot->base_gfn,
                                slot->base_gfn + slot->npages);
-
-               kvm_mmu_put_root(kvm, root);
        }
        return spte_set;
 }
@@ -1089,21 +1089,13 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
        struct kvm_mmu_page *root;
        int root_as_id;
 
-       for_each_tdp_mmu_root(kvm, root) {
+       for_each_tdp_mmu_root_yield_safe(kvm, root) {
                root_as_id = kvm_mmu_page_as_id(root);
                if (root_as_id != slot->as_id)
                        continue;
 
-               /*
-                * Take a reference on the root so that it cannot be freed if
-                * this thread releases the MMU lock and yields in this loop.
-                */
-               kvm_mmu_get_root(kvm, root);
-
                zap_collapsible_spte_range(kvm, root, slot->base_gfn,
                                           slot->base_gfn + slot->npages);
-
-               kvm_mmu_put_root(kvm, root);
        }
 }
 
@@ -1160,16 +1152,19 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
  * Return the level of the lowest level SPTE added to sptes.
  * That SPTE may be non-present.
  */
-int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes)
+int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
+                        int *root_level)
 {
        struct tdp_iter iter;
        struct kvm_mmu *mmu = vcpu->arch.mmu;
-       int leaf = vcpu->arch.mmu->shadow_root_level;
        gfn_t gfn = addr >> PAGE_SHIFT;
+       int leaf = -1;
+
+       *root_level = vcpu->arch.mmu->shadow_root_level;
 
        tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
                leaf = iter.level;
-               sptes[leaf - 1] = iter.old_spte;
+               sptes[leaf] = iter.old_spte;
        }
 
        return leaf;
index 556e065..cbbdbad 100644 (file)
@@ -44,5 +44,7 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
                                   struct kvm_memory_slot *slot, gfn_t gfn);
 
-int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes);
+int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
+                        int *root_level);
+
 #endif /* __KVM_X86_MMU_TDP_MMU_H */
index b0b6674..cb4c6ee 100644 (file)
@@ -199,6 +199,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
 static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
+
        if (!nested_svm_vmrun_msrpm(svm)) {
                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                vcpu->run->internal.suberror =
@@ -595,6 +596,8 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
        svm->nested.vmcb12_gpa = 0;
        WARN_ON_ONCE(svm->nested.nested_run_pending);
 
+       kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);
+
        /* in case we halted in L2 */
        svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
 
@@ -754,6 +757,7 @@ void svm_leave_nested(struct vcpu_svm *svm)
                leave_guest_mode(&svm->vcpu);
                copy_vmcb_control_area(&vmcb->control, &hsave->control);
                nested_svm_uninit_mmu_context(&svm->vcpu);
+               vmcb_mark_all_dirty(svm->vmcb);
        }
 
        kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);
@@ -1194,6 +1198,10 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
         * in the registers, the save area of the nested state instead
         * contains saved L1 state.
         */
+
+       svm->nested.nested_run_pending =
+               !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
+
        copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
        hsave->save = *save;
 
index 9858d5a..c8ffdbc 100644 (file)
@@ -1563,6 +1563,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
                        goto vmgexit_err;
                break;
        case SVM_VMGEXIT_NMI_COMPLETE:
+       case SVM_VMGEXIT_AP_HLT_LOOP:
        case SVM_VMGEXIT_AP_JUMP_TABLE:
        case SVM_VMGEXIT_UNSUPPORTED_EVENT:
                break;
@@ -1888,6 +1889,9 @@ int sev_handle_vmgexit(struct vcpu_svm *svm)
        case SVM_VMGEXIT_NMI_COMPLETE:
                ret = svm_invoke_exit_handler(svm, SVM_EXIT_IRET);
                break;
+       case SVM_VMGEXIT_AP_HLT_LOOP:
+               ret = kvm_emulate_ap_reset_hold(&svm->vcpu);
+               break;
        case SVM_VMGEXIT_AP_JUMP_TABLE: {
                struct kvm_sev_info *sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
 
@@ -2001,7 +2005,7 @@ void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu)
         * of which one step is to perform a VMLOAD. Since hardware does not
         * perform a VMSAVE on VMRUN, the host savearea must be updated.
         */
-       asm volatile(__ex("vmsave") : : "a" (__sme_page_pa(sd->save_area)) : "memory");
+       asm volatile(__ex("vmsave %0") : : "a" (__sme_page_pa(sd->save_area)) : "memory");
 
        /*
         * Certain MSRs are restored on VMEXIT, only save ones that aren't
@@ -2040,3 +2044,21 @@ void sev_es_vcpu_put(struct vcpu_svm *svm)
                wrmsrl(host_save_user_msrs[i].index, svm->host_user_msrs[i]);
        }
 }
+
+void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
+{
+       struct vcpu_svm *svm = to_svm(vcpu);
+
+       /* First SIPI: Use the values as initially set by the VMM */
+       if (!svm->received_first_sipi) {
+               svm->received_first_sipi = true;
+               return;
+       }
+
+       /*
+        * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where
+        * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
+        * non-zero value.
+        */
+       ghcb_set_sw_exit_info_2(svm->ghcb, 1);
+}
index cce0143..7ef1717 100644 (file)
@@ -3677,8 +3677,6 @@ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
        return EXIT_FASTPATH_NONE;
 }
 
-void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
-
 static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
                                        struct vcpu_svm *svm)
 {
@@ -4384,6 +4382,14 @@ static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
                   (vmcb_is_intercept(&svm->vmcb->control, INTERCEPT_INIT));
 }
 
+static void svm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
+{
+       if (!sev_es_guest(vcpu->kvm))
+               return kvm_vcpu_deliver_sipi_vector(vcpu, vector);
+
+       sev_vcpu_deliver_sipi_vector(vcpu, vector);
+}
+
 static void svm_vm_destroy(struct kvm *kvm)
 {
        avic_vm_destroy(kvm);
@@ -4526,6 +4532,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 
        .msr_filter_changed = svm_msr_filter_changed,
        .complete_emulated_msr = svm_complete_emulated_msr,
+
+       .vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
 };
 
 static struct kvm_x86_init_ops svm_init_ops __initdata = {
index 5431e63..0fe874a 100644 (file)
@@ -185,6 +185,7 @@ struct vcpu_svm {
        struct vmcb_save_area *vmsa;
        struct ghcb *ghcb;
        struct kvm_host_map ghcb_map;
+       bool received_first_sipi;
 
        /* SEV-ES scratch area support */
        void *ghcb_sa;
@@ -591,6 +592,7 @@ void sev_es_init_vmcb(struct vcpu_svm *svm);
 void sev_es_create_vcpu(struct vcpu_svm *svm);
 void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu);
 void sev_es_vcpu_put(struct vcpu_svm *svm);
+void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
 
 /* vmenter.S */
 
index e2f2656..0fbb469 100644 (file)
@@ -4442,6 +4442,8 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
        /* trying to cancel vmlaunch/vmresume is a bug */
        WARN_ON_ONCE(vmx->nested.nested_run_pending);
 
+       kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
+
        /* Service the TLB flush request for L2 before switching to L1. */
        if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
                kvm_vcpu_flush_tlb_current(vcpu);
index 75c9c6a..2af05d3 100644 (file)
@@ -7707,6 +7707,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
        .msr_filter_changed = vmx_msr_filter_changed,
        .complete_emulated_msr = kvm_complete_insn_gp,
        .cpu_dirty_log_size = vmx_cpu_dirty_log_size,
+
+       .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
 };
 
 static __init int hardware_setup(void)
index 3f7c1fc..9a8969a 100644 (file)
@@ -7976,17 +7976,22 @@ void kvm_arch_exit(void)
        kmem_cache_destroy(x86_fpu_cache);
 }
 
-int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
+static int __kvm_vcpu_halt(struct kvm_vcpu *vcpu, int state, int reason)
 {
        ++vcpu->stat.halt_exits;
        if (lapic_in_kernel(vcpu)) {
-               vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
+               vcpu->arch.mp_state = state;
                return 1;
        } else {
-               vcpu->run->exit_reason = KVM_EXIT_HLT;
+               vcpu->run->exit_reason = reason;
                return 0;
        }
 }
+
+int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
+{
+       return __kvm_vcpu_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT);
+}
 EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
 
 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
@@ -8000,6 +8005,14 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
 
+int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
+{
+       int ret = kvm_skip_emulated_instruction(vcpu);
+
+       return __kvm_vcpu_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD, KVM_EXIT_AP_RESET_HOLD) && ret;
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold);
+
 #ifdef CONFIG_X86_64
 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
                                unsigned long clock_type)
@@ -8789,7 +8802,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
        if (kvm_request_pending(vcpu)) {
                if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
-                       if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
+                       if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
+                               ;
+                       else if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
                                r = 0;
                                goto out;
                        }
@@ -9094,6 +9109,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
        kvm_apic_accept_events(vcpu);
        switch(vcpu->arch.mp_state) {
        case KVM_MP_STATE_HALTED:
+       case KVM_MP_STATE_AP_RESET_HOLD:
                vcpu->arch.pv.pv_unhalted = false;
                vcpu->arch.mp_state =
                        KVM_MP_STATE_RUNNABLE;
@@ -9520,8 +9536,9 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                kvm_load_guest_fpu(vcpu);
 
        kvm_apic_accept_events(vcpu);
-       if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED &&
-                                       vcpu->arch.pv.pv_unhalted)
+       if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED ||
+            vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) &&
+           vcpu->arch.pv.pv_unhalted)
                mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
        else
                mp_state->mp_state = vcpu->arch.mp_state;
@@ -10152,6 +10169,7 @@ void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
        kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
        kvm_rip_write(vcpu, 0);
 }
+EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_sipi_vector);
 
 int kvm_arch_hardware_enable(void)
 {
index 4321fa0..419365c 100644 (file)
 #include <asm/fpu/api.h>
 #include <asm/asm.h>
 
+/*
+ * Use KFPU_387.  MMX instructions are not affected by MXCSR,
+ * but both AMD and Intel documentation states that even integer MMX
+ * operations will result in #MF if an exception is pending in FCW.
+ *
+ * EMMS is not needed afterwards because, after calling kernel_fpu_end(),
+ * any subsequent user of the 387 stack will reinitialize it using
+ * KFPU_387.
+ */
+
 void *_mmx_memcpy(void *to, const void *from, size_t len)
 {
        void *p;
@@ -37,7 +47,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
        p = to;
        i = len >> 6; /* len/64 */
 
-       kernel_fpu_begin();
+       kernel_fpu_begin_mask(KFPU_387);
 
        __asm__ __volatile__ (
                "1: prefetch (%0)\n"            /* This set is 28 bytes */
@@ -127,7 +137,7 @@ static void fast_clear_page(void *page)
 {
        int i;
 
-       kernel_fpu_begin();
+       kernel_fpu_begin_mask(KFPU_387);
 
        __asm__ __volatile__ (
                "  pxor %%mm0, %%mm0\n" : :
@@ -160,7 +170,7 @@ static void fast_copy_page(void *to, void *from)
 {
        int i;
 
-       kernel_fpu_begin();
+       kernel_fpu_begin_mask(KFPU_387);
 
        /*
         * maybe the prefetch stuff can go before the expensive fnsave...
@@ -247,7 +257,7 @@ static void fast_clear_page(void *page)
 {
        int i;
 
-       kernel_fpu_begin();
+       kernel_fpu_begin_mask(KFPU_387);
 
        __asm__ __volatile__ (
                "  pxor %%mm0, %%mm0\n" : :
@@ -282,7 +292,7 @@ static void fast_copy_page(void *to, void *from)
 {
        int i;
 
-       kernel_fpu_begin();
+       kernel_fpu_begin_mask(KFPU_387);
 
        __asm__ __volatile__ (
                "1: prefetch (%0)\n"
index dfd82f5..f6a9e2e 100644 (file)
@@ -829,6 +829,8 @@ int pud_free_pmd_page(pud_t *pud, unsigned long addr)
        }
 
        free_page((unsigned long)pmd_sv);
+
+       pgtable_pmd_page_dtor(virt_to_page(pmd));
        free_page((unsigned long)pmd);
 
        return 1;
index 9e87ab0..e68ea5f 100644 (file)
@@ -164,10 +164,10 @@ static int xen_cpu_up_prepare_hvm(unsigned int cpu)
        else
                per_cpu(xen_vcpu_id, cpu) = cpu;
        rc = xen_vcpu_setup(cpu);
-       if (rc)
+       if (rc || !xen_have_vector_callback)
                return rc;
 
-       if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
+       if (xen_feature(XENFEAT_hvm_safe_pvclock))
                xen_setup_timer(cpu);
 
        rc = xen_smp_intr_init(cpu);
@@ -188,6 +188,8 @@ static int xen_cpu_dead_hvm(unsigned int cpu)
        return 0;
 }
 
+static bool no_vector_callback __initdata;
+
 static void __init xen_hvm_guest_init(void)
 {
        if (xen_pv_domain())
@@ -207,7 +209,7 @@ static void __init xen_hvm_guest_init(void)
 
        xen_panic_handler_init();
 
-       if (xen_feature(XENFEAT_hvm_callback_vector))
+       if (!no_vector_callback && xen_feature(XENFEAT_hvm_callback_vector))
                xen_have_vector_callback = 1;
 
        xen_hvm_smp_init();
@@ -233,6 +235,13 @@ static __init int xen_parse_nopv(char *arg)
 }
 early_param("xen_nopv", xen_parse_nopv);
 
+static __init int xen_parse_no_vector_callback(char *arg)
+{
+       no_vector_callback = true;
+       return 0;
+}
+early_param("xen_no_vector_callback", xen_parse_no_vector_callback);
+
 bool __init xen_hvm_need_lapic(void)
 {
        if (xen_pv_domain())
index f5e7db4..6ff3c88 100644 (file)
@@ -33,9 +33,11 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
        int cpu;
 
        native_smp_prepare_cpus(max_cpus);
-       WARN_ON(xen_smp_intr_init(0));
 
-       xen_init_lock_cpu(0);
+       if (xen_have_vector_callback) {
+               WARN_ON(xen_smp_intr_init(0));
+               xen_init_lock_cpu(0);
+       }
 
        for_each_possible_cpu(cpu) {
                if (cpu == 0)
@@ -50,9 +52,11 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
 static void xen_hvm_cpu_die(unsigned int cpu)
 {
        if (common_cpu_die(cpu) == 0) {
-               xen_smp_intr_free(cpu);
-               xen_uninit_lock_cpu(cpu);
-               xen_teardown_timer(cpu);
+               if (xen_have_vector_callback) {
+                       xen_smp_intr_free(cpu);
+                       xen_uninit_lock_cpu(cpu);
+                       xen_teardown_timer(cpu);
+               }
        }
 }
 #else
@@ -64,14 +68,19 @@ static void xen_hvm_cpu_die(unsigned int cpu)
 
 void __init xen_hvm_smp_init(void)
 {
-       if (!xen_have_vector_callback)
+       smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
+       smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
+       smp_ops.smp_cpus_done = xen_smp_cpus_done;
+       smp_ops.cpu_die = xen_hvm_cpu_die;
+
+       if (!xen_have_vector_callback) {
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+               nopvspin = true;
+#endif
                return;
+       }
 
-       smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
        smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
-       smp_ops.cpu_die = xen_hvm_cpu_die;
        smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
        smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
-       smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
-       smp_ops.smp_cpus_done = xen_smp_cpus_done;
 }
index 9e81d10..9e4eb0f 100644 (file)
@@ -6332,13 +6332,13 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
         * limit 'something'.
         */
        /* no more than 50% of tags for async I/O */
-       bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U);
+       bfqd->word_depths[0][0] = max(bt->sb.depth >> 1, 1U);
        /*
         * no more than 75% of tags for sync writes (25% extra tags
         * w.r.t. async I/O, to prevent async I/O from starving sync
         * writes)
         */
-       bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U);
+       bfqd->word_depths[0][1] = max((bt->sb.depth * 3) >> 2, 1U);
 
        /*
         * In-word depths in case some bfq_queue is being weight-
@@ -6348,9 +6348,9 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
         * shortage.
         */
        /* no more than ~18% of tags for async I/O */
-       bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
+       bfqd->word_depths[1][0] = max((bt->sb.depth * 3) >> 4, 1U);
        /* no more than ~37% of tags for sync writes (~20% extra tags) */
-       bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
+       bfqd->word_depths[1][1] = max((bt->sb.depth * 6) >> 4, 1U);
 
        for (i = 0; i < 2; i++)
                for (j = 0; j < 2; j++)
index ac6078a..98d656b 100644 (file)
@@ -2551,8 +2551,8 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
        bool use_debt, ioc_locked;
        unsigned long flags;
 
-       /* bypass IOs if disabled or for root cgroup */
-       if (!ioc->enabled || !iocg->level)
+       /* bypass IOs if disabled, still initializing, or for root cgroup */
+       if (!ioc->enabled || !iocg || !iocg->level)
                return;
 
        /* calculate the absolute vtime cost */
@@ -2679,14 +2679,14 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
                           struct bio *bio)
 {
        struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
-       struct ioc *ioc = iocg->ioc;
+       struct ioc *ioc = rqos_to_ioc(rqos);
        sector_t bio_end = bio_end_sector(bio);
        struct ioc_now now;
        u64 vtime, abs_cost, cost;
        unsigned long flags;
 
-       /* bypass if disabled or for root cgroup */
-       if (!ioc->enabled || !iocg->level)
+       /* bypass if disabled, still initializing, or for root cgroup */
+       if (!ioc->enabled || !iocg || !iocg->level)
                return;
 
        abs_cost = calc_vtime_cost(bio, iocg, true);
@@ -2863,6 +2863,12 @@ static int blk_iocost_init(struct request_queue *q)
        ioc_refresh_params(ioc, true);
        spin_unlock_irq(&ioc->lock);
 
+       /*
+        * rqos must be added before activation to allow iocg_pd_init() to
+        * lookup the ioc from q. This means that the rqos methods may get
+        * called before policy activation completion, can't assume that the
+        * target bio has an iocg associated and need to test for NULL iocg.
+        */
        rq_qos_add(q, rqos);
        ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
        if (ret) {
index 4d6e83e..4de03da 100644 (file)
@@ -246,6 +246,7 @@ static const char *const hctx_flag_name[] = {
        HCTX_FLAG_NAME(BLOCKING),
        HCTX_FLAG_NAME(NO_SCHED),
        HCTX_FLAG_NAME(STACKING),
+       HCTX_FLAG_NAME(TAG_HCTX_SHARED),
 };
 #undef HCTX_FLAG_NAME
 
index 73faec4..419548e 100644 (file)
@@ -246,15 +246,18 @@ struct block_device *disk_part_iter_next(struct disk_part_iter *piter)
                part = rcu_dereference(ptbl->part[piter->idx]);
                if (!part)
                        continue;
+               piter->part = bdgrab(part);
+               if (!piter->part)
+                       continue;
                if (!bdev_nr_sectors(part) &&
                    !(piter->flags & DISK_PITER_INCL_EMPTY) &&
                    !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
-                     piter->idx == 0))
+                     piter->idx == 0)) {
+                       bdput(piter->part);
+                       piter->part = NULL;
                        continue;
+               }
 
-               piter->part = bdgrab(part);
-               if (!piter->part)
-                       continue;
                piter->idx += inc;
                break;
        }
index 511932a..0959613 100644 (file)
@@ -354,7 +354,7 @@ static uint32_t derive_pub_key(const void *pub_key, uint32_t len, uint8_t *buf)
        memcpy(cur, e, sizeof(e));
        cur += sizeof(e);
        /* Zero parameters to satisfy set_pub_key ABI. */
-       memset(cur, 0, SETKEY_PARAMS_SIZE);
+       memzero_explicit(cur, SETKEY_PARAMS_SIZE);
 
        return cur - buf;
 }
index 8892908..788a4ba 100644 (file)
@@ -356,7 +356,8 @@ int public_key_verify_signature(const struct public_key *pkey,
        if (ret)
                goto error_free_key;
 
-       if (strcmp(sig->pkey_algo, "sm2") == 0 && sig->data_size) {
+       if (sig->pkey_algo && strcmp(sig->pkey_algo, "sm2") == 0 &&
+           sig->data_size) {
                ret = cert_sig_digest_update(sig, tfm);
                if (ret)
                        goto error_free_key;
index d56b860..96f80c8 100644 (file)
@@ -39,7 +39,8 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
        struct ecdh params;
        unsigned int ndigits;
 
-       if (crypto_ecdh_decode_key(buf, len, &params) < 0)
+       if (crypto_ecdh_decode_key(buf, len, &params) < 0 ||
+           params.key_size > sizeof(ctx->private_key))
                return -EINVAL;
 
        ndigits = ecdh_supported_curve(params.curve_id);
index eacbf4f..8f899f8 100644 (file)
@@ -107,6 +107,8 @@ do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
        preempt_enable();
 
        // bytes/ns == GB/s, multiply by 1000 to get MB/s [not MiB/s]
+       if (!min)
+               min = 1;
        speed = (1000 * REPS * BENCH_SIZE) / (unsigned int)ktime_to_ns(min);
        tmpl->speed = speed;
 
index edf1558..ebcf534 100644 (file)
@@ -395,9 +395,6 @@ config ACPI_CONTAINER
 
          This helps support hotplug of nodes, CPUs, and memory.
 
-         To compile this driver as a module, choose M here:
-         the module will be called container.
-
 config ACPI_HOTPLUG_MEMORY
        bool "Memory Hotplug"
        depends on MEMORY_HOTPLUG
@@ -411,9 +408,6 @@ config ACPI_HOTPLUG_MEMORY
          removing memory devices at runtime, you need not enable
          this driver.
 
-         To compile this driver as a module, choose M here:
-         the module will be called acpi_memhotplug.
-
 config ACPI_HOTPLUG_IOAPIC
        bool
        depends on PCI
index cb229e2..e6a5d99 100644 (file)
@@ -97,7 +97,7 @@ void acpi_scan_table_handler(u32 event, void *table, void *context);
 extern struct list_head acpi_bus_id_list;
 
 struct acpi_device_bus_id {
-       char bus_id[15];
+       const char *bus_id;
        unsigned int instance_no;
        struct list_head node;
 };
index 80b668c..1db063b 100644 (file)
@@ -486,6 +486,7 @@ static void acpi_device_del(struct acpi_device *device)
                                acpi_device_bus_id->instance_no--;
                        else {
                                list_del(&acpi_device_bus_id->node);
+                               kfree_const(acpi_device_bus_id->bus_id);
                                kfree(acpi_device_bus_id);
                        }
                        break;
@@ -585,6 +586,8 @@ static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device,
        if (!device)
                return -EINVAL;
 
+       *device = NULL;
+
        status = acpi_get_data_full(handle, acpi_scan_drop_device,
                                    (void **)device, callback);
        if (ACPI_FAILURE(status) || !*device) {
@@ -674,7 +677,14 @@ int acpi_device_add(struct acpi_device *device,
        }
        if (!found) {
                acpi_device_bus_id = new_bus_id;
-               strcpy(acpi_device_bus_id->bus_id, acpi_device_hid(device));
+               acpi_device_bus_id->bus_id =
+                       kstrdup_const(acpi_device_hid(device), GFP_KERNEL);
+               if (!acpi_device_bus_id->bus_id) {
+                       pr_err(PREFIX "Memory allocation error for bus id\n");
+                       result = -ENOMEM;
+                       goto err_free_new_bus_id;
+               }
+
                acpi_device_bus_id->instance_no = 0;
                list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
        }
@@ -709,6 +719,11 @@ int acpi_device_add(struct acpi_device *device,
        if (device->parent)
                list_del(&device->node);
        list_del(&device->wakeup_list);
+
+ err_free_new_bus_id:
+       if (!found)
+               kfree(new_bus_id);
+
        mutex_unlock(&acpi_device_lock);
 
  err_detach:
index 25fea34..2b69536 100644 (file)
@@ -105,18 +105,8 @@ static void lpi_device_get_constraints_amd(void)
 
        for (i = 0; i < out_obj->package.count; i++) {
                union acpi_object *package = &out_obj->package.elements[i];
-               struct lpi_device_info_amd info = { };
 
-               if (package->type == ACPI_TYPE_INTEGER) {
-                       switch (i) {
-                       case 0:
-                               info.revision = package->integer.value;
-                               break;
-                       case 1:
-                               info.count = package->integer.value;
-                               break;
-                       }
-               } else if (package->type == ACPI_TYPE_PACKAGE) {
+               if (package->type == ACPI_TYPE_PACKAGE) {
                        lpi_constraints_table = kcalloc(package->package.count,
                                                        sizeof(*lpi_constraints_table),
                                                        GFP_KERNEL);
@@ -135,12 +125,10 @@ static void lpi_device_get_constraints_amd(void)
 
                                for (k = 0; k < info_obj->package.count; ++k) {
                                        union acpi_object *obj = &info_obj->package.elements[k];
-                                       union acpi_object *obj_new;
 
                                        list = &lpi_constraints_table[lpi_constraints_table_size];
                                        list->min_dstate = -1;
 
-                                       obj_new = &obj[k];
                                        switch (k) {
                                        case 0:
                                                dev_info.enabled = obj->integer.value;
index 65a3886..5f0472c 100644 (file)
@@ -3607,7 +3607,7 @@ static int idt77252_init_one(struct pci_dev *pcidev,
 
        if ((err = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)))) {
                printk("idt77252: can't enable DMA for PCI device at %s\n", pci_name(pcidev));
-               return err;
+               goto err_out_disable_pdev;
        }
 
        card = kzalloc(sizeof(struct idt77252_dev), GFP_KERNEL);
index 78f6db1..6eb4c7a 100644 (file)
@@ -4432,6 +4432,12 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
  *
  * Set the device's firmware node pointer to @fwnode, but if a secondary
  * firmware node of the device is present, preserve it.
+ *
+ * Valid fwnode cases are:
+ *  - primary --> secondary --> -ENODEV
+ *  - primary --> NULL
+ *  - secondary --> -ENODEV
+ *  - NULL
  */
 void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
 {
@@ -4450,8 +4456,9 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
        } else {
                if (fwnode_is_primary(fn)) {
                        dev->fwnode = fn->secondary;
+                       /* Set fn->secondary = NULL, so fn remains the primary fwnode */
                        if (!(parent && fn == parent->fwnode))
-                               fn->secondary = ERR_PTR(-ENODEV);
+                               fn->secondary = NULL;
                } else {
                        dev->fwnode = NULL;
                }
index 8dfac7f..ff2ee87 100644 (file)
@@ -582,8 +582,12 @@ void regmap_debugfs_init(struct regmap *map)
                devname = dev_name(map->dev);
 
        if (name) {
-               map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
+               if (!map->debugfs_name) {
+                       map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
                                              devname, name);
+                       if (!map->debugfs_name)
+                               return;
+               }
                name = map->debugfs_name;
        } else {
                name = devname;
@@ -591,9 +595,10 @@ void regmap_debugfs_init(struct regmap *map)
 
        if (!strcmp(name, "dummy")) {
                kfree(map->debugfs_name);
-
                map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
                                                dummy_index);
+               if (!map->debugfs_name)
+                               return;
                name = map->debugfs_name;
                dummy_index++;
        }
index 2623269..583b671 100644 (file)
@@ -445,6 +445,7 @@ config BLK_DEV_RBD
 config BLK_DEV_RSXX
        tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver"
        depends on PCI
+       select CRC32
        help
          Device driver for IBM's high speed PCIe SSD
          storage device: Flash Adapter 900GB Full Height.
index 4b6d3d8..2ff05a0 100644 (file)
@@ -7,6 +7,7 @@ config BLK_DEV_RNBD_CLIENT
        tristate "RDMA Network Block Device driver client"
        depends on INFINIBAND_RTRS_CLIENT
        select BLK_DEV_RNBD
+       select SG_POOL
        help
          RNBD client is a network block device driver using rdma transport.
 
index 1773c0a..080f58a 100644 (file)
@@ -90,3 +90,4 @@ Kleber Souza <kleber.souza@profitbricks.com>
 Lutz Pogrell <lutz.pogrell@cloud.ionos.com>
 Milind Dumbare <Milind.dumbare@gmail.com>
 Roman Penyaev <roman.penyaev@profitbricks.com>
+Swapnil Ingle <ingleswapnil@gmail.com>
index 96e3f9f..45a4700 100644 (file)
@@ -375,12 +375,19 @@ static struct rnbd_iu *rnbd_get_iu(struct rnbd_clt_session *sess,
        init_waitqueue_head(&iu->comp.wait);
        iu->comp.errno = INT_MAX;
 
+       if (sg_alloc_table(&iu->sgt, 1, GFP_KERNEL)) {
+               rnbd_put_permit(sess, permit);
+               kfree(iu);
+               return NULL;
+       }
+
        return iu;
 }
 
 static void rnbd_put_iu(struct rnbd_clt_session *sess, struct rnbd_iu *iu)
 {
        if (atomic_dec_and_test(&iu->refcount)) {
+               sg_free_table(&iu->sgt);
                rnbd_put_permit(sess, iu->permit);
                kfree(iu);
        }
@@ -487,8 +494,6 @@ static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
        iu->buf = NULL;
        iu->dev = dev;
 
-       sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
-
        msg.hdr.type    = cpu_to_le16(RNBD_MSG_CLOSE);
        msg.device_id   = cpu_to_le32(device_id);
 
@@ -502,7 +507,6 @@ static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
                err = errno;
        }
 
-       sg_free_table(&iu->sgt);
        rnbd_put_iu(sess, iu);
        return err;
 }
@@ -575,7 +579,6 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
        iu->buf = rsp;
        iu->dev = dev;
 
-       sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
        sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
 
        msg.hdr.type    = cpu_to_le16(RNBD_MSG_OPEN);
@@ -594,7 +597,6 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
                err = errno;
        }
 
-       sg_free_table(&iu->sgt);
        rnbd_put_iu(sess, iu);
        return err;
 }
@@ -622,8 +624,6 @@ static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait)
 
        iu->buf = rsp;
        iu->sess = sess;
-
-       sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
        sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
 
        msg.hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO);
@@ -650,7 +650,6 @@ put_iu:
        } else {
                err = errno;
        }
-       sg_free_table(&iu->sgt);
        rnbd_put_iu(sess, iu);
        return err;
 }
@@ -1698,7 +1697,8 @@ static void rnbd_destroy_sessions(void)
         */
 
        list_for_each_entry_safe(sess, sn, &sess_list, list) {
-               WARN_ON(!rnbd_clt_get_sess(sess));
+               if (!rnbd_clt_get_sess(sess))
+                       continue;
                close_rtrs(sess);
                list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
                        /*
index b8e4433..a6a68d4 100644 (file)
@@ -338,10 +338,12 @@ static int rnbd_srv_link_ev(struct rtrs_srv *rtrs,
 
 void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev)
 {
-       mutex_lock(&sess_dev->sess->lock);
-       rnbd_srv_destroy_dev_session_sysfs(sess_dev);
-       mutex_unlock(&sess_dev->sess->lock);
+       struct rnbd_srv_session *sess = sess_dev->sess;
+
        sess_dev->keep_id = true;
+       mutex_lock(&sess->lock);
+       rnbd_srv_destroy_dev_session_sysfs(sess_dev);
+       mutex_unlock(&sess->lock);
 }
 
 static int process_msg_close(struct rtrs_srv *rtrs,
index 37244a7..9cf249c 100644 (file)
@@ -1256,6 +1256,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
        { TEGRA30_CLK_I2S3_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
        { TEGRA30_CLK_I2S4_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
        { TEGRA30_CLK_VIMCLK_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+       { TEGRA30_CLK_HDA, TEGRA30_CLK_PLL_P, 102000000, 0 },
+       { TEGRA30_CLK_HDA2CODEC_2X, TEGRA30_CLK_PLL_P, 48000000, 0 },
        /* must be the last entry */
        { TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0 },
 };
index a60aee1..65df9ef 100644 (file)
@@ -235,36 +235,6 @@ static ssize_t ti_eqep_position_ceiling_write(struct counter_device *counter,
        return len;
 }
 
-static ssize_t ti_eqep_position_floor_read(struct counter_device *counter,
-                                          struct counter_count *count,
-                                          void *ext_priv, char *buf)
-{
-       struct ti_eqep_cnt *priv = counter->priv;
-       u32 qposinit;
-
-       regmap_read(priv->regmap32, QPOSINIT, &qposinit);
-
-       return sprintf(buf, "%u\n", qposinit);
-}
-
-static ssize_t ti_eqep_position_floor_write(struct counter_device *counter,
-                                           struct counter_count *count,
-                                           void *ext_priv, const char *buf,
-                                           size_t len)
-{
-       struct ti_eqep_cnt *priv = counter->priv;
-       int err;
-       u32 res;
-
-       err = kstrtouint(buf, 0, &res);
-       if (err < 0)
-               return err;
-
-       regmap_write(priv->regmap32, QPOSINIT, res);
-
-       return len;
-}
-
 static ssize_t ti_eqep_position_enable_read(struct counter_device *counter,
                                            struct counter_count *count,
                                            void *ext_priv, char *buf)
@@ -301,11 +271,6 @@ static struct counter_count_ext ti_eqep_position_ext[] = {
                .read   = ti_eqep_position_ceiling_read,
                .write  = ti_eqep_position_ceiling_write,
        },
-       {
-               .name   = "floor",
-               .read   = ti_eqep_position_floor_read,
-               .write  = ti_eqep_position_floor_write,
-       },
        {
                .name   = "enable",
                .read   = ti_eqep_position_enable_read,
index 1a66046..be05e03 100644 (file)
@@ -76,11 +76,6 @@ static inline int ceiling_fp(int32_t x)
        return ret;
 }
 
-static inline int32_t percent_fp(int percent)
-{
-       return div_fp(percent, 100);
-}
-
 static inline u64 mul_ext_fp(u64 x, u64 y)
 {
        return (x * y) >> EXT_FRAC_BITS;
@@ -91,11 +86,6 @@ static inline u64 div_ext_fp(u64 x, u64 y)
        return div64_u64(x << EXT_FRAC_BITS, y);
 }
 
-static inline int32_t percent_ext_fp(int percent)
-{
-       return div_ext_fp(percent, 100);
-}
-
 /**
  * struct sample -     Store performance sample
  * @core_avg_perf:     Ratio of APERF/MPERF which is the actual average
@@ -2653,12 +2643,13 @@ static void intel_cpufreq_adjust_perf(unsigned int cpunum,
                                      unsigned long capacity)
 {
        struct cpudata *cpu = all_cpu_data[cpunum];
+       u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
        int old_pstate = cpu->pstate.current_pstate;
        int cap_pstate, min_pstate, max_pstate, target_pstate;
 
        update_turbo_state();
-       cap_pstate = global.turbo_disabled ? cpu->pstate.max_pstate :
-                                            cpu->pstate.turbo_pstate;
+       cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) :
+                                            HWP_HIGHEST_PERF(hwp_cap);
 
        /* Optimization: Avoid unnecessary divisions. */
 
index 0acc9e2..b9ccb6a 100644 (file)
@@ -878,9 +878,9 @@ static int get_transition_latency(struct powernow_k8_data *data)
 
 /* Take a frequency, and issue the fid/vid transition command */
 static int transition_frequency_fidvid(struct powernow_k8_data *data,
-               unsigned int index)
+               unsigned int index,
+               struct cpufreq_policy *policy)
 {
-       struct cpufreq_policy *policy;
        u32 fid = 0;
        u32 vid = 0;
        int res;
@@ -912,9 +912,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
        freqs.old = find_khz_freq_from_fid(data->currfid);
        freqs.new = find_khz_freq_from_fid(fid);
 
-       policy = cpufreq_cpu_get(smp_processor_id());
-       cpufreq_cpu_put(policy);
-
        cpufreq_freq_transition_begin(policy, &freqs);
        res = transition_fid_vid(data, fid, vid);
        cpufreq_freq_transition_end(policy, &freqs, res);
@@ -969,7 +966,7 @@ static long powernowk8_target_fn(void *arg)
 
        powernow_k8_acpi_pst_values(data, newstate);
 
-       ret = transition_frequency_fidvid(data, newstate);
+       ret = transition_frequency_fidvid(data, newstate, pol);
 
        if (ret) {
                pr_err("transition frequency failed\n");
index bbd5170..e535f28 100644 (file)
@@ -366,6 +366,7 @@ if CRYPTO_DEV_OMAP
 config CRYPTO_DEV_OMAP_SHAM
        tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator"
        depends on ARCH_OMAP2PLUS
+       select CRYPTO_ENGINE
        select CRYPTO_SHA1
        select CRYPTO_MD5
        select CRYPTO_SHA256
index e63684d..9ad6397 100644 (file)
@@ -76,10 +76,6 @@ static void dma_buf_release(struct dentry *dentry)
 
        dmabuf->ops->release(dmabuf);
 
-       mutex_lock(&db_list.lock);
-       list_del(&dmabuf->list_node);
-       mutex_unlock(&db_list.lock);
-
        if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
                dma_resv_fini(dmabuf->resv);
 
@@ -88,6 +84,22 @@ static void dma_buf_release(struct dentry *dentry)
        kfree(dmabuf);
 }
 
+static int dma_buf_file_release(struct inode *inode, struct file *file)
+{
+       struct dma_buf *dmabuf;
+
+       if (!is_dma_buf_file(file))
+               return -EINVAL;
+
+       dmabuf = file->private_data;
+
+       mutex_lock(&db_list.lock);
+       list_del(&dmabuf->list_node);
+       mutex_unlock(&db_list.lock);
+
+       return 0;
+}
+
 static const struct dentry_operations dma_buf_dentry_ops = {
        .d_dname = dmabuffs_dname,
        .d_release = dma_buf_release,
@@ -413,6 +425,7 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
 }
 
 static const struct file_operations dma_buf_fops = {
+       .release        = dma_buf_file_release,
        .mmap           = dma_buf_mmap_internal,
        .llseek         = dma_buf_llseek,
        .poll           = dma_buf_poll,
index 3c4e343..364fc2f 100644 (file)
@@ -251,6 +251,9 @@ static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
                buffer->vaddr = NULL;
        }
 
+       /* free page list */
+       kfree(buffer->pages);
+       /* release memory */
        cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
        kfree(buffer);
 }
index b971505..08d71da 100644 (file)
@@ -86,12 +86,12 @@ static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
 
        if (desc->chunk) {
                /* Create and add new element into the linked list */
-               desc->chunks_alloc++;
-               list_add_tail(&chunk->list, &desc->chunk->list);
                if (!dw_edma_alloc_burst(chunk)) {
                        kfree(chunk);
                        return NULL;
                }
+               desc->chunks_alloc++;
+               list_add_tail(&chunk->list, &desc->chunk->list);
        } else {
                /* List head */
                chunk->burst = NULL;
index 266423a..4dbb03c 100644 (file)
@@ -434,7 +434,7 @@ int idxd_register_driver(void)
        return 0;
 
 drv_fail:
-       for (; i > 0; i--)
+       while (--i >= 0)
                driver_unregister(&idxd_drvs[i]->drv);
        return rc;
 }
@@ -1840,7 +1840,7 @@ int idxd_register_bus_type(void)
        return 0;
 
 bus_err:
-       for (; i > 0; i--)
+       while (--i >= 0)
                bus_unregister(idxd_bus_types[i]);
        return rc;
 }
index f133ae8..6ad8afb 100644 (file)
@@ -1007,6 +1007,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
        return 0;
 
 err_free:
+       mtk_hsdma_hw_deinit(hsdma);
        of_dma_controller_free(pdev->dev.of_node);
 err_unregister:
        dma_async_device_unregister(dd);
index 584c931..d29d01e 100644 (file)
@@ -350,7 +350,7 @@ static int milbeaut_xdmac_probe(struct platform_device *pdev)
 
        ret = dma_async_device_register(ddev);
        if (ret)
-               return ret;
+               goto disable_xdmac;
 
        ret = of_dma_controller_register(dev->of_node,
                                         of_dma_simple_xlate, mdev);
@@ -363,6 +363,8 @@ static int milbeaut_xdmac_probe(struct platform_device *pdev)
 
 unregister_dmac:
        dma_async_device_unregister(ddev);
+disable_xdmac:
+       disable_xdmac(mdev);
        return ret;
 }
 
index d5773d4..8857985 100644 (file)
@@ -630,7 +630,7 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
                             GFP_NOWAIT);
 
        if (!async_desc)
-               goto err_out;
+               return NULL;
 
        if (flags & DMA_PREP_FENCE)
                async_desc->flags |= DESC_FLAG_NWD;
@@ -670,10 +670,6 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
        }
 
        return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
-
-err_out:
-       kfree(async_desc);
-       return NULL;
 }
 
 /**
index d2334f5..1a0bf6b 100644 (file)
@@ -1416,7 +1416,7 @@ static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements,
        len = 1 << bit;
        ring->alloc_size = (len + (len - 1));
        dev_dbg(gpii->gpi_dev->dev,
-               "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%lu\n",
+               "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%zu\n",
                  elements, el_size, (elements * el_size), len,
                  ring->alloc_size);
 
@@ -1424,7 +1424,7 @@ static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements,
                                               ring->alloc_size,
                                               &ring->dma_handle, GFP_KERNEL);
        if (!ring->pre_aligned) {
-               dev_err(gpii->gpi_dev->dev, "could not alloc size:%lu mem for ring\n",
+               dev_err(gpii->gpi_dev->dev, "could not alloc size:%zu mem for ring\n",
                        ring->alloc_size);
                return -ENOMEM;
        }
@@ -1444,8 +1444,8 @@ static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements,
        smp_wmb();
 
        dev_dbg(gpii->gpi_dev->dev,
-               "phy_pre:0x%0llx phy_alig:0x%0llx len:%u el_size:%u elements:%u\n",
-               ring->dma_handle, ring->phys_addr, ring->len,
+               "phy_pre:%pad phy_alig:%pa len:%u el_size:%u elements:%u\n",
+               &ring->dma_handle, &ring->phys_addr, ring->len,
                ring->el_size, ring->elements);
 
        return 0;
@@ -1948,7 +1948,7 @@ static int gpi_ch_init(struct gchan *gchan)
        return ret;
 
 error_start_chan:
-       for (i = i - 1; i >= 0; i++) {
+       for (i = i - 1; i >= 0; i--) {
                gpi_stop_chan(&gpii->gchan[i]);
                gpi_send_cmd(gpii, gchan, GPI_CH_CMD_RESET);
        }
index e4637ec..36ba8b4 100644 (file)
 #define STM32_MDMA_MAX_CHANNELS                63
 #define STM32_MDMA_MAX_REQUESTS                256
 #define STM32_MDMA_MAX_BURST           128
-#define STM32_MDMA_VERY_HIGH_PRIORITY  0x11
+#define STM32_MDMA_VERY_HIGH_PRIORITY  0x3
 
 enum stm32_mdma_trigger_mode {
        STM32_MDMA_BUFFER,
index 87157cb..2984604 100644 (file)
@@ -4698,9 +4698,9 @@ static int pktdma_setup_resources(struct udma_dev *ud)
                ud->tchan_tpl.levels = 1;
        }
 
-       ud->tchan_tpl.levels = ud->tchan_tpl.levels;
-       ud->tchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
-       ud->tchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
+       ud->rchan_tpl.levels = ud->tchan_tpl.levels;
+       ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
+       ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
 
        ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
                                           sizeof(unsigned long), GFP_KERNEL);
index 22faea6..7977755 100644 (file)
@@ -2781,7 +2781,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
                has_dre = false;
 
        if (!has_dre)
-               xdev->common.copy_align = fls(width - 1);
+               xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);
 
        if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
            of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
@@ -2900,7 +2900,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
                                    struct device_node *node)
 {
-       int ret, i, nr_channels = 1;
+       int ret, i;
+       u32 nr_channels = 1;
 
        ret = of_property_read_u32(node, "dma-channels", &nr_channels);
        if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
@@ -3112,7 +3113,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
        }
 
        /* Register the DMA engine with the core */
-       dma_async_device_register(&xdev->common);
+       err = dma_async_device_register(&xdev->common);
+       if (err) {
+               dev_err(xdev->dev, "failed to register the dma device\n");
+               goto error;
+       }
 
        err = of_dma_controller_register(node, of_dma_xilinx_xlate,
                                         xdev);
index c70f46e..dea65d8 100644 (file)
@@ -521,7 +521,8 @@ config GPIO_SAMA5D2_PIOBU
 
 config GPIO_SIFIVE
        bool "SiFive GPIO support"
-       depends on OF_GPIO && IRQ_DOMAIN_HIERARCHY
+       depends on OF_GPIO
+       select IRQ_DOMAIN_HIERARCHY
        select GPIO_GENERIC
        select GPIOLIB_IRQCHIP
        select REGMAP_MMIO
@@ -597,6 +598,8 @@ config GPIO_TEGRA
        default ARCH_TEGRA
        depends on ARCH_TEGRA || COMPILE_TEST
        depends on OF_GPIO
+       select GPIOLIB_IRQCHIP
+       select IRQ_DOMAIN_HIERARCHY
        help
          Say yes here to support GPIO pins on NVIDIA Tegra SoCs.
 
index 672681a..a912a8f 100644 (file)
@@ -676,20 +676,17 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
        else
                state->duty_cycle = 1;
 
+       val = (unsigned long long) u; /* on duration */
        regmap_read(mvpwm->regs, mvebu_pwmreg_blink_off_duration(mvpwm), &u);
-       val = (unsigned long long) u * NSEC_PER_SEC;
+       val += (unsigned long long) u; /* period = on + off duration */
+       val *= NSEC_PER_SEC;
        do_div(val, mvpwm->clk_rate);
-       if (val < state->duty_cycle) {
+       if (val > UINT_MAX)
+               state->period = UINT_MAX;
+       else if (val)
+               state->period = val;
+       else
                state->period = 1;
-       } else {
-               val -= state->duty_cycle;
-               if (val > UINT_MAX)
-                       state->period = UINT_MAX;
-               else if (val)
-                       state->period = val;
-               else
-                       state->period = 1;
-       }
 
        regmap_read(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset, &u);
        if (u)
index 12b679c..1a7b511 100644 (file)
@@ -1979,6 +1979,21 @@ struct gpio_chardev_data {
 #endif
 };
 
+static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
+{
+       struct gpio_device *gdev = cdev->gdev;
+       struct gpiochip_info chipinfo;
+
+       memset(&chipinfo, 0, sizeof(chipinfo));
+
+       strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
+       strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
+       chipinfo.lines = gdev->ngpio;
+       if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
+               return -EFAULT;
+       return 0;
+}
+
 #ifdef CONFIG_GPIO_CDEV_V1
 /*
  * returns 0 if the versions match, else the previously selected ABI version
@@ -1993,6 +2008,41 @@ static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
 
        return abiv;
 }
+
+static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
+                          bool watch)
+{
+       struct gpio_desc *desc;
+       struct gpioline_info lineinfo;
+       struct gpio_v2_line_info lineinfo_v2;
+
+       if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
+               return -EFAULT;
+
+       /* this doubles as a range check on line_offset */
+       desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.line_offset);
+       if (IS_ERR(desc))
+               return PTR_ERR(desc);
+
+       if (watch) {
+               if (lineinfo_ensure_abi_version(cdev, 1))
+                       return -EPERM;
+
+               if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
+                       return -EBUSY;
+       }
+
+       gpio_desc_to_lineinfo(desc, &lineinfo_v2);
+       gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
+
+       if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
+               if (watch)
+                       clear_bit(lineinfo.line_offset, cdev->watched_lines);
+               return -EFAULT;
+       }
+
+       return 0;
+}
 #endif
 
 static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
@@ -2030,6 +2080,22 @@ static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
        return 0;
 }
 
+static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
+{
+       __u32 offset;
+
+       if (copy_from_user(&offset, ip, sizeof(offset)))
+               return -EFAULT;
+
+       if (offset >= cdev->gdev->ngpio)
+               return -EINVAL;
+
+       if (!test_and_clear_bit(offset, cdev->watched_lines))
+               return -EBUSY;
+
+       return 0;
+}
+
 /*
  * gpio_ioctl() - ioctl handler for the GPIO chardev
  */
@@ -2037,80 +2103,24 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        struct gpio_chardev_data *cdev = file->private_data;
        struct gpio_device *gdev = cdev->gdev;
-       struct gpio_chip *gc = gdev->chip;
        void __user *ip = (void __user *)arg;
-       __u32 offset;
 
        /* We fail any subsequent ioctl():s when the chip is gone */
-       if (!gc)
+       if (!gdev->chip)
                return -ENODEV;
 
        /* Fill in the struct and pass to userspace */
        if (cmd == GPIO_GET_CHIPINFO_IOCTL) {
-               struct gpiochip_info chipinfo;
-
-               memset(&chipinfo, 0, sizeof(chipinfo));
-
-               strscpy(chipinfo.name, dev_name(&gdev->dev),
-                       sizeof(chipinfo.name));
-               strscpy(chipinfo.label, gdev->label,
-                       sizeof(chipinfo.label));
-               chipinfo.lines = gdev->ngpio;
-               if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
-                       return -EFAULT;
-               return 0;
+               return chipinfo_get(cdev, ip);
 #ifdef CONFIG_GPIO_CDEV_V1
-       } else if (cmd == GPIO_GET_LINEINFO_IOCTL) {
-               struct gpio_desc *desc;
-               struct gpioline_info lineinfo;
-               struct gpio_v2_line_info lineinfo_v2;
-
-               if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
-                       return -EFAULT;
-
-               /* this doubles as a range check on line_offset */
-               desc = gpiochip_get_desc(gc, lineinfo.line_offset);
-               if (IS_ERR(desc))
-                       return PTR_ERR(desc);
-
-               gpio_desc_to_lineinfo(desc, &lineinfo_v2);
-               gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
-
-               if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
-                       return -EFAULT;
-               return 0;
        } else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) {
                return linehandle_create(gdev, ip);
        } else if (cmd == GPIO_GET_LINEEVENT_IOCTL) {
                return lineevent_create(gdev, ip);
-       } else if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
-               struct gpio_desc *desc;
-               struct gpioline_info lineinfo;
-               struct gpio_v2_line_info lineinfo_v2;
-
-               if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
-                       return -EFAULT;
-
-               /* this doubles as a range check on line_offset */
-               desc = gpiochip_get_desc(gc, lineinfo.line_offset);
-               if (IS_ERR(desc))
-                       return PTR_ERR(desc);
-
-               if (lineinfo_ensure_abi_version(cdev, 1))
-                       return -EPERM;
-
-               if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
-                       return -EBUSY;
-
-               gpio_desc_to_lineinfo(desc, &lineinfo_v2);
-               gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
-
-               if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
-                       clear_bit(lineinfo.line_offset, cdev->watched_lines);
-                       return -EFAULT;
-               }
-
-               return 0;
+       } else if (cmd == GPIO_GET_LINEINFO_IOCTL ||
+                  cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
+               return lineinfo_get_v1(cdev, ip,
+                                      cmd == GPIO_GET_LINEINFO_WATCH_IOCTL);
 #endif /* CONFIG_GPIO_CDEV_V1 */
        } else if (cmd == GPIO_V2_GET_LINEINFO_IOCTL ||
                   cmd == GPIO_V2_GET_LINEINFO_WATCH_IOCTL) {
@@ -2119,16 +2129,7 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        } else if (cmd == GPIO_V2_GET_LINE_IOCTL) {
                return linereq_create(gdev, ip);
        } else if (cmd == GPIO_GET_LINEINFO_UNWATCH_IOCTL) {
-               if (copy_from_user(&offset, ip, sizeof(offset)))
-                       return -EFAULT;
-
-               if (offset >= cdev->gdev->ngpio)
-                       return -EINVAL;
-
-               if (!test_and_clear_bit(offset, cdev->watched_lines))
-                       return -EBUSY;
-
-               return 0;
+               return lineinfo_unwatch(cdev, ip);
        }
        return -EINVAL;
 }
index b02cc2a..b78a634 100644 (file)
@@ -1489,6 +1489,9 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
                type = IRQ_TYPE_NONE;
        }
 
+       if (gc->to_irq)
+               chip_warn(gc, "to_irq is redefined in %s and you shouldn't rely on it\n", __func__);
+
        gc->to_irq = gpiochip_to_irq;
        gc->irq.default_type = type;
        gc->irq.lock_key = lock_key;
index 3060778..6107ac9 100644 (file)
@@ -112,6 +112,7 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
 union igp_info {
        struct atom_integrated_system_info_v1_11 v11;
        struct atom_integrated_system_info_v1_12 v12;
+       struct atom_integrated_system_info_v2_1 v21;
 };
 
 union umc_info {
@@ -209,24 +210,42 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
                if (adev->flags & AMD_IS_APU) {
                        igp_info = (union igp_info *)
                                (mode_info->atom_context->bios + data_offset);
-                       switch (crev) {
-                       case 11:
-                               mem_channel_number = igp_info->v11.umachannelnumber;
-                               /* channel width is 64 */
-                               if (vram_width)
-                                       *vram_width = mem_channel_number * 64;
-                               mem_type = igp_info->v11.memorytype;
-                               if (vram_type)
-                                       *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+                       switch (frev) {
+                       case 1:
+                               switch (crev) {
+                               case 11:
+                               case 12:
+                                       mem_channel_number = igp_info->v11.umachannelnumber;
+                                       if (!mem_channel_number)
+                                               mem_channel_number = 1;
+                                       /* channel width is 64 */
+                                       if (vram_width)
+                                               *vram_width = mem_channel_number * 64;
+                                       mem_type = igp_info->v11.memorytype;
+                                       if (vram_type)
+                                               *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+                                       break;
+                               default:
+                                       return -EINVAL;
+                               }
                                break;
-                       case 12:
-                               mem_channel_number = igp_info->v12.umachannelnumber;
-                               /* channel width is 64 */
-                               if (vram_width)
-                                       *vram_width = mem_channel_number * 64;
-                               mem_type = igp_info->v12.memorytype;
-                               if (vram_type)
-                                       *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+                       case 2:
+                               switch (crev) {
+                               case 1:
+                               case 2:
+                                       mem_channel_number = igp_info->v21.umachannelnumber;
+                                       if (!mem_channel_number)
+                                               mem_channel_number = 1;
+                                       /* channel width is 64 */
+                                       if (vram_width)
+                                               *vram_width = mem_channel_number * 64;
+                                       mem_type = igp_info->v21.memorytype;
+                                       if (vram_type)
+                                               *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+                                       break;
+                               default:
+                                       return -EINVAL;
+                               }
                                break;
                        default:
                                return -EINVAL;
index 1cb7d73..cab1eba 100644 (file)
@@ -81,7 +81,6 @@ MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
-MODULE_FIRMWARE("amdgpu/green_sardine_gpu_info.bin");
 
 #define AMDGPU_RESUME_MS               2000
 
@@ -2548,11 +2547,11 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
        if (adev->gmc.xgmi.num_physical_nodes > 1)
                amdgpu_xgmi_remove_device(adev);
 
-       amdgpu_amdkfd_device_fini(adev);
-
        amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
        amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
 
+       amdgpu_amdkfd_device_fini(adev);
+
        /* need to disable SMC first */
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.hw)
@@ -3034,7 +3033,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 #endif
        default:
                if (amdgpu_dc > 0)
-                       DRM_INFO("Display Core has been requested via kernel parameter "
+                       DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
                                         "but isn't supported by ASIC, ignoring\n");
                return false;
        }
index 72efd57..7169fb5 100644 (file)
@@ -1085,6 +1085,8 @@ static const struct pci_device_id pciidlist[] = {
 
        /* Renoir */
        {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+       {0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+       {0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
 
        /* Navi12 */
        {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
index 523d22d..347fec6 100644 (file)
@@ -563,7 +563,7 @@ static int psp_asd_load(struct psp_context *psp)
         * add workaround to bypass it for sriov now.
         * TODO: add version check to make it common
         */
-       if (amdgpu_sriov_vf(psp->adev) || !psp->asd_fw)
+       if (amdgpu_sriov_vf(psp->adev) || !psp->asd_ucode_size)
                return 0;
 
        cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
@@ -1315,8 +1315,12 @@ static int psp_hdcp_terminate(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       if (!psp->hdcp_context.hdcp_initialized)
-               return 0;
+       if (!psp->hdcp_context.hdcp_initialized) {
+               if (psp->hdcp_context.hdcp_shared_buf)
+                       goto out;
+               else
+                       return 0;
+       }
 
        ret = psp_hdcp_unload(psp);
        if (ret)
@@ -1324,6 +1328,7 @@ static int psp_hdcp_terminate(struct psp_context *psp)
 
        psp->hdcp_context.hdcp_initialized = false;
 
+out:
        /* free hdcp shared memory */
        amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
                              &psp->hdcp_context.hdcp_shared_mc_addr,
@@ -1462,8 +1467,12 @@ static int psp_dtm_terminate(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       if (!psp->dtm_context.dtm_initialized)
-               return 0;
+       if (!psp->dtm_context.dtm_initialized) {
+               if (psp->dtm_context.dtm_shared_buf)
+                       goto out;
+               else
+                       return 0;
+       }
 
        ret = psp_dtm_unload(psp);
        if (ret)
@@ -1471,6 +1480,7 @@ static int psp_dtm_terminate(struct psp_context *psp)
 
        psp->dtm_context.dtm_initialized = false;
 
+out:
        /* free hdcp shared memory */
        amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
                              &psp->dtm_context.dtm_shared_mc_addr,
@@ -2589,11 +2599,10 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
 
        switch (desc->fw_type) {
        case TA_FW_TYPE_PSP_ASD:
-               psp->asd_fw_version        = le32_to_cpu(desc->fw_version);
+               psp->asd_fw_version        = le32_to_cpu(desc->fw_version);
                psp->asd_feature_version   = le32_to_cpu(desc->fw_version);
-               psp->asd_ucode_size        = le32_to_cpu(desc->size_bytes);
+               psp->asd_ucode_size        = le32_to_cpu(desc->size_bytes);
                psp->asd_start_addr        = ucode_start_addr;
-               psp->asd_fw                = psp->ta_fw;
                break;
        case TA_FW_TYPE_PSP_XGMI:
                psp->ta_xgmi_ucode_version = le32_to_cpu(desc->fw_version);
index c136bd4..82e9526 100644 (file)
@@ -1518,7 +1518,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_err_handler_data *data;
        int i = 0;
-       int ret = 0;
+       int ret = 0, status;
 
        if (!con || !con->eh_data || !bps || !count)
                return -EINVAL;
@@ -1543,12 +1543,12 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
                        .size = AMDGPU_GPU_PAGE_SIZE,
                        .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
                };
-               ret = amdgpu_vram_mgr_query_page_status(
+               status = amdgpu_vram_mgr_query_page_status(
                                ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
                                data->bps[i].retired_page);
-               if (ret == -EBUSY)
+               if (status == -EBUSY)
                        (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
-               else if (ret == -ENOENT)
+               else if (status == -ENOENT)
                        (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
        }
 
index 1dd0401..19d9aa7 100644 (file)
@@ -30,6 +30,7 @@
 #define EEPROM_I2C_TARGET_ADDR_VEGA20          0xA0
 #define EEPROM_I2C_TARGET_ADDR_ARCTURUS                0xA8
 #define EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342   0xA0
+#define EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID   0xA0
 
 /*
  * The 2 macros bellow represent the actual size in bytes that
@@ -62,7 +63,8 @@
 static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
 {
        if ((adev->asic_type == CHIP_VEGA20) ||
-           (adev->asic_type == CHIP_ARCTURUS))
+           (adev->asic_type == CHIP_ARCTURUS) ||
+           (adev->asic_type == CHIP_SIENNA_CICHLID))
                return true;
 
        return false;
@@ -100,6 +102,10 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
        case CHIP_ARCTURUS:
                return __get_eeprom_i2c_addr_arct(adev, i2c_addr);
 
+       case CHIP_SIENNA_CICHLID:
+               *i2c_addr = EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID;
+               break;
+
        default:
                return false;
        }
index ba10867..346963e 100644 (file)
 #define mmGCR_GENERAL_CNTL_Sienna_Cichlid                      0x1580
 #define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX     0
 
+#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh                0x0025
+#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh_BASE_IDX       1
+#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh                0x0026
+#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh_BASE_IDX       1
 #define mmSPI_CONFIG_CNTL_1_Vangogh             0x2441
 #define mmSPI_CONFIG_CNTL_1_Vangogh_BASE_IDX    1
 #define mmVGT_TF_MEMORY_BASE_HI_Vangogh          0x2261
 #define mmVGT_ESGS_RING_SIZE_Vangogh_BASE_IDX    1
 #define mmSPI_CONFIG_CNTL_Vangogh                0x2440
 #define mmSPI_CONFIG_CNTL_Vangogh_BASE_IDX       1
+#define mmGCR_GENERAL_CNTL_Vangogh               0x1580
+#define mmGCR_GENERAL_CNTL_Vangogh_BASE_IDX      0
 
 #define mmCP_HYP_PFP_UCODE_ADDR                        0x5814
 #define mmCP_HYP_PFP_UCODE_ADDR_BASE_IDX       1
 #define mmGCVM_L2_CGTT_CLK_CTRL_Sienna_Cichlid          0x15db
 #define mmGCVM_L2_CGTT_CLK_CTRL_Sienna_Cichlid_BASE_IDX        0
 
+#define mmGC_THROTTLE_CTRL_Sienna_Cichlid              0x2030
+#define mmGC_THROTTLE_CTRL_Sienna_Cichlid_BASE_IDX     0
+
 MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
 MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
 MODULE_FIRMWARE("amdgpu/navi10_me.bin");
@@ -3237,7 +3246,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000142),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
@@ -3324,6 +3333,7 @@ static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
 static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
 static u32 gfx_v10_3_get_disabled_sa(struct amdgpu_device *adev);
 static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev);
+static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev);
 
 static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
 {
@@ -7192,6 +7202,9 @@ static int gfx_v10_0_hw_init(void *handle)
        if (adev->asic_type == CHIP_SIENNA_CICHLID)
                gfx_v10_3_program_pbb_mode(adev);
 
+       if (adev->asic_type >= CHIP_SIENNA_CICHLID)
+               gfx_v10_3_set_power_brake_sequence(adev);
+
        return r;
 }
 
@@ -7377,8 +7390,16 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
 
        amdgpu_gfx_off_ctrl(adev, false);
        mutex_lock(&adev->gfx.gpu_clock_mutex);
-       clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
-               ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
+       switch (adev->asic_type) {
+       case CHIP_VANGOGH:
+               clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) |
+                       ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL);
+               break;
+       default:
+               clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
+                       ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
+               break;
+       }
        mutex_unlock(&adev->gfx.gpu_clock_mutex);
        amdgpu_gfx_off_ctrl(adev, true);
        return clock;
@@ -9169,6 +9190,31 @@ static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev)
        }
 }
 
+static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev)
+{
+       WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX,
+                    (0x1 << GRBM_GFX_INDEX__SA_BROADCAST_WRITES__SHIFT) |
+                    (0x1 << GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT) |
+                    (0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT));
+
+       WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, ixPWRBRK_STALL_PATTERN_CTRL);
+       WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA,
+                    (0x1 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_STEP_INTERVAL__SHIFT) |
+                    (0x12 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_BEGIN_STEP__SHIFT) |
+                    (0x13 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_END_STEP__SHIFT) |
+                    (0xf << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_THROTTLE_PATTERN_BIT_NUMS__SHIFT));
+
+       WREG32_SOC15(GC, 0, mmGC_THROTTLE_CTRL_Sienna_Cichlid,
+                    (0x1 << GC_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT) |
+                    (0x1 << GC_THROTTLE_CTRL__PATTERN_MODE__SHIFT) |
+                    (0x5 << GC_THROTTLE_CTRL__RELEASE_STEP_INTERVAL__SHIFT));
+
+       WREG32_SOC15(GC, 0, mmDIDT_IND_INDEX, ixDIDT_SQ_THROTTLE_CTRL);
+
+       WREG32_SOC15(GC, 0, mmDIDT_IND_DATA,
+                    (0x1 << DIDT_SQ_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT));
+}
+
 const struct amdgpu_ip_block_version gfx_v10_0_ip_block =
 {
        .type = AMD_IP_BLOCK_TYPE_GFX,
index b72c8e4..1961745 100644 (file)
@@ -310,7 +310,7 @@ static void mmhub_v2_3_setup_vmid_config(struct amdgpu_device *adev)
                /* Send no-retry XNACK on fault to suppress VM fault storm. */
                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
                                    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
-                                   !amdgpu_noretry);
+                                   !adev->gmc.noretry);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
                                    i * hub->ctx_distance, tmp);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
@@ -491,12 +491,11 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
 {
        uint32_t def, data, def1, data1;
 
-       def  = data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
+       def  = data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
        def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
 
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
-               data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;
-
+               data &= ~MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
                data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                           DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                           DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
@@ -505,8 +504,7 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                           DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
 
        } else {
-               data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;
-
+               data |= MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
                data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                          DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                          DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
@@ -516,7 +514,7 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
        }
 
        if (def != data)
-               WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
+               WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
        if (def1 != data1)
                WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
 }
@@ -525,17 +523,44 @@ static void
 mmhub_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev,
                                           bool enable)
 {
-       uint32_t def, data;
-
-       def  = data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
-
-       if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
-               data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
-       else
-               data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+       uint32_t def, data, def1, data1, def2, data2;
+
+       def  = data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
+       def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL);
+       def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL);
+
+       if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
+               data &= ~MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
+               data1 &= !(DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+                       DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+                       DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+                       DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+                       DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+               data2 &= !(DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+                       DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+                       DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+                       DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+                       DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+       } else {
+               data |= MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
+               data1 |= (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+                       DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+                       DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+                       DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+                       DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+               data2 |= (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+                       DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+                       DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+                       DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+                       DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+       }
 
        if (def != data)
-               WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
+               WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
+       if (def1 != data1)
+               WREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL, data1);
+       if (def2 != data2)
+               WREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL, data2);
 }
 
 static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev,
@@ -554,26 +579,39 @@ static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev,
 
 static void mmhub_v2_3_get_clockgating(struct amdgpu_device *adev, u32 *flags)
 {
-       int data, data1;
+       int data, data1, data2, data3;
 
        if (amdgpu_sriov_vf(adev))
                *flags = 0;
 
-       data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
-       data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
+       data = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
+       data1  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
+       data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL);
+       data3 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL);
 
        /* AMD_CG_SUPPORT_MC_MGCG */
-       if ((data & MM_ATC_L2_MISC_CG__ENABLE_MASK) &&
-           !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+       if (!(data & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                       DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                       DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
                       DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
                       DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
-                      DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
-               *flags |= AMD_CG_SUPPORT_MC_MGCG;
+                      DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK))
+               && !(data1 & MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK)) {
+                       *flags |= AMD_CG_SUPPORT_MC_MGCG;
+       }
 
        /* AMD_CG_SUPPORT_MC_LS */
-       if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
+       if (!(data1 & MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK)
+               && !(data2 & (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+                               DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+                               DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+                               DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+                               DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK))
+               && !(data3 & (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+                               DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+                               DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+                               DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+                               DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK)))
                *flags |= AMD_CG_SUPPORT_MC_LS;
 }
 
index d65a533..3ba7bdf 100644 (file)
@@ -47,7 +47,7 @@ enum psp_gfx_crtl_cmd_id
     GFX_CTRL_CMD_ID_DISABLE_INT     = 0x00060000,   /* disable PSP-to-Gfx interrupt */
     GFX_CTRL_CMD_ID_MODE1_RST       = 0x00070000,   /* trigger the Mode 1 reset */
     GFX_CTRL_CMD_ID_GBR_IH_SET      = 0x00080000,   /* set Gbr IH_RB_CNTL registers */
-    GFX_CTRL_CMD_ID_CONSUME_CMD     = 0x000A0000,   /* send interrupt to psp for updating write pointer of vf */
+    GFX_CTRL_CMD_ID_CONSUME_CMD     = 0x00090000,   /* send interrupt to psp for updating write pointer of vf */
     GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING = 0x000C0000, /* destroy GPCOM ring */
 
     GFX_CTRL_CMD_ID_MAX             = 0x000F0000,   /* max command ID */
index 8a23636..0b3516c 100644 (file)
@@ -1239,7 +1239,8 @@ static int soc15_common_early_init(void *handle)
                break;
        case CHIP_RENOIR:
                adev->asic_funcs = &soc15_asic_funcs;
-               if (adev->pdev->device == 0x1636)
+               if ((adev->pdev->device == 0x1636) ||
+                   (adev->pdev->device == 0x164c))
                        adev->apu_flags |= AMD_APU_IS_RENOIR;
                else
                        adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
index 8cac497..a5640a6 100644 (file)
@@ -1040,11 +1040,14 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
                                (struct crat_subtype_iolink *)sub_type_hdr);
                if (ret < 0)
                        return ret;
-               crat_table->length += (sub_type_hdr->length * entries);
-               crat_table->total_entries += entries;
 
-               sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
-                               sub_type_hdr->length * entries);
+               if (entries) {
+                       crat_table->length += (sub_type_hdr->length * entries);
+                       crat_table->total_entries += entries;
+
+                       sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+                                       sub_type_hdr->length * entries);
+               }
 #else
                pr_info("IO link not available for non x86 platforms\n");
 #endif
index 797b5d4..e509a17 100644 (file)
@@ -6,7 +6,7 @@ config DRM_AMD_DC
        bool "AMD DC - Enable new display engine"
        default y
        select SND_HDA_COMPONENT if SND_HDA_CORE
-       select DRM_AMD_DC_DCN if (X86 || PPC64 || (ARM64 && KERNEL_MODE_NEON)) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
+       select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
        help
          Choose this option if you want to use the new display engine
          support for AMDGPU. This adds required support for Vega and
index 519080e..c6da89d 100644 (file)
@@ -939,41 +939,6 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
 }
 #endif
 
-#ifdef CONFIG_DEBUG_FS
-static int create_crtc_crc_properties(struct amdgpu_display_manager *dm)
-{
-       dm->crc_win_x_start_property =
-               drm_property_create_range(adev_to_drm(dm->adev),
-                                         DRM_MODE_PROP_ATOMIC,
-                                         "AMD_CRC_WIN_X_START", 0, U16_MAX);
-       if (!dm->crc_win_x_start_property)
-               return -ENOMEM;
-
-       dm->crc_win_y_start_property =
-               drm_property_create_range(adev_to_drm(dm->adev),
-                                         DRM_MODE_PROP_ATOMIC,
-                                         "AMD_CRC_WIN_Y_START", 0, U16_MAX);
-       if (!dm->crc_win_y_start_property)
-               return -ENOMEM;
-
-       dm->crc_win_x_end_property =
-               drm_property_create_range(adev_to_drm(dm->adev),
-                                         DRM_MODE_PROP_ATOMIC,
-                                         "AMD_CRC_WIN_X_END", 0, U16_MAX);
-       if (!dm->crc_win_x_end_property)
-               return -ENOMEM;
-
-       dm->crc_win_y_end_property =
-               drm_property_create_range(adev_to_drm(dm->adev),
-                                         DRM_MODE_PROP_ATOMIC,
-                                         "AMD_CRC_WIN_Y_END", 0, U16_MAX);
-       if (!dm->crc_win_y_end_property)
-               return -ENOMEM;
-
-       return 0;
-}
-#endif
-
 static int amdgpu_dm_init(struct amdgpu_device *adev)
 {
        struct dc_init_data init_data;
@@ -1120,10 +1085,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
                dc_init_callbacks(adev->dm.dc, &init_params);
        }
-#endif
-#ifdef CONFIG_DEBUG_FS
-       if (create_crtc_crc_properties(&adev->dm))
-               DRM_ERROR("amdgpu: failed to create crc property.\n");
 #endif
        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR(
@@ -2386,8 +2347,7 @@ void amdgpu_dm_update_connector_after_detect(
 
                        drm_connector_update_edid_property(connector,
                                                           aconnector->edid);
-                       aconnector->num_modes = drm_add_edid_modes(connector, aconnector->edid);
-                       drm_connector_list_update(connector);
+                       drm_add_edid_modes(connector, aconnector->edid);
 
                        if (aconnector->dc_link->aux_mode)
                                drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
@@ -5334,64 +5294,12 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
        state->crc_src = cur->crc_src;
        state->cm_has_degamma = cur->cm_has_degamma;
        state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
-#ifdef CONFIG_DEBUG_FS
-       state->crc_window = cur->crc_window;
-#endif
+
        /* TODO Duplicate dc_stream after objects are stream object is flattened */
 
        return &state->base;
 }
 
-#ifdef CONFIG_DEBUG_FS
-static int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
-                                           struct drm_crtc_state *crtc_state,
-                                           struct drm_property *property,
-                                           uint64_t val)
-{
-       struct drm_device *dev = crtc->dev;
-       struct amdgpu_device *adev = drm_to_adev(dev);
-       struct dm_crtc_state *dm_new_state =
-               to_dm_crtc_state(crtc_state);
-
-       if (property == adev->dm.crc_win_x_start_property)
-               dm_new_state->crc_window.x_start = val;
-       else if (property == adev->dm.crc_win_y_start_property)
-               dm_new_state->crc_window.y_start = val;
-       else if (property == adev->dm.crc_win_x_end_property)
-               dm_new_state->crc_window.x_end = val;
-       else if (property == adev->dm.crc_win_y_end_property)
-               dm_new_state->crc_window.y_end = val;
-       else
-               return -EINVAL;
-
-       return 0;
-}
-
-static int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc,
-                                           const struct drm_crtc_state *state,
-                                           struct drm_property *property,
-                                           uint64_t *val)
-{
-       struct drm_device *dev = crtc->dev;
-       struct amdgpu_device *adev = drm_to_adev(dev);
-       struct dm_crtc_state *dm_state =
-               to_dm_crtc_state(state);
-
-       if (property == adev->dm.crc_win_x_start_property)
-               *val = dm_state->crc_window.x_start;
-       else if (property == adev->dm.crc_win_y_start_property)
-               *val = dm_state->crc_window.y_start;
-       else if (property == adev->dm.crc_win_x_end_property)
-               *val = dm_state->crc_window.x_end;
-       else if (property == adev->dm.crc_win_y_end_property)
-               *val = dm_state->crc_window.y_end;
-       else
-               return -EINVAL;
-
-       return 0;
-}
-#endif
-
 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
 {
        enum dc_irq_source irq_source;
@@ -5458,10 +5366,6 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
        .enable_vblank = dm_enable_vblank,
        .disable_vblank = dm_disable_vblank,
        .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
-#ifdef CONFIG_DEBUG_FS
-       .atomic_set_property = amdgpu_dm_crtc_atomic_set_property,
-       .atomic_get_property = amdgpu_dm_crtc_atomic_get_property,
-#endif
 };
 
 static enum drm_connector_status
@@ -6663,25 +6567,6 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
        return 0;
 }
 
-#ifdef CONFIG_DEBUG_FS
-static void attach_crtc_crc_properties(struct amdgpu_display_manager *dm,
-                               struct amdgpu_crtc *acrtc)
-{
-       drm_object_attach_property(&acrtc->base.base,
-                                  dm->crc_win_x_start_property,
-                                  0);
-       drm_object_attach_property(&acrtc->base.base,
-                                  dm->crc_win_y_start_property,
-                                  0);
-       drm_object_attach_property(&acrtc->base.base,
-                                  dm->crc_win_x_end_property,
-                                  0);
-       drm_object_attach_property(&acrtc->base.base,
-                                  dm->crc_win_y_end_property,
-                                  0);
-}
-#endif
-
 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
                               struct drm_plane *plane,
                               uint32_t crtc_index)
@@ -6729,9 +6614,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
        drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
                                   true, MAX_COLOR_LUT_ENTRIES);
        drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
-#ifdef CONFIG_DEBUG_FS
-       attach_crtc_crc_properties(dm, acrtc);
-#endif
+
        return 0;
 
 fail:
@@ -8368,7 +8251,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
         */
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
-               bool configure_crc = false;
 
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 
@@ -8378,30 +8260,21 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                        dc_stream_retain(dm_new_crtc_state->stream);
                        acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
                        manage_dm_interrupts(adev, acrtc, true);
-               }
+
 #ifdef CONFIG_DEBUG_FS
-               if (new_crtc_state->active &&
-                       amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
                        /**
                         * Frontend may have changed so reapply the CRC capture
                         * settings for the stream.
                         */
                        dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
-                       dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
-
-                       if (amdgpu_dm_crc_window_is_default(dm_new_crtc_state)) {
-                               if (!old_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))
-                                       configure_crc = true;
-                       } else {
-                               if (amdgpu_dm_crc_window_changed(dm_new_crtc_state, dm_old_crtc_state))
-                                       configure_crc = true;
-                       }
 
-                       if (configure_crc)
+                       if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
                                amdgpu_dm_crtc_configure_crc_source(
-                                       crtc, dm_new_crtc_state, dm_new_crtc_state->crc_src);
-               }
+                                       crtc, dm_new_crtc_state,
+                                       dm_new_crtc_state->crc_src);
+                       }
 #endif
+               }
        }
 
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
index 2ee6edb..1182daf 100644 (file)
@@ -336,32 +336,6 @@ struct amdgpu_display_manager {
         */
        const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
 
-#ifdef CONFIG_DEBUG_FS
-       /**
-        * @crc_win_x_start_property:
-        *
-        * X start of the crc calculation window
-        */
-       struct drm_property *crc_win_x_start_property;
-       /**
-        * @crc_win_y_start_property:
-        *
-        * Y start of the crc calculation window
-        */
-       struct drm_property *crc_win_y_start_property;
-       /**
-        * @crc_win_x_end_property:
-        *
-        * X end of the crc calculation window
-        */
-       struct drm_property *crc_win_x_end_property;
-       /**
-        * @crc_win_y_end_property:
-        *
-        * Y end of the crc calculation window
-        */
-       struct drm_property *crc_win_y_end_property;
-#endif
        /**
         * @mst_encoders:
         *
@@ -448,15 +422,6 @@ struct dm_plane_state {
        struct dc_plane_state *dc_state;
 };
 
-#ifdef CONFIG_DEBUG_FS
-struct crc_rec {
-       uint16_t x_start;
-       uint16_t y_start;
-       uint16_t x_end;
-       uint16_t y_end;
-       };
-#endif
-
 struct dm_crtc_state {
        struct drm_crtc_state base;
        struct dc_stream_state *stream;
@@ -479,9 +444,6 @@ struct dm_crtc_state {
        struct dc_info_packet vrr_infopacket;
 
        int abm_level;
-#ifdef CONFIG_DEBUG_FS
-       struct crc_rec crc_window;
-#endif
 };
 
 #define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
index 7b886a7..66cb873 100644 (file)
@@ -81,41 +81,6 @@ const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
        return pipe_crc_sources;
 }
 
-static void amdgpu_dm_set_crc_window_default(struct dm_crtc_state *dm_crtc_state)
-{
-       dm_crtc_state->crc_window.x_start = 0;
-       dm_crtc_state->crc_window.y_start = 0;
-       dm_crtc_state->crc_window.x_end = 0;
-       dm_crtc_state->crc_window.y_end = 0;
-}
-
-bool amdgpu_dm_crc_window_is_default(struct dm_crtc_state *dm_crtc_state)
-{
-       bool ret = true;
-
-       if ((dm_crtc_state->crc_window.x_start != 0) ||
-               (dm_crtc_state->crc_window.y_start != 0) ||
-               (dm_crtc_state->crc_window.x_end != 0) ||
-               (dm_crtc_state->crc_window.y_end != 0))
-               ret = false;
-
-       return ret;
-}
-
-bool amdgpu_dm_crc_window_changed(struct dm_crtc_state *dm_new_crtc_state,
-                                       struct dm_crtc_state *dm_old_crtc_state)
-{
-       bool ret = false;
-
-       if ((dm_new_crtc_state->crc_window.x_start != dm_old_crtc_state->crc_window.x_start) ||
-               (dm_new_crtc_state->crc_window.y_start != dm_old_crtc_state->crc_window.y_start) ||
-               (dm_new_crtc_state->crc_window.x_end != dm_old_crtc_state->crc_window.x_end) ||
-               (dm_new_crtc_state->crc_window.y_end != dm_old_crtc_state->crc_window.y_end))
-               ret = true;
-
-       return ret;
-}
-
 int
 amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
                                 size_t *values_cnt)
@@ -140,7 +105,6 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
        struct dc_stream_state *stream_state = dm_crtc_state->stream;
        bool enable = amdgpu_dm_is_valid_crc_source(source);
        int ret = 0;
-       struct crc_params *crc_window = NULL, tmp_window;
 
        /* Configuration will be deferred to stream enable. */
        if (!stream_state)
@@ -150,24 +114,8 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
 
        /* Enable CRTC CRC generation if necessary. */
        if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
-               if (!enable)
-                       amdgpu_dm_set_crc_window_default(dm_crtc_state);
-
-               if (!amdgpu_dm_crc_window_is_default(dm_crtc_state)) {
-                       crc_window = &tmp_window;
-
-                       tmp_window.windowa_x_start = dm_crtc_state->crc_window.x_start;
-                       tmp_window.windowa_y_start = dm_crtc_state->crc_window.y_start;
-                       tmp_window.windowa_x_end = dm_crtc_state->crc_window.x_end;
-                       tmp_window.windowa_y_end = dm_crtc_state->crc_window.y_end;
-                       tmp_window.windowb_x_start = dm_crtc_state->crc_window.x_start;
-                       tmp_window.windowb_y_start = dm_crtc_state->crc_window.y_start;
-                       tmp_window.windowb_x_end = dm_crtc_state->crc_window.x_end;
-                       tmp_window.windowb_y_end = dm_crtc_state->crc_window.y_end;
-               }
-
                if (!dc_stream_configure_crc(stream_state->ctx->dc,
-                                            stream_state, crc_window, enable, enable)) {
+                                            stream_state, NULL, enable, enable)) {
                        ret = -EINVAL;
                        goto unlock;
                }
index 0235bfb..f7d7317 100644 (file)
@@ -47,9 +47,6 @@ static inline bool amdgpu_dm_is_valid_crc_source(enum amdgpu_dm_pipe_crc_source
 
 /* amdgpu_dm_crc.c */
 #ifdef CONFIG_DEBUG_FS
-bool amdgpu_dm_crc_window_is_default(struct dm_crtc_state *dm_crtc_state);
-bool amdgpu_dm_crc_window_changed(struct dm_crtc_state *dm_new_crtc_state,
-                                       struct dm_crtc_state *dm_old_crtc_state);
 int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
                                        struct dm_crtc_state *dm_crtc_state,
                                        enum amdgpu_dm_pipe_crc_source source);
index 64f515d..f3c00f4 100644 (file)
@@ -33,10 +33,6 @@ ifdef CONFIG_PPC64
 calcs_ccflags := -mhard-float -maltivec
 endif
 
-ifdef CONFIG_ARM64
-calcs_rcflags := -mgeneral-regs-only
-endif
-
 ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
 IS_OLD_GCC = 1
index d59b380..ff96bee 100644 (file)
@@ -104,13 +104,6 @@ ifdef CONFIG_PPC64
 CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
 endif
 
-# prevent build errors:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# this file is unused on arm64, just like on ppc64
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := -mgeneral-regs-only
-endif
-
 AMD_DAL_CLK_MGR_DCN21 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn21/,$(CLK_MGR_DCN21))
 
 AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
@@ -125,13 +118,6 @@ ifdef CONFIG_PPC64
 CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn30/dcn30_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
 endif
 
-# prevent build errors:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# this file is unused on arm64, just like on ppc64
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn30/dcn30_clk_mgr.o := -mgeneral-regs-only
-endif
-
 AMD_DAL_CLK_MGR_DCN30 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn30/,$(CLK_MGR_DCN30))
 
 AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN30)
@@ -146,13 +132,6 @@ ifdef CONFIG_PPC64
 CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn301/vg_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
 endif
 
-# prevent build errors:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# this file is unused on arm64, just like on ppc64
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn301/vg_clk_mgr.o := -mgeneral-regs-only
-endif
-
 AMD_DAL_CLK_MGR_DCN301 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn301/,$(CLK_MGR_DCN301))
 
 AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN301)
index 5b466f4..ab98c25 100644 (file)
@@ -251,6 +251,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
        struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
        bool force_reset = false;
        bool update_uclk = false;
+       bool p_state_change_support;
 
        if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present)
                return;
@@ -291,8 +292,9 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
                clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
 
        clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
-       if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
-               clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support;
+       p_state_change_support = new_clocks->p_state_change_support || (display_count == 0);
+       if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
+               clk_mgr_base->clks.p_state_change_support = p_state_change_support;
 
                /* to disable P-State switching, set UCLK min = max */
                if (!clk_mgr_base->clks.p_state_change_support)
index 9e1071b..f4a2088 100644 (file)
@@ -2487,9 +2487,14 @@ enum dc_status dc_link_validate_mode_timing(
 static struct abm *get_abm_from_stream_res(const struct dc_link *link)
 {
        int i;
-       struct dc *dc = link->ctx->dc;
+       struct dc *dc = NULL;
        struct abm *abm = NULL;
 
+       if (!link || !link->ctx)
+               return NULL;
+
+       dc = link->ctx->dc;
+
        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i];
                struct dc_stream_state *stream = pipe_ctx.stream;
index 2fc1223..f95bade 100644 (file)
@@ -2399,6 +2399,9 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting
                        initial_link_setting;
        uint32_t link_bw;
 
+       if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
+               return false;
+
        /* search for the minimum link setting that:
         * 1. is supported according to the link training result
         * 2. could support the b/w requested by the timing
@@ -3045,14 +3048,14 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
                for (i = 0; i < MAX_PIPES; i++) {
                        pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
                        if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
-                                       pipe_ctx->stream->link == link)
+                                       pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
                                core_link_disable_stream(pipe_ctx);
                }
 
                for (i = 0; i < MAX_PIPES; i++) {
                        pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
                        if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
-                                       pipe_ctx->stream->link == link)
+                                       pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
                                core_link_enable_stream(link->dc->current_state, pipe_ctx);
                }
 
@@ -3992,7 +3995,7 @@ bool dc_link_dp_set_test_pattern(
        unsigned int cust_pattern_size)
 {
        struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
-       struct pipe_ctx *pipe_ctx = &pipes[0];
+       struct pipe_ctx *pipe_ctx = NULL;
        unsigned int lane;
        unsigned int i;
        unsigned char link_qual_pattern[LANE_COUNT_DP_MAX] = {0};
@@ -4002,12 +4005,18 @@ bool dc_link_dp_set_test_pattern(
        memset(&training_pattern, 0, sizeof(training_pattern));
 
        for (i = 0; i < MAX_PIPES; i++) {
+               if (pipes[i].stream == NULL)
+                       continue;
+
                if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) {
                        pipe_ctx = &pipes[i];
                        break;
                }
        }
 
+       if (pipe_ctx == NULL)
+               return false;
+
        /* Reset CRTC Test Pattern if it is currently running and request is VideoMode */
        if (link->test_pattern_enabled && test_pattern ==
                        DP_TEST_PATTERN_VIDEO_MODE) {
index 733e6e6..62ad1a1 100644 (file)
@@ -31,11 +31,4 @@ DCN10 = dcn10_init.o dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
 
 AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10))
 
-# fix:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# aarch64 does not support soft-float, so use hard-float and handle this in code
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn10/dcn10_resource.o := -mgeneral-regs-only
-endif
-
 AMD_DISPLAY_FILES += $(AMD_DAL_DCN10)
index cfc130e..017b67b 100644 (file)
@@ -647,8 +647,13 @@ static void power_on_plane(
        if (REG(DC_IP_REQUEST_CNTL)) {
                REG_SET(DC_IP_REQUEST_CNTL, 0,
                                IP_REQUEST_EN, 1);
-               hws->funcs.dpp_pg_control(hws, plane_id, true);
-               hws->funcs.hubp_pg_control(hws, plane_id, true);
+
+               if (hws->funcs.dpp_pg_control)
+                       hws->funcs.dpp_pg_control(hws, plane_id, true);
+
+               if (hws->funcs.hubp_pg_control)
+                       hws->funcs.hubp_pg_control(hws, plane_id, true);
+
                REG_SET(DC_IP_REQUEST_CNTL, 0,
                                IP_REQUEST_EN, 0);
                DC_LOG_DEBUG(
@@ -1082,8 +1087,13 @@ void dcn10_plane_atomic_power_down(struct dc *dc,
        if (REG(DC_IP_REQUEST_CNTL)) {
                REG_SET(DC_IP_REQUEST_CNTL, 0,
                                IP_REQUEST_EN, 1);
-               hws->funcs.dpp_pg_control(hws, dpp->inst, false);
-               hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+
+               if (hws->funcs.dpp_pg_control)
+                       hws->funcs.dpp_pg_control(hws, dpp->inst, false);
+
+               if (hws->funcs.hubp_pg_control)
+                       hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+
                dpp->funcs->dpp_reset(dpp);
                REG_SET(DC_IP_REQUEST_CNTL, 0,
                                IP_REQUEST_EN, 0);
index 100ce0e..b096011 100644 (file)
@@ -470,7 +470,7 @@ void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock)
 unsigned int mpc1_get_mpc_out_mux(struct mpc *mpc, int opp_id)
 {
        struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
-       uint32_t val = 0;
+       uint32_t val = 0xf;
 
        if (opp_id < MAX_OPP && REG(MUX[opp_id]))
                REG_GET(MUX[opp_id], MPC_OUT_MUX, &val);
index bdc3783..90e912f 100644 (file)
@@ -608,8 +608,8 @@ static const struct dc_debug_options debug_defaults_drv = {
                .disable_pplib_clock_request = false,
                .disable_pplib_wm_range = false,
                .pplib_wm_report_mode = WM_REPORT_DEFAULT,
-               .pipe_split_policy = MPC_SPLIT_DYNAMIC,
-               .force_single_disp_pipe_split = true,
+               .pipe_split_policy = MPC_SPLIT_AVOID,
+               .force_single_disp_pipe_split = false,
                .disable_dcc = DCC_ENABLE,
                .voltage_align_fclk = true,
                .disable_stereo_support = true,
@@ -1534,15 +1534,8 @@ static bool dcn10_resource_construct(
        memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
        memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));
 
-#if defined(CONFIG_ARM64)
-       /* Aarch64 does not support -msoft-float/-mfloat-abi=soft */
-       DC_FP_START();
-       dcn10_resource_construct_fp(dc);
-       DC_FP_END();
-#else
        /* Other architectures we build for build this with soft-float */
        dcn10_resource_construct_fp(dc);
-#endif
 
        pool->base.pp_smu = dcn10_pp_smu_create(ctx);
 
index 624cb13..5fcaf78 100644 (file)
@@ -17,10 +17,6 @@ ifdef CONFIG_PPC64
 CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -maltivec
 endif
 
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mgeneral-regs-only
-endif
-
 ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
 IS_OLD_GCC = 1
index cb822df..480d928 100644 (file)
@@ -1062,8 +1062,13 @@ static void dcn20_power_on_plane(
        if (REG(DC_IP_REQUEST_CNTL)) {
                REG_SET(DC_IP_REQUEST_CNTL, 0,
                                IP_REQUEST_EN, 1);
-               dcn20_dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
-               dcn20_hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
+
+               if (hws->funcs.dpp_pg_control)
+                       hws->funcs.dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
+
+               if (hws->funcs.hubp_pg_control)
+                       hws->funcs.hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
+
                REG_SET(DC_IP_REQUEST_CNTL, 0,
                                IP_REQUEST_EN, 0);
                DC_LOG_DEBUG(
index e04ecf0..5ed18ca 100644 (file)
@@ -2517,8 +2517,7 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
                 * if this primary pipe has a bottom pipe in prev. state
                 * and if the bottom pipe is still available (which it should be),
                 * pick that pipe as secondary
-                * Same logic applies for ODM pipes. Since mpo is not allowed with odm
-                * check in else case.
+                * Same logic applies for ODM pipes
                 */
                if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) {
                        preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx;
@@ -2526,7 +2525,9 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
                                secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
                                secondary_pipe->pipe_idx = preferred_pipe_idx;
                        }
-               } else if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
+               }
+               if (secondary_pipe == NULL &&
+                               dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
                        preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx;
                        if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
                                secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
index 1ee5fc0..bb8c951 100644 (file)
@@ -13,10 +13,6 @@ ifdef CONFIG_PPC64
 CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -maltivec
 endif
 
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mgeneral-regs-only
-endif
-
 ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
 IS_OLD_GCC = 1
index 1c88d2e..b000b43 100644 (file)
@@ -296,7 +296,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
        .num_banks = 8,
        .num_chans = 4,
        .vmm_page_size_bytes = 4096,
-       .dram_clock_change_latency_us = 23.84,
+       .dram_clock_change_latency_us = 11.72,
        .return_bus_width_bytes = 64,
        .dispclk_dppclk_vco_speed_mhz = 3600,
        .xfc_bus_transport_time_us = 4,
index 248c271..c20331e 100644 (file)
@@ -41,11 +41,6 @@ CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -maltivec
 CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -maltivec
 endif
 
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mgeneral-regs-only
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mgeneral-regs-only
-endif
-
 ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
 IS_OLD_GCC = 1
index 2fd5d34..3ca7d91 100644 (file)
@@ -21,10 +21,6 @@ ifdef CONFIG_PPC64
 CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mhard-float -maltivec
 endif
 
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mgeneral-regs-only
-endif
-
 ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
 IS_OLD_GCC = 1
index 4825c5c..35f5bf0 100644 (file)
@@ -1731,6 +1731,7 @@ static struct resource_funcs dcn301_res_pool_funcs = {
        .populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
        .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
        .add_stream_to_ctx = dcn30_add_stream_to_ctx,
+       .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
        .remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
        .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
        .set_mcif_arb_params = dcn30_set_mcif_arb_params,
index 36e44e1..8d4924b 100644 (file)
@@ -20,10 +20,6 @@ ifdef CONFIG_PPC64
 CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mhard-float -maltivec
 endif
 
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mgeneral-regs-only
-endif
-
 ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
 IS_OLD_GCC = 1
index a02a33d..6bb7f29 100644 (file)
@@ -33,10 +33,6 @@ ifdef CONFIG_PPC64
 dml_ccflags := -mhard-float -maltivec
 endif
 
-ifdef CONFIG_ARM64
-dml_rcflags := -mgeneral-regs-only
-endif
-
 ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
 IS_OLD_GCC = 1
index 860e72a..80170f9 100644 (file)
@@ -2635,14 +2635,15 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
        }
 
        if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
-                       mode_lib->vba.MinActiveDRAMClockChangeMargin > 60 &&
-                       mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
+                       mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
                mode_lib->vba.DRAMClockChangeWatermark += 25;
 
                for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
-                       if (mode_lib->vba.DRAMClockChangeWatermark >
-                       dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
-                               mode_lib->vba.MinTTUVBlank[k] += 25;
+                       if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
+                               if (mode_lib->vba.DRAMClockChangeWatermark >
+                               dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
+                                       mode_lib->vba.MinTTUVBlank[k] += 25;
+                       }
                }
 
                mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
index f2624a1..8d31eb7 100644 (file)
@@ -10,10 +10,6 @@ ifdef CONFIG_PPC64
 dsc_ccflags := -mhard-float -maltivec
 endif
 
-ifdef CONFIG_ARM64
-dsc_rcflags := -mgeneral-regs-only
-endif
-
 ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
 IS_OLD_GCC = 1
index 95cb569..126c2f3 100644 (file)
 #include <asm/fpu/api.h>
 #define DC_FP_START() kernel_fpu_begin()
 #define DC_FP_END() kernel_fpu_end()
-#elif defined(CONFIG_ARM64)
-#include <asm/neon.h>
-#define DC_FP_START() kernel_neon_begin()
-#define DC_FP_END() kernel_neon_end()
 #elif defined(CONFIG_PPC64)
 #include <asm/switch_to.h>
 #include <asm/cputable.h>
index e57e64b..8832278 100644 (file)
@@ -251,7 +251,7 @@ static int smu10_set_hard_min_gfxclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cl
                smu10_data->gfx_actual_soft_min_freq = clock;
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_SetHardMinGfxClk,
-                                       smu10_data->gfx_actual_soft_min_freq,
+                                       clock,
                                        NULL);
        }
        return 0;
@@ -558,7 +558,8 @@ static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 
        /* enable the pp_od_clk_voltage sysfs file */
        hwmgr->od_enabled = 1;
-
+       /* disabled fine grain tuning function by default */
+       data->fine_grain_enabled = 0;
        return result;
 }
 
@@ -597,6 +598,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
        uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100;
        uint32_t index_fclk = data->clock_vol_info.vdd_dep_on_fclk->count - 1;
        uint32_t index_socclk = data->clock_vol_info.vdd_dep_on_socclk->count - 1;
+       uint32_t fine_grain_min_freq = 0, fine_grain_max_freq = 0;
 
        if (hwmgr->smu_version < 0x1E3700) {
                pr_info("smu firmware version too old, can not set dpm level\n");
@@ -613,6 +615,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
        switch (level) {
        case AMD_DPM_FORCED_LEVEL_HIGH:
        case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+               data->fine_grain_enabled = 0;
+
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+               data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+               data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinGfxClk,
                                                data->gfx_max_freq_limit/100,
@@ -648,6 +658,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                                                NULL);
                break;
        case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+               data->fine_grain_enabled = 0;
+
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+               data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+               data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinGfxClk,
                                                min_sclk,
@@ -658,6 +676,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                                                NULL);
                break;
        case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+               data->fine_grain_enabled = 0;
+
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+               data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+               data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinFclkByFreq,
                                                min_mclk,
@@ -668,6 +694,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                                                NULL);
                break;
        case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+               data->fine_grain_enabled = 0;
+
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+               data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+               data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinGfxClk,
                                                SMU10_UMD_PSTATE_GFXCLK,
@@ -703,6 +737,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                                                NULL);
                break;
        case AMD_DPM_FORCED_LEVEL_AUTO:
+               data->fine_grain_enabled = 0;
+
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+               data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+               data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinGfxClk,
                                                min_sclk,
@@ -741,6 +783,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                                                NULL);
                break;
        case AMD_DPM_FORCED_LEVEL_LOW:
+               data->fine_grain_enabled = 0;
+
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+               data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+               data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinGfxClk,
                                                data->gfx_min_freq_limit/100,
@@ -759,6 +809,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                                                NULL);
                break;
        case AMD_DPM_FORCED_LEVEL_MANUAL:
+               data->fine_grain_enabled = 1;
        case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
        default:
                break;
@@ -948,6 +999,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
        struct smu10_voltage_dependency_table *mclk_table =
                        data->clock_vol_info.vdd_dep_on_fclk;
        uint32_t i, now, size = 0;
+       uint32_t min_freq, max_freq = 0;
+       uint32_t ret = 0;
 
        switch (type) {
        case PP_SCLK:
@@ -983,18 +1036,28 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
                break;
        case OD_SCLK:
                if (hwmgr->od_enabled) {
-                       size = sprintf(buf, "%s:\n", "OD_SCLK");
+                       ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+                       if (ret)
+                               return ret;
+                       ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+                       if (ret)
+                               return ret;
 
+                       size = sprintf(buf, "%s:\n", "OD_SCLK");
                        size += sprintf(buf + size, "0: %10uMhz\n",
-                       (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : data->gfx_min_freq_limit/100);
-                       size += sprintf(buf + size, "1: %10uMhz\n", data->gfx_max_freq_limit/100);
+                       (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq);
+                       size += sprintf(buf + size, "1: %10uMhz\n",
+                       (data->gfx_actual_soft_max_freq > 0) ? data->gfx_actual_soft_max_freq : max_freq);
                }
                break;
        case OD_RANGE:
                if (hwmgr->od_enabled) {
-                       uint32_t min_freq, max_freq = 0;
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
-                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+                       ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+                       if (ret)
+                               return ret;
+                       ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+                       if (ret)
+                               return ret;
 
                        size = sprintf(buf, "%s:\n", "OD_RANGE");
                        size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
@@ -1414,23 +1477,96 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
                                        enum PP_OD_DPM_TABLE_COMMAND type,
                                        long *input, uint32_t size)
 {
+       uint32_t min_freq, max_freq = 0;
+       struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+       int ret = 0;
+
        if (!hwmgr->od_enabled) {
                pr_err("Fine grain not support\n");
                return -EINVAL;
        }
 
-       if (size != 2) {
-               pr_err("Input parameter number not correct\n");
+       if (!smu10_data->fine_grain_enabled) {
+               pr_err("Fine grain not started\n");
                return -EINVAL;
        }
 
        if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
-               if (input[0] == 0)
-                       smu10_set_hard_min_gfxclk_by_freq(hwmgr, input[1]);
-               else if (input[0] == 1)
-                       smu10_set_soft_max_gfxclk_by_freq(hwmgr, input[1]);
-               else
+               if (size != 2) {
+                       pr_err("Input parameter number not correct\n");
                        return -EINVAL;
+               }
+
+               if (input[0] == 0) {
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+                       if (input[1] < min_freq) {
+                               pr_err("Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
+                                       input[1], min_freq);
+                               return -EINVAL;
+                       }
+                       smu10_data->gfx_actual_soft_min_freq = input[1];
+               } else if (input[0] == 1) {
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+                       if (input[1] > max_freq) {
+                               pr_err("Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
+                                       input[1], max_freq);
+                               return -EINVAL;
+                       }
+                       smu10_data->gfx_actual_soft_max_freq = input[1];
+               } else {
+                       return -EINVAL;
+               }
+       } else if (type == PP_OD_RESTORE_DEFAULT_TABLE) {
+               if (size != 0) {
+                       pr_err("Input parameter number not correct\n");
+                       return -EINVAL;
+               }
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+
+               smu10_data->gfx_actual_soft_min_freq = min_freq;
+               smu10_data->gfx_actual_soft_max_freq = max_freq;
+
+               ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                                       PPSMC_MSG_SetHardMinGfxClk,
+                                       min_freq,
+                                       NULL);
+               if (ret)
+                       return ret;
+
+               ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                                       PPSMC_MSG_SetSoftMaxGfxClk,
+                                       max_freq,
+                                       NULL);
+               if (ret)
+                       return ret;
+       } else if (type == PP_OD_COMMIT_DPM_TABLE) {
+               if (size != 0) {
+                       pr_err("Input parameter number not correct\n");
+                       return -EINVAL;
+               }
+
+               if (smu10_data->gfx_actual_soft_min_freq > smu10_data->gfx_actual_soft_max_freq) {
+                       pr_err("The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+                                       smu10_data->gfx_actual_soft_min_freq, smu10_data->gfx_actual_soft_max_freq);
+                       return -EINVAL;
+               }
+
+               ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                                       PPSMC_MSG_SetHardMinGfxClk,
+                                       smu10_data->gfx_actual_soft_min_freq,
+                                       NULL);
+               if (ret)
+                       return ret;
+
+               ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                                       PPSMC_MSG_SetSoftMaxGfxClk,
+                                       smu10_data->gfx_actual_soft_max_freq,
+                                       NULL);
+               if (ret)
+                       return ret;
+       } else {
+               return -EINVAL;
        }
 
        return 0;
index 6c9b5f0..808e0ec 100644 (file)
@@ -283,6 +283,7 @@ struct smu10_hwmgr {
        uint32_t                        vclk_soft_min;
        uint32_t                        dclk_soft_min;
        uint32_t                        gfx_actual_soft_min_freq;
+       uint32_t                        gfx_actual_soft_max_freq;
        uint32_t                        gfx_min_freq_limit;
        uint32_t                        gfx_max_freq_limit; /* in 10Khz*/
 
@@ -299,6 +300,8 @@ struct smu10_hwmgr {
        bool need_min_deep_sleep_dcefclk;
        uint32_t                             deep_sleep_dcefclk;
        uint32_t                             num_active_display;
+
+       bool                                                    fine_grain_enabled;
 };
 
 struct pp_hwmgr;
index 9608745..12b36eb 100644 (file)
@@ -2372,7 +2372,7 @@ static void sienna_cichlid_fill_i2c_req(SwI2cRequest_t  *req, bool write,
 {
        int i;
 
-       req->I2CcontrollerPort = 0;
+       req->I2CcontrollerPort = 1;
        req->I2CSpeed = 2;
        req->SlaveAddress = address;
        req->NumCmds = numbytes;
index 8cb4fce..5c1482d 100644 (file)
@@ -252,7 +252,8 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
                *value = metrics->UvdActivity;
                break;
        case METRICS_AVERAGE_SOCKETPOWER:
-               *value = metrics->CurrentSocketPower;
+               *value = (metrics->CurrentSocketPower << 8) /
+               1000 ;
                break;
        case METRICS_TEMPERATURE_EDGE:
                *value = metrics->GfxTemperature / 100 *
index dc75db8..9a96970 100644 (file)
@@ -188,6 +188,7 @@ static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type
                        return -EINVAL;
                *freq = clk_table->SocClocks[dpm_level].Freq;
                break;
+       case SMU_UCLK:
        case SMU_MCLK:
                if (dpm_level >= NUM_FCLK_DPM_LEVELS)
                        return -EINVAL;
@@ -1120,7 +1121,7 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
 static int renoir_gfx_state_change_set(struct smu_context *smu, uint32_t state)
 {
 
-       return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GpuChangeState, state, NULL);
+       return 0;
 }
 
 static const struct pptable_funcs renoir_ppt_funcs = {
index 522d550..06abf2a 100644 (file)
@@ -225,6 +225,7 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
        break;
        case SMU_FCLK:
        case SMU_MCLK:
+       case SMU_UCLK:
                ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
                if (ret)
                        return ret;
index ba15070..4a8cbec 100644 (file)
@@ -3021,7 +3021,7 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set,
 
        ret = handle_conflicting_encoders(state, true);
        if (ret)
-               return ret;
+               goto fail;
 
        ret = drm_atomic_commit(state);
 
index 02ca22e..0b232a7 100644 (file)
@@ -387,9 +387,16 @@ static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
        if (gbo->vmap_use_count > 0)
                goto out;
 
-       ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
-       if (ret)
-               return ret;
+       /*
+        * VRAM helpers unmap the BO only on demand. So the previous
+        * page mapping might still be around. Only vmap if the there's
+        * no mapping present.
+        */
+       if (dma_buf_map_is_null(&gbo->map)) {
+               ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
+               if (ret)
+                       return ret;
+       }
 
 out:
        ++gbo->vmap_use_count;
@@ -577,6 +584,7 @@ static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
                return;
 
        ttm_bo_vunmap(bo, &gbo->map);
+       dma_buf_map_clear(&gbo->map); /* explicitly clear mapping for next vmap call */
 }
 
 static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo,
index e623194..a0cb746 100644 (file)
@@ -1163,7 +1163,14 @@ retry:
        if (ret)
                goto out;
 
-       if (old_fb->format != fb->format) {
+       /*
+        * Only check the FOURCC format code, excluding modifiers. This is
+        * enough for all legacy drivers. Atomic drivers have their own
+        * checks in their ->atomic_check implementation, which will
+        * return -EINVAL if any hw or driver constraint is violated due
+        * to modifier changes.
+        */
+       if (old_fb->format->format != fb->format->format) {
                DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
                ret = -EINVAL;
                goto out;
index 6e74e67..3491460 100644 (file)
@@ -388,19 +388,18 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
                return -ENOENT;
 
        *fence = drm_syncobj_fence_get(syncobj);
-       drm_syncobj_put(syncobj);
 
        if (*fence) {
                ret = dma_fence_chain_find_seqno(fence, point);
                if (!ret)
-                       return 0;
+                       goto out;
                dma_fence_put(*fence);
        } else {
                ret = -EINVAL;
        }
 
        if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
-               return ret;
+               goto out;
 
        memset(&wait, 0, sizeof(wait));
        wait.task = current;
@@ -432,6 +431,9 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
        if (wait.node.next)
                drm_syncobj_remove_wait(syncobj, &wait);
 
+out:
+       drm_syncobj_put(syncobj);
+
        return ret;
 }
 EXPORT_SYMBOL(drm_syncobj_find_fence);
index e5574e5..6d9e81e 100644 (file)
@@ -38,6 +38,7 @@ i915-y += i915_drv.o \
          i915_config.o \
          i915_irq.o \
          i915_getparam.o \
+         i915_mitigations.o \
          i915_params.o \
          i915_pci.o \
          i915_scatterlist.o \
index a9439b4..b3533a3 100644 (file)
@@ -1616,10 +1616,6 @@ static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
 
        get_dsi_io_power_domains(i915,
                                 enc_to_intel_dsi(encoder));
-
-       if (crtc_state->dsc.compression_enable)
-               intel_display_power_get(i915,
-                                       intel_dsc_power_domain(crtc_state));
 }
 
 static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
index 92940a0..d5ace48 100644 (file)
@@ -3725,7 +3725,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
        intel_ddi_init_dp_buf_reg(encoder, crtc_state);
        if (!is_mst)
                intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
-       intel_dp_configure_protocol_converter(intel_dp);
+       intel_dp_configure_protocol_converter(intel_dp, crtc_state);
        intel_dp_sink_set_decompression_state(intel_dp, crtc_state,
                                              true);
        intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
index ce82d65..34d78c6 100644 (file)
@@ -1436,6 +1436,9 @@ struct intel_dp {
                bool ycbcr_444_to_420;
        } dfp;
 
+       /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
+       struct pm_qos_request pm_qos;
+
        /* Display stream compression testing */
        bool force_dsc_en;
 
index 2165398..09123e8 100644 (file)
@@ -1489,7 +1489,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
         * lowest possible wakeup latency and so prevent the cpu from going into
         * deep sleep states.
         */
-       cpu_latency_qos_update_request(&i915->pm_qos, 0);
+       cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);
 
        intel_dp_check_edp(intel_dp);
 
@@ -1622,7 +1622,7 @@ done:
 
        ret = recv_bytes;
 out:
-       cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);
+       cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
 
        if (vdd)
                edp_panel_vdd_off(intel_dp, false);
@@ -1898,6 +1898,9 @@ static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
 static void
 intel_dp_aux_fini(struct intel_dp *intel_dp)
 {
+       if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
+               cpu_latency_qos_remove_request(&intel_dp->pm_qos);
+
        kfree(intel_dp->aux.name);
 }
 
@@ -1950,6 +1953,7 @@ intel_dp_aux_init(struct intel_dp *intel_dp)
                                               encoder->base.name);
 
        intel_dp->aux.transfer = intel_dp_aux_transfer;
+       cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
 }
 
 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
@@ -4010,7 +4014,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
        intel_de_posting_read(dev_priv, intel_dp->output_reg);
 }
 
-void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
+                                          const struct intel_crtc_state *crtc_state)
 {
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        u8 tmp;
@@ -4029,8 +4034,8 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
                drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n",
                            enableddisabled(intel_dp->has_hdmi_sink));
 
-       tmp = intel_dp->dfp.ycbcr_444_to_420 ?
-               DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
+       tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
+               intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
 
        if (drm_dp_dpcd_writeb(&intel_dp->aux,
                               DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
@@ -4084,7 +4089,7 @@ static void intel_enable_dp(struct intel_atomic_state *state,
        }
 
        intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
-       intel_dp_configure_protocol_converter(intel_dp);
+       intel_dp_configure_protocol_converter(intel_dp, pipe_config);
        intel_dp_start_link_train(intel_dp, pipe_config);
        intel_dp_stop_link_train(intel_dp, pipe_config);
 
index b871a09..05f7ddf 100644 (file)
@@ -51,7 +51,8 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
 int intel_dp_retrain_link(struct intel_encoder *encoder,
                          struct drm_modeset_acquire_ctx *ctx);
 void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode);
-void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp);
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
+                                          const struct intel_crtc_state *crtc_state);
 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
                                           const struct intel_crtc_state *crtc_state,
                                           bool enable);
index b2a4bbc..b9d8825 100644 (file)
@@ -2210,6 +2210,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
        if (content_protection_type_changed) {
                mutex_lock(&hdcp->mutex);
                hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+               drm_connector_get(&connector->base);
                schedule_work(&hdcp->prop_work);
                mutex_unlock(&hdcp->mutex);
        }
@@ -2221,6 +2222,14 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
                desired_and_not_enabled =
                        hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
                mutex_unlock(&hdcp->mutex);
+               /*
+                * If HDCP already ENABLED and CP property is DESIRED, schedule
+                * prop_work to update correct CP property to user space.
+                */
+               if (!desired_and_not_enabled && !content_protection_type_changed) {
+                       drm_connector_get(&connector->base);
+                       schedule_work(&hdcp->prop_work);
+               }
        }
 
        if (desired_and_not_enabled || content_protection_type_changed)
index 9f23bac..d64fce1 100644 (file)
@@ -1650,16 +1650,13 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
                val = pch_get_backlight(connector);
        else
                val = lpt_get_backlight(connector);
-       val = intel_panel_compute_brightness(connector, val);
-       panel->backlight.level = clamp(val, panel->backlight.min,
-                                      panel->backlight.max);
 
        if (cpu_mode) {
                drm_dbg_kms(&dev_priv->drm,
                            "CPU backlight register was enabled, switching to PCH override\n");
 
                /* Write converted CPU PWM value to PCH override register */
-               lpt_set_backlight(connector->base.state, panel->backlight.level);
+               lpt_set_backlight(connector->base.state, val);
                intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
                               pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
 
@@ -1667,6 +1664,10 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
                               cpu_ctl2 & ~BLM_PWM_ENABLE);
        }
 
+       val = intel_panel_compute_brightness(connector, val);
+       panel->backlight.level = clamp(val, panel->backlight.min,
+                                      panel->backlight.max);
+
        return 0;
 }
 
index d52f9c1..f94025e 100644 (file)
@@ -812,10 +812,20 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
                intel_dsi_prepare(encoder, pipe_config);
 
        intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
-       intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
 
-       /* Deassert reset */
-       intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+       /*
+        * Give the panel time to power-on and then deassert its reset.
+        * Depending on the VBT MIPI sequences version the deassert-seq
+        * may contain the necessary delay, intel_dsi_msleep() will skip
+        * the delay in that case. If there is no deassert-seq, then an
+        * unconditional msleep is used to give the panel time to power-on.
+        */
+       if (dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) {
+               intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
+               intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+       } else {
+               msleep(intel_dsi->panel_on_delay);
+       }
 
        if (IS_GEMINILAKE(dev_priv)) {
                glk_cold_boot = glk_dsi_enable_io(encoder);
index bcc80f4..bd3046e 100644 (file)
@@ -1046,7 +1046,7 @@ static void reloc_gpu_flush(struct i915_execbuffer *eb, struct reloc_cache *cach
        GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
        cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
 
-       __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1));
+       i915_gem_object_flush_map(obj);
        i915_gem_object_unpin_map(obj);
 
        intel_gt_chipset_flush(cache->rq->engine->gt);
@@ -1296,6 +1296,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
                goto err_pool;
        }
 
+       memset32(cmd, 0, pool->obj->base.size / sizeof(u32));
+
        batch = i915_vma_instance(pool->obj, vma->vm, NULL);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
index d93d85c..9446537 100644 (file)
@@ -7,8 +7,6 @@
 #include "i915_drv.h"
 #include "intel_gpu_commands.h"
 
-#define MAX_URB_ENTRIES 64
-#define STATE_SIZE (4 * 1024)
 #define GT3_INLINE_DATA_DELAYS 0x1E00
 #define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS))
 
@@ -34,38 +32,59 @@ struct batch_chunk {
 };
 
 struct batch_vals {
-       u32 max_primitives;
-       u32 max_urb_entries;
-       u32 cmd_size;
-       u32 state_size;
+       u32 max_threads;
        u32 state_start;
-       u32 batch_size;
+       u32 surface_start;
        u32 surface_height;
        u32 surface_width;
-       u32 scratch_size;
-       u32 max_size;
+       u32 size;
 };
 
+static inline int num_primitives(const struct batch_vals *bv)
+{
+       /*
+        * We need to saturate the GPU with work in order to dispatch
+        * a shader on every HW thread, and clear the thread-local registers.
+        * In short, we have to dispatch work faster than the shaders can
+        * run in order to fill the EU and occupy each HW thread.
+        */
+       return bv->max_threads;
+}
+
 static void
 batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv)
 {
        if (IS_HASWELL(i915)) {
-               bv->max_primitives = 280;
-               bv->max_urb_entries = MAX_URB_ENTRIES;
+               switch (INTEL_INFO(i915)->gt) {
+               default:
+               case 1:
+                       bv->max_threads = 70;
+                       break;
+               case 2:
+                       bv->max_threads = 140;
+                       break;
+               case 3:
+                       bv->max_threads = 280;
+                       break;
+               }
                bv->surface_height = 16 * 16;
                bv->surface_width = 32 * 2 * 16;
        } else {
-               bv->max_primitives = 128;
-               bv->max_urb_entries = MAX_URB_ENTRIES / 2;
+               switch (INTEL_INFO(i915)->gt) {
+               default:
+               case 1: /* including vlv */
+                       bv->max_threads = 36;
+                       break;
+               case 2:
+                       bv->max_threads = 128;
+                       break;
+               }
                bv->surface_height = 16 * 8;
                bv->surface_width = 32 * 16;
        }
-       bv->cmd_size = bv->max_primitives * 4096;
-       bv->state_size = STATE_SIZE;
-       bv->state_start = bv->cmd_size;
-       bv->batch_size = bv->cmd_size + bv->state_size;
-       bv->scratch_size = bv->surface_height * bv->surface_width;
-       bv->max_size = bv->batch_size + bv->scratch_size;
+       bv->state_start = round_up(SZ_1K + num_primitives(bv) * 64, SZ_4K);
+       bv->surface_start = bv->state_start + SZ_4K;
+       bv->size = bv->surface_start + bv->surface_height * bv->surface_width;
 }
 
 static void batch_init(struct batch_chunk *bc,
@@ -155,7 +174,8 @@ static u32
 gen7_fill_binding_table(struct batch_chunk *state,
                        const struct batch_vals *bv)
 {
-       u32 surface_start = gen7_fill_surface_state(state, bv->batch_size, bv);
+       u32 surface_start =
+               gen7_fill_surface_state(state, bv->surface_start, bv);
        u32 *cs = batch_alloc_items(state, 32, 8);
        u32 offset = batch_offset(state, cs);
 
@@ -214,9 +234,9 @@ static void
 gen7_emit_state_base_address(struct batch_chunk *batch,
                             u32 surface_state_base)
 {
-       u32 *cs = batch_alloc_items(batch, 0, 12);
+       u32 *cs = batch_alloc_items(batch, 0, 10);
 
-       *cs++ = STATE_BASE_ADDRESS | (12 - 2);
+       *cs++ = STATE_BASE_ADDRESS | (10 - 2);
        /* general */
        *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
        /* surface */
@@ -233,8 +253,6 @@ gen7_emit_state_base_address(struct batch_chunk *batch,
        *cs++ = BASE_ADDRESS_MODIFY;
        *cs++ = 0;
        *cs++ = BASE_ADDRESS_MODIFY;
-       *cs++ = 0;
-       *cs++ = 0;
        batch_advance(batch, cs);
 }
 
@@ -244,8 +262,7 @@ gen7_emit_vfe_state(struct batch_chunk *batch,
                    u32 urb_size, u32 curbe_size,
                    u32 mode)
 {
-       u32 urb_entries = bv->max_urb_entries;
-       u32 threads = bv->max_primitives - 1;
+       u32 threads = bv->max_threads - 1;
        u32 *cs = batch_alloc_items(batch, 32, 8);
 
        *cs++ = MEDIA_VFE_STATE | (8 - 2);
@@ -254,7 +271,7 @@ gen7_emit_vfe_state(struct batch_chunk *batch,
        *cs++ = 0;
 
        /* number of threads & urb entries for GPGPU vs Media Mode */
-       *cs++ = threads << 16 | urb_entries << 8 | mode << 2;
+       *cs++ = threads << 16 | 1 << 8 | mode << 2;
 
        *cs++ = 0;
 
@@ -293,17 +310,12 @@ gen7_emit_media_object(struct batch_chunk *batch,
 {
        unsigned int x_offset = (media_object_index % 16) * 64;
        unsigned int y_offset = (media_object_index / 16) * 16;
-       unsigned int inline_data_size;
-       unsigned int media_batch_size;
-       unsigned int i;
+       unsigned int pkt = 6 + 3;
        u32 *cs;
 
-       inline_data_size = 112 * 8;
-       media_batch_size = inline_data_size + 6;
-
-       cs = batch_alloc_items(batch, 8, media_batch_size);
+       cs = batch_alloc_items(batch, 8, pkt);
 
-       *cs++ = MEDIA_OBJECT | (media_batch_size - 2);
+       *cs++ = MEDIA_OBJECT | (pkt - 2);
 
        /* interface descriptor offset */
        *cs++ = 0;
@@ -317,25 +329,44 @@ gen7_emit_media_object(struct batch_chunk *batch,
        *cs++ = 0;
 
        /* inline */
-       *cs++ = (y_offset << 16) | (x_offset);
+       *cs++ = y_offset << 16 | x_offset;
        *cs++ = 0;
        *cs++ = GT3_INLINE_DATA_DELAYS;
-       for (i = 3; i < inline_data_size; i++)
-               *cs++ = 0;
 
        batch_advance(batch, cs);
 }
 
 static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
 {
-       u32 *cs = batch_alloc_items(batch, 0, 5);
+       u32 *cs = batch_alloc_items(batch, 0, 4);
 
-       *cs++ = GFX_OP_PIPE_CONTROL(5);
-       *cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE |
-               PIPE_CONTROL_GLOBAL_GTT_IVB;
+       *cs++ = GFX_OP_PIPE_CONTROL(4);
+       *cs++ = PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+               PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+               PIPE_CONTROL_DC_FLUSH_ENABLE |
+               PIPE_CONTROL_CS_STALL;
        *cs++ = 0;
        *cs++ = 0;
+
+       batch_advance(batch, cs);
+}
+
+static void gen7_emit_pipeline_invalidate(struct batch_chunk *batch)
+{
+       u32 *cs = batch_alloc_items(batch, 0, 8);
+
+       /* ivb: Stall before STATE_CACHE_INVALIDATE */
+       *cs++ = GFX_OP_PIPE_CONTROL(4);
+       *cs++ = PIPE_CONTROL_STALL_AT_SCOREBOARD |
+               PIPE_CONTROL_CS_STALL;
+       *cs++ = 0;
+       *cs++ = 0;
+
+       *cs++ = GFX_OP_PIPE_CONTROL(4);
+       *cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE;
        *cs++ = 0;
+       *cs++ = 0;
+
        batch_advance(batch, cs);
 }
 
@@ -344,34 +375,34 @@ static void emit_batch(struct i915_vma * const vma,
                       const struct batch_vals *bv)
 {
        struct drm_i915_private *i915 = vma->vm->i915;
-       unsigned int desc_count = 64;
-       const u32 urb_size = 112;
+       const unsigned int desc_count = 1;
+       const unsigned int urb_size = 1;
        struct batch_chunk cmds, state;
-       u32 interface_descriptor;
+       u32 descriptors;
        unsigned int i;
 
-       batch_init(&cmds, vma, start, 0, bv->cmd_size);
-       batch_init(&state, vma, start, bv->state_start, bv->state_size);
+       batch_init(&cmds, vma, start, 0, bv->state_start);
+       batch_init(&state, vma, start, bv->state_start, SZ_4K);
 
-       interface_descriptor =
-               gen7_fill_interface_descriptor(&state, bv,
-                                              IS_HASWELL(i915) ?
-                                              &cb_kernel_hsw :
-                                              &cb_kernel_ivb,
-                                              desc_count);
-       gen7_emit_pipeline_flush(&cmds);
+       descriptors = gen7_fill_interface_descriptor(&state, bv,
+                                                    IS_HASWELL(i915) ?
+                                                    &cb_kernel_hsw :
+                                                    &cb_kernel_ivb,
+                                                    desc_count);
+
+       gen7_emit_pipeline_invalidate(&cmds);
        batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
        batch_add(&cmds, MI_NOOP);
-       gen7_emit_state_base_address(&cmds, interface_descriptor);
+       gen7_emit_pipeline_invalidate(&cmds);
+
        gen7_emit_pipeline_flush(&cmds);
+       gen7_emit_state_base_address(&cmds, descriptors);
+       gen7_emit_pipeline_invalidate(&cmds);
 
        gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
+       gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count);
 
-       gen7_emit_interface_descriptor_load(&cmds,
-                                           interface_descriptor,
-                                           desc_count);
-
-       for (i = 0; i < bv->max_primitives; i++)
+       for (i = 0; i < num_primitives(bv); i++)
                gen7_emit_media_object(&cmds, i);
 
        batch_add(&cmds, MI_BATCH_BUFFER_END);
@@ -385,15 +416,15 @@ int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
 
        batch_get_defaults(engine->i915, &bv);
        if (!vma)
-               return bv.max_size;
+               return bv.size;
 
-       GEM_BUG_ON(vma->obj->base.size < bv.max_size);
+       GEM_BUG_ON(vma->obj->base.size < bv.size);
 
        batch = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
        if (IS_ERR(batch))
                return PTR_ERR(batch);
 
-       emit_batch(vma, memset(batch, 0, bv.max_size), &bv);
+       emit_batch(vma, memset(batch, 0, bv.size), &bv);
 
        i915_gem_object_flush_map(vma->obj);
        __i915_gem_object_release_map(vma->obj);
index a24cc1f..0625cbb 100644 (file)
@@ -134,11 +134,6 @@ static bool remove_signaling_context(struct intel_breadcrumbs *b,
        return true;
 }
 
-static inline bool __request_completed(const struct i915_request *rq)
-{
-       return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
-}
-
 __maybe_unused static bool
 check_signal_order(struct intel_context *ce, struct i915_request *rq)
 {
@@ -257,7 +252,7 @@ static void signal_irq_work(struct irq_work *work)
                list_for_each_entry_rcu(rq, &ce->signals, signal_link) {
                        bool release;
 
-                       if (!__request_completed(rq))
+                       if (!__i915_request_is_complete(rq))
                                break;
 
                        if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL,
@@ -379,7 +374,7 @@ static void insert_breadcrumb(struct i915_request *rq)
         * straight onto a signaled list, and queue the irq worker for
         * its signal completion.
         */
-       if (__request_completed(rq)) {
+       if (__i915_request_is_complete(rq)) {
                if (__signal_request(rq) &&
                    llist_add(&rq->signal_node, &b->signaled_requests))
                        irq_work_queue(&b->irq_work);
index 7614a3d..26c7d0a 100644 (file)
@@ -3988,6 +3988,9 @@ err:
 static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine)
 {
        i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
+
+       /* Called on error unwind, clear all flags to prevent further use */
+       memset(&engine->wa_ctx, 0, sizeof(engine->wa_ctx));
 }
 
 typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
index a41b43f..ecf3a61 100644 (file)
@@ -32,6 +32,7 @@
 #include "gen6_ppgtt.h"
 #include "gen7_renderclear.h"
 #include "i915_drv.h"
+#include "i915_mitigations.h"
 #include "intel_breadcrumbs.h"
 #include "intel_context.h"
 #include "intel_gt.h"
@@ -886,7 +887,8 @@ static int switch_context(struct i915_request *rq)
        GEM_BUG_ON(HAS_EXECLISTS(engine->i915));
 
        if (engine->wa_ctx.vma && ce != engine->kernel_context) {
-               if (engine->wa_ctx.vma->private != ce) {
+               if (engine->wa_ctx.vma->private != ce &&
+                   i915_mitigate_clear_residuals()) {
                        ret = clear_residuals(rq);
                        if (ret)
                                return ret;
@@ -1290,7 +1292,7 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
 
        GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
 
-       if (IS_HASWELL(engine->i915) && engine->class == RENDER_CLASS) {
+       if (IS_GEN(engine->i915, 7) && engine->class == RENDER_CLASS) {
                err = gen7_ctx_switch_bb_init(engine);
                if (err)
                        goto err_ring_unpin;
index 7ea94d2..8015964 100644 (file)
@@ -126,6 +126,10 @@ static void __rcu_cacheline_free(struct rcu_head *rcu)
        struct intel_timeline_cacheline *cl =
                container_of(rcu, typeof(*cl), rcu);
 
+       /* Must wait until after all *rq->hwsp are complete before removing */
+       i915_gem_object_unpin_map(cl->hwsp->vma->obj);
+       __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
+
        i915_active_fini(&cl->active);
        kfree(cl);
 }
@@ -133,11 +137,6 @@ static void __rcu_cacheline_free(struct rcu_head *rcu)
 static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
 {
        GEM_BUG_ON(!i915_active_is_idle(&cl->active));
-
-       i915_gem_object_unpin_map(cl->hwsp->vma->obj);
-       i915_vma_put(cl->hwsp->vma);
-       __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
-
        call_rcu(&cl->rcu, __rcu_cacheline_free);
 }
 
@@ -179,7 +178,6 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
                return ERR_CAST(vaddr);
        }
 
-       i915_vma_get(hwsp->vma);
        cl->hwsp = hwsp;
        cl->vaddr = page_pack_bits(vaddr, cacheline);
 
index 180c23e..602f1a0 100644 (file)
@@ -53,6 +53,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
        fw_def(ELKHARTLAKE, 0, guc_def(ehl, 49, 0, 1), huc_def(ehl,  9, 0, 0)) \
        fw_def(ICELAKE,     0, guc_def(icl, 49, 0, 1), huc_def(icl,  9, 0, 0)) \
        fw_def(COMETLAKE,   5, guc_def(cml, 49, 0, 1), huc_def(cml,  4, 0, 0)) \
+       fw_def(COMETLAKE,   0, guc_def(kbl, 49, 0, 1), huc_def(kbl,  4, 0, 0)) \
        fw_def(COFFEELAKE,  0, guc_def(kbl, 49, 0, 1), huc_def(kbl,  4, 0, 0)) \
        fw_def(GEMINILAKE,  0, guc_def(glk, 49, 0, 1), huc_def(glk,  4, 0, 0)) \
        fw_def(KABYLAKE,    0, guc_def(kbl, 49, 0, 1), huc_def(kbl,  4, 0, 0)) \
index a15f875..62a5b0d 100644 (file)
@@ -217,6 +217,15 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                                  DDI_BUF_CTL_ENABLE);
                        vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) |= DDI_BUF_IS_IDLE;
                }
+               vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+                       ~(PORTA_HOTPLUG_ENABLE | PORTA_HOTPLUG_STATUS_MASK);
+               vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+                       ~(PORTB_HOTPLUG_ENABLE | PORTB_HOTPLUG_STATUS_MASK);
+               vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+                       ~(PORTC_HOTPLUG_ENABLE | PORTC_HOTPLUG_STATUS_MASK);
+               /* No hpd_invert set in vgpu vbt, need to clear invert mask */
+               vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= ~BXT_DDI_HPD_INVERT_MASK;
+               vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~BXT_DE_PORT_HOTPLUG_MASK;
 
                vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= ~(BIT(0) | BIT(1));
                vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
@@ -273,6 +282,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                        vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)) |=
                                (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
                                 TRANS_DDI_FUNC_ENABLE);
+                       vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+                               PORTA_HOTPLUG_ENABLE;
                        vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
                                GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
                }
@@ -301,6 +312,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                                (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
                                 (PORT_B << TRANS_DDI_PORT_SHIFT) |
                                 TRANS_DDI_FUNC_ENABLE);
+                       vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+                               PORTB_HOTPLUG_ENABLE;
                        vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
                                GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
                }
@@ -329,6 +342,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                                (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
                                 (PORT_B << TRANS_DDI_PORT_SHIFT) |
                                 TRANS_DDI_FUNC_ENABLE);
+                       vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+                               PORTC_HOTPLUG_ENABLE;
                        vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
                                GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
                }
@@ -661,44 +676,62 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
                                PORTD_HOTPLUG_STATUS_MASK;
                intel_vgpu_trigger_virtual_event(vgpu, DP_D_HOTPLUG);
        } else if (IS_BROXTON(i915)) {
-               if (connected) {
-                       if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+               if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+                       if (connected) {
                                vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
                                        GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
+                       } else {
+                               vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
+                                       ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
                        }
-                       if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
-                               vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
-                                       SFUSE_STRAP_DDIB_DETECTED;
+                       vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+                               GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
+                       vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+                               ~PORTA_HOTPLUG_STATUS_MASK;
+                       vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+                               PORTA_HOTPLUG_LONG_DETECT;
+                       intel_vgpu_trigger_virtual_event(vgpu, DP_A_HOTPLUG);
+               }
+               if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+                       if (connected) {
                                vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
                                        GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
-                       }
-                       if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
                                vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
-                                       SFUSE_STRAP_DDIC_DETECTED;
-                               vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
-                                       GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
-                       }
-               } else {
-                       if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
+                                       SFUSE_STRAP_DDIB_DETECTED;
+                       } else {
                                vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
-                                       ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
-                       }
-                       if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+                                       ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
                                vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
                                        ~SFUSE_STRAP_DDIB_DETECTED;
-                               vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
-                                       ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
                        }
-                       if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
-                               vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
-                                       ~SFUSE_STRAP_DDIC_DETECTED;
+                       vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+                               GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
+                       vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+                               ~PORTB_HOTPLUG_STATUS_MASK;
+                       vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+                               PORTB_HOTPLUG_LONG_DETECT;
+                       intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG);
+               }
+               if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
+                       if (connected) {
+                               vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
+                                       GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
+                               vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
+                                       SFUSE_STRAP_DDIC_DETECTED;
+                       } else {
                                vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
                                        ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
+                               vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
+                                       ~SFUSE_STRAP_DDIC_DETECTED;
                        }
+                       vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
+                               GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
+                       vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
+                               ~PORTC_HOTPLUG_STATUS_MASK;
+                       vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
+                               PORTC_HOTPLUG_LONG_DETECT;
+                       intel_vgpu_trigger_virtual_event(vgpu, DP_C_HOTPLUG);
                }
-               vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
-                       PORTB_HOTPLUG_STATUS_MASK;
-               intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG);
        }
 }
 
index e49944f..cbe5931 100644 (file)
@@ -437,10 +437,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        if (ret)
                goto out_clean_sched_policy;
 
-       if (IS_BROADWELL(dev_priv))
+       if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv))
                ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B);
-       /* FixMe: Re-enable APL/BXT once vfio_edid enabled */
-       else if (!IS_BROXTON(dev_priv))
+       else
                ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
        if (ret)
                goto out_clean_sched_policy;
index 9326595..b0899b6 100644 (file)
@@ -1166,7 +1166,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
                }
        }
        if (IS_ERR(src)) {
-               unsigned long x, n;
+               unsigned long x, n, remain;
                void *ptr;
 
                /*
@@ -1177,14 +1177,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
                 * We don't care about copying too much here as we only
                 * validate up to the end of the batch.
                 */
+               remain = length;
                if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
-                       length = round_up(length,
+                       remain = round_up(remain,
                                          boot_cpu_data.x86_clflush_size);
 
                ptr = dst;
                x = offset_in_page(offset);
-               for (n = offset >> PAGE_SHIFT; length; n++) {
-                       int len = min(length, PAGE_SIZE - x);
+               for (n = offset >> PAGE_SHIFT; remain; n++) {
+                       int len = min(remain, PAGE_SIZE - x);
 
                        src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
                        if (needs_clflush)
@@ -1193,13 +1194,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
                        kunmap_atomic(src);
 
                        ptr += len;
-                       length -= len;
+                       remain -= len;
                        x = 0;
                }
        }
 
        i915_gem_object_unpin_pages(src_obj);
 
+       memset32(dst + length, 0, (dst_obj->base.size - length) / sizeof(u32));
+
        /* dst_obj is returned with vmap pinned */
        return dst;
 }
@@ -1392,11 +1395,6 @@ static unsigned long *alloc_whitelist(u32 batch_length)
 
 #define LENGTH_BIAS 2
 
-static bool shadow_needs_clflush(struct drm_i915_gem_object *obj)
-{
-       return !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
-}
-
 /**
  * intel_engine_cmd_parser() - parse a batch buffer for privilege violations
  * @engine: the engine on which the batch is to execute
@@ -1538,16 +1536,9 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
                                ret = 0; /* allow execution */
                        }
                }
-
-               if (shadow_needs_clflush(shadow->obj))
-                       drm_clflush_virt_range(batch_end, 8);
        }
 
-       if (shadow_needs_clflush(shadow->obj)) {
-               void *ptr = page_mask_bits(shadow->obj->mm.mapping);
-
-               drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
-       }
+       i915_gem_object_flush_map(shadow->obj);
 
        if (!IS_ERR_OR_NULL(jump_whitelist))
                kfree(jump_whitelist);
index 320856b..99eb0d7 100644 (file)
@@ -578,8 +578,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 
        pci_set_master(pdev);
 
-       cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
-
        intel_gt_init_workarounds(dev_priv);
 
        /* On the 945G/GM, the chipset reports the MSI capability on the
@@ -626,7 +624,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 err_msi:
        if (pdev->msi_enabled)
                pci_disable_msi(pdev);
-       cpu_latency_qos_remove_request(&dev_priv->pm_qos);
 err_mem_regions:
        intel_memory_regions_driver_release(dev_priv);
 err_ggtt:
@@ -648,8 +645,6 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
 
        if (pdev->msi_enabled)
                pci_disable_msi(pdev);
-
-       cpu_latency_qos_remove_request(&dev_priv->pm_qos);
 }
 
 /**
@@ -1052,6 +1047,8 @@ static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
 
 void i915_driver_shutdown(struct drm_i915_private *i915)
 {
+       disable_rpm_wakeref_asserts(&i915->runtime_pm);
+
        i915_gem_suspend(i915);
 
        drm_kms_helper_poll_disable(&i915->drm);
@@ -1065,6 +1062,8 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
 
        intel_suspend_encoders(i915);
        intel_shutdown_encoders(i915);
+
+       enable_rpm_wakeref_asserts(&i915->runtime_pm);
 }
 
 static bool suspend_to_idle(struct drm_i915_private *dev_priv)
index 0a3ee4f..632c713 100644 (file)
@@ -891,9 +891,6 @@ struct drm_i915_private {
 
        bool display_irqs_enabled;
 
-       /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
-       struct pm_qos_request pm_qos;
-
        /* Sideband mailbox protection */
        struct mutex sb_lock;
        struct pm_qos_request sb_qos;
diff --git a/drivers/gpu/drm/i915/i915_mitigations.c b/drivers/gpu/drm/i915/i915_mitigations.c
new file mode 100644 (file)
index 0000000..84f1259
--- /dev/null
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "i915_drv.h"
+#include "i915_mitigations.h"
+
+static unsigned long mitigations __read_mostly = ~0UL;
+
+enum {
+       CLEAR_RESIDUALS = 0,
+};
+
+static const char * const names[] = {
+       [CLEAR_RESIDUALS] = "residuals",
+};
+
+bool i915_mitigate_clear_residuals(void)
+{
+       return READ_ONCE(mitigations) & BIT(CLEAR_RESIDUALS);
+}
+
+static int mitigations_set(const char *val, const struct kernel_param *kp)
+{
+       unsigned long new = ~0UL;
+       char *str, *sep, *tok;
+       bool first = true;
+       int err = 0;
+
+       BUILD_BUG_ON(ARRAY_SIZE(names) >= BITS_PER_TYPE(mitigations));
+
+       str = kstrdup(val, GFP_KERNEL);
+       if (!str)
+               return -ENOMEM;
+
+       for (sep = str; (tok = strsep(&sep, ","));) {
+               bool enable = true;
+               int i;
+
+               /* Be tolerant of leading/trailing whitespace */
+               tok = strim(tok);
+
+               if (first) {
+                       first = false;
+
+                       if (!strcmp(tok, "auto"))
+                               continue;
+
+                       new = 0;
+                       if (!strcmp(tok, "off"))
+                               continue;
+               }
+
+               if (*tok == '!') {
+                       enable = !enable;
+                       tok++;
+               }
+
+               if (!strncmp(tok, "no", 2)) {
+                       enable = !enable;
+                       tok += 2;
+               }
+
+               if (*tok == '\0')
+                       continue;
+
+               for (i = 0; i < ARRAY_SIZE(names); i++) {
+                       if (!strcmp(tok, names[i])) {
+                               if (enable)
+                                       new |= BIT(i);
+                               else
+                                       new &= ~BIT(i);
+                               break;
+                       }
+               }
+               if (i == ARRAY_SIZE(names)) {
+                       pr_err("Bad \"%s.mitigations=%s\", '%s' is unknown\n",
+                              DRIVER_NAME, val, tok);
+                       err = -EINVAL;
+                       break;
+               }
+       }
+       kfree(str);
+       if (err)
+               return err;
+
+       WRITE_ONCE(mitigations, new);
+       return 0;
+}
+
+static int mitigations_get(char *buffer, const struct kernel_param *kp)
+{
+       unsigned long local = READ_ONCE(mitigations);
+       int count, i;
+       bool enable;
+
+       if (!local)
+               return scnprintf(buffer, PAGE_SIZE, "%s\n", "off");
+
+       if (local & BIT(BITS_PER_LONG - 1)) {
+               count = scnprintf(buffer, PAGE_SIZE, "%s,", "auto");
+               enable = false;
+       } else {
+               enable = true;
+               count = 0;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(names); i++) {
+               if ((local & BIT(i)) != enable)
+                       continue;
+
+               count += scnprintf(buffer + count, PAGE_SIZE - count,
+                                  "%s%s,", enable ? "" : "!", names[i]);
+       }
+
+       buffer[count - 1] = '\n';
+       return count;
+}
+
+static const struct kernel_param_ops ops = {
+       .set = mitigations_set,
+       .get = mitigations_get,
+};
+
+module_param_cb_unsafe(mitigations, &ops, NULL, 0600);
+MODULE_PARM_DESC(mitigations,
+"Selectively enable security mitigations for all Intel® GPUs in the system.\n"
+"\n"
+"  auto -- enables all mitigations required for the platform [default]\n"
+"  off  -- disables all mitigations\n"
+"\n"
+"Individual mitigations can be enabled by passing a comma-separated string,\n"
+"e.g. mitigations=residuals to enable only clearing residuals or\n"
+"mitigations=auto,noresiduals to disable only the clear residual mitigation.\n"
+"Either '!' or 'no' may be used to switch from enabling the mitigation to\n"
+"disabling it.\n"
+"\n"
+"Active mitigations for Ivybridge, Baytrail, Haswell:\n"
+"  residuals -- clear all thread-local registers between contexts"
+);
diff --git a/drivers/gpu/drm/i915/i915_mitigations.h b/drivers/gpu/drm/i915/i915_mitigations.h
new file mode 100644 (file)
index 0000000..1359d81
--- /dev/null
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __I915_MITIGATIONS_H__
+#define __I915_MITIGATIONS_H__
+
+#include <linux/types.h>
+
+bool i915_mitigate_clear_residuals(void);
+
+#endif /* __I915_MITIGATIONS_H__ */
index d76685c..9856479 100644 (file)
@@ -184,13 +184,24 @@ static u64 get_rc6(struct intel_gt *gt)
        return val;
 }
 
-static void park_rc6(struct drm_i915_private *i915)
+static void init_rc6(struct i915_pmu *pmu)
 {
-       struct i915_pmu *pmu = &i915->pmu;
+       struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
+       intel_wakeref_t wakeref;
 
-       if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
+       with_intel_runtime_pm(i915->gt.uncore->rpm, wakeref) {
                pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
+               pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur =
+                                       pmu->sample[__I915_SAMPLE_RC6].cur;
+               pmu->sleep_last = ktime_get();
+       }
+}
 
+static void park_rc6(struct drm_i915_private *i915)
+{
+       struct i915_pmu *pmu = &i915->pmu;
+
+       pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
        pmu->sleep_last = ktime_get();
 }
 
@@ -201,6 +212,7 @@ static u64 get_rc6(struct intel_gt *gt)
        return __get_rc6(gt);
 }
 
+static void init_rc6(struct i915_pmu *pmu) { }
 static void park_rc6(struct drm_i915_private *i915) {}
 
 #endif
@@ -612,10 +624,8 @@ static void i915_pmu_enable(struct perf_event *event)
                container_of(event->pmu, typeof(*i915), pmu.base);
        unsigned int bit = event_enabled_bit(event);
        struct i915_pmu *pmu = &i915->pmu;
-       intel_wakeref_t wakeref;
        unsigned long flags;
 
-       wakeref = intel_runtime_pm_get(&i915->runtime_pm);
        spin_lock_irqsave(&pmu->lock, flags);
 
        /*
@@ -626,13 +636,6 @@ static void i915_pmu_enable(struct perf_event *event)
        GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
        GEM_BUG_ON(pmu->enable_count[bit] == ~0);
 
-       if (pmu->enable_count[bit] == 0 &&
-           config_enabled_mask(I915_PMU_RC6_RESIDENCY) & BIT_ULL(bit)) {
-               pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0;
-               pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
-               pmu->sleep_last = ktime_get();
-       }
-
        pmu->enable |= BIT_ULL(bit);
        pmu->enable_count[bit]++;
 
@@ -673,8 +676,6 @@ static void i915_pmu_enable(struct perf_event *event)
         * an existing non-zero value.
         */
        local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
-
-       intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }
 
 static void i915_pmu_disable(struct perf_event *event)
@@ -1130,6 +1131,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
        hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        pmu->timer.function = i915_sample;
        pmu->cpuhp.cpu = -1;
+       init_rc6(pmu);
 
        if (!is_igp(i915)) {
                pmu->name = kasprintf(GFP_KERNEL,
index 620b6fa..92adfee 100644 (file)
@@ -434,7 +434,7 @@ static inline u32 hwsp_seqno(const struct i915_request *rq)
 
 static inline bool __i915_request_has_started(const struct i915_request *rq)
 {
-       return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
+       return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1);
 }
 
 /**
@@ -465,11 +465,19 @@ static inline bool __i915_request_has_started(const struct i915_request *rq)
  */
 static inline bool i915_request_started(const struct i915_request *rq)
 {
+       bool result;
+
        if (i915_request_signaled(rq))
                return true;
 
-       /* Remember: started but may have since been preempted! */
-       return __i915_request_has_started(rq);
+       result = true;
+       rcu_read_lock(); /* the HWSP may be freed at runtime */
+       if (likely(!i915_request_signaled(rq)))
+               /* Remember: started but may have since been preempted! */
+               result = __i915_request_has_started(rq);
+       rcu_read_unlock();
+
+       return result;
 }
 
 /**
@@ -482,10 +490,16 @@ static inline bool i915_request_started(const struct i915_request *rq)
  */
 static inline bool i915_request_is_running(const struct i915_request *rq)
 {
+       bool result;
+
        if (!i915_request_is_active(rq))
                return false;
 
-       return __i915_request_has_started(rq);
+       rcu_read_lock();
+       result = __i915_request_has_started(rq) && i915_request_is_active(rq);
+       rcu_read_unlock();
+
+       return result;
 }
 
 /**
@@ -509,12 +523,25 @@ static inline bool i915_request_is_ready(const struct i915_request *rq)
        return !list_empty(&rq->sched.link);
 }
 
+static inline bool __i915_request_is_complete(const struct i915_request *rq)
+{
+       return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
+}
+
 static inline bool i915_request_completed(const struct i915_request *rq)
 {
+       bool result;
+
        if (i915_request_signaled(rq))
                return true;
 
-       return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
+       result = true;
+       rcu_read_lock(); /* the HWSP may be freed at runtime */
+       if (likely(!i915_request_signaled(rq)))
+               result = __i915_request_is_complete(rq);
+       rcu_read_unlock();
+
+       return result;
 }
 
 static inline void i915_request_mark_complete(struct i915_request *rq)
index 7e82c41..bdc9891 100644 (file)
@@ -534,8 +534,10 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
 
        if (!gpu->aspace) {
                dev_err(dev->dev, "No memory protection without MMU\n");
-               ret = -ENXIO;
-               goto fail;
+               if (!allow_vram_carveout) {
+                       ret = -ENXIO;
+                       goto fail;
+               }
        }
 
        return gpu;
index 93da668..4534633 100644 (file)
@@ -564,8 +564,10 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
                 * implement a cmdstream validator.
                 */
                DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
-               ret = -ENXIO;
-               goto fail;
+               if (!allow_vram_carveout) {
+                       ret = -ENXIO;
+                       goto fail;
+               }
        }
 
        icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
index c0be3a0..82bebb4 100644 (file)
@@ -692,8 +692,10 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
                 * implement a cmdstream validator.
                 */
                DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
-               ret = -ENXIO;
-               goto fail;
+               if (!allow_vram_carveout) {
+                       ret = -ENXIO;
+                       goto fail;
+               }
        }
 
        icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
index 87c8b03..12e75ba 100644 (file)
@@ -18,6 +18,10 @@ bool snapshot_debugbus = false;
 MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)");
 module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600);
 
+bool allow_vram_carveout = false;
+MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU");
+module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600);
+
 static const struct adreno_info gpulist[] = {
        {
                .rev   = ADRENO_REV(2, 0, 0, 0),
index 6cf9975..f091756 100644 (file)
@@ -191,8 +191,6 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
                struct platform_device *pdev)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-       struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
-       struct io_pgtable_domain_attr pgtbl_cfg;
        struct iommu_domain *iommu;
        struct msm_mmu *mmu;
        struct msm_gem_address_space *aspace;
@@ -202,13 +200,18 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
        if (!iommu)
                return NULL;
 
-       /*
-        * This allows GPU to set the bus attributes required to use system
-        * cache on behalf of the iommu page table walker.
-        */
-       if (!IS_ERR(a6xx_gpu->htw_llc_slice)) {
-               pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
-               iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
+
+       if (adreno_is_a6xx(adreno_gpu)) {
+               struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+               struct io_pgtable_domain_attr pgtbl_cfg;
+               /*
+               * This allows GPU to set the bus attributes required to use system
+               * cache on behalf of the iommu page table walker.
+               */
+               if (!IS_ERR(a6xx_gpu->htw_llc_slice)) {
+                       pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
+                       iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
+               }
        }
 
        mmu = msm_iommu_new(&pdev->dev, iommu);
index c3775f7..b3d9a33 100644 (file)
@@ -18,6 +18,7 @@
 #include "adreno_pm4.xml.h"
 
 extern bool snapshot_debugbus;
+extern bool allow_vram_carveout;
 
 enum {
        ADRENO_FW_PM4 = 0,
@@ -211,6 +212,11 @@ static inline int adreno_is_a540(struct adreno_gpu *gpu)
        return gpu->revn == 540;
 }
 
+static inline bool adreno_is_a6xx(struct adreno_gpu *gpu)
+{
+       return ((gpu->revn < 700 && gpu->revn > 599));
+}
+
 static inline int adreno_is_a618(struct adreno_gpu *gpu)
 {
        return gpu->revn == 618;
index 6e971d5..3bc7ed2 100644 (file)
@@ -693,6 +693,13 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
                return 0;
        }
 
+       if (state == ST_CONNECT_PENDING) {
+               /* wait until ST_CONNECTED */
+               dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */
+               mutex_unlock(&dp->event_mutex);
+               return 0;
+       }
+
        ret = dp_display_usbpd_attention_cb(&dp->pdev->dev);
        if (ret == -ECONNRESET) { /* cable unplugged */
                dp->core_initialized = false;
index 97dca3e..d1780bc 100644 (file)
@@ -167,12 +167,18 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
        panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
 
        rc = dp_panel_read_dpcd(dp_panel);
+       if (rc) {
+               DRM_ERROR("read dpcd failed %d\n", rc);
+               return rc;
+       }
+
        bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
-       if (rc || !is_link_rate_valid(bw_code) ||
+       if (!is_link_rate_valid(bw_code) ||
                        !is_lane_count_valid(dp_panel->link_info.num_lanes) ||
                        (bw_code > dp_panel->max_bw_code)) {
-               DRM_ERROR("read dpcd failed %d\n", rc);
-               return rc;
+               DRM_ERROR("Illegal link rate=%d lane=%d\n", dp_panel->link_info.rate,
+                               dp_panel->link_info.num_lanes);
+               return -EINVAL;
        }
 
        if (dp_panel->dfp_present) {
index 535a026..108c405 100644 (file)
@@ -457,14 +457,14 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
 
        drm_mode_config_init(ddev);
 
-       /* Bind all our sub-components: */
-       ret = component_bind_all(dev, ddev);
+       ret = msm_init_vram(ddev);
        if (ret)
                goto err_destroy_mdss;
 
-       ret = msm_init_vram(ddev);
+       /* Bind all our sub-components: */
+       ret = component_bind_all(dev, ddev);
        if (ret)
-               goto err_msm_uninit;
+               goto err_destroy_mdss;
 
        dma_set_max_seg_size(dev, UINT_MAX);
 
index 9a7c49b..9d10739 100644 (file)
@@ -96,6 +96,8 @@ static struct page **get_pages(struct drm_gem_object *obj)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
+       WARN_ON(!msm_gem_is_locked(obj));
+
        if (!msm_obj->pages) {
                struct drm_device *dev = obj->dev;
                struct page **p;
@@ -988,6 +990,8 @@ void msm_gem_free_object(struct drm_gem_object *obj)
                if (msm_obj->pages)
                        kvfree(msm_obj->pages);
 
+               put_iova_vmas(obj);
+
                /* dma_buf_detach() grabs resv lock, so we need to unlock
                 * prior to drm_prime_gem_destroy
                 */
@@ -997,11 +1001,10 @@ void msm_gem_free_object(struct drm_gem_object *obj)
        } else {
                msm_gem_vunmap(obj);
                put_pages(obj);
+               put_iova_vmas(obj);
                msm_gem_unlock(obj);
        }
 
-       put_iova_vmas(obj);
-
        drm_gem_object_release(obj);
 
        kfree(msm_obj);
@@ -1115,6 +1118,8 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
                struct msm_gem_vma *vma;
                struct page **pages;
 
+               drm_gem_private_object_init(dev, obj, size);
+
                msm_gem_lock(obj);
 
                vma = add_vma(obj, NULL);
@@ -1126,9 +1131,9 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 
                to_msm_bo(obj)->vram_node = &vma->node;
 
-               drm_gem_private_object_init(dev, obj, size);
-
+               msm_gem_lock(obj);
                pages = get_pages(obj);
+               msm_gem_unlock(obj);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto fail;
index 6fdddb2..4488e1c 100644 (file)
@@ -37,6 +37,7 @@ nouveau-y += dispnv50/wimmc37b.o
 nouveau-y += dispnv50/wndw.o
 nouveau-y += dispnv50/wndwc37e.o
 nouveau-y += dispnv50/wndwc57e.o
+nouveau-y += dispnv50/wndwc67e.o
 
 nouveau-y += dispnv50/base.o
 nouveau-y += dispnv50/base507c.o
index 27ea3f3..abefc23 100644 (file)
@@ -42,6 +42,7 @@ nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
                int version;
                int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
        } cores[] = {
+               { GA102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
                { TU102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
                { GV100_DISP_CORE_CHANNEL_DMA, 0, corec37d_new },
                { GP102_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
index 121c24a..31d8b2e 100644 (file)
@@ -31,6 +31,7 @@ nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
                int version;
                int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
        } curses[] = {
+               { GA102_DISP_CURSOR, 0, cursc37a_new },
                { TU102_DISP_CURSOR, 0, cursc37a_new },
                { GV100_DISP_CURSOR, 0, cursc37a_new },
                { GK104_DISP_CURSOR, 0, curs907a_new },
index 33fff38..c636703 100644 (file)
@@ -222,7 +222,7 @@ nv50_dmac_wait(struct nvif_push *push, u32 size)
 
 int
 nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
-                const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
+                const s32 *oclass, u8 head, void *data, u32 size, s64 syncbuf,
                 struct nv50_dmac *dmac)
 {
        struct nouveau_cli *cli = (void *)device->object.client;
@@ -271,7 +271,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
        if (ret)
                return ret;
 
-       if (!syncbuf)
+       if (syncbuf < 0)
                return 0;
 
        ret = nvif_object_ctor(&dmac->base.user, "kmsSyncCtxDma", NV50_DISP_HANDLE_SYNCBUF,
index 92bddc0..38dec11 100644 (file)
@@ -95,7 +95,7 @@ struct nv50_outp_atom {
 
 int nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
                     const s32 *oclass, u8 head, void *data, u32 size,
-                    u64 syncbuf, struct nv50_dmac *dmac);
+                    s64 syncbuf, struct nv50_dmac *dmac);
 void nv50_dmac_destroy(struct nv50_dmac *);
 
 /*
index a1ac153..566fbdd 100644 (file)
@@ -31,6 +31,7 @@ nv50_wimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
                int version;
                int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
        } wimms[] = {
+               { GA102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
                { TU102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
                { GV100_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
                {}
index 685b708..b390029 100644 (file)
@@ -76,7 +76,7 @@ wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
        int ret;
 
        ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
-                              &oclass, 0, &args, sizeof(args), 0,
+                              &oclass, 0, &args, sizeof(args), -1,
                               &wndw->wimm);
        if (ret) {
                NV_ERROR(drm, "wimm%04x allocation failed: %d\n", oclass, ret);
index 0356474..ce45124 100644 (file)
@@ -784,6 +784,7 @@ nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
                int (*new)(struct nouveau_drm *, enum drm_plane_type,
                           int, s32, struct nv50_wndw **);
        } wndws[] = {
+               { GA102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc67e_new },
                { TU102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc57e_new },
                { GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
                {}
index 3278e28..f4e0c50 100644 (file)
@@ -129,6 +129,14 @@ int wndwc37e_update(struct nv50_wndw *, u32 *);
 
 int wndwc57e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
                 struct nv50_wndw **);
+bool wndwc57e_ilut(struct nv50_wndw *, struct nv50_wndw_atom *, int);
+int wndwc57e_ilut_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int wndwc57e_ilut_clr(struct nv50_wndw *);
+int wndwc57e_csc_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+int wndwc57e_csc_clr(struct nv50_wndw *);
+
+int wndwc67e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
+                struct nv50_wndw **);
 
 int nv50_wndw_new(struct nouveau_drm *, enum drm_plane_type, int index,
                  struct nv50_wndw **);
index 429be0b..abdd3bb 100644 (file)
@@ -80,7 +80,7 @@ wndwc57e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
        return 0;
 }
 
-static int
+int
 wndwc57e_csc_clr(struct nv50_wndw *wndw)
 {
        struct nvif_push *push = wndw->wndw.push;
@@ -98,7 +98,7 @@ wndwc57e_csc_clr(struct nv50_wndw *wndw)
        return 0;
 }
 
-static int
+int
 wndwc57e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
        struct nvif_push *push = wndw->wndw.push;
@@ -111,7 +111,7 @@ wndwc57e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
        return 0;
 }
 
-static int
+int
 wndwc57e_ilut_clr(struct nv50_wndw *wndw)
 {
        struct nvif_push *push = wndw->wndw.push;
@@ -124,7 +124,7 @@ wndwc57e_ilut_clr(struct nv50_wndw *wndw)
        return 0;
 }
 
-static int
+int
 wndwc57e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
        struct nvif_push *push = wndw->wndw.push;
@@ -179,7 +179,7 @@ wndwc57e_ilut_load(struct drm_color_lut *in, int size, void __iomem *mem)
        writew(readw(mem - 4), mem + 4);
 }
 
-static bool
+bool
 wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
 {
        if (size = size ? size : 1024, size != 256 && size != 1024)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc67e.c
new file mode 100644 (file)
index 0000000..7a370fa
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "wndw.h"
+#include "atom.h"
+
+#include <nvif/pushc37b.h>
+
+#include <nvhw/class/clc57e.h>
+
+static int
+wndwc67e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+       struct nvif_push *push = wndw->wndw.push;
+       int ret;
+
+       if ((ret = PUSH_WAIT(push, 17)))
+               return ret;
+
+       PUSH_MTHD(push, NVC57E, SET_PRESENT_CONTROL,
+                 NVVAL(NVC57E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval) |
+                 NVVAL(NVC57E, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
+                 NVDEF(NVC57E, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE));
+
+       PUSH_MTHD(push, NVC57E, SET_SIZE,
+                 NVVAL(NVC57E, SET_SIZE, WIDTH, asyw->image.w) |
+                 NVVAL(NVC57E, SET_SIZE, HEIGHT, asyw->image.h),
+
+                               SET_STORAGE,
+                 NVVAL(NVC57E, SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh),
+
+                               SET_PARAMS,
+                 NVVAL(NVC57E, SET_PARAMS, FORMAT, asyw->image.format) |
+                 NVDEF(NVC57E, SET_PARAMS, CLAMP_BEFORE_BLEND, DISABLE) |
+                 NVDEF(NVC57E, SET_PARAMS, SWAP_UV, DISABLE) |
+                 NVDEF(NVC57E, SET_PARAMS, FMT_ROUNDING_MODE, ROUND_TO_NEAREST),
+
+                               SET_PLANAR_STORAGE(0),
+                 NVVAL(NVC57E, SET_PLANAR_STORAGE, PITCH, asyw->image.blocks[0]) |
+                 NVVAL(NVC57E, SET_PLANAR_STORAGE, PITCH, asyw->image.pitch[0] >> 6));
+
+       PUSH_MTHD(push, NVC57E, SET_CONTEXT_DMA_ISO(0), asyw->image.handle, 1);
+       PUSH_MTHD(push, NVC57E, SET_OFFSET(0), asyw->image.offset[0] >> 8);
+
+       PUSH_MTHD(push, NVC57E, SET_POINT_IN(0),
+                 NVVAL(NVC57E, SET_POINT_IN, X, asyw->state.src_x >> 16) |
+                 NVVAL(NVC57E, SET_POINT_IN, Y, asyw->state.src_y >> 16));
+
+       PUSH_MTHD(push, NVC57E, SET_SIZE_IN,
+                 NVVAL(NVC57E, SET_SIZE_IN, WIDTH, asyw->state.src_w >> 16) |
+                 NVVAL(NVC57E, SET_SIZE_IN, HEIGHT, asyw->state.src_h >> 16));
+
+       PUSH_MTHD(push, NVC57E, SET_SIZE_OUT,
+                 NVVAL(NVC57E, SET_SIZE_OUT, WIDTH, asyw->state.crtc_w) |
+                 NVVAL(NVC57E, SET_SIZE_OUT, HEIGHT, asyw->state.crtc_h));
+       return 0;
+}
+
+static const struct nv50_wndw_func
+wndwc67e = {
+       .acquire = wndwc37e_acquire,
+       .release = wndwc37e_release,
+       .sema_set = wndwc37e_sema_set,
+       .sema_clr = wndwc37e_sema_clr,
+       .ntfy_set = wndwc37e_ntfy_set,
+       .ntfy_clr = wndwc37e_ntfy_clr,
+       .ntfy_reset = corec37d_ntfy_init,
+       .ntfy_wait_begun = base507c_ntfy_wait_begun,
+       .ilut = wndwc57e_ilut,
+       .ilut_identity = true,
+       .ilut_size = 1024,
+       .xlut_set = wndwc57e_ilut_set,
+       .xlut_clr = wndwc57e_ilut_clr,
+       .csc = base907c_csc,
+       .csc_set = wndwc57e_csc_set,
+       .csc_clr = wndwc57e_csc_clr,
+       .image_set = wndwc67e_image_set,
+       .image_clr = wndwc37e_image_clr,
+       .blend_set = wndwc37e_blend_set,
+       .update = wndwc37e_update,
+};
+
+int
+wndwc67e_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
+            s32 oclass, struct nv50_wndw **pwndw)
+{
+       return wndwc37e_new_(&wndwc67e, drm, type, index, oclass, BIT(index >> 1), pwndw);
+}
index cd9a2e6..57d4f45 100644 (file)
@@ -33,6 +33,7 @@ struct nv_device_info_v0 {
 #define NV_DEVICE_INFO_V0_PASCAL                                           0x0a
 #define NV_DEVICE_INFO_V0_VOLTA                                            0x0b
 #define NV_DEVICE_INFO_V0_TURING                                           0x0c
+#define NV_DEVICE_INFO_V0_AMPERE                                           0x0d
        __u8  family;
        __u8  pad06[2];
        __u64 ram_size;
index 2c79beb..ba2c28e 100644 (file)
@@ -88,6 +88,7 @@
 #define GP102_DISP                                    /* cl5070.h */ 0x00009870
 #define GV100_DISP                                    /* cl5070.h */ 0x0000c370
 #define TU102_DISP                                    /* cl5070.h */ 0x0000c570
+#define GA102_DISP                                    /* cl5070.h */ 0x0000c670
 
 #define GV100_DISP_CAPS                                              0x0000c373
 
 #define GK104_DISP_CURSOR                             /* cl507a.h */ 0x0000917a
 #define GV100_DISP_CURSOR                             /* cl507a.h */ 0x0000c37a
 #define TU102_DISP_CURSOR                             /* cl507a.h */ 0x0000c57a
+#define GA102_DISP_CURSOR                             /* cl507a.h */ 0x0000c67a
 
 #define NV50_DISP_OVERLAY                             /* cl507b.h */ 0x0000507b
 #define G82_DISP_OVERLAY                              /* cl507b.h */ 0x0000827b
 
 #define GV100_DISP_WINDOW_IMM_CHANNEL_DMA             /* clc37b.h */ 0x0000c37b
 #define TU102_DISP_WINDOW_IMM_CHANNEL_DMA             /* clc37b.h */ 0x0000c57b
+#define GA102_DISP_WINDOW_IMM_CHANNEL_DMA             /* clc37b.h */ 0x0000c67b
 
 #define NV50_DISP_BASE_CHANNEL_DMA                    /* cl507c.h */ 0x0000507c
 #define G82_DISP_BASE_CHANNEL_DMA                     /* cl507c.h */ 0x0000827c
 #define GP102_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000987d
 #define GV100_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000c37d
 #define TU102_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000c57d
+#define GA102_DISP_CORE_CHANNEL_DMA                   /* cl507d.h */ 0x0000c67d
 
 #define NV50_DISP_OVERLAY_CHANNEL_DMA                 /* cl507e.h */ 0x0000507e
 #define G82_DISP_OVERLAY_CHANNEL_DMA                  /* cl507e.h */ 0x0000827e
 
 #define GV100_DISP_WINDOW_CHANNEL_DMA                 /* clc37e.h */ 0x0000c37e
 #define TU102_DISP_WINDOW_CHANNEL_DMA                 /* clc37e.h */ 0x0000c57e
+#define GA102_DISP_WINDOW_CHANNEL_DMA                 /* clc37e.h */ 0x0000c67e
 
 #define NV50_TESLA                                                   0x00005097
 #define G82_TESLA                                                    0x00008297
index 5c007ce..c920939 100644 (file)
@@ -120,6 +120,7 @@ struct nvkm_device {
                GP100    = 0x130,
                GV100    = 0x140,
                TU100    = 0x160,
+               GA100    = 0x170,
        } card_type;
        u32 chipset;
        u8  chiprev;
index 5a96c94..0f6fa66 100644 (file)
@@ -37,4 +37,5 @@ int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 int gp102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 int gv100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 int tu102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int ga102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
 #endif
index 1a39e52..50cc7c0 100644 (file)
@@ -32,4 +32,5 @@ int gm107_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
 int gm200_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
 int gv100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
 int tu102_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int ga100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
 #endif
index 34b56b1..2ecd52a 100644 (file)
@@ -86,6 +86,8 @@ int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gp102_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gp10b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 int gv100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int ga100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int ga102_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
 
 #include <subdev/bios.h>
 #include <subdev/bios/ramcfg.h>
index eaacf8d..cdcce5e 100644 (file)
@@ -37,4 +37,5 @@ int nv50_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
 int g94_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
 int gf119_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
 int gk104_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
+int ga102_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
 #endif
index 81b9773..640f649 100644 (file)
@@ -92,6 +92,7 @@ int g94_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
 int gf117_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
 int gf119_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
 int gk104_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
+int gk110_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
 int gm200_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
 
 static inline int
index 6641fe4..e45ca45 100644 (file)
@@ -32,4 +32,5 @@ int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 int gp10b_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 int tu102_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int ga100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
 #endif
index c7a94c9..72f35a2 100644 (file)
@@ -256,6 +256,7 @@ nouveau_backlight_init(struct drm_connector *connector)
        case NV_DEVICE_INFO_V0_PASCAL:
        case NV_DEVICE_INFO_V0_VOLTA:
        case NV_DEVICE_INFO_V0_TURING:
+       case NV_DEVICE_INFO_V0_AMPERE: //XXX: not confirmed
                ret = nv50_backlight_init(nv_encoder, &props, &ops);
                break;
        default:
index 8d0d30e..529cb60 100644 (file)
@@ -35,6 +35,7 @@ nvif_disp_ctor(struct nvif_device *device, const char *name, s32 oclass,
               struct nvif_disp *disp)
 {
        static const struct nvif_mclass disps[] = {
+               { GA102_DISP, -1 },
                { TU102_DISP, -1 },
                { GV100_DISP, -1 },
                { GP102_DISP, -1 },
index 7851bec..cdcc851 100644 (file)
@@ -1815,7 +1815,7 @@ nvf0_chipset = {
        .fb = gk110_fb_new,
        .fuse = gf100_fuse_new,
        .gpio = gk104_gpio_new,
-       .i2c = gk104_i2c_new,
+       .i2c = gk110_i2c_new,
        .ibus = gk104_ibus_new,
        .iccsense = gf100_iccsense_new,
        .imem = nv50_instmem_new,
@@ -1853,7 +1853,7 @@ nvf1_chipset = {
        .fb = gk110_fb_new,
        .fuse = gf100_fuse_new,
        .gpio = gk104_gpio_new,
-       .i2c = gk104_i2c_new,
+       .i2c = gk110_i2c_new,
        .ibus = gk104_ibus_new,
        .iccsense = gf100_iccsense_new,
        .imem = nv50_instmem_new,
@@ -1891,7 +1891,7 @@ nv106_chipset = {
        .fb = gk110_fb_new,
        .fuse = gf100_fuse_new,
        .gpio = gk104_gpio_new,
-       .i2c = gk104_i2c_new,
+       .i2c = gk110_i2c_new,
        .ibus = gk104_ibus_new,
        .iccsense = gf100_iccsense_new,
        .imem = nv50_instmem_new,
@@ -1929,7 +1929,7 @@ nv108_chipset = {
        .fb = gk110_fb_new,
        .fuse = gf100_fuse_new,
        .gpio = gk104_gpio_new,
-       .i2c = gk104_i2c_new,
+       .i2c = gk110_i2c_new,
        .ibus = gk104_ibus_new,
        .iccsense = gf100_iccsense_new,
        .imem = nv50_instmem_new,
@@ -1967,7 +1967,7 @@ nv117_chipset = {
        .fb = gm107_fb_new,
        .fuse = gm107_fuse_new,
        .gpio = gk104_gpio_new,
-       .i2c = gk104_i2c_new,
+       .i2c = gk110_i2c_new,
        .ibus = gk104_ibus_new,
        .iccsense = gf100_iccsense_new,
        .imem = nv50_instmem_new,
@@ -2003,7 +2003,7 @@ nv118_chipset = {
        .fb = gm107_fb_new,
        .fuse = gm107_fuse_new,
        .gpio = gk104_gpio_new,
-       .i2c = gk104_i2c_new,
+       .i2c = gk110_i2c_new,
        .ibus = gk104_ibus_new,
        .iccsense = gf100_iccsense_new,
        .imem = nv50_instmem_new,
@@ -2652,6 +2652,61 @@ nv168_chipset = {
        .sec2 = tu102_sec2_new,
 };
 
+static const struct nvkm_device_chip
+nv170_chipset = {
+       .name = "GA100",
+       .bar = tu102_bar_new,
+       .bios = nvkm_bios_new,
+       .devinit = ga100_devinit_new,
+       .fb = ga100_fb_new,
+       .gpio = gk104_gpio_new,
+       .i2c = gm200_i2c_new,
+       .ibus = gm200_ibus_new,
+       .imem = nv50_instmem_new,
+       .mc = ga100_mc_new,
+       .mmu = tu102_mmu_new,
+       .pci = gp100_pci_new,
+       .timer = gk20a_timer_new,
+};
+
+static const struct nvkm_device_chip
+nv172_chipset = {
+       .name = "GA102",
+       .bar = tu102_bar_new,
+       .bios = nvkm_bios_new,
+       .devinit = ga100_devinit_new,
+       .fb = ga102_fb_new,
+       .gpio = ga102_gpio_new,
+       .i2c = gm200_i2c_new,
+       .ibus = gm200_ibus_new,
+       .imem = nv50_instmem_new,
+       .mc = ga100_mc_new,
+       .mmu = tu102_mmu_new,
+       .pci = gp100_pci_new,
+       .timer = gk20a_timer_new,
+       .disp = ga102_disp_new,
+       .dma = gv100_dma_new,
+};
+
+static const struct nvkm_device_chip
+nv174_chipset = {
+       .name = "GA104",
+       .bar = tu102_bar_new,
+       .bios = nvkm_bios_new,
+       .devinit = ga100_devinit_new,
+       .fb = ga102_fb_new,
+       .gpio = ga102_gpio_new,
+       .i2c = gm200_i2c_new,
+       .ibus = gm200_ibus_new,
+       .imem = nv50_instmem_new,
+       .mc = ga100_mc_new,
+       .mmu = tu102_mmu_new,
+       .pci = gp100_pci_new,
+       .timer = gk20a_timer_new,
+       .disp = ga102_disp_new,
+       .dma = gv100_dma_new,
+};
+
 static int
 nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
                       struct nvkm_notify *notify)
@@ -3063,6 +3118,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                        case 0x130: device->card_type = GP100; break;
                        case 0x140: device->card_type = GV100; break;
                        case 0x160: device->card_type = TU100; break;
+                       case 0x170: device->card_type = GA100; break;
                        default:
                                break;
                        }
@@ -3160,10 +3216,23 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                case 0x166: device->chip = &nv166_chipset; break;
                case 0x167: device->chip = &nv167_chipset; break;
                case 0x168: device->chip = &nv168_chipset; break;
+               case 0x172: device->chip = &nv172_chipset; break;
+               case 0x174: device->chip = &nv174_chipset; break;
                default:
-                       nvdev_error(device, "unknown chipset (%08x)\n", boot0);
-                       ret = -ENODEV;
-                       goto done;
+                       if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
+                               switch (device->chipset) {
+                               case 0x170: device->chip = &nv170_chipset; break;
+                               default:
+                                       break;
+                               }
+                       }
+
+                       if (!device->chip) {
+                               nvdev_error(device, "unknown chipset (%08x)\n", boot0);
+                               ret = -ENODEV;
+                               goto done;
+                       }
+                       break;
                }
 
                nvdev_info(device, "NVIDIA %s (%08x)\n",
index 03c6d9a..1478947 100644 (file)
@@ -176,6 +176,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
        case GP100: args->v0.family = NV_DEVICE_INFO_V0_PASCAL; break;
        case GV100: args->v0.family = NV_DEVICE_INFO_V0_VOLTA; break;
        case TU100: args->v0.family = NV_DEVICE_INFO_V0_TURING; break;
+       case GA100: args->v0.family = NV_DEVICE_INFO_V0_AMPERE; break;
        default:
                args->v0.family = 0;
                break;
index cf07531..b03f043 100644 (file)
@@ -17,6 +17,7 @@ nvkm-y += nvkm/engine/disp/gp100.o
 nvkm-y += nvkm/engine/disp/gp102.o
 nvkm-y += nvkm/engine/disp/gv100.o
 nvkm-y += nvkm/engine/disp/tu102.o
+nvkm-y += nvkm/engine/disp/ga102.o
 nvkm-y += nvkm/engine/disp/vga.o
 
 nvkm-y += nvkm/engine/disp/head.o
@@ -42,6 +43,7 @@ nvkm-y += nvkm/engine/disp/sorgm200.o
 nvkm-y += nvkm/engine/disp/sorgp100.o
 nvkm-y += nvkm/engine/disp/sorgv100.o
 nvkm-y += nvkm/engine/disp/sortu102.o
+nvkm-y += nvkm/engine/disp/sorga102.o
 
 nvkm-y += nvkm/engine/disp/outp.o
 nvkm-y += nvkm/engine/disp/dp.o
@@ -75,6 +77,7 @@ nvkm-y += nvkm/engine/disp/rootgp100.o
 nvkm-y += nvkm/engine/disp/rootgp102.o
 nvkm-y += nvkm/engine/disp/rootgv100.o
 nvkm-y += nvkm/engine/disp/roottu102.o
+nvkm-y += nvkm/engine/disp/rootga102.o
 
 nvkm-y += nvkm/engine/disp/capsgv100.o
 
index 3800aeb..55fbfe2 100644 (file)
 
 #include <nvif/event.h>
 
+/* IED scripts are no longer used by UEFI/RM from Ampere, but have been updated for
+ * the x86 option ROM.  However, the relevant VBIOS table versions weren't modified,
+ * so we're unable to detect this in a nice way.
+ */
+#define AMPERE_IED_HACK(disp) ((disp)->engine.subdev.device->card_type >= GA100)
+
 struct lt_state {
        struct nvkm_dp *dp;
        u8  stat[6];
@@ -238,6 +244,19 @@ nvkm_dp_train_links(struct nvkm_dp *dp)
                dp->dpcd[DPCD_RC02] &= ~DPCD_RC02_TPS3_SUPPORTED;
        lt.pc2 = dp->dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED;
 
+       if (AMPERE_IED_HACK(disp) && (lnkcmp = lt.dp->info.script[0])) {
+               /* Execute BeforeLinkTraining script from DP Info table. */
+               while (ior->dp.bw < nvbios_rd08(bios, lnkcmp))
+                       lnkcmp += 3;
+               lnkcmp = nvbios_rd16(bios, lnkcmp + 1);
+
+               nvbios_init(&dp->outp.disp->engine.subdev, lnkcmp,
+                       init.outp = &dp->outp.info;
+                       init.or   = ior->id;
+                       init.link = ior->asy.link;
+               );
+       }
+
        /* Set desired link configuration on the source. */
        if ((lnkcmp = lt.dp->info.lnkcmp)) {
                if (dp->version < 0x30) {
@@ -316,12 +335,14 @@ nvkm_dp_train_init(struct nvkm_dp *dp)
                );
        }
 
-       /* Execute BeforeLinkTraining script from DP Info table. */
-       nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[0],
-               init.outp = &dp->outp.info;
-               init.or   = dp->outp.ior->id;
-               init.link = dp->outp.ior->asy.link;
-       );
+       if (!AMPERE_IED_HACK(dp->outp.disp)) {
+               /* Execute BeforeLinkTraining script from DP Info table. */
+               nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[0],
+                       init.outp = &dp->outp.info;
+                       init.or   = dp->outp.ior->id;
+                       init.link = dp->outp.ior->asy.link;
+               );
+       }
 }
 
 static const struct dp_rates {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
new file mode 100644 (file)
index 0000000..aa2e564
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nv50.h"
+#include "head.h"
+#include "ior.h"
+#include "channv50.h"
+#include "rootnv50.h"
+
+static const struct nv50_disp_func
+ga102_disp = {
+       .init = tu102_disp_init,
+       .fini = gv100_disp_fini,
+       .intr = gv100_disp_intr,
+       .uevent = &gv100_disp_chan_uevent,
+       .super = gv100_disp_super,
+       .root = &ga102_disp_root_oclass,
+       .wndw = { .cnt = gv100_disp_wndw_cnt },
+       .head = { .cnt = gv100_head_cnt, .new = gv100_head_new },
+       .sor = { .cnt = gv100_sor_cnt, .new = ga102_sor_new },
+       .ramht_size = 0x2000,
+};
+
+int
+ga102_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+       return nv50_disp_new_(&ga102_disp, device, index, pdisp);
+}
index 09f3038..9f0bb7c 100644 (file)
@@ -150,6 +150,8 @@ void gv100_sor_dp_audio(struct nvkm_ior *, int, bool);
 void gv100_sor_dp_audio_sym(struct nvkm_ior *, int, u16, u32);
 void gv100_sor_dp_watermark(struct nvkm_ior *, int, u8);
 
+void tu102_sor_dp_vcpi(struct nvkm_ior *, int, u8, u8, u16, u16);
+
 void g84_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
 void gt215_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
 void gf119_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
@@ -207,4 +209,6 @@ int gv100_sor_cnt(struct nvkm_disp *, unsigned long *);
 int gv100_sor_new(struct nvkm_disp *, int);
 
 int tu102_sor_new(struct nvkm_disp *, int);
+
+int ga102_sor_new(struct nvkm_disp *, int);
 #endif
index a677161..db31b37 100644 (file)
@@ -86,6 +86,8 @@ void gv100_disp_intr(struct nv50_disp *);
 void gv100_disp_super(struct work_struct *);
 int gv100_disp_wndw_cnt(struct nvkm_disp *, unsigned long *);
 
+int tu102_disp_init(struct nv50_disp *);
+
 void nv50_disp_dptmds_war_2(struct nv50_disp *, struct dcb_output *);
 void nv50_disp_dptmds_war_3(struct nv50_disp *, struct dcb_output *);
 void nv50_disp_update_sppll1(struct nv50_disp *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootga102.c
new file mode 100644 (file)
index 0000000..9af07c3
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "rootnv50.h"
+#include "channv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+ga102_disp_root = {
+       .user = {
+               {{-1,-1,GV100_DISP_CAPS                }, gv100_disp_caps_new },
+               {{0,0,GA102_DISP_CURSOR                }, gv100_disp_curs_new },
+               {{0,0,GA102_DISP_WINDOW_IMM_CHANNEL_DMA}, gv100_disp_wimm_new },
+               {{0,0,GA102_DISP_CORE_CHANNEL_DMA      }, gv100_disp_core_new },
+               {{0,0,GA102_DISP_WINDOW_CHANNEL_DMA    }, gv100_disp_wndw_new },
+               {}
+       },
+};
+
+static int
+ga102_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+                   void *data, u32 size, struct nvkm_object **pobject)
+{
+       return nv50_disp_root_new_(&ga102_disp_root, disp, oclass, data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+ga102_disp_root_oclass = {
+       .base.oclass = GA102_DISP,
+       .base.minver = -1,
+       .base.maxver = -1,
+       .ctor = ga102_disp_root_new,
+};
index 7070f54..27bb170 100644 (file)
@@ -41,4 +41,5 @@ extern const struct nvkm_disp_oclass gp100_disp_root_oclass;
 extern const struct nvkm_disp_oclass gp102_disp_root_oclass;
 extern const struct nvkm_disp_oclass gv100_disp_root_oclass;
 extern const struct nvkm_disp_oclass tu102_disp_root_oclass;
+extern const struct nvkm_disp_oclass ga102_disp_root_oclass;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorga102.c
new file mode 100644 (file)
index 0000000..033827d
--- /dev/null
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ior.h"
+
+#include <subdev/timer.h>
+
+static int
+ga102_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
+{
+       struct nvkm_device *device = sor->disp->engine.subdev.device;
+       const u32 soff = nv50_ior_base(sor);
+       const u32 loff = nv50_sor_link(sor);
+       u32 dpctrl = 0x00000000;
+       u32 clksor = 0x00000000;
+
+       switch (sor->dp.bw) {
+       case 0x06: clksor |= 0x00000000; break;
+       case 0x0a: clksor |= 0x00040000; break;
+       case 0x14: clksor |= 0x00080000; break;
+       case 0x1e: clksor |= 0x000c0000; break;
+       default:
+               WARN_ON(1);
+               return -EINVAL;
+       }
+
+       dpctrl |= ((1 << sor->dp.nr) - 1) << 16;
+       if (sor->dp.mst)
+               dpctrl |= 0x40000000;
+       if (sor->dp.ef)
+               dpctrl |= 0x00004000;
+
+       nvkm_mask(device, 0x612300 + soff, 0x007c0000, clksor);
+
+       /*XXX*/
+       nvkm_msec(device, 40, NVKM_DELAY);
+       nvkm_mask(device, 0x612300 + soff, 0x00030000, 0x00010000);
+       nvkm_mask(device, 0x61c10c + loff, 0x00000003, 0x00000001);
+
+       nvkm_mask(device, 0x61c10c + loff, 0x401f4000, dpctrl);
+       return 0;
+}
+
+static void
+ga102_sor_clock(struct nvkm_ior *sor)
+{
+       struct nvkm_device *device = sor->disp->engine.subdev.device;
+       u32 div2 = 0;
+       if (sor->asy.proto == TMDS) {
+               if (sor->tmds.high_speed)
+                       div2 = 1;
+       }
+       nvkm_wr32(device, 0x00ec08 + (sor->id * 0x10), 0x00000000);
+       nvkm_wr32(device, 0x00ec04 + (sor->id * 0x10), div2);
+}
+
+static const struct nvkm_ior_func
+ga102_sor_hda = {
+       .route = {
+               .get = gm200_sor_route_get,
+               .set = gm200_sor_route_set,
+       },
+       .state = gv100_sor_state,
+       .power = nv50_sor_power,
+       .clock = ga102_sor_clock,
+       .hdmi = {
+               .ctrl = gv100_hdmi_ctrl,
+               .scdc = gm200_hdmi_scdc,
+       },
+       .dp = {
+               .lanes = { 0, 1, 2, 3 },
+               .links = ga102_sor_dp_links,
+               .power = g94_sor_dp_power,
+               .pattern = gm107_sor_dp_pattern,
+               .drive = gm200_sor_dp_drive,
+               .vcpi = tu102_sor_dp_vcpi,
+               .audio = gv100_sor_dp_audio,
+               .audio_sym = gv100_sor_dp_audio_sym,
+               .watermark = gv100_sor_dp_watermark,
+       },
+       .hda = {
+               .hpd = gf119_hda_hpd,
+               .eld = gf119_hda_eld,
+               .device_entry = gv100_hda_device_entry,
+       },
+};
+
+static const struct nvkm_ior_func
+ga102_sor = {
+       .route = {
+               .get = gm200_sor_route_get,
+               .set = gm200_sor_route_set,
+       },
+       .state = gv100_sor_state,
+       .power = nv50_sor_power,
+       .clock = ga102_sor_clock,
+       .hdmi = {
+               .ctrl = gv100_hdmi_ctrl,
+               .scdc = gm200_hdmi_scdc,
+       },
+       .dp = {
+               .lanes = { 0, 1, 2, 3 },
+               .links = ga102_sor_dp_links,
+               .power = g94_sor_dp_power,
+               .pattern = gm107_sor_dp_pattern,
+               .drive = gm200_sor_dp_drive,
+               .vcpi = tu102_sor_dp_vcpi,
+               .audio = gv100_sor_dp_audio,
+               .audio_sym = gv100_sor_dp_audio_sym,
+               .watermark = gv100_sor_dp_watermark,
+       },
+};
+
+int
+ga102_sor_new(struct nvkm_disp *disp, int id)
+{
+       struct nvkm_device *device = disp->engine.subdev.device;
+       u32 hda = nvkm_rd32(device, 0x08a15c);
+       if (hda & BIT(id))
+               return nvkm_ior_new_(&ga102_sor_hda, disp, SOR, id);
+       return nvkm_ior_new_(&ga102_sor, disp, SOR, id);
+}
index 59865a9..0cf9e87 100644 (file)
@@ -23,7 +23,7 @@
 
 #include <subdev/timer.h>
 
-static void
+void
 tu102_sor_dp_vcpi(struct nvkm_ior *sor, int head,
                  u8 slot, u8 slot_nr, u16 pbn, u16 aligned)
 {
index 883ae41..4c85d1d 100644 (file)
@@ -28,7 +28,7 @@
 #include <core/gpuobj.h>
 #include <subdev/timer.h>
 
-static int
+int
 tu102_disp_init(struct nv50_disp *disp)
 {
        struct nvkm_device *device = disp->base.engine.subdev.device;
index 7deb81b..4b571cc 100644 (file)
@@ -75,7 +75,7 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
        nvkm_debug(subdev, "%08x: type %02x, %d bytes\n",
                   image.base, image.type, image.size);
 
-       if (!shadow_fetch(bios, mthd, image.size)) {
+       if (!shadow_fetch(bios, mthd, image.base + image.size)) {
                nvkm_debug(subdev, "%08x: fetch failed\n", image.base);
                return 0;
        }
index 3634cd0..023ddc7 100644 (file)
@@ -64,6 +64,9 @@ pramin_init(struct nvkm_bios *bios, const char *name)
                return NULL;
 
        /* we can't get the bios image pointer without PDISP */
+       if (device->card_type >= GA100)
+               addr = device->chipset == 0x170; /*XXX: find the fuse reg for this */
+       else
        if (device->card_type >= GM100)
                addr = nvkm_rd32(device, 0x021c04);
        else
index b342937..d1abb64 100644 (file)
@@ -15,3 +15,4 @@ nvkm-y += nvkm/subdev/devinit/gm107.o
 nvkm-y += nvkm/subdev/devinit/gm200.o
 nvkm-y += nvkm/subdev/devinit/gv100.o
 nvkm-y += nvkm/subdev/devinit/tu102.o
+nvkm-y += nvkm/subdev/devinit/ga100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c
new file mode 100644 (file)
index 0000000..636a921
--- /dev/null
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nv50.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+#include <subdev/clk/pll.h>
+
+static int
+ga100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
+{
+       struct nvkm_subdev *subdev = &init->subdev;
+       struct nvkm_device *device = subdev->device;
+       struct nvbios_pll info;
+       int head = type - PLL_VPLL0;
+       int N, fN, M, P;
+       int ret;
+
+       ret = nvbios_pll_parse(device->bios, type, &info);
+       if (ret)
+               return ret;
+
+       ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
+       if (ret < 0)
+               return ret;
+
+       switch (info.type) {
+       case PLL_VPLL0:
+       case PLL_VPLL1:
+       case PLL_VPLL2:
+       case PLL_VPLL3:
+               nvkm_wr32(device, 0x00ef00 + (head * 0x40), 0x02080004);
+               nvkm_wr32(device, 0x00ef18 + (head * 0x40), (N << 16) | fN);
+               nvkm_wr32(device, 0x00ef04 + (head * 0x40), (P << 16) | M);
+               nvkm_wr32(device, 0x00e9c0 + (head * 0x04), 0x00000001);
+               break;
+       default:
+               nvkm_warn(subdev, "%08x/%dKhz unimplemented\n", type, freq);
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+static const struct nvkm_devinit_func
+ga100_devinit = {
+       .init = nv50_devinit_init,
+       .post = tu102_devinit_post,
+       .pll_set = ga100_devinit_pll_set,
+};
+
+int
+ga100_devinit_new(struct nvkm_device *device, int index, struct nvkm_devinit **pinit)
+{
+       return nv50_devinit_new_(&ga100_devinit, device, index, pinit);
+}
index 9472335..05961e6 100644 (file)
@@ -19,4 +19,5 @@ void nvkm_devinit_ctor(const struct nvkm_devinit_func *, struct nvkm_device *,
                       int index, struct nvkm_devinit *);
 
 int nv04_devinit_post(struct nvkm_devinit *, bool);
+int tu102_devinit_post(struct nvkm_devinit *, bool);
 #endif
index 397670e..9a469bf 100644 (file)
@@ -65,7 +65,7 @@ tu102_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
        return ret;
 }
 
-static int
+int
 tu102_devinit_post(struct nvkm_devinit *base, bool post)
 {
        struct nv50_devinit *init = nv50_devinit(base);
index 43a4215..5d0bab8 100644 (file)
@@ -32,6 +32,8 @@ nvkm-y += nvkm/subdev/fb/gp100.o
 nvkm-y += nvkm/subdev/fb/gp102.o
 nvkm-y += nvkm/subdev/fb/gp10b.o
 nvkm-y += nvkm/subdev/fb/gv100.o
+nvkm-y += nvkm/subdev/fb/ga100.o
+nvkm-y += nvkm/subdev/fb/ga102.o
 
 nvkm-y += nvkm/subdev/fb/ram.o
 nvkm-y += nvkm/subdev/fb/ramnv04.o
@@ -52,6 +54,7 @@ nvkm-y += nvkm/subdev/fb/ramgk104.o
 nvkm-y += nvkm/subdev/fb/ramgm107.o
 nvkm-y += nvkm/subdev/fb/ramgm200.o
 nvkm-y += nvkm/subdev/fb/ramgp100.o
+nvkm-y += nvkm/subdev/fb/ramga102.o
 nvkm-y += nvkm/subdev/fb/sddr2.o
 nvkm-y += nvkm/subdev/fb/sddr3.o
 nvkm-y += nvkm/subdev/fb/gddr3.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
new file mode 100644 (file)
index 0000000..bf82686
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ram.h"
+
+static const struct nvkm_fb_func
+ga100_fb = {
+       .dtor = gf100_fb_dtor,
+       .oneinit = gf100_fb_oneinit,
+       .init = gp100_fb_init,
+       .init_page = gv100_fb_init_page,
+       .init_unkn = gp100_fb_init_unkn,
+       .ram_new = gp100_ram_new,
+       .default_bigpage = 16,
+};
+
+int
+ga100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+       return gp102_fb_new_(&ga100_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
new file mode 100644 (file)
index 0000000..bcecf84
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ram.h"
+
+static const struct nvkm_fb_func
+ga102_fb = {
+       .dtor = gf100_fb_dtor,
+       .oneinit = gf100_fb_oneinit,
+       .init = gp100_fb_init,
+       .init_page = gv100_fb_init_page,
+       .init_unkn = gp100_fb_init_unkn,
+       .ram_new = ga102_ram_new,
+       .default_bigpage = 16,
+};
+
+int
+ga102_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+       return gp102_fb_new_(&ga102_fb, device, index, pfb);
+}
index 10ff5d0..feda86a 100644 (file)
@@ -22,7 +22,7 @@
 #include "gf100.h"
 #include "ram.h"
 
-static int
+int
 gv100_fb_init_page(struct nvkm_fb *fb)
 {
        return (fb->page == 16) ? 0 : -EINVAL;
index 5be9c56..66932ac 100644 (file)
@@ -82,4 +82,6 @@ int gp102_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, int,
                  struct nvkm_fb **);
 bool gp102_fb_vpr_scrub_required(struct nvkm_fb *);
 int gp102_fb_vpr_scrub(struct nvkm_fb *);
+
+int gv100_fb_init_page(struct nvkm_fb *);
 #endif
index d723a9b..ea7d66f 100644 (file)
@@ -70,4 +70,5 @@ int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int gm200_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 int gp100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int ga102_ram_new(struct nvkm_fb *, struct nvkm_ram **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramga102.c
new file mode 100644 (file)
index 0000000..298c136
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ram.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/init.h>
+#include <subdev/bios/rammap.h>
+
+static const struct nvkm_ram_func
+ga102_ram = {
+};
+
+int
+ga102_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+       struct nvkm_device *device = fb->subdev.device;
+       enum nvkm_ram_type type = nvkm_fb_bios_memtype(device->bios);
+       u32 size = nvkm_rd32(device, 0x1183a4);
+
+       return nvkm_ram_new_(&ga102_ram, fb, type, (u64)size << 20, pram);
+}
index b2ad592..efbbaa0 100644 (file)
@@ -5,3 +5,4 @@ nvkm-y += nvkm/subdev/gpio/nv50.o
 nvkm-y += nvkm/subdev/gpio/g94.o
 nvkm-y += nvkm/subdev/gpio/gf119.o
 nvkm-y += nvkm/subdev/gpio/gk104.o
+nvkm-y += nvkm/subdev/gpio/ga102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c
new file mode 100644 (file)
index 0000000..62c791b
--- /dev/null
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static void
+ga102_gpio_reset(struct nvkm_gpio *gpio, u8 match)
+{
+       struct nvkm_device *device = gpio->subdev.device;
+       struct nvkm_bios *bios = device->bios;
+       u8 ver, len;
+       u16 entry;
+       int ent = -1;
+
+       while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
+               u32 data = nvbios_rd32(bios, entry);
+               u8  line =   (data & 0x0000003f);
+               u8  defs = !!(data & 0x00000080);
+               u8  func =   (data & 0x0000ff00) >> 8;
+               u8  unk0 =   (data & 0x00ff0000) >> 16;
+               u8  unk1 =   (data & 0x1f000000) >> 24;
+
+               if ( func  == DCB_GPIO_UNUSED ||
+                   (match != DCB_GPIO_UNUSED && match != func))
+                       continue;
+
+               nvkm_gpio_set(gpio, 0, func, line, defs);
+
+               nvkm_mask(device, 0x021200 + (line * 4), 0xff, unk0);
+               if (unk1--)
+                       nvkm_mask(device, 0x00d740 + (unk1 * 4), 0xff, line);
+       }
+}
+
+static int
+ga102_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
+{
+       struct nvkm_device *device = gpio->subdev.device;
+       u32 data = ((dir ^ 1) << 13) | (out << 12);
+       nvkm_mask(device, 0x021200 + (line * 4), 0x00003000, data);
+       nvkm_mask(device, 0x00d604, 0x00000001, 0x00000001); /* update? */
+       return 0;
+}
+
+static int
+ga102_gpio_sense(struct nvkm_gpio *gpio, int line)
+{
+       struct nvkm_device *device = gpio->subdev.device;
+       return !!(nvkm_rd32(device, 0x021200 + (line * 4)) & 0x00004000);
+}
+
+static void
+ga102_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
+{
+       struct nvkm_device *device = gpio->subdev.device;
+       u32 intr0 = nvkm_rd32(device, 0x021640);
+       u32 intr1 = nvkm_rd32(device, 0x02164c);
+       u32 stat0 = nvkm_rd32(device, 0x021648) & intr0;
+       u32 stat1 = nvkm_rd32(device, 0x021654) & intr1;
+       *lo = (stat1 & 0xffff0000) | (stat0 >> 16);
+       *hi = (stat1 << 16) | (stat0 & 0x0000ffff);
+       nvkm_wr32(device, 0x021640, intr0);
+       nvkm_wr32(device, 0x02164c, intr1);
+}
+
+static void
+ga102_gpio_intr_mask(struct nvkm_gpio *gpio, u32 type, u32 mask, u32 data)
+{
+       struct nvkm_device *device = gpio->subdev.device;
+       u32 inte0 = nvkm_rd32(device, 0x021648);
+       u32 inte1 = nvkm_rd32(device, 0x021654);
+       if (type & NVKM_GPIO_LO)
+               inte0 = (inte0 & ~(mask << 16)) | (data << 16);
+       if (type & NVKM_GPIO_HI)
+               inte0 = (inte0 & ~(mask & 0xffff)) | (data & 0xffff);
+       mask >>= 16;
+       data >>= 16;
+       if (type & NVKM_GPIO_LO)
+               inte1 = (inte1 & ~(mask << 16)) | (data << 16);
+       if (type & NVKM_GPIO_HI)
+               inte1 = (inte1 & ~mask) | data;
+       nvkm_wr32(device, 0x021648, inte0);
+       nvkm_wr32(device, 0x021654, inte1);
+}
+
+static const struct nvkm_gpio_func
+ga102_gpio = {
+       .lines = 32,
+       .intr_stat = ga102_gpio_intr_stat,
+       .intr_mask = ga102_gpio_intr_mask,
+       .drive = ga102_gpio_drive,
+       .sense = ga102_gpio_sense,
+       .reset = ga102_gpio_reset,
+};
+
+int
+ga102_gpio_new(struct nvkm_device *device, int index, struct nvkm_gpio **pgpio)
+{
+       return nvkm_gpio_new_(&ga102_gpio, device, index, pgpio);
+}
index 723d028..8197039 100644 (file)
@@ -7,6 +7,7 @@ nvkm-y += nvkm/subdev/i2c/g94.o
 nvkm-y += nvkm/subdev/i2c/gf117.o
 nvkm-y += nvkm/subdev/i2c/gf119.o
 nvkm-y += nvkm/subdev/i2c/gk104.o
+nvkm-y += nvkm/subdev/i2c/gk110.o
 nvkm-y += nvkm/subdev/i2c/gm200.o
 
 nvkm-y += nvkm/subdev/i2c/pad.o
index 30b4889..f920eab 100644 (file)
@@ -3,6 +3,13 @@
 #define __NVKM_I2C_AUX_H__
 #include "pad.h"
 
+/* Enable/disable HW-initiated ("automated") DPCD reads for an AUX channel.
+ * No-op on chipsets whose i2c implementation lacks the aux_autodpcd hook
+ * (only gk110+ and gm200+ provide one).  Callers disable autodpcd around
+ * manual AUX transfers and re-enable it afterwards, so the callback must
+ * receive the caller's 'enable' value — not a hard-coded 'false', which
+ * would leave autodpcd permanently disabled after the first transfer.
+ */
+static inline void
+nvkm_i2c_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable)
+{
+       if (i2c->func->aux_autodpcd)
+               i2c->func->aux_autodpcd(i2c, aux, enable);
+}
+
 struct nvkm_i2c_aux_func {
        bool address_only;
        int  (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type,
index db7769c..47068f6 100644 (file)
@@ -77,7 +77,8 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
                 u8 type, u32 addr, u8 *data, u8 *size)
 {
        struct g94_i2c_aux *aux = g94_i2c_aux(obj);
-       struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+       struct nvkm_i2c *i2c = aux->base.pad->i2c;
+       struct nvkm_device *device = i2c->subdev.device;
        const u32 base = aux->ch * 0x50;
        u32 ctrl, stat, timeout, retries = 0;
        u32 xbuf[4] = {};
@@ -96,6 +97,8 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
                goto out;
        }
 
+       nvkm_i2c_aux_autodpcd(i2c, aux->ch, false);
+
        if (!(type & 1)) {
                memcpy(xbuf, data, *size);
                for (i = 0; i < 16; i += 4) {
@@ -128,7 +131,7 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
                        if (!timeout--) {
                                AUX_ERR(&aux->base, "timeout %08x", ctrl);
                                ret = -EIO;
-                               goto out;
+                               goto out_err;
                        }
                } while (ctrl & 0x00010000);
                ret = 0;
@@ -154,7 +157,8 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
                memcpy(data, xbuf, *size);
                *size = stat & 0x0000001f;
        }
-
+out_err:
+       nvkm_i2c_aux_autodpcd(i2c, aux->ch, true);
 out:
        g94_i2c_aux_fini(aux);
        return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
index edb6148..8bd1d44 100644 (file)
@@ -33,7 +33,7 @@ static void
 gm200_i2c_aux_fini(struct gm200_i2c_aux *aux)
 {
        struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
-       nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00310000, 0x00000000);
+       nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00710000, 0x00000000);
 }
 
 static int
@@ -54,10 +54,10 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
                        AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl);
                        return -EBUSY;
                }
-       } while (ctrl & 0x03010000);
+       } while (ctrl & 0x07010000);
 
        /* set some magic, and wait up to 1ms for it to appear */
-       nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00300000, ureq);
+       nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00700000, ureq);
        timeout = 1000;
        do {
                ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50));
@@ -67,7 +67,7 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
                        gm200_i2c_aux_fini(aux);
                        return -EBUSY;
                }
-       } while ((ctrl & 0x03000000) != urep);
+       } while ((ctrl & 0x07000000) != urep);
 
        return 0;
 }
@@ -77,7 +77,8 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
                   u8 type, u32 addr, u8 *data, u8 *size)
 {
        struct gm200_i2c_aux *aux = gm200_i2c_aux(obj);
-       struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
+       struct nvkm_i2c *i2c = aux->base.pad->i2c;
+       struct nvkm_device *device = i2c->subdev.device;
        const u32 base = aux->ch * 0x50;
        u32 ctrl, stat, timeout, retries = 0;
        u32 xbuf[4] = {};
@@ -96,6 +97,8 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
                goto out;
        }
 
+       nvkm_i2c_aux_autodpcd(i2c, aux->ch, false);
+
        if (!(type & 1)) {
                memcpy(xbuf, data, *size);
                for (i = 0; i < 16; i += 4) {
@@ -128,7 +131,7 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
                        if (!timeout--) {
                                AUX_ERR(&aux->base, "timeout %08x", ctrl);
                                ret = -EIO;
-                               goto out;
+                               goto out_err;
                        }
                } while (ctrl & 0x00010000);
                ret = 0;
@@ -155,6 +158,8 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
                *size = stat & 0x0000001f;
        }
 
+out_err:
+       nvkm_i2c_aux_autodpcd(i2c, aux->ch, true);
 out:
        gm200_i2c_aux_fini(aux);
        return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c
new file mode 100644 (file)
index 0000000..8e3bfa1
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "pad.h"
+
+static void
+gk110_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable)
+{
+       nvkm_mask(i2c->subdev.device, 0x00e4f8 + (aux * 0x50), 0x00010000, enable << 16);
+}
+
+static const struct nvkm_i2c_func
+gk110_i2c = {
+       .pad_x_new = gf119_i2c_pad_x_new,
+       .pad_s_new = gf119_i2c_pad_s_new,
+       .aux = 4,
+       .aux_stat = gk104_aux_stat,
+       .aux_mask = gk104_aux_mask,
+       .aux_autodpcd = gk110_aux_autodpcd,
+};
+
+int
+gk110_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c)
+{
+       return nvkm_i2c_new_(&gk110_i2c, device, index, pi2c);
+}
index a23c5f3..7b2375b 100644 (file)
 #include "priv.h"
 #include "pad.h"
 
+static void
+gm200_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable)
+{
+       nvkm_mask(i2c->subdev.device, 0x00d968 + (aux * 0x50), 0x00010000, enable << 16);
+}
+
 static const struct nvkm_i2c_func
 gm200_i2c = {
        .pad_x_new = gf119_i2c_pad_x_new,
@@ -31,6 +37,7 @@ gm200_i2c = {
        .aux = 8,
        .aux_stat = gk104_aux_stat,
        .aux_mask = gk104_aux_mask,
+       .aux_autodpcd = gm200_aux_autodpcd,
 };
 
 int
index 4610168..44b7bb7 100644 (file)
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: MIT */
 #ifndef __NVKM_I2C_PAD_H__
 #define __NVKM_I2C_PAD_H__
-#include <subdev/i2c.h>
+#include "priv.h"
 
 struct nvkm_i2c_pad {
        const struct nvkm_i2c_pad_func *func;
index bd86bc2..e35f603 100644 (file)
@@ -23,6 +23,10 @@ struct nvkm_i2c_func {
        /* mask on/off interrupt types for a given set of auxch
         */
        void (*aux_mask)(struct nvkm_i2c *, u32, u32, u32);
+
+       /* enable/disable HW-initiated DPCD reads
+        */
+       void (*aux_autodpcd)(struct nvkm_i2c *, int aux, bool enable);
 };
 
 void g94_aux_stat(struct nvkm_i2c *, u32 *, u32 *, u32 *, u32 *);
index 2340040..1115376 100644 (file)
@@ -22,6 +22,7 @@
  * Authors: Ben Skeggs
  */
 #include "priv.h"
+#include <subdev/timer.h>
 
 static void
 gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
@@ -31,7 +32,6 @@ gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
        u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0400));
        u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0400));
        nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
-       nvkm_mask(device, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
 }
 
 static void
@@ -42,7 +42,6 @@ gf100_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
        u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0400));
        u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0400));
        nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
-       nvkm_mask(device, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
 }
 
 static void
@@ -53,7 +52,6 @@ gf100_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
        u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0400));
        u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0400));
        nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
-       nvkm_mask(device, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
 }
 
 void
@@ -90,6 +88,12 @@ gf100_ibus_intr(struct nvkm_subdev *ibus)
                        intr1 &= ~stat;
                }
        }
+
+       nvkm_mask(device, 0x121c4c, 0x0000003f, 0x00000002);
+       nvkm_msec(device, 2000,
+               if (!(nvkm_rd32(device, 0x121c4c) & 0x0000003f))
+                       break;
+       );
 }
 
 static int
index f3915f8..22e487b 100644 (file)
@@ -22,6 +22,7 @@
  * Authors: Ben Skeggs
  */
 #include "priv.h"
+#include <subdev/timer.h>
 
 static void
 gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
@@ -31,7 +32,6 @@ gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
        u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800));
        u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800));
        nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
-       nvkm_mask(device, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
 }
 
 static void
@@ -42,7 +42,6 @@ gk104_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
        u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800));
        u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800));
        nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
-       nvkm_mask(device, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
 }
 
 static void
@@ -53,7 +52,6 @@ gk104_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
        u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800));
        u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800));
        nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
-       nvkm_mask(device, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
 }
 
 void
@@ -90,6 +88,12 @@ gk104_ibus_intr(struct nvkm_subdev *ibus)
                        intr1 &= ~stat;
                }
        }
+
+       nvkm_mask(device, 0x12004c, 0x0000003f, 0x00000002);
+       nvkm_msec(device, 2000,
+               if (!(nvkm_rd32(device, 0x12004c) & 0x0000003f))
+                       break;
+       );
 }
 
 static int
index 2585ef0..ac2b34e 100644 (file)
@@ -14,3 +14,4 @@ nvkm-y += nvkm/subdev/mc/gk20a.o
 nvkm-y += nvkm/subdev/mc/gp100.o
 nvkm-y += nvkm/subdev/mc/gp10b.o
 nvkm-y += nvkm/subdev/mc/tu102.o
+nvkm-y += nvkm/subdev/mc/ga100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c
new file mode 100644 (file)
index 0000000..967eb3a
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2021 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static void
+ga100_mc_intr_unarm(struct nvkm_mc *mc)
+{
+       nvkm_wr32(mc->subdev.device, 0xb81610, 0x00000004);
+}
+
+static void
+ga100_mc_intr_rearm(struct nvkm_mc *mc)
+{
+       nvkm_wr32(mc->subdev.device, 0xb81608, 0x00000004);
+}
+
+static void
+ga100_mc_intr_mask(struct nvkm_mc *mc, u32 mask, u32 intr)
+{
+       nvkm_wr32(mc->subdev.device, 0xb81210,          mask & intr );
+       nvkm_wr32(mc->subdev.device, 0xb81410, mask & ~(mask & intr));
+}
+
+static u32
+ga100_mc_intr_stat(struct nvkm_mc *mc)
+{
+       u32 intr_top = nvkm_rd32(mc->subdev.device, 0xb81600), intr = 0x00000000;
+       if (intr_top & 0x00000004)
+               intr = nvkm_mask(mc->subdev.device, 0xb81010, 0x00000000, 0x00000000);
+       return intr;
+}
+
+static void
+ga100_mc_init(struct nvkm_mc *mc)
+{
+       nv50_mc_init(mc);
+       nvkm_wr32(mc->subdev.device, 0xb81210, 0xffffffff);
+}
+
+static const struct nvkm_mc_func
+ga100_mc = {
+       .init = ga100_mc_init,
+       .intr = gp100_mc_intr,
+       .intr_unarm = ga100_mc_intr_unarm,
+       .intr_rearm = ga100_mc_intr_rearm,
+       .intr_mask = ga100_mc_intr_mask,
+       .intr_stat = ga100_mc_intr_stat,
+       .reset = gk104_mc_reset,
+};
+
+int
+ga100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+       return nvkm_mc_new_(&ga100_mc, device, index, pmc);
+}
index de91e9a..6d5212a 100644 (file)
@@ -316,9 +316,9 @@ nvkm_mmu_vram(struct nvkm_mmu *mmu)
 {
        struct nvkm_device *device = mmu->subdev.device;
        struct nvkm_mm *mm = &device->fb->ram->vram;
-       const u32 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
-       const u32 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
-       const u32 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
+       const u64 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL);
+       const u64 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP);
+       const u64 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED);
        u8 type = NVKM_MEM_KIND * !!mmu->func->kind;
        u8 heap = NVKM_MEM_VRAM;
        int heapM, heapN, heapU;
index d59ef6e..23195d5 100644 (file)
@@ -730,9 +730,6 @@ int radeon_ttm_init(struct radeon_device *rdev)
        }
        rdev->mman.initialized = true;
 
-       ttm_pool_init(&rdev->mman.bdev.pool, rdev->dev, rdev->need_swiotlb,
-                     dma_addressing_limited(&rdev->pdev->dev));
-
        r = radeon_ttm_init_vram(rdev);
        if (r) {
                DRM_ERROR("Failed initializing VRAM heap.\n");
index 7b2f606..11e0313 100644 (file)
@@ -66,7 +66,7 @@ static struct ttm_pool_type global_uncached[MAX_ORDER];
 static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
 static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
 
-static spinlock_t shrinker_lock;
+static struct mutex shrinker_lock;
 static struct list_head shrinker_list;
 static struct shrinker mm_shrinker;
 
@@ -79,12 +79,13 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
        struct page *p;
        void *vaddr;
 
-       if (order) {
-               gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+       /* Don't set the __GFP_COMP flag for higher order allocations.
+        * Mapping pages directly into an userspace process and calling
+        * put_page() on a TTM allocated page is illegal.
+        */
+       if (order)
+               gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY |
                        __GFP_KSWAPD_RECLAIM;
-               gfp_flags &= ~__GFP_MOVABLE;
-               gfp_flags &= ~__GFP_COMP;
-       }
 
        if (!pool->use_dma_alloc) {
                p = alloc_pages(gfp_flags, order);
@@ -190,7 +191,7 @@ static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
                size_t size = (1ULL << order) * PAGE_SIZE;
 
                addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
-               if (dma_mapping_error(pool->dev, **dma_addr))
+               if (dma_mapping_error(pool->dev, addr))
                        return -EFAULT;
        }
 
@@ -249,9 +250,9 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
        spin_lock_init(&pt->lock);
        INIT_LIST_HEAD(&pt->pages);
 
-       spin_lock(&shrinker_lock);
+       mutex_lock(&shrinker_lock);
        list_add_tail(&pt->shrinker_list, &shrinker_list);
-       spin_unlock(&shrinker_lock);
+       mutex_unlock(&shrinker_lock);
 }
 
 /* Remove a pool_type from the global shrinker list and free all pages */
@@ -259,9 +260,9 @@ static void ttm_pool_type_fini(struct ttm_pool_type *pt)
 {
        struct page *p, *tmp;
 
-       spin_lock(&shrinker_lock);
+       mutex_lock(&shrinker_lock);
        list_del(&pt->shrinker_list);
-       spin_unlock(&shrinker_lock);
+       mutex_unlock(&shrinker_lock);
 
        list_for_each_entry_safe(p, tmp, &pt->pages, lru)
                ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
@@ -302,7 +303,7 @@ static unsigned int ttm_pool_shrink(void)
        unsigned int num_freed;
        struct page *p;
 
-       spin_lock(&shrinker_lock);
+       mutex_lock(&shrinker_lock);
        pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
 
        p = ttm_pool_type_take(pt);
@@ -314,7 +315,7 @@ static unsigned int ttm_pool_shrink(void)
        }
 
        list_move_tail(&pt->shrinker_list, &shrinker_list);
-       spin_unlock(&shrinker_lock);
+       mutex_unlock(&shrinker_lock);
 
        return num_freed;
 }
@@ -507,7 +508,6 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
                        ttm_pool_type_init(&pool->caching[i].orders[j],
                                           pool, i, j);
 }
-EXPORT_SYMBOL(ttm_pool_init);
 
 /**
  * ttm_pool_fini - Cleanup a pool
@@ -525,7 +525,6 @@ void ttm_pool_fini(struct ttm_pool *pool)
                for (j = 0; j < MAX_ORDER; ++j)
                        ttm_pool_type_fini(&pool->caching[i].orders[j]);
 }
-EXPORT_SYMBOL(ttm_pool_fini);
 
 #ifdef CONFIG_DEBUG_FS
 /* Count the number of pages available in a pool_type */
@@ -566,7 +565,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
 {
        unsigned int i;
 
-       spin_lock(&shrinker_lock);
+       mutex_lock(&shrinker_lock);
 
        seq_puts(m, "\t ");
        for (i = 0; i < MAX_ORDER; ++i)
@@ -602,7 +601,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
        seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
                   atomic_long_read(&allocated_pages), page_pool_size);
 
-       spin_unlock(&shrinker_lock);
+       mutex_unlock(&shrinker_lock);
 
        return 0;
 }
@@ -646,7 +645,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
        if (!page_pool_size)
                page_pool_size = num_pages;
 
-       spin_lock_init(&shrinker_lock);
+       mutex_init(&shrinker_lock);
        INIT_LIST_HEAD(&shrinker_list);
 
        for (i = 0; i < MAX_ORDER; ++i) {
index 5551062..98cab0b 100644 (file)
@@ -1267,6 +1267,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
        card->dai_link = dai_link;
        card->num_links = 1;
        card->name = vc4_hdmi->variant->card_name;
+       card->driver_name = "vc4-hdmi";
        card->dev = dev;
        card->owner = THIS_MODULE;
 
index 7bdda1b..09fa75a 100644 (file)
@@ -899,6 +899,7 @@ config HID_SONY
        depends on NEW_LEDS
        depends on LEDS_CLASS
        select POWER_SUPPLY
+       select CRC32
        help
        Support for
 
index 3d1ccac..2ab38b7 100644 (file)
@@ -154,7 +154,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
 
        for (i = 0; i < cl_data->num_hid_devices; i++) {
                cl_data->sensor_virt_addr[i] = dma_alloc_coherent(dev, sizeof(int) * 8,
-                                                                 &cl_data->sensor_phys_addr[i],
+                                                                 &cl_data->sensor_dma_addr[i],
                                                                  GFP_KERNEL);
                cl_data->sensor_sts[i] = 0;
                cl_data->sensor_requested_cnt[i] = 0;
@@ -187,7 +187,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
                }
                info.period = msecs_to_jiffies(AMD_SFH_IDLE_LOOP);
                info.sensor_idx = cl_idx;
-               info.phys_address = cl_data->sensor_phys_addr[i];
+               info.dma_address = cl_data->sensor_dma_addr[i];
 
                cl_data->report_descr[i] = kzalloc(cl_data->report_descr_sz[i], GFP_KERNEL);
                if (!cl_data->report_descr[i]) {
@@ -212,7 +212,7 @@ cleanup:
                if (cl_data->sensor_virt_addr[i]) {
                        dma_free_coherent(&privdata->pdev->dev, 8 * sizeof(int),
                                          cl_data->sensor_virt_addr[i],
-                                         cl_data->sensor_phys_addr[i]);
+                                         cl_data->sensor_dma_addr[i]);
                }
                kfree(cl_data->feature_report[i]);
                kfree(cl_data->input_report[i]);
@@ -238,7 +238,7 @@ int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata)
                if (cl_data->sensor_virt_addr[i]) {
                        dma_free_coherent(&privdata->pdev->dev, 8 * sizeof(int),
                                          cl_data->sensor_virt_addr[i],
-                                         cl_data->sensor_phys_addr[i]);
+                                         cl_data->sensor_dma_addr[i]);
                }
        }
        kfree(cl_data);
index 6be0783..d7eac17 100644 (file)
@@ -27,7 +27,7 @@ struct amdtp_cl_data {
        int hid_descr_size[MAX_HID_DEVICES];
        phys_addr_t phys_addr_base;
        u32 *sensor_virt_addr[MAX_HID_DEVICES];
-       phys_addr_t sensor_phys_addr[MAX_HID_DEVICES];
+       dma_addr_t sensor_dma_addr[MAX_HID_DEVICES];
        u32 sensor_sts[MAX_HID_DEVICES];
        u32 sensor_requested_cnt[MAX_HID_DEVICES];
        u8 report_type[MAX_HID_DEVICES];
index a51c7b7..dbac166 100644 (file)
@@ -41,7 +41,7 @@ void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info i
        cmd_param.s.buf_layout = 1;
        cmd_param.s.buf_length = 16;
 
-       writeq(info.phys_address, privdata->mmio + AMD_C2P_MSG2);
+       writeq(info.dma_address, privdata->mmio + AMD_C2P_MSG2);
        writel(cmd_param.ul, privdata->mmio + AMD_C2P_MSG1);
        writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
 }
index e8be94f..8f8d19b 100644 (file)
@@ -67,7 +67,7 @@ struct amd_mp2_dev {
 struct amd_mp2_sensor_info {
        u8 sensor_idx;
        u32 period;
-       phys_addr_t phys_address;
+       dma_addr_t dma_address;
 };
 
 void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info);
index 4c5f236..5ba0aa1 100644 (file)
 #define USB_DEVICE_ID_TOSHIBA_CLICK_L9W        0x0401
 #define USB_DEVICE_ID_HP_X2            0x074d
 #define USB_DEVICE_ID_HP_X2_10_COVER   0x0755
+#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN   0x2706
 
 #define USB_VENDOR_ID_ELECOM           0x056e
 #define USB_DEVICE_ID_ELECOM_BM084     0x0061
index dc7f6b4..f23027d 100644 (file)
@@ -322,6 +322,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
                USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD),
          HID_BATTERY_QUIRK_IGNORE },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
+         HID_BATTERY_QUIRK_IGNORE },
        {}
 };
 
index 1ffcfc9..45e7e0b 100644 (file)
@@ -1869,6 +1869,10 @@ static const struct hid_device_id logi_dj_receivers[] = {
          HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
                0xc531),
         .driver_data = recvr_type_gaming_hidpp},
+       { /* Logitech G602 receiver (0xc537) */
+         HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+               0xc537),
+        .driver_data = recvr_type_gaming_hidpp},
        { /* Logitech lightspeed receiver (0xc539) */
          HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
                USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1),
index f857814..7eb9a6d 100644 (file)
@@ -4053,6 +4053,8 @@ static const struct hid_device_id hidpp_devices[] = {
        { /* MX Master mouse over Bluetooth */
          HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012),
          .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+       { /* MX Ergo trackball over Bluetooth */
+         HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01d) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e),
          .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
        { /* MX Master 3 mouse over Bluetooth */
index d670bcd..0743ef5 100644 (file)
@@ -2054,6 +2054,10 @@ static const struct hid_device_id mt_devices[] = {
                HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
                        USB_VENDOR_ID_SYNAPTICS, 0xce08) },
 
+       { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+               HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+                       USB_VENDOR_ID_SYNAPTICS, 0xce09) },
+
        /* TopSeed panels */
        { .driver_data = MT_CLS_TOPSEED,
                MT_USB_DEVICE(USB_VENDOR_ID_TOPSEED2,
index d26d8cd..56406ce 100644 (file)
@@ -90,7 +90,7 @@ static int uclogic_params_get_str_desc(__u8 **pbuf, struct hid_device *hdev,
                goto cleanup;
        } else if (rc < 0) {
                hid_err(hdev,
-                       "failed retrieving string descriptor #%hhu: %d\n",
+                       "failed retrieving string descriptor #%u: %d\n",
                        idx, rc);
                goto cleanup;
        }
index 4101268..4399d6c 100644 (file)
@@ -1482,7 +1482,7 @@ static void handler_return(struct wiimote_data *wdata, const __u8 *payload)
                wdata->state.cmd_err = err;
                wiimote_cmd_complete(wdata);
        } else if (err) {
-               hid_warn(wdata->hdev, "Remote error %hhu on req %hhu\n", err,
+               hid_warn(wdata->hdev, "Remote error %u on req %u\n", err,
                                                                        cmd);
        }
 }
index 045c464..e8acd23 100644 (file)
@@ -1270,6 +1270,37 @@ static int wacom_devm_sysfs_create_group(struct wacom *wacom,
                                               group);
 }
 
+static void wacom_devm_kfifo_release(struct device *dev, void *res)
+{
+       struct kfifo_rec_ptr_2 *devres = res;
+
+       kfifo_free(devres);
+}
+
+static int wacom_devm_kfifo_alloc(struct wacom *wacom)
+{
+       struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+       struct kfifo_rec_ptr_2 *pen_fifo = &wacom_wac->pen_fifo;
+       int error;
+
+       pen_fifo = devres_alloc(wacom_devm_kfifo_release,
+                             sizeof(struct kfifo_rec_ptr_2),
+                             GFP_KERNEL);
+
+       if (!pen_fifo)
+               return -ENOMEM;
+
+       error = kfifo_alloc(pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
+       if (error) {
+               devres_free(pen_fifo);
+               return error;
+       }
+
+       devres_add(&wacom->hdev->dev, pen_fifo);
+
+       return 0;
+}
+
 enum led_brightness wacom_leds_brightness_get(struct wacom_led *led)
 {
        struct wacom *wacom = led->wacom;
@@ -2724,7 +2755,7 @@ static int wacom_probe(struct hid_device *hdev,
        if (features->check_for_hid_type && features->hid_type != hdev->type)
                return -ENODEV;
 
-       error = kfifo_alloc(&wacom_wac->pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL);
+       error = wacom_devm_kfifo_alloc(wacom);
        if (error)
                return error;
 
@@ -2786,8 +2817,6 @@ static void wacom_remove(struct hid_device *hdev)
 
        if (wacom->wacom_wac.features.type != REMOTE)
                wacom_release_resources(wacom);
-
-       kfifo_free(&wacom_wac->pen_fifo);
 }
 
 #ifdef CONFIG_PM
index 502f8cd..d491fdc 100644 (file)
@@ -2550,7 +2550,6 @@ static void hv_kexec_handler(void)
        /* Make sure conn_state is set as hv_synic_cleanup checks for it */
        mb();
        cpuhp_remove_state(hyperv_cpuhp_online);
-       hyperv_cleanup();
 };
 
 static void hv_crash_handler(struct pt_regs *regs)
@@ -2566,7 +2565,6 @@ static void hv_crash_handler(struct pt_regs *regs)
        cpu = smp_processor_id();
        hv_stimer_cleanup(cpu);
        hv_synic_disable_regs(cpu);
-       hyperv_cleanup();
 };
 
 static int hv_synic_suspend(void)
index 9b30644..822c2e7 100644 (file)
@@ -222,7 +222,7 @@ static int amd_create_sensor(struct device *dev,
         */
        cpus = num_present_cpus() / num_siblings;
 
-       s_config = devm_kcalloc(dev, cpus + sockets,
+       s_config = devm_kcalloc(dev, cpus + sockets + 1,
                                sizeof(u32), GFP_KERNEL);
        if (!s_config)
                return -ENOMEM;
@@ -254,6 +254,7 @@ static int amd_create_sensor(struct device *dev,
                        scnprintf(label_l[i], 10, "Esocket%u", (i - cpus));
        }
 
+       s_config[i] = 0;
        return 0;
 }
 
index 777439f..111a91d 100644 (file)
@@ -334,8 +334,18 @@ static int pwm_fan_probe(struct platform_device *pdev)
 
        ctx->pwm_value = MAX_PWM;
 
-       /* Set duty cycle to maximum allowed and enable PWM output */
        pwm_init_state(ctx->pwm, &state);
+       /*
+        * __set_pwm assumes that MAX_PWM * (period - 1) fits into an unsigned
+        * long. Check this here to prevent the fan running at a too low
+        * frequency.
+        */
+       if (state.period > ULONG_MAX / MAX_PWM + 1) {
+               dev_err(dev, "Configured period too big\n");
+               return -EINVAL;
+       }
+
+       /* Set duty cycle to maximum allowed and enable PWM output */
        state.duty_cycle = ctx->pwm->args.period - 1;
        state.enabled = true;
 
index d4d60ad..ab1f39a 100644 (file)
@@ -1013,6 +1013,7 @@ config I2C_SIRF
 config I2C_SPRD
        tristate "Spreadtrum I2C interface"
        depends on I2C=y && (ARCH_SPRD || COMPILE_TEST)
+       depends on COMMON_CLK
        help
          If you say yes to this option, support will be included for the
          Spreadtrum I2C interface.
index ae90713..877fe37 100644 (file)
@@ -1449,7 +1449,7 @@ static int i801_add_mux(struct i801_priv *priv)
 
        /* Register GPIO descriptor lookup table */
        lookup = devm_kzalloc(dev,
-                             struct_size(lookup, table, mux_config->n_gpios),
+                             struct_size(lookup, table, mux_config->n_gpios + 1),
                              GFP_KERNEL);
        if (!lookup)
                return -ENOMEM;
index b444fbf..a8e8af5 100644 (file)
@@ -241,6 +241,19 @@ static struct imx_i2c_hwdata vf610_i2c_hwdata = {
 
 };
 
+static const struct platform_device_id imx_i2c_devtype[] = {
+       {
+               .name = "imx1-i2c",
+               .driver_data = (kernel_ulong_t)&imx1_i2c_hwdata,
+       }, {
+               .name = "imx21-i2c",
+               .driver_data = (kernel_ulong_t)&imx21_i2c_hwdata,
+       }, {
+               /* sentinel */
+       }
+};
+MODULE_DEVICE_TABLE(platform, imx_i2c_devtype);
+
 static const struct of_device_id i2c_imx_dt_ids[] = {
        { .compatible = "fsl,imx1-i2c", .data = &imx1_i2c_hwdata, },
        { .compatible = "fsl,imx21-i2c", .data = &imx21_i2c_hwdata, },
@@ -1330,7 +1343,11 @@ static int i2c_imx_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        match = device_get_match_data(&pdev->dev);
-       i2c_imx->hwdata = match;
+       if (match)
+               i2c_imx->hwdata = match;
+       else
+               i2c_imx->hwdata = (struct imx_i2c_hwdata *)
+                               platform_get_device_id(pdev)->driver_data;
 
        /* Setup i2c_imx driver structure */
        strlcpy(i2c_imx->adapter.name, pdev->name, sizeof(i2c_imx->adapter.name));
@@ -1498,6 +1515,7 @@ static struct platform_driver i2c_imx_driver = {
                .of_match_table = i2c_imx_dt_ids,
                .acpi_match_table = i2c_imx_acpi_ids,
        },
+       .id_table = imx_i2c_devtype,
 };
 
 static int __init i2c_adap_imx_init(void)
index 33de99b..0818d3e 100644 (file)
@@ -38,6 +38,7 @@
 #define I2C_IO_CONFIG_OPEN_DRAIN       0x0003
 #define I2C_IO_CONFIG_PUSH_PULL                0x0000
 #define I2C_SOFT_RST                   0x0001
+#define I2C_HANDSHAKE_RST              0x0020
 #define I2C_FIFO_ADDR_CLR              0x0001
 #define I2C_DELAY_LEN                  0x0002
 #define I2C_TIME_CLR_VALUE             0x0000
@@ -45,6 +46,7 @@
 #define I2C_WRRD_TRANAC_VALUE          0x0002
 #define I2C_RD_TRANAC_VALUE            0x0001
 #define I2C_SCL_MIS_COMP_VALUE         0x0000
+#define I2C_CHN_CLR_FLAG               0x0000
 
 #define I2C_DMA_CON_TX                 0x0000
 #define I2C_DMA_CON_RX                 0x0001
@@ -54,7 +56,9 @@
 #define I2C_DMA_START_EN               0x0001
 #define I2C_DMA_INT_FLAG_NONE          0x0000
 #define I2C_DMA_CLR_FLAG               0x0000
+#define I2C_DMA_WARM_RST               0x0001
 #define I2C_DMA_HARD_RST               0x0002
+#define I2C_DMA_HANDSHAKE_RST          0x0004
 
 #define MAX_SAMPLE_CNT_DIV             8
 #define MAX_STEP_CNT_DIV               64
@@ -475,11 +479,24 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
 {
        u16 control_reg;
 
-       writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
-       udelay(50);
-       writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
-
-       mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
+       if (i2c->dev_comp->dma_sync) {
+               writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST);
+               udelay(10);
+               writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+               udelay(10);
+               writel(I2C_DMA_HANDSHAKE_RST | I2C_DMA_HARD_RST,
+                      i2c->pdmabase + OFFSET_RST);
+               mtk_i2c_writew(i2c, I2C_HANDSHAKE_RST | I2C_SOFT_RST,
+                              OFFSET_SOFTRESET);
+               udelay(10);
+               writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+               mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_SOFTRESET);
+       } else {
+               writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
+               udelay(50);
+               writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+               mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
+       }
 
        /* Set ioconfig */
        if (i2c->use_push_pull)
index d960790..845eda7 100644 (file)
@@ -347,7 +347,7 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
                if (result)
                        return result;
                if (recv_len && i == 0) {
-                       if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
+                       if (data[i] > I2C_SMBUS_BLOCK_MAX)
                                return -EPROTO;
                        length += data[i];
                }
index 19cda67..2917fec 100644 (file)
@@ -72,6 +72,8 @@
 
 /* timeout (ms) for pm runtime autosuspend */
 #define SPRD_I2C_PM_TIMEOUT    1000
+/* timeout (ms) for transfer message */
+#define I2C_XFER_TIMEOUT       1000
 
 /* SPRD i2c data structure */
 struct sprd_i2c {
@@ -244,6 +246,7 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap,
                               struct i2c_msg *msg, bool is_last_msg)
 {
        struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
+       unsigned long time_left;
 
        i2c_dev->msg = msg;
        i2c_dev->buf = msg->buf;
@@ -273,7 +276,10 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap,
 
        sprd_i2c_opt_start(i2c_dev);
 
-       wait_for_completion(&i2c_dev->complete);
+       time_left = wait_for_completion_timeout(&i2c_dev->complete,
+                               msecs_to_jiffies(I2C_XFER_TIMEOUT));
+       if (!time_left)
+               return -ETIMEDOUT;
 
        return i2c_dev->err;
 }
index ec7a7e9..c0c7d01 100644 (file)
@@ -80,7 +80,7 @@ static int tegra_bpmp_xlate_flags(u16 flags, u16 *out)
                flags &= ~I2C_M_RECV_LEN;
        }
 
-       return (flags != 0) ? -EINVAL : 0;
+       return 0;
 }
 
 /**
index 6f08c0c..8b113ae 100644 (file)
@@ -326,6 +326,8 @@ static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned int reg)
        /* read back register to make sure that register writes completed */
        if (reg != I2C_TX_FIFO)
                readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+       else if (i2c_dev->is_vi)
+               readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, I2C_INT_STATUS));
 }
 
 static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned int reg)
@@ -339,6 +341,21 @@ static void i2c_writesl(struct tegra_i2c_dev *i2c_dev, void *data,
        writesl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len);
 }
 
+static void i2c_writesl_vi(struct tegra_i2c_dev *i2c_dev, void *data,
+                          unsigned int reg, unsigned int len)
+{
+       u32 *data32 = data;
+
+       /*
+        * VI I2C controller has known hardware bug where writes get stuck
+        * when immediate multiple writes happen to TX_FIFO register.
+        * Recommended software work around is to read I2C register after
+        * each write to TX_FIFO register to flush out the data.
+        */
+       while (len--)
+               i2c_writel(i2c_dev, *data32++, reg);
+}
+
 static void i2c_readsl(struct tegra_i2c_dev *i2c_dev, void *data,
                       unsigned int reg, unsigned int len)
 {
@@ -533,7 +550,7 @@ static int tegra_i2c_poll_register(struct tegra_i2c_dev *i2c_dev,
        void __iomem *addr = i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg);
        u32 val;
 
-       if (!i2c_dev->atomic_mode)
+       if (!i2c_dev->atomic_mode && !in_irq())
                return readl_relaxed_poll_timeout(addr, val, !(val & mask),
                                                  delay_us, timeout_us);
 
@@ -811,7 +828,10 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
                i2c_dev->msg_buf_remaining = buf_remaining;
                i2c_dev->msg_buf = buf + words_to_transfer * BYTES_PER_FIFO_WORD;
 
-               i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+               if (i2c_dev->is_vi)
+                       i2c_writesl_vi(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+               else
+                       i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
 
                buf += words_to_transfer * BYTES_PER_FIFO_WORD;
        }
index b11c8c4..e946903 100644 (file)
@@ -397,16 +397,12 @@ static int tiadc_iio_buffered_hardware_setup(struct device *dev,
        ret = devm_request_threaded_irq(dev, irq, pollfunc_th, pollfunc_bh,
                                flags, indio_dev->name, indio_dev);
        if (ret)
-               goto error_kfifo_free;
+               return ret;
 
        indio_dev->setup_ops = setup_ops;
        indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
 
        return 0;
-
-error_kfifo_free:
-       iio_kfifo_free(indio_dev->buffer);
-       return ret;
 }
 
 static const char * const chan_name_ain[] = {
index 0507283..2dbd264 100644 (file)
  * @sdata: Sensor data.
  *
  * returns:
- * 0 - no new samples available
- * 1 - new samples available
- * negative - error or unknown
+ * false - no new samples available or read error
+ * true - new samples available
  */
-static int st_sensors_new_samples_available(struct iio_dev *indio_dev,
-                                           struct st_sensor_data *sdata)
+static bool st_sensors_new_samples_available(struct iio_dev *indio_dev,
+                                            struct st_sensor_data *sdata)
 {
        int ret, status;
 
        /* How would I know if I can't check it? */
        if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr)
-               return -EINVAL;
+               return true;
 
        /* No scan mask, no interrupt */
        if (!indio_dev->active_scan_mask)
-               return 0;
+               return false;
 
        ret = regmap_read(sdata->regmap,
                          sdata->sensor_settings->drdy_irq.stat_drdy.addr,
                          &status);
        if (ret < 0) {
                dev_err(sdata->dev, "error checking samples available\n");
-               return ret;
+               return false;
        }
 
-       if (status & sdata->sensor_settings->drdy_irq.stat_drdy.mask)
-               return 1;
-
-       return 0;
+       return !!(status & sdata->sensor_settings->drdy_irq.stat_drdy.mask);
 }
 
 /**
@@ -180,9 +176,15 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
 
        /* Tell the interrupt handler that we're dealing with edges */
        if (irq_trig == IRQF_TRIGGER_FALLING ||
-           irq_trig == IRQF_TRIGGER_RISING)
+           irq_trig == IRQF_TRIGGER_RISING) {
+               if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr) {
+                       dev_err(&indio_dev->dev,
+                               "edge IRQ not supported w/o stat register.\n");
+                       err = -EOPNOTSUPP;
+                       goto iio_trigger_free;
+               }
                sdata->edge_irq = true;
-       else
+       } else {
                /*
                 * If we're not using edges (i.e. level interrupts) we
                 * just mask off the IRQ, handle one interrupt, then
@@ -190,6 +192,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
                 * interrupt handler top half again and start over.
                 */
                irq_trig |= IRQF_ONESHOT;
+       }
 
        /*
         * If the interrupt pin is Open Drain, by definition this
index 28921b6..e9297c2 100644 (file)
@@ -187,9 +187,9 @@ static ssize_t ad5504_write_dac_powerdown(struct iio_dev *indio_dev,
                return ret;
 
        if (pwr_down)
-               st->pwr_down_mask |= (1 << chan->channel);
-       else
                st->pwr_down_mask &= ~(1 << chan->channel);
+       else
+               st->pwr_down_mask |= (1 << chan->channel);
 
        ret = ad5504_spi_write(st, AD5504_ADDR_CTRL,
                                AD5504_DAC_PWRDWN_MODE(st->pwr_down_mode) |
index a2f8209..37fd0b6 100644 (file)
@@ -601,7 +601,7 @@ static int sx9310_read_thresh(struct sx9310_data *data,
                return ret;
 
        regval = FIELD_GET(SX9310_REG_PROX_CTRL8_9_PTHRESH_MASK, regval);
-       if (regval > ARRAY_SIZE(sx9310_pthresh_codes))
+       if (regval >= ARRAY_SIZE(sx9310_pthresh_codes))
                return -EINVAL;
 
        *val = sx9310_pthresh_codes[regval];
@@ -1305,7 +1305,8 @@ sx9310_get_default_reg(struct sx9310_data *data, int i,
                if (ret)
                        break;
 
-               pos = min(max(ilog2(pos), 3), 10) - 3;
+               /* Powers of 2, except for a gap between 16 and 64 */
+               pos = clamp(ilog2(pos), 3, 11) - (pos >= 32 ? 4 : 3);
                reg_def->def &= ~SX9310_REG_PROX_CTRL7_AVGPOSFILT_MASK;
                reg_def->def |= FIELD_PREP(SX9310_REG_PROX_CTRL7_AVGPOSFILT_MASK,
                                           pos);
index 503fe54..608ccb1 100644 (file)
@@ -248,6 +248,12 @@ static int mlx90632_set_meas_type(struct regmap *regmap, u8 type)
        if (ret < 0)
                return ret;
 
+       /*
+        * Give the mlx90632 some time to reset properly before sending a new I2C command
+        * if this is not done, the following I2C command(s) will not be accepted.
+        */
+       usleep_range(150, 200);
+
        ret = regmap_write_bits(regmap, MLX90632_REG_CONTROL,
                                 (MLX90632_CFG_MTYP_MASK | MLX90632_CFG_PWR_MASK),
                                 (MLX90632_MTYP_STATUS(type) | MLX90632_PWR_STATUS_HALT));
index 7f70e5a..97a77ea 100644 (file)
@@ -131,8 +131,10 @@ static ssize_t default_roce_mode_store(struct config_item *item,
                return ret;
 
        gid_type = ib_cache_gid_parse_type_str(buf);
-       if (gid_type < 0)
+       if (gid_type < 0) {
+               cma_configfs_params_put(cma_dev);
                return -EINVAL;
+       }
 
        ret = cma_set_default_gid_type(cma_dev, group->port_num, gid_type);
 
index e0a41c8..ff1551b 100644 (file)
@@ -254,6 +254,7 @@ void rdma_restrack_add(struct rdma_restrack_entry *res)
        } else {
                ret = xa_alloc_cyclic(&rt->xa, &res->id, res, xa_limit_32b,
                                      &rt->next_id, GFP_KERNEL);
+               ret = (ret < 0) ? ret : 0;
        }
 
 out:
index 7dab9a2..da2512c 100644 (file)
@@ -95,8 +95,6 @@ struct ucma_context {
        u64                     uid;
 
        struct list_head        list;
-       /* sync between removal event and id destroy, protected by file mut */
-       int                     destroying;
        struct work_struct      close_work;
 };
 
@@ -122,7 +120,7 @@ static DEFINE_XARRAY_ALLOC(ctx_table);
 static DEFINE_XARRAY_ALLOC(multicast_table);
 
 static const struct file_operations ucma_fops;
-static int __destroy_id(struct ucma_context *ctx);
+static int ucma_destroy_private_ctx(struct ucma_context *ctx);
 
 static inline struct ucma_context *_ucma_find_context(int id,
                                                      struct ucma_file *file)
@@ -179,19 +177,14 @@ static void ucma_close_id(struct work_struct *work)
 
        /* once all inflight tasks are finished, we close all underlying
         * resources. The context is still alive till its explicit destryoing
-        * by its creator.
+        * by its creator. This puts back the xarray's reference.
         */
        ucma_put_ctx(ctx);
        wait_for_completion(&ctx->comp);
        /* No new events will be generated after destroying the id. */
        rdma_destroy_id(ctx->cm_id);
 
-       /*
-        * At this point ctx->ref is zero so the only place the ctx can be is in
-        * a uevent or in __destroy_id(). Since the former doesn't touch
-        * ctx->cm_id and the latter sync cancels this, there is no races with
-        * this store.
-        */
+       /* Reading the cm_id without holding a positive ref is not allowed */
        ctx->cm_id = NULL;
 }
 
@@ -204,7 +197,6 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
                return NULL;
 
        INIT_WORK(&ctx->close_work, ucma_close_id);
-       refcount_set(&ctx->ref, 1);
        init_completion(&ctx->comp);
        /* So list_del() will work if we don't do ucma_finish_ctx() */
        INIT_LIST_HEAD(&ctx->list);
@@ -218,6 +210,13 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
        return ctx;
 }
 
+static void ucma_set_ctx_cm_id(struct ucma_context *ctx,
+                              struct rdma_cm_id *cm_id)
+{
+       refcount_set(&ctx->ref, 1);
+       ctx->cm_id = cm_id;
+}
+
 static void ucma_finish_ctx(struct ucma_context *ctx)
 {
        lockdep_assert_held(&ctx->file->mut);
@@ -303,7 +302,7 @@ static int ucma_connect_event_handler(struct rdma_cm_id *cm_id,
        ctx = ucma_alloc_ctx(listen_ctx->file);
        if (!ctx)
                goto err_backlog;
-       ctx->cm_id = cm_id;
+       ucma_set_ctx_cm_id(ctx, cm_id);
 
        uevent = ucma_create_uevent(listen_ctx, event);
        if (!uevent)
@@ -321,8 +320,7 @@ static int ucma_connect_event_handler(struct rdma_cm_id *cm_id,
        return 0;
 
 err_alloc:
-       xa_erase(&ctx_table, ctx->id);
-       kfree(ctx);
+       ucma_destroy_private_ctx(ctx);
 err_backlog:
        atomic_inc(&listen_ctx->backlog);
        /* Returning error causes the new ID to be destroyed */
@@ -356,8 +354,12 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
                wake_up_interruptible(&ctx->file->poll_wait);
        }
 
-       if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL && !ctx->destroying)
-               queue_work(system_unbound_wq, &ctx->close_work);
+       if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) {
+               xa_lock(&ctx_table);
+               if (xa_load(&ctx_table, ctx->id) == ctx)
+                       queue_work(system_unbound_wq, &ctx->close_work);
+               xa_unlock(&ctx_table);
+       }
        return 0;
 }
 
@@ -461,13 +463,12 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
                ret = PTR_ERR(cm_id);
                goto err1;
        }
-       ctx->cm_id = cm_id;
+       ucma_set_ctx_cm_id(ctx, cm_id);
 
        resp.id = ctx->id;
        if (copy_to_user(u64_to_user_ptr(cmd.response),
                         &resp, sizeof(resp))) {
-               xa_erase(&ctx_table, ctx->id);
-               __destroy_id(ctx);
+               ucma_destroy_private_ctx(ctx);
                return -EFAULT;
        }
 
@@ -477,8 +478,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
        return 0;
 
 err1:
-       xa_erase(&ctx_table, ctx->id);
-       kfree(ctx);
+       ucma_destroy_private_ctx(ctx);
        return ret;
 }
 
@@ -516,68 +516,73 @@ static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
        rdma_unlock_handler(mc->ctx->cm_id);
 }
 
-/*
- * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
- * this point, no new events will be reported from the hardware. However, we
- * still need to cleanup the UCMA context for this ID. Specifically, there
- * might be events that have not yet been consumed by the user space software.
- * mutex. After that we release them as needed.
- */
-static int ucma_free_ctx(struct ucma_context *ctx)
+static int ucma_cleanup_ctx_events(struct ucma_context *ctx)
 {
        int events_reported;
        struct ucma_event *uevent, *tmp;
        LIST_HEAD(list);
 
-       ucma_cleanup_multicast(ctx);
-
-       /* Cleanup events not yet reported to the user. */
+       /* Cleanup events not yet reported to the user. */
        mutex_lock(&ctx->file->mut);
        list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
-               if (uevent->ctx == ctx || uevent->conn_req_ctx == ctx)
+               if (uevent->ctx != ctx)
+                       continue;
+
+               if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST &&
+                   xa_cmpxchg(&ctx_table, uevent->conn_req_ctx->id,
+                              uevent->conn_req_ctx, XA_ZERO_ENTRY,
+                              GFP_KERNEL) == uevent->conn_req_ctx) {
                        list_move_tail(&uevent->list, &list);
+                       continue;
+               }
+               list_del(&uevent->list);
+               kfree(uevent);
        }
        list_del(&ctx->list);
        events_reported = ctx->events_reported;
        mutex_unlock(&ctx->file->mut);
 
        /*
-        * If this was a listening ID then any connections spawned from it
-        * that have not been delivered to userspace are cleaned up too.
-        * Must be done outside any locks.
+        * If this was a listening ID then any connections spawned from it that
+        * have not been delivered to userspace are cleaned up too. Must be done
+        * outside any locks.
         */
        list_for_each_entry_safe(uevent, tmp, &list, list) {
-               list_del(&uevent->list);
-               if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST &&
-                   uevent->conn_req_ctx != ctx)
-                       __destroy_id(uevent->conn_req_ctx);
+               ucma_destroy_private_ctx(uevent->conn_req_ctx);
                kfree(uevent);
        }
-
-       mutex_destroy(&ctx->mutex);
-       kfree(ctx);
        return events_reported;
 }
 
-static int __destroy_id(struct ucma_context *ctx)
+/*
+ * When this is called the xarray must have a XA_ZERO_ENTRY in the ctx->id (i.e.
+ * the ctx is not public to the user). This is either because:
+ *  - ucma_finish_ctx() hasn't been called
+ *  - xa_cmpxchg() succeeded in removing the entry (only one thread can succeed)
+ */
+static int ucma_destroy_private_ctx(struct ucma_context *ctx)
 {
+       int events_reported;
+
        /*
-        * If the refcount is already 0 then ucma_close_id() has already
-        * destroyed the cm_id, otherwise holding the refcount keeps cm_id
-        * valid. Prevent queue_work() from being called.
+        * Destroy the underlying cm_id. New work queuing is prevented now by
+        * the removal from the xarray. Once the work is cancelled ref will either
+        * be 0 because the work ran to completion and consumed the ref from the
+        * xarray, or it will be positive because we still have the ref from the
+        * xarray. This can also be 0 in cases where cm_id was never set
         */
-       if (refcount_inc_not_zero(&ctx->ref)) {
-               rdma_lock_handler(ctx->cm_id);
-               ctx->destroying = 1;
-               rdma_unlock_handler(ctx->cm_id);
-               ucma_put_ctx(ctx);
-       }
-
        cancel_work_sync(&ctx->close_work);
-       /* At this point it's guaranteed that there is no inflight closing task */
-       if (ctx->cm_id)
+       if (refcount_read(&ctx->ref))
                ucma_close_id(&ctx->close_work);
-       return ucma_free_ctx(ctx);
+
+       events_reported = ucma_cleanup_ctx_events(ctx);
+       ucma_cleanup_multicast(ctx);
+
+       WARN_ON(xa_cmpxchg(&ctx_table, ctx->id, XA_ZERO_ENTRY, NULL,
+                          GFP_KERNEL) != NULL);
+       mutex_destroy(&ctx->mutex);
+       kfree(ctx);
+       return events_reported;
 }
 
 static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
@@ -596,14 +601,17 @@ static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
 
        xa_lock(&ctx_table);
        ctx = _ucma_find_context(cmd.id, file);
-       if (!IS_ERR(ctx))
-               __xa_erase(&ctx_table, ctx->id);
+       if (!IS_ERR(ctx)) {
+               if (__xa_cmpxchg(&ctx_table, ctx->id, ctx, XA_ZERO_ENTRY,
+                                GFP_KERNEL) != ctx)
+                       ctx = ERR_PTR(-ENOENT);
+       }
        xa_unlock(&ctx_table);
 
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
 
-       resp.events_reported = __destroy_id(ctx);
+       resp.events_reported = ucma_destroy_private_ctx(ctx);
        if (copy_to_user(u64_to_user_ptr(cmd.response),
                         &resp, sizeof(resp)))
                ret = -EFAULT;
@@ -1777,15 +1785,16 @@ static int ucma_close(struct inode *inode, struct file *filp)
         * prevented by this being a FD release function. The list_add_tail() in
         * ucma_connect_event_handler() can run concurrently, however it only
         * adds to the list *after* a listening ID. By only reading the first of
-        * the list, and relying on __destroy_id() to block
+        * the list, and relying on ucma_destroy_private_ctx() to block
         * ucma_connect_event_handler(), no additional locking is needed.
         */
        while (!list_empty(&file->ctx_list)) {
                struct ucma_context *ctx = list_first_entry(
                        &file->ctx_list, struct ucma_context, list);
 
-               xa_erase(&ctx_table, ctx->id);
-               __destroy_id(ctx);
+               WARN_ON(xa_cmpxchg(&ctx_table, ctx->id, ctx, XA_ZERO_ENTRY,
+                                  GFP_KERNEL) != ctx);
+               ucma_destroy_private_ctx(ctx);
        }
        kfree(file);
        return 0;
index 7ca4112..917338d 100644 (file)
@@ -135,7 +135,7 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
         */
        if (mask)
                pgsz_bitmap &= GENMASK(count_trailing_zeros(mask), 0);
-       return rounddown_pow_of_two(pgsz_bitmap);
+       return pgsz_bitmap ? rounddown_pow_of_two(pgsz_bitmap) : 0;
 }
 EXPORT_SYMBOL(ib_umem_find_best_pgsz);
 
index 3bae9ba..d26f3f3 100644 (file)
@@ -3956,7 +3956,7 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
 
        err = set_has_smi_cap(dev);
        if (err)
-               return err;
+               goto err_mp;
 
        if (!mlx5_core_mp_enabled(mdev)) {
                for (i = 1; i <= dev->num_ports; i++) {
@@ -4319,7 +4319,7 @@ static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
 
        err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
        if (err)
-               mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+               mlx5_free_bfreg(dev->mdev, &dev->bfreg);
 
        return err;
 }
index bc98bd9..3acb5c1 100644 (file)
@@ -434,9 +434,9 @@ static void ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
                pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
                       __func__, dev->id, pd->id);
        }
-       kfree(uctx->cntxt_pd);
        uctx->cntxt_pd = NULL;
        _ocrdma_dealloc_pd(dev, pd);
+       kfree(pd);
 }
 
 static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
index 38a3777..3705c6b 100644 (file)
@@ -214,6 +214,7 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
 
                }
                usnic_uiom_free_dev_list(dev_list);
+               dev_list = NULL;
        }
 
        /* Try to find resources on an unused vf */
@@ -239,6 +240,8 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
 qp_grp_check:
        if (IS_ERR_OR_NULL(qp_grp)) {
                usnic_err("Failed to allocate qp_grp\n");
+               if (usnic_ib_share_vf)
+                       usnic_uiom_free_dev_list(dev_list);
                return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
        }
        return qp_grp;
index 41dba70..c770951 100644 (file)
@@ -96,9 +96,10 @@ static int imx_icc_node_init_qos(struct icc_provider *provider,
                        return -ENODEV;
                }
                /* Allow scaling to be disabled on a per-node basis */
-               if (!dn || !of_device_is_available(dn)) {
+               if (!of_device_is_available(dn)) {
                        dev_warn(dev, "Missing property %s, skip scaling %s\n",
                                 adj->phandle_name, node->name);
+                       of_node_put(dn);
                        return 0;
                }
 
index ba43a15..d7768d3 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/interconnect-provider.h>
 #include <dt-bindings/interconnect/imx8mq.h>
 
 #include "imx.h"
@@ -94,6 +95,7 @@ static struct platform_driver imx8mq_icc_driver = {
        .remove = imx8mq_icc_remove,
        .driver = {
                .name = "imx8mq-interconnect",
+               .sync_state = icc_sync_state,
        },
 };
 
index a8f93ba..b3fb5b0 100644 (file)
@@ -42,13 +42,23 @@ config INTERCONNECT_QCOM_QCS404
          This is a driver for the Qualcomm Network-on-Chip on qcs404-based
          platforms.
 
+config INTERCONNECT_QCOM_RPMH_POSSIBLE
+       tristate
+       default INTERCONNECT_QCOM
+       depends on QCOM_RPMH || (COMPILE_TEST && !QCOM_RPMH)
+       depends on QCOM_COMMAND_DB || (COMPILE_TEST && !QCOM_COMMAND_DB)
+       depends on OF || COMPILE_TEST
+       help
+         Compile-testing RPMH drivers is possible on other platforms,
+         but in order to avoid link failures, drivers must not be built-in
+         when QCOM_RPMH or QCOM_COMMAND_DB are loadable modules
+
 config INTERCONNECT_QCOM_RPMH
        tristate
 
 config INTERCONNECT_QCOM_SC7180
        tristate "Qualcomm SC7180 interconnect driver"
-       depends on INTERCONNECT_QCOM
-       depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+       depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
        select INTERCONNECT_QCOM_RPMH
        select INTERCONNECT_QCOM_BCM_VOTER
        help
@@ -57,8 +67,7 @@ config INTERCONNECT_QCOM_SC7180
 
 config INTERCONNECT_QCOM_SDM845
        tristate "Qualcomm SDM845 interconnect driver"
-       depends on INTERCONNECT_QCOM
-       depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+       depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
        select INTERCONNECT_QCOM_RPMH
        select INTERCONNECT_QCOM_BCM_VOTER
        help
@@ -67,8 +76,7 @@ config INTERCONNECT_QCOM_SDM845
 
 config INTERCONNECT_QCOM_SM8150
        tristate "Qualcomm SM8150 interconnect driver"
-       depends on INTERCONNECT_QCOM
-       depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+       depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
        select INTERCONNECT_QCOM_RPMH
        select INTERCONNECT_QCOM_BCM_VOTER
        help
@@ -77,8 +85,7 @@ config INTERCONNECT_QCOM_SM8150
 
 config INTERCONNECT_QCOM_SM8250
        tristate "Qualcomm SM8250 interconnect driver"
-       depends on INTERCONNECT_QCOM
-       depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+       depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
        select INTERCONNECT_QCOM_RPMH
        select INTERCONNECT_QCOM_BCM_VOTER
        help
index f54cd79..6a1f704 100644 (file)
@@ -1973,8 +1973,6 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
                return r;
        }
 
-       iommu->int_enabled = true;
-
        return 0;
 }
 
@@ -2169,6 +2167,7 @@ static int iommu_init_irq(struct amd_iommu *iommu)
        if (ret)
                return ret;
 
+       iommu->int_enabled = true;
 enable_faults:
        iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
 
index 7e2c445..f0adbc4 100644 (file)
@@ -3854,6 +3854,9 @@ static int irq_remapping_select(struct irq_domain *d, struct irq_fwspec *fwspec,
        struct amd_iommu *iommu;
        int devid = -1;
 
+       if (!amd_iommu_irq_remap)
+               return 0;
+
        if (x86_fwspec_is_ioapic(fwspec))
                devid = get_ioapic_devid(fwspec->param[0]);
        else if (x86_fwspec_is_hpet(fwspec))
index 5dff7ff..bcda170 100644 (file)
@@ -196,6 +196,8 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
 
                set_bit(qsmmu->bypass_cbndx, smmu->context_map);
 
+               arm_smmu_cb_write(smmu, qsmmu->bypass_cbndx, ARM_SMMU_CB_SCTLR, 0);
+
                reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
                arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
        }
@@ -323,7 +325,9 @@ static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
 }
 
 static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
+       { .compatible = "qcom,msm8998-smmu-v2" },
        { .compatible = "qcom,sc7180-smmu-500" },
+       { .compatible = "qcom,sdm630-smmu-v2" },
        { .compatible = "qcom,sdm845-smmu-500" },
        { .compatible = "qcom,sm8150-smmu-500" },
        { .compatible = "qcom,sm8250-smmu-500" },
index f0305e6..4078358 100644 (file)
@@ -863,33 +863,6 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
        unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
        int i, count = 0;
 
-       /*
-        * The Intel graphic driver is used to assume that the returned
-        * sg list is not combound. This blocks the efforts of converting
-        * Intel IOMMU driver to dma-iommu api's. Add this quirk to make the
-        * device driver work and should be removed once it's fixed in i915
-        * driver.
-        */
-       if (IS_ENABLED(CONFIG_DRM_I915) && dev_is_pci(dev) &&
-           to_pci_dev(dev)->vendor == PCI_VENDOR_ID_INTEL &&
-           (to_pci_dev(dev)->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
-               for_each_sg(sg, s, nents, i) {
-                       unsigned int s_iova_off = sg_dma_address(s);
-                       unsigned int s_length = sg_dma_len(s);
-                       unsigned int s_iova_len = s->length;
-
-                       s->offset += s_iova_off;
-                       s->length = s_length;
-                       sg_dma_address(s) = dma_addr + s_iova_off;
-                       sg_dma_len(s) = s_length;
-                       dma_addr += s_iova_len;
-
-                       pr_info_once("sg combining disabled due to i915 driver\n");
-               }
-
-               return nents;
-       }
-
        for_each_sg(sg, s, nents, i) {
                /* Restore this segment's original unaligned fields first */
                unsigned int s_iova_off = sg_dma_address(s);
index b46dbfa..004feae 100644 (file)
@@ -1461,8 +1461,8 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
                int mask = ilog2(__roundup_pow_of_two(npages));
                unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
 
-               if (WARN_ON_ONCE(!ALIGN(addr, align)))
-                       addr &= ~(align - 1);
+               if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
+                       addr = ALIGN_DOWN(addr, align);
 
                desc.qw0 = QI_EIOTLB_PASID(pasid) |
                                QI_EIOTLB_DID(did) |
index 788119c..f665322 100644 (file)
@@ -38,7 +38,6 @@
 #include <linux/dmi.h>
 #include <linux/pci-ats.h>
 #include <linux/memblock.h>
-#include <linux/dma-map-ops.h>
 #include <linux/dma-direct.h>
 #include <linux/crash_dump.h>
 #include <linux/numa.h>
@@ -719,6 +718,8 @@ static int domain_update_device_node(struct dmar_domain *domain)
        return nid;
 }
 
+static void domain_update_iotlb(struct dmar_domain *domain);
+
 /* Some capabilities may be different across iommus */
 static void domain_update_iommu_cap(struct dmar_domain *domain)
 {
@@ -744,6 +745,8 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
                domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
        else
                domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
+
+       domain_update_iotlb(domain);
 }
 
 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
@@ -1464,17 +1467,22 @@ static void domain_update_iotlb(struct dmar_domain *domain)
 
        assert_spin_locked(&device_domain_lock);
 
-       list_for_each_entry(info, &domain->devices, link) {
-               struct pci_dev *pdev;
-
-               if (!info->dev || !dev_is_pci(info->dev))
-                       continue;
-
-               pdev = to_pci_dev(info->dev);
-               if (pdev->ats_enabled) {
+       list_for_each_entry(info, &domain->devices, link)
+               if (info->ats_enabled) {
                        has_iotlb_device = true;
                        break;
                }
+
+       if (!has_iotlb_device) {
+               struct subdev_domain_info *sinfo;
+
+               list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+                       info = get_domain_info(sinfo->pdev);
+                       if (info && info->ats_enabled) {
+                               has_iotlb_device = true;
+                               break;
+                       }
+               }
        }
 
        domain->has_iotlb_device = has_iotlb_device;
@@ -1555,25 +1563,37 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
 #endif
 }
 
+static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
+                                   u64 addr, unsigned int mask)
+{
+       u16 sid, qdep;
+
+       if (!info || !info->ats_enabled)
+               return;
+
+       sid = info->bus << 8 | info->devfn;
+       qdep = info->ats_qdep;
+       qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
+                          qdep, addr, mask);
+}
+
 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
                                  u64 addr, unsigned mask)
 {
-       u16 sid, qdep;
        unsigned long flags;
        struct device_domain_info *info;
+       struct subdev_domain_info *sinfo;
 
        if (!domain->has_iotlb_device)
                return;
 
        spin_lock_irqsave(&device_domain_lock, flags);
-       list_for_each_entry(info, &domain->devices, link) {
-               if (!info->ats_enabled)
-                       continue;
+       list_for_each_entry(info, &domain->devices, link)
+               __iommu_flush_dev_iotlb(info, addr, mask);
 
-               sid = info->bus << 8 | info->devfn;
-               qdep = info->ats_qdep;
-               qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
-                               qdep, addr, mask);
+       list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+               info = get_domain_info(sinfo->pdev);
+               __iommu_flush_dev_iotlb(info, addr, mask);
        }
        spin_unlock_irqrestore(&device_domain_lock, flags);
 }
@@ -1877,6 +1897,7 @@ static struct dmar_domain *alloc_domain(int flags)
                domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
        domain->has_iotlb_device = false;
        INIT_LIST_HEAD(&domain->devices);
+       INIT_LIST_HEAD(&domain->subdevices);
 
        return domain;
 }
@@ -2547,7 +2568,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
        info->iommu = iommu;
        info->pasid_table = NULL;
        info->auxd_enabled = 0;
-       INIT_LIST_HEAD(&info->auxiliary_domains);
+       INIT_LIST_HEAD(&info->subdevices);
 
        if (dev && dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(info->dev);
@@ -4475,33 +4496,61 @@ is_aux_domain(struct device *dev, struct iommu_domain *domain)
                        domain->type == IOMMU_DOMAIN_UNMANAGED;
 }
 
-static void auxiliary_link_device(struct dmar_domain *domain,
-                                 struct device *dev)
+static inline struct subdev_domain_info *
+lookup_subdev_info(struct dmar_domain *domain, struct device *dev)
+{
+       struct subdev_domain_info *sinfo;
+
+       if (!list_empty(&domain->subdevices)) {
+               list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+                       if (sinfo->pdev == dev)
+                               return sinfo;
+               }
+       }
+
+       return NULL;
+}
+
+static int auxiliary_link_device(struct dmar_domain *domain,
+                                struct device *dev)
 {
        struct device_domain_info *info = get_domain_info(dev);
+       struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
 
        assert_spin_locked(&device_domain_lock);
        if (WARN_ON(!info))
-               return;
+               return -EINVAL;
 
-       domain->auxd_refcnt++;
-       list_add(&domain->auxd, &info->auxiliary_domains);
+       if (!sinfo) {
+               sinfo = kzalloc(sizeof(*sinfo), GFP_ATOMIC);
+               sinfo->domain = domain;
+               sinfo->pdev = dev;
+               list_add(&sinfo->link_phys, &info->subdevices);
+               list_add(&sinfo->link_domain, &domain->subdevices);
+       }
+
+       return ++sinfo->users;
 }
 
-static void auxiliary_unlink_device(struct dmar_domain *domain,
-                                   struct device *dev)
+static int auxiliary_unlink_device(struct dmar_domain *domain,
+                                  struct device *dev)
 {
        struct device_domain_info *info = get_domain_info(dev);
+       struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
+       int ret;
 
        assert_spin_locked(&device_domain_lock);
-       if (WARN_ON(!info))
-               return;
+       if (WARN_ON(!info || !sinfo || sinfo->users <= 0))
+               return -EINVAL;
 
-       list_del(&domain->auxd);
-       domain->auxd_refcnt--;
+       ret = --sinfo->users;
+       if (!ret) {
+               list_del(&sinfo->link_phys);
+               list_del(&sinfo->link_domain);
+               kfree(sinfo);
+       }
 
-       if (!domain->auxd_refcnt && domain->default_pasid > 0)
-               ioasid_put(domain->default_pasid);
+       return ret;
 }
 
 static int aux_domain_add_dev(struct dmar_domain *domain,
@@ -4530,6 +4579,19 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
        }
 
        spin_lock_irqsave(&device_domain_lock, flags);
+       ret = auxiliary_link_device(domain, dev);
+       if (ret <= 0)
+               goto link_failed;
+
+       /*
+        * Subdevices from the same physical device can be attached to the
+        * same domain. For such cases, only the first subdevice attachment
+        * needs to go through the full steps in this function. So if ret >
+        * 1, just goto out.
+        */
+       if (ret > 1)
+               goto out;
+
        /*
         * iommu->lock must be held to attach domain to iommu and setup the
         * pasid entry for second level translation.
@@ -4548,10 +4610,9 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
                                                     domain->default_pasid);
        if (ret)
                goto table_failed;
-       spin_unlock(&iommu->lock);
-
-       auxiliary_link_device(domain, dev);
 
+       spin_unlock(&iommu->lock);
+out:
        spin_unlock_irqrestore(&device_domain_lock, flags);
 
        return 0;
@@ -4560,8 +4621,10 @@ table_failed:
        domain_detach_iommu(domain, iommu);
 attach_failed:
        spin_unlock(&iommu->lock);
+       auxiliary_unlink_device(domain, dev);
+link_failed:
        spin_unlock_irqrestore(&device_domain_lock, flags);
-       if (!domain->auxd_refcnt && domain->default_pasid > 0)
+       if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
                ioasid_put(domain->default_pasid);
 
        return ret;
@@ -4581,14 +4644,18 @@ static void aux_domain_remove_dev(struct dmar_domain *domain,
        info = get_domain_info(dev);
        iommu = info->iommu;
 
-       auxiliary_unlink_device(domain, dev);
-
-       spin_lock(&iommu->lock);
-       intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid, false);
-       domain_detach_iommu(domain, iommu);
-       spin_unlock(&iommu->lock);
+       if (!auxiliary_unlink_device(domain, dev)) {
+               spin_lock(&iommu->lock);
+               intel_pasid_tear_down_entry(iommu, dev,
+                                           domain->default_pasid, false);
+               domain_detach_iommu(domain, iommu);
+               spin_unlock(&iommu->lock);
+       }
 
        spin_unlock_irqrestore(&device_domain_lock, flags);
+
+       if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
+               ioasid_put(domain->default_pasid);
 }
 
 static int prepare_domain_attach_device(struct iommu_domain *domain,
index aeffda9..685200a 100644 (file)
@@ -1353,6 +1353,8 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
                irq_data = irq_domain_get_irq_data(domain, virq + i);
                irq_cfg = irqd_cfg(irq_data);
                if (!irq_data || !irq_cfg) {
+                       if (!i)
+                               kfree(data);
                        ret = -EINVAL;
                        goto out_free_data;
                }
index 4fa248b..18a9f05 100644 (file)
@@ -118,8 +118,10 @@ void intel_svm_check(struct intel_iommu *iommu)
        iommu->flags |= VTD_FLAG_SVM_CAPABLE;
 }
 
-static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
-                               unsigned long address, unsigned long pages, int ih)
+static void __flush_svm_range_dev(struct intel_svm *svm,
+                                 struct intel_svm_dev *sdev,
+                                 unsigned long address,
+                                 unsigned long pages, int ih)
 {
        struct qi_desc desc;
 
@@ -142,7 +144,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
        }
        desc.qw2 = 0;
        desc.qw3 = 0;
-       qi_submit_sync(svm->iommu, &desc, 1, 0);
+       qi_submit_sync(sdev->iommu, &desc, 1, 0);
 
        if (sdev->dev_iotlb) {
                desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) |
@@ -166,7 +168,23 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
                }
                desc.qw2 = 0;
                desc.qw3 = 0;
-               qi_submit_sync(svm->iommu, &desc, 1, 0);
+               qi_submit_sync(sdev->iommu, &desc, 1, 0);
+       }
+}
+
+static void intel_flush_svm_range_dev(struct intel_svm *svm,
+                                     struct intel_svm_dev *sdev,
+                                     unsigned long address,
+                                     unsigned long pages, int ih)
+{
+       unsigned long shift = ilog2(__roundup_pow_of_two(pages));
+       unsigned long align = (1ULL << (VTD_PAGE_SHIFT + shift));
+       unsigned long start = ALIGN_DOWN(address, align);
+       unsigned long end = ALIGN(address + (pages << VTD_PAGE_SHIFT), align);
+
+       while (start < end) {
+               __flush_svm_range_dev(svm, sdev, start, align >> VTD_PAGE_SHIFT, ih);
+               start += align;
        }
 }
 
@@ -211,7 +229,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
         */
        rcu_read_lock();
        list_for_each_entry_rcu(sdev, &svm->devs, list)
-               intel_pasid_tear_down_entry(svm->iommu, sdev->dev,
+               intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
                                            svm->pasid, true);
        rcu_read_unlock();
 
@@ -281,6 +299,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
        struct dmar_domain *dmar_domain;
        struct device_domain_info *info;
        struct intel_svm *svm = NULL;
+       unsigned long iflags;
        int ret = 0;
 
        if (WARN_ON(!iommu) || !data)
@@ -363,6 +382,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
        }
        sdev->dev = dev;
        sdev->sid = PCI_DEVID(info->bus, info->devfn);
+       sdev->iommu = iommu;
 
        /* Only count users if device has aux domains */
        if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
@@ -381,12 +401,12 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
         * each bind of a new device even with an existing PASID, we need to
         * call the nested mode setup function here.
         */
-       spin_lock(&iommu->lock);
+       spin_lock_irqsave(&iommu->lock, iflags);
        ret = intel_pasid_setup_nested(iommu, dev,
                                       (pgd_t *)(uintptr_t)data->gpgd,
                                       data->hpasid, &data->vendor.vtd, dmar_domain,
                                       data->addr_width);
-       spin_unlock(&iommu->lock);
+       spin_unlock_irqrestore(&iommu->lock, iflags);
        if (ret) {
                dev_err_ratelimited(dev, "Failed to set up PASID %llu in nested mode, Err %d\n",
                                    data->hpasid, ret);
@@ -486,6 +506,7 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
        struct device_domain_info *info;
        struct intel_svm_dev *sdev;
        struct intel_svm *svm = NULL;
+       unsigned long iflags;
        int pasid_max;
        int ret;
 
@@ -546,6 +567,7 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
                goto out;
        }
        sdev->dev = dev;
+       sdev->iommu = iommu;
 
        ret = intel_iommu_enable_pasid(iommu, dev);
        if (ret) {
@@ -575,7 +597,6 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
                        kfree(sdev);
                        goto out;
                }
-               svm->iommu = iommu;
 
                if (pasid_max > intel_pasid_max_id)
                        pasid_max = intel_pasid_max_id;
@@ -605,14 +626,14 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
                        }
                }
 
-               spin_lock(&iommu->lock);
+               spin_lock_irqsave(&iommu->lock, iflags);
                ret = intel_pasid_setup_first_level(iommu, dev,
                                mm ? mm->pgd : init_mm.pgd,
                                svm->pasid, FLPT_DEFAULT_DID,
                                (mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
                                (cpu_feature_enabled(X86_FEATURE_LA57) ?
                                 PASID_FLAG_FL5LP : 0));
-               spin_unlock(&iommu->lock);
+               spin_unlock_irqrestore(&iommu->lock, iflags);
                if (ret) {
                        if (mm)
                                mmu_notifier_unregister(&svm->notifier, mm);
@@ -632,14 +653,14 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
                 * Binding a new device with existing PASID, need to setup
                 * the PASID entry.
                 */
-               spin_lock(&iommu->lock);
+               spin_lock_irqsave(&iommu->lock, iflags);
                ret = intel_pasid_setup_first_level(iommu, dev,
                                                mm ? mm->pgd : init_mm.pgd,
                                                svm->pasid, FLPT_DEFAULT_DID,
                                                (mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
                                                (cpu_feature_enabled(X86_FEATURE_LA57) ?
                                                PASID_FLAG_FL5LP : 0));
-               spin_unlock(&iommu->lock);
+               spin_unlock_irqrestore(&iommu->lock, iflags);
                if (ret) {
                        kfree(sdev);
                        goto out;
index 4bb3293..d20b8b3 100644 (file)
@@ -358,7 +358,7 @@ static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
  * @iovad: - iova domain in question.
  * @pfn: - page frame number
  * This function finds and returns an iova belonging to the
- * given doamin which matches the given pfn.
+ * given domain which matches the given pfn.
  */
 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
 {
@@ -601,7 +601,7 @@ void queue_iova(struct iova_domain *iovad,
 EXPORT_SYMBOL_GPL(queue_iova);
 
 /**
- * put_iova_domain - destroys the iova doamin
+ * put_iova_domain - destroys the iova domain
  * @iovad: - iova domain in question.
  * All the iova's in that domain are destroyed.
  */
@@ -712,9 +712,9 @@ EXPORT_SYMBOL_GPL(reserve_iova);
 
 /**
  * copy_reserved_iova - copies the reserved between domains
- * @from: - source doamin from where to copy
+ * @from: - source domain from where to copy
  * @to: - destination domin where to copy
- * This function copies reserved iova's from one doamin to
+ * This function copies reserved iova's from one domain to
  * other.
  */
 void
index 94920a5..b147f22 100644 (file)
@@ -493,8 +493,9 @@ config TI_SCI_INTA_IRQCHIP
          TI System Controller, say Y here. Otherwise, say N.
 
 config TI_PRUSS_INTC
-       tristate "TI PRU-ICSS Interrupt Controller"
-       depends on ARCH_DAVINCI || SOC_AM33XX || SOC_AM43XX || SOC_DRA7XX || ARCH_KEYSTONE || ARCH_K3
+       tristate
+       depends on TI_PRUSS
+       default TI_PRUSS
        select IRQ_DOMAIN
        help
          This enables support for the PRU-ICSS Local Interrupt Controller
index 5f5eb88..25c9a9c 100644 (file)
@@ -167,7 +167,7 @@ static void bcm2836_arm_irqchip_handle_ipi(struct irq_desc *desc)
        chained_irq_exit(chip, desc);
 }
 
-static void bcm2836_arm_irqchip_ipi_eoi(struct irq_data *d)
+static void bcm2836_arm_irqchip_ipi_ack(struct irq_data *d)
 {
        int cpu = smp_processor_id();
 
@@ -195,7 +195,7 @@ static struct irq_chip bcm2836_arm_irqchip_ipi = {
        .name           = "IPI",
        .irq_mask       = bcm2836_arm_irqchip_dummy_op,
        .irq_unmask     = bcm2836_arm_irqchip_dummy_op,
-       .irq_eoi        = bcm2836_arm_irqchip_ipi_eoi,
+       .irq_ack        = bcm2836_arm_irqchip_ipi_ack,
        .ipi_send_mask  = bcm2836_arm_irqchip_ipi_send_mask,
 };
 
index 9ed1bc4..09b91b8 100644 (file)
@@ -142,8 +142,8 @@ static void liointc_resume(struct irq_chip_generic *gc)
 
 static const char * const parent_names[] = {"int0", "int1", "int2", "int3"};
 
-int __init liointc_of_init(struct device_node *node,
-                               struct device_node *parent)
+static int __init liointc_of_init(struct device_node *node,
+                                 struct device_node *parent)
 {
        struct irq_chip_generic *gc;
        struct irq_domain *domain;
index 95d4fd8..0bbb0b2 100644 (file)
@@ -197,6 +197,13 @@ static int mips_cpu_ipi_alloc(struct irq_domain *domain, unsigned int virq,
                if (ret)
                        return ret;
 
+               ret = irq_domain_set_hwirq_and_chip(domain->parent, virq + i, hwirq,
+                                                   &mips_mt_cpu_irq_controller,
+                                                   NULL);
+
+               if (ret)
+                       return ret;
+
                ret = irq_set_irq_type(virq + i, IRQ_TYPE_LEVEL_HIGH);
                if (ret)
                        return ret;
index 0aa50d0..fbb3544 100644 (file)
@@ -66,7 +66,7 @@ static int sl28cpld_intc_probe(struct platform_device *pdev)
        irqchip->chip.num_regs = 1;
        irqchip->chip.status_base = base + INTC_IP;
        irqchip->chip.mask_base = base + INTC_IE;
-       irqchip->chip.mask_invert = true,
+       irqchip->chip.mask_invert = true;
        irqchip->chip.ack_base = base + INTC_IP;
 
        return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev),
index 26cf0ac..c9a53c2 100644 (file)
@@ -13,6 +13,7 @@ if MISDN != n
 config MISDN_DSP
        tristate "Digital Audio Processing of transparent data"
        depends on MISDN
+       select BITREVERSE
        help
          Enable support for digital audio processing capability.
 
index 8f39f9b..4c2ce21 100644 (file)
@@ -19,6 +19,7 @@ if NVM
 
 config NVM_PBLK
        tristate "Physical Block Device Open-Channel SSD target"
+       select CRC32
        help
          Allows an open-channel SSD to be exposed as a block device to the
          host. The target assumes the device exposes raw flash and must be
index b7e2d96..9e44c09 100644 (file)
@@ -605,6 +605,7 @@ config DM_INTEGRITY
        select BLK_DEV_INTEGRITY
        select DM_BUFIO
        select CRYPTO
+       select CRYPTO_SKCIPHER
        select ASYNC_XOR
        help
          This device-mapper target emulates a block device that has
@@ -622,6 +623,7 @@ config DM_ZONED
        tristate "Drive-managed zoned block device target support"
        depends on BLK_DEV_DM
        depends on BLK_DEV_ZONED
+       select CRC32
        help
          This device-mapper target takes a host-managed or host-aware zoned
          block device and exposes most of its capacity as a regular block
index 6469223..d636b7b 100644 (file)
@@ -17,7 +17,7 @@ struct feature {
 };
 
 static struct feature feature_list[] = {
-       {BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LARGE_BUCKET,
+       {BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE,
                "large_bucket"},
        {0, 0, 0 },
 };
index a1653c4..84fc2c0 100644 (file)
 
 /* Feature set definition */
 /* Incompat feature set */
-#define BCH_FEATURE_INCOMPAT_LARGE_BUCKET      0x0001 /* 32bit bucket size */
+/* 32bit bucket size, obsoleted */
+#define BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET         0x0001
+/* real bucket size is (1 << bucket_size) */
+#define BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE     0x0002
 
-#define BCH_FEATURE_COMPAT_SUUP                0
-#define BCH_FEATURE_RO_COMPAT_SUUP     0
-#define BCH_FEATURE_INCOMPAT_SUUP      BCH_FEATURE_INCOMPAT_LARGE_BUCKET
+#define BCH_FEATURE_COMPAT_SUPP                0
+#define BCH_FEATURE_RO_COMPAT_SUPP     0
+#define BCH_FEATURE_INCOMPAT_SUPP      (BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET| \
+                                        BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE)
 
 #define BCH_HAS_COMPAT_FEATURE(sb, mask) \
                ((sb)->feature_compat & (mask))
@@ -77,7 +81,23 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \
                ~BCH##_FEATURE_INCOMPAT_##flagname; \
 }
 
-BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LARGE_BUCKET);
+BCH_FEATURE_INCOMPAT_FUNCS(obso_large_bucket, OBSO_LARGE_BUCKET);
+BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LOG_LARGE_BUCKET_SIZE);
+
+static inline bool bch_has_unknown_compat_features(struct cache_sb *sb)
+{
+       return ((sb->feature_compat & ~BCH_FEATURE_COMPAT_SUPP) != 0);
+}
+
+static inline bool bch_has_unknown_ro_compat_features(struct cache_sb *sb)
+{
+       return ((sb->feature_ro_compat & ~BCH_FEATURE_RO_COMPAT_SUPP) != 0);
+}
+
+static inline bool bch_has_unknown_incompat_features(struct cache_sb *sb)
+{
+       return ((sb->feature_incompat & ~BCH_FEATURE_INCOMPAT_SUPP) != 0);
+}
 
 int bch_print_cache_set_feature_compat(struct cache_set *c, char *buf, int size);
 int bch_print_cache_set_feature_ro_compat(struct cache_set *c, char *buf, int size);
index a4752ac..2047a9c 100644 (file)
@@ -64,9 +64,25 @@ static unsigned int get_bucket_size(struct cache_sb *sb, struct cache_sb_disk *s
 {
        unsigned int bucket_size = le16_to_cpu(s->bucket_size);
 
-       if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES &&
-            bch_has_feature_large_bucket(sb))
-               bucket_size |= le16_to_cpu(s->bucket_size_hi) << 16;
+       if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
+               if (bch_has_feature_large_bucket(sb)) {
+                       unsigned int max, order;
+
+                       max = sizeof(unsigned int) * BITS_PER_BYTE - 1;
+                       order = le16_to_cpu(s->bucket_size);
+                       /*
+                        * bcache tool will make sure the overflow won't
+                        * happen, an error message here is enough.
+                        */
+                       if (order > max)
+                               pr_err("Bucket size (1 << %u) overflows\n",
+                                       order);
+                       bucket_size = 1 << order;
+               } else if (bch_has_feature_obso_large_bucket(sb)) {
+                       bucket_size +=
+                               le16_to_cpu(s->obso_bucket_size_hi) << 16;
+               }
+       }
 
        return bucket_size;
 }
@@ -228,6 +244,20 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
                sb->feature_compat = le64_to_cpu(s->feature_compat);
                sb->feature_incompat = le64_to_cpu(s->feature_incompat);
                sb->feature_ro_compat = le64_to_cpu(s->feature_ro_compat);
+
+               /* Check incompatible features */
+               err = "Unsupported compatible feature found";
+               if (bch_has_unknown_compat_features(sb))
+                       goto err;
+
+               err = "Unsupported read-only compatible feature found";
+               if (bch_has_unknown_ro_compat_features(sb))
+                       goto err;
+
+               err = "Unsupported incompatible feature found";
+               if (bch_has_unknown_incompat_features(sb))
+                       goto err;
+
                err = read_super_common(sb, bdev, s);
                if (err)
                        goto err;
@@ -1302,6 +1332,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
        bcache_device_link(&dc->disk, c, "bdev");
        atomic_inc(&c->attached_dev_nr);
 
+       if (bch_has_feature_obso_large_bucket(&(c->cache->sb))) {
+               pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
+               pr_err("Please update to the latest bcache-tools to create the cache device\n");
+               set_disk_ro(dc->disk.disk, 1);
+       }
+
        /* Allow the writeback thread to proceed */
        up_write(&dc->writeback_lock);
 
@@ -1524,6 +1560,12 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
 
        bcache_device_link(d, c, "volume");
 
+       if (bch_has_feature_obso_large_bucket(&c->cache->sb)) {
+               pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
+               pr_err("Please update to the latest bcache-tools to create the cache device\n");
+               set_disk_ro(d->disk, 1);
+       }
+
        return 0;
 err:
        kobject_put(&d->kobj);
@@ -2083,6 +2125,9 @@ static int run_cache_set(struct cache_set *c)
        c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
        bcache_write_super(c);
 
+       if (bch_has_feature_obso_large_bucket(&c->cache->sb))
+               pr_err("Detect obsoleted large bucket layout, all attached bcache device will be read-only\n");
+
        list_for_each_entry_safe(dc, t, &uncached_devices, list)
                bch_cached_dev_attach(dc, c, NULL);
 
@@ -2644,8 +2689,8 @@ static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
        }
 
        list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
+               char *pdev_set_uuid = pdev->dc->sb.set_uuid;
                list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
-                       char *pdev_set_uuid = pdev->dc->sb.set_uuid;
                        char *set_uuid = c->set_uuid;
 
                        if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
index 9c1a86b..fce4cbf 100644 (file)
@@ -1534,6 +1534,12 @@ sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
 }
 EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
 
+struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
+{
+       return c->dm_io;
+}
+EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);
+
 sector_t dm_bufio_get_block_number(struct dm_buffer *b)
 {
        return b->block;
index 5379113..5a55617 100644 (file)
@@ -1454,13 +1454,16 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
 static void kcryptd_async_done(struct crypto_async_request *async_req,
                               int error);
 
-static void crypt_alloc_req_skcipher(struct crypt_config *cc,
+static int crypt_alloc_req_skcipher(struct crypt_config *cc,
                                     struct convert_context *ctx)
 {
        unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
 
-       if (!ctx->r.req)
-               ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO);
+       if (!ctx->r.req) {
+               ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
+               if (!ctx->r.req)
+                       return -ENOMEM;
+       }
 
        skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
 
@@ -1471,13 +1474,18 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc,
        skcipher_request_set_callback(ctx->r.req,
            CRYPTO_TFM_REQ_MAY_BACKLOG,
            kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
+
+       return 0;
 }
 
-static void crypt_alloc_req_aead(struct crypt_config *cc,
+static int crypt_alloc_req_aead(struct crypt_config *cc,
                                 struct convert_context *ctx)
 {
-       if (!ctx->r.req_aead)
-               ctx->r.req_aead = mempool_alloc(&cc->req_pool, GFP_NOIO);
+       if (!ctx->r.req_aead) {
+               ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
+               if (!ctx->r.req_aead)
+                       return -ENOMEM;
+       }
 
        aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
 
@@ -1488,15 +1496,17 @@ static void crypt_alloc_req_aead(struct crypt_config *cc,
        aead_request_set_callback(ctx->r.req_aead,
            CRYPTO_TFM_REQ_MAY_BACKLOG,
            kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
+
+       return 0;
 }
 
-static void crypt_alloc_req(struct crypt_config *cc,
+static int crypt_alloc_req(struct crypt_config *cc,
                            struct convert_context *ctx)
 {
        if (crypt_integrity_aead(cc))
-               crypt_alloc_req_aead(cc, ctx);
+               return crypt_alloc_req_aead(cc, ctx);
        else
-               crypt_alloc_req_skcipher(cc, ctx);
+               return crypt_alloc_req_skcipher(cc, ctx);
 }
 
 static void crypt_free_req_skcipher(struct crypt_config *cc,
@@ -1529,17 +1539,28 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_
  * Encrypt / decrypt data from one bio to another one (can be the same one)
  */
 static blk_status_t crypt_convert(struct crypt_config *cc,
-                        struct convert_context *ctx, bool atomic)
+                        struct convert_context *ctx, bool atomic, bool reset_pending)
 {
        unsigned int tag_offset = 0;
        unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
        int r;
 
-       atomic_set(&ctx->cc_pending, 1);
+       /*
+        * if reset_pending is set we are dealing with the bio for the first time,
+        * else we're continuing to work on the previous bio, so don't mess with
+        * the cc_pending counter
+        */
+       if (reset_pending)
+               atomic_set(&ctx->cc_pending, 1);
 
        while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
 
-               crypt_alloc_req(cc, ctx);
+               r = crypt_alloc_req(cc, ctx);
+               if (r) {
+                       complete(&ctx->restart);
+                       return BLK_STS_DEV_RESOURCE;
+               }
+
                atomic_inc(&ctx->cc_pending);
 
                if (crypt_integrity_aead(cc))
@@ -1553,7 +1574,25 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
                 * but the driver request queue is full, let's wait.
                 */
                case -EBUSY:
-                       wait_for_completion(&ctx->restart);
+                       if (in_interrupt()) {
+                               if (try_wait_for_completion(&ctx->restart)) {
+                                       /*
+                                        * we don't have to block to wait for completion,
+                                        * so proceed
+                                        */
+                               } else {
+                                       /*
+                                        * we can't wait for completion without blocking
+                                        * exit and continue processing in a workqueue
+                                        */
+                                       ctx->r.req = NULL;
+                                       ctx->cc_sector += sector_step;
+                                       tag_offset++;
+                                       return BLK_STS_DEV_RESOURCE;
+                               }
+                       } else {
+                               wait_for_completion(&ctx->restart);
+                       }
                        reinit_completion(&ctx->restart);
                        fallthrough;
                /*
@@ -1691,6 +1730,12 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
        atomic_inc(&io->io_pending);
 }
 
+static void kcryptd_io_bio_endio(struct work_struct *work)
+{
+       struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+       bio_endio(io->base_bio);
+}
+
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
@@ -1713,7 +1758,23 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
                kfree(io->integrity_metadata);
 
        base_bio->bi_status = error;
-       bio_endio(base_bio);
+
+       /*
+        * If we are running this function from our tasklet,
+        * we can't call bio_endio() here, because it will call
+        * clone_endio() from dm.c, which in turn will
+        * free the current struct dm_crypt_io structure with
+        * our tasklet. In this case we need to delay bio_endio()
+        * execution to after the tasklet is done and dequeued.
+        */
+       if (tasklet_trylock(&io->tasklet)) {
+               tasklet_unlock(&io->tasklet);
+               bio_endio(base_bio);
+               return;
+       }
+
+       INIT_WORK(&io->work, kcryptd_io_bio_endio);
+       queue_work(cc->io_queue, &io->work);
 }
 
 /*
@@ -1945,6 +2006,37 @@ static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
        }
 }
 
+static void kcryptd_crypt_write_continue(struct work_struct *work)
+{
+       struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+       struct crypt_config *cc = io->cc;
+       struct convert_context *ctx = &io->ctx;
+       int crypt_finished;
+       sector_t sector = io->sector;
+       blk_status_t r;
+
+       wait_for_completion(&ctx->restart);
+       reinit_completion(&ctx->restart);
+
+       r = crypt_convert(cc, &io->ctx, true, false);
+       if (r)
+               io->error = r;
+       crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
+       if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
+               /* Wait for completion signaled by kcryptd_async_done() */
+               wait_for_completion(&ctx->restart);
+               crypt_finished = 1;
+       }
+
+       /* Encryption was already finished, submit io now */
+       if (crypt_finished) {
+               kcryptd_crypt_write_io_submit(io, 0);
+               io->sector = sector;
+       }
+
+       crypt_dec_pending(io);
+}
+
 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
        struct crypt_config *cc = io->cc;
@@ -1973,7 +2065,17 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 
        crypt_inc_pending(io);
        r = crypt_convert(cc, ctx,
-                         test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags));
+                         test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
+       /*
+        * Crypto API backlogged the request, because its queue was full
+        * and we're in softirq context, so continue from a workqueue
+        * (TODO: is it actually possible to be in softirq in the write path?)
+        */
+       if (r == BLK_STS_DEV_RESOURCE) {
+               INIT_WORK(&io->work, kcryptd_crypt_write_continue);
+               queue_work(cc->crypt_queue, &io->work);
+               return;
+       }
        if (r)
                io->error = r;
        crypt_finished = atomic_dec_and_test(&ctx->cc_pending);
@@ -1998,6 +2100,25 @@ static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
        crypt_dec_pending(io);
 }
 
+static void kcryptd_crypt_read_continue(struct work_struct *work)
+{
+       struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+       struct crypt_config *cc = io->cc;
+       blk_status_t r;
+
+       wait_for_completion(&io->ctx.restart);
+       reinit_completion(&io->ctx.restart);
+
+       r = crypt_convert(cc, &io->ctx, true, false);
+       if (r)
+               io->error = r;
+
+       if (atomic_dec_and_test(&io->ctx.cc_pending))
+               kcryptd_crypt_read_done(io);
+
+       crypt_dec_pending(io);
+}
+
 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 {
        struct crypt_config *cc = io->cc;
@@ -2009,7 +2130,16 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
                           io->sector);
 
        r = crypt_convert(cc, &io->ctx,
-                         test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags));
+                         test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
+       /*
+        * Crypto API backlogged the request, because its queue was full
+        * and we're in softirq context, so continue from a workqueue
+        */
+       if (r == BLK_STS_DEV_RESOURCE) {
+               INIT_WORK(&io->work, kcryptd_crypt_read_continue);
+               queue_work(cc->crypt_queue, &io->work);
+               return;
+       }
        if (r)
                io->error = r;
 
@@ -2091,8 +2221,12 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
 
        if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
            (bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
-               if (in_irq()) {
-                       /* Crypto API's "skcipher_walk_first() refuses to work in hard IRQ context */
+               /*
+                * in_irq(): Crypto API's skcipher_walk_first() refuses to work in hard IRQ context.
+                * irqs_disabled(): the kernel may run some IO completion from the idle thread, but
+                * it is being executed with irqs disabled.
+                */
+               if (in_irq() || irqs_disabled()) {
                        tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
                        tasklet_schedule(&io->tasklet);
                        return;
index 5a7a1b9..b64fede 100644 (file)
@@ -257,8 +257,9 @@ struct dm_integrity_c {
        bool journal_uptodate;
        bool just_formatted;
        bool recalculate_flag;
-       bool fix_padding;
        bool discard;
+       bool fix_padding;
+       bool legacy_recalculate;
 
        struct alg_spec internal_hash_alg;
        struct alg_spec journal_crypt_alg;
@@ -386,6 +387,14 @@ static int dm_integrity_failed(struct dm_integrity_c *ic)
        return READ_ONCE(ic->failed);
 }
 
+static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
+{
+       if ((ic->internal_hash_alg.key || ic->journal_mac_alg.key) &&
+           !ic->legacy_recalculate)
+               return true;
+       return false;
+}
+
 static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
                                          unsigned j, unsigned char seq)
 {
@@ -1379,12 +1388,52 @@ thorough_test:
 #undef MAY_BE_HASH
 }
 
-static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
+struct flush_request {
+       struct dm_io_request io_req;
+       struct dm_io_region io_reg;
+       struct dm_integrity_c *ic;
+       struct completion comp;
+};
+
+static void flush_notify(unsigned long error, void *fr_)
+{
+       struct flush_request *fr = fr_;
+       if (unlikely(error != 0))
+               dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO);
+       complete(&fr->comp);
+}
+
+static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
 {
        int r;
+
+       struct flush_request fr;
+
+       if (!ic->meta_dev)
+               flush_data = false;
+       if (flush_data) {
+               fr.io_req.bi_op = REQ_OP_WRITE,
+               fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
+               fr.io_req.mem.type = DM_IO_KMEM,
+               fr.io_req.mem.ptr.addr = NULL,
+               fr.io_req.notify.fn = flush_notify,
+               fr.io_req.notify.context = &fr;
+               fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio),
+               fr.io_reg.bdev = ic->dev->bdev,
+               fr.io_reg.sector = 0,
+               fr.io_reg.count = 0,
+               fr.ic = ic;
+               init_completion(&fr.comp);
+               r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
+               BUG_ON(r);
+       }
+
        r = dm_bufio_write_dirty_buffers(ic->bufio);
        if (unlikely(r))
                dm_integrity_io_error(ic, "writing tags", r);
+
+       if (flush_data)
+               wait_for_completion(&fr.comp);
 }
 
 static void sleep_on_endio_wait(struct dm_integrity_c *ic)
@@ -2110,7 +2159,7 @@ offload_to_thread:
 
        if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
                integrity_metadata(&dio->work);
-               dm_integrity_flush_buffers(ic);
+               dm_integrity_flush_buffers(ic, false);
 
                dio->in_flight = (atomic_t)ATOMIC_INIT(1);
                dio->completion = NULL;
@@ -2195,7 +2244,7 @@ static void integrity_commit(struct work_struct *w)
        flushes = bio_list_get(&ic->flush_bio_list);
        if (unlikely(ic->mode != 'J')) {
                spin_unlock_irq(&ic->endio_wait.lock);
-               dm_integrity_flush_buffers(ic);
+               dm_integrity_flush_buffers(ic, true);
                goto release_flush_bios;
        }
 
@@ -2409,7 +2458,7 @@ skip_io:
        complete_journal_op(&comp);
        wait_for_completion_io(&comp.comp);
 
-       dm_integrity_flush_buffers(ic);
+       dm_integrity_flush_buffers(ic, true);
 }
 
 static void integrity_writer(struct work_struct *w)
@@ -2451,7 +2500,7 @@ static void recalc_write_super(struct dm_integrity_c *ic)
 {
        int r;
 
-       dm_integrity_flush_buffers(ic);
+       dm_integrity_flush_buffers(ic, false);
        if (dm_integrity_failed(ic))
                return;
 
@@ -2654,7 +2703,7 @@ static void bitmap_flush_work(struct work_struct *work)
        unsigned long limit;
        struct bio *bio;
 
-       dm_integrity_flush_buffers(ic);
+       dm_integrity_flush_buffers(ic, false);
 
        range.logical_sector = 0;
        range.n_sectors = ic->provided_data_sectors;
@@ -2663,9 +2712,7 @@ static void bitmap_flush_work(struct work_struct *work)
        add_new_range_and_wait(ic, &range);
        spin_unlock_irq(&ic->endio_wait.lock);
 
-       dm_integrity_flush_buffers(ic);
-       if (ic->meta_dev)
-               blkdev_issue_flush(ic->dev->bdev, GFP_NOIO);
+       dm_integrity_flush_buffers(ic, true);
 
        limit = ic->provided_data_sectors;
        if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
@@ -2934,11 +2981,11 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
                if (ic->meta_dev)
                        queue_work(ic->writer_wq, &ic->writer_work);
                drain_workqueue(ic->writer_wq);
-               dm_integrity_flush_buffers(ic);
+               dm_integrity_flush_buffers(ic, true);
        }
 
        if (ic->mode == 'B') {
-               dm_integrity_flush_buffers(ic);
+               dm_integrity_flush_buffers(ic, true);
 #if 1
                /* set to 0 to test bitmap replay code */
                init_journal(ic, 0, ic->journal_sections, 0);
@@ -3102,6 +3149,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
                arg_count += !!ic->journal_crypt_alg.alg_string;
                arg_count += !!ic->journal_mac_alg.alg_string;
                arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
+               arg_count += ic->legacy_recalculate;
                DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
                       ic->tag_size, ic->mode, arg_count);
                if (ic->meta_dev)
@@ -3125,6 +3173,8 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
                }
                if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
                        DMEMIT(" fix_padding");
+               if (ic->legacy_recalculate)
+                       DMEMIT(" legacy_recalculate");
 
 #define EMIT_ALG(a, n)                                                 \
                do {                                                    \
@@ -3754,7 +3804,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
        unsigned extra_args;
        struct dm_arg_set as;
        static const struct dm_arg _args[] = {
-               {0, 9, "Invalid number of feature args"},
+               {0, 16, "Invalid number of feature args"},
        };
        unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
        bool should_write_sb;
@@ -3902,6 +3952,8 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
                        ic->discard = true;
                } else if (!strcmp(opt_string, "fix_padding")) {
                        ic->fix_padding = true;
+               } else if (!strcmp(opt_string, "legacy_recalculate")) {
+                       ic->legacy_recalculate = true;
                } else {
                        r = -EINVAL;
                        ti->error = "Invalid argument";
@@ -4197,6 +4249,20 @@ try_smaller_buffer:
                        r = -ENOMEM;
                        goto bad;
                }
+       } else {
+               if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
+                       ti->error = "Recalculate can only be specified with internal_hash";
+                       r = -EINVAL;
+                       goto bad;
+               }
+       }
+
+       if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
+           le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors &&
+           dm_integrity_disable_recalculate(ic)) {
+               ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\"";
+               r = -EOPNOTSUPP;
+               goto bad;
        }
 
        ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
index 23c3877..cab12b2 100644 (file)
@@ -3729,10 +3729,10 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
        blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
 
        /*
-        * RAID1 and RAID10 personalities require bio splitting,
-        * RAID0/4/5/6 don't and process large discard bios properly.
+        * RAID0 and RAID10 personalities require bio splitting,
+        * RAID1/4/5/6 don't and process large discard bios properly.
         */
-       if (rs_is_raid1(rs) || rs_is_raid10(rs)) {
+       if (rs_is_raid0(rs) || rs_is_raid10(rs)) {
                limits->discard_granularity = chunk_size_bytes;
                limits->max_discard_sectors = rs->md.chunk_sectors;
        }
index 4668b2c..11890db 100644 (file)
@@ -141,6 +141,11 @@ struct dm_snapshot {
         * for them to be committed.
         */
        struct bio_list bios_queued_during_merge;
+
+       /*
+        * Flush data after merge.
+        */
+       struct bio flush_bio;
 };
 
 /*
@@ -1121,6 +1126,17 @@ shut:
 
 static void error_bios(struct bio *bio);
 
+static int flush_data(struct dm_snapshot *s)
+{
+       struct bio *flush_bio = &s->flush_bio;
+
+       bio_reset(flush_bio);
+       bio_set_dev(flush_bio, s->origin->bdev);
+       flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+
+       return submit_bio_wait(flush_bio);
+}
+
 static void merge_callback(int read_err, unsigned long write_err, void *context)
 {
        struct dm_snapshot *s = context;
@@ -1134,6 +1150,11 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
                goto shut;
        }
 
+       if (flush_data(s) < 0) {
+               DMERR("Flush after merge failed: shutting down merge");
+               goto shut;
+       }
+
        if (s->store->type->commit_merge(s->store,
                                         s->num_merging_chunks) < 0) {
                DMERR("Write error in exception store: shutting down merge");
@@ -1318,6 +1339,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;
        bio_list_init(&s->bios_queued_during_merge);
+       bio_init(&s->flush_bio, NULL, 0);
 
        /* Allocate hash table for COW data */
        if (init_hash_tables(s)) {
@@ -1504,6 +1526,8 @@ static void snapshot_dtr(struct dm_target *ti)
 
        dm_exception_store_destroy(s->store);
 
+       bio_uninit(&s->flush_bio);
+
        dm_put_device(ti, s->cow);
 
        dm_put_device(ti, s->origin);
index 188f412..4acf234 100644 (file)
@@ -363,14 +363,23 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
 {
        int r;
        dev_t dev;
+       unsigned int major, minor;
+       char dummy;
        struct dm_dev_internal *dd;
        struct dm_table *t = ti->table;
 
        BUG_ON(!t);
 
-       dev = dm_get_dev_t(path);
-       if (!dev)
-               return -ENODEV;
+       if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
+               /* Extract the major/minor numbers */
+               dev = MKDEV(major, minor);
+               if (MAJOR(dev) != major || MINOR(dev) != minor)
+                       return -EOVERFLOW;
+       } else {
+               dev = dm_get_dev_t(path);
+               if (!dev)
+                       return -ENODEV;
+       }
 
        dd = find_device(&t->devices, dev);
        if (!dd) {
index b3c3c8b..7bac564 100644 (file)
@@ -562,7 +562,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
                 * subset of the parent bdev; require extra privileges.
                 */
                if (!capable(CAP_SYS_RAWIO)) {
-                       DMWARN_LIMIT(
+                       DMDEBUG_LIMIT(
        "%s: sending ioctl %x to DM device without required privilege.",
                                current->comm, cmd);
                        r = -ENOIOCTLCMD;
index beb4823..b2b3d2b 100644 (file)
@@ -472,8 +472,11 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
        cntr = &hdev->aggregated_cs_counters;
 
        cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
-       if (!cs)
+       if (!cs) {
+               atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+               atomic64_inc(&cntr->out_of_mem_drop_cnt);
                return -ENOMEM;
+       }
 
        cs->ctx = ctx;
        cs->submitted = false;
@@ -486,6 +489,8 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
 
        cs_cmpl = kmalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
        if (!cs_cmpl) {
+               atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+               atomic64_inc(&cntr->out_of_mem_drop_cnt);
                rc = -ENOMEM;
                goto free_cs;
        }
@@ -513,6 +518,8 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
        cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
                        sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
        if (!cs->jobs_in_queue_cnt) {
+               atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+               atomic64_inc(&cntr->out_of_mem_drop_cnt);
                rc = -ENOMEM;
                goto free_fence;
        }
@@ -562,7 +569,7 @@ void hl_cs_rollback_all(struct hl_device *hdev)
        for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
                flush_workqueue(hdev->cq_wq[i]);
 
-       /* Make sure we don't have leftovers in the H/W queues mirror list */
+       /* Make sure we don't have leftovers in the CS mirror list */
        list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
                cs_get(cs);
                cs->aborted = true;
@@ -764,11 +771,14 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
 
 static int hl_cs_copy_chunk_array(struct hl_device *hdev,
                                        struct hl_cs_chunk **cs_chunk_array,
-                                       void __user *chunks, u32 num_chunks)
+                                       void __user *chunks, u32 num_chunks,
+                                       struct hl_ctx *ctx)
 {
        u32 size_to_copy;
 
        if (num_chunks > HL_MAX_JOBS_PER_CS) {
+               atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+               atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
                dev_err(hdev->dev,
                        "Number of chunks can NOT be larger than %d\n",
                        HL_MAX_JOBS_PER_CS);
@@ -777,11 +787,16 @@ static int hl_cs_copy_chunk_array(struct hl_device *hdev,
 
        *cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
                                        GFP_ATOMIC);
-       if (!*cs_chunk_array)
+       if (!*cs_chunk_array) {
+               atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+               atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
                return -ENOMEM;
+       }
 
        size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
        if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
+               atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+               atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
                dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
                kfree(*cs_chunk_array);
                return -EFAULT;
@@ -797,6 +812,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
        struct hl_device *hdev = hpriv->hdev;
        struct hl_cs_chunk *cs_chunk_array;
        struct hl_cs_counters_atomic *cntr;
+       struct hl_ctx *ctx = hpriv->ctx;
        struct hl_cs_job *job;
        struct hl_cs *cs;
        struct hl_cb *cb;
@@ -805,7 +821,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
        cntr = &hdev->aggregated_cs_counters;
        *cs_seq = ULLONG_MAX;
 
-       rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks);
+       rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
+                       hpriv->ctx);
        if (rc)
                goto out;
 
@@ -832,8 +849,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
                rc = validate_queue_index(hdev, chunk, &queue_type,
                                                &is_kernel_allocated_cb);
                if (rc) {
-                       atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
-                       atomic64_inc(&cntr->parsing_drop_cnt);
+                       atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+                       atomic64_inc(&cntr->validation_drop_cnt);
                        goto free_cs_object;
                }
 
@@ -841,8 +858,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
                        cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
                        if (!cb) {
                                atomic64_inc(
-                               &hpriv->ctx->cs_counters.parsing_drop_cnt);
-                               atomic64_inc(&cntr->parsing_drop_cnt);
+                                       &ctx->cs_counters.validation_drop_cnt);
+                               atomic64_inc(&cntr->validation_drop_cnt);
                                rc = -EINVAL;
                                goto free_cs_object;
                        }
@@ -856,8 +873,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
                job = hl_cs_allocate_job(hdev, queue_type,
                                                is_kernel_allocated_cb);
                if (!job) {
-                       atomic64_inc(
-                       &hpriv->ctx->cs_counters.out_of_mem_drop_cnt);
+                       atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
                        atomic64_inc(&cntr->out_of_mem_drop_cnt);
                        dev_err(hdev->dev, "Failed to allocate a new job\n");
                        rc = -ENOMEM;
@@ -891,7 +907,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
 
                rc = cs_parser(hpriv, job);
                if (rc) {
-                       atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
+                       atomic64_inc(&ctx->cs_counters.parsing_drop_cnt);
                        atomic64_inc(&cntr->parsing_drop_cnt);
                        dev_err(hdev->dev,
                                "Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
@@ -901,8 +917,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
        }
 
        if (int_queues_only) {
-               atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
-               atomic64_inc(&cntr->parsing_drop_cnt);
+               atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+               atomic64_inc(&cntr->validation_drop_cnt);
                dev_err(hdev->dev,
                        "Reject CS %d.%llu because only internal queues jobs are present\n",
                        cs->ctx->asid, cs->sequence);
@@ -1042,7 +1058,7 @@ out:
 }
 
 static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
-               struct hl_cs_chunk *chunk, u64 *signal_seq)
+               struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx)
 {
        u64 *signal_seq_arr = NULL;
        u32 size_to_copy, signal_seq_arr_len;
@@ -1052,6 +1068,8 @@ static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
 
        /* currently only one signal seq is supported */
        if (signal_seq_arr_len != 1) {
+               atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+               atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
                dev_err(hdev->dev,
                        "Wait for signal CS supports only one signal CS seq\n");
                return -EINVAL;
@@ -1060,13 +1078,18 @@ static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
        signal_seq_arr = kmalloc_array(signal_seq_arr_len,
                                        sizeof(*signal_seq_arr),
                                        GFP_ATOMIC);
-       if (!signal_seq_arr)
+       if (!signal_seq_arr) {
+               atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+               atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
                return -ENOMEM;
+       }
 
        size_to_copy = chunk->num_signal_seq_arr * sizeof(*signal_seq_arr);
        if (copy_from_user(signal_seq_arr,
                                u64_to_user_ptr(chunk->signal_seq_arr),
                                size_to_copy)) {
+               atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+               atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
                dev_err(hdev->dev,
                        "Failed to copy signal seq array from user\n");
                rc = -EFAULT;
@@ -1153,6 +1176,7 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
        struct hl_device *hdev = hpriv->hdev;
        struct hl_cs_compl *sig_waitcs_cmpl;
        u32 q_idx, collective_engine_id = 0;
+       struct hl_cs_counters_atomic *cntr;
        struct hl_fence *sig_fence = NULL;
        struct hl_ctx *ctx = hpriv->ctx;
        enum hl_queue_type q_type;
@@ -1160,9 +1184,11 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
        u64 signal_seq;
        int rc;
 
+       cntr = &hdev->aggregated_cs_counters;
        *cs_seq = ULLONG_MAX;
 
-       rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks);
+       rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
+                       ctx);
        if (rc)
                goto out;
 
@@ -1170,6 +1196,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
        chunk = &cs_chunk_array[0];
 
        if (chunk->queue_index >= hdev->asic_prop.max_queues) {
+               atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+               atomic64_inc(&cntr->validation_drop_cnt);
                dev_err(hdev->dev, "Queue index %d is invalid\n",
                        chunk->queue_index);
                rc = -EINVAL;
@@ -1181,6 +1209,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
        q_type = hw_queue_prop->type;
 
        if (!hw_queue_prop->supports_sync_stream) {
+               atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+               atomic64_inc(&cntr->validation_drop_cnt);
                dev_err(hdev->dev,
                        "Queue index %d does not support sync stream operations\n",
                        q_idx);
@@ -1190,6 +1220,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
 
        if (cs_type == CS_TYPE_COLLECTIVE_WAIT) {
                if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
+                       atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+                       atomic64_inc(&cntr->validation_drop_cnt);
                        dev_err(hdev->dev,
                                "Queue index %d is invalid\n", q_idx);
                        rc = -EINVAL;
@@ -1200,12 +1232,14 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
        }
 
        if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT) {
-               rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq);
+               rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq, ctx);
                if (rc)
                        goto free_cs_chunk_array;
 
                sig_fence = hl_ctx_get_fence(ctx, signal_seq);
                if (IS_ERR(sig_fence)) {
+                       atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+                       atomic64_inc(&cntr->validation_drop_cnt);
                        dev_err(hdev->dev,
                                "Failed to get signal CS with seq 0x%llx\n",
                                signal_seq);
@@ -1223,6 +1257,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
                        container_of(sig_fence, struct hl_cs_compl, base_fence);
 
                if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL) {
+                       atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+                       atomic64_inc(&cntr->validation_drop_cnt);
                        dev_err(hdev->dev,
                                "CS seq 0x%llx is not of a signal CS\n",
                                signal_seq);
@@ -1270,8 +1306,11 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
        else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
                rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
                                cs, q_idx, collective_engine_id);
-       else
+       else {
+               atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+               atomic64_inc(&cntr->validation_drop_cnt);
                rc = -EINVAL;
+       }
 
        if (rc)
                goto free_cs_object;
index 5871162..1456eab 100644 (file)
@@ -17,12 +17,12 @@ enum hl_device_status hl_device_status(struct hl_device *hdev)
 {
        enum hl_device_status status;
 
-       if (hdev->disabled)
-               status = HL_DEVICE_STATUS_MALFUNCTION;
-       else if (atomic_read(&hdev->in_reset))
+       if (atomic_read(&hdev->in_reset))
                status = HL_DEVICE_STATUS_IN_RESET;
        else if (hdev->needs_reset)
                status = HL_DEVICE_STATUS_NEEDS_RESET;
+       else if (hdev->disabled)
+               status = HL_DEVICE_STATUS_MALFUNCTION;
        else
                status = HL_DEVICE_STATUS_OPERATIONAL;
 
@@ -1092,6 +1092,7 @@ kill_processes:
                                                GFP_KERNEL);
                if (!hdev->kernel_ctx) {
                        rc = -ENOMEM;
+                       hl_mmu_fini(hdev);
                        goto out_err;
                }
 
@@ -1103,6 +1104,7 @@ kill_processes:
                                "failed to init kernel ctx in hard reset\n");
                        kfree(hdev->kernel_ctx);
                        hdev->kernel_ctx = NULL;
+                       hl_mmu_fini(hdev);
                        goto out_err;
                }
        }
index 0e1c629..20f77f5 100644 (file)
@@ -627,25 +627,38 @@ int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
        security_status = RREG32(cpu_security_boot_status_reg);
 
        /* We read security status multiple times during boot:
-        * 1. preboot - we check if fw security feature is supported
-        * 2. boot cpu - we get boot cpu security status
-        * 3. FW application - we get FW application security status
+        * 1. preboot - a. Check whether the security status bits are valid
+        *              b. Check whether fw security is enabled
+        *              c. Check whether hard reset is done by preboot
+        * 2. boot cpu - a. Fetch boot cpu security status
+        *               b. Check whether hard reset is done by boot cpu
+        * 3. FW application - a. Fetch fw application security status
+        *                     b. Check whether hard reset is done by fw app
         *
         * Preboot:
         * Check security status bit (CPU_BOOT_DEV_STS0_ENABLED), if it is set
         * check security enabled bit (CPU_BOOT_DEV_STS0_SECURITY_EN)
         */
        if (security_status & CPU_BOOT_DEV_STS0_ENABLED) {
-               hdev->asic_prop.fw_security_status_valid = 1;
-               prop->fw_security_disabled =
-                       !(security_status & CPU_BOOT_DEV_STS0_SECURITY_EN);
+               prop->fw_security_status_valid = 1;
+
+               if (security_status & CPU_BOOT_DEV_STS0_SECURITY_EN)
+                       prop->fw_security_disabled = false;
+               else
+                       prop->fw_security_disabled = true;
+
+               if (security_status & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
+                       prop->hard_reset_done_by_fw = true;
        } else {
-               hdev->asic_prop.fw_security_status_valid = 0;
+               prop->fw_security_status_valid = 0;
                prop->fw_security_disabled = true;
        }
 
+       dev_dbg(hdev->dev, "Firmware preboot hard-reset is %s\n",
+                       prop->hard_reset_done_by_fw ? "enabled" : "disabled");
+
        dev_info(hdev->dev, "firmware-level security is %s\n",
-               prop->fw_security_disabled ? "disabled" : "enabled");
+                       prop->fw_security_disabled ? "disabled" : "enabled");
 
        return 0;
 }
@@ -655,6 +668,7 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
                        u32 cpu_security_boot_status_reg, u32 boot_err0_reg,
                        bool skip_bmc, u32 cpu_timeout, u32 boot_fit_timeout)
 {
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
        u32 status;
        int rc;
 
@@ -723,11 +737,22 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
        /* Read U-Boot version now in case we will later fail */
        hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_UBOOT);
 
+       /* Clear reset status since we need to read it again from boot CPU */
+       prop->hard_reset_done_by_fw = false;
+
        /* Read boot_cpu security bits */
-       if (hdev->asic_prop.fw_security_status_valid)
-               hdev->asic_prop.fw_boot_cpu_security_map =
+       if (prop->fw_security_status_valid) {
+               prop->fw_boot_cpu_security_map =
                                RREG32(cpu_security_boot_status_reg);
 
+               if (prop->fw_boot_cpu_security_map &
+                               CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
+                       prop->hard_reset_done_by_fw = true;
+       }
+
+       dev_dbg(hdev->dev, "Firmware boot CPU hard-reset is %s\n",
+                       prop->hard_reset_done_by_fw ? "enabled" : "disabled");
+
        if (rc) {
                detect_cpu_boot_status(hdev, status);
                rc = -EIO;
@@ -796,18 +821,21 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
                goto out;
        }
 
+       /* Clear reset status since we need to read again from app */
+       prop->hard_reset_done_by_fw = false;
+
        /* Read FW application security bits */
-       if (hdev->asic_prop.fw_security_status_valid) {
-               hdev->asic_prop.fw_app_security_map =
+       if (prop->fw_security_status_valid) {
+               prop->fw_app_security_map =
                                RREG32(cpu_security_boot_status_reg);
 
-               if (hdev->asic_prop.fw_app_security_map &
+               if (prop->fw_app_security_map &
                                CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
-                       hdev->asic_prop.hard_reset_done_by_fw = true;
+                       prop->hard_reset_done_by_fw = true;
        }
 
-       dev_dbg(hdev->dev, "Firmware hard-reset is %s\n",
-               hdev->asic_prop.hard_reset_done_by_fw ? "enabled" : "disabled");
+       dev_dbg(hdev->dev, "Firmware application CPU hard-reset is %s\n",
+                       prop->hard_reset_done_by_fw ? "enabled" : "disabled");
 
        dev_info(hdev->dev, "Successfully loaded firmware to device\n");
 
index 571eda6..e0d7f5f 100644 (file)
@@ -944,7 +944,7 @@ struct hl_asic_funcs {
        u32 (*get_signal_cb_size)(struct hl_device *hdev);
        u32 (*get_wait_cb_size)(struct hl_device *hdev);
        u32 (*gen_signal_cb)(struct hl_device *hdev, void *data, u16 sob_id,
-                       u32 size);
+                       u32 size, bool eb);
        u32 (*gen_wait_cb)(struct hl_device *hdev,
                        struct hl_gen_wait_properties *prop);
        void (*reset_sob)(struct hl_device *hdev, void *data);
@@ -1000,6 +1000,7 @@ struct hl_va_range {
  * @queue_full_drop_cnt: dropped due to queue full
  * @device_in_reset_drop_cnt: dropped due to device in reset
  * @max_cs_in_flight_drop_cnt: dropped due to maximum CS in-flight
+ * @validation_drop_cnt: dropped due to error in validation
  */
 struct hl_cs_counters_atomic {
        atomic64_t out_of_mem_drop_cnt;
@@ -1007,6 +1008,7 @@ struct hl_cs_counters_atomic {
        atomic64_t queue_full_drop_cnt;
        atomic64_t device_in_reset_drop_cnt;
        atomic64_t max_cs_in_flight_drop_cnt;
+       atomic64_t validation_drop_cnt;
 };
 
 /**
index 6bbb6bc..032d114 100644 (file)
@@ -544,6 +544,7 @@ static struct pci_driver hl_pci_driver = {
        .id_table = ids,
        .probe = hl_pci_probe,
        .remove = hl_pci_remove,
+       .shutdown = hl_pci_remove,
        .driver.pm = &hl_pm_ops,
        .err_handler = &hl_pci_err_handler,
 };
index 32e6af1..12efbd9 100644 (file)
@@ -335,6 +335,8 @@ static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
                        atomic64_read(&cntr->device_in_reset_drop_cnt);
        cs_counters.total_max_cs_in_flight_drop_cnt =
                        atomic64_read(&cntr->max_cs_in_flight_drop_cnt);
+       cs_counters.total_validation_drop_cnt =
+                       atomic64_read(&cntr->validation_drop_cnt);
 
        if (hpriv->ctx) {
                cs_counters.ctx_out_of_mem_drop_cnt =
@@ -352,6 +354,9 @@ static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
                cs_counters.ctx_max_cs_in_flight_drop_cnt =
                                atomic64_read(
                        &hpriv->ctx->cs_counters.max_cs_in_flight_drop_cnt);
+               cs_counters.ctx_validation_drop_cnt =
+                               atomic64_read(
+                               &hpriv->ctx->cs_counters.validation_drop_cnt);
        }
 
        return copy_to_user(out, &cs_counters,
@@ -406,7 +411,7 @@ static int total_energy_consumption_info(struct hl_fpriv *hpriv,
 static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
 {
        struct hl_device *hdev = hpriv->hdev;
-       struct hl_pll_frequency_info freq_info = {0};
+       struct hl_pll_frequency_info freq_info = { {0} };
        u32 max_size = args->return_size;
        void __user *out = (void __user *) (uintptr_t) args->return_pointer;
        int rc;
index 7caf868..7621725 100644 (file)
@@ -418,8 +418,11 @@ static void init_signal_cs(struct hl_device *hdev,
                "generate signal CB, sob_id: %d, sob val: 0x%x, q_idx: %d\n",
                cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx);
 
+       /* we set an EB since we must make sure all oeprations are done
+        * when sending the signal
+        */
        hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb,
-                               cs_cmpl->hw_sob->sob_id, 0);
+                               cs_cmpl->hw_sob->sob_id, 0, true);
 
        kref_get(&hw_sob->kref);
 
index 923b260..b4725e6 100644 (file)
@@ -130,10 +130,8 @@ static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
        if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
                return 0;
 
-       if (val & PCI_CONFIG_ELBI_STS_ERR) {
-               dev_err(hdev->dev, "Error writing to ELBI\n");
+       if (val & PCI_CONFIG_ELBI_STS_ERR)
                return -EIO;
-       }
 
        if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
                dev_err(hdev->dev, "ELBI write didn't finish in time\n");
@@ -160,8 +158,12 @@ int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
 
        dbi_offset = addr & 0xFFF;
 
-       rc = hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);
-       rc |= hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
+       /* Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
+        * in case the firmware security is enabled
+        */
+       hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);
+
+       rc = hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
                                data);
 
        if (rc)
@@ -244,9 +246,11 @@ int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
 
        rc |= hl_pci_iatu_write(hdev, offset + 0x4, ctrl_reg_val);
 
-       /* Return the DBI window to the default location */
-       rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
-       rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
+       /* Return the DBI window to the default location
+        * Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
+        * in case the firmware security is enabled
+        */
+       hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
 
        if (rc)
                dev_err(hdev->dev, "failed to map bar %u to 0x%08llx\n",
@@ -294,9 +298,11 @@ int hl_pci_set_outbound_region(struct hl_device *hdev,
        /* Enable */
        rc |= hl_pci_iatu_write(hdev, 0x004, 0x80000000);
 
-       /* Return the DBI window to the default location */
-       rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
-       rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
+       /* Return the DBI window to the default location
+        * Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
+        * in case the firmware security is enabled
+        */
+       hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
 
        return rc;
 }
index 1f19266..8c09e44 100644 (file)
@@ -151,19 +151,6 @@ static const u16 gaudi_packet_sizes[MAX_PACKET_ID] = {
        [PACKET_LOAD_AND_EXE]   = sizeof(struct packet_load_and_exe)
 };
 
-static const u32 gaudi_pll_base_addresses[GAUDI_PLL_MAX] = {
-       [CPU_PLL] = mmPSOC_CPU_PLL_NR,
-       [PCI_PLL] = mmPSOC_PCI_PLL_NR,
-       [SRAM_PLL] = mmSRAM_W_PLL_NR,
-       [HBM_PLL] = mmPSOC_HBM_PLL_NR,
-       [NIC_PLL] = mmNIC0_PLL_NR,
-       [DMA_PLL] = mmDMA_W_PLL_NR,
-       [MESH_PLL] = mmMESH_W_PLL_NR,
-       [MME_PLL] = mmPSOC_MME_PLL_NR,
-       [TPC_PLL] = mmPSOC_TPC_PLL_NR,
-       [IF_PLL] = mmIF_W_PLL_NR
-};
-
 static inline bool validate_packet_id(enum packet_id id)
 {
        switch (id) {
@@ -374,7 +361,7 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev);
 static void gaudi_disable_clock_gating(struct hl_device *hdev);
 static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid);
 static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
-                               u32 size);
+                               u32 size, bool eb);
 static u32 gaudi_gen_wait_cb(struct hl_device *hdev,
                                struct hl_gen_wait_properties *prop);
 
@@ -667,12 +654,6 @@ static int gaudi_early_init(struct hl_device *hdev)
        if (rc)
                goto free_queue_props;
 
-       if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
-               dev_info(hdev->dev,
-                       "H/W state is dirty, must reset before initializing\n");
-               hdev->asic_funcs->hw_fini(hdev, true);
-       }
-
        /* Before continuing in the initialization, we need to read the preboot
         * version to determine whether we run with a security-enabled firmware
         */
@@ -685,6 +666,12 @@ static int gaudi_early_init(struct hl_device *hdev)
                goto pci_fini;
        }
 
+       if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
+               dev_info(hdev->dev,
+                       "H/W state is dirty, must reset before initializing\n");
+               hdev->asic_funcs->hw_fini(hdev, true);
+       }
+
        return 0;
 
 pci_fini:
@@ -703,93 +690,60 @@ static int gaudi_early_fini(struct hl_device *hdev)
 }
 
 /**
- * gaudi_fetch_pll_frequency - Fetch PLL frequency values
+ * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values
  *
  * @hdev: pointer to hl_device structure
- * @pll_index: index of the pll to fetch frequency from
- * @pll_freq: pointer to store the pll frequency in MHz in each of the available
- *            outputs. if a certain output is not available a 0 will be set
  *
  */
-static int gaudi_fetch_pll_frequency(struct hl_device *hdev,
-                               enum gaudi_pll_index pll_index,
-                               u16 *pll_freq_arr)
+static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
 {
-       u32 nr = 0, nf = 0, od = 0, pll_clk = 0, div_fctr, div_sel,
-                       pll_base_addr = gaudi_pll_base_addresses[pll_index];
-       u16 freq = 0;
-       int i, rc;
-
-       if (hdev->asic_prop.fw_security_status_valid &&
-                       (hdev->asic_prop.fw_app_security_map &
-                                       CPU_BOOT_DEV_STS0_PLL_INFO_EN)) {
-               rc = hl_fw_cpucp_pll_info_get(hdev, pll_index, pll_freq_arr);
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
+       u32 nr = 0, nf = 0, od = 0, div_fctr = 0, pll_clk, div_sel;
+       u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS], freq;
+       int rc;
 
-               if (rc)
-                       return rc;
-       } else if (hdev->asic_prop.fw_security_disabled) {
+       if (hdev->asic_prop.fw_security_disabled) {
                /* Backward compatibility */
-               nr = RREG32(pll_base_addr + PLL_NR_OFFSET);
-               nf = RREG32(pll_base_addr + PLL_NF_OFFSET);
-               od = RREG32(pll_base_addr + PLL_OD_OFFSET);
-
-               for (i = 0; i < HL_PLL_NUM_OUTPUTS; i++) {
-                       div_fctr = RREG32(pll_base_addr +
-                                       PLL_DIV_FACTOR_0_OFFSET + i * 4);
-                       div_sel = RREG32(pll_base_addr +
-                                       PLL_DIV_SEL_0_OFFSET + i * 4);
+               div_fctr = RREG32(mmPSOC_CPU_PLL_DIV_FACTOR_2);
+               div_sel = RREG32(mmPSOC_CPU_PLL_DIV_SEL_2);
+               nr = RREG32(mmPSOC_CPU_PLL_NR);
+               nf = RREG32(mmPSOC_CPU_PLL_NF);
+               od = RREG32(mmPSOC_CPU_PLL_OD);
 
-                       if (div_sel == DIV_SEL_REF_CLK ||
+               if (div_sel == DIV_SEL_REF_CLK ||
                                div_sel == DIV_SEL_DIVIDED_REF) {
-                               if (div_sel == DIV_SEL_REF_CLK)
-                                       freq = PLL_REF_CLK;
-                               else
-                                       freq = PLL_REF_CLK / (div_fctr + 1);
-                       } else if (div_sel == DIV_SEL_PLL_CLK ||
-                                       div_sel == DIV_SEL_DIVIDED_PLL) {
-                               pll_clk = PLL_REF_CLK * (nf + 1) /
-                                               ((nr + 1) * (od + 1));
-                               if (div_sel == DIV_SEL_PLL_CLK)
-                                       freq = pll_clk;
-                               else
-                                       freq = pll_clk / (div_fctr + 1);
-                       } else {
-                               dev_warn(hdev->dev,
-                                       "Received invalid div select value: %d",
-                                       div_sel);
-                       }
-
-                       pll_freq_arr[i] = freq;
+                       if (div_sel == DIV_SEL_REF_CLK)
+                               freq = PLL_REF_CLK;
+                       else
+                               freq = PLL_REF_CLK / (div_fctr + 1);
+               } else if (div_sel == DIV_SEL_PLL_CLK ||
+                       div_sel == DIV_SEL_DIVIDED_PLL) {
+                       pll_clk = PLL_REF_CLK * (nf + 1) /
+                                       ((nr + 1) * (od + 1));
+                       if (div_sel == DIV_SEL_PLL_CLK)
+                               freq = pll_clk;
+                       else
+                               freq = pll_clk / (div_fctr + 1);
+               } else {
+                       dev_warn(hdev->dev,
+                               "Received invalid div select value: %d",
+                               div_sel);
+                       freq = 0;
                }
        } else {
-               dev_err(hdev->dev, "Failed to fetch PLL frequency values\n");
-               return -EIO;
-       }
+               rc = hl_fw_cpucp_pll_info_get(hdev, CPU_PLL, pll_freq_arr);
 
-       return 0;
-}
-
-/**
- * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values
- *
- * @hdev: pointer to hl_device structure
- *
- */
-static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
-{
-       struct asic_fixed_properties *prop = &hdev->asic_prop;
-       u16 pll_freq[HL_PLL_NUM_OUTPUTS];
-       int rc;
+               if (rc)
+                       return rc;
 
-       rc = gaudi_fetch_pll_frequency(hdev, CPU_PLL, pll_freq);
-       if (rc)
-               return rc;
+               freq = pll_freq_arr[2];
+       }
 
-       prop->psoc_timestamp_frequency = pll_freq[2];
-       prop->psoc_pci_pll_nr = 0;
-       prop->psoc_pci_pll_nf = 0;
-       prop->psoc_pci_pll_od = 0;
-       prop->psoc_pci_pll_div_factor = 0;
+       prop->psoc_timestamp_frequency = freq;
+       prop->psoc_pci_pll_nr = nr;
+       prop->psoc_pci_pll_nf = nf;
+       prop->psoc_pci_pll_od = od;
+       prop->psoc_pci_pll_div_factor = div_fctr;
 
        return 0;
 }
@@ -884,11 +838,17 @@ static int gaudi_init_tpc_mem(struct hl_device *hdev)
        size_t fw_size;
        void *cpu_addr;
        dma_addr_t dma_handle;
-       int rc;
+       int rc, count = 5;
 
+again:
        rc = request_firmware(&fw, GAUDI_TPC_FW_FILE, hdev->dev);
+       if (rc == -EINTR && count-- > 0) {
+               msleep(50);
+               goto again;
+       }
+
        if (rc) {
-               dev_err(hdev->dev, "Firmware file %s is not found!\n",
+               dev_err(hdev->dev, "Failed to load firmware file %s\n",
                                GAUDI_TPC_FW_FILE);
                goto out;
        }
@@ -1110,7 +1070,7 @@ static void gaudi_collective_slave_init_job(struct hl_device *hdev,
                prop->collective_sob_id, queue_id);
 
        cb_size += gaudi_gen_signal_cb(hdev, job->user_cb,
-                       prop->collective_sob_id, cb_size);
+                       prop->collective_sob_id, cb_size, false);
 }
 
 static void gaudi_collective_wait_init_cs(struct hl_cs *cs)
@@ -2449,8 +2409,6 @@ static void gaudi_init_golden_registers(struct hl_device *hdev)
        gaudi_init_e2e(hdev);
        gaudi_init_hbm_cred(hdev);
 
-       hdev->asic_funcs->disable_clock_gating(hdev);
-
        for (tpc_id = 0, tpc_offset = 0;
                                tpc_id < TPC_NUMBER_OF_ENGINES;
                                tpc_id++, tpc_offset += TPC_CFG_OFFSET) {
@@ -3462,6 +3420,9 @@ static void gaudi_set_clock_gating(struct hl_device *hdev)
        if (hdev->in_debug)
                return;
 
+       if (!hdev->asic_prop.fw_security_disabled)
+               return;
+
        for (i = GAUDI_PCI_DMA_1, qman_offset = 0 ; i < GAUDI_HBM_DMA_1 ; i++) {
                enable = !!(hdev->clock_gating_mask &
                                (BIT_ULL(gaudi_dma_assignment[i])));
@@ -3513,7 +3474,7 @@ static void gaudi_disable_clock_gating(struct hl_device *hdev)
        u32 qman_offset;
        int i;
 
-       if (!(gaudi->hw_cap_initialized & HW_CAP_CLK_GATE))
+       if (!hdev->asic_prop.fw_security_disabled)
                return;
 
        for (i = 0, qman_offset = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) {
@@ -3806,7 +3767,7 @@ static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
 static void gaudi_pre_hw_init(struct hl_device *hdev)
 {
        /* Perform read from the device to make sure device is up */
-       RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+       RREG32(mmHW_STATE);
 
        if (hdev->asic_prop.fw_security_disabled) {
                /* Set the access through PCI bars (Linux driver only) as
@@ -3847,6 +3808,13 @@ static int gaudi_hw_init(struct hl_device *hdev)
                return rc;
        }
 
+       /* In case the clock gating was enabled in preboot we need to disable
+        * it here before touching the MME/TPC registers.
+        * There is no need to take clk gating mutex because when this function
+        * runs, no other relevant code can run
+        */
+       hdev->asic_funcs->disable_clock_gating(hdev);
+
        /* SRAM scrambler must be initialized after CPU is running from HBM */
        gaudi_init_scrambler_sram(hdev);
 
@@ -3885,7 +3853,7 @@ static int gaudi_hw_init(struct hl_device *hdev)
        }
 
        /* Perform read from the device to flush all configuration */
-       RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+       RREG32(mmHW_STATE);
 
        return 0;
 
@@ -3927,7 +3895,10 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
        /* I don't know what is the state of the CPU so make sure it is
         * stopped in any means necessary
         */
-       WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
+       if (hdev->asic_prop.hard_reset_done_by_fw)
+               WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_RST_DEV);
+       else
+               WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
 
        WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_HALT_MACHINE);
 
@@ -3971,11 +3942,15 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
 
                WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST,
                        1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT);
-       }
 
-       dev_info(hdev->dev,
-               "Issued HARD reset command, going to wait %dms\n",
-               reset_timeout_ms);
+               dev_info(hdev->dev,
+                       "Issued HARD reset command, going to wait %dms\n",
+                       reset_timeout_ms);
+       } else {
+               dev_info(hdev->dev,
+                       "Firmware performs HARD reset, going to wait %dms\n",
+                       reset_timeout_ms);
+       }
 
        /*
         * After hard reset, we can't poll the BTM_FSM register because the PSOC
@@ -7936,7 +7911,7 @@ static u32 gaudi_get_wait_cb_size(struct hl_device *hdev)
 }
 
 static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
-                               u32 size)
+                               u32 size, bool eb)
 {
        struct hl_cb *cb = (struct hl_cb *) data;
        struct packet_msg_short *pkt;
@@ -7953,7 +7928,7 @@ static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
        ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */
        ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 3); /* W_S SOB base */
        ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
-       ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 1);
+       ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, eb);
        ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
        ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1);
 
index f2d91f4..a7ab2d7 100644 (file)
 #define MME_ACC_OFFSET         (mmMME1_ACC_BASE - mmMME0_ACC_BASE)
 #define SRAM_BANK_OFFSET       (mmSRAM_Y0_X1_RTR_BASE - mmSRAM_Y0_X0_RTR_BASE)
 
-#define PLL_NR_OFFSET          0
-#define PLL_NF_OFFSET          (mmPSOC_CPU_PLL_NF - mmPSOC_CPU_PLL_NR)
-#define PLL_OD_OFFSET          (mmPSOC_CPU_PLL_OD - mmPSOC_CPU_PLL_NR)
-#define PLL_DIV_FACTOR_0_OFFSET        (mmPSOC_CPU_PLL_DIV_FACTOR_0 - \
-                               mmPSOC_CPU_PLL_NR)
-#define PLL_DIV_SEL_0_OFFSET   (mmPSOC_CPU_PLL_DIV_SEL_0 - mmPSOC_CPU_PLL_NR)
-
 #define NUM_OF_SOB_IN_BLOCK            \
        (((mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_2047 - \
        mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0) + 4) >> 2)
index 2e3612e..88a09d4 100644 (file)
@@ -9,6 +9,7 @@
 #include "../include/gaudi/gaudi_coresight.h"
 #include "../include/gaudi/asic_reg/gaudi_regs.h"
 #include "../include/gaudi/gaudi_masks.h"
+#include "../include/gaudi/gaudi_reg_map.h"
 
 #include <uapi/misc/habanalabs.h>
 #define SPMU_SECTION_SIZE              MME0_ACC_SPMU_MAX_OFFSET
@@ -874,7 +875,7 @@ int gaudi_debug_coresight(struct hl_device *hdev, void *data)
        }
 
        /* Perform read from the device to flush all configuration */
-       RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+       RREG32(mmHW_STATE);
 
        return rc;
 }
index 3e5eb9e..b8b4aa6 100644 (file)
@@ -613,12 +613,6 @@ static int goya_early_init(struct hl_device *hdev)
        if (rc)
                goto free_queue_props;
 
-       if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
-               dev_info(hdev->dev,
-                       "H/W state is dirty, must reset before initializing\n");
-               hdev->asic_funcs->hw_fini(hdev, true);
-       }
-
        /* Before continuing in the initialization, we need to read the preboot
         * version to determine whether we run with a security-enabled firmware
         */
@@ -631,6 +625,12 @@ static int goya_early_init(struct hl_device *hdev)
                goto pci_fini;
        }
 
+       if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
+               dev_info(hdev->dev,
+                       "H/W state is dirty, must reset before initializing\n");
+               hdev->asic_funcs->hw_fini(hdev, true);
+       }
+
        if (!hdev->pldm) {
                val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
                if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK)
@@ -694,32 +694,47 @@ static void goya_qman0_set_security(struct hl_device *hdev, bool secure)
 static void goya_fetch_psoc_frequency(struct hl_device *hdev)
 {
        struct asic_fixed_properties *prop = &hdev->asic_prop;
-       u32 trace_freq = 0;
-       u32 pll_clk = 0;
-       u32 div_fctr = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
-       u32 div_sel = RREG32(mmPSOC_PCI_PLL_DIV_SEL_1);
-       u32 nr = RREG32(mmPSOC_PCI_PLL_NR);
-       u32 nf = RREG32(mmPSOC_PCI_PLL_NF);
-       u32 od = RREG32(mmPSOC_PCI_PLL_OD);
-
-       if (div_sel == DIV_SEL_REF_CLK || div_sel == DIV_SEL_DIVIDED_REF) {
-               if (div_sel == DIV_SEL_REF_CLK)
-                       trace_freq = PLL_REF_CLK;
-               else
-                       trace_freq = PLL_REF_CLK / (div_fctr + 1);
-       } else if (div_sel == DIV_SEL_PLL_CLK ||
-                                       div_sel == DIV_SEL_DIVIDED_PLL) {
-               pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1));
-               if (div_sel == DIV_SEL_PLL_CLK)
-                       trace_freq = pll_clk;
-               else
-                       trace_freq = pll_clk / (div_fctr + 1);
+       u32 nr = 0, nf = 0, od = 0, div_fctr = 0, pll_clk, div_sel;
+       u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS], freq;
+       int rc;
+
+       if (hdev->asic_prop.fw_security_disabled) {
+               div_fctr = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
+               div_sel = RREG32(mmPSOC_PCI_PLL_DIV_SEL_1);
+               nr = RREG32(mmPSOC_PCI_PLL_NR);
+               nf = RREG32(mmPSOC_PCI_PLL_NF);
+               od = RREG32(mmPSOC_PCI_PLL_OD);
+
+               if (div_sel == DIV_SEL_REF_CLK ||
+                               div_sel == DIV_SEL_DIVIDED_REF) {
+                       if (div_sel == DIV_SEL_REF_CLK)
+                               freq = PLL_REF_CLK;
+                       else
+                               freq = PLL_REF_CLK / (div_fctr + 1);
+               } else if (div_sel == DIV_SEL_PLL_CLK ||
+                               div_sel == DIV_SEL_DIVIDED_PLL) {
+                       pll_clk = PLL_REF_CLK * (nf + 1) /
+                                       ((nr + 1) * (od + 1));
+                       if (div_sel == DIV_SEL_PLL_CLK)
+                               freq = pll_clk;
+                       else
+                               freq = pll_clk / (div_fctr + 1);
+               } else {
+                       dev_warn(hdev->dev,
+                               "Received invalid div select value: %d",
+                               div_sel);
+                       freq = 0;
+               }
        } else {
-               dev_warn(hdev->dev,
-                       "Received invalid div select value: %d", div_sel);
+               rc = hl_fw_cpucp_pll_info_get(hdev, PCI_PLL, pll_freq_arr);
+
+               if (rc)
+                       return;
+
+               freq = pll_freq_arr[1];
        }
 
-       prop->psoc_timestamp_frequency = trace_freq;
+       prop->psoc_timestamp_frequency = freq;
        prop->psoc_pci_pll_nr = nr;
        prop->psoc_pci_pll_nf = nf;
        prop->psoc_pci_pll_od = od;
@@ -5324,7 +5339,7 @@ static u32 goya_get_wait_cb_size(struct hl_device *hdev)
 }
 
 static u32 goya_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
-               u32 size)
+                               u32 size, bool eb)
 {
        return 0;
 }
index e5801ec..b637dfd 100644 (file)
  *                                     implemented. This means that FW will
  *                                     perform hard reset procedure on
  *                                     receiving the halt-machine event.
- *                                     Initialized in: linux
+ *                                     Initialized in: preboot, u-boot, linux
  *
  * CPU_BOOT_DEV_STS0_PLL_INFO_EN       FW retrieval of PLL info is enabled.
  *                                     Initialized in: linux
  *
+ * CPU_BOOT_DEV_STS0_CLK_GATE_EN       Clock Gating enabled.
+ *                                     FW initialized Clock Gating.
+ *                                     Initialized in: preboot
+ *
  * CPU_BOOT_DEV_STS0_ENABLED           Device status register enabled.
  *                                     This is a main indication that the
  *                                     running FW populates the device status
 #define CPU_BOOT_DEV_STS0_DRAM_SCR_EN                  (1 << 9)
 #define CPU_BOOT_DEV_STS0_FW_HARD_RST_EN               (1 << 10)
 #define CPU_BOOT_DEV_STS0_PLL_INFO_EN                  (1 << 11)
+#define CPU_BOOT_DEV_STS0_CLK_GATE_EN                  (1 << 13)
 #define CPU_BOOT_DEV_STS0_ENABLED                      (1 << 31)
 
 enum cpu_boot_status {
@@ -204,6 +209,8 @@ enum kmd_msg {
        KMD_MSG_GOTO_WFE,
        KMD_MSG_FIT_RDY,
        KMD_MSG_SKIP_BMC,
+       RESERVED,
+       KMD_MSG_RST_DEV,
 };
 
 enum cpu_msg_status {
index 951b37d..41cab29 100644 (file)
@@ -55,12 +55,23 @@ static int pvpanic_mmio_probe(struct platform_device *pdev)
        struct resource *res;
 
        res = platform_get_mem_or_io(pdev, 0);
-       if (res && resource_type(res) == IORESOURCE_IO)
+       if (!res)
+               return -EINVAL;
+
+       switch (resource_type(res)) {
+       case IORESOURCE_IO:
                base = devm_ioport_map(dev, res->start, resource_size(res));
-       else
+               if (!base)
+                       return -ENOMEM;
+               break;
+       case IORESOURCE_MEM:
                base = devm_ioremap_resource(dev, res);
-       if (IS_ERR(base))
-               return PTR_ERR(base);
+               if (IS_ERR(base))
+                       return PTR_ERR(base);
+               break;
+       default:
+               return -EINVAL;
+       }
 
        atomic_notifier_chain_register(&panic_notifier_list,
                                       &pvpanic_panic_nb);
index de7cb03..002426e 100644 (file)
@@ -384,8 +384,10 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
                     "merging was advertised but not possible");
        blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));
 
-       if (mmc_card_mmc(card))
+       if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
                block_size = card->ext_csd.data_sector_size;
+               WARN_ON(block_size != 512 && block_size != 4096);
+       }
 
        blk_queue_logical_block_size(mq->queue, block_size);
        /*
index bbf3496..f9780c6 100644 (file)
@@ -314,11 +314,7 @@ err_clk:
 
 static void sdhci_brcmstb_shutdown(struct platform_device *pdev)
 {
-       int ret;
-
-       ret = sdhci_pltfm_unregister(pdev);
-       if (ret)
-               dev_err(&pdev->dev, "failed to shutdown\n");
+       sdhci_pltfm_suspend(&pdev->dev);
 }
 
 MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
index 4b67379..d90020e 100644 (file)
@@ -16,6 +16,8 @@
 
 #include "sdhci-pltfm.h"
 
+#define SDHCI_DWCMSHC_ARG2_STUFF       GENMASK(31, 16)
+
 /* DWCMSHC specific Mode Select value */
 #define DWCMSHC_CTRL_HS400             0x7
 
@@ -49,6 +51,29 @@ static void dwcmshc_adma_write_desc(struct sdhci_host *host, void **desc,
        sdhci_adma_write_desc(host, desc, addr, len, cmd);
 }
 
+static void dwcmshc_check_auto_cmd23(struct mmc_host *mmc,
+                                    struct mmc_request *mrq)
+{
+       struct sdhci_host *host = mmc_priv(mmc);
+
+       /*
+        * No matter V4 is enabled or not, ARGUMENT2 register is 32-bit
+        * block count register which doesn't support stuff bits of
+        * CMD23 argument on dwcmsch host controller.
+        */
+       if (mrq->sbc && (mrq->sbc->arg & SDHCI_DWCMSHC_ARG2_STUFF))
+               host->flags &= ~SDHCI_AUTO_CMD23;
+       else
+               host->flags |= SDHCI_AUTO_CMD23;
+}
+
+static void dwcmshc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+       dwcmshc_check_auto_cmd23(mmc, mrq);
+
+       sdhci_request(mmc, mrq);
+}
+
 static void dwcmshc_set_uhs_signaling(struct sdhci_host *host,
                                      unsigned int timing)
 {
@@ -133,6 +158,8 @@ static int dwcmshc_probe(struct platform_device *pdev)
 
        sdhci_get_of_property(pdev);
 
+       host->mmc_host_ops.request = dwcmshc_request;
+
        err = sdhci_add_host(host);
        if (err)
                goto err_clk;
index c67611f..d19eef5 100644 (file)
@@ -168,7 +168,12 @@ static void xenon_reset_exit(struct sdhci_host *host,
        /* Disable tuning request and auto-retuning again */
        xenon_retune_setup(host);
 
-       xenon_set_acg(host, true);
+       /*
+        * The ACG should be turned off at the early init time, in order
+        * to solve a possible issues with the 1.8V regulator stabilization.
+        * The feature is enabled in later stage.
+        */
+       xenon_set_acg(host, false);
 
        xenon_set_sdclk_off_idle(host, sdhc_id, false);
 
index 5cdf05b..3fa8c22 100644 (file)
@@ -1615,7 +1615,7 @@ static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
        /* Extract interleaved payload data and ECC bits */
        for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
                if (buf)
-                       nand_extract_bits(buf, step * eccsize, tmp_buf,
+                       nand_extract_bits(buf, step * eccsize * 8, tmp_buf,
                                          src_bit_off, eccsize * 8);
                src_bit_off += eccsize * 8;
 
index fdb112e..a304fda 100644 (file)
@@ -579,7 +579,7 @@ static int ebu_nand_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct ebu_nand_controller *ebu_host;
        struct nand_chip *nand;
-       struct mtd_info *mtd = NULL;
+       struct mtd_info *mtd;
        struct resource *res;
        char *resname;
        int ret;
@@ -647,12 +647,13 @@ static int ebu_nand_probe(struct platform_device *pdev)
               ebu_host->ebu + EBU_ADDR_SEL(cs));
 
        nand_set_flash_node(&ebu_host->chip, dev->of_node);
+
+       mtd = nand_to_mtd(&ebu_host->chip);
        if (!mtd->name) {
                dev_err(ebu_host->dev, "NAND label property is mandatory\n");
                return -EINVAL;
        }
 
-       mtd = nand_to_mtd(&ebu_host->chip);
        mtd->dev.parent = dev;
        ebu_host->dev = dev;
 
index f2b9250..0750121 100644 (file)
@@ -2210,6 +2210,9 @@ static int ns_attach_chip(struct nand_chip *chip)
 {
        unsigned int eccsteps, eccbytes;
 
+       chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+       chip->ecc.algo = bch ? NAND_ECC_ALGO_BCH : NAND_ECC_ALGO_HAMMING;
+
        if (!bch)
                return 0;
 
@@ -2233,8 +2236,6 @@ static int ns_attach_chip(struct nand_chip *chip)
                return -EINVAL;
        }
 
-       chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
-       chip->ecc.algo = NAND_ECC_ALGO_BCH;
        chip->ecc.size = 512;
        chip->ecc.strength = bch;
        chip->ecc.bytes = eccbytes;
@@ -2273,8 +2274,6 @@ static int __init ns_init_module(void)
        nsmtd       = nand_to_mtd(chip);
        nand_set_controller_data(chip, (void *)ns);
 
-       chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
-       chip->ecc.algo   = NAND_ECC_ALGO_HAMMING;
        /* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
        /* and 'badblocks' parameters to work */
        chip->options   |= NAND_SKIP_BBTSCAN;
index fbb9955..2c3e65c 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/jiffies.h>
 #include <linux/sched.h>
 #include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-bch.h>
 #include <linux/mtd/rawnand.h>
 #include <linux/mtd/partitions.h>
 #include <linux/omap-dma.h>
@@ -1866,18 +1867,19 @@ static const struct mtd_ooblayout_ops omap_ooblayout_ops = {
 static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
                                 struct mtd_oob_region *oobregion)
 {
-       struct nand_chip *chip = mtd_to_nand(mtd);
+       struct nand_device *nand = mtd_to_nanddev(mtd);
+       const struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
        int off = BADBLOCK_MARKER_LENGTH;
 
-       if (section >= chip->ecc.steps)
+       if (section >= engine_conf->nsteps)
                return -ERANGE;
 
        /*
         * When SW correction is employed, one OMAP specific marker byte is
         * reserved after each ECC step.
         */
-       oobregion->offset = off + (section * (chip->ecc.bytes + 1));
-       oobregion->length = chip->ecc.bytes;
+       oobregion->offset = off + (section * (engine_conf->code_size + 1));
+       oobregion->length = engine_conf->code_size;
 
        return 0;
 }
@@ -1885,7 +1887,8 @@ static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
 static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
                                  struct mtd_oob_region *oobregion)
 {
-       struct nand_chip *chip = mtd_to_nand(mtd);
+       struct nand_device *nand = mtd_to_nanddev(mtd);
+       const struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
        int off = BADBLOCK_MARKER_LENGTH;
 
        if (section)
@@ -1895,7 +1898,7 @@ static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
         * When SW correction is employed, one OMAP specific marker byte is
         * reserved after each ECC step.
         */
-       off += ((chip->ecc.bytes + 1) * chip->ecc.steps);
+       off += ((engine_conf->code_size + 1) * engine_conf->nsteps);
        if (off >= mtd->oobsize)
                return -ERANGE;
 
index 8ea545b..61d932c 100644 (file)
@@ -343,6 +343,7 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
                                      const struct nand_page_io_req *req)
 {
        struct nand_device *nand = spinand_to_nand(spinand);
+       struct mtd_info *mtd = spinand_to_mtd(spinand);
        struct spi_mem_dirmap_desc *rdesc;
        unsigned int nbytes = 0;
        void *buf = NULL;
@@ -382,9 +383,16 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
                memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
                       req->datalen);
 
-       if (req->ooblen)
-               memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
-                      req->ooblen);
+       if (req->ooblen) {
+               if (req->mode == MTD_OPS_AUTO_OOB)
+                       mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
+                                                   spinand->oobbuf,
+                                                   req->ooboffs,
+                                                   req->ooblen);
+               else
+                       memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
+                              req->ooblen);
+       }
 
        return 0;
 }
index 85ebd2b..85de5f9 100644 (file)
@@ -380,7 +380,7 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                goto free_dst;
 
        min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
-               BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr);
+               BAREUDP_BASE_HLEN + info->options_len + sizeof(struct ipv6hdr);
 
        err = skb_cow_head(skb, min_headroom);
        if (unlikely(err))
@@ -534,6 +534,7 @@ static void bareudp_setup(struct net_device *dev)
        SET_NETDEV_DEVTYPE(dev, &bareudp_type);
        dev->features    |= NETIF_F_SG | NETIF_F_HW_CSUM;
        dev->features    |= NETIF_F_RXCSUM;
+       dev->features    |= NETIF_F_LLTX;
        dev->features    |= NETIF_F_GSO_SOFTWARE;
        dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
        dev->hw_features |= NETIF_F_GSO_SOFTWARE;
@@ -644,11 +645,20 @@ static int bareudp_link_config(struct net_device *dev,
        return 0;
 }
 
+static void bareudp_dellink(struct net_device *dev, struct list_head *head)
+{
+       struct bareudp_dev *bareudp = netdev_priv(dev);
+
+       list_del(&bareudp->next);
+       unregister_netdevice_queue(dev, head);
+}
+
 static int bareudp_newlink(struct net *net, struct net_device *dev,
                           struct nlattr *tb[], struct nlattr *data[],
                           struct netlink_ext_ack *extack)
 {
        struct bareudp_conf conf;
+       LIST_HEAD(list_kill);
        int err;
 
        err = bareudp2info(data, &conf, extack);
@@ -661,17 +671,14 @@ static int bareudp_newlink(struct net *net, struct net_device *dev,
 
        err = bareudp_link_config(dev, tb);
        if (err)
-               return err;
+               goto err_unconfig;
 
        return 0;
-}
-
-static void bareudp_dellink(struct net_device *dev, struct list_head *head)
-{
-       struct bareudp_dev *bareudp = netdev_priv(dev);
 
-       list_del(&bareudp->next);
-       unregister_netdevice_queue(dev, head);
+err_unconfig:
+       bareudp_dellink(dev, &list_kill);
+       unregister_netdevice_many(&list_kill);
+       return err;
 }
 
 static size_t bareudp_get_size(const struct net_device *dev)
index 4249709..1c28ead 100644 (file)
@@ -123,6 +123,7 @@ config CAN_JANZ_ICAN3
 config CAN_KVASER_PCIEFD
        depends on PCI
        tristate "Kvaser PCIe FD cards"
+       select CRC32
          help
          This is a driver for the Kvaser PCI Express CAN FD family.
 
index 3486704..8b1ae02 100644 (file)
@@ -592,11 +592,11 @@ static void can_restart(struct net_device *dev)
 
        cf->can_id |= CAN_ERR_RESTARTED;
 
-       netif_rx_ni(skb);
-
        stats->rx_packets++;
        stats->rx_bytes += cf->len;
 
+       netif_rx_ni(skb);
+
 restart:
        netdev_dbg(dev, "restarted\n");
        priv->can_stats.restarts++;
index 2c9f124..da551fd 100644 (file)
@@ -1852,8 +1852,6 @@ EXPORT_SYMBOL_GPL(m_can_class_register);
 void m_can_class_unregister(struct m_can_classdev *cdev)
 {
        unregister_candev(cdev->net);
-
-       m_can_clk_stop(cdev);
 }
 EXPORT_SYMBOL_GPL(m_can_class_unregister);
 
index 24c737c..970f0e9 100644 (file)
@@ -131,30 +131,6 @@ static inline struct tcan4x5x_priv *cdev_to_priv(struct m_can_classdev *cdev)
 
 }
 
-static struct can_bittiming_const tcan4x5x_bittiming_const = {
-       .name = DEVICE_NAME,
-       .tseg1_min = 2,
-       .tseg1_max = 31,
-       .tseg2_min = 2,
-       .tseg2_max = 16,
-       .sjw_max = 16,
-       .brp_min = 1,
-       .brp_max = 32,
-       .brp_inc = 1,
-};
-
-static struct can_bittiming_const tcan4x5x_data_bittiming_const = {
-       .name = DEVICE_NAME,
-       .tseg1_min = 1,
-       .tseg1_max = 32,
-       .tseg2_min = 1,
-       .tseg2_max = 16,
-       .sjw_max = 16,
-       .brp_min = 1,
-       .brp_max = 32,
-       .brp_inc = 1,
-};
-
 static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv)
 {
        int wake_state = 0;
@@ -469,8 +445,6 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
        mcan_class->dev = &spi->dev;
        mcan_class->ops = &tcan4x5x_ops;
        mcan_class->is_peripheral = true;
-       mcan_class->bit_timing = &tcan4x5x_bittiming_const;
-       mcan_class->data_timing = &tcan4x5x_data_bittiming_const;
        mcan_class->net->irq = spi->irq;
 
        spi_set_drvdata(spi, priv);
index 8d36101..29cabc2 100644 (file)
@@ -1,10 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0
 config CAN_RCAR
-       tristate "Renesas R-Car CAN controller"
+       tristate "Renesas R-Car and RZ/G CAN controller"
        depends on ARCH_RENESAS || ARM
        help
          Say Y here if you want to use CAN controller found on Renesas R-Car
-         SoCs.
+         or RZ/G SoCs.
 
          To compile this driver as a module, choose M here: the module will
          be called rcar_can.
index 77129d5..f07e8b7 100644 (file)
@@ -1368,13 +1368,10 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
                struct mcp251xfd_tx_ring *tx_ring = priv->tx;
                struct spi_transfer *last_xfer;
 
-               tx_ring->tail += len;
-
                /* Increment the TEF FIFO tail pointer 'len' times in
                 * a single SPI message.
-                */
-
-               /* Note:
+                *
+                * Note:
                 *
                 * "cs_change == 1" on the last transfer results in an
                 * active chip select after the complete SPI
@@ -1391,6 +1388,8 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
                if (err)
                        return err;
 
+               tx_ring->tail += len;
+
                err = mcp251xfd_check_tef_tail(priv);
                if (err)
                        return err;
@@ -1492,7 +1491,7 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
        else
                skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd);
 
-       if (!cfd) {
+       if (!skb) {
                stats->rx_dropped++;
                return 0;
        }
@@ -1553,10 +1552,8 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
 
                /* Increment the RX FIFO tail pointer 'len' times in a
                 * single SPI message.
-                */
-               ring->tail += len;
-
-               /* Note:
+                *
+                * Note:
                 *
                 * "cs_change == 1" on the last transfer results in an
                 * active chip select after the complete SPI
@@ -1572,6 +1569,8 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
                last_xfer->cs_change = 1;
                if (err)
                        return err;
+
+               ring->tail += len;
        }
 
        return 0;
index 61631f4..f347ecc 100644 (file)
@@ -514,11 +514,11 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
        else
                memcpy(cfd->data, rm->d, cfd->len);
 
-       peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
-
        netdev->stats.rx_packets++;
        netdev->stats.rx_bytes += cfd->len;
 
+       peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
+
        return 0;
 }
 
@@ -580,11 +580,11 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
        if (!skb)
                return -ENOMEM;
 
-       peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
-
        netdev->stats.rx_packets++;
        netdev->stats.rx_bytes += cf->len;
 
+       peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
+
        return 0;
 }
 
index fa47bab..f9a524c 100644 (file)
@@ -39,6 +39,7 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
        struct net_device *peer;
        struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
        struct net_device_stats *peerstats, *srcstats = &dev->stats;
+       u8 len;
 
        if (can_dropped_invalid_skb(dev, skb))
                return NETDEV_TX_OK;
@@ -61,12 +62,13 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
        skb->dev        = peer;
        skb->ip_summed  = CHECKSUM_UNNECESSARY;
 
+       len = cfd->len;
        if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
                srcstats->tx_packets++;
-               srcstats->tx_bytes += cfd->len;
+               srcstats->tx_bytes += len;
                peerstats = &peer->stats;
                peerstats->rx_packets++;
-               peerstats->rx_bytes += cfd->len;
+               peerstats->rx_bytes += len;
        }
 
 out_unlock:
index 288b5a5..95c7fa1 100644 (file)
@@ -1404,7 +1404,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
            !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED))
                return -EINVAL;
 
-       if (vlan->vid_end > dev->num_vlans)
+       if (vlan->vid_end >= dev->num_vlans)
                return -ERANGE;
 
        b53_enable_vlan(dev, true, ds->vlan_filtering);
index 222dd35..e011911 100644 (file)
@@ -4,6 +4,7 @@ config NET_DSA_HIRSCHMANN_HELLCREEK
        depends on HAS_IOMEM
        depends on NET_DSA
        depends on PTP_1588_CLOCK
+       depends on LEDS_CLASS
        select NET_DSA_TAG_HELLCREEK
        help
          This driver adds support for Hirschmann Hellcreek TSN switches.
index 09701c1..662e68a 100644 (file)
@@ -92,9 +92,7 @@
                                         GSWIP_MDIO_PHY_FDUP_MASK)
 
 /* GSWIP MII Registers */
-#define GSWIP_MII_CFG0                 0x00
-#define GSWIP_MII_CFG1                 0x02
-#define GSWIP_MII_CFG5                 0x04
+#define GSWIP_MII_CFGp(p)              (0x2 * (p))
 #define  GSWIP_MII_CFG_EN              BIT(14)
 #define  GSWIP_MII_CFG_LDCLKDIS                BIT(12)
 #define  GSWIP_MII_CFG_MODE_MIIP       0x0
@@ -392,17 +390,9 @@ static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
 static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
                               int port)
 {
-       switch (port) {
-       case 0:
-               gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG0);
-               break;
-       case 1:
-               gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG1);
-               break;
-       case 5:
-               gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG5);
-               break;
-       }
+       /* There's no MII_CFG register for the CPU port */
+       if (!dsa_is_cpu_port(priv->ds, port))
+               gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port));
 }
 
 static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
@@ -822,9 +812,8 @@ static int gswip_setup(struct dsa_switch *ds)
        gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
 
        /* Disable the xMII link */
-       gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 0);
-       gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 1);
-       gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 5);
+       for (i = 0; i < priv->hw_info->max_ports; i++)
+               gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, i);
 
        /* enable special tag insertion on cpu port */
        gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
@@ -1447,11 +1436,12 @@ static void gswip_phylink_validate(struct dsa_switch *ds, int port,
        phylink_set(mask, Pause);
        phylink_set(mask, Asym_Pause);
 
-       /* With the exclusion of MII and Reverse MII, we support Gigabit,
-        * including Half duplex
+       /* With the exclusion of MII, Reverse MII and Reduced MII, we
+        * support Gigabit, including Half duplex
         */
        if (state->interface != PHY_INTERFACE_MODE_MII &&
-           state->interface != PHY_INTERFACE_MODE_REVMII) {
+           state->interface != PHY_INTERFACE_MODE_REVMII &&
+           state->interface != PHY_INTERFACE_MODE_RMII) {
                phylink_set(mask, 1000baseT_Full);
                phylink_set(mask, 1000baseT_Half);
        }
@@ -1541,9 +1531,7 @@ static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
 {
        struct gswip_priv *priv = ds->priv;
 
-       /* Enable the xMII interface only for the external PHY */
-       if (interface != PHY_INTERFACE_MODE_INTERNAL)
-               gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
+       gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
 }
 
 static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
index 66ddf67..7b96396 100644 (file)
@@ -351,6 +351,10 @@ int mv88e6250_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
                if (err)
                        return err;
 
+               err = mv88e6185_g1_stu_data_read(chip, entry);
+               if (err)
+                       return err;
+
                /* VTU DBNum[3:0] are located in VTU Operation 3:0
                 * VTU DBNum[5:4] are located in VTU Operation 9:8
                 */
index efb33c0..cec2018 100644 (file)
@@ -19,7 +19,6 @@ if NET_VENDOR_AQUANTIA
 config AQTION
        tristate "aQuantia AQtion(tm) Support"
        depends on PCI
-       depends on X86_64 || ARM64 || COMPILE_TEST
        depends on MACSEC || MACSEC=n
        help
          This enables the support for the aQuantia AQtion(tm) Ethernet card.
index 0fdd19d..0404aaf 100644 (file)
@@ -2503,8 +2503,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
        priv = netdev_priv(dev);
 
        priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
-       if (IS_ERR(priv->clk))
-               return PTR_ERR(priv->clk);
+       if (IS_ERR(priv->clk)) {
+               ret = PTR_ERR(priv->clk);
+               goto err_free_netdev;
+       }
 
        /* Allocate number of TX rings */
        priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
@@ -2577,6 +2579,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
                         NETIF_F_HW_VLAN_CTAG_TX;
        dev->hw_features |= dev->features;
        dev->vlan_features |= dev->features;
+       dev->max_mtu = UMAC_MAX_MTU_SIZE;
 
        /* Request the WOL interrupt and advertise suspend if available */
        priv->wol_irq_disabled = 1;
index 4edd6f8..d10e4f8 100644 (file)
@@ -6790,8 +6790,10 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
                ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
                if (!ctx->tqm_fp_rings_count)
                        ctx->tqm_fp_rings_count = bp->max_q;
+               else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
+                       ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
 
-               tqm_rings = ctx->tqm_fp_rings_count + 1;
+               tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
                ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
                if (!ctx_pg) {
                        kfree(ctx);
@@ -6925,7 +6927,8 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
             pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
             pg_dir = &req.tqm_sp_page_dir,
             ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
-            i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
+            i < BNXT_MAX_TQM_RINGS;
+            i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
                if (!(enables & ena))
                        continue;
 
@@ -12887,10 +12890,10 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
  */
 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
 {
+       pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct bnxt *bp = netdev_priv(netdev);
        int err = 0, off;
-       pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
 
        netdev_info(bp->dev, "PCI Slot Reset\n");
 
@@ -12919,22 +12922,8 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
                pci_save_state(pdev);
 
                err = bnxt_hwrm_func_reset(bp);
-               if (!err) {
-                       err = bnxt_hwrm_func_qcaps(bp);
-                       if (!err && netif_running(netdev))
-                               err = bnxt_open(netdev);
-               }
-               bnxt_ulp_start(bp, err);
-               if (!err) {
-                       bnxt_reenable_sriov(bp);
+               if (!err)
                        result = PCI_ERS_RESULT_RECOVERED;
-               }
-       }
-
-       if (result != PCI_ERS_RESULT_RECOVERED) {
-               if (netif_running(netdev))
-                       dev_close(netdev);
-               pci_disable_device(pdev);
        }
 
        rtnl_unlock();
@@ -12952,10 +12941,21 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
 static void bnxt_io_resume(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
+       struct bnxt *bp = netdev_priv(netdev);
+       int err;
 
+       netdev_info(bp->dev, "PCI Slot Resume\n");
        rtnl_lock();
 
-       netif_device_attach(netdev);
+       err = bnxt_hwrm_func_qcaps(bp);
+       if (!err && netif_running(netdev))
+               err = bnxt_open(netdev);
+
+       bnxt_ulp_start(bp, err);
+       if (!err) {
+               bnxt_reenable_sriov(bp);
+               netif_device_attach(netdev);
+       }
 
        rtnl_unlock();
 }
index 950ea26..51996c8 100644 (file)
@@ -1436,6 +1436,11 @@ struct bnxt_ctx_pg_info {
        struct bnxt_ctx_pg_info **ctx_pg_tbl;
 };
 
+#define BNXT_MAX_TQM_SP_RINGS          1
+#define BNXT_MAX_TQM_FP_RINGS          8
+#define BNXT_MAX_TQM_RINGS             \
+       (BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_RINGS)
+
 struct bnxt_ctx_mem_info {
        u32     qp_max_entries;
        u16     qp_min_qp1_entries;
@@ -1474,7 +1479,7 @@ struct bnxt_ctx_mem_info {
        struct bnxt_ctx_pg_info stat_mem;
        struct bnxt_ctx_pg_info mrav_mem;
        struct bnxt_ctx_pg_info tim_mem;
-       struct bnxt_ctx_pg_info *tqm_mem[9];
+       struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TQM_RINGS];
 };
 
 struct bnxt_fw_health {
index 9ff79d5..2f8b193 100644 (file)
@@ -2532,7 +2532,7 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
 
                if (rc && ((struct hwrm_err_output *)&resp)->cmd_err ==
                    NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
-                       install.flags |=
+                       install.flags =
                                cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
 
                        rc = _hwrm_send_message_silent(bp, &install,
@@ -2546,6 +2546,7 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware
                                 * UPDATE directory and try the flash again
                                 */
                                defrag_attempted = true;
+                               install.flags = 0;
                                rc = __bnxt_flash_nvram(bp->dev,
                                                        BNX_DIR_TYPE_UPDATE,
                                                        BNX_DIR_ORDINAL_FIRST,
index 8c8368c..64dbbb0 100644 (file)
@@ -222,8 +222,12 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp)
 
 int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
 {
-       if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
-               return BNXT_MIN_ROCE_STAT_CTXS;
+       if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+               struct bnxt_en_dev *edev = bp->edev;
+
+               if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
+                       return BNXT_MIN_ROCE_STAT_CTXS;
+       }
 
        return 0;
 }
index d5d9109..814a5b1 100644 (file)
@@ -467,7 +467,7 @@ static void macb_set_tx_clk(struct macb *bp, int speed)
 {
        long ferr, rate, rate_rounded;
 
-       if (!bp->tx_clk || !(bp->caps & MACB_CAPS_CLK_HW_CHG))
+       if (!bp->tx_clk || (bp->caps & MACB_CAPS_CLK_HW_CHG))
                return;
 
        switch (speed) {
index 92473dd..22a0220 100644 (file)
 #define TCB_L2T_IX_M           0xfffULL
 #define TCB_L2T_IX_V(x)                ((x) << TCB_L2T_IX_S)
 
+#define TCB_T_FLAGS_W           1
+#define TCB_T_FLAGS_S           0
+#define TCB_T_FLAGS_M           0xffffffffffffffffULL
+#define TCB_T_FLAGS_V(x)        ((__u64)(x) << TCB_T_FLAGS_S)
+
+#define TCB_FIELD_COOKIE_TFLAG 1
+
 #define TCB_SMAC_SEL_W         0
 #define TCB_SMAC_SEL_S         24
 #define TCB_SMAC_SEL_M         0xffULL
index 72bb123..9e23780 100644 (file)
@@ -575,7 +575,11 @@ int send_tx_flowc_wr(struct sock *sk, int compl,
 void chtls_tcp_push(struct sock *sk, int flags);
 int chtls_push_frames(struct chtls_sock *csk, int comp);
 int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val);
+void chtls_set_tcb_field_rpl_skb(struct sock *sk, u16 word,
+                                u64 mask, u64 val, u8 cookie,
+                                int through_l2t);
 int chtls_setkey(struct chtls_sock *csk, u32 keylen, u32 mode, int cipher_type);
+void chtls_set_quiesce_ctrl(struct sock *sk, int val);
 void skb_entail(struct sock *sk, struct sk_buff *skb, int flags);
 unsigned int keyid_to_addr(int start_addr, int keyid);
 void free_tls_keyid(struct sock *sk);
index a0e0d8a..e5cfbe1 100644 (file)
@@ -32,6 +32,7 @@
 #include "chtls.h"
 #include "chtls_cm.h"
 #include "clip_tbl.h"
+#include "t4_tcb.h"
 
 /*
  * State transitions and actions for close.  Note that if we are in SYN_SENT
@@ -267,7 +268,9 @@ static void chtls_send_reset(struct sock *sk, int mode, struct sk_buff *skb)
        if (sk->sk_state != TCP_SYN_RECV)
                chtls_send_abort(sk, mode, skb);
        else
-               goto out;
+               chtls_set_tcb_field_rpl_skb(sk, TCB_T_FLAGS_W,
+                                           TCB_T_FLAGS_V(TCB_T_FLAGS_M), 0,
+                                           TCB_FIELD_COOKIE_TFLAG, 1);
 
        return;
 out:
@@ -621,7 +624,7 @@ static void chtls_reset_synq(struct listen_ctx *listen_ctx)
 
        while (!skb_queue_empty(&listen_ctx->synq)) {
                struct chtls_sock *csk =
-                       container_of((struct synq *)__skb_dequeue
+                       container_of((struct synq *)skb_peek
                                (&listen_ctx->synq), struct chtls_sock, synq);
                struct sock *child = csk->sk;
 
@@ -1109,6 +1112,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
                                    const struct cpl_pass_accept_req *req,
                                    struct chtls_dev *cdev)
 {
+       struct adapter *adap = pci_get_drvdata(cdev->pdev);
        struct neighbour *n = NULL;
        struct inet_sock *newinet;
        const struct iphdr *iph;
@@ -1118,9 +1122,10 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
        struct dst_entry *dst;
        struct tcp_sock *tp;
        struct sock *newsk;
+       bool found = false;
        u16 port_id;
        int rxq_idx;
-       int step;
+       int step, i;
 
        iph = (const struct iphdr *)network_hdr;
        newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb);
@@ -1152,7 +1157,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
                n = dst_neigh_lookup(dst, &ip6h->saddr);
 #endif
        }
-       if (!n)
+       if (!n || !n->dev)
                goto free_sk;
 
        ndev = n->dev;
@@ -1161,6 +1166,13 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
        if (is_vlan_dev(ndev))
                ndev = vlan_dev_real_dev(ndev);
 
+       for_each_port(adap, i)
+               if (cdev->ports[i] == ndev)
+                       found = true;
+
+       if (!found)
+               goto free_dst;
+
        port_id = cxgb4_port_idx(ndev);
 
        csk = chtls_sock_create(cdev);
@@ -1238,6 +1250,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
 free_csk:
        chtls_sock_release(&csk->kref);
 free_dst:
+       neigh_release(n);
        dst_release(dst);
 free_sk:
        inet_csk_prepare_forced_close(newsk);
@@ -1387,7 +1400,7 @@ static void chtls_pass_accept_request(struct sock *sk,
 
        newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
        if (!newsk)
-               goto free_oreq;
+               goto reject;
 
        if (chtls_get_module(newsk))
                goto reject;
@@ -1403,8 +1416,6 @@ static void chtls_pass_accept_request(struct sock *sk,
        kfree_skb(skb);
        return;
 
-free_oreq:
-       chtls_reqsk_free(oreq);
 reject:
        mk_tid_release(reply_skb, 0, tid);
        cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
@@ -1589,6 +1600,11 @@ static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb)
                        sk_wake_async(sk, 0, POLL_OUT);
 
                data = lookup_stid(cdev->tids, stid);
+               if (!data) {
+                       /* listening server close */
+                       kfree_skb(skb);
+                       goto unlock;
+               }
                lsk = ((struct listen_ctx *)data)->lsk;
 
                bh_lock_sock(lsk);
@@ -1936,6 +1952,8 @@ static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
                else if (tcp_sk(sk)->linger2 < 0 &&
                         !csk_flag_nochk(csk, CSK_ABORT_SHUTDOWN))
                        chtls_abort_conn(sk, skb);
+               else if (csk_flag_nochk(csk, CSK_TX_DATA_SENT))
+                       chtls_set_quiesce_ctrl(sk, 0);
                break;
        default:
                pr_info("close_con_rpl in bad state %d\n", sk->sk_state);
@@ -1997,39 +2015,6 @@ static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
        spin_unlock_bh(&cdev->deferq.lock);
 }
 
-static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
-                          struct chtls_dev *cdev, int status, int queue)
-{
-       struct cpl_abort_req_rss *req = cplhdr(skb);
-       struct sk_buff *reply_skb;
-       struct chtls_sock *csk;
-
-       csk = rcu_dereference_sk_user_data(sk);
-
-       reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
-                             GFP_KERNEL);
-
-       if (!reply_skb) {
-               req->status = (queue << 1);
-               t4_defer_reply(skb, cdev, send_defer_abort_rpl);
-               return;
-       }
-
-       set_abort_rpl_wr(reply_skb, GET_TID(req), status);
-       kfree_skb(skb);
-
-       set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
-       if (csk_conn_inline(csk)) {
-               struct l2t_entry *e = csk->l2t_entry;
-
-               if (e && sk->sk_state != TCP_SYN_RECV) {
-                       cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
-                       return;
-               }
-       }
-       cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
-}
-
 static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
                                 struct chtls_dev *cdev,
                                 int status, int queue)
@@ -2078,9 +2063,9 @@ static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb)
        queue = csk->txq_idx;
 
        skb->sk = NULL;
+       chtls_send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
+                            CPL_ABORT_NO_RST, queue);
        do_abort_syn_rcv(child, lsk);
-       send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
-                      CPL_ABORT_NO_RST, queue);
 }
 
 static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
@@ -2110,8 +2095,8 @@ static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
        if (!sock_owned_by_user(psk)) {
                int queue = csk->txq_idx;
 
+               chtls_send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
                do_abort_syn_rcv(sk, psk);
-               send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
        } else {
                skb->sk = sk;
                BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv;
@@ -2129,9 +2114,6 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
        int queue = csk->txq_idx;
 
        if (is_neg_adv(req->status)) {
-               if (sk->sk_state == TCP_SYN_RECV)
-                       chtls_set_tcb_tflag(sk, 0, 0);
-
                kfree_skb(skb);
                return;
        }
@@ -2158,12 +2140,12 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
                if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
                        return;
 
-               chtls_release_resources(sk);
-               chtls_conn_done(sk);
        }
 
        chtls_send_abort_rpl(sk, skb, BLOG_SKB_CB(skb)->cdev,
                             rst_status, queue);
+       chtls_release_resources(sk);
+       chtls_conn_done(sk);
 }
 
 static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
@@ -2315,6 +2297,28 @@ static int chtls_wr_ack(struct chtls_dev *cdev, struct sk_buff *skb)
        return 0;
 }
 
+static int chtls_set_tcb_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
+{
+       struct cpl_set_tcb_rpl *rpl = cplhdr(skb) + RSS_HDR;
+       unsigned int hwtid = GET_TID(rpl);
+       struct sock *sk;
+
+       sk = lookup_tid(cdev->tids, hwtid);
+
+       /* return EINVAL if socket doesn't exist */
+       if (!sk)
+               return -EINVAL;
+
+       /* Reusing the skb as size of cpl_set_tcb_field structure
+        * is greater than cpl_abort_req
+        */
+       if (TCB_COOKIE_G(rpl->cookie) == TCB_FIELD_COOKIE_TFLAG)
+               chtls_send_abort(sk, CPL_ABORT_SEND_RST, NULL);
+
+       kfree_skb(skb);
+       return 0;
+}
+
 chtls_handler_func chtls_handlers[NUM_CPL_CMDS] = {
        [CPL_PASS_OPEN_RPL]     = chtls_pass_open_rpl,
        [CPL_CLOSE_LISTSRV_RPL] = chtls_close_listsrv_rpl,
@@ -2327,5 +2331,6 @@ chtls_handler_func chtls_handlers[NUM_CPL_CMDS] = {
        [CPL_CLOSE_CON_RPL]     = chtls_conn_cpl,
        [CPL_ABORT_REQ_RSS]     = chtls_conn_cpl,
        [CPL_ABORT_RPL_RSS]     = chtls_conn_cpl,
-       [CPL_FW4_ACK]           = chtls_wr_ack,
+       [CPL_FW4_ACK]           = chtls_wr_ack,
+       [CPL_SET_TCB_RPL]       = chtls_set_tcb_rpl,
 };
index a4fb463..1e67140 100644 (file)
@@ -88,6 +88,24 @@ static int chtls_set_tcb_field(struct sock *sk, u16 word, u64 mask, u64 val)
        return ret < 0 ? ret : 0;
 }
 
+void chtls_set_tcb_field_rpl_skb(struct sock *sk, u16 word,
+                                u64 mask, u64 val, u8 cookie,
+                                int through_l2t)
+{
+       struct sk_buff *skb;
+       unsigned int wrlen;
+
+       wrlen = sizeof(struct cpl_set_tcb_field) + sizeof(struct ulptx_idata);
+       wrlen = roundup(wrlen, 16);
+
+       skb = alloc_skb(wrlen, GFP_KERNEL | __GFP_NOFAIL);
+       if (!skb)
+               return;
+
+       __set_tcb_field(sk, skb, word, mask, val, cookie, 0);
+       send_or_defer(sk, tcp_sk(sk), skb, through_l2t);
+}
+
 /*
  * Set one of the t_flags bits in the TCB.
  */
@@ -113,6 +131,29 @@ static int chtls_set_tcb_quiesce(struct sock *sk, int val)
                                   TF_RX_QUIESCE_V(val));
 }
 
+void chtls_set_quiesce_ctrl(struct sock *sk, int val)
+{
+       struct chtls_sock *csk;
+       struct sk_buff *skb;
+       unsigned int wrlen;
+       int ret;
+
+       wrlen = sizeof(struct cpl_set_tcb_field) + sizeof(struct ulptx_idata);
+       wrlen = roundup(wrlen, 16);
+
+       skb = alloc_skb(wrlen, GFP_ATOMIC);
+       if (!skb)
+               return;
+
+       csk = rcu_dereference_sk_user_data(sk);
+
+       __set_tcb_field(sk, skb, 1, TF_RX_QUIESCE_V(1), 0, 0, 1);
+       set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
+       ret = cxgb4_ofld_send(csk->egress_dev, skb);
+       if (ret < 0)
+               kfree_skb(skb);
+}
+
 /* TLS Key bitmap processing */
 int chtls_init_kmap(struct chtls_dev *cdev, struct cxgb4_lld_info *lldi)
 {
index 0981fe9..3d9b0b1 100644 (file)
@@ -1211,7 +1211,7 @@ static int ethoc_probe(struct platform_device *pdev)
        ret = mdiobus_register(priv->mdio);
        if (ret) {
                dev_err(&netdev->dev, "failed to register MDIO bus\n");
-               goto free2;
+               goto free3;
        }
 
        ret = ethoc_mdio_probe(netdev);
@@ -1243,6 +1243,7 @@ error2:
        netif_napi_del(&priv->napi);
 error:
        mdiobus_unregister(priv->mdio);
+free3:
        mdiobus_free(priv->mdio);
 free2:
        clk_disable_unprepare(priv->clk);
index c8e5d88..21de563 100644 (file)
@@ -223,3 +223,4 @@ static struct platform_driver fs_enet_bb_mdio_driver = {
 };
 
 module_platform_driver(fs_enet_bb_mdio_driver);
+MODULE_LICENSE("GPL");
index 8b51ee1..152f4d8 100644 (file)
@@ -224,3 +224,4 @@ static struct platform_driver fs_enet_fec_mdio_driver = {
 };
 
 module_platform_driver(fs_enet_fec_mdio_driver);
+MODULE_LICENSE("GPL");
index ba8869c..6d853f0 100644 (file)
@@ -3889,6 +3889,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
        INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
        netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
        dev->mtu = 1500;
+       dev->max_mtu = 1518;
 
        ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
        ugeth->phy_interface = phy_interface;
@@ -3934,12 +3935,12 @@ static int ucc_geth_remove(struct platform_device* ofdev)
        struct device_node *np = ofdev->dev.of_node;
 
        unregister_netdev(dev);
-       free_netdev(dev);
        ucc_geth_memclean(ugeth);
        if (of_phy_is_fixed_link(np))
                of_phy_deregister_fixed_link(np);
        of_node_put(ugeth->ug_info->tbi_node);
        of_node_put(ugeth->ug_info->phy_node);
+       free_netdev(dev);
 
        return 0;
 }
index 1a9bdf6..11d4bf5 100644 (file)
@@ -575,7 +575,14 @@ struct ucc_geth_tx_global_pram {
        u32 vtagtable[0x8];     /* 8 4-byte VLAN tags */
        u32 tqptr;              /* a base pointer to the Tx Queues Memory
                                   Region */
-       u8 res2[0x80 - 0x74];
+       u8 res2[0x78 - 0x74];
+       u64 snums_en;
+       u32 l2l3baseptr;        /* top byte consists of a few other bit fields */
+
+       u16 mtu[8];
+       u8 res3[0xa8 - 0x94];
+       u32 wrrtablebase;       /* top byte is reserved */
+       u8 res4[0xc0 - 0xac];
 } __packed;
 
 /* structure representing Extended Filtering Global Parameters in PRAM */
index 7165da0..a6e3f07 100644 (file)
@@ -415,6 +415,10 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data,
        /* for mutl buffer*/
        new_skb = skb_copy(skb, GFP_ATOMIC);
        dev_kfree_skb_any(skb);
+       if (!new_skb) {
+               netdev_err(ndev, "skb alloc failed\n");
+               return;
+       }
        skb = new_skb;
 
        check_ok = 0;
index fb5e884..33defa4 100644 (file)
@@ -169,7 +169,7 @@ struct hclgevf_mbx_arq_ring {
 #define hclge_mbx_ring_ptr_move_crq(crq) \
        (crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
 #define hclge_mbx_tail_ptr_move_arq(arq) \
-       (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
+               (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
 #define hclge_mbx_head_ptr_move_arq(arq) \
-               (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
+               (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
 #endif
index e6f37f9..c242883 100644 (file)
@@ -752,7 +752,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
                handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
                handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
 
-               if (hdev->hw.mac.phydev) {
+               if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
+                   hdev->hw.mac.phydev->drv->set_loopback) {
                        count += 1;
                        handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
                }
@@ -4537,8 +4538,8 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
                req->ipv4_sctp_en = tuple_sets;
                break;
        case SCTP_V6_FLOW:
-               if ((nfc->data & RXH_L4_B_0_1) ||
-                   (nfc->data & RXH_L4_B_2_3))
+               if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
+                   (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
                        return -EINVAL;
 
                req->ipv6_sctp_en = tuple_sets;
@@ -4730,6 +4731,8 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
                vport[i].rss_tuple_sets.ipv6_udp_en =
                        HCLGE_RSS_INPUT_TUPLE_OTHER;
                vport[i].rss_tuple_sets.ipv6_sctp_en =
+                       hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
+                       HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
                        HCLGE_RSS_INPUT_TUPLE_SCTP;
                vport[i].rss_tuple_sets.ipv6_fragment_en =
                        HCLGE_RSS_INPUT_TUPLE_OTHER;
index 50a294d..ca46bc9 100644 (file)
 #define HCLGE_D_IP_BIT                 BIT(2)
 #define HCLGE_S_IP_BIT                 BIT(3)
 #define HCLGE_V_TAG_BIT                        BIT(4)
+#define HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT     \
+               (HCLGE_D_IP_BIT | HCLGE_S_IP_BIT | HCLGE_V_TAG_BIT)
 
 #define HCLGE_RSS_TC_SIZE_0            1
 #define HCLGE_RSS_TC_SIZE_1            2
index 145757c..674b3a2 100644 (file)
@@ -917,8 +917,8 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
                req->ipv4_sctp_en = tuple_sets;
                break;
        case SCTP_V6_FLOW:
-               if ((nfc->data & RXH_L4_B_0_1) ||
-                   (nfc->data & RXH_L4_B_2_3))
+               if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
+                   (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
                        return -EINVAL;
 
                req->ipv6_sctp_en = tuple_sets;
@@ -2502,7 +2502,10 @@ static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
                tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
                tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
                tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-               tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+               tuple_sets->ipv6_sctp_en =
+                       hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
+                                       HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT :
+                                       HCLGEVF_RSS_INPUT_TUPLE_SCTP;
                tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
        }
 
index 1b183bc..f6d817a 100644 (file)
 #define HCLGEVF_D_IP_BIT               BIT(2)
 #define HCLGEVF_S_IP_BIT               BIT(3)
 #define HCLGEVF_V_TAG_BIT              BIT(4)
+#define HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT   \
+       (HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT | HCLGEVF_V_TAG_BIT)
 
 #define HCLGEVF_STATS_TIMER_INTERVAL   36U
 
index f302504..9778c83 100644 (file)
@@ -955,6 +955,7 @@ static void release_resources(struct ibmvnic_adapter *adapter)
        release_rx_pools(adapter);
 
        release_napi(adapter);
+       release_login_buffer(adapter);
        release_login_rsp_buffer(adapter);
 }
 
@@ -2341,8 +2342,7 @@ static void __ibmvnic_reset(struct work_struct *work)
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                schedule_timeout(60 * HZ);
                        }
-               } else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
-                               adapter->from_passive_init)) {
+               } else {
                        rc = do_reset(adapter, rwi, reset_state);
                }
                kfree(rwi);
@@ -2981,9 +2981,7 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
        int rc;
 
        if (!scrq) {
-               netdev_dbg(adapter->netdev,
-                          "Invalid scrq reset. irq (%d) or msgs (%p).\n",
-                          scrq->irq, scrq->msgs);
+               netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
                return -EINVAL;
        }
 
@@ -3873,7 +3871,9 @@ static int send_login(struct ibmvnic_adapter *adapter)
                return -1;
        }
 
+       release_login_buffer(adapter);
        release_login_rsp_buffer(adapter);
+
        client_data_len = vnic_client_data_len(adapter);
 
        buffer_size =
index ba7a0f8..5b2143f 100644 (file)
@@ -436,6 +436,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
 #define FLAG2_DFLT_CRC_STRIPPING          BIT(12)
 #define FLAG2_CHECK_RX_HWTSTAMP           BIT(13)
 #define FLAG2_CHECK_SYSTIM_OVERFLOW       BIT(14)
+#define FLAG2_ENABLE_S0IX_FLOWS           BIT(15)
 
 #define E1000_RX_DESC_PS(R, i)     \
        (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
index 03215b0..06442e6 100644 (file)
@@ -23,6 +23,13 @@ struct e1000_stats {
        int stat_offset;
 };
 
+static const char e1000e_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define E1000E_PRIV_FLAGS_S0IX_ENABLED BIT(0)
+       "s0ix-enabled",
+};
+
+#define E1000E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(e1000e_priv_flags_strings)
+
 #define E1000_STAT(str, m) { \
                .stat_string = str, \
                .type = E1000_STATS, \
@@ -1776,6 +1783,8 @@ static int e1000e_get_sset_count(struct net_device __always_unused *netdev,
                return E1000_TEST_LEN;
        case ETH_SS_STATS:
                return E1000_STATS_LEN;
+       case ETH_SS_PRIV_FLAGS:
+               return E1000E_PRIV_FLAGS_STR_LEN;
        default:
                return -EOPNOTSUPP;
        }
@@ -2097,6 +2106,10 @@ static void e1000_get_strings(struct net_device __always_unused *netdev,
                        p += ETH_GSTRING_LEN;
                }
                break;
+       case ETH_SS_PRIV_FLAGS:
+               memcpy(data, e1000e_priv_flags_strings,
+                      E1000E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+               break;
        }
 }
 
@@ -2305,6 +2318,37 @@ static int e1000e_get_ts_info(struct net_device *netdev,
        return 0;
 }
 
+static u32 e1000e_get_priv_flags(struct net_device *netdev)
+{
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+       u32 priv_flags = 0;
+
+       if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
+               priv_flags |= E1000E_PRIV_FLAGS_S0IX_ENABLED;
+
+       return priv_flags;
+}
+
+static int e1000e_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+       unsigned int flags2 = adapter->flags2;
+
+       flags2 &= ~FLAG2_ENABLE_S0IX_FLOWS;
+       if (priv_flags & E1000E_PRIV_FLAGS_S0IX_ENABLED) {
+               struct e1000_hw *hw = &adapter->hw;
+
+               if (hw->mac.type < e1000_pch_cnp)
+                       return -EINVAL;
+               flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
+       }
+
+       if (flags2 != adapter->flags2)
+               adapter->flags2 = flags2;
+
+       return 0;
+}
+
 static const struct ethtool_ops e1000_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
        .get_drvinfo            = e1000_get_drvinfo,
@@ -2336,6 +2380,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
        .set_eee                = e1000e_set_eee,
        .get_link_ksettings     = e1000_get_link_ksettings,
        .set_link_ksettings     = e1000_set_link_ksettings,
+       .get_priv_flags         = e1000e_get_priv_flags,
+       .set_priv_flags         = e1000e_set_priv_flags,
 };
 
 void e1000e_set_ethtool_ops(struct net_device *netdev)
index 9aa6fad..6fb4668 100644 (file)
@@ -1240,6 +1240,9 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
                return 0;
 
        if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+               struct e1000_adapter *adapter = hw->adapter;
+               bool firmware_bug = false;
+
                if (force) {
                        /* Request ME un-configure ULP mode in the PHY */
                        mac_reg = er32(H2ME);
@@ -1248,16 +1251,24 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
                        ew32(H2ME, mac_reg);
                }
 
-               /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
+               /* Poll up to 2.5 seconds for ME to clear ULP_CFG_DONE.
+                * If this takes more than 1 second, show a warning indicating a
+                * firmware bug
+                */
                while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
-                       if (i++ == 30) {
+                       if (i++ == 250) {
                                ret_val = -E1000_ERR_PHY;
                                goto out;
                        }
+                       if (i > 100 && !firmware_bug)
+                               firmware_bug = true;
 
                        usleep_range(10000, 11000);
                }
-               e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
+               if (firmware_bug)
+                       e_warn("ULP_CONFIG_DONE took %dmsec.  This is a firmware bug\n", i * 10);
+               else
+                       e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
 
                if (force) {
                        mac_reg = er32(H2ME);
index 128ab68..e9b82c2 100644 (file)
@@ -103,45 +103,6 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
        {0, NULL}
 };
 
-struct e1000e_me_supported {
-       u16 device_id;          /* supported device ID */
-};
-
-static const struct e1000e_me_supported me_supported[] = {
-       {E1000_DEV_ID_PCH_LPT_I217_LM},
-       {E1000_DEV_ID_PCH_LPTLP_I218_LM},
-       {E1000_DEV_ID_PCH_I218_LM2},
-       {E1000_DEV_ID_PCH_I218_LM3},
-       {E1000_DEV_ID_PCH_SPT_I219_LM},
-       {E1000_DEV_ID_PCH_SPT_I219_LM2},
-       {E1000_DEV_ID_PCH_LBG_I219_LM3},
-       {E1000_DEV_ID_PCH_SPT_I219_LM4},
-       {E1000_DEV_ID_PCH_SPT_I219_LM5},
-       {E1000_DEV_ID_PCH_CNP_I219_LM6},
-       {E1000_DEV_ID_PCH_CNP_I219_LM7},
-       {E1000_DEV_ID_PCH_ICP_I219_LM8},
-       {E1000_DEV_ID_PCH_ICP_I219_LM9},
-       {E1000_DEV_ID_PCH_CMP_I219_LM10},
-       {E1000_DEV_ID_PCH_CMP_I219_LM11},
-       {E1000_DEV_ID_PCH_CMP_I219_LM12},
-       {E1000_DEV_ID_PCH_TGP_I219_LM13},
-       {E1000_DEV_ID_PCH_TGP_I219_LM14},
-       {E1000_DEV_ID_PCH_TGP_I219_LM15},
-       {0}
-};
-
-static bool e1000e_check_me(u16 device_id)
-{
-       struct e1000e_me_supported *id;
-
-       for (id = (struct e1000e_me_supported *)me_supported;
-            id->device_id; id++)
-               if (device_id == id->device_id)
-                       return true;
-
-       return false;
-}
-
 /**
  * __ew32_prepare - prepare to write to MAC CSR register on certain parts
  * @hw: pointer to the HW structure
@@ -6962,7 +6923,6 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
        struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct pci_dev *pdev = to_pci_dev(dev);
-       struct e1000_hw *hw = &adapter->hw;
        int rc;
 
        e1000e_flush_lpic(pdev);
@@ -6970,13 +6930,13 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
        e1000e_pm_freeze(dev);
 
        rc = __e1000_shutdown(pdev, false);
-       if (rc)
+       if (rc) {
                e1000e_pm_thaw(dev);
-
-       /* Introduce S0ix implementation */
-       if (hw->mac.type >= e1000_pch_cnp &&
-           !e1000e_check_me(hw->adapter->pdev->device))
-               e1000e_s0ix_entry_flow(adapter);
+       } else {
+               /* Introduce S0ix implementation */
+               if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
+                       e1000e_s0ix_entry_flow(adapter);
+       }
 
        return rc;
 }
@@ -6986,12 +6946,10 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev)
        struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct pci_dev *pdev = to_pci_dev(dev);
-       struct e1000_hw *hw = &adapter->hw;
        int rc;
 
        /* Introduce S0ix implementation */
-       if (hw->mac.type >= e1000_pch_cnp &&
-           !e1000e_check_me(hw->adapter->pdev->device))
+       if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
                e1000e_s0ix_exit_flow(adapter);
 
        rc = __e1000_resume(pdev);
@@ -7655,6 +7613,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (!(adapter->flags & FLAG_HAS_AMT))
                e1000e_get_hw_control(adapter);
 
+       if (hw->mac.type >= e1000_pch_cnp)
+               adapter->flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
+
        strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
        err = register_netdev(netdev);
        if (err)
index d231a2c..118473d 100644 (file)
@@ -120,6 +120,7 @@ enum i40e_state_t {
        __I40E_RESET_INTR_RECEIVED,
        __I40E_REINIT_REQUESTED,
        __I40E_PF_RESET_REQUESTED,
+       __I40E_PF_RESET_AND_REBUILD_REQUESTED,
        __I40E_CORE_RESET_REQUESTED,
        __I40E_GLOBAL_RESET_REQUESTED,
        __I40E_EMP_RESET_INTR_RECEIVED,
@@ -146,6 +147,8 @@ enum i40e_state_t {
 };
 
 #define I40E_PF_RESET_FLAG     BIT_ULL(__I40E_PF_RESET_REQUESTED)
+#define I40E_PF_RESET_AND_REBUILD_FLAG \
+       BIT_ULL(__I40E_PF_RESET_AND_REBUILD_REQUESTED)
 
 /* VSI state flags */
 enum i40e_vsi_state_t {
index 1337686..1db482d 100644 (file)
@@ -36,6 +36,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf);
 static void i40e_determine_queue_usage(struct i40e_pf *pf);
 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
+                                  bool lock_acquired);
 static int i40e_reset(struct i40e_pf *pf);
 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
@@ -8536,6 +8538,14 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
                         "FW LLDP is disabled\n" :
                         "FW LLDP is enabled\n");
 
+       } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
+               /* Request a PF Reset
+                *
+                * Resets PF and reinitializes PFs VSI.
+                */
+               i40e_prep_for_reset(pf, lock_acquired);
+               i40e_reset_and_rebuild(pf, true, lock_acquired);
+
        } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
                int v;
 
index 729c4f0..21ee564 100644 (file)
@@ -1772,7 +1772,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
        if (num_vfs) {
                if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
                        pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
-                       i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+                       i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
                }
                ret = i40e_pci_sriov_enable(pdev, num_vfs);
                goto sriov_configure_out;
@@ -1781,7 +1781,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
        if (!pci_vfs_assigned(pf->pdev)) {
                i40e_free_vfs(pf);
                pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
-               i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+               i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
        } else {
                dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
                ret = -EINVAL;
index 47eb9c5..492ce21 100644 (file)
@@ -348,12 +348,12 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
                 * SBP is *not* set in PRT_SBPVSI (default not set).
                 */
                skb = i40e_construct_skb_zc(rx_ring, *bi);
-               *bi = NULL;
                if (!skb) {
                        rx_ring->rx_stats.alloc_buff_failed++;
                        break;
                }
 
+               *bi = NULL;
                cleaned_count++;
                i40e_inc_ntc(rx_ring);
 
index 95543df..0a867d6 100644 (file)
@@ -1834,11 +1834,9 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
        netif_tx_stop_all_queues(netdev);
        if (CLIENT_ALLOWED(adapter)) {
                err = iavf_lan_add_device(adapter);
-               if (err) {
-                       rtnl_unlock();
+               if (err)
                        dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
                                 err);
-               }
        }
        dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
        if (netdev->features & NETIF_F_GRO)
index 563ceac..bc4d8d1 100644 (file)
@@ -4432,7 +4432,7 @@ static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
        struct bpf_prog *old_prog;
 
        if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
-               NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
+               NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
                return -EOPNOTSUPP;
        }
 
@@ -5255,7 +5255,7 @@ static int mvneta_probe(struct platform_device *pdev)
        err = mvneta_port_power_up(pp, pp->phy_interface);
        if (err < 0) {
                dev_err(&pdev->dev, "can't power up port\n");
-               return err;
+               goto err_netdev;
        }
 
        /* Armada3700 network controller does not support per-cpu
index afdd228..358119d 100644 (file)
@@ -1231,7 +1231,7 @@ static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
 
        regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
        if (port->gop_id == 2)
-               val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
+               val |= GENCONF_CTRL0_PORT0_RGMII;
        else if (port->gop_id == 3)
                val |= GENCONF_CTRL0_PORT1_RGMII_MII;
        regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
@@ -2370,17 +2370,18 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
 static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
                                   struct mvpp2_tx_queue *txq)
 {
-       unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+       unsigned int thread;
        u32 val;
 
        if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
                txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
 
        val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
-       mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
-       mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
-
-       put_cpu();
+       /* PKT-coalescing registers are per-queue + per-thread */
+       for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) {
+               mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
+               mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
+       }
 }
 
 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
@@ -5487,7 +5488,7 @@ static int mvpp2_port_init(struct mvpp2_port *port)
        struct mvpp2 *priv = port->priv;
        struct mvpp2_txq_pcpu *txq_pcpu;
        unsigned int thread;
-       int queue, err;
+       int queue, err, val;
 
        /* Checks for hardware constraints */
        if (port->first_rxq + port->nrxqs >
@@ -5501,6 +5502,18 @@ static int mvpp2_port_init(struct mvpp2_port *port)
        mvpp2_egress_disable(port);
        mvpp2_port_disable(port);
 
+       if (mvpp2_is_xlg(port->phy_interface)) {
+               val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+               val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
+               val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
+               writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+       } else {
+               val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+               val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
+               val |= MVPP2_GMAC_FORCE_LINK_DOWN;
+               writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+       }
+
        port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
 
        port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
@@ -5869,8 +5882,6 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
 
        phylink_set(mask, Autoneg);
        phylink_set_port_modes(mask);
-       phylink_set(mask, Pause);
-       phylink_set(mask, Asym_Pause);
 
        switch (state->interface) {
        case PHY_INTERFACE_MODE_10GBASER:
index 5692c60..a30eb90 100644 (file)
@@ -405,6 +405,38 @@ static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
        return -EINVAL;
 }
 
+/* Drop flow control pause frames */
+static void mvpp2_prs_drop_fc(struct mvpp2 *priv)
+{
+       unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
+       struct mvpp2_prs_entry pe;
+       unsigned int len;
+
+       memset(&pe, 0, sizeof(pe));
+
+       /* For all ports - drop flow control frames */
+       pe.index = MVPP2_PE_FC_DROP;
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+       /* Set match on DA */
+       len = ETH_ALEN;
+       while (len--)
+               mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
+
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+                                MVPP2_PRS_RI_DROP_MASK);
+
+       mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+       /* Mask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+       mvpp2_prs_hw_write(priv, &pe);
+}
+
 /* Enable/disable dropping all mac da's */
 static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
 {
@@ -1162,6 +1194,7 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
        mvpp2_prs_hw_write(priv, &pe);
 
        /* Create dummy entries for drop all and promiscuous modes */
+       mvpp2_prs_drop_fc(priv);
        mvpp2_prs_mac_drop_all_set(priv, 0, false);
        mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
        mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
@@ -1647,8 +1680,9 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
                                 MVPP2_PRS_RI_L3_PROTO_MASK);
-       /* Skip eth_type + 4 bytes of IPv6 header */
-       mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+       /* Jump to DIP of IPV6 header */
+       mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
+                                MVPP2_MAX_L3_ADDR_SIZE,
                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
        /* Set L3 offset */
        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
index e22f6c8..4b68dd3 100644 (file)
 #define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
 #define MVPP2_PE_VLAN_DBL              (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
 #define MVPP2_PE_VLAN_NONE             (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
-/* reserved */
+#define MVPP2_PE_FC_DROP               (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
 #define MVPP2_PE_MAC_MC_PROMISCUOUS    (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
 #define MVPP2_PE_MAC_UC_PROMISCUOUS    (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
 #define MVPP2_PE_MAC_NON_PROMISCUOUS   (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
index 7d0f962..1a8f5a0 100644 (file)
@@ -871,8 +871,10 @@ static int cgx_lmac_init(struct cgx *cgx)
                if (!lmac)
                        return -ENOMEM;
                lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
-               if (!lmac->name)
-                       return -ENOMEM;
+               if (!lmac->name) {
+                       err = -ENOMEM;
+                       goto err_lmac_free;
+               }
                sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
                lmac->lmac_id = i;
                lmac->cgx = cgx;
@@ -883,7 +885,7 @@ static int cgx_lmac_init(struct cgx *cgx)
                                                 CGX_LMAC_FWI + i * 9),
                                   cgx_fwi_event_handler, 0, lmac->name, lmac);
                if (err)
-                       return err;
+                       goto err_irq;
 
                /* Enable interrupt */
                cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
@@ -895,6 +897,12 @@ static int cgx_lmac_init(struct cgx *cgx)
        }
 
        return cgx_lmac_verify_fwi_version(cgx);
+
+err_irq:
+       kfree(lmac->name);
+err_lmac_free:
+       kfree(lmac);
+       return err;
 }
 
 static int cgx_lmac_exit(struct cgx *cgx)
index d298b93..6c6b411 100644 (file)
@@ -469,6 +469,9 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_id, lmac_id;
 
+       if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+               return -EPERM;
+
        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
 
        cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
@@ -485,6 +488,9 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
        int rc = 0, i;
        u64 cfg;
 
+       if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+               return -EPERM;
+
        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
 
        rsp->hdr.rc = rc;
index d29af7b..76177f7 100644 (file)
@@ -626,6 +626,11 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
        if (!reg_c0)
                return true;
 
+       /* If reg_c0 is not equal to the default flow tag then skb->mark
+        * is not supported and must be reset back to 0.
+        */
+       skb->mark = 0;
+
        priv = netdev_priv(skb->dev);
        esw = priv->mdev->priv.eswitch;
 
index e521254..072363e 100644 (file)
@@ -118,16 +118,17 @@ struct mlx5_ct_tuple {
        u16 zone;
 };
 
-struct mlx5_ct_shared_counter {
+struct mlx5_ct_counter {
        struct mlx5_fc *counter;
        refcount_t refcount;
+       bool is_shared;
 };
 
 struct mlx5_ct_entry {
        struct rhash_head node;
        struct rhash_head tuple_node;
        struct rhash_head tuple_nat_node;
-       struct mlx5_ct_shared_counter *shared_counter;
+       struct mlx5_ct_counter *counter;
        unsigned long cookie;
        unsigned long restore_cookie;
        struct mlx5_ct_tuple tuple;
@@ -394,13 +395,14 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
 }
 
 static void
-mlx5_tc_ct_shared_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
+mlx5_tc_ct_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
 {
-       if (!refcount_dec_and_test(&entry->shared_counter->refcount))
+       if (entry->counter->is_shared &&
+           !refcount_dec_and_test(&entry->counter->refcount))
                return;
 
-       mlx5_fc_destroy(ct_priv->dev, entry->shared_counter->counter);
-       kfree(entry->shared_counter);
+       mlx5_fc_destroy(ct_priv->dev, entry->counter->counter);
+       kfree(entry->counter);
 }
 
 static void
@@ -699,7 +701,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
        attr->dest_ft = ct_priv->post_ct;
        attr->ft = nat ? ct_priv->ct_nat : ct_priv->ct;
        attr->outer_match_level = MLX5_MATCH_L4;
-       attr->counter = entry->shared_counter->counter;
+       attr->counter = entry->counter->counter;
        attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
 
        mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
@@ -732,13 +734,34 @@ err_attr:
        return err;
 }
 
-static struct mlx5_ct_shared_counter *
+static struct mlx5_ct_counter *
+mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv)
+{
+       struct mlx5_ct_counter *counter;
+       int ret;
+
+       counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+       if (!counter)
+               return ERR_PTR(-ENOMEM);
+
+       counter->is_shared = false;
+       counter->counter = mlx5_fc_create(ct_priv->dev, true);
+       if (IS_ERR(counter->counter)) {
+               ct_dbg("Failed to create counter for ct entry");
+               ret = PTR_ERR(counter->counter);
+               kfree(counter);
+               return ERR_PTR(ret);
+       }
+
+       return counter;
+}
+
+static struct mlx5_ct_counter *
 mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
                              struct mlx5_ct_entry *entry)
 {
        struct mlx5_ct_tuple rev_tuple = entry->tuple;
-       struct mlx5_ct_shared_counter *shared_counter;
-       struct mlx5_core_dev *dev = ct_priv->dev;
+       struct mlx5_ct_counter *shared_counter;
        struct mlx5_ct_entry *rev_entry;
        __be16 tmp_port;
        int ret;
@@ -767,25 +790,20 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
        rev_entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &rev_tuple,
                                           tuples_ht_params);
        if (rev_entry) {
-               if (refcount_inc_not_zero(&rev_entry->shared_counter->refcount)) {
+               if (refcount_inc_not_zero(&rev_entry->counter->refcount)) {
                        mutex_unlock(&ct_priv->shared_counter_lock);
-                       return rev_entry->shared_counter;
+                       return rev_entry->counter;
                }
        }
        mutex_unlock(&ct_priv->shared_counter_lock);
 
-       shared_counter = kzalloc(sizeof(*shared_counter), GFP_KERNEL);
-       if (!shared_counter)
-               return ERR_PTR(-ENOMEM);
-
-       shared_counter->counter = mlx5_fc_create(dev, true);
-       if (IS_ERR(shared_counter->counter)) {
-               ct_dbg("Failed to create counter for ct entry");
-               ret = PTR_ERR(shared_counter->counter);
-               kfree(shared_counter);
+       shared_counter = mlx5_tc_ct_counter_create(ct_priv);
+       if (IS_ERR(shared_counter)) {
+               ret = PTR_ERR(shared_counter);
                return ERR_PTR(ret);
        }
 
+       shared_counter->is_shared = true;
        refcount_set(&shared_counter->refcount, 1);
        return shared_counter;
 }
@@ -798,10 +816,13 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
 {
        int err;
 
-       entry->shared_counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
-       if (IS_ERR(entry->shared_counter)) {
-               err = PTR_ERR(entry->shared_counter);
-               ct_dbg("Failed to create counter for ct entry");
+       if (nf_ct_acct_enabled(dev_net(ct_priv->netdev)))
+               entry->counter = mlx5_tc_ct_counter_create(ct_priv);
+       else
+               entry->counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
+
+       if (IS_ERR(entry->counter)) {
+               err = PTR_ERR(entry->counter);
                return err;
        }
 
@@ -820,7 +841,7 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
 err_nat:
        mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
 err_orig:
-       mlx5_tc_ct_shared_counter_put(ct_priv, entry);
+       mlx5_tc_ct_counter_put(ct_priv, entry);
        return err;
 }
 
@@ -918,7 +939,7 @@ mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
        rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
                               tuples_ht_params);
        mutex_unlock(&ct_priv->shared_counter_lock);
-       mlx5_tc_ct_shared_counter_put(ct_priv, entry);
+       mlx5_tc_ct_counter_put(ct_priv, entry);
 
 }
 
@@ -956,7 +977,7 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
        if (!entry)
                return -ENOENT;
 
-       mlx5_fc_query_cached(entry->shared_counter->counter, &bytes, &packets, &lastuse);
+       mlx5_fc_query_cached(entry->counter->counter, &bytes, &packets, &lastuse);
        flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
                          FLOW_ACTION_HW_STATS_DELAYED);
 
index 7943eb3..4880f21 100644 (file)
@@ -371,6 +371,15 @@ struct mlx5e_swp_spec {
        u8 tun_l4_proto;
 };
 
+static inline void mlx5e_eseg_swp_offsets_add_vlan(struct mlx5_wqe_eth_seg *eseg)
+{
+       /* SWP offsets are in 2-bytes words */
+       eseg->swp_outer_l3_offset += VLAN_HLEN / 2;
+       eseg->swp_outer_l4_offset += VLAN_HLEN / 2;
+       eseg->swp_inner_l3_offset += VLAN_HLEN / 2;
+       eseg->swp_inner_l4_offset += VLAN_HLEN / 2;
+}
+
 static inline void
 mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
                   struct mlx5e_swp_spec *swp_spec)
index 899b98a..1fae7fa 100644 (file)
@@ -51,7 +51,7 @@ static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
 }
 
 static inline void
-mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
+mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, u16 ihs)
 {
        struct mlx5e_swp_spec swp_spec = {};
        unsigned int offset = 0;
@@ -85,6 +85,8 @@ mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
        }
 
        mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
+       if (skb_vlan_tag_present(skb) &&  ihs)
+               mlx5e_eseg_swp_offsets_add_vlan(eseg);
 }
 
 #else
@@ -163,7 +165,7 @@ static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
 
 static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
                                       struct sk_buff *skb,
-                                      struct mlx5_wqe_eth_seg *eseg)
+                                      struct mlx5_wqe_eth_seg *eseg, u16 ihs)
 {
 #ifdef CONFIG_MLX5_EN_IPSEC
        if (xfrm_offload(skb))
@@ -172,7 +174,7 @@ static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
 
 #if IS_ENABLED(CONFIG_GENEVE)
        if (skb->encapsulation)
-               mlx5e_tx_tunnel_accel(skb, eseg);
+               mlx5e_tx_tunnel_accel(skb, eseg, ihs);
 #endif
 
        return true;
index d9076d5..2d37742 100644 (file)
@@ -1010,6 +1010,22 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
        return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
 }
 
+static int mlx5e_speed_validate(struct net_device *netdev, bool ext,
+                               const unsigned long link_modes, u8 autoneg)
+{
+       /* Extended link-mode has no speed limitations. */
+       if (ext)
+               return 0;
+
+       if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
+           autoneg != AUTONEG_ENABLE) {
+               netdev_err(netdev, "%s: 56G link speed requires autoneg enabled\n",
+                          __func__);
+               return -EINVAL;
+       }
+       return 0;
+}
+
 static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
 {
        u32 i, ptys_modes = 0;
@@ -1103,13 +1119,9 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
        link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
                mlx5e_port_speed2linkmodes(mdev, speed, !ext);
 
-       if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
-           autoneg != AUTONEG_ENABLE) {
-               netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n",
-                          __func__);
-               err = -EINVAL;
+       err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg);
+       if (err)
                goto out;
-       }
 
        link_modes = link_modes & eproto.cap;
        if (!link_modes) {
index fa8149f..e02e589 100644 (file)
@@ -942,6 +942,7 @@ static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in) {
                kfree(ft->g);
+               ft->g = NULL;
                return -ENOMEM;
        }
 
@@ -1087,6 +1088,7 @@ static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in) {
                kfree(ft->g);
+               ft->g = NULL;
                return -ENOMEM;
        }
 
@@ -1390,6 +1392,7 @@ err_destroy_groups:
        ft->g[ft->num_groups] = NULL;
        mlx5e_destroy_groups(ft);
        kvfree(in);
+       kfree(ft->g);
 
        return err;
 }
index 7a79d33..6a852b4 100644 (file)
@@ -3161,7 +3161,8 @@ static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
 
        mlx5_set_port_admin_status(mdev, state);
 
-       if (mlx5_eswitch_mode(mdev) != MLX5_ESWITCH_LEGACY)
+       if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS ||
+           !MLX5_CAP_GEN(mdev, uplink_follow))
                return;
 
        if (state == MLX5_PORT_UP)
index e47e2a0..61ed671 100644 (file)
@@ -682,9 +682,9 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
 
 static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
                                   struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
-                                  struct mlx5_wqe_eth_seg *eseg)
+                                  struct mlx5_wqe_eth_seg *eseg, u16 ihs)
 {
-       if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg)))
+       if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg, ihs)))
                return false;
 
        mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
@@ -714,7 +714,8 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
                if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
                        struct mlx5_wqe_eth_seg eseg = {};
 
-                       if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg)))
+                       if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg,
+                                                            attr.ihs)))
                                return NETDEV_TX_OK;
 
                        mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
@@ -731,7 +732,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
        /* May update the WQE, but may not post other WQEs. */
        mlx5e_accel_tx_finish(sq, wqe, &accel,
                              (struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
-       if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth)))
+       if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs)))
                return NETDEV_TX_OK;
 
        mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());
index 2b85d47..3e19b17 100644 (file)
@@ -95,22 +95,21 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
                return 0;
        }
 
-       if (!IS_ERR_OR_NULL(vport->egress.acl))
-               return 0;
-
-       vport->egress.acl = esw_acl_table_create(esw, vport->vport,
-                                                MLX5_FLOW_NAMESPACE_ESW_EGRESS,
-                                                table_size);
-       if (IS_ERR(vport->egress.acl)) {
-               err = PTR_ERR(vport->egress.acl);
-               vport->egress.acl = NULL;
-               goto out;
+       if (!vport->egress.acl) {
+               vport->egress.acl = esw_acl_table_create(esw, vport->vport,
+                                                        MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+                                                        table_size);
+               if (IS_ERR(vport->egress.acl)) {
+                       err = PTR_ERR(vport->egress.acl);
+                       vport->egress.acl = NULL;
+                       goto out;
+               }
+
+               err = esw_acl_egress_lgcy_groups_create(esw, vport);
+               if (err)
+                       goto out;
        }
 
-       err = esw_acl_egress_lgcy_groups_create(esw, vport);
-       if (err)
-               goto out;
-
        esw_debug(esw->dev,
                  "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
                  vport->vport, vport->info.vlan, vport->info.qos);
index f3d45ef..83a0537 100644 (file)
@@ -564,7 +564,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
        struct mlx5_core_dev *tmp_dev;
        int i, err;
 
-       if (!MLX5_CAP_GEN(dev, vport_group_manager))
+       if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+           !MLX5_CAP_GEN(dev, lag_master) ||
+           MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
                return;
 
        tmp_dev = mlx5_get_next_phys_dev(dev);
@@ -582,12 +584,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
        if (mlx5_lag_dev_add_pf(ldev, dev, netdev) < 0)
                return;
 
-       for (i = 0; i < MLX5_MAX_PORTS; i++) {
-               tmp_dev = ldev->pf[i].dev;
-               if (!tmp_dev || !MLX5_CAP_GEN(tmp_dev, lag_master) ||
-                   MLX5_CAP_GEN(tmp_dev, num_lag_ports) != MLX5_MAX_PORTS)
+       for (i = 0; i < MLX5_MAX_PORTS; i++)
+               if (!ldev->pf[i].dev)
                        break;
-       }
 
        if (i >= MLX5_MAX_PORTS)
                ldev->flags |= MLX5_LAG_FLAG_READY;
index c08315b..ca6f2fc 100644 (file)
@@ -1368,8 +1368,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
                         MLX5_COREDEV_VF : MLX5_COREDEV_PF;
 
        dev->priv.adev_idx = mlx5_adev_idx_alloc();
-       if (dev->priv.adev_idx < 0)
-               return dev->priv.adev_idx;
+       if (dev->priv.adev_idx < 0) {
+               err = dev->priv.adev_idx;
+               goto adev_init_err;
+       }
 
        err = mlx5_mdev_init(dev, prof_sel);
        if (err)
@@ -1403,6 +1405,7 @@ pci_init_err:
        mlx5_mdev_uninit(dev);
 mdev_init_err:
        mlx5_adev_idx_free(dev->priv.adev_idx);
+adev_init_err:
        mlx5_devlink_free(devlink);
 
        return err;
index 0fc7de4..8e0dddc 100644 (file)
@@ -116,7 +116,7 @@ free:
 static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev)
 {
        mlx5_core_roce_gid_set(dev, 0, 0, 0,
-                              NULL, NULL, false, 0, 0);
+                              NULL, NULL, false, 0, 1);
 }
 
 static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *gid)
index 8fa286c..bf85ce9 100644 (file)
@@ -19,7 +19,7 @@
 #define MLXSW_THERMAL_ASIC_TEMP_NORM   75000   /* 75C */
 #define MLXSW_THERMAL_ASIC_TEMP_HIGH   85000   /* 85C */
 #define MLXSW_THERMAL_ASIC_TEMP_HOT    105000  /* 105C */
-#define MLXSW_THERMAL_ASIC_TEMP_CRIT   110000  /* 110C */
+#define MLXSW_THERMAL_ASIC_TEMP_CRIT   140000  /* 140C */
 #define MLXSW_THERMAL_HYSTERESIS_TEMP  5000    /* 5C */
 #define MLXSW_THERMAL_MODULE_TEMP_SHIFT        (MLXSW_THERMAL_HYSTERESIS_TEMP * 2)
 #define MLXSW_THERMAL_ZONE_MAX_NAME    16
@@ -176,6 +176,12 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
        if (err)
                return err;
 
+       if (crit_temp > emerg_temp) {
+               dev_warn(dev, "%s : Critical threshold %d is above emergency threshold %d\n",
+                        tz->tzdev->type, crit_temp, emerg_temp);
+               return 0;
+       }
+
        /* According to the system thermal requirements, the thermal zones are
         * defined with four trip points. The critical and emergency
         * temperature thresholds, provided by QSFP module are set as "active"
@@ -190,11 +196,8 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
                tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp;
        tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = crit_temp;
        tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = emerg_temp;
-       if (emerg_temp > crit_temp)
-               tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp +
+       tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp +
                                        MLXSW_THERMAL_MODULE_TEMP_SHIFT;
-       else
-               tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp;
 
        return 0;
 }
index 0b9992b..ff87a0b 100644 (file)
@@ -60,14 +60,27 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port,
                      const unsigned char mac[ETH_ALEN],
                      unsigned int vid, enum macaccess_entry_type type)
 {
+       u32 cmd = ANA_TABLES_MACACCESS_VALID |
+               ANA_TABLES_MACACCESS_DEST_IDX(port) |
+               ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
+               ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN);
+       unsigned int mc_ports;
+
+       /* Set MAC_CPU_COPY if the CPU port is used by a multicast entry */
+       if (type == ENTRYTYPE_MACv4)
+               mc_ports = (mac[1] << 8) | mac[2];
+       else if (type == ENTRYTYPE_MACv6)
+               mc_ports = (mac[0] << 8) | mac[1];
+       else
+               mc_ports = 0;
+
+       if (mc_ports & BIT(ocelot->num_phys_ports))
+               cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY;
+
        ocelot_mact_select(ocelot, mac, vid);
 
        /* Issue a write command */
-       ocelot_write(ocelot, ANA_TABLES_MACACCESS_VALID |
-                            ANA_TABLES_MACACCESS_DEST_IDX(port) |
-                            ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
-                            ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN),
-                            ANA_TABLES_MACACCESS);
+       ocelot_write(ocelot, cmd, ANA_TABLES_MACACCESS);
 
        return ocelot_mact_wait_for_completion(ocelot);
 }
index 2bd2840..42230f9 100644 (file)
@@ -1042,10 +1042,8 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        int ret = 0;
 
-       if (!ocelot_netdevice_dev_check(dev))
-               return 0;
-
        if (event == NETDEV_PRECHANGEUPPER &&
+           ocelot_netdevice_dev_check(dev) &&
            netif_is_lag_master(info->upper_dev)) {
                struct netdev_lag_upper_info *lag_upper_info = info->upper_info;
                struct netlink_ext_ack *extack;
index 776b7d2..2289e1f 100644 (file)
@@ -506,10 +506,14 @@ static int mac_sonic_platform_probe(struct platform_device *pdev)
 
        err = register_netdev(dev);
        if (err)
-               goto out;
+               goto undo_probe;
 
        return 0;
 
+undo_probe:
+       dma_free_coherent(lp->device,
+                         SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+                         lp->descriptors, lp->descriptors_laddr);
 out:
        free_netdev(dev);
 
@@ -584,12 +588,16 @@ static int mac_sonic_nubus_probe(struct nubus_board *board)
 
        err = register_netdev(ndev);
        if (err)
-               goto out;
+               goto undo_probe;
 
        nubus_set_drvdata(board, ndev);
 
        return 0;
 
+undo_probe:
+       dma_free_coherent(lp->device,
+                         SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+                         lp->descriptors, lp->descriptors_laddr);
 out:
        free_netdev(ndev);
        return err;
index afa166f..28d9e98 100644 (file)
@@ -229,11 +229,14 @@ int xtsonic_probe(struct platform_device *pdev)
        sonic_msg_init(dev);
 
        if ((err = register_netdev(dev)))
-               goto out1;
+               goto undo_probe1;
 
        return 0;
 
-out1:
+undo_probe1:
+       dma_free_coherent(lp->device,
+                         SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+                         lp->descriptors, lp->descriptors_laddr);
        release_region(dev->base_addr, SONIC_MEM_SIZE);
 out:
        free_netdev(dev);
index 9156c98..ac4cd5d 100644 (file)
@@ -337,7 +337,7 @@ void ionic_rx_fill(struct ionic_queue *q)
        unsigned int i, j;
        unsigned int len;
 
-       len = netdev->mtu + ETH_HLEN;
+       len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
        nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;
 
        for (i = ionic_q_space_avail(q); i; i--) {
index 4366c7a..6b5ddb0 100644 (file)
@@ -78,6 +78,7 @@ config QED
        depends on PCI
        select ZLIB_INFLATE
        select CRC8
+       select CRC32
        select NET_DEVLINK
        help
          This enables the support for Marvell FastLinQ adapters family.
index f218477..d258e0c 100644 (file)
@@ -564,11 +564,6 @@ static const struct net_device_ops netxen_netdev_ops = {
        .ndo_set_features = netxen_set_features,
 };
 
-static inline bool netxen_function_zero(struct pci_dev *pdev)
-{
-       return (PCI_FUNC(pdev->devfn) == 0) ? true : false;
-}
-
 static inline void netxen_set_interrupt_mode(struct netxen_adapter *adapter,
                                             u32 mode)
 {
@@ -664,7 +659,7 @@ static int netxen_setup_intr(struct netxen_adapter *adapter)
        netxen_initialize_interrupt_registers(adapter);
        netxen_set_msix_bit(pdev, 0);
 
-       if (netxen_function_zero(pdev)) {
+       if (adapter->portnum == 0) {
                if (!netxen_setup_msi_interrupts(adapter, num_msix))
                        netxen_set_interrupt_mode(adapter, NETXEN_MSI_MODE);
                else
index a2494bf..ca0ee29 100644 (file)
@@ -1799,6 +1799,11 @@ netdev_features_t qede_features_check(struct sk_buff *skb,
                              ntohs(udp_hdr(skb)->dest) != gnv_port))
                                return features & ~(NETIF_F_CSUM_MASK |
                                                    NETIF_F_GSO_MASK);
+               } else if (l4_proto == IPPROTO_IPIP) {
+                       /* IPIP tunnels are unknown to the device or at least unsupported natively,
+                        * offloads for them can't be done trivially, so disable them for such skb.
+                        */
+                       return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
                }
        }
 
index 46d8510..a569abe 100644 (file)
@@ -2207,7 +2207,8 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
        }
 
        switch (tp->mac_version) {
-       case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
+       case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
+       case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
        case RTL_GIGA_MAC_VER_37:
        case RTL_GIGA_MAC_VER_39:
        case RTL_GIGA_MAC_VER_43:
@@ -2233,7 +2234,8 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
 static void rtl_pll_power_up(struct rtl8169_private *tp)
 {
        switch (tp->mac_version) {
-       case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
+       case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
+       case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
        case RTL_GIGA_MAC_VER_37:
        case RTL_GIGA_MAC_VER_39:
        case RTL_GIGA_MAC_VER_43:
index c633046..590b088 100644 (file)
@@ -2606,10 +2606,10 @@ static int sh_eth_close(struct net_device *ndev)
        /* Free all the skbuffs in the Rx queue and the DMA buffer. */
        sh_eth_ring_free(ndev);
 
-       pm_runtime_put_sync(&mdp->pdev->dev);
-
        mdp->is_opened = 0;
 
+       pm_runtime_put(&mdp->pdev->dev);
+
        return 0;
 }
 
@@ -3034,6 +3034,28 @@ static int sh_mdio_release(struct sh_eth_private *mdp)
        return 0;
 }
 
+static int sh_mdiobb_read(struct mii_bus *bus, int phy, int reg)
+{
+       int res;
+
+       pm_runtime_get_sync(bus->parent);
+       res = mdiobb_read(bus, phy, reg);
+       pm_runtime_put(bus->parent);
+
+       return res;
+}
+
+static int sh_mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+       int res;
+
+       pm_runtime_get_sync(bus->parent);
+       res = mdiobb_write(bus, phy, reg, val);
+       pm_runtime_put(bus->parent);
+
+       return res;
+}
+
 /* MDIO bus init function */
 static int sh_mdio_init(struct sh_eth_private *mdp,
                        struct sh_eth_plat_data *pd)
@@ -3058,6 +3080,10 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
        if (!mdp->mii_bus)
                return -ENOMEM;
 
+       /* Wrap accessors with Runtime PM-aware ops */
+       mdp->mii_bus->read = sh_mdiobb_read;
+       mdp->mii_bus->write = sh_mdiobb_write;
+
        /* Hook up MII support for ethtool */
        mdp->mii_bus->name = "sh_mii";
        mdp->mii_bus->parent = dev;
index a2e80c8..9a6a519 100644 (file)
@@ -721,6 +721,8 @@ static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G_ID                0x4bb0
 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G_ID                0x4bb1
 #define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5_ID       0x4bb2
+#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0_ID          0x43ac
+#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1_ID          0x43a2
 #define PCI_DEVICE_ID_INTEL_TGL_SGMII1G_ID             0xa0ac
 
 static const struct pci_device_id intel_eth_pci_id_table[] = {
@@ -735,6 +737,8 @@ static const struct pci_device_id intel_eth_pci_id_table[] = {
        { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G_ID, &ehl_pse1_sgmii1g_info) },
        { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5_ID, &ehl_pse1_sgmii1g_info) },
        { PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_info) },
+       { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0_ID, &tgl_sgmii1g_info) },
+       { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1_ID, &tgl_sgmii1g_info) },
        {}
 };
 MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
index 459ae71..f184b00 100644 (file)
@@ -135,7 +135,7 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
        struct device *dev = dwmac->dev;
        static const struct clk_parent_data mux_parents[] = {
                { .fw_name = "clkin0", },
-               { .fw_name = "clkin1", },
+               { .index = -1, },
        };
        static const struct clk_div_table div_table[] = {
                { .div = 2, .val = 2, },
index 58e0511..a5e0eff 100644 (file)
@@ -64,6 +64,7 @@ struct emac_variant {
  * @variant:   reference to the current board variant
  * @regmap:    regmap for using the syscon
  * @internal_phy_powered: Does the internal PHY is enabled
+ * @use_internal_phy: Is the internal PHY selected for use
  * @mux_handle:        Internal pointer used by mdio-mux lib
  */
 struct sunxi_priv_data {
@@ -74,6 +75,7 @@ struct sunxi_priv_data {
        const struct emac_variant *variant;
        struct regmap_field *regmap_field;
        bool internal_phy_powered;
+       bool use_internal_phy;
        void *mux_handle;
 };
 
@@ -539,8 +541,11 @@ static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = {
        .dma_interrupt = sun8i_dwmac_dma_interrupt,
 };
 
+static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv);
+
 static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
 {
+       struct net_device *ndev = platform_get_drvdata(pdev);
        struct sunxi_priv_data *gmac = priv;
        int ret;
 
@@ -554,13 +559,25 @@ static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
 
        ret = clk_prepare_enable(gmac->tx_clk);
        if (ret) {
-               if (gmac->regulator)
-                       regulator_disable(gmac->regulator);
                dev_err(&pdev->dev, "Could not enable AHB clock\n");
-               return ret;
+               goto err_disable_regulator;
+       }
+
+       if (gmac->use_internal_phy) {
+               ret = sun8i_dwmac_power_internal_phy(netdev_priv(ndev));
+               if (ret)
+                       goto err_disable_clk;
        }
 
        return 0;
+
+err_disable_clk:
+       clk_disable_unprepare(gmac->tx_clk);
+err_disable_regulator:
+       if (gmac->regulator)
+               regulator_disable(gmac->regulator);
+
+       return ret;
 }
 
 static void sun8i_dwmac_core_init(struct mac_device_info *hw,
@@ -831,7 +848,6 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
        struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
        u32 reg, val;
        int ret = 0;
-       bool need_power_ephy = false;
 
        if (current_child ^ desired_child) {
                regmap_field_read(gmac->regmap_field, &reg);
@@ -839,13 +855,12 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
                case DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID:
                        dev_info(priv->device, "Switch mux to internal PHY");
                        val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SELECT;
-
-                       need_power_ephy = true;
+                       gmac->use_internal_phy = true;
                        break;
                case DWMAC_SUN8I_MDIO_MUX_EXTERNAL_ID:
                        dev_info(priv->device, "Switch mux to external PHY");
                        val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SHUTDOWN;
-                       need_power_ephy = false;
+                       gmac->use_internal_phy = false;
                        break;
                default:
                        dev_err(priv->device, "Invalid child ID %x\n",
@@ -853,7 +868,7 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
                        return -EINVAL;
                }
                regmap_field_write(gmac->regmap_field, val);
-               if (need_power_ephy) {
+               if (gmac->use_internal_phy) {
                        ret = sun8i_dwmac_power_internal_phy(priv);
                        if (ret)
                                return ret;
@@ -883,22 +898,23 @@ static int sun8i_dwmac_register_mdio_mux(struct stmmac_priv *priv)
        return ret;
 }
 
-static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
+static int sun8i_dwmac_set_syscon(struct device *dev,
+                                 struct plat_stmmacenet_data *plat)
 {
-       struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
-       struct device_node *node = priv->device->of_node;
+       struct sunxi_priv_data *gmac = plat->bsp_priv;
+       struct device_node *node = dev->of_node;
        int ret;
        u32 reg, val;
 
        ret = regmap_field_read(gmac->regmap_field, &val);
        if (ret) {
-               dev_err(priv->device, "Fail to read from regmap field.\n");
+               dev_err(dev, "Fail to read from regmap field.\n");
                return ret;
        }
 
        reg = gmac->variant->default_syscon_value;
        if (reg != val)
-               dev_warn(priv->device,
+               dev_warn(dev,
                         "Current syscon value is not the default %x (expect %x)\n",
                         val, reg);
 
@@ -911,9 +927,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
                /* Force EPHY xtal frequency to 24MHz. */
                reg |= H3_EPHY_CLK_SEL;
 
-               ret = of_mdio_parse_addr(priv->device, priv->plat->phy_node);
+               ret = of_mdio_parse_addr(dev, plat->phy_node);
                if (ret < 0) {
-                       dev_err(priv->device, "Could not parse MDIO addr\n");
+                       dev_err(dev, "Could not parse MDIO addr\n");
                        return ret;
                }
                /* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
@@ -929,17 +945,17 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
 
        if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
                if (val % 100) {
-                       dev_err(priv->device, "tx-delay must be a multiple of 100\n");
+                       dev_err(dev, "tx-delay must be a multiple of 100\n");
                        return -EINVAL;
                }
                val /= 100;
-               dev_dbg(priv->device, "set tx-delay to %x\n", val);
+               dev_dbg(dev, "set tx-delay to %x\n", val);
                if (val <= gmac->variant->tx_delay_max) {
                        reg &= ~(gmac->variant->tx_delay_max <<
                                 SYSCON_ETXDC_SHIFT);
                        reg |= (val << SYSCON_ETXDC_SHIFT);
                } else {
-                       dev_err(priv->device, "Invalid TX clock delay: %d\n",
+                       dev_err(dev, "Invalid TX clock delay: %d\n",
                                val);
                        return -EINVAL;
                }
@@ -947,17 +963,17 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
 
        if (!of_property_read_u32(node, "allwinner,rx-delay-ps", &val)) {
                if (val % 100) {
-                       dev_err(priv->device, "rx-delay must be a multiple of 100\n");
+                       dev_err(dev, "rx-delay must be a multiple of 100\n");
                        return -EINVAL;
                }
                val /= 100;
-               dev_dbg(priv->device, "set rx-delay to %x\n", val);
+               dev_dbg(dev, "set rx-delay to %x\n", val);
                if (val <= gmac->variant->rx_delay_max) {
                        reg &= ~(gmac->variant->rx_delay_max <<
                                 SYSCON_ERXDC_SHIFT);
                        reg |= (val << SYSCON_ERXDC_SHIFT);
                } else {
-                       dev_err(priv->device, "Invalid RX clock delay: %d\n",
+                       dev_err(dev, "Invalid RX clock delay: %d\n",
                                val);
                        return -EINVAL;
                }
@@ -968,7 +984,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
        if (gmac->variant->support_rmii)
                reg &= ~SYSCON_RMII_EN;
 
-       switch (priv->plat->interface) {
+       switch (plat->interface) {
        case PHY_INTERFACE_MODE_MII:
                /* default */
                break;
@@ -982,8 +998,8 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
                reg |= SYSCON_RMII_EN | SYSCON_ETCS_EXT_GMII;
                break;
        default:
-               dev_err(priv->device, "Unsupported interface mode: %s",
-                       phy_modes(priv->plat->interface));
+               dev_err(dev, "Unsupported interface mode: %s",
+                       phy_modes(plat->interface));
                return -EINVAL;
        }
 
@@ -1004,17 +1020,10 @@ static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
        struct sunxi_priv_data *gmac = priv;
 
        if (gmac->variant->soc_has_internal_phy) {
-               /* sun8i_dwmac_exit could be called with mdiomux uninit */
-               if (gmac->mux_handle)
-                       mdio_mux_uninit(gmac->mux_handle);
                if (gmac->internal_phy_powered)
                        sun8i_dwmac_unpower_internal_phy(gmac);
        }
 
-       sun8i_dwmac_unset_syscon(gmac);
-
-       reset_control_put(gmac->rst_ephy);
-
        clk_disable_unprepare(gmac->tx_clk);
 
        if (gmac->regulator)
@@ -1049,16 +1058,11 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
 {
        struct mac_device_info *mac;
        struct stmmac_priv *priv = ppriv;
-       int ret;
 
        mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
        if (!mac)
                return NULL;
 
-       ret = sun8i_dwmac_set_syscon(priv);
-       if (ret)
-               return NULL;
-
        mac->pcsr = priv->ioaddr;
        mac->mac = &sun8i_dwmac_ops;
        mac->dma = &sun8i_dwmac_dma_ops;
@@ -1134,10 +1138,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
-       if (IS_ERR(plat_dat))
-               return PTR_ERR(plat_dat);
-
        gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
        if (!gmac)
                return -ENOMEM;
@@ -1201,11 +1201,15 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
        ret = of_get_phy_mode(dev->of_node, &interface);
        if (ret)
                return -EINVAL;
-       plat_dat->interface = interface;
+
+       plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+       if (IS_ERR(plat_dat))
+               return PTR_ERR(plat_dat);
 
        /* platform data specifying hardware features and callbacks.
         * hardware features were copied from Allwinner drivers.
         */
+       plat_dat->interface = interface;
        plat_dat->rx_coe = STMMAC_RX_COE_TYPE2;
        plat_dat->tx_coe = 1;
        plat_dat->has_sun8i = true;
@@ -1214,9 +1218,13 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
        plat_dat->exit = sun8i_dwmac_exit;
        plat_dat->setup = sun8i_dwmac_setup;
 
+       ret = sun8i_dwmac_set_syscon(&pdev->dev, plat_dat);
+       if (ret)
+               goto dwmac_deconfig;
+
        ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv);
        if (ret)
-               return ret;
+               goto dwmac_syscon;
 
        ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
        if (ret)
@@ -1230,7 +1238,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
        if (gmac->variant->soc_has_internal_phy) {
                ret = get_ephy_nodes(priv);
                if (ret)
-                       goto dwmac_exit;
+                       goto dwmac_remove;
                ret = sun8i_dwmac_register_mdio_mux(priv);
                if (ret) {
                        dev_err(&pdev->dev, "Failed to register mux\n");
@@ -1239,15 +1247,42 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
        } else {
                ret = sun8i_dwmac_reset(priv);
                if (ret)
-                       goto dwmac_exit;
+                       goto dwmac_remove;
        }
 
        return ret;
 dwmac_mux:
-       sun8i_dwmac_unset_syscon(gmac);
+       reset_control_put(gmac->rst_ephy);
+       clk_put(gmac->ephy_clk);
+dwmac_remove:
+       stmmac_dvr_remove(&pdev->dev);
 dwmac_exit:
+       sun8i_dwmac_exit(pdev, gmac);
+dwmac_syscon:
+       sun8i_dwmac_unset_syscon(gmac);
+dwmac_deconfig:
+       stmmac_remove_config_dt(pdev, plat_dat);
+
+       return ret;
+}
+
+static int sun8i_dwmac_remove(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct stmmac_priv *priv = netdev_priv(ndev);
+       struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+
+       if (gmac->variant->soc_has_internal_phy) {
+               mdio_mux_uninit(gmac->mux_handle);
+               sun8i_dwmac_unpower_internal_phy(gmac);
+               reset_control_put(gmac->rst_ephy);
+               clk_put(gmac->ephy_clk);
+       }
+
        stmmac_pltfr_remove(pdev);
-return ret;
+       sun8i_dwmac_unset_syscon(gmac);
+
+       return 0;
 }
 
 static const struct of_device_id sun8i_dwmac_match[] = {
@@ -1269,7 +1304,7 @@ MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
 
 static struct platform_driver sun8i_dwmac_driver = {
        .probe  = sun8i_dwmac_probe,
-       .remove = stmmac_pltfr_remove,
+       .remove = sun8i_dwmac_remove,
        .driver = {
                .name           = "dwmac-sun8i",
                .pm             = &stmmac_pltfr_pm_ops,
index 03e79a6..8f7ac24 100644 (file)
@@ -568,68 +568,24 @@ static int dwmac5_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
 int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
                         unsigned int ptp_rate)
 {
-       u32 speed, total_offset, offset, ctrl, ctr_low;
-       u32 extcfg = readl(ioaddr + GMAC_EXT_CONFIG);
-       u32 mac_cfg = readl(ioaddr + GMAC_CONFIG);
        int i, ret = 0x0;
-       u64 total_ctr;
-
-       if (extcfg & GMAC_CONFIG_EIPG_EN) {
-               offset = (extcfg & GMAC_CONFIG_EIPG) >> GMAC_CONFIG_EIPG_SHIFT;
-               offset = 104 + (offset * 8);
-       } else {
-               offset = (mac_cfg & GMAC_CONFIG_IPG) >> GMAC_CONFIG_IPG_SHIFT;
-               offset = 96 - (offset * 8);
-       }
-
-       speed = mac_cfg & (GMAC_CONFIG_PS | GMAC_CONFIG_FES);
-       speed = speed >> GMAC_CONFIG_FES_SHIFT;
-
-       switch (speed) {
-       case 0x0:
-               offset = offset * 1000; /* 1G */
-               break;
-       case 0x1:
-               offset = offset * 400; /* 2.5G */
-               break;
-       case 0x2:
-               offset = offset * 100000; /* 10M */
-               break;
-       case 0x3:
-               offset = offset * 10000; /* 100M */
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       offset = offset / 1000;
+       u32 ctrl;
 
        ret |= dwmac5_est_write(ioaddr, BTR_LOW, cfg->btr[0], false);
        ret |= dwmac5_est_write(ioaddr, BTR_HIGH, cfg->btr[1], false);
        ret |= dwmac5_est_write(ioaddr, TER, cfg->ter, false);
        ret |= dwmac5_est_write(ioaddr, LLR, cfg->gcl_size, false);
+       ret |= dwmac5_est_write(ioaddr, CTR_LOW, cfg->ctr[0], false);
+       ret |= dwmac5_est_write(ioaddr, CTR_HIGH, cfg->ctr[1], false);
        if (ret)
                return ret;
 
-       total_offset = 0;
        for (i = 0; i < cfg->gcl_size; i++) {
-               ret = dwmac5_est_write(ioaddr, i, cfg->gcl[i] + offset, true);
+               ret = dwmac5_est_write(ioaddr, i, cfg->gcl[i], true);
                if (ret)
                        return ret;
-
-               total_offset += offset;
        }
 
-       total_ctr = cfg->ctr[0] + cfg->ctr[1] * 1000000000ULL;
-       total_ctr += total_offset;
-
-       ctr_low = do_div(total_ctr, 1000000000);
-
-       ret |= dwmac5_est_write(ioaddr, CTR_LOW, ctr_low, false);
-       ret |= dwmac5_est_write(ioaddr, CTR_HIGH, total_ctr, false);
-       if (ret)
-               return ret;
-
        ctrl = readl(ioaddr + MTL_EST_CONTROL);
        ctrl &= ~PTOV;
        ctrl |= ((1000000000 / ptp_rate) * 6) << PTOV_SHIFT;
index 5b1c12f..26b971c 100644 (file)
@@ -2184,7 +2184,7 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
                        spin_lock_irqsave(&ch->lock, flags);
                        stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
                        spin_unlock_irqrestore(&ch->lock, flags);
-                       __napi_schedule_irqoff(&ch->rx_napi);
+                       __napi_schedule(&ch->rx_napi);
                }
        }
 
@@ -2193,7 +2193,7 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
                        spin_lock_irqsave(&ch->lock, flags);
                        stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
                        spin_unlock_irqrestore(&ch->lock, flags);
-                       __napi_schedule_irqoff(&ch->tx_napi);
+                       __napi_schedule(&ch->tx_napi);
                }
        }
 
@@ -4026,6 +4026,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
        int txfifosz = priv->plat->tx_fifo_size;
+       const int mtu = new_mtu;
 
        if (txfifosz == 0)
                txfifosz = priv->dma_cap.tx_fifo_size;
@@ -4043,7 +4044,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
        if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
                return -EINVAL;
 
-       dev->mtu = new_mtu;
+       dev->mtu = mtu;
 
        netdev_update_features(dev);
 
index f5bed4d..8ed3b2c 100644 (file)
@@ -599,7 +599,8 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
 {
        u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
        struct plat_stmmacenet_data *plat = priv->plat;
-       struct timespec64 time;
+       struct timespec64 time, current_time;
+       ktime_t current_time_ns;
        bool fpe = false;
        int i, ret = 0;
        u64 ctr;
@@ -694,7 +695,22 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
        }
 
        /* Adjust for real system time */
-       time = ktime_to_timespec64(qopt->base_time);
+       priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
+       current_time_ns = timespec64_to_ktime(current_time);
+       if (ktime_after(qopt->base_time, current_time_ns)) {
+               time = ktime_to_timespec64(qopt->base_time);
+       } else {
+               ktime_t base_time;
+               s64 n;
+
+               n = div64_s64(ktime_sub_ns(current_time_ns, qopt->base_time),
+                             qopt->cycle_time);
+               base_time = ktime_add_ns(qopt->base_time,
+                                        (n + 1) * qopt->cycle_time);
+
+               time = ktime_to_timespec64(base_time);
+       }
+
        priv->plat->est->btr[0] = (u32)time.tv_nsec;
        priv->plat->est->btr[1] = (u32)time.tv_sec;
 
index d1fc795..43222a3 100644 (file)
@@ -599,6 +599,7 @@ void cpts_unregister(struct cpts *cpts)
 
        ptp_clock_unregister(cpts->clock);
        cpts->clock = NULL;
+       cpts->phc_index = -1;
 
        cpts_write32(cpts, 0, int_enable);
        cpts_write32(cpts, 0, control);
@@ -784,6 +785,7 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
        cpts->cc.read = cpts_systim_read;
        cpts->cc.mask = CLOCKSOURCE_MASK(32);
        cpts->info = cpts_info;
+       cpts->phc_index = -1;
 
        if (n_ext_ts)
                cpts->info.n_ext_ts = n_ext_ts;
index c479524..14d9a79 100644 (file)
@@ -326,8 +326,8 @@ gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
 }
 
 /* Issue an event ring command and wait for it to complete */
-static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
-                           enum gsi_evt_cmd_opcode opcode)
+static void evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
+                            enum gsi_evt_cmd_opcode opcode)
 {
        struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
        struct completion *completion = &evt_ring->completion;
@@ -340,7 +340,13 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
         * is issued here.  Only permit *this* event ring to trigger
         * an interrupt, and only enable the event control IRQ type
         * when we expect it to occur.
+        *
+        * There's a small chance that a previous command completed
+        * after the interrupt was disabled, so make sure we have no
+        * pending interrupts before we enable them.
         */
+       iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
+
        val = BIT(evt_ring_id);
        iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
        gsi_irq_type_enable(gsi, GSI_EV_CTRL);
@@ -355,19 +361,16 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
        iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
 
        if (success)
-               return 0;
+               return;
 
        dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
                opcode, evt_ring_id, evt_ring->state);
-
-       return -ETIMEDOUT;
 }
 
 /* Allocate an event ring in NOT_ALLOCATED state */
 static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
 {
        struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
-       int ret;
 
        /* Get initial event ring state */
        evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
@@ -377,14 +380,16 @@ static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
                return -EINVAL;
        }
 
-       ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
-       if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
-               dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
-                       evt_ring_id, evt_ring->state);
-               ret = -EIO;
-       }
+       evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
 
-       return ret;
+       /* If successful the event ring state will have changed */
+       if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
+               return 0;
+
+       dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
+               evt_ring_id, evt_ring->state);
+
+       return -EIO;
 }
 
 /* Reset a GSI event ring in ALLOCATED or ERROR state. */
@@ -392,7 +397,6 @@ static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
 {
        struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
        enum gsi_evt_ring_state state = evt_ring->state;
-       int ret;
 
        if (state != GSI_EVT_RING_STATE_ALLOCATED &&
            state != GSI_EVT_RING_STATE_ERROR) {
@@ -401,17 +405,20 @@ static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
                return;
        }
 
-       ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
-       if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
-               dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
-                       evt_ring_id, evt_ring->state);
+       evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
+
+       /* If successful the event ring state will have changed */
+       if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
+               return;
+
+       dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
+               evt_ring_id, evt_ring->state);
 }
 
 /* Issue a hardware de-allocation request for an allocated event ring */
 static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
 {
        struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
-       int ret;
 
        if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
                dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
@@ -419,10 +426,14 @@ static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
                return;
        }
 
-       ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
-       if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
-               dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
-                       evt_ring_id, evt_ring->state);
+       evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
+
+       /* If successful the event ring state will have changed */
+       if (evt_ring->state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
+               return;
+
+       dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
+               evt_ring_id, evt_ring->state);
 }
 
 /* Fetch the current state of a channel from hardware */
@@ -438,7 +449,7 @@ static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
 }
 
 /* Issue a channel command and wait for it to complete */
-static int
+static void
 gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
 {
        struct completion *completion = &channel->completion;
@@ -453,7 +464,13 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
         * issued here.  So we only permit *this* channel to trigger
         * an interrupt and only enable the channel control IRQ type
         * when we expect it to occur.
+        *
+        * There's a small chance that a previous command completed
+        * after the interrupt was disabled, so make sure we have no
+        * pending interrupts before we enable them.
         */
+       iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
+
        val = BIT(channel_id);
        iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
        gsi_irq_type_enable(gsi, GSI_CH_CTRL);
@@ -467,12 +484,10 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
        iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
 
        if (success)
-               return 0;
+               return;
 
        dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
                opcode, channel_id, gsi_channel_state(channel));
-
-       return -ETIMEDOUT;
 }
 
 /* Allocate GSI channel in NOT_ALLOCATED state */
@@ -481,7 +496,6 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
        struct gsi_channel *channel = &gsi->channel[channel_id];
        struct device *dev = gsi->dev;
        enum gsi_channel_state state;
-       int ret;
 
        /* Get initial channel state */
        state = gsi_channel_state(channel);
@@ -491,17 +505,17 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
                return -EINVAL;
        }
 
-       ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);
+       gsi_channel_command(channel, GSI_CH_ALLOCATE);
 
-       /* Channel state will normally have been updated */
+       /* If successful the channel state will have changed */
        state = gsi_channel_state(channel);
-       if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
-               dev_err(dev, "channel %u bad state %u after alloc\n",
-                       channel_id, state);
-               ret = -EIO;
-       }
+       if (state == GSI_CHANNEL_STATE_ALLOCATED)
+               return 0;
 
-       return ret;
+       dev_err(dev, "channel %u bad state %u after alloc\n",
+               channel_id, state);
+
+       return -EIO;
 }
 
 /* Start an ALLOCATED channel */
@@ -509,7 +523,6 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
 {
        struct device *dev = channel->gsi->dev;
        enum gsi_channel_state state;
-       int ret;
 
        state = gsi_channel_state(channel);
        if (state != GSI_CHANNEL_STATE_ALLOCATED &&
@@ -519,17 +532,17 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
                return -EINVAL;
        }
 
-       ret = gsi_channel_command(channel, GSI_CH_START);
+       gsi_channel_command(channel, GSI_CH_START);
 
-       /* Channel state will normally have been updated */
+       /* If successful the channel state will have changed */
        state = gsi_channel_state(channel);
-       if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
-               dev_err(dev, "channel %u bad state %u after start\n",
-                       gsi_channel_id(channel), state);
-               ret = -EIO;
-       }
+       if (state == GSI_CHANNEL_STATE_STARTED)
+               return 0;
 
-       return ret;
+       dev_err(dev, "channel %u bad state %u after start\n",
+               gsi_channel_id(channel), state);
+
+       return -EIO;
 }
 
 /* Stop a GSI channel in STARTED state */
@@ -537,7 +550,6 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
 {
        struct device *dev = channel->gsi->dev;
        enum gsi_channel_state state;
-       int ret;
 
        state = gsi_channel_state(channel);
 
@@ -554,12 +566,12 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
                return -EINVAL;
        }
 
-       ret = gsi_channel_command(channel, GSI_CH_STOP);
+       gsi_channel_command(channel, GSI_CH_STOP);
 
-       /* Channel state will normally have been updated */
+       /* If successful the channel state will have changed */
        state = gsi_channel_state(channel);
-       if (ret || state == GSI_CHANNEL_STATE_STOPPED)
-               return ret;
+       if (state == GSI_CHANNEL_STATE_STOPPED)
+               return 0;
 
        /* We may have to try again if stop is in progress */
        if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
@@ -576,7 +588,6 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
 {
        struct device *dev = channel->gsi->dev;
        enum gsi_channel_state state;
-       int ret;
 
        msleep(1);      /* A short delay is required before a RESET command */
 
@@ -590,11 +601,11 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
                return;
        }
 
-       ret = gsi_channel_command(channel, GSI_CH_RESET);
+       gsi_channel_command(channel, GSI_CH_RESET);
 
-       /* Channel state will normally have been updated */
+       /* If successful the channel state will have changed */
        state = gsi_channel_state(channel);
-       if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED)
+       if (state != GSI_CHANNEL_STATE_ALLOCATED)
                dev_err(dev, "channel %u bad state %u after reset\n",
                        gsi_channel_id(channel), state);
 }
@@ -605,7 +616,6 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
        struct gsi_channel *channel = &gsi->channel[channel_id];
        struct device *dev = gsi->dev;
        enum gsi_channel_state state;
-       int ret;
 
        state = gsi_channel_state(channel);
        if (state != GSI_CHANNEL_STATE_ALLOCATED) {
@@ -614,11 +624,12 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
                return;
        }
 
-       ret = gsi_channel_command(channel, GSI_CH_DE_ALLOC);
+       gsi_channel_command(channel, GSI_CH_DE_ALLOC);
 
-       /* Channel state will normally have been updated */
+       /* If successful the channel state will have changed */
        state = gsi_channel_state(channel);
-       if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
+
+       if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
                dev_err(dev, "channel %u bad state %u after dealloc\n",
                        channel_id, state);
 }
index 9dcf16f..135c393 100644 (file)
@@ -115,13 +115,13 @@ static int ipa_interconnect_enable(struct ipa *ipa)
                return ret;
 
        data = &clock->interconnect_data[IPA_INTERCONNECT_IMEM];
-       ret = icc_set_bw(clock->memory_path, data->average_rate,
+       ret = icc_set_bw(clock->imem_path, data->average_rate,
                         data->peak_rate);
        if (ret)
                goto err_memory_path_disable;
 
        data = &clock->interconnect_data[IPA_INTERCONNECT_CONFIG];
-       ret = icc_set_bw(clock->memory_path, data->average_rate,
+       ret = icc_set_bw(clock->config_path, data->average_rate,
                         data->peak_rate);
        if (ret)
                goto err_imem_path_disable;
index e34fe2d..9b08eb8 100644 (file)
@@ -216,6 +216,7 @@ int ipa_modem_start(struct ipa *ipa)
        ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev;
        ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev;
 
+       SET_NETDEV_DEV(netdev, &ipa->pdev->dev);
        priv = netdev_priv(netdev);
        priv->ipa = ipa;
 
index 5136275..d3915f8 100644 (file)
@@ -149,7 +149,7 @@ static int mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, u32 addr)
        return dev_addr;
 }
 
-static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
+int mdiobb_read(struct mii_bus *bus, int phy, int reg)
 {
        struct mdiobb_ctrl *ctrl = bus->priv;
        int ret, i;
@@ -180,8 +180,9 @@ static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
        mdiobb_get_bit(ctrl);
        return ret;
 }
+EXPORT_SYMBOL(mdiobb_read);
 
-static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
+int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
 {
        struct mdiobb_ctrl *ctrl = bus->priv;
 
@@ -201,6 +202,7 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
        mdiobb_get_bit(ctrl);
        return 0;
 }
+EXPORT_SYMBOL(mdiobb_write);
 
 struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
 {
index 3337275..ddb78fb 100644 (file)
@@ -317,7 +317,8 @@ static int smsc_phy_probe(struct phy_device *phydev)
        /* Make clk optional to keep DTB backward compatibility. */
        priv->refclk = clk_get_optional(dev, NULL);
        if (IS_ERR(priv->refclk))
-               dev_err_probe(dev, PTR_ERR(priv->refclk), "Failed to request clock\n");
+               return dev_err_probe(dev, PTR_ERR(priv->refclk),
+                                    "Failed to request clock\n");
 
        ret = clk_prepare_enable(priv->refclk);
        if (ret)
index 09c27f7..d445ecb 100644 (file)
@@ -623,6 +623,7 @@ static int ppp_bridge_channels(struct channel *pch, struct channel *pchb)
                write_unlock_bh(&pch->upl);
                return -EALREADY;
        }
+       refcount_inc(&pchb->file.refcnt);
        rcu_assign_pointer(pch->bridge, pchb);
        write_unlock_bh(&pch->upl);
 
@@ -632,19 +633,24 @@ static int ppp_bridge_channels(struct channel *pch, struct channel *pchb)
                write_unlock_bh(&pchb->upl);
                goto err_unset;
        }
+       refcount_inc(&pch->file.refcnt);
        rcu_assign_pointer(pchb->bridge, pch);
        write_unlock_bh(&pchb->upl);
 
-       refcount_inc(&pch->file.refcnt);
-       refcount_inc(&pchb->file.refcnt);
-
        return 0;
 
 err_unset:
        write_lock_bh(&pch->upl);
+       /* Re-read pch->bridge with upl held in case it was modified concurrently */
+       pchb = rcu_dereference_protected(pch->bridge, lockdep_is_held(&pch->upl));
        RCU_INIT_POINTER(pch->bridge, NULL);
        write_unlock_bh(&pch->upl);
        synchronize_rcu();
+
+       if (pchb)
+               if (refcount_dec_and_test(&pchb->file.refcnt))
+                       ppp_destroy_channel(pchb);
+
        return -EALREADY;
 }
 
index fbed05a..978ac09 100644 (file)
@@ -1365,7 +1365,7 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
        int i;
 
        if (it->nr_segs > MAX_SKB_FRAGS + 1)
-               return ERR_PTR(-ENOMEM);
+               return ERR_PTR(-EMSGSIZE);
 
        local_bh_disable();
        skb = napi_get_frags(&tfile->napi);
index 1e37190..fbbe786 100644 (file)
@@ -631,7 +631,6 @@ config USB_NET_AQC111
 config USB_RTL8153_ECM
        tristate "RTL8153 ECM support"
        depends on USB_NET_CDCETHER && (USB_RTL8152 || USB_RTL8152=n)
-       default y
        help
          This option supports ECM mode for RTL8153 ethernet adapter, when
          CONFIG_USB_RTL8152 is not set, or the RTL8153 device is not
index 8c1d61c..6aaa067 100644 (file)
@@ -793,6 +793,13 @@ static const struct usb_device_id  products[] = {
        .driver_info = 0,
 },
 
+/* Lenovo Powered USB-C Travel Hub (4X90S92381, based on Realtek RTL8153) */
+{
+       USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x721e, USB_CLASS_COMM,
+                       USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+       .driver_info = 0,
+},
+
 /* ThinkPad USB-C Dock Gen 2 (based on Realtek RTL8153) */
 {
        USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa387, USB_CLASS_COMM,
index 2bac57d..291e76d 100644 (file)
@@ -1199,7 +1199,10 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
         * accordingly. Otherwise, we should check here.
         */
        if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
-               delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
+               delayed_ndp_size = ctx->max_ndp_size +
+                       max_t(u32,
+                             ctx->tx_ndp_modulus,
+                             ctx->tx_modulus + ctx->tx_remainder) - 1;
        else
                delayed_ndp_size = 0;
 
@@ -1410,7 +1413,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
        if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
            skb_out->len > ctx->min_tx_pkt) {
                padding_count = ctx->tx_curr_size - skb_out->len;
-               skb_put_zero(skb_out, padding_count);
+               if (!WARN_ON(padding_count > ctx->tx_curr_size))
+                       skb_put_zero(skb_out, padding_count);
        } else if (skb_out->len < ctx->tx_curr_size &&
                   (skb_out->len % dev->maxpacket) == 0) {
                skb_put_u8(skb_out, 0); /* force short packet */
@@ -1823,6 +1827,15 @@ cdc_ncm_speed_change(struct usbnet *dev,
        uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
        uint32_t tx_speed = le32_to_cpu(data->ULBitRate);
 
+       /* if the speed hasn't changed, don't report it.
+        * RTL8156 shipped before 2021 sends notification about every 32ms.
+        */
+       if (dev->rx_speed == rx_speed && dev->tx_speed == tx_speed)
+               return;
+
+       dev->rx_speed = rx_speed;
+       dev->tx_speed = tx_speed;
+
        /*
         * Currently the USB-NET API does not support reporting the actual
         * device speed. Do print it instead.
@@ -1863,10 +1876,8 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
                 * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
                 * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
                 */
-               netif_info(dev, link, dev->net,
-                          "network connection: %sconnected\n",
-                          !!event->wValue ? "" : "dis");
-               usbnet_link_change(dev, !!event->wValue, 0);
+               if (netif_carrier_ok(dev->net) != !!event->wValue)
+                       usbnet_link_change(dev, !!event->wValue, 0);
                break;
 
        case USB_CDC_NOTIFY_SPEED_CHANGE:
index d166c32..af19513 100644 (file)
@@ -1013,6 +1013,7 @@ static const struct usb_device_id products[] = {
        {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)},   /* Quectel EC25, EC20 R2.0  Mini PCIe */
        {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)},   /* Quectel EP06/EG06/EM06 */
        {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)},   /* Quectel EG12/EM12 */
+       {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0620)},   /* Quectel EM160R-GL */
        {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)},   /* Quectel RM500Q-GL */
 
        /* 3. Combined interface devices matching on interface number */
index c448d60..67cd698 100644 (file)
@@ -6877,6 +6877,7 @@ static const struct usb_device_id rtl8152_table[] = {
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x7205)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x720c)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x7214)},
+       {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x721e)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0xa387)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
        {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA,  0x09ff)},
index 2c3fabd..20b2df8 100644 (file)
@@ -122,12 +122,20 @@ static const struct driver_info r8153_info = {
 };
 
 static const struct usb_device_id products[] = {
+/* Realtek RTL8153 Based USB 3.0 Ethernet Adapters */
 {
        USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID_REALTEK, 0x8153, USB_CLASS_COMM,
                                      USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
        .driver_info = (unsigned long)&r8153_info,
 },
 
+/* Lenovo Powered USB-C Travel Hub (4X90S92381, based on Realtek RTL8153) */
+{
+       USB_DEVICE_AND_INTERFACE_INFO(VENDOR_ID_LENOVO, 0x721e, USB_CLASS_COMM,
+                                     USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+       .driver_info = (unsigned long)&r8153_info,
+},
+
        { },            /* END */
 };
 MODULE_DEVICE_TABLE(usb, products);
index 6609d21..f813ca9 100644 (file)
@@ -387,7 +387,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
        reply_len = sizeof *phym;
        retval = rndis_query(dev, intf, u.buf,
                             RNDIS_OID_GEN_PHYSICAL_MEDIUM,
-                            0, (void **) &phym, &reply_len);
+                            reply_len, (void **)&phym, &reply_len);
        if (retval != 0 || !phym) {
                /* OID is optional so don't fail here. */
                phym_unspec = cpu_to_le32(RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED);
index 4c41df6..508408f 100644 (file)
@@ -2093,14 +2093,16 @@ static int virtnet_set_channels(struct net_device *dev,
 
        get_online_cpus();
        err = _virtnet_set_queues(vi, queue_pairs);
-       if (!err) {
-               netif_set_real_num_tx_queues(dev, queue_pairs);
-               netif_set_real_num_rx_queues(dev, queue_pairs);
-
-               virtnet_set_affinity(vi);
+       if (err) {
+               put_online_cpus();
+               goto err;
        }
+       virtnet_set_affinity(vi);
        put_online_cpus();
 
+       netif_set_real_num_tx_queues(dev, queue_pairs);
+       netif_set_real_num_rx_queues(dev, queue_pairs);
+ err:
        return err;
 }
 
index 4029fde..83c9481 100644 (file)
@@ -282,6 +282,7 @@ config SLIC_DS26522
        tristate "Slic Maxim ds26522 card support"
        depends on SPI
        depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST
+       select BITREVERSE
        help
          This module initializes and configures the slic maxim card
          in T1 or E1 mode.
index 64f8556..261b53f 100644 (file)
@@ -569,6 +569,13 @@ static void ppp_timer(struct timer_list *t)
        unsigned long flags;
 
        spin_lock_irqsave(&ppp->lock, flags);
+       /* mod_timer could be called after we entered this function but
+        * before we got the lock.
+        */
+       if (timer_pending(&proto->timer)) {
+               spin_unlock_irqrestore(&ppp->lock, flags);
+               return;
+       }
        switch (proto->state) {
        case STOPPING:
        case REQ_SENT:
index b97c38b..350b791 100644 (file)
@@ -185,7 +185,7 @@ int ath11k_core_suspend(struct ath11k_base *ab)
        ath11k_hif_ce_irq_disable(ab);
 
        ret = ath11k_hif_suspend(ab);
-       if (!ret) {
+       if (ret) {
                ath11k_warn(ab, "failed to suspend hif: %d\n", ret);
                return ret;
        }
index 205c0f1..920e502 100644 (file)
@@ -2294,6 +2294,7 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
 {
        u8 channel_num;
        u32 center_freq;
+       struct ieee80211_channel *channel;
 
        rx_status->freq = 0;
        rx_status->rate_idx = 0;
@@ -2314,9 +2315,12 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
                rx_status->band = NL80211_BAND_5GHZ;
        } else {
                spin_lock_bh(&ar->data_lock);
-               rx_status->band = ar->rx_channel->band;
-               channel_num =
-                       ieee80211_frequency_to_channel(ar->rx_channel->center_freq);
+               channel = ar->rx_channel;
+               if (channel) {
+                       rx_status->band = channel->band;
+                       channel_num =
+                               ieee80211_frequency_to_channel(channel->center_freq);
+               }
                spin_unlock_bh(&ar->data_lock);
                ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
                                rx_desc, sizeof(struct hal_rx_desc));
index 5c175e3..c1608f6 100644 (file)
@@ -3021,6 +3021,7 @@ static int ath11k_mac_station_add(struct ath11k *ar,
        }
 
        if (ab->hw_params.vdev_start_delay &&
+           !arvif->is_started &&
            arvif->vdev_type != WMI_VDEV_TYPE_AP) {
                ret = ath11k_start_vdev_delay(ar->hw, vif);
                if (ret) {
@@ -5284,7 +5285,8 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
        /* for QCA6390 bss peer must be created before vdev_start */
        if (ab->hw_params.vdev_start_delay &&
            arvif->vdev_type != WMI_VDEV_TYPE_AP &&
-           arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
+           arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+           !ath11k_peer_find_by_vdev_id(ab, arvif->vdev_id)) {
                memcpy(&arvif->chanctx, ctx, sizeof(*ctx));
                ret = 0;
                goto out;
@@ -5295,7 +5297,9 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
                goto out;
        }
 
-       if (ab->hw_params.vdev_start_delay) {
+       if (ab->hw_params.vdev_start_delay &&
+           (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+           arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)) {
                param.vdev_id = arvif->vdev_id;
                param.peer_type = WMI_PEER_TYPE_DEFAULT;
                param.peer_addr = ar->mac_addr;
index 857647a..20b415c 100644 (file)
@@ -274,7 +274,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
                                      PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG,
                                      PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL,
                                      PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK);
-       if (!ret) {
+       if (ret) {
                ath11k_warn(ab, "failed to set sysclk: %d\n", ret);
                return ret;
        }
@@ -283,7 +283,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_REG,
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_VAL,
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
-       if (!ret) {
+       if (ret) {
                ath11k_warn(ab, "failed to set dtct config1 error: %d\n", ret);
                return ret;
        }
@@ -292,7 +292,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_REG,
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_VAL,
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
-       if (!ret) {
+       if (ret) {
                ath11k_warn(ab, "failed to set dtct config2: %d\n", ret);
                return ret;
        }
@@ -301,7 +301,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_REG,
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_VAL,
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
-       if (!ret) {
+       if (ret) {
                ath11k_warn(ab, "failed to set dtct config4: %d\n", ret);
                return ret;
        }
@@ -886,6 +886,32 @@ static void ath11k_pci_free_region(struct ath11k_pci *ab_pci)
                pci_disable_device(pci_dev);
 }
 
+static void ath11k_pci_aspm_disable(struct ath11k_pci *ab_pci)
+{
+       struct ath11k_base *ab = ab_pci->ab;
+
+       pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+                                 &ab_pci->link_ctl);
+
+       ath11k_dbg(ab, ATH11K_DBG_PCI, "pci link_ctl 0x%04x L0s %d L1 %d\n",
+                  ab_pci->link_ctl,
+                  u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S),
+                  u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));
+
+       /* disable L0s and L1 */
+       pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+                                  ab_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
+
+       set_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags);
+}
+
+static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci)
+{
+       if (test_and_clear_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags))
+               pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+                                          ab_pci->link_ctl);
+}
+
 static int ath11k_pci_power_up(struct ath11k_base *ab)
 {
        struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
@@ -895,6 +921,11 @@ static int ath11k_pci_power_up(struct ath11k_base *ab)
        clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
        ath11k_pci_sw_reset(ab_pci->ab, true);
 
+       /* Disable ASPM during firmware download due to problems switching
+        * to AMSS state.
+        */
+       ath11k_pci_aspm_disable(ab_pci);
+
        ret = ath11k_mhi_start(ab_pci);
        if (ret) {
                ath11k_err(ab, "failed to start mhi: %d\n", ret);
@@ -908,6 +939,9 @@ static void ath11k_pci_power_down(struct ath11k_base *ab)
 {
        struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
 
+       /* restore aspm in case firmware bootup fails */
+       ath11k_pci_aspm_restore(ab_pci);
+
        ath11k_pci_force_wake(ab_pci->ab);
        ath11k_mhi_stop(ab_pci);
        clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
@@ -965,6 +999,8 @@ static int ath11k_pci_start(struct ath11k_base *ab)
 
        set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
 
+       ath11k_pci_aspm_restore(ab_pci);
+
        ath11k_pci_ce_irqs_enable(ab);
        ath11k_ce_rx_post_buf(ab);
 
index 0432a70..fe44d0d 100644 (file)
@@ -63,6 +63,7 @@ struct ath11k_msi_config {
 enum ath11k_pci_flags {
        ATH11K_PCI_FLAG_INIT_DONE,
        ATH11K_PCI_FLAG_IS_MSI_64,
+       ATH11K_PCI_ASPM_RESTORE,
 };
 
 struct ath11k_pci {
@@ -80,6 +81,7 @@ struct ath11k_pci {
 
        /* enum ath11k_pci_flags */
        unsigned long flags;
+       u16 link_ctl;
 };
 
 static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
index 1866d82..b69e7eb 100644 (file)
@@ -76,6 +76,23 @@ struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
        return NULL;
 }
 
+struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
+                                               int vdev_id)
+{
+       struct ath11k_peer *peer;
+
+       spin_lock_bh(&ab->base_lock);
+
+       list_for_each_entry(peer, &ab->peers, list) {
+               if (vdev_id == peer->vdev_id) {
+                       spin_unlock_bh(&ab->base_lock);
+                       return peer;
+               }
+       }
+       spin_unlock_bh(&ab->base_lock);
+       return NULL;
+}
+
 void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id)
 {
        struct ath11k_peer *peer;
index bba2e00..8553ed0 100644 (file)
@@ -43,5 +43,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
                       struct ieee80211_sta *sta, struct peer_create_params *param);
 int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
                                     const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
+                                               int vdev_id);
 
 #endif /* _PEER_H_ */
index f0b5c50..0db623f 100644 (file)
@@ -1660,6 +1660,7 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
        struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
        struct qmi_txn txn = {};
        int ret = 0, i;
+       bool delayed;
 
        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
@@ -1672,11 +1673,13 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
         * failure to FW and FW will then request mulitple blocks of small
         * chunk size memory.
         */
-       if (!ab->bus_params.fixed_mem_region && ab->qmi.mem_seg_count <= 2) {
+       if (!ab->bus_params.fixed_mem_region && ab->qmi.target_mem_delayed) {
+               delayed = true;
                ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi delays mem_request %d\n",
                           ab->qmi.mem_seg_count);
                memset(req, 0, sizeof(*req));
        } else {
+               delayed = false;
                req->mem_seg_len = ab->qmi.mem_seg_count;
 
                for (i = 0; i < req->mem_seg_len ; i++) {
@@ -1708,6 +1711,12 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
        }
 
        if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+               /* the error response is expected when
+                * target_mem_delayed is true.
+                */
+               if (delayed && resp.resp.error == 0)
+                       goto out;
+
                ath11k_warn(ab, "Respond mem req failed, result: %d, err: %d\n",
                            resp.resp.result, resp.resp.error);
                ret = -EINVAL;
@@ -1742,6 +1751,8 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
        int i;
        struct target_mem_chunk *chunk;
 
+       ab->qmi.target_mem_delayed = false;
+
        for (i = 0; i < ab->qmi.mem_seg_count; i++) {
                chunk = &ab->qmi.target_mem[i];
                chunk->vaddr = dma_alloc_coherent(ab->dev,
@@ -1749,6 +1760,15 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
                                                  &chunk->paddr,
                                                  GFP_KERNEL);
                if (!chunk->vaddr) {
+                       if (ab->qmi.mem_seg_count <= 2) {
+                               ath11k_dbg(ab, ATH11K_DBG_QMI,
+                                          "qmi dma allocation failed (%d B type %u), will try later with small size\n",
+                                           chunk->size,
+                                           chunk->type);
+                               ath11k_qmi_free_target_mem_chunk(ab);
+                               ab->qmi.target_mem_delayed = true;
+                               return 0;
+                       }
                        ath11k_err(ab, "failed to alloc memory, size: 0x%x, type: %u\n",
                                   chunk->size,
                                   chunk->type);
@@ -2517,7 +2537,7 @@ static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
                                    ret);
                        return;
                }
-       } else if (msg->mem_seg_len > 2) {
+       } else {
                ret = ath11k_qmi_alloc_target_mem_chunk(ab);
                if (ret) {
                        ath11k_warn(ab, "qmi failed to alloc target memory: %d\n",
index 92925c9..7bad374 100644 (file)
@@ -125,6 +125,7 @@ struct ath11k_qmi {
        struct target_mem_chunk target_mem[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
        u32 mem_seg_count;
        u32 target_mem_mode;
+       bool target_mem_delayed;
        u8 cal_done;
        struct target_info target;
        struct m3_mem_region m3_mem;
index da4b546..73869d4 100644 (file)
@@ -3460,6 +3460,9 @@ int ath11k_wmi_set_hw_mode(struct ath11k_base *ab,
        len = sizeof(*cmd);
 
        skb = ath11k_wmi_alloc_skb(wmi_ab, len);
+       if (!skb)
+               return -ENOMEM;
+
        cmd = (struct wmi_pdev_set_hw_mode_cmd_param *)skb->data;
 
        cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) |
index 6a95b19..f074e9c 100644 (file)
@@ -2,6 +2,7 @@
 config WIL6210
        tristate "Wilocity 60g WiFi card wil6210 support"
        select WANT_DEV_COREDUMP
+       select CRC32
        depends on CFG80211
        depends on PCI
        default n
index ed4635b..102a8f1 100644 (file)
@@ -40,9 +40,9 @@ static const struct ieee80211_iface_limit if_limits[] = {
                .types = BIT(NL80211_IFTYPE_ADHOC)
        }, {
                .max = 16,
-               .types = BIT(NL80211_IFTYPE_AP) |
+               .types = BIT(NL80211_IFTYPE_AP)
 #ifdef CONFIG_MAC80211_MESH
-                        BIT(NL80211_IFTYPE_MESH_POINT)
+                        BIT(NL80211_IFTYPE_MESH_POINT)
 #endif
        }, {
                .max = MT7915_MAX_INTERFACES,
index 62b5b91..0b6facb 100644 (file)
@@ -157,10 +157,14 @@ static void mt76s_net_worker(struct mt76_worker *w)
 
 static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 {
-       bool wake, mcu = q == dev->q_mcu[MT_MCUQ_WM];
        struct mt76_queue_entry entry;
        int nframes = 0;
+       bool mcu;
 
+       if (!q)
+               return 0;
+
+       mcu = q == dev->q_mcu[MT_MCUQ_WM];
        while (q->queued > 0) {
                if (!q->entry[q->tail].done)
                        break;
@@ -177,21 +181,12 @@ static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
                nframes++;
        }
 
-       wake = q->stopped && q->queued < q->ndesc - 8;
-       if (wake)
-               q->stopped = false;
-
        if (!q->queued)
                wake_up(&dev->tx_wait);
 
-       if (mcu)
-               goto out;
-
-       mt76_txq_schedule(&dev->phy, q->qid);
+       if (!mcu)
+               mt76_txq_schedule(&dev->phy, q->qid);
 
-       if (wake)
-               ieee80211_wake_queue(dev->hw, q->qid);
-out:
        return nframes;
 }
 
index dc85010..b95d093 100644 (file)
@@ -811,11 +811,12 @@ static void mt76u_status_worker(struct mt76_worker *w)
        struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
        struct mt76_queue_entry entry;
        struct mt76_queue *q;
-       bool wake;
        int i;
 
        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                q = dev->phy.q_tx[i];
+               if (!q)
+                       continue;
 
                while (q->queued > 0) {
                        if (!q->entry[q->tail].done)
@@ -827,10 +828,6 @@ static void mt76u_status_worker(struct mt76_worker *w)
                        mt76_queue_tx_complete(dev, q, &entry);
                }
 
-               wake = q->stopped && q->queued < q->ndesc - 8;
-               if (wake)
-                       q->stopped = false;
-
                if (!q->queued)
                        wake_up(&dev->tx_wait);
 
@@ -839,8 +836,6 @@ static void mt76u_status_worker(struct mt76_worker *w)
                if (dev->drv->tx_status_data &&
                    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
                        queue_work(dev->wq, &dev->usb.stat_work);
-               if (wake)
-                       ieee80211_wake_queue(dev->hw, i);
        }
 }
 
index a7259db..965bd95 100644 (file)
@@ -78,7 +78,6 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
 
        rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
                "Firmware callback routine entered!\n");
-       complete(&rtlpriv->firmware_loading_complete);
        if (!firmware) {
                if (rtlpriv->cfg->alt_fw_name) {
                        err = request_firmware(&firmware,
@@ -91,13 +90,13 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
                }
                pr_err("Selected firmware is not available\n");
                rtlpriv->max_fw_size = 0;
-               return;
+               goto exit;
        }
 found_alt:
        if (firmware->size > rtlpriv->max_fw_size) {
                pr_err("Firmware is too big!\n");
                release_firmware(firmware);
-               return;
+               goto exit;
        }
        if (!is_wow) {
                memcpy(rtlpriv->rtlhal.pfirmware, firmware->data,
@@ -109,6 +108,9 @@ found_alt:
                rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
        }
        release_firmware(firmware);
+
+exit:
+       complete(&rtlpriv->firmware_loading_complete);
 }
 
 void rtl_fw_cb(const struct firmware *firmware, void *context)
index ce1b615..200bdd6 100644 (file)
@@ -179,7 +179,7 @@ int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
 
-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
 {
        int ret;
 
@@ -192,7 +192,6 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
 
 static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
 {
@@ -331,7 +330,7 @@ static inline void nvme_end_req(struct request *req)
                req->__sector = nvme_lba_to_sect(req->q->queuedata,
                        le64_to_cpu(nvme_req(req)->result.u64));
 
-       nvme_trace_bio_complete(req, status);
+       nvme_trace_bio_complete(req);
        blk_mq_end_request(req, status);
 }
 
@@ -578,7 +577,7 @@ struct request *nvme_alloc_request(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(nvme_alloc_request);
 
-struct request *nvme_alloc_request_qid(struct request_queue *q,
+static struct request *nvme_alloc_request_qid(struct request_queue *q,
                struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
 {
        struct request *req;
@@ -589,7 +588,6 @@ struct request *nvme_alloc_request_qid(struct request_queue *q,
                nvme_init_request(req, cmd);
        return req;
 }
-EXPORT_SYMBOL_GPL(nvme_alloc_request_qid);
 
 static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
 {
@@ -2858,6 +2856,11 @@ static const struct attribute_group *nvme_subsys_attrs_groups[] = {
        NULL,
 };
 
+static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl)
+{
+       return ctrl->opts && ctrl->opts->discovery_nqn;
+}
+
 static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
                struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 {
@@ -2877,7 +2880,7 @@ static bool nvme_validate_cntlid(struct nvme_subsystem *subsys,
                }
 
                if ((id->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
-                   (ctrl->opts && ctrl->opts->discovery_nqn))
+                   nvme_discovery_ctrl(ctrl))
                        continue;
 
                dev_err(ctrl->device,
@@ -3146,7 +3149,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
                        goto out_free;
                }
 
-               if (!ctrl->opts->discovery_nqn && !ctrl->kas) {
+               if (!nvme_discovery_ctrl(ctrl) && !ctrl->kas) {
                        dev_err(ctrl->device,
                                "keep-alive support is mandatory for fabrics\n");
                        ret = -EINVAL;
@@ -3186,7 +3189,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
        if (ret < 0)
                return ret;
 
-       if (!ctrl->identified) {
+       if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
                ret = nvme_hwmon_init(ctrl);
                if (ret < 0)
                        return ret;
index 38373a0..5f36cfa 100644 (file)
@@ -166,6 +166,7 @@ struct nvme_fc_ctrl {
        struct blk_mq_tag_set   admin_tag_set;
        struct blk_mq_tag_set   tag_set;
 
+       struct work_struct      ioerr_work;
        struct delayed_work     connect_work;
 
        struct kref             ref;
@@ -1888,6 +1889,15 @@ __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
        }
 }
 
+static void
+nvme_fc_ctrl_ioerr_work(struct work_struct *work)
+{
+       struct nvme_fc_ctrl *ctrl =
+                       container_of(work, struct nvme_fc_ctrl, ioerr_work);
+
+       nvme_fc_error_recovery(ctrl, "transport detected io error");
+}
+
 static void
 nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 {
@@ -2046,7 +2056,7 @@ done:
 
 check_error:
        if (terminate_assoc)
-               nvme_fc_error_recovery(ctrl, "transport detected io error");
+               queue_work(nvme_reset_wq, &ctrl->ioerr_work);
 }
 
 static int
@@ -3233,6 +3243,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
 {
        struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
 
+       cancel_work_sync(&ctrl->ioerr_work);
        cancel_delayed_work_sync(&ctrl->connect_work);
        /*
         * kill the association on the link side.  this will block
@@ -3449,6 +3460,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
        INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
        INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
+       INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
        spin_lock_init(&ctrl->lock);
 
        /* io queue count */
@@ -3540,6 +3552,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
 fail_ctrl:
        nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
+       cancel_work_sync(&ctrl->ioerr_work);
        cancel_work_sync(&ctrl->ctrl.reset_work);
        cancel_delayed_work_sync(&ctrl->connect_work);
 
index 7e49f61..88a6b97 100644 (file)
@@ -610,8 +610,6 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);
 #define NVME_QID_ANY -1
 struct request *nvme_alloc_request(struct request_queue *q,
                struct nvme_command *cmd, blk_mq_req_flags_t flags);
-struct request *nvme_alloc_request_qid(struct request_queue *q,
-               struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
 void nvme_cleanup_cmd(struct request *req);
 blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                struct nvme_command *cmd);
@@ -630,7 +628,6 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
 int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
 int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
 int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
 
@@ -675,8 +672,7 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
                kblockd_schedule_work(&head->requeue_work);
 }
 
-static inline void nvme_trace_bio_complete(struct request *req,
-        blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
 {
        struct nvme_ns *ns = req->q->queuedata;
 
@@ -731,8 +727,7 @@ static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
 static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
 {
 }
-static inline void nvme_trace_bio_complete(struct request *req,
-        blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
 {
 }
 static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
index b4385cb..50d9a20 100644 (file)
@@ -967,6 +967,7 @@ static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
 static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 {
        struct nvme_completion *cqe = &nvmeq->cqes[idx];
+       __u16 command_id = READ_ONCE(cqe->command_id);
        struct request *req;
 
        /*
@@ -975,17 +976,17 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
         * aborts.  We don't even bother to allocate a struct request
         * for them but rather special case them here.
         */
-       if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) {
+       if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
                nvme_complete_async_event(&nvmeq->dev->ctrl,
                                cqe->status, &cqe->result);
                return;
        }
 
-       req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
+       req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), command_id);
        if (unlikely(!req)) {
                dev_warn(nvmeq->dev->ctrl.device,
                        "invalid id %d completed on queue %d\n",
-                       cqe->command_id, le16_to_cpu(cqe->sq_id));
+                       command_id, le16_to_cpu(cqe->sq_id));
                return;
        }
 
@@ -3196,7 +3197,8 @@ static const struct pci_device_id nvme_id_table[] = {
        { PCI_DEVICE(0x144d, 0xa821),   /* Samsung PM1725 */
                .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
        { PCI_DEVICE(0x144d, 0xa822),   /* Samsung PM1725a */
-               .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+               .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+                               NVME_QUIRK_IGNORE_DEV_SUBNQN, },
        { PCI_DEVICE(0x1d1d, 0x1f1f),   /* LighNVM qemu device */
                .driver_data = NVME_QUIRK_LIGHTNVM, },
        { PCI_DEVICE(0x1d1d, 0x2807),   /* CNEX WL */
index 1ba6599..2166199 100644 (file)
@@ -201,7 +201,7 @@ static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
 
 static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
 {
-       return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
+       return min_t(size_t, iov_iter_single_seg_count(&req->iter),
                        req->pdu_len - req->pdu_sent);
 }
 
@@ -262,6 +262,16 @@ static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
        }
 }
 
+static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
+{
+       int ret;
+
+       /* drain the send queue as much as we can... */
+       do {
+               ret = nvme_tcp_try_send(queue);
+       } while (ret > 0);
+}
+
 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
                bool sync, bool last)
 {
@@ -276,10 +286,10 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
         * directly, otherwise queue io_work. Also, only do that if we
         * are on the same cpu, so we don't introduce contention.
         */
-       if (queue->io_cpu == smp_processor_id() &&
+       if (queue->io_cpu == __smp_processor_id() &&
            sync && empty && mutex_trylock(&queue->send_mutex)) {
                queue->more_requests = !last;
-               nvme_tcp_try_send(queue);
+               nvme_tcp_send_all(queue);
                queue->more_requests = false;
                mutex_unlock(&queue->send_mutex);
        } else if (last) {
index 733d936..68213f0 100644 (file)
@@ -1501,7 +1501,8 @@ static ssize_t
 fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
 {
-       int opcode, starting, amount;
+       unsigned int opcode;
+       int starting, amount;
 
        if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
                return -EBADRQC;
@@ -1588,8 +1589,8 @@ out_destroy_class:
 
 static void __exit fcloop_exit(void)
 {
-       struct fcloop_lport *lport;
-       struct fcloop_nport *nport;
+       struct fcloop_lport *lport = NULL;
+       struct fcloop_nport *nport = NULL;
        struct fcloop_tport *tport;
        struct fcloop_rport *rport;
        unsigned long flags;
index 5c1e7cb..06b6b74 100644 (file)
@@ -1220,6 +1220,14 @@ nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
        }
        ndev->inline_data_size = nport->inline_data_size;
        ndev->inline_page_count = inline_page_count;
+
+       if (nport->pi_enable && !(cm_id->device->attrs.device_cap_flags &
+                                 IB_DEVICE_INTEGRITY_HANDOVER)) {
+               pr_warn("T10-PI is not supported by device %s. Disabling it\n",
+                       cm_id->device->name);
+               nport->pi_enable = false;
+       }
+
        ndev->device = cm_id->device;
        kref_init(&ndev->ref);
 
@@ -1641,6 +1649,16 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
        spin_lock_irqsave(&queue->state_lock, flags);
        switch (queue->state) {
        case NVMET_RDMA_Q_CONNECTING:
+               while (!list_empty(&queue->rsp_wait_list)) {
+                       struct nvmet_rdma_rsp *rsp;
+
+                       rsp = list_first_entry(&queue->rsp_wait_list,
+                                              struct nvmet_rdma_rsp,
+                                              wait_list);
+                       list_del(&rsp->wait_list);
+                       nvmet_rdma_put_rsp(rsp);
+               }
+               fallthrough;
        case NVMET_RDMA_Q_LIVE:
                queue->state = NVMET_RDMA_Q_DISCONNECTING;
                disconnect = true;
@@ -1845,14 +1863,6 @@ static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
                goto out_destroy_id;
        }
 
-       if (port->nport->pi_enable &&
-           !(cm_id->device->attrs.device_cap_flags &
-             IB_DEVICE_INTEGRITY_HANDOVER)) {
-               pr_err("T10-PI is not supported for %pISpcs\n", addr);
-               ret = -EINVAL;
-               goto out_destroy_id;
-       }
-
        port->cm_id = cm_id;
        return 0;
 
index 794a37d..cb2f55f 100644 (file)
@@ -726,11 +726,6 @@ static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
        return per_cpu(hw_events->irq, cpu);
 }
 
-bool arm_pmu_irq_is_nmi(void)
-{
-       return has_nmi;
-}
-
 /*
  * PMU hardware loses all context when a CPU goes offline.
  * When a CPU is hotplugged back in, since some hardware registers are
index 34803a6..5c1a109 100644 (file)
@@ -347,7 +347,7 @@ FUNC_GROUP_DECL(RMII4, F24, E23, E24, E25, C25, C24, B26, B25, B24);
 
 #define D22 40
 SIG_EXPR_LIST_DECL_SESG(D22, SD1CLK, SD1, SIG_DESC_SET(SCU414, 8));
-SIG_EXPR_LIST_DECL_SEMG(D22, PWM8, PWM8G0, PWM8, SIG_DESC_SET(SCU414, 8));
+SIG_EXPR_LIST_DECL_SEMG(D22, PWM8, PWM8G0, PWM8, SIG_DESC_SET(SCU4B4, 8));
 PIN_DECL_2(D22, GPIOF0, SD1CLK, PWM8);
 GROUP_DECL(PWM8G0, D22);
 
index 7aeb552..72f17f2 100644 (file)
@@ -920,6 +920,10 @@ int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
                        err = hw->soc->bias_set(hw, desc, pullup);
                        if (err)
                                return err;
+               } else if (hw->soc->bias_set_combo) {
+                       err = hw->soc->bias_set_combo(hw, desc, pullup, arg);
+                       if (err)
+                               return err;
                } else {
                        return -ENOTSUPP;
                }
index d4ea108..abfe11c 100644 (file)
@@ -949,7 +949,6 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
        } else {
                int irq = chip->to_irq(chip, offset);
                const int pullidx = pull ? 1 : 0;
-               bool wake;
                int val;
                static const char * const pulls[] = {
                        "none        ",
index 53a6a24..3ea1634 100644 (file)
 #define JZ4740_GPIO_TRIG       0x70
 #define JZ4740_GPIO_FLAG       0x80
 
-#define JZ4760_GPIO_INT                0x10
-#define JZ4760_GPIO_PAT1       0x30
-#define JZ4760_GPIO_PAT0       0x40
-#define JZ4760_GPIO_FLAG       0x50
-#define JZ4760_GPIO_PEN                0x70
+#define JZ4770_GPIO_INT                0x10
+#define JZ4770_GPIO_PAT1       0x30
+#define JZ4770_GPIO_PAT0       0x40
+#define JZ4770_GPIO_FLAG       0x50
+#define JZ4770_GPIO_PEN                0x70
 
 #define X1830_GPIO_PEL                 0x110
 #define X1830_GPIO_PEH                 0x120
@@ -1688,8 +1688,8 @@ static inline bool ingenic_gpio_get_value(struct ingenic_gpio_chip *jzgc,
 static void ingenic_gpio_set_value(struct ingenic_gpio_chip *jzgc,
                                   u8 offset, int value)
 {
-       if (jzgc->jzpc->info->version >= ID_JZ4760)
-               ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_PAT0, offset, !!value);
+       if (jzgc->jzpc->info->version >= ID_JZ4770)
+               ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_PAT0, offset, !!value);
        else
                ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, offset, !!value);
 }
@@ -1718,9 +1718,9 @@ static void irq_set_type(struct ingenic_gpio_chip *jzgc,
                break;
        }
 
-       if (jzgc->jzpc->info->version >= ID_JZ4760) {
-               reg1 = JZ4760_GPIO_PAT1;
-               reg2 = JZ4760_GPIO_PAT0;
+       if (jzgc->jzpc->info->version >= ID_JZ4770) {
+               reg1 = JZ4770_GPIO_PAT1;
+               reg2 = JZ4770_GPIO_PAT0;
        } else {
                reg1 = JZ4740_GPIO_TRIG;
                reg2 = JZ4740_GPIO_DIR;
@@ -1758,8 +1758,8 @@ static void ingenic_gpio_irq_enable(struct irq_data *irqd)
        struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
        int irq = irqd->hwirq;
 
-       if (jzgc->jzpc->info->version >= ID_JZ4760)
-               ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, true);
+       if (jzgc->jzpc->info->version >= ID_JZ4770)
+               ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, true);
        else
                ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, true);
 
@@ -1774,8 +1774,8 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd)
 
        ingenic_gpio_irq_mask(irqd);
 
-       if (jzgc->jzpc->info->version >= ID_JZ4760)
-               ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, false);
+       if (jzgc->jzpc->info->version >= ID_JZ4770)
+               ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, false);
        else
                ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false);
 }
@@ -1799,8 +1799,8 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd)
                        irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_HIGH);
        }
 
-       if (jzgc->jzpc->info->version >= ID_JZ4760)
-               ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_FLAG, irq, false);
+       if (jzgc->jzpc->info->version >= ID_JZ4770)
+               ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_FLAG, irq, false);
        else
                ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, irq, true);
 }
@@ -1856,8 +1856,8 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc)
 
        chained_irq_enter(irq_chip, desc);
 
-       if (jzgc->jzpc->info->version >= ID_JZ4760)
-               flag = ingenic_gpio_read_reg(jzgc, JZ4760_GPIO_FLAG);
+       if (jzgc->jzpc->info->version >= ID_JZ4770)
+               flag = ingenic_gpio_read_reg(jzgc, JZ4770_GPIO_FLAG);
        else
                flag = ingenic_gpio_read_reg(jzgc, JZ4740_GPIO_FLAG);
 
@@ -1938,9 +1938,9 @@ static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
        struct ingenic_pinctrl *jzpc = jzgc->jzpc;
        unsigned int pin = gc->base + offset;
 
-       if (jzpc->info->version >= ID_JZ4760) {
-               if (ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_INT) ||
-                   ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1))
+       if (jzpc->info->version >= ID_JZ4770) {
+               if (ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_INT) ||
+                   ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PAT1))
                        return GPIO_LINE_DIRECTION_IN;
                return GPIO_LINE_DIRECTION_OUT;
        }
@@ -1991,20 +1991,20 @@ static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc,
                        'A' + offt, idx, func);
 
        if (jzpc->info->version >= ID_X1000) {
-               ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+               ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
                ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, false);
-               ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2);
-               ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, func & 0x1);
+               ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, func & 0x2);
+               ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, func & 0x1);
                ingenic_shadow_config_pin_load(jzpc, pin);
-       } else if (jzpc->info->version >= ID_JZ4760) {
-               ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+       } else if (jzpc->info->version >= ID_JZ4770) {
+               ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
                ingenic_config_pin(jzpc, pin, GPIO_MSK, false);
-               ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2);
-               ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, func & 0x1);
+               ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, func & 0x2);
+               ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, func & 0x1);
        } else {
                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, true);
                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_TRIG, func & 0x2);
-               ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func > 0);
+               ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func & 0x1);
        }
 
        return 0;
@@ -2057,14 +2057,14 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
                        'A' + offt, idx, input ? "in" : "out");
 
        if (jzpc->info->version >= ID_X1000) {
-               ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+               ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
                ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, true);
-               ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input);
+               ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
                ingenic_shadow_config_pin_load(jzpc, pin);
-       } else if (jzpc->info->version >= ID_JZ4760) {
-               ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+       } else if (jzpc->info->version >= ID_JZ4770) {
+               ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
                ingenic_config_pin(jzpc, pin, GPIO_MSK, true);
-               ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input);
+               ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
        } else {
                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, false);
                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, !input);
@@ -2091,8 +2091,8 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
        unsigned int offt = pin / PINS_PER_GPIO_CHIP;
        bool pull;
 
-       if (jzpc->info->version >= ID_JZ4760)
-               pull = !ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PEN);
+       if (jzpc->info->version >= ID_JZ4770)
+               pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
        else
                pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
 
@@ -2141,8 +2141,8 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
                                        REG_SET(X1830_GPIO_PEH), bias << idxh);
                }
 
-       } else if (jzpc->info->version >= ID_JZ4760) {
-               ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PEN, !bias);
+       } else if (jzpc->info->version >= ID_JZ4770) {
+               ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PEN, !bias);
        } else {
                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !bias);
        }
@@ -2151,8 +2151,8 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
 static void ingenic_set_output_level(struct ingenic_pinctrl *jzpc,
                                     unsigned int pin, bool high)
 {
-       if (jzpc->info->version >= ID_JZ4760)
-               ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, high);
+       if (jzpc->info->version >= ID_JZ4770)
+               ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, high);
        else
                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DATA, high);
 }
index e051aec..d70caec 100644 (file)
@@ -51,6 +51,7 @@
  * @dual_edge_irqs: Bitmap of irqs that need sw emulated dual edge
  *                  detection.
  * @skip_wake_irqs: Skip IRQs that are handled by wakeup interrupt controller
+ * @disabled_for_mux: These IRQs were disabled because we muxed away.
  * @soc:            Reference to soc_data of platform specific data.
  * @regs:           Base addresses for the TLMM tiles.
  * @phys_base:      Physical base address
@@ -72,6 +73,7 @@ struct msm_pinctrl {
        DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
        DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
        DECLARE_BITMAP(skip_wake_irqs, MAX_NR_GPIO);
+       DECLARE_BITMAP(disabled_for_mux, MAX_NR_GPIO);
 
        const struct msm_pinctrl_soc_data *soc;
        void __iomem *regs[MAX_NR_TILES];
@@ -96,6 +98,14 @@ MSM_ACCESSOR(intr_cfg)
 MSM_ACCESSOR(intr_status)
 MSM_ACCESSOR(intr_target)
 
+static void msm_ack_intr_status(struct msm_pinctrl *pctrl,
+                               const struct msm_pingroup *g)
+{
+       u32 val = g->intr_ack_high ? BIT(g->intr_status_bit) : 0;
+
+       msm_writel_intr_status(val, pctrl, g);
+}
+
 static int msm_get_groups_count(struct pinctrl_dev *pctldev)
 {
        struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
@@ -171,6 +181,10 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
                              unsigned group)
 {
        struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+       struct gpio_chip *gc = &pctrl->chip;
+       unsigned int irq = irq_find_mapping(gc->irq.domain, group);
+       struct irq_data *d = irq_get_irq_data(irq);
+       unsigned int gpio_func = pctrl->soc->gpio_func;
        const struct msm_pingroup *g;
        unsigned long flags;
        u32 val, mask;
@@ -187,6 +201,20 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
        if (WARN_ON(i == g->nfuncs))
                return -EINVAL;
 
+       /*
+        * If an GPIO interrupt is setup on this pin then we need special
+        * handling.  Specifically interrupt detection logic will still see
+        * the pin twiddle even when we're muxed away.
+        *
+        * When we see a pin with an interrupt setup on it then we'll disable
+        * (mask) interrupts on it when we mux away until we mux back.  Note
+        * that disable_irq() refcounts and interrupts are disabled as long as
+        * at least one disable_irq() has been called.
+        */
+       if (d && i != gpio_func &&
+           !test_and_set_bit(d->hwirq, pctrl->disabled_for_mux))
+               disable_irq(irq);
+
        raw_spin_lock_irqsave(&pctrl->lock, flags);
 
        val = msm_readl_ctl(pctrl, g);
@@ -196,6 +224,20 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
 
        raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
+       if (d && i == gpio_func &&
+           test_and_clear_bit(d->hwirq, pctrl->disabled_for_mux)) {
+               /*
+                * Clear interrupts detected while not GPIO since we only
+                * masked things.
+                */
+               if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
+                       irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, false);
+               else
+                       msm_ack_intr_status(pctrl, g);
+
+               enable_irq(irq);
+       }
+
        return 0;
 }
 
@@ -210,8 +252,7 @@ static int msm_pinmux_request_gpio(struct pinctrl_dev *pctldev,
        if (!g->nfuncs)
                return 0;
 
-       /* For now assume function 0 is GPIO because it always is */
-       return msm_pinmux_set_mux(pctldev, g->funcs[0], offset);
+       return msm_pinmux_set_mux(pctldev, g->funcs[pctrl->soc->gpio_func], offset);
 }
 
 static const struct pinmux_ops msm_pinmux_ops = {
@@ -774,7 +815,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
        raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 }
 
-static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
+static void msm_gpio_irq_unmask(struct irq_data *d)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
@@ -792,17 +833,6 @@ static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
 
        raw_spin_lock_irqsave(&pctrl->lock, flags);
 
-       if (status_clear) {
-               /*
-                * clear the interrupt status bit before unmask to avoid
-                * any erroneous interrupts that would have got latched
-                * when the interrupt is not in use.
-                */
-               val = msm_readl_intr_status(pctrl, g);
-               val &= ~BIT(g->intr_status_bit);
-               msm_writel_intr_status(val, pctrl, g);
-       }
-
        val = msm_readl_intr_cfg(pctrl, g);
        val |= BIT(g->intr_raw_status_bit);
        val |= BIT(g->intr_enable_bit);
@@ -822,7 +852,7 @@ static void msm_gpio_irq_enable(struct irq_data *d)
                irq_chip_enable_parent(d);
 
        if (!test_bit(d->hwirq, pctrl->skip_wake_irqs))
-               msm_gpio_irq_clear_unmask(d, true);
+               msm_gpio_irq_unmask(d);
 }
 
 static void msm_gpio_irq_disable(struct irq_data *d)
@@ -837,11 +867,6 @@ static void msm_gpio_irq_disable(struct irq_data *d)
                msm_gpio_irq_mask(d);
 }
 
-static void msm_gpio_irq_unmask(struct irq_data *d)
-{
-       msm_gpio_irq_clear_unmask(d, false);
-}
-
 /**
  * msm_gpio_update_dual_edge_parent() - Prime next edge for IRQs handled by parent.
  * @d: The irq dta.
@@ -894,7 +919,6 @@ static void msm_gpio_irq_ack(struct irq_data *d)
        struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
        const struct msm_pingroup *g;
        unsigned long flags;
-       u32 val;
 
        if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) {
                if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
@@ -906,12 +930,7 @@ static void msm_gpio_irq_ack(struct irq_data *d)
 
        raw_spin_lock_irqsave(&pctrl->lock, flags);
 
-       val = msm_readl_intr_status(pctrl, g);
-       if (g->intr_ack_high)
-               val |= BIT(g->intr_status_bit);
-       else
-               val &= ~BIT(g->intr_status_bit);
-       msm_writel_intr_status(val, pctrl, g);
+       msm_ack_intr_status(pctrl, g);
 
        if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
                msm_gpio_update_dual_edge_pos(pctrl, g, d);
@@ -936,6 +955,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
        struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
        const struct msm_pingroup *g;
        unsigned long flags;
+       bool was_enabled;
        u32 val;
 
        if (msm_gpio_needs_dual_edge_parent_workaround(d, type)) {
@@ -997,6 +1017,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
         * could cause the INTR_STATUS to be set for EDGE interrupts.
         */
        val = msm_readl_intr_cfg(pctrl, g);
+       was_enabled = val & BIT(g->intr_raw_status_bit);
        val |= BIT(g->intr_raw_status_bit);
        if (g->intr_detection_width == 2) {
                val &= ~(3 << g->intr_detection_bit);
@@ -1046,6 +1067,14 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
        }
        msm_writel_intr_cfg(val, pctrl, g);
 
+       /*
+        * The first time we set RAW_STATUS_EN it could trigger an interrupt.
+        * Clear the interrupt.  This is safe because we have
+        * IRQCHIP_SET_TYPE_MASKED.
+        */
+       if (!was_enabled)
+               msm_ack_intr_status(pctrl, g);
+
        if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
                msm_gpio_update_dual_edge_pos(pctrl, g, d);
 
@@ -1099,16 +1128,11 @@ static int msm_gpio_irq_reqres(struct irq_data *d)
        }
 
        /*
-        * Clear the interrupt that may be pending before we enable
-        * the line.
-        * This is especially a problem with the GPIOs routed to the
-        * PDC. These GPIOs are direct-connect interrupts to the GIC.
-        * Disabling the interrupt line at the PDC does not prevent
-        * the interrupt from being latched at the GIC. The state at
-        * GIC needs to be cleared before enabling.
+        * The disable / clear-enable workaround we do in msm_pinmux_set_mux()
+        * only works if disable is not lazy since we only clear any bogus
+        * interrupt in hardware. Explicitly mark the interrupt as UNLAZY.
         */
-       if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
-               irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, 0);
+       irq_set_status_flags(d->irq, IRQ_DISABLE_UNLAZY);
 
        return 0;
 out:
index 333f992..e31a516 100644 (file)
@@ -118,6 +118,7 @@ struct msm_gpio_wakeirq_map {
  * @wakeirq_dual_edge_errata: If true then GPIOs using the wakeirq_map need
  *                            to be aware that their parent can't handle dual
  *                            edge interrupts.
+ * @gpio_func: Which function number is GPIO (usually 0).
  */
 struct msm_pinctrl_soc_data {
        const struct pinctrl_pin_desc *pins;
@@ -134,6 +135,7 @@ struct msm_pinctrl_soc_data {
        const struct msm_gpio_wakeirq_map *wakeirq_map;
        unsigned int nwakeirq_map;
        bool wakeirq_dual_edge_errata;
+       unsigned int gpio_func;
 };
 
 extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops;
index 33040b0..2c941cd 100644 (file)
@@ -5,6 +5,7 @@
 
 menuconfig SURFACE_PLATFORMS
        bool "Microsoft Surface Platform-Specific Device Drivers"
+       depends on ACPI
        default y
        help
          Say Y here to get to see options for platform-specific device drivers
@@ -29,20 +30,19 @@ config SURFACE3_WMI
 
 config SURFACE_3_BUTTON
        tristate "Power/home/volume buttons driver for Microsoft Surface 3 tablet"
-       depends on ACPI && KEYBOARD_GPIO && I2C
+       depends on KEYBOARD_GPIO && I2C
        help
          This driver handles the power/home/volume buttons on the Microsoft Surface 3 tablet.
 
 config SURFACE_3_POWER_OPREGION
        tristate "Surface 3 battery platform operation region support"
-       depends on ACPI && I2C
+       depends on I2C
        help
          This driver provides support for ACPI operation
          region of the Surface 3 battery platform driver.
 
 config SURFACE_GPE
        tristate "Surface GPE/Lid Support Driver"
-       depends on ACPI
        depends on DMI
        help
          This driver marks the GPEs related to the ACPI lid device found on
@@ -52,7 +52,7 @@ config SURFACE_GPE
 
 config SURFACE_PRO3_BUTTON
        tristate "Power/home/volume buttons driver for Microsoft Surface Pro 3/4 tablet"
-       depends on ACPI && INPUT
+       depends on INPUT
        help
          This driver handles the power/home/volume buttons on the Microsoft Surface Pro 3/4 tablet.
 
index e49e5d6..86f6991 100644 (file)
@@ -181,12 +181,12 @@ static int surface_lid_enable_wakeup(struct device *dev, bool enable)
        return 0;
 }
 
-static int surface_gpe_suspend(struct device *dev)
+static int __maybe_unused surface_gpe_suspend(struct device *dev)
 {
        return surface_lid_enable_wakeup(dev, true);
 }
 
-static int surface_gpe_resume(struct device *dev)
+static int __maybe_unused surface_gpe_resume(struct device *dev)
 {
        return surface_lid_enable_wakeup(dev, false);
 }
index 0102bf1..ef83425 100644 (file)
@@ -85,7 +85,7 @@ static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u3
        iowrite32(val, dev->regbase + reg_offset);
 }
 
-#if CONFIG_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
 static int smu_fw_info_show(struct seq_file *s, void *unused)
 {
        struct amd_pmc_dev *dev = s->private;
index ecd4779..18bf8ae 100644 (file)
@@ -247,7 +247,8 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command,
        ret = bios_return->return_code;
 
        if (ret) {
-               if (ret != HPWMI_RET_UNKNOWN_CMDTYPE)
+               if (ret != HPWMI_RET_UNKNOWN_COMMAND &&
+                   ret != HPWMI_RET_UNKNOWN_CMDTYPE)
                        pr_warn("query 0x%x returned error 0x%x\n", query, ret);
                goto out_free;
        }
index b457b0b..2cce825 100644 (file)
@@ -164,13 +164,29 @@ static const struct i2c_inst_data bsg2150_data[]  = {
        {}
 };
 
-static const struct i2c_inst_data int3515_data[]  = {
-       { "tps6598x", IRQ_RESOURCE_APIC, 0 },
-       { "tps6598x", IRQ_RESOURCE_APIC, 1 },
-       { "tps6598x", IRQ_RESOURCE_APIC, 2 },
-       { "tps6598x", IRQ_RESOURCE_APIC, 3 },
-       {}
-};
+/*
+ * Device with _HID INT3515 (TI PD controllers) has some unresolved interrupt
+ * issues. The most common problem seen is interrupt flood.
+ *
+ * There are at least two known causes. Firstly, on some boards, the
+ * I2CSerialBus resource index does not match the Interrupt resource, i.e. they
+ * are not one-to-one mapped like in the array below. Secondly, on some boards
+ * the IRQ line from the PD controller is not actually connected at all. But the
+ * interrupt flood is also seen on some boards where those are not a problem, so
+ * there are some other problems as well.
+ *
+ * Because of the issues with the interrupt, the device is disabled for now. If
+ * you wish to debug the issues, uncomment the below, and add an entry for the
+ * INT3515 device to the i2c_multi_instance_ids table.
+ *
+ * static const struct i2c_inst_data int3515_data[]  = {
+ *     { "tps6598x", IRQ_RESOURCE_APIC, 0 },
+ *     { "tps6598x", IRQ_RESOURCE_APIC, 1 },
+ *     { "tps6598x", IRQ_RESOURCE_APIC, 2 },
+ *     { "tps6598x", IRQ_RESOURCE_APIC, 3 },
+ *     { }
+ * };
+ */
 
 /*
  * Note new device-ids must also be added to i2c_multi_instantiate_ids in
@@ -179,7 +195,6 @@ static const struct i2c_inst_data int3515_data[]  = {
 static const struct acpi_device_id i2c_multi_inst_acpi_ids[] = {
        { "BSG1160", (unsigned long)bsg1160_data },
        { "BSG2150", (unsigned long)bsg2150_data },
-       { "INT3515", (unsigned long)int3515_data },
        { }
 };
 MODULE_DEVICE_TABLE(acpi, i2c_multi_inst_acpi_ids);
index 7598cd4..5b81baf 100644 (file)
@@ -92,6 +92,7 @@ struct ideapad_private {
        struct dentry *debug;
        unsigned long cfg;
        bool has_hw_rfkill_switch;
+       bool has_touchpad_switch;
        const char *fnesc_guid;
 };
 
@@ -535,7 +536,9 @@ static umode_t ideapad_is_visible(struct kobject *kobj,
        } else if (attr == &dev_attr_fn_lock.attr) {
                supported = acpi_has_method(priv->adev->handle, "HALS") &&
                        acpi_has_method(priv->adev->handle, "SALS");
-       } else
+       } else if (attr == &dev_attr_touchpad.attr)
+               supported = priv->has_touchpad_switch;
+       else
                supported = true;
 
        return supported ? attr->mode : 0;
@@ -867,6 +870,9 @@ static void ideapad_sync_touchpad_state(struct ideapad_private *priv)
 {
        unsigned long value;
 
+       if (!priv->has_touchpad_switch)
+               return;
+
        /* Without reading from EC touchpad LED doesn't switch state */
        if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) {
                /* Some IdeaPads don't really turn off touchpad - they only
@@ -989,6 +995,9 @@ static int ideapad_acpi_add(struct platform_device *pdev)
        priv->platform_device = pdev;
        priv->has_hw_rfkill_switch = dmi_check_system(hw_rfkill_list);
 
+       /* Most ideapads with ELAN0634 touchpad don't use EC touchpad switch */
+       priv->has_touchpad_switch = !acpi_dev_present("ELAN0634", NULL, -1);
+
        ret = ideapad_sysfs_init(priv);
        if (ret)
                return ret;
@@ -1006,6 +1015,10 @@ static int ideapad_acpi_add(struct platform_device *pdev)
        if (!priv->has_hw_rfkill_switch)
                write_ec_cmd(priv->adev->handle, VPCCMD_W_RF, 1);
 
+       /* The same for Touchpad */
+       if (!priv->has_touchpad_switch)
+               write_ec_cmd(priv->adev->handle, VPCCMD_W_TOUCHPAD, 1);
+
        for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
                if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
                        ideapad_register_rfkill(priv, i);
index 3b49a1f..30a9062 100644 (file)
@@ -207,19 +207,19 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
        {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "HP Stream x360 Convertible PC 11"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"),
                },
        },
        {
                .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"),
+                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"),
                },
        },
        {
                .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"),
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7352"),
                },
        },
        {} /* Array terminator */
index e03df28..f3e8eca 100644 (file)
@@ -8783,6 +8783,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
        TPACPI_Q_LNV3('N', '1', 'T', TPACPI_FAN_2CTL),  /* P71 */
        TPACPI_Q_LNV3('N', '1', 'U', TPACPI_FAN_2CTL),  /* P51 */
        TPACPI_Q_LNV3('N', '2', 'C', TPACPI_FAN_2CTL),  /* P52 / P72 */
+       TPACPI_Q_LNV3('N', '2', 'N', TPACPI_FAN_2CTL),  /* P53 / P73 */
        TPACPI_Q_LNV3('N', '2', 'E', TPACPI_FAN_2CTL),  /* P1 / X1 Extreme (1st gen) */
        TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL),  /* P1 / X1 Extreme (2nd gen) */
        TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL),  /* P1 / X1 Extreme (3nd gen) */
@@ -9951,9 +9952,9 @@ static int tpacpi_proxsensor_init(struct ibm_init_struct *iibm)
        if ((palm_err == -ENODEV) && (lap_err == -ENODEV))
                return 0;
        /* Otherwise, if there was an error return it */
-       if (palm_err && (palm_err != ENODEV))
+       if (palm_err && (palm_err != -ENODEV))
                return palm_err;
-       if (lap_err && (lap_err != ENODEV))
+       if (lap_err && (lap_err != -ENODEV))
                return lap_err;
 
        if (has_palmsensor) {
index 5783139..c4de932 100644 (file)
@@ -263,6 +263,16 @@ static const struct ts_dmi_data digma_citi_e200_data = {
        .properties     = digma_citi_e200_props,
 };
 
+static const struct property_entry estar_beauty_hd_props[] = {
+       PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+       { }
+};
+
+static const struct ts_dmi_data estar_beauty_hd_data = {
+       .acpi_name      = "GDIX1001:00",
+       .properties     = estar_beauty_hd_props,
+};
+
 static const struct property_entry gp_electronic_t701_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
@@ -942,6 +952,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
                },
        },
+       {
+               /* Estar Beauty HD (MID 7316R) */
+               .driver_data = (void *)&estar_beauty_hd_data,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Estar"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"),
+               },
+       },
        {
                /* GP-electronic T701 */
                .driver_data = (void *)&gp_electronic_t701_data,
index 476d7c7..f2edef0 100644 (file)
@@ -64,6 +64,7 @@ config DP83640_PHY
        depends on NETWORK_PHY_TIMESTAMPING
        depends on PHYLIB
        depends on PTP_1588_CLOCK
+       select CRC32
        help
          Supports the DP83640 PHYTER with IEEE 1588 features.
 
@@ -78,6 +79,7 @@ config DP83640_PHY
 config PTP_1588_CLOCK_INES
        tristate "ZHAW InES PTP time stamping IP core"
        depends on NETWORK_PHY_TIMESTAMPING
+       depends on HAS_IOMEM
        depends on PHYLIB
        depends on PTP_1588_CLOCK
        help
index 53fa84f..5abdd29 100644 (file)
@@ -881,6 +881,7 @@ config REGULATOR_QCOM_RPM
 config REGULATOR_QCOM_RPMH
        tristate "Qualcomm Technologies, Inc. RPMh regulator driver"
        depends on QCOM_RPMH || (QCOM_RPMH=n && COMPILE_TEST)
+       depends on QCOM_COMMAND_DB || (QCOM_COMMAND_DB=n && COMPILE_TEST)
        help
          This driver supports control of PMIC regulators via the RPMh hardware
          block found on Qualcomm Technologies Inc. SoCs.  RPMh regulator
index e6d5d98..9309765 100644 (file)
 #include <linux/regulator/of_regulator.h>
 #include <linux/slab.h>
 
+/* Typical regulator startup times as per data sheet in uS */
+#define BD71847_BUCK1_STARTUP_TIME 144
+#define BD71847_BUCK2_STARTUP_TIME 162
+#define BD71847_BUCK3_STARTUP_TIME 162
+#define BD71847_BUCK4_STARTUP_TIME 240
+#define BD71847_BUCK5_STARTUP_TIME 270
+#define BD71847_BUCK6_STARTUP_TIME 200
+#define BD71847_LDO1_STARTUP_TIME  440
+#define BD71847_LDO2_STARTUP_TIME  370
+#define BD71847_LDO3_STARTUP_TIME  310
+#define BD71847_LDO4_STARTUP_TIME  400
+#define BD71847_LDO5_STARTUP_TIME  530
+#define BD71847_LDO6_STARTUP_TIME  400
+
+#define BD71837_BUCK1_STARTUP_TIME 160
+#define BD71837_BUCK2_STARTUP_TIME 180
+#define BD71837_BUCK3_STARTUP_TIME 180
+#define BD71837_BUCK4_STARTUP_TIME 180
+#define BD71837_BUCK5_STARTUP_TIME 160
+#define BD71837_BUCK6_STARTUP_TIME 240
+#define BD71837_BUCK7_STARTUP_TIME 220
+#define BD71837_BUCK8_STARTUP_TIME 200
+#define BD71837_LDO1_STARTUP_TIME  440
+#define BD71837_LDO2_STARTUP_TIME  370
+#define BD71837_LDO3_STARTUP_TIME  310
+#define BD71837_LDO4_STARTUP_TIME  400
+#define BD71837_LDO5_STARTUP_TIME  310
+#define BD71837_LDO6_STARTUP_TIME  400
+#define BD71837_LDO7_STARTUP_TIME  530
+
 /*
  * BD718(37/47/50) have two "enable control modes". ON/OFF can either be
  * controlled by software - or by PMIC internal HW state machine. Whether
@@ -613,6 +643,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .vsel_mask = DVS_BUCK_RUN_MASK,
                        .enable_reg = BD718XX_REG_BUCK1_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71847_BUCK1_STARTUP_TIME,
                        .owner = THIS_MODULE,
                        .of_parse_cb = buck_set_hw_dvs_levels,
                },
@@ -646,6 +677,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .vsel_mask = DVS_BUCK_RUN_MASK,
                        .enable_reg = BD718XX_REG_BUCK2_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71847_BUCK2_STARTUP_TIME,
                        .owner = THIS_MODULE,
                        .of_parse_cb = buck_set_hw_dvs_levels,
                },
@@ -680,6 +712,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .linear_range_selectors = bd71847_buck3_volt_range_sel,
                        .enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71847_BUCK3_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -706,6 +739,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .vsel_range_mask = BD71847_BUCK4_RANGE_MASK,
                        .linear_range_selectors = bd71847_buck4_volt_range_sel,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71847_BUCK4_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -727,6 +761,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK,
                        .enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71847_BUCK5_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -750,6 +785,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK,
                        .enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71847_BUCK6_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -775,6 +811,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .linear_range_selectors = bd718xx_ldo1_volt_range_sel,
                        .enable_reg = BD718XX_REG_LDO1_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71847_LDO1_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -796,6 +833,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .n_voltages = ARRAY_SIZE(ldo_2_volts),
                        .enable_reg = BD718XX_REG_LDO2_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71847_LDO2_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -818,6 +856,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .vsel_mask = BD718XX_LDO3_MASK,
                        .enable_reg = BD718XX_REG_LDO3_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71847_LDO3_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -840,6 +879,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .vsel_mask = BD718XX_LDO4_MASK,
                        .enable_reg = BD718XX_REG_LDO4_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71847_LDO4_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -865,6 +905,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .linear_range_selectors = bd71847_ldo5_volt_range_sel,
                        .enable_reg = BD718XX_REG_LDO5_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71847_LDO5_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -889,6 +930,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .vsel_mask = BD718XX_LDO6_MASK,
                        .enable_reg = BD718XX_REG_LDO6_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71847_LDO6_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -942,6 +984,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = DVS_BUCK_RUN_MASK,
                        .enable_reg = BD718XX_REG_BUCK1_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71837_BUCK1_STARTUP_TIME,
                        .owner = THIS_MODULE,
                        .of_parse_cb = buck_set_hw_dvs_levels,
                },
@@ -975,6 +1018,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = DVS_BUCK_RUN_MASK,
                        .enable_reg = BD718XX_REG_BUCK2_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71837_BUCK2_STARTUP_TIME,
                        .owner = THIS_MODULE,
                        .of_parse_cb = buck_set_hw_dvs_levels,
                },
@@ -1005,6 +1049,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = DVS_BUCK_RUN_MASK,
                        .enable_reg = BD71837_REG_BUCK3_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71837_BUCK3_STARTUP_TIME,
                        .owner = THIS_MODULE,
                        .of_parse_cb = buck_set_hw_dvs_levels,
                },
@@ -1033,6 +1078,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = DVS_BUCK_RUN_MASK,
                        .enable_reg = BD71837_REG_BUCK4_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71837_BUCK4_STARTUP_TIME,
                        .owner = THIS_MODULE,
                        .of_parse_cb = buck_set_hw_dvs_levels,
                },
@@ -1065,6 +1111,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .linear_range_selectors = bd71837_buck5_volt_range_sel,
                        .enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71837_BUCK5_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1088,6 +1135,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = BD71837_BUCK6_MASK,
                        .enable_reg = BD718XX_REG_2ND_NODVS_BUCK_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71837_BUCK6_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1109,6 +1157,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK,
                        .enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71837_BUCK7_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1132,6 +1181,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK,
                        .enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71837_BUCK8_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1157,6 +1207,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .linear_range_selectors = bd718xx_ldo1_volt_range_sel,
                        .enable_reg = BD718XX_REG_LDO1_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71837_LDO1_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1178,6 +1229,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .n_voltages = ARRAY_SIZE(ldo_2_volts),
                        .enable_reg = BD718XX_REG_LDO2_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71837_LDO2_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1200,6 +1252,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = BD718XX_LDO3_MASK,
                        .enable_reg = BD718XX_REG_LDO3_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71837_LDO3_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1222,6 +1275,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = BD718XX_LDO4_MASK,
                        .enable_reg = BD718XX_REG_LDO4_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71837_LDO4_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1246,6 +1300,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = BD71837_LDO5_MASK,
                        .enable_reg = BD718XX_REG_LDO5_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71837_LDO5_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1272,6 +1327,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = BD718XX_LDO6_MASK,
                        .enable_reg = BD718XX_REG_LDO6_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71837_LDO6_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1296,6 +1352,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = BD71837_LDO7_MASK,
                        .enable_reg = BD71837_REG_LDO7_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71837_LDO7_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
index 308c27f..af9918c 100644 (file)
@@ -469,13 +469,17 @@ static int pf8x00_i2c_probe(struct i2c_client *client)
 }
 
 static const struct of_device_id pf8x00_dt_ids[] = {
-       { .compatible = "nxp,pf8x00",},
+       { .compatible = "nxp,pf8100",},
+       { .compatible = "nxp,pf8121a",},
+       { .compatible = "nxp,pf8200",},
        { }
 };
 MODULE_DEVICE_TABLE(of, pf8x00_dt_ids);
 
 static const struct i2c_device_id pf8x00_i2c_id[] = {
-       { "pf8x00", 0 },
+       { "pf8100", 0 },
+       { "pf8121a", 0 },
+       { "pf8200", 0 },
        {},
 };
 MODULE_DEVICE_TABLE(i2c, pf8x00_i2c_id);
index fe030ec..c395a8d 100644 (file)
@@ -726,7 +726,7 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = {
 static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
        .regulator_type = VRM,
        .ops = &rpmh_regulator_vrm_ops,
-       .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 1600),
+       .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 16000),
        .n_voltages = 5,
        .pmic_mode_map = pmic_mode_map_pmic5_smps,
        .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
index 6f5ddc3..28f6370 100644 (file)
@@ -1079,7 +1079,8 @@ struct qeth_card *qeth_get_card_by_busid(char *bus_id);
 void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
                              int clear_start_mask);
 int qeth_threads_running(struct qeth_card *, unsigned long);
-int qeth_set_offline(struct qeth_card *card, bool resetting);
+int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
+                    bool resetting);
 
 int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
                  int (*reply_cb)
index f4b6029..cf18d87 100644 (file)
@@ -5507,12 +5507,12 @@ out:
        return rc;
 }
 
-static int qeth_set_online(struct qeth_card *card)
+static int qeth_set_online(struct qeth_card *card,
+                          const struct qeth_discipline *disc)
 {
        bool carrier_ok;
        int rc;
 
-       mutex_lock(&card->discipline_mutex);
        mutex_lock(&card->conf_mutex);
        QETH_CARD_TEXT(card, 2, "setonlin");
 
@@ -5529,7 +5529,7 @@ static int qeth_set_online(struct qeth_card *card)
                /* no need for locking / error handling at this early stage: */
                qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));
 
-       rc = card->discipline->set_online(card, carrier_ok);
+       rc = disc->set_online(card, carrier_ok);
        if (rc)
                goto err_online;
 
@@ -5537,7 +5537,6 @@ static int qeth_set_online(struct qeth_card *card)
        kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
 
        mutex_unlock(&card->conf_mutex);
-       mutex_unlock(&card->discipline_mutex);
        return 0;
 
 err_online:
@@ -5552,15 +5551,14 @@ err_hardsetup:
        qdio_free(CARD_DDEV(card));
 
        mutex_unlock(&card->conf_mutex);
-       mutex_unlock(&card->discipline_mutex);
        return rc;
 }
 
-int qeth_set_offline(struct qeth_card *card, bool resetting)
+int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
+                    bool resetting)
 {
        int rc, rc2, rc3;
 
-       mutex_lock(&card->discipline_mutex);
        mutex_lock(&card->conf_mutex);
        QETH_CARD_TEXT(card, 3, "setoffl");
 
@@ -5581,7 +5579,7 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
 
        cancel_work_sync(&card->rx_mode_work);
 
-       card->discipline->set_offline(card);
+       disc->set_offline(card);
 
        qeth_qdio_clear_card(card, 0);
        qeth_drain_output_queues(card);
@@ -5602,16 +5600,19 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
        kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
 
        mutex_unlock(&card->conf_mutex);
-       mutex_unlock(&card->discipline_mutex);
        return 0;
 }
 EXPORT_SYMBOL_GPL(qeth_set_offline);
 
 static int qeth_do_reset(void *data)
 {
+       const struct qeth_discipline *disc;
        struct qeth_card *card = data;
        int rc;
 
+       /* Lock-free, other users will block until we are done. */
+       disc = card->discipline;
+
        QETH_CARD_TEXT(card, 2, "recover1");
        if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
                return 0;
@@ -5619,8 +5620,8 @@ static int qeth_do_reset(void *data)
        dev_warn(&card->gdev->dev,
                 "A recovery process has been started for the device\n");
 
-       qeth_set_offline(card, true);
-       rc = qeth_set_online(card);
+       qeth_set_offline(card, disc, true);
+       rc = qeth_set_online(card, disc);
        if (!rc) {
                dev_info(&card->gdev->dev,
                         "Device successfully recovered!\n");
@@ -6584,6 +6585,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
                break;
        default:
                card->info.layer_enforced = true;
+               /* It's so early that we don't need the discipline_mutex yet. */
                rc = qeth_core_load_discipline(card, enforced_disc);
                if (rc)
                        goto err_load;
@@ -6616,10 +6618,12 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
 
        QETH_CARD_TEXT(card, 2, "removedv");
 
+       mutex_lock(&card->discipline_mutex);
        if (card->discipline) {
                card->discipline->remove(gdev);
                qeth_core_free_discipline(card);
        }
+       mutex_unlock(&card->discipline_mutex);
 
        qeth_free_qdio_queues(card);
 
@@ -6634,6 +6638,7 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
        int rc = 0;
        enum qeth_discipline_id def_discipline;
 
+       mutex_lock(&card->discipline_mutex);
        if (!card->discipline) {
                def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
                                                QETH_DISCIPLINE_LAYER2;
@@ -6647,16 +6652,23 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
                }
        }
 
-       rc = qeth_set_online(card);
+       rc = qeth_set_online(card, card->discipline);
+
 err:
+       mutex_unlock(&card->discipline_mutex);
        return rc;
 }
 
 static int qeth_core_set_offline(struct ccwgroup_device *gdev)
 {
        struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+       int rc;
 
-       return qeth_set_offline(card, false);
+       mutex_lock(&card->discipline_mutex);
+       rc = qeth_set_offline(card, card->discipline, false);
+       mutex_unlock(&card->discipline_mutex);
+
+       return rc;
 }
 
 static void qeth_core_shutdown(struct ccwgroup_device *gdev)
index 4ed0fb0..4254caf 100644 (file)
@@ -2208,7 +2208,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
        wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
 
        if (gdev->state == CCWGROUP_ONLINE)
-               qeth_set_offline(card, false);
+               qeth_set_offline(card, card->discipline, false);
 
        cancel_work_sync(&card->close_dev_work);
        if (card->dev->reg_state == NETREG_REGISTERED)
index d138ac4..4c2cae7 100644 (file)
@@ -1813,7 +1813,7 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
                                                    struct net_device *dev,
                                                    netdev_features_t features)
 {
-       if (qeth_get_ip_version(skb) != 4)
+       if (vlan_get_protocol(skb) != htons(ETH_P_IP))
                features &= ~NETIF_F_HW_VLAN_CTAG_TX;
        return qeth_features_check(skb, dev, features);
 }
@@ -1971,7 +1971,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
        wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
 
        if (cgdev->state == CCWGROUP_ONLINE)
-               qeth_set_offline(card, false);
+               qeth_set_offline(card, card->discipline, false);
 
        cancel_work_sync(&card->close_dev_work);
        if (card->dev->reg_state == NETREG_REGISTERED)
index a2beee6..5988c30 100644 (file)
@@ -444,7 +444,8 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
        fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
        if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone  */
                pr_err("error in devcmd2 init");
-               return -ENODEV;
+               err = -ENODEV;
+               goto err_free_wq;
        }
 
        /*
@@ -460,7 +461,7 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
        err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
                        DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
        if (err)
-               goto err_free_wq;
+               goto err_disable_wq;
 
        vdev->devcmd2->result =
                (struct devcmd2_result *) vdev->devcmd2->results_ring.descs;
@@ -481,8 +482,9 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
 
 err_free_desc_ring:
        vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
-err_free_wq:
+err_disable_wq:
        vnic_wq_disable(&vdev->devcmd2->wq);
+err_free_wq:
        vnic_wq_free(&vdev->devcmd2->wq);
 err_free_devcmd2:
        kfree(vdev->devcmd2);
index 2b28dd4..e821dd3 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/debugfs.h>
 #include <linux/dmapool.h>
 #include <linux/iopoll.h>
+#include <linux/irq.h>
 #include <linux/lcm.h>
 #include <linux/libata.h>
 #include <linux/mfd/syscon.h>
@@ -294,6 +295,7 @@ enum {
 
 struct hisi_sas_hw {
        int (*hw_init)(struct hisi_hba *hisi_hba);
+       int (*interrupt_preinit)(struct hisi_hba *hisi_hba);
        void (*setup_itct)(struct hisi_hba *hisi_hba,
                           struct hisi_sas_device *device);
        int (*slot_index_alloc)(struct hisi_hba *hisi_hba,
@@ -393,6 +395,8 @@ struct hisi_hba {
        u32 refclk_frequency_mhz;
        u8 sas_addr[SAS_ADDR_SIZE];
 
+       int *irq_map; /* v2 hw */
+
        int n_phy;
        spinlock_t lock;
        struct semaphore sem;
index b6d4419..cf0bfac 100644 (file)
@@ -2614,6 +2614,13 @@ err_out:
        return NULL;
 }
 
+static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba)
+{
+       if (hisi_hba->hw->interrupt_preinit)
+               return hisi_hba->hw->interrupt_preinit(hisi_hba);
+       return 0;
+}
+
 int hisi_sas_probe(struct platform_device *pdev,
                   const struct hisi_sas_hw *hw)
 {
@@ -2671,6 +2678,10 @@ int hisi_sas_probe(struct platform_device *pdev,
                sha->sas_port[i] = &hisi_hba->port[i].sas_port;
        }
 
+       rc = hisi_sas_interrupt_preinit(hisi_hba);
+       if (rc)
+               goto err_out_ha;
+
        rc = scsi_add_host(shost, &pdev->dev);
        if (rc)
                goto err_out_ha;
index b57177b..9adfdef 100644 (file)
@@ -3302,6 +3302,28 @@ static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = {
        fatal_axi_int_v2_hw
 };
 
+#define CQ0_IRQ_INDEX (96)
+
+static int hisi_sas_v2_interrupt_preinit(struct hisi_hba *hisi_hba)
+{
+       struct platform_device *pdev = hisi_hba->platform_dev;
+       struct Scsi_Host *shost = hisi_hba->shost;
+       struct irq_affinity desc = {
+               .pre_vectors = CQ0_IRQ_INDEX,
+               .post_vectors = 16,
+       };
+       int resv = desc.pre_vectors + desc.post_vectors, minvec = resv + 1, nvec;
+
+       nvec = devm_platform_get_irqs_affinity(pdev, &desc, minvec, 128,
+                                              &hisi_hba->irq_map);
+       if (nvec < 0)
+               return nvec;
+
+       shost->nr_hw_queues = hisi_hba->cq_nvecs = nvec - resv;
+
+       return 0;
+}
+
 /*
  * There is a limitation in the hip06 chipset that we need
  * to map in all mbigen interrupts, even if they are not used.
@@ -3310,14 +3332,11 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
 {
        struct platform_device *pdev = hisi_hba->platform_dev;
        struct device *dev = &pdev->dev;
-       int irq, rc = 0, irq_map[128];
+       int irq, rc = 0;
        int i, phy_no, fatal_no, queue_no;
 
-       for (i = 0; i < 128; i++)
-               irq_map[i] = platform_get_irq(pdev, i);
-
        for (i = 0; i < HISI_SAS_PHY_INT_NR; i++) {
-               irq = irq_map[i + 1]; /* Phy up/down is irq1 */
+               irq = hisi_hba->irq_map[i + 1]; /* Phy up/down is irq1 */
                rc = devm_request_irq(dev, irq, phy_interrupts[i], 0,
                                      DRV_NAME " phy", hisi_hba);
                if (rc) {
@@ -3331,7 +3350,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
        for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
                struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
 
-               irq = irq_map[phy_no + 72];
+               irq = hisi_hba->irq_map[phy_no + 72];
                rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0,
                                      DRV_NAME " sata", phy);
                if (rc) {
@@ -3343,7 +3362,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
        }
 
        for (fatal_no = 0; fatal_no < HISI_SAS_FATAL_INT_NR; fatal_no++) {
-               irq = irq_map[fatal_no + 81];
+               irq = hisi_hba->irq_map[fatal_no + 81];
                rc = devm_request_irq(dev, irq, fatal_interrupts[fatal_no], 0,
                                      DRV_NAME " fatal", hisi_hba);
                if (rc) {
@@ -3354,24 +3373,22 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
                }
        }
 
-       for (queue_no = 0; queue_no < hisi_hba->queue_count; queue_no++) {
+       for (queue_no = 0; queue_no < hisi_hba->cq_nvecs; queue_no++) {
                struct hisi_sas_cq *cq = &hisi_hba->cq[queue_no];
 
-               cq->irq_no = irq_map[queue_no + 96];
+               cq->irq_no = hisi_hba->irq_map[queue_no + 96];
                rc = devm_request_threaded_irq(dev, cq->irq_no,
                                               cq_interrupt_v2_hw,
                                               cq_thread_v2_hw, IRQF_ONESHOT,
                                               DRV_NAME " cq", cq);
                if (rc) {
                        dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n",
-                               irq, rc);
+                                       cq->irq_no, rc);
                        rc = -ENOENT;
                        goto err_out;
                }
+               cq->irq_mask = irq_get_affinity_mask(cq->irq_no);
        }
-
-       hisi_hba->cq_nvecs = hisi_hba->queue_count;
-
 err_out:
        return rc;
 }
@@ -3529,6 +3546,26 @@ static struct device_attribute *host_attrs_v2_hw[] = {
        NULL
 };
 
+static int map_queues_v2_hw(struct Scsi_Host *shost)
+{
+       struct hisi_hba *hisi_hba = shost_priv(shost);
+       struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+       const struct cpumask *mask;
+       unsigned int queue, cpu;
+
+       for (queue = 0; queue < qmap->nr_queues; queue++) {
+               mask = irq_get_affinity_mask(hisi_hba->irq_map[96 + queue]);
+               if (!mask)
+                       continue;
+
+               for_each_cpu(cpu, mask)
+                       qmap->mq_map[cpu] = qmap->queue_offset + queue;
+       }
+
+       return 0;
+
+}
+
 static struct scsi_host_template sht_v2_hw = {
        .name                   = DRV_NAME,
        .proc_name              = DRV_NAME,
@@ -3553,10 +3590,13 @@ static struct scsi_host_template sht_v2_hw = {
 #endif
        .shost_attrs            = host_attrs_v2_hw,
        .host_reset             = hisi_sas_host_reset,
+       .map_queues             = map_queues_v2_hw,
+       .host_tagset            = 1,
 };
 
 static const struct hisi_sas_hw hisi_sas_v2_hw = {
        .hw_init = hisi_sas_v2_init,
+       .interrupt_preinit = hisi_sas_v2_interrupt_preinit,
        .setup_itct = setup_itct_v2_hw,
        .slot_index_alloc = slot_index_alloc_quirk_v2_hw,
        .alloc_dev = alloc_dev_quirk_v2_hw,
index 42e4d35..65f168c 100644 (file)
@@ -1744,7 +1744,7 @@ static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
                iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
        }
 
-       vfc_cmd->correlation = cpu_to_be64(evt);
+       vfc_cmd->correlation = cpu_to_be64((u64)evt);
 
        if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
                return ibmvfc_send_event(evt, vhost, 0);
@@ -2418,7 +2418,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
                tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
                evt->sync_iu = &rsp_iu;
 
-               tmf->correlation = cpu_to_be64(evt);
+               tmf->correlation = cpu_to_be64((u64)evt);
 
                init_completion(&evt->comp);
                rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
@@ -3007,8 +3007,10 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
        unsigned long flags = 0;
 
        spin_lock_irqsave(shost->host_lock, flags);
-       if (sdev->type == TYPE_DISK)
+       if (sdev->type == TYPE_DISK) {
                sdev->allow_restart = 1;
+               blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
+       }
        spin_unlock_irqrestore(shost->host_lock, flags);
        return 0;
 }
index d71afae..8410004 100644 (file)
@@ -1623,8 +1623,13 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
                rc = fc_exch_done_locked(ep);
                WARN_ON(fc_seq_exch(sp) != ep);
                spin_unlock_bh(&ep->ex_lock);
-               if (!rc)
+               if (!rc) {
                        fc_exch_delete(ep);
+               } else {
+                       FC_EXCH_DBG(ep, "ep is completed already,"
+                                       "hence skip calling the resp\n");
+                       goto skip_resp;
+               }
        }
 
        /*
@@ -1643,6 +1648,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
        if (!fc_invoke_resp(ep, sp, fp))
                fc_frame_free(fp);
 
+skip_resp:
        fc_exch_release(ep);
        return;
 rel:
@@ -1899,10 +1905,16 @@ static void fc_exch_reset(struct fc_exch *ep)
 
        fc_exch_hold(ep);
 
-       if (!rc)
+       if (!rc) {
                fc_exch_delete(ep);
+       } else {
+               FC_EXCH_DBG(ep, "ep is completed already,"
+                               "hence skip calling the resp\n");
+               goto skip_resp;
+       }
 
        fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
+skip_resp:
        fc_seq_set_resp(sp, NULL, ep->arg);
        fc_exch_release(ep);
 }
index 6e4bf05..63a4f48 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/poll.h>
 #include <linux/vmalloc.h>
 #include <linux/irq_poll.h>
+#include <linux/blk-mq-pci.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -113,6 +114,10 @@ unsigned int enable_sdev_max_qd;
 module_param(enable_sdev_max_qd, int, 0444);
 MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
 
+int host_tagset_enable = 1;
+module_param(host_tagset_enable, int, 0444);
+MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)");
+
 MODULE_LICENSE("GPL");
 MODULE_VERSION(MEGASAS_VERSION);
 MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
@@ -3119,6 +3124,19 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
        return 0;
 }
 
+static int megasas_map_queues(struct Scsi_Host *shost)
+{
+       struct megasas_instance *instance;
+
+       instance = (struct megasas_instance *)shost->hostdata;
+
+       if (shost->nr_hw_queues == 1)
+               return 0;
+
+       return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+                       instance->pdev, instance->low_latency_index_start);
+}
+
 static void megasas_aen_polling(struct work_struct *work);
 
 /**
@@ -3427,6 +3445,7 @@ static struct scsi_host_template megasas_template = {
        .eh_timed_out = megasas_reset_timer,
        .shost_attrs = megaraid_host_attrs,
        .bios_param = megasas_bios_param,
+       .map_queues = megasas_map_queues,
        .change_queue_depth = scsi_change_queue_depth,
        .max_segment_size = 0xffffffff,
 };
@@ -6808,6 +6827,26 @@ static int megasas_io_attach(struct megasas_instance *instance)
        host->max_lun = MEGASAS_MAX_LUN;
        host->max_cmd_len = 16;
 
+       /* Use shared host tagset only for fusion adaptors
+        * if there are managed interrupts (smp affinity enabled case).
+        * Single msix_vectors in kdump, so shared host tag is also disabled.
+        */
+
+       host->host_tagset = 0;
+       host->nr_hw_queues = 1;
+
+       if ((instance->adapter_type != MFI_SERIES) &&
+               (instance->msix_vectors > instance->low_latency_index_start) &&
+               host_tagset_enable &&
+               instance->smp_affinity_enable) {
+               host->host_tagset = 1;
+               host->nr_hw_queues = instance->msix_vectors -
+                       instance->low_latency_index_start;
+       }
+
+       dev_info(&instance->pdev->dev,
+               "Max firmware commands: %d shared with nr_hw_queues = %d\n",
+               instance->max_fw_cmds, host->nr_hw_queues);
        /*
         * Notify the mid-layer about the new controller
         */
@@ -8205,11 +8244,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
                        goto out;
                }
 
+               /* always store 64 bits regardless of addressing */
                sense_ptr = (void *)cmd->frame + ioc->sense_off;
-               if (instance->consistent_mask_64bit)
-                       put_unaligned_le64(sense_handle, sense_ptr);
-               else
-                       put_unaligned_le32(sense_handle, sense_ptr);
+               put_unaligned_le64(sense_handle, sense_ptr);
        }
 
        /*
index b0c01cf..fd60728 100644 (file)
@@ -359,24 +359,29 @@ megasas_get_msix_index(struct megasas_instance *instance,
 {
        int sdev_busy;
 
-       /* nr_hw_queue = 1 for MegaRAID */
-       struct blk_mq_hw_ctx *hctx =
-               scmd->device->request_queue->queue_hw_ctx[0];
-
-       sdev_busy = atomic_read(&hctx->nr_active);
+       /* TBD - if the SCSI midlayer removes device_busy in the future, the
+        * driver should track this counter in an internal structure.
+        */
+       sdev_busy = atomic_read(&scmd->device->device_busy);
 
        if (instance->perf_mode == MR_BALANCED_PERF_MODE &&
-           sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))
+           sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)) {
                cmd->request_desc->SCSIIO.MSIxIndex =
                        mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
                                        MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
-       else if (instance->msix_load_balance)
+       } else if (instance->msix_load_balance) {
                cmd->request_desc->SCSIIO.MSIxIndex =
                        (mega_mod64(atomic64_add_return(1, &instance->total_io_count),
                                instance->msix_vectors));
-       else
+       } else if (instance->host->nr_hw_queues > 1) {
+               u32 tag = blk_mq_unique_tag(scmd->request);
+
+               cmd->request_desc->SCSIIO.MSIxIndex = blk_mq_unique_tag_to_hwq(tag) +
+                       instance->low_latency_index_start;
+       } else {
                cmd->request_desc->SCSIIO.MSIxIndex =
                        instance->reply_map[raw_smp_processor_id()];
+       }
 }
 
 /**
@@ -956,9 +961,6 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
        if (megasas_alloc_cmdlist_fusion(instance))
                goto fail_exit;
 
-       dev_info(&instance->pdev->dev, "Configured max firmware commands: %d\n",
-                instance->max_fw_cmds);
-
        /* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
        io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
        io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
@@ -1102,8 +1104,9 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
                MR_HIGH_IOPS_QUEUE_COUNT) && cur_intr_coalescing)
                instance->perf_mode = MR_BALANCED_PERF_MODE;
 
-       dev_info(&instance->pdev->dev, "Performance mode :%s\n",
-               MEGASAS_PERF_MODE_2STR(instance->perf_mode));
+       dev_info(&instance->pdev->dev, "Performance mode :%s (latency index = %d)\n",
+               MEGASAS_PERF_MODE_2STR(instance->perf_mode),
+               instance->low_latency_index_start);
 
        instance->fw_sync_cache_support = (scratch_pad_1 &
                MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
index 8620945..c299f7e 100644 (file)
@@ -79,5 +79,5 @@ config SCSI_MPT2SAS
        select SCSI_MPT3SAS
        depends on PCI && SCSI
        help
-       Dummy config option for backwards compatiblity: configure the MPT3SAS
+       Dummy config option for backwards compatibility: configure the MPT3SAS
        driver instead.
index f5fc7f5..47ad64b 100644 (file)
@@ -2245,7 +2245,7 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
                             chap_name);
                break;
        case ISCSI_BOOT_TGT_CHAP_SECRET:
-               rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+               rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN,
                             chap_secret);
                break;
        case ISCSI_BOOT_TGT_REV_CHAP_NAME:
@@ -2253,7 +2253,7 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
                             mchap_name);
                break;
        case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
-               rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+               rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN,
                             mchap_secret);
                break;
        case ISCSI_BOOT_TGT_FLAGS:
index 24c0f7e..4a08c45 100644 (file)
@@ -6740,7 +6740,7 @@ static int __init scsi_debug_init(void)
                k = sdeb_zbc_model_str(sdeb_zbc_model_s);
                if (k < 0) {
                        ret = k;
-                       goto free_vm;
+                       goto free_q_arr;
                }
                sdeb_zbc_model = k;
                switch (sdeb_zbc_model) {
@@ -6753,7 +6753,8 @@ static int __init scsi_debug_init(void)
                        break;
                default:
                        pr_err("Invalid ZBC model\n");
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto free_q_arr;
                }
        }
        if (sdeb_zbc_model != BLK_ZONED_NONE) {
index cba1cf6..1e939a2 100644 (file)
@@ -541,7 +541,14 @@ int srp_reconnect_rport(struct srp_rport *rport)
        res = mutex_lock_interruptible(&rport->mutex);
        if (res)
                goto out;
-       scsi_target_block(&shost->shost_gendev);
+       if (rport->state != SRP_RPORT_FAIL_FAST)
+               /*
+                * sdev state must be SDEV_TRANSPORT_OFFLINE, transition
+                * to SDEV_BLOCK is illegal. Calling scsi_target_unblock()
+                * later is ok though, scsi_internal_device_unblock_nowait()
+                * treats SDEV_TRANSPORT_OFFLINE like SDEV_BLOCK.
+                */
+               scsi_target_block(&shost->shost_gendev);
        res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
        pr_debug("%s (state %d): transport.reconnect() returned %d\n",
                 dev_name(&shost->shost_gendev), rport->state, res);
index 679c2c0..a3d2d4b 100644 (file)
@@ -984,8 +984,10 @@ static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
                }
        }
 
-       if (sdp->no_write_same)
+       if (sdp->no_write_same) {
+               rq->rq_flags |= RQF_QUIET;
                return BLK_STS_TARGET;
+       }
 
        if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff)
                return sd_setup_write_same16_cmnd(cmd, false);
@@ -3510,10 +3512,8 @@ static int sd_probe(struct device *dev)
 static int sd_remove(struct device *dev)
 {
        struct scsi_disk *sdkp;
-       dev_t devt;
 
        sdkp = dev_get_drvdata(dev);
-       devt = disk_devt(sdkp->disk);
        scsi_autopm_get_device(sdkp->device);
 
        async_synchronize_full_domain(&scsi_sd_pm_domain);
index 3f6dfed..b915b38 100644 (file)
@@ -72,6 +72,7 @@ config SCSI_UFS_DWC_TC_PCI
 config SCSI_UFSHCD_PLATFORM
        tristate "Platform bus based UFS Controller support"
        depends on SCSI_UFSHCD
+       depends on HAS_IOMEM
        help
        This selects the UFS host controller support. Select this if
        you have an UFS controller on Platform bus.
index 82ad317..fb32d12 100644 (file)
@@ -289,7 +289,8 @@ static inline void ufshcd_wb_config(struct ufs_hba *hba)
        if (ret)
                dev_err(hba->dev, "%s: En WB flush during H8: failed: %d\n",
                        __func__, ret);
-       ufshcd_wb_toggle_flush(hba, true);
+       if (!(hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL))
+               ufshcd_wb_toggle_flush(hba, true);
 }
 
 static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
@@ -3995,6 +3996,8 @@ int ufshcd_link_recovery(struct ufs_hba *hba)
        if (ret)
                dev_err(hba->dev, "%s: link recovery failed, err %d",
                        __func__, ret);
+       else
+               ufshcd_clear_ua_wluns(hba);
 
        return ret;
 }
@@ -4991,7 +4994,8 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
                break;
        } /* end of switch */
 
-       if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
+       if ((host_byte(result) != DID_OK) &&
+           (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
                ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
        return result;
 }
@@ -5436,9 +5440,6 @@ static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
 
 static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
 {
-       if (hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL)
-               return;
-
        if (enable)
                ufshcd_wb_buf_flush_enable(hba);
        else
@@ -6003,6 +6004,9 @@ skip_err_handling:
        ufshcd_scsi_unblock_requests(hba);
        ufshcd_err_handling_unprepare(hba);
        up(&hba->eh_sem);
+
+       if (!err && needs_reset)
+               ufshcd_clear_ua_wluns(hba);
 }
 
 /**
@@ -6297,9 +6301,13 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
                intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
        }
 
-       if (enabled_intr_status && retval == IRQ_NONE) {
-               dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
-                                       __func__, intr_status);
+       if (enabled_intr_status && retval == IRQ_NONE &&
+                               !ufshcd_eh_in_progress(hba)) {
+               dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
+                                       __func__,
+                                       intr_status,
+                                       hba->ufs_stats.last_intr_status,
+                                       enabled_intr_status);
                ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
        }
 
@@ -6343,7 +6351,10 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
         * Even though we use wait_event() which sleeps indefinitely,
         * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
         */
-       req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
+       req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+       if (IS_ERR(req))
+               return PTR_ERR(req);
+
        req->end_io_data = &wait;
        free_slot = req->tag;
        WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
@@ -6661,19 +6672,16 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
 {
        struct Scsi_Host *host;
        struct ufs_hba *hba;
-       unsigned int tag;
        u32 pos;
        int err;
-       u8 resp = 0xF;
-       struct ufshcd_lrb *lrbp;
+       u8 resp = 0xF, lun;
        unsigned long flags;
 
        host = cmd->device->host;
        hba = shost_priv(host);
-       tag = cmd->request->tag;
 
-       lrbp = &hba->lrb[tag];
-       err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
+       lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
+       err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
        if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
                if (!err)
                        err = resp;
@@ -6682,7 +6690,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
 
        /* clear the commands that were pending for corresponding LUN */
        for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
-               if (hba->lrb[pos].lun == lrbp->lun) {
+               if (hba->lrb[pos].lun == lun) {
                        err = ufshcd_clear_cmd(hba, pos);
                        if (err)
                                break;
@@ -6943,14 +6951,11 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
        ufshcd_set_clk_freq(hba, true);
 
        err = ufshcd_hba_enable(hba);
-       if (err)
-               goto out;
 
        /* Establish the link again and restore the device */
-       err = ufshcd_probe_hba(hba, false);
        if (!err)
-               ufshcd_clear_ua_wluns(hba);
-out:
+               err = ufshcd_probe_hba(hba, false);
+
        if (err)
                dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
        ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
@@ -7721,6 +7726,8 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
        if (ret)
                goto out;
 
+       ufshcd_clear_ua_wluns(hba);
+
        /* Initialize devfreq after UFS device is detected */
        if (ufshcd_is_clkscaling_supported(hba)) {
                memcpy(&hba->clk_scaling.saved_pwr_info.info,
@@ -7922,8 +7929,6 @@ out:
                pm_runtime_put_sync(hba->dev);
                ufshcd_exit_clk_scaling(hba);
                ufshcd_hba_exit(hba);
-       } else {
-               ufshcd_clear_ua_wluns(hba);
        }
 }
 
@@ -8698,6 +8703,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
                        ufshcd_wb_need_flush(hba));
        }
 
+       flush_work(&hba->eeh_work);
+
        if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
                if (!ufshcd_is_runtime_pm(pm_op))
                        /* ensure that bkops is disabled */
@@ -8710,8 +8717,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
                }
        }
 
-       flush_work(&hba->eeh_work);
-
        /*
         * In the case of DeepSleep, the device is expected to remain powered
         * with the link off, so do not check for bkops.
@@ -8780,6 +8785,7 @@ enable_gating:
                ufshcd_resume_clkscaling(hba);
        hba->clk_gating.is_suspended = false;
        hba->dev_info.b_rpm_dev_flush_capable = false;
+       ufshcd_clear_ua_wluns(hba);
        ufshcd_release(hba);
 out:
        if (hba->dev_info.b_rpm_dev_flush_capable) {
@@ -8890,6 +8896,8 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
                cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
        }
 
+       ufshcd_clear_ua_wluns(hba);
+
        /* Schedule clock gating in case of no access to UFS device yet */
        ufshcd_release(hba);
 
@@ -8938,7 +8946,8 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
        if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
             hba->curr_dev_pwr_mode) &&
            (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
-            hba->uic_link_state))
+            hba->uic_link_state) &&
+            !hba->dev_info.b_rpm_dev_flush_capable)
                goto out;
 
        if (pm_runtime_suspended(hba->dev)) {
index 1217caf..9b07663 100644 (file)
@@ -140,12 +140,13 @@ struct litex_soc_ctrl_device {
        void __iomem *base;
 };
 
+#ifdef CONFIG_OF
 static const struct of_device_id litex_soc_ctrl_of_match[] = {
        {.compatible = "litex,soc-controller"},
        {},
 };
-
 MODULE_DEVICE_TABLE(of, litex_soc_ctrl_of_match);
+#endif /* CONFIG_OF */
 
 static int litex_soc_ctrl_probe(struct platform_device *pdev)
 {
index 809bfff..cbc4c28 100644 (file)
@@ -189,24 +189,26 @@ static int altera_spi_txrx(struct spi_master *master,
 
                /* send the first byte */
                altera_spi_tx_word(hw);
-       } else {
-               while (hw->count < hw->len) {
-                       altera_spi_tx_word(hw);
 
-                       for (;;) {
-                               altr_spi_readl(hw, ALTERA_SPI_STATUS, &val);
-                               if (val & ALTERA_SPI_STATUS_RRDY_MSK)
-                                       break;
+               return 1;
+       }
+
+       while (hw->count < hw->len) {
+               altera_spi_tx_word(hw);
 
-                               cpu_relax();
-                       }
+               for (;;) {
+                       altr_spi_readl(hw, ALTERA_SPI_STATUS, &val);
+                       if (val & ALTERA_SPI_STATUS_RRDY_MSK)
+                               break;
 
-                       altera_spi_rx_word(hw);
+                       cpu_relax();
                }
-               spi_finalize_current_transfer(master);
+
+               altera_spi_rx_word(hw);
        }
+       spi_finalize_current_transfer(master);
 
-       return t->len;
+       return 0;
 }
 
 static irqreturn_t altera_spi_irq(int irq, void *dev)
index 70467b9..a3afd1b 100644 (file)
@@ -115,6 +115,7 @@ struct cdns_spi {
        void __iomem *regs;
        struct clk *ref_clk;
        struct clk *pclk;
+       unsigned int clk_rate;
        u32 speed_hz;
        const u8 *txbuf;
        u8 *rxbuf;
@@ -250,7 +251,7 @@ static void cdns_spi_config_clock_freq(struct spi_device *spi,
        u32 ctrl_reg, baud_rate_val;
        unsigned long frequency;
 
-       frequency = clk_get_rate(xspi->ref_clk);
+       frequency = xspi->clk_rate;
 
        ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
 
@@ -558,8 +559,9 @@ static int cdns_spi_probe(struct platform_device *pdev)
        master->auto_runtime_pm = true;
        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 
+       xspi->clk_rate = clk_get_rate(xspi->ref_clk);
        /* Set to default valid value */
-       master->max_speed_hz = clk_get_rate(xspi->ref_clk) / 4;
+       master->max_speed_hz = xspi->clk_rate / 4;
        xspi->speed_hz = master->max_speed_hz;
 
        master->bits_per_word_mask = SPI_BPW_MASK(8);
index 9494257..6d8e0a0 100644 (file)
@@ -115,14 +115,13 @@ static void fsl_spi_chipselect(struct spi_device *spi, int value)
 {
        struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
        struct fsl_spi_platform_data *pdata;
-       bool pol = spi->mode & SPI_CS_HIGH;
        struct spi_mpc8xxx_cs   *cs = spi->controller_state;
 
        pdata = spi->dev.parent->parent->platform_data;
 
        if (value == BITBANG_CS_INACTIVE) {
                if (pdata->cs_control)
-                       pdata->cs_control(spi, !pol);
+                       pdata->cs_control(spi, false);
        }
 
        if (value == BITBANG_CS_ACTIVE) {
@@ -134,7 +133,7 @@ static void fsl_spi_chipselect(struct spi_device *spi, int value)
                fsl_spi_change_mode(spi);
 
                if (pdata->cs_control)
-                       pdata->cs_control(spi, pol);
+                       pdata->cs_control(spi, true);
        }
 }
 
index 512e925..881f645 100644 (file)
@@ -83,6 +83,7 @@ struct spi_geni_master {
        spinlock_t lock;
        int irq;
        bool cs_flag;
+       bool abort_failed;
 };
 
 static int get_spi_clk_cfg(unsigned int speed_hz,
@@ -141,8 +142,49 @@ static void handle_fifo_timeout(struct spi_master *spi,
        spin_unlock_irq(&mas->lock);
 
        time_left = wait_for_completion_timeout(&mas->abort_done, HZ);
-       if (!time_left)
+       if (!time_left) {
                dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
+
+               /*
+                * No need for a lock since SPI core has a lock and we never
+                * access this from an interrupt.
+                */
+               mas->abort_failed = true;
+       }
+}
+
+static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas)
+{
+       struct geni_se *se = &mas->se;
+       u32 m_irq, m_irq_en;
+
+       if (!mas->abort_failed)
+               return false;
+
+       /*
+        * The only known case where a transfer times out and then a cancel
+        * times out then an abort times out is if something is blocking our
+        * interrupt handler from running.  Avoid starting any new transfers
+        * until that sorts itself out.
+        */
+       spin_lock_irq(&mas->lock);
+       m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
+       m_irq_en = readl(se->base + SE_GENI_M_IRQ_EN);
+       spin_unlock_irq(&mas->lock);
+
+       if (m_irq & m_irq_en) {
+               dev_err(mas->dev, "Interrupts pending after abort: %#010x\n",
+                       m_irq & m_irq_en);
+               return true;
+       }
+
+       /*
+        * If we're here the problem resolved itself so no need to check more
+        * on future transfers.
+        */
+       mas->abort_failed = false;
+
+       return false;
 }
 
 static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
@@ -158,10 +200,21 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
        if (set_flag == mas->cs_flag)
                return;
 
-       mas->cs_flag = set_flag;
-
        pm_runtime_get_sync(mas->dev);
+
+       if (spi_geni_is_abort_still_pending(mas)) {
+               dev_err(mas->dev, "Can't set chip select\n");
+               goto exit;
+       }
+
        spin_lock_irq(&mas->lock);
+       if (mas->cur_xfer) {
+               dev_err(mas->dev, "Can't set CS when prev xfer running\n");
+               spin_unlock_irq(&mas->lock);
+               goto exit;
+       }
+
+       mas->cs_flag = set_flag;
        reinit_completion(&mas->cs_done);
        if (set_flag)
                geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
@@ -170,9 +223,12 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
        spin_unlock_irq(&mas->lock);
 
        time_left = wait_for_completion_timeout(&mas->cs_done, HZ);
-       if (!time_left)
+       if (!time_left) {
+               dev_warn(mas->dev, "Timeout setting chip select\n");
                handle_fifo_timeout(spi, NULL);
+       }
 
+exit:
        pm_runtime_put(mas->dev);
 }
 
@@ -280,6 +336,9 @@ static int spi_geni_prepare_message(struct spi_master *spi,
        int ret;
        struct spi_geni_master *mas = spi_master_get_devdata(spi);
 
+       if (spi_geni_is_abort_still_pending(mas))
+               return -EBUSY;
+
        ret = setup_fifo_params(spi_msg->spi, spi);
        if (ret)
                dev_err(mas->dev, "Couldn't select mode %d\n", ret);
@@ -354,6 +413,12 @@ static bool geni_spi_handle_tx(struct spi_geni_master *mas)
        unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
        unsigned int i = 0;
 
+       /* Stop the watermark IRQ if nothing to send */
+       if (!mas->cur_xfer) {
+               writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+               return false;
+       }
+
        max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
        if (mas->tx_rem_bytes < max_bytes)
                max_bytes = mas->tx_rem_bytes;
@@ -396,6 +461,14 @@ static void geni_spi_handle_rx(struct spi_geni_master *mas)
                if (rx_last_byte_valid && rx_last_byte_valid < 4)
                        rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
        }
+
+       /* Clear out the FIFO and bail if nowhere to put it */
+       if (!mas->cur_xfer) {
+               for (i = 0; i < DIV_ROUND_UP(rx_bytes, bytes_per_fifo_word); i++)
+                       readl(se->base + SE_GENI_RX_FIFOn);
+               return;
+       }
+
        if (mas->rx_rem_bytes < rx_bytes)
                rx_bytes = mas->rx_rem_bytes;
 
@@ -495,6 +568,9 @@ static int spi_geni_transfer_one(struct spi_master *spi,
 {
        struct spi_geni_master *mas = spi_master_get_devdata(spi);
 
+       if (spi_geni_is_abort_still_pending(mas))
+               return -EBUSY;
+
        /* Terminate and return success for 0 byte length transfer */
        if (!xfer->len)
                return 0;
index 471dedf..6017209 100644 (file)
@@ -493,9 +493,9 @@ static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len)
 
        /* align packet size with data registers access */
        if (spi->cur_bpw > 8)
-               fthlv -= (fthlv % 2); /* multiple of 2 */
+               fthlv += (fthlv % 2) ? 1 : 0;
        else
-               fthlv -= (fthlv % 4); /* multiple of 4 */
+               fthlv += (fthlv % 4) ? (4 - (fthlv % 4)) : 0;
 
        if (!fthlv)
                fthlv = 1;
index 51d7c00..720ab34 100644 (file)
@@ -1108,6 +1108,7 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
 {
        struct spi_statistics *statm = &ctlr->statistics;
        struct spi_statistics *stats = &msg->spi->statistics;
+       u32 speed_hz = xfer->speed_hz;
        unsigned long long ms;
 
        if (spi_controller_is_slave(ctlr)) {
@@ -1116,8 +1117,11 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
                        return -EINTR;
                }
        } else {
+               if (!speed_hz)
+                       speed_hz = 100000;
+
                ms = 8LL * 1000LL * xfer->len;
-               do_div(ms, xfer->speed_hz);
+               do_div(ms, speed_hz);
                ms += ms + 200; /* some tolerance */
 
                if (ms > UINT_MAX)
@@ -3378,8 +3382,9 @@ int spi_setup(struct spi_device *spi)
        if (status)
                return status;
 
-       if (!spi->max_speed_hz ||
-           spi->max_speed_hz > spi->controller->max_speed_hz)
+       if (spi->controller->max_speed_hz &&
+           (!spi->max_speed_hz ||
+            spi->max_speed_hz > spi->controller->max_speed_hz))
                spi->max_speed_hz = spi->controller->max_speed_hz;
 
        mutex_lock(&spi->controller->io_mutex);
index d99231c..80d74cc 100644 (file)
@@ -2987,7 +2987,9 @@ static int put_compat_cmd(struct comedi32_cmd_struct __user *cmd32,
        v32.chanlist_len = cmd->chanlist_len;
        v32.data = ptr_to_compat(cmd->data);
        v32.data_len = cmd->data_len;
-       return copy_to_user(cmd32, &v32, sizeof(v32));
+       if (copy_to_user(cmd32, &v32, sizeof(v32)))
+               return -EFAULT;
+       return 0;
 }
 
 /* Handle 32-bit COMEDI_CMD ioctl. */
index 861aedd..0d42bc6 100644 (file)
@@ -278,21 +278,24 @@ static int spmi_controller_probe(struct platform_device *pdev)
        iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!iores) {
                dev_err(&pdev->dev, "can not get resource!\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_put_controller;
        }
 
        spmi_controller->base = devm_ioremap(&pdev->dev, iores->start,
                                             resource_size(iores));
        if (!spmi_controller->base) {
                dev_err(&pdev->dev, "can not remap base addr!\n");
-               return -EADDRNOTAVAIL;
+               ret = -EADDRNOTAVAIL;
+               goto err_put_controller;
        }
 
        ret = of_property_read_u32(pdev->dev.of_node, "spmi-channel",
                                   &spmi_controller->channel);
        if (ret) {
                dev_err(&pdev->dev, "can not get channel\n");
-               return -ENODEV;
+               ret = -ENODEV;
+               goto err_put_controller;
        }
 
        platform_set_drvdata(pdev, spmi_controller);
@@ -309,9 +312,15 @@ static int spmi_controller_probe(struct platform_device *pdev)
        ctrl->write_cmd = spmi_write_cmd;
 
        ret = spmi_controller_add(ctrl);
-       if (ret)
-               dev_err(&pdev->dev, "spmi_add_controller failed with error %d!\n", ret);
+       if (ret) {
+               dev_err(&pdev->dev, "spmi_controller_add failed with error %d!\n", ret);
+               goto err_put_controller;
+       }
+
+       return 0;
 
+err_put_controller:
+       spmi_controller_put(ctrl);
        return ret;
 }
 
@@ -320,7 +329,7 @@ static int spmi_del_controller(struct platform_device *pdev)
        struct spmi_controller *ctrl = platform_get_drvdata(pdev);
 
        spmi_controller_remove(ctrl);
-       kfree(ctrl);
+       spmi_controller_put(ctrl);
        return 0;
 }
 
index 52b9fb1..b666cb2 100644 (file)
@@ -1062,26 +1062,6 @@ static const struct v4l2_ctrl_config ctrl_select_isp_version = {
        .def = 0,
 };
 
-#if 0 /* #ifdef CONFIG_ION */
-/*
- * Control for ISP ion device fd
- *
- * userspace will open ion device and pass the fd to kernel.
- * this fd will be used to map shared fd to buffer.
- */
-/* V4L2_CID_ATOMISP_ION_DEVICE_FD is not defined */
-static const struct v4l2_ctrl_config ctrl_ion_dev_fd = {
-       .ops = &ctrl_ops,
-       .id = V4L2_CID_ATOMISP_ION_DEVICE_FD,
-       .type = V4L2_CTRL_TYPE_INTEGER,
-       .name = "Ion Device Fd",
-       .min = -1,
-       .max = 1024,
-       .step = 1,
-       .def = ION_FD_UNSET
-};
-#endif
-
 static void atomisp_init_subdev_pipe(struct atomisp_sub_device *asd,
                                     struct atomisp_video_pipe *pipe, enum v4l2_buf_type buf_type)
 {
index d241349..bc4bb43 100644 (file)
@@ -712,7 +712,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
        ret = dma_async_device_register(dd);
        if (ret) {
                dev_err(&pdev->dev, "failed to register dma device\n");
-               return ret;
+               goto err_uninit_hsdma;
        }
 
        ret = of_dma_controller_register(pdev->dev.of_node,
@@ -728,6 +728,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
 
 err_unregister:
        dma_async_device_unregister(dd);
+err_uninit_hsdma:
+       mtk_hsdma_uninit(hsdma);
        return ret;
 }
 
index 6b171ff..a5991df 100644 (file)
@@ -562,8 +562,6 @@ tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
 
 static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
 {
-       if (tcmu_cmd->se_cmd)
-               tcmu_cmd->se_cmd->priv = NULL;
        kfree(tcmu_cmd->dbi);
        kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
 }
@@ -1174,11 +1172,12 @@ tcmu_queue_cmd(struct se_cmd *se_cmd)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
        mutex_lock(&udev->cmdr_lock);
-       se_cmd->priv = tcmu_cmd;
        if (!(se_cmd->transport_state & CMD_T_ABORTED))
                ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
        if (ret < 0)
                tcmu_free_cmd(tcmu_cmd);
+       else
+               se_cmd->priv = tcmu_cmd;
        mutex_unlock(&udev->cmdr_lock);
        return scsi_ret;
 }
@@ -1241,6 +1240,7 @@ tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
 
                list_del_init(&cmd->queue_entry);
                tcmu_free_cmd(cmd);
+               se_cmd->priv = NULL;
                target_complete_cmd(se_cmd, SAM_STAT_TASK_ABORTED);
                unqueued = true;
        }
@@ -1332,6 +1332,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
        }
 
 done:
+       se_cmd->priv = NULL;
        if (read_len_valid) {
                pr_debug("read_len = %d\n", read_len);
                target_complete_cmd_with_length(cmd->se_cmd,
@@ -1478,6 +1479,7 @@ static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
        se_cmd = cmd->se_cmd;
        tcmu_free_cmd(cmd);
 
+       se_cmd->priv = NULL;
        target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
 }
 
@@ -1592,6 +1594,7 @@ static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
                         * removed then LIO core will do the right thing and
                         * fail the retry.
                         */
+                       tcmu_cmd->se_cmd->priv = NULL;
                        target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
                        tcmu_free_cmd(tcmu_cmd);
                        continue;
@@ -1605,6 +1608,7 @@ static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
                         * Ignore scsi_ret for now. target_complete_cmd
                         * drops it.
                         */
+                       tcmu_cmd->se_cmd->priv = NULL;
                        target_complete_cmd(tcmu_cmd->se_cmd,
                                            SAM_STAT_CHECK_CONDITION);
                        tcmu_free_cmd(tcmu_cmd);
@@ -2212,6 +2216,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
                if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
                        WARN_ON(!cmd->se_cmd);
                        list_del_init(&cmd->queue_entry);
+                       cmd->se_cmd->priv = NULL;
                        if (err_level == 1) {
                                /*
                                 * Userspace was not able to start the
index 44e15d7..66d6f1d 100644 (file)
@@ -46,60 +46,83 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
        return 0;
 }
 
-struct xcopy_dev_search_info {
-       const unsigned char *dev_wwn;
-       struct se_device *found_dev;
-};
-
+/**
+ * target_xcopy_locate_se_dev_e4_iter - compare XCOPY NAA device identifiers
+ *
+ * @se_dev: device being considered for match
+ * @dev_wwn: XCOPY requested NAA dev_wwn
+ * @return: 1 on match, 0 on no-match
+ */
 static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev,
-                                             void *data)
+                                             const unsigned char *dev_wwn)
 {
-       struct xcopy_dev_search_info *info = data;
        unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
        int rc;
 
-       if (!se_dev->dev_attrib.emulate_3pc)
+       if (!se_dev->dev_attrib.emulate_3pc) {
+               pr_debug("XCOPY: emulate_3pc disabled on se_dev %p\n", se_dev);
                return 0;
+       }
 
        memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
        target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
 
-       rc = memcmp(&tmp_dev_wwn[0], info->dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
-       if (rc != 0)
-               return 0;
-
-       info->found_dev = se_dev;
-       pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
-
-       rc = target_depend_item(&se_dev->dev_group.cg_item);
+       rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
        if (rc != 0) {
-               pr_err("configfs_depend_item attempt failed: %d for se_dev: %p\n",
-                      rc, se_dev);
-               return rc;
+               pr_debug("XCOPY: skip non-matching: %*ph\n",
+                        XCOPY_NAA_IEEE_REGEX_LEN, tmp_dev_wwn);
+               return 0;
        }
+       pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
 
-       pr_debug("Called configfs_depend_item for se_dev: %p se_dev->se_dev_group: %p\n",
-                se_dev, &se_dev->dev_group);
        return 1;
 }
 
-static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn,
-                                       struct se_device **found_dev)
+static int target_xcopy_locate_se_dev_e4(struct se_session *sess,
+                                       const unsigned char *dev_wwn,
+                                       struct se_device **_found_dev,
+                                       struct percpu_ref **_found_lun_ref)
 {
-       struct xcopy_dev_search_info info;
-       int ret;
-
-       memset(&info, 0, sizeof(info));
-       info.dev_wwn = dev_wwn;
-
-       ret = target_for_each_device(target_xcopy_locate_se_dev_e4_iter, &info);
-       if (ret == 1) {
-               *found_dev = info.found_dev;
-               return 0;
-       } else {
-               pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
-               return -EINVAL;
+       struct se_dev_entry *deve;
+       struct se_node_acl *nacl;
+       struct se_lun *this_lun = NULL;
+       struct se_device *found_dev = NULL;
+
+       /* cmd with NULL sess indicates no associated $FABRIC_MOD */
+       if (!sess)
+               goto err_out;
+
+       pr_debug("XCOPY 0xe4: searching for: %*ph\n",
+                XCOPY_NAA_IEEE_REGEX_LEN, dev_wwn);
+
+       nacl = sess->se_node_acl;
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
+               struct se_device *this_dev;
+               int rc;
+
+               this_lun = rcu_dereference(deve->se_lun);
+               this_dev = rcu_dereference_raw(this_lun->lun_se_dev);
+
+               rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn);
+               if (rc) {
+                       if (percpu_ref_tryget_live(&this_lun->lun_ref))
+                               found_dev = this_dev;
+                       break;
+               }
        }
+       rcu_read_unlock();
+       if (found_dev == NULL)
+               goto err_out;
+
+       pr_debug("lun_ref held for se_dev: %p se_dev->se_dev_group: %p\n",
+                found_dev, &found_dev->dev_group);
+       *_found_dev = found_dev;
+       *_found_lun_ref = &this_lun->lun_ref;
+       return 0;
+err_out:
+       pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
+       return -EINVAL;
 }
 
 static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
@@ -246,12 +269,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
 
        switch (xop->op_origin) {
        case XCOL_SOURCE_RECV_OP:
-               rc = target_xcopy_locate_se_dev_e4(xop->dst_tid_wwn,
-                                               &xop->dst_dev);
+               rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
+                                               xop->dst_tid_wwn,
+                                               &xop->dst_dev,
+                                               &xop->remote_lun_ref);
                break;
        case XCOL_DEST_RECV_OP:
-               rc = target_xcopy_locate_se_dev_e4(xop->src_tid_wwn,
-                                               &xop->src_dev);
+               rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
+                                               xop->src_tid_wwn,
+                                               &xop->src_dev,
+                                               &xop->remote_lun_ref);
                break;
        default:
                pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - "
@@ -391,18 +418,12 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
 
 static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
 {
-       struct se_device *remote_dev;
-
        if (xop->op_origin == XCOL_SOURCE_RECV_OP)
-               remote_dev = xop->dst_dev;
+               pr_debug("putting dst lun_ref for %p\n", xop->dst_dev);
        else
-               remote_dev = xop->src_dev;
-
-       pr_debug("Calling configfs_undepend_item for"
-                 " remote_dev: %p remote_dev->dev_group: %p\n",
-                 remote_dev, &remote_dev->dev_group.cg_item);
+               pr_debug("putting src lun_ref for %p\n", xop->src_dev);
 
-       target_undepend_item(&remote_dev->dev_group.cg_item);
+       percpu_ref_put(xop->remote_lun_ref);
 }
 
 static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
index c56a1bd..e5f2000 100644 (file)
@@ -27,6 +27,7 @@ struct xcopy_op {
        struct se_device *dst_dev;
        unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
        unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+       struct percpu_ref *remote_lun_ref;
 
        sector_t src_lba;
        sector_t dst_lba;
index 8b7f941..b8c4159 100644 (file)
@@ -2316,7 +2316,7 @@ static int icm_usb4_switch_nvm_authenticate_status(struct tb_switch *sw,
 
        if (auth && auth->reply.route_hi == sw->config.route_hi &&
            auth->reply.route_lo == sw->config.route_lo) {
-               tb_dbg(tb, "NVM_AUTH found for %llx flags 0x%#x status %#x\n",
+               tb_dbg(tb, "NVM_AUTH found for %llx flags %#x status %#x\n",
                       tb_route(sw), auth->reply.hdr.flags, auth->reply.status);
                if (auth->reply.hdr.flags & ICM_FLAGS_ERROR)
                        ret = -EIO;
index 47a6e42..e15cd6b 100644 (file)
@@ -401,6 +401,20 @@ config MIPS_EJTAG_FDC_KGDB_CHAN
        help
          FDC channel number to use for KGDB.
 
+config NULL_TTY
+       tristate "NULL TTY driver"
+       help
+         Say Y here if you want a NULL TTY which simply discards messages.
+
+         This is useful to allow userspace applications which expect a console
+         device to work without modifications even when no console is
+         available or desired.
+
+         In order to use this driver, you should redirect the console to this
+         TTY, or boot the kernel with console=ttynull.
+
+         If unsure, say N.
+
 config TRACE_ROUTER
        tristate "Trace data router for MIPI P1149.7 cJTAG standard"
        depends on TRACE_SINK
index 3c1c5a9..b3ccae9 100644 (file)
@@ -2,7 +2,7 @@
 obj-$(CONFIG_TTY)              += tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o \
                                   tty_buffer.o tty_port.o tty_mutex.o \
                                   tty_ldsem.o tty_baudrate.o tty_jobctrl.o \
-                                  n_null.o ttynull.o
+                                  n_null.o
 obj-$(CONFIG_LEGACY_PTYS)      += pty.o
 obj-$(CONFIG_UNIX98_PTYS)      += pty.o
 obj-$(CONFIG_AUDIT)            += tty_audit.o
@@ -25,6 +25,7 @@ obj-$(CONFIG_ISI)             += isicom.o
 obj-$(CONFIG_MOXA_INTELLIO)    += moxa.o
 obj-$(CONFIG_MOXA_SMARTIO)     += mxser.o
 obj-$(CONFIG_NOZOMI)           += nozomi.o
+obj-$(CONFIG_NULL_TTY)         += ttynull.o
 obj-$(CONFIG_ROCKETPORT)       += rocket.o
 obj-$(CONFIG_SYNCLINK_GT)      += synclink_gt.o
 obj-$(CONFIG_PPC_EPAPR_HV_BYTECHAN) += ehv_bytechan.o
index 118b299..e0c00a1 100644 (file)
@@ -648,6 +648,14 @@ static void wait_for_xmitr(struct uart_port *port)
                                  (val & STAT_TX_RDY(port)), 1, 10000);
 }
 
+static void wait_for_xmite(struct uart_port *port)
+{
+       u32 val;
+
+       readl_poll_timeout_atomic(port->membase + UART_STAT, val,
+                                 (val & STAT_TX_EMP), 1, 10000);
+}
+
 static void mvebu_uart_console_putchar(struct uart_port *port, int ch)
 {
        wait_for_xmitr(port);
@@ -675,7 +683,7 @@ static void mvebu_uart_console_write(struct console *co, const char *s,
 
        uart_console_write(port, s, count, mvebu_uart_console_putchar);
 
-       wait_for_xmitr(port);
+       wait_for_xmite(port);
 
        if (ier)
                writel(ier, port->membase + UART_CTRL(port));
index 1066eeb..328d5a7 100644 (file)
@@ -1000,6 +1000,7 @@ static int sifive_serial_probe(struct platform_device *pdev)
        /* Set up clock divider */
        ssp->clkin_rate = clk_get_rate(ssp->clk);
        ssp->baud_rate = SIFIVE_DEFAULT_BAUD_RATE;
+       ssp->port.uartclk = ssp->baud_rate * 16;
        __ssp_update_div(ssp);
 
        platform_set_drvdata(pdev, ssp);
index 8034489..4a208a9 100644 (file)
@@ -143,9 +143,8 @@ LIST_HEAD(tty_drivers);                     /* linked list of tty drivers */
 DEFINE_MUTEX(tty_mutex);
 
 static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
-static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
-ssize_t redirected_tty_write(struct file *, const char __user *,
-                                                       size_t, loff_t *);
+static ssize_t tty_write(struct kiocb *, struct iov_iter *);
+ssize_t redirected_tty_write(struct kiocb *, struct iov_iter *);
 static __poll_t tty_poll(struct file *, poll_table *);
 static int tty_open(struct inode *, struct file *);
 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
@@ -438,8 +437,7 @@ static ssize_t hung_up_tty_read(struct file *file, char __user *buf,
        return 0;
 }
 
-static ssize_t hung_up_tty_write(struct file *file, const char __user *buf,
-                                size_t count, loff_t *ppos)
+static ssize_t hung_up_tty_write(struct kiocb *iocb, struct iov_iter *from)
 {
        return -EIO;
 }
@@ -478,7 +476,8 @@ static void tty_show_fdinfo(struct seq_file *m, struct file *file)
 static const struct file_operations tty_fops = {
        .llseek         = no_llseek,
        .read           = tty_read,
-       .write          = tty_write,
+       .write_iter     = tty_write,
+       .splice_write   = iter_file_splice_write,
        .poll           = tty_poll,
        .unlocked_ioctl = tty_ioctl,
        .compat_ioctl   = tty_compat_ioctl,
@@ -491,7 +490,8 @@ static const struct file_operations tty_fops = {
 static const struct file_operations console_fops = {
        .llseek         = no_llseek,
        .read           = tty_read,
-       .write          = redirected_tty_write,
+       .write_iter     = redirected_tty_write,
+       .splice_write   = iter_file_splice_write,
        .poll           = tty_poll,
        .unlocked_ioctl = tty_ioctl,
        .compat_ioctl   = tty_compat_ioctl,
@@ -503,7 +503,7 @@ static const struct file_operations console_fops = {
 static const struct file_operations hung_up_tty_fops = {
        .llseek         = no_llseek,
        .read           = hung_up_tty_read,
-       .write          = hung_up_tty_write,
+       .write_iter     = hung_up_tty_write,
        .poll           = hung_up_tty_poll,
        .unlocked_ioctl = hung_up_tty_ioctl,
        .compat_ioctl   = hung_up_tty_compat_ioctl,
@@ -606,9 +606,9 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
        /* This breaks for file handles being sent over AF_UNIX sockets ? */
        list_for_each_entry(priv, &tty->tty_files, list) {
                filp = priv->file;
-               if (filp->f_op->write == redirected_tty_write)
+               if (filp->f_op->write_iter == redirected_tty_write)
                        cons_filp = filp;
-               if (filp->f_op->write != tty_write)
+               if (filp->f_op->write_iter != tty_write)
                        continue;
                closecount++;
                __tty_fasync(-1, filp, 0);      /* can't block */
@@ -901,9 +901,9 @@ static inline ssize_t do_tty_write(
        ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t),
        struct tty_struct *tty,
        struct file *file,
-       const char __user *buf,
-       size_t count)
+       struct iov_iter *from)
 {
+       size_t count = iov_iter_count(from);
        ssize_t ret, written = 0;
        unsigned int chunk;
 
@@ -955,14 +955,20 @@ static inline ssize_t do_tty_write(
                size_t size = count;
                if (size > chunk)
                        size = chunk;
+
                ret = -EFAULT;
-               if (copy_from_user(tty->write_buf, buf, size))
+               if (copy_from_iter(tty->write_buf, size, from) != size)
                        break;
+
                ret = write(tty, file, tty->write_buf, size);
                if (ret <= 0)
                        break;
+
+               /* FIXME! Have Al check this! */
+               if (ret != size)
+                       iov_iter_revert(from, size-ret);
+
                written += ret;
-               buf += ret;
                count -= ret;
                if (!count)
                        break;
@@ -1022,9 +1028,9 @@ void tty_write_message(struct tty_struct *tty, char *msg)
  *     write method will not be invoked in parallel for each device.
  */
 
-static ssize_t tty_write(struct file *file, const char __user *buf,
-                                               size_t count, loff_t *ppos)
+static ssize_t tty_write(struct kiocb *iocb, struct iov_iter *from)
 {
+       struct file *file = iocb->ki_filp;
        struct tty_struct *tty = file_tty(file);
        struct tty_ldisc *ld;
        ssize_t ret;
@@ -1038,17 +1044,16 @@ static ssize_t tty_write(struct file *file, const char __user *buf,
                tty_err(tty, "missing write_room method\n");
        ld = tty_ldisc_ref_wait(tty);
        if (!ld)
-               return hung_up_tty_write(file, buf, count, ppos);
+               return hung_up_tty_write(iocb, from);
        if (!ld->ops->write)
                ret = -EIO;
        else
-               ret = do_tty_write(ld->ops->write, tty, file, buf, count);
+               ret = do_tty_write(ld->ops->write, tty, file, from);
        tty_ldisc_deref(ld);
        return ret;
 }
 
-ssize_t redirected_tty_write(struct file *file, const char __user *buf,
-                                               size_t count, loff_t *ppos)
+ssize_t redirected_tty_write(struct kiocb *iocb, struct iov_iter *iter)
 {
        struct file *p = NULL;
 
@@ -1059,11 +1064,11 @@ ssize_t redirected_tty_write(struct file *file, const char __user *buf,
 
        if (p) {
                ssize_t res;
-               res = vfs_write(p, buf, count, &p->f_pos);
+               res = vfs_iocb_iter_write(p, iocb, iter);
                fput(p);
                return res;
        }
-       return tty_write(file, buf, count, ppos);
+       return tty_write(iocb, iter);
 }
 
 /*
@@ -2295,7 +2300,7 @@ static int tioccons(struct file *file)
 {
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
-       if (file->f_op->write == redirected_tty_write) {
+       if (file->f_op->write_iter == redirected_tty_write) {
                struct file *f;
                spin_lock(&redirect_lock);
                f = redirect;
index eced70e..17f05b7 100644 (file)
@@ -2,13 +2,6 @@
 /*
  * Copyright (C) 2019 Axis Communications AB
  *
- * The console is useful for userspace applications which expect a console
- * device to work without modifications even when no console is available
- * or desired.
- *
- * In order to use this driver, you should redirect the console to this
- * TTY, or boot the kernel with console=ttynull.
- *
  * Based on ttyprintk.c:
  *  Copyright (C) 2010 Samo Pogacnik
  */
@@ -66,17 +59,6 @@ static struct console ttynull_console = {
        .device = ttynull_device,
 };
 
-void __init register_ttynull_console(void)
-{
-       if (!ttynull_driver)
-               return;
-
-       if (add_preferred_console(ttynull_console.name, 0, NULL))
-               return;
-
-       register_console(&ttynull_console);
-}
-
 static int __init ttynull_init(void)
 {
        struct tty_driver *driver;
index 22a56c4..7990fee 100644 (file)
@@ -185,7 +185,11 @@ static int cdns_imx_probe(struct platform_device *pdev)
        }
 
        data->num_clks = ARRAY_SIZE(imx_cdns3_core_clks);
-       data->clks = (struct clk_bulk_data *)imx_cdns3_core_clks;
+       data->clks = devm_kmemdup(dev, imx_cdns3_core_clks,
+                               sizeof(imx_cdns3_core_clks), GFP_KERNEL);
+       if (!data->clks)
+               return -ENOMEM;
+
        ret = devm_clk_bulk_get(dev, data->num_clks, data->clks);
        if (ret)
                return ret;
@@ -214,20 +218,16 @@ err:
        return ret;
 }
 
-static int cdns_imx_remove_core(struct device *dev, void *data)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-
-       platform_device_unregister(pdev);
-
-       return 0;
-}
-
 static int cdns_imx_remove(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
+       struct cdns_imx *data = dev_get_drvdata(dev);
 
-       device_for_each_child(dev, NULL, cdns_imx_remove_core);
+       pm_runtime_get_sync(dev);
+       of_platform_depopulate(dev);
+       clk_bulk_disable_unprepare(data->num_clks, data->clks);
+       pm_runtime_disable(dev);
+       pm_runtime_put_noidle(dev);
        platform_set_drvdata(pdev, NULL);
 
        return 0;
index 9e12152..8b7bc10 100644 (file)
@@ -139,9 +139,13 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
        misc_pdev = of_find_device_by_node(args.np);
        of_node_put(args.np);
 
-       if (!misc_pdev || !platform_get_drvdata(misc_pdev))
+       if (!misc_pdev)
                return ERR_PTR(-EPROBE_DEFER);
 
+       if (!platform_get_drvdata(misc_pdev)) {
+               put_device(&misc_pdev->dev);
+               return ERR_PTR(-EPROBE_DEFER);
+       }
        data->dev = &misc_pdev->dev;
 
        /*
index f52f1bc..7819057 100644 (file)
@@ -1895,6 +1895,10 @@ static const struct usb_device_id acm_ids[] = {
        { USB_DEVICE(0x04d8, 0xfd08),
        .driver_info = IGNORE_DEVICE,
        },
+
+       { USB_DEVICE(0x04d8, 0xf58b),
+       .driver_info = IGNORE_DEVICE,
+       },
 #endif
 
        /*Samsung phone in firmware update mode */
index 02d0cfd..508b1c3 100644 (file)
@@ -465,13 +465,23 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
        if (!desc->resp_count || !--desc->resp_count)
                goto out;
 
+       if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
+               rv = -ENODEV;
+               goto out;
+       }
+       if (test_bit(WDM_RESETTING, &desc->flags)) {
+               rv = -EIO;
+               goto out;
+       }
+
        set_bit(WDM_RESPONDING, &desc->flags);
        spin_unlock_irq(&desc->iuspin);
        rv = usb_submit_urb(desc->response, GFP_KERNEL);
        spin_lock_irq(&desc->iuspin);
        if (rv) {
-               dev_err(&desc->intf->dev,
-                       "usb_submit_urb failed with result %d\n", rv);
+               if (!test_bit(WDM_DISCONNECTING, &desc->flags))
+                       dev_err(&desc->intf->dev,
+                               "usb_submit_urb failed with result %d\n", rv);
 
                /* make sure the next notification trigger a submit */
                clear_bit(WDM_RESPONDING, &desc->flags);
@@ -1027,9 +1037,9 @@ static void wdm_disconnect(struct usb_interface *intf)
        wake_up_all(&desc->wait);
        mutex_lock(&desc->rlock);
        mutex_lock(&desc->wlock);
-       kill_urbs(desc);
        cancel_work_sync(&desc->rxwork);
        cancel_work_sync(&desc->service_outs_intr);
+       kill_urbs(desc);
        mutex_unlock(&desc->wlock);
        mutex_unlock(&desc->rlock);
 
index 67cbd42..134dc20 100644 (file)
@@ -274,8 +274,25 @@ static int usblp_ctrl_msg(struct usblp *usblp, int request, int type, int dir, i
 #define usblp_reset(usblp)\
        usblp_ctrl_msg(usblp, USBLP_REQ_RESET, USB_TYPE_CLASS, USB_DIR_OUT, USB_RECIP_OTHER, 0, NULL, 0)
 
-#define usblp_hp_channel_change_request(usblp, channel, buffer) \
-       usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST, USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE, channel, buffer, 1)
+static int usblp_hp_channel_change_request(struct usblp *usblp, int channel, u8 *new_channel)
+{
+       u8 *buf;
+       int ret;
+
+       buf = kzalloc(1, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       ret = usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST,
+                       USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE,
+                       channel, buf, 1);
+       if (ret == 0)
+               *new_channel = buf[0];
+
+       kfree(buf);
+
+       return ret;
+}
 
 /*
  * See the description for usblp_select_alts() below for the usage
index 60886a7..ad5a0f4 100644 (file)
@@ -1649,14 +1649,12 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
        urb->status = status;
        /*
         * This function can be called in task context inside another remote
-        * coverage collection section, but KCOV doesn't support that kind of
+        * coverage collection section, but kcov doesn't support that kind of
         * recursion yet. Only collect coverage in softirq context for now.
         */
-       if (in_serving_softirq())
-               kcov_remote_start_usb((u64)urb->dev->bus->busnum);
+       kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
        urb->complete(urb);
-       if (in_serving_softirq())
-               kcov_remote_stop();
+       kcov_remote_stop_softirq();
 
        usb_anchor_resume_wakeups(anchor);
        atomic_dec(&urb->use_count);
index 2f95f08..1b241f9 100644 (file)
 
 /* Global USB2 PHY Vendor Control Register */
 #define DWC3_GUSB2PHYACC_NEWREGREQ     BIT(25)
+#define DWC3_GUSB2PHYACC_DONE          BIT(24)
 #define DWC3_GUSB2PHYACC_BUSY          BIT(23)
 #define DWC3_GUSB2PHYACC_WRITE         BIT(22)
 #define DWC3_GUSB2PHYACC_ADDR(n)       (n << 16)
index 417e053..bdf1f98 100644 (file)
@@ -754,7 +754,7 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
 
        ret = priv->drvdata->setup_regmaps(priv, base);
        if (ret)
-               return ret;
+               goto err_disable_clks;
 
        if (priv->vbus) {
                ret = regulator_enable(priv->vbus);
index 78cb4db..ee44321 100644 (file)
@@ -1763,6 +1763,8 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
                        list_for_each_entry_safe(r, t, &dep->started_list, list)
                                dwc3_gadget_move_cancelled_request(r);
 
+                       dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE;
+
                        goto out;
                }
        }
@@ -2083,6 +2085,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
 
 static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
 static void __dwc3_gadget_stop(struct dwc3 *dwc);
+static int __dwc3_gadget_start(struct dwc3 *dwc);
 
 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
 {
@@ -2145,6 +2148,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
                        dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) %
                                                dwc->ev_buf->length;
                }
+       } else {
+               __dwc3_gadget_start(dwc);
        }
 
        ret = dwc3_gadget_run_stop(dwc, is_on, false);
@@ -2319,10 +2324,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
        }
 
        dwc->gadget_driver      = driver;
-
-       if (pm_runtime_active(dwc->dev))
-               __dwc3_gadget_start(dwc);
-
        spin_unlock_irqrestore(&dwc->lock, flags);
 
        return 0;
@@ -2348,13 +2349,6 @@ static int dwc3_gadget_stop(struct usb_gadget *g)
        unsigned long           flags;
 
        spin_lock_irqsave(&dwc->lock, flags);
-
-       if (pm_runtime_suspended(dwc->dev))
-               goto out;
-
-       __dwc3_gadget_stop(dwc);
-
-out:
        dwc->gadget_driver      = NULL;
        spin_unlock_irqrestore(&dwc->lock, flags);
 
index aa213c9..f23f4c9 100644 (file)
@@ -7,6 +7,8 @@
  * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
  */
 
+#include <linux/delay.h>
+#include <linux/time64.h>
 #include <linux/ulpi/regs.h>
 
 #include "core.h"
                DWC3_GUSB2PHYACC_ADDR(ULPI_ACCESS_EXTENDED) | \
                DWC3_GUSB2PHYACC_EXTEND_ADDR(a) : DWC3_GUSB2PHYACC_ADDR(a))
 
-static int dwc3_ulpi_busyloop(struct dwc3 *dwc)
+#define DWC3_ULPI_BASE_DELAY   DIV_ROUND_UP(NSEC_PER_SEC, 60000000L)
+
+static int dwc3_ulpi_busyloop(struct dwc3 *dwc, u8 addr, bool read)
 {
-       unsigned int count = 1000;
+       unsigned long ns = 5L * DWC3_ULPI_BASE_DELAY;
+       unsigned int count = 10000;
        u32 reg;
 
+       if (addr >= ULPI_EXT_VENDOR_SPECIFIC)
+               ns += DWC3_ULPI_BASE_DELAY;
+
+       if (read)
+               ns += DWC3_ULPI_BASE_DELAY;
+
+       reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+       if (reg & DWC3_GUSB2PHYCFG_SUSPHY)
+               usleep_range(1000, 1200);
+
        while (count--) {
+               ndelay(ns);
                reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYACC(0));
-               if (!(reg & DWC3_GUSB2PHYACC_BUSY))
+               if (reg & DWC3_GUSB2PHYACC_DONE)
                        return 0;
                cpu_relax();
        }
@@ -38,16 +54,10 @@ static int dwc3_ulpi_read(struct device *dev, u8 addr)
        u32 reg;
        int ret;
 
-       reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
-       if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
-               reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
-               dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
-       }
-
        reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
        dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
 
-       ret = dwc3_ulpi_busyloop(dwc);
+       ret = dwc3_ulpi_busyloop(dwc, addr, true);
        if (ret)
                return ret;
 
@@ -61,17 +71,11 @@ static int dwc3_ulpi_write(struct device *dev, u8 addr, u8 val)
        struct dwc3 *dwc = dev_get_drvdata(dev);
        u32 reg;
 
-       reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
-       if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
-               reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
-               dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
-       }
-
        reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
        reg |= DWC3_GUSB2PHYACC_WRITE | val;
        dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
 
-       return dwc3_ulpi_busyloop(dwc);
+       return dwc3_ulpi_busyloop(dwc, addr, false);
 }
 
 static const struct ulpi_ops dwc3_ulpi_ops = {
index 7e47e62..2d15257 100644 (file)
@@ -265,6 +265,7 @@ config USB_CONFIGFS_NCM
        depends on NET
        select USB_U_ETHER
        select USB_F_NCM
+       select CRC32
        help
          NCM is an advanced protocol for Ethernet encapsulation, allows
          grouping of several ethernet frames into one USB transfer and
@@ -314,6 +315,7 @@ config USB_CONFIGFS_EEM
        depends on NET
        select USB_U_ETHER
        select USB_F_EEM
+       select CRC32
        help
          CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM
          and therefore can be supported by more hardware.  Technically ECM and
index c6d455f..1a556a6 100644 (file)
@@ -392,8 +392,11 @@ int usb_function_deactivate(struct usb_function *function)
 
        spin_lock_irqsave(&cdev->lock, flags);
 
-       if (cdev->deactivations == 0)
+       if (cdev->deactivations == 0) {
+               spin_unlock_irqrestore(&cdev->lock, flags);
                status = usb_gadget_deactivate(cdev->gadget);
+               spin_lock_irqsave(&cdev->lock, flags);
+       }
        if (status == 0)
                cdev->deactivations++;
 
@@ -424,8 +427,11 @@ int usb_function_activate(struct usb_function *function)
                status = -EINVAL;
        else {
                cdev->deactivations--;
-               if (cdev->deactivations == 0)
+               if (cdev->deactivations == 0) {
+                       spin_unlock_irqrestore(&cdev->lock, flags);
                        status = usb_gadget_activate(cdev->gadget);
+                       spin_lock_irqsave(&cdev->lock, flags);
+               }
        }
 
        spin_unlock_irqrestore(&cdev->lock, flags);
index 56051bb..36ffb43 100644 (file)
@@ -221,9 +221,16 @@ static ssize_t gadget_dev_desc_bcdUSB_store(struct config_item *item,
 
 static ssize_t gadget_dev_desc_UDC_show(struct config_item *item, char *page)
 {
-       char *udc_name = to_gadget_info(item)->composite.gadget_driver.udc_name;
+       struct gadget_info *gi = to_gadget_info(item);
+       char *udc_name;
+       int ret;
+
+       mutex_lock(&gi->lock);
+       udc_name = gi->composite.gadget_driver.udc_name;
+       ret = sprintf(page, "%s\n", udc_name ?: "");
+       mutex_unlock(&gi->lock);
 
-       return sprintf(page, "%s\n", udc_name ?: "");
+       return ret;
 }
 
 static int unregister_gadget(struct gadget_info *gi)
@@ -1248,9 +1255,9 @@ static void purge_configs_funcs(struct gadget_info *gi)
 
                cfg = container_of(c, struct config_usb_cfg, c);
 
-               list_for_each_entry_safe(f, tmp, &c->functions, list) {
+               list_for_each_entry_safe_reverse(f, tmp, &c->functions, list) {
 
-                       list_move_tail(&f->list, &cfg->func_list);
+                       list_move(&f->list, &cfg->func_list);
                        if (f->unbind) {
                                dev_dbg(&gi->cdev.gadget->dev,
                                        "unbind function '%s'/%p\n",
@@ -1536,7 +1543,7 @@ static const struct usb_gadget_driver configfs_driver_template = {
        .suspend        = configfs_composite_suspend,
        .resume         = configfs_composite_resume,
 
-       .max_speed      = USB_SPEED_SUPER,
+       .max_speed      = USB_SPEED_SUPER_PLUS,
        .driver = {
                .owner          = THIS_MODULE,
                .name           = "configfs-gadget",
@@ -1576,7 +1583,7 @@ static struct config_group *gadgets_make(
        gi->composite.unbind = configfs_do_nothing;
        gi->composite.suspend = NULL;
        gi->composite.resume = NULL;
-       gi->composite.max_speed = USB_SPEED_SUPER;
+       gi->composite.max_speed = USB_SPEED_SUPER_PLUS;
 
        spin_lock_init(&gi->spinlock);
        mutex_init(&gi->lock);
index 64a4112..2f1eb2e 100644 (file)
@@ -1162,6 +1162,7 @@ fail_tx_reqs:
                printer_req_free(dev->in_ep, req);
        }
 
+       usb_free_all_descriptors(f);
        return ret;
 
 }
index 3633df6..5d960b6 100644 (file)
@@ -271,7 +271,7 @@ static struct usb_endpoint_descriptor fs_epout_desc = {
 
        .bEndpointAddress = USB_DIR_OUT,
        .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
-       .wMaxPacketSize = cpu_to_le16(1023),
+       /* .wMaxPacketSize = DYNAMIC */
        .bInterval = 1,
 };
 
@@ -280,7 +280,7 @@ static struct usb_endpoint_descriptor hs_epout_desc = {
        .bDescriptorType = USB_DT_ENDPOINT,
 
        .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
-       .wMaxPacketSize = cpu_to_le16(1024),
+       /* .wMaxPacketSize = DYNAMIC */
        .bInterval = 4,
 };
 
@@ -348,7 +348,7 @@ static struct usb_endpoint_descriptor fs_epin_desc = {
 
        .bEndpointAddress = USB_DIR_IN,
        .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
-       .wMaxPacketSize = cpu_to_le16(1023),
+       /* .wMaxPacketSize = DYNAMIC */
        .bInterval = 1,
 };
 
@@ -357,7 +357,7 @@ static struct usb_endpoint_descriptor hs_epin_desc = {
        .bDescriptorType = USB_DT_ENDPOINT,
 
        .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
-       .wMaxPacketSize = cpu_to_le16(1024),
+       /* .wMaxPacketSize = DYNAMIC */
        .bInterval = 4,
 };
 
@@ -444,12 +444,28 @@ struct cntrl_range_lay3 {
        __le32  dRES;
 } __packed;
 
-static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
+static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
        struct usb_endpoint_descriptor *ep_desc,
-       unsigned int factor, bool is_playback)
+       enum usb_device_speed speed, bool is_playback)
 {
        int chmask, srate, ssize;
-       u16 max_packet_size;
+       u16 max_size_bw, max_size_ep;
+       unsigned int factor;
+
+       switch (speed) {
+       case USB_SPEED_FULL:
+               max_size_ep = 1023;
+               factor = 1000;
+               break;
+
+       case USB_SPEED_HIGH:
+               max_size_ep = 1024;
+               factor = 8000;
+               break;
+
+       default:
+               return -EINVAL;
+       }
 
        if (is_playback) {
                chmask = uac2_opts->p_chmask;
@@ -461,10 +477,12 @@ static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
                ssize = uac2_opts->c_ssize;
        }
 
-       max_packet_size = num_channels(chmask) * ssize *
+       max_size_bw = num_channels(chmask) * ssize *
                DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1)));
-       ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_packet_size,
-                               le16_to_cpu(ep_desc->wMaxPacketSize)));
+       ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_size_bw,
+                                                   max_size_ep));
+
+       return 0;
 }
 
 /* Use macro to overcome line length limitation */
@@ -670,10 +688,33 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
        }
 
        /* Calculate wMaxPacketSize according to audio bandwidth */
-       set_ep_max_packet_size(uac2_opts, &fs_epin_desc, 1000, true);
-       set_ep_max_packet_size(uac2_opts, &fs_epout_desc, 1000, false);
-       set_ep_max_packet_size(uac2_opts, &hs_epin_desc, 8000, true);
-       set_ep_max_packet_size(uac2_opts, &hs_epout_desc, 8000, false);
+       ret = set_ep_max_packet_size(uac2_opts, &fs_epin_desc, USB_SPEED_FULL,
+                                    true);
+       if (ret < 0) {
+               dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+               return ret;
+       }
+
+       ret = set_ep_max_packet_size(uac2_opts, &fs_epout_desc, USB_SPEED_FULL,
+                                    false);
+       if (ret < 0) {
+               dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+               return ret;
+       }
+
+       ret = set_ep_max_packet_size(uac2_opts, &hs_epin_desc, USB_SPEED_HIGH,
+                                    true);
+       if (ret < 0) {
+               dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+               return ret;
+       }
+
+       ret = set_ep_max_packet_size(uac2_opts, &hs_epout_desc, USB_SPEED_HIGH,
+                                    false);
+       if (ret < 0) {
+               dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+               return ret;
+       }
 
        if (EPOUT_EN(uac2_opts)) {
                agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
index 31ea76a..c019f2b 100644 (file)
 #define UETH__VERSION  "29-May-2008"
 
 /* Experiments show that both Linux and Windows hosts allow up to 16k
- * frame sizes. Set the max size to 15k+52 to prevent allocating 32k
+ * frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k
  * blocks and still have efficient handling. */
-#define GETHER_MAX_ETH_FRAME_LEN 15412
+#define GETHER_MAX_MTU_SIZE 15412
+#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)
 
 struct eth_dev {
        /* lock is held while accessing port_usb
@@ -786,7 +787,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
 
        /* MTU range: 14 - 15412 */
        net->min_mtu = ETH_HLEN;
-       net->max_mtu = GETHER_MAX_ETH_FRAME_LEN;
+       net->max_mtu = GETHER_MAX_MTU_SIZE;
 
        dev->gadget = g;
        SET_NETDEV_DEV(net, &g->dev);
@@ -848,7 +849,7 @@ struct net_device *gether_setup_name_default(const char *netname)
 
        /* MTU range: 14 - 15412 */
        net->min_mtu = ETH_HLEN;
-       net->max_mtu = GETHER_MAX_ETH_FRAME_LEN;
+       net->max_mtu = GETHER_MAX_MTU_SIZE;
 
        return net;
 }
index 59be2d8..e8033e5 100644 (file)
@@ -200,8 +200,10 @@ static int acm_ms_bind(struct usb_composite_dev *cdev)
                struct usb_descriptor_header *usb_desc;
 
                usb_desc = usb_otg_descriptor_alloc(gadget);
-               if (!usb_desc)
+               if (!usb_desc) {
+                       status = -ENOMEM;
                        goto fail_string_ids;
+               }
                usb_otg_descriptor_init(gadget, usb_desc);
                otg_desc[0] = usb_desc;
                otg_desc[1] = NULL;
index 1a12aab..8c614bb 100644 (file)
@@ -90,7 +90,7 @@ config USB_BCM63XX_UDC
 
 config USB_FSL_USB2
        tristate "Freescale Highspeed USB DR Peripheral Controller"
-       depends on FSL_SOC || ARCH_MXC
+       depends on FSL_SOC
        help
           Some of Freescale PowerPC and i.MX processors have a High Speed
           Dual-Role(DR) USB controller, which supports device mode.
index f5a7ce2..a21f222 100644 (file)
@@ -23,7 +23,6 @@ obj-$(CONFIG_USB_ATMEL_USBA)  += atmel_usba_udc.o
 obj-$(CONFIG_USB_BCM63XX_UDC)  += bcm63xx_udc.o
 obj-$(CONFIG_USB_FSL_USB2)     += fsl_usb2_udc.o
 fsl_usb2_udc-y                 := fsl_udc_core.o
-fsl_usb2_udc-$(CONFIG_ARCH_MXC)        += fsl_mxc_udc.o
 obj-$(CONFIG_USB_TEGRA_XUDC)   += tegra-xudc.o
 obj-$(CONFIG_USB_M66592)       += m66592-udc.o
 obj-$(CONFIG_USB_R8A66597)     += r8a66597-udc.o
index 0bd6b20..02d8bfa 100644 (file)
@@ -420,7 +420,10 @@ static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep,
        u32 state, reg, loops;
 
        /* Stop DMA activity */
-       writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+       if (ep->epn.desc_mode)
+               writel(VHUB_EP_DMA_CTRL_RESET, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+       else
+               writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
 
        /* Wait for it to complete */
        for (loops = 0; loops < 1000; loops++) {
index 3e88c76..fb01ff4 100644 (file)
@@ -17,7 +17,7 @@ if USB_BDC_UDC
 comment "Platform Support"
 config USB_BDC_PCI
        tristate "BDC support for PCIe based platforms"
-       depends on USB_PCI
+       depends on USB_PCI && BROKEN
        default USB_BDC_UDC
        help
                Enable support for platforms which have BDC connected through PCIe, such as Lego3 FPGA platform.
index 5b5cfeb..ea114f9 100644 (file)
@@ -659,8 +659,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect);
  *
  * Enables the D+ (or potentially D-) pullup.  The host will start
  * enumerating this gadget when the pullup is active and a VBUS session
- * is active (the link is powered).  This pullup is always enabled unless
- * usb_gadget_disconnect() has been used to disable it.
+ * is active (the link is powered).
  *
  * Returns zero on success, else negative errno.
  */
@@ -1530,10 +1529,13 @@ static ssize_t soft_connect_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t n)
 {
        struct usb_udc          *udc = container_of(dev, struct usb_udc, dev);
+       ssize_t                 ret;
 
+       mutex_lock(&udc_lock);
        if (!udc->driver) {
                dev_err(dev, "soft-connect without a gadget driver\n");
-               return -EOPNOTSUPP;
+               ret = -EOPNOTSUPP;
+               goto out;
        }
 
        if (sysfs_streq(buf, "connect")) {
@@ -1544,10 +1546,14 @@ static ssize_t soft_connect_store(struct device *dev,
                usb_gadget_udc_stop(udc);
        } else {
                dev_err(dev, "unsupported command '%s'\n", buf);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
-       return n;
+       ret = n;
+out:
+       mutex_unlock(&udc_lock);
+       return ret;
 }
 static DEVICE_ATTR_WO(soft_connect);
 
index ab5e978..5706776 100644 (file)
@@ -2118,9 +2118,21 @@ static int dummy_hub_control(
                                dum_hcd->port_status &= ~USB_PORT_STAT_POWER;
                        set_link_state(dum_hcd);
                        break;
-               default:
+               case USB_PORT_FEAT_ENABLE:
+               case USB_PORT_FEAT_C_ENABLE:
+               case USB_PORT_FEAT_C_SUSPEND:
+                       /* Not allowed for USB-3 */
+                       if (hcd->speed == HCD_USB3)
+                               goto error;
+                       fallthrough;
+               case USB_PORT_FEAT_C_CONNECTION:
+               case USB_PORT_FEAT_C_RESET:
                        dum_hcd->port_status &= ~(1 << wValue);
                        set_link_state(dum_hcd);
+                       break;
+               default:
+               /* Disallow INDICATOR and C_OVER_CURRENT */
+                       goto error;
                }
                break;
        case GetHubDescriptor:
@@ -2258,17 +2270,20 @@ static int dummy_hub_control(
                        }
                        fallthrough;
                case USB_PORT_FEAT_RESET:
+                       if (!(dum_hcd->port_status & USB_PORT_STAT_CONNECTION))
+                               break;
                        /* if it's already enabled, disable */
                        if (hcd->speed == HCD_USB3) {
-                               dum_hcd->port_status = 0;
                                dum_hcd->port_status =
                                        (USB_SS_PORT_STAT_POWER |
                                         USB_PORT_STAT_CONNECTION |
                                         USB_PORT_STAT_RESET);
-                       } else
+                       } else {
                                dum_hcd->port_status &= ~(USB_PORT_STAT_ENABLE
                                        | USB_PORT_STAT_LOW_SPEED
                                        | USB_PORT_STAT_HIGH_SPEED);
+                               dum_hcd->port_status |= USB_PORT_STAT_RESET;
+                       }
                        /*
                         * We want to reset device status. All but the
                         * Self powered feature
@@ -2280,19 +2295,19 @@ static int dummy_hub_control(
                         * interval? Is it still 50msec as for HS?
                         */
                        dum_hcd->re_timeout = jiffies + msecs_to_jiffies(50);
-                       fallthrough;
-               default:
-                       if (hcd->speed == HCD_USB3) {
-                               if ((dum_hcd->port_status &
-                                    USB_SS_PORT_STAT_POWER) != 0) {
-                                       dum_hcd->port_status |= (1 << wValue);
-                               }
-                       } else
-                               if ((dum_hcd->port_status &
-                                    USB_PORT_STAT_POWER) != 0) {
-                                       dum_hcd->port_status |= (1 << wValue);
-                               }
                        set_link_state(dum_hcd);
+                       break;
+               case USB_PORT_FEAT_C_CONNECTION:
+               case USB_PORT_FEAT_C_RESET:
+               case USB_PORT_FEAT_C_ENABLE:
+               case USB_PORT_FEAT_C_SUSPEND:
+                       /* Not allowed for USB-3, and ignored for USB-2 */
+                       if (hcd->speed == HCD_USB3)
+                               goto error;
+                       break;
+               default:
+               /* Disallow TEST, INDICATOR, and C_OVER_CURRENT */
+                       goto error;
                }
                break;
        case GetPortErrorCount:
diff --git a/drivers/usb/gadget/udc/fsl_mxc_udc.c b/drivers/usb/gadget/udc/fsl_mxc_udc.c
deleted file mode 100644 (file)
index 5a32199..0000000
+++ /dev/null
@@ -1,122 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (C) 2009
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- *
- * Description:
- * Helper routines for i.MX3x SoCs from Freescale, needed by the fsl_usb2_udc.c
- * driver to function correctly on these systems.
- */
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/fsl_devices.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include "fsl_usb2_udc.h"
-
-static struct clk *mxc_ahb_clk;
-static struct clk *mxc_per_clk;
-static struct clk *mxc_ipg_clk;
-
-/* workaround ENGcm09152 for i.MX35 */
-#define MX35_USBPHYCTRL_OFFSET         0x600
-#define USBPHYCTRL_OTGBASE_OFFSET      0x8
-#define USBPHYCTRL_EVDO                        (1 << 23)
-
-int fsl_udc_clk_init(struct platform_device *pdev)
-{
-       struct fsl_usb2_platform_data *pdata;
-       unsigned long freq;
-       int ret;
-
-       pdata = dev_get_platdata(&pdev->dev);
-
-       mxc_ipg_clk = devm_clk_get(&pdev->dev, "ipg");
-       if (IS_ERR(mxc_ipg_clk)) {
-               dev_err(&pdev->dev, "clk_get(\"ipg\") failed\n");
-               return PTR_ERR(mxc_ipg_clk);
-       }
-
-       mxc_ahb_clk = devm_clk_get(&pdev->dev, "ahb");
-       if (IS_ERR(mxc_ahb_clk)) {
-               dev_err(&pdev->dev, "clk_get(\"ahb\") failed\n");
-               return PTR_ERR(mxc_ahb_clk);
-       }
-
-       mxc_per_clk = devm_clk_get(&pdev->dev, "per");
-       if (IS_ERR(mxc_per_clk)) {
-               dev_err(&pdev->dev, "clk_get(\"per\") failed\n");
-               return PTR_ERR(mxc_per_clk);
-       }
-
-       clk_prepare_enable(mxc_ipg_clk);
-       clk_prepare_enable(mxc_ahb_clk);
-       clk_prepare_enable(mxc_per_clk);
-
-       /* make sure USB_CLK is running at 60 MHz +/- 1000 Hz */
-       if (!strcmp(pdev->id_entry->name, "imx-udc-mx27")) {
-               freq = clk_get_rate(mxc_per_clk);
-               if (pdata->phy_mode != FSL_USB2_PHY_ULPI &&
-                   (freq < 59999000 || freq > 60001000)) {
-                       dev_err(&pdev->dev, "USB_CLK=%lu, should be 60MHz\n", freq);
-                       ret = -EINVAL;
-                       goto eclkrate;
-               }
-       }
-
-       return 0;
-
-eclkrate:
-       clk_disable_unprepare(mxc_ipg_clk);
-       clk_disable_unprepare(mxc_ahb_clk);
-       clk_disable_unprepare(mxc_per_clk);
-       mxc_per_clk = NULL;
-       return ret;
-}
-
-int fsl_udc_clk_finalize(struct platform_device *pdev)
-{
-       struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
-       int ret = 0;
-
-       /* workaround ENGcm09152 for i.MX35 */
-       if (pdata->workaround & FLS_USB2_WORKAROUND_ENGCM09152) {
-               unsigned int v;
-               struct resource *res = platform_get_resource
-                       (pdev, IORESOURCE_MEM, 0);
-               void __iomem *phy_regs = ioremap(res->start +
-                                               MX35_USBPHYCTRL_OFFSET, 512);
-               if (!phy_regs) {
-                       dev_err(&pdev->dev, "ioremap for phy address fails\n");
-                       ret = -EINVAL;
-                       goto ioremap_err;
-               }
-
-               v = readl(phy_regs + USBPHYCTRL_OTGBASE_OFFSET);
-               writel(v | USBPHYCTRL_EVDO,
-                       phy_regs + USBPHYCTRL_OTGBASE_OFFSET);
-
-               iounmap(phy_regs);
-       }
-
-
-ioremap_err:
-       /* ULPI transceivers don't need usbpll */
-       if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
-               clk_disable_unprepare(mxc_per_clk);
-               mxc_per_clk = NULL;
-       }
-
-       return ret;
-}
-
-void fsl_udc_clk_release(void)
-{
-       if (mxc_per_clk)
-               clk_disable_unprepare(mxc_per_clk);
-       clk_disable_unprepare(mxc_ahb_clk);
-       clk_disable_unprepare(mxc_ipg_clk);
-}
index e358ae1..1926b32 100644 (file)
@@ -574,6 +574,7 @@ static int ehci_run (struct usb_hcd *hcd)
        struct ehci_hcd         *ehci = hcd_to_ehci (hcd);
        u32                     temp;
        u32                     hcc_params;
+       int                     rc;
 
        hcd->uses_new_polling = 1;
 
@@ -629,9 +630,20 @@ static int ehci_run (struct usb_hcd *hcd)
        down_write(&ehci_cf_port_reset_rwsem);
        ehci->rh_state = EHCI_RH_RUNNING;
        ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
+
+       /* Wait until HC become operational */
        ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
        msleep(5);
+       rc = ehci_handshake(ehci, &ehci->regs->status, STS_HALT, 0, 100 * 1000);
+
        up_write(&ehci_cf_port_reset_rwsem);
+
+       if (rc) {
+               ehci_err(ehci, "USB %x.%x, controller refused to start: %d\n",
+                        ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), rc);
+               return rc;
+       }
+
        ehci->last_periodic_enable = ktime_get_real();
 
        temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
index 087402a..9f9ab5c 100644 (file)
@@ -345,6 +345,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
 
        unlink_empty_async_suspended(ehci);
 
+       /* Some Synopsys controllers mistakenly leave IAA turned on */
+       ehci_writel(ehci, STS_IAA, &ehci->regs->status);
+
        /* Any IAA cycle that started before the suspend is now invalid */
        end_iaa_cycle(ehci);
        ehci_handle_start_intr_unlinks(ehci);
index 5677b81..cf0c93a 100644 (file)
@@ -2931,6 +2931,8 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
        trb->field[0] = cpu_to_le32(field1);
        trb->field[1] = cpu_to_le32(field2);
        trb->field[2] = cpu_to_le32(field3);
+       /* make sure TRB is fully written before giving it to the controller */
+       wmb();
        trb->field[3] = cpu_to_le32(field4);
 
        trace_xhci_queue_trb(ring, trb);
index 934be16..50bb91b 100644 (file)
@@ -623,6 +623,13 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
                                                                     enable);
                        if (err < 0)
                                break;
+
+                       /*
+                        * wait 500us for LFPS detector to be disabled before
+                        * sending ACK
+                        */
+                       if (!enable)
+                               usleep_range(500, 1000);
                }
 
                if (err < 0) {
index 91ab81c..e869405 100644 (file)
@@ -4770,19 +4770,19 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
 {
        unsigned long long timeout_ns;
 
+       if (xhci->quirks & XHCI_INTEL_HOST)
+               timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
+       else
+               timeout_ns = udev->u1_params.sel;
+
        /* Prevent U1 if service interval is shorter than U1 exit latency */
        if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
-               if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
+               if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
                        dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
                        return USB3_LPM_DISABLED;
                }
        }
 
-       if (xhci->quirks & XHCI_INTEL_HOST)
-               timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
-       else
-               timeout_ns = udev->u1_params.sel;
-
        /* The U1 timeout is encoded in 1us intervals.
         * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
         */
@@ -4834,19 +4834,19 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
 {
        unsigned long long timeout_ns;
 
+       if (xhci->quirks & XHCI_INTEL_HOST)
+               timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
+       else
+               timeout_ns = udev->u2_params.sel;
+
        /* Prevent U2 if service interval is shorter than U2 exit latency */
        if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
-               if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
+               if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
                        dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
                        return USB3_LPM_DISABLED;
                }
        }
 
-       if (xhci->quirks & XHCI_INTEL_HOST)
-               timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
-       else
-               timeout_ns = udev->u2_params.sel;
-
        /* The U2 timeout is encoded in 256us intervals */
        timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
        /* If the necessary timeout value is bigger than what we can set in the
index 73ebfa6..c640f98 100644 (file)
@@ -496,6 +496,9 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
                timeout = schedule_timeout(YUREX_WRITE_TIMEOUT);
        finish_wait(&dev->waitq, &wait);
 
+       /* make sure URB is idle after timeout or (spurious) CMD_ACK */
+       usb_kill_urb(dev->cntl_urb);
+
        mutex_unlock(&dev->io_mutex);
 
        if (retval < 0) {
index f1201d4..e8f06b4 100644 (file)
@@ -532,23 +532,29 @@ static int iuu_uart_flush(struct usb_serial_port *port)
        struct device *dev = &port->dev;
        int i;
        int status;
-       u8 rxcmd = IUU_UART_RX;
+       u8 *rxcmd;
        struct iuu_private *priv = usb_get_serial_port_data(port);
 
        if (iuu_led(port, 0xF000, 0, 0, 0xFF) < 0)
                return -EIO;
 
+       rxcmd = kmalloc(1, GFP_KERNEL);
+       if (!rxcmd)
+               return -ENOMEM;
+
+       rxcmd[0] = IUU_UART_RX;
+
        for (i = 0; i < 2; i++) {
-               status = bulk_immediate(port, &rxcmd, 1);
+               status = bulk_immediate(port, rxcmd, 1);
                if (status != IUU_OPERATION_OK) {
                        dev_dbg(dev, "%s - uart_flush_write error\n", __func__);
-                       return status;
+                       goto out_free;
                }
 
                status = read_immediate(port, &priv->len, 1);
                if (status != IUU_OPERATION_OK) {
                        dev_dbg(dev, "%s - uart_flush_read error\n", __func__);
-                       return status;
+                       goto out_free;
                }
 
                if (priv->len > 0) {
@@ -556,12 +562,16 @@ static int iuu_uart_flush(struct usb_serial_port *port)
                        status = read_immediate(port, priv->buf, priv->len);
                        if (status != IUU_OPERATION_OK) {
                                dev_dbg(dev, "%s - uart_flush_read error\n", __func__);
-                               return status;
+                               goto out_free;
                        }
                }
        }
        dev_dbg(dev, "%s - uart_flush_read OK!\n", __func__);
        iuu_led(port, 0, 0xF000, 0, 0xFF);
+
+out_free:
+       kfree(rxcmd);
+
        return status;
 }
 
index 2c21e34..3fe9591 100644 (file)
@@ -1117,6 +1117,8 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
          .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0xff, 0x30) }, /* EM160R-GL */
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0, 0) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
@@ -2057,6 +2059,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff),                     /* Fibocom NL678 series */
          .driver_info = RSVD(6) },
        { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) },                   /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
+       { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) },                   /* LongSung M5710 */
        { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) },                   /* GosunCn GM500 RNDIS */
        { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) },                   /* GosunCn GM500 MBIM */
        { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) },                   /* GosunCn GM500 ECM/NCM */
index 870e9cf..f9677a5 100644 (file)
@@ -90,6 +90,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_BROKEN_FUA),
 
+/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
+UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999,
+               "PNY",
+               "Pro Elite SSD",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_ATA_1X),
+
 /* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
 UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999,
                "PNY",
index 187690f..60d375e 100644 (file)
@@ -20,6 +20,6 @@ config TYPEC_NVIDIA_ALTMODE
          to enable support for VirtualLink devices with NVIDIA GPUs.
 
          To compile this driver as a module, choose M here: the
-         module will be called typec_displayport.
+         module will be called typec_nvidia.
 
 endmenu
index ebfd311..8f77669 100644 (file)
@@ -766,6 +766,7 @@ int typec_partner_set_num_altmodes(struct typec_partner *partner, int num_altmod
                return ret;
 
        sysfs_notify(&partner->dev.kobj, NULL, "number_of_alternate_modes");
+       kobject_uevent(&partner->dev.kobj, KOBJ_CHANGE);
 
        return 0;
 }
@@ -923,6 +924,7 @@ int typec_plug_set_num_altmodes(struct typec_plug *plug, int num_altmodes)
                return ret;
 
        sysfs_notify(&plug->dev.kobj, NULL, "number_of_alternate_modes");
+       kobject_uevent(&plug->dev.kobj, KOBJ_CHANGE);
 
        return 0;
 }
index cf37a59..46a25b8 100644 (file)
@@ -207,10 +207,21 @@ static int
 pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_displayport_data *dp)
 {
        u8 msg[2] = { };
+       int ret;
 
        msg[0] = PMC_USB_DP_HPD;
        msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
 
+       /* Configure HPD first if HPD,IRQ comes together */
+       if (!IOM_PORT_HPD_ASSERTED(port->iom_status) &&
+           dp->status & DP_STATUS_IRQ_HPD &&
+           dp->status & DP_STATUS_HPD_STATE) {
+               msg[1] = PMC_USB_DP_HPD_LVL;
+               ret = pmc_usb_command(port, msg, sizeof(msg));
+               if (ret)
+                       return ret;
+       }
+
        if (dp->status & DP_STATUS_IRQ_HPD)
                msg[1] = PMC_USB_DP_HPD_IRQ;
 
index 66cde5e..3209b5d 100644 (file)
@@ -396,6 +396,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                default:
                        usbip_dbg_vhci_rh(" ClearPortFeature: default %x\n",
                                          wValue);
+                       if (wValue >= 32)
+                               goto error;
                        vhci_hcd->port_status[rhport] &= ~(1 << wValue);
                        break;
                }
index 531a00d..c8784df 100644 (file)
@@ -863,6 +863,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
        size_t len, total_len = 0;
        int err;
        struct vhost_net_ubuf_ref *ubufs;
+       struct ubuf_info *ubuf;
        bool zcopy_used;
        int sent_pkts = 0;
 
@@ -895,9 +896,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
 
                /* use msg_control to pass vhost zerocopy ubuf info to skb */
                if (zcopy_used) {
-                       struct ubuf_info *ubuf;
                        ubuf = nvq->ubuf_info + nvq->upend_idx;
-
                        vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
                        vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
                        ubuf->callback = vhost_zerocopy_callback;
@@ -927,7 +926,8 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
                err = sock->ops->sendmsg(sock, &msg, len);
                if (unlikely(err < 0)) {
                        if (zcopy_used) {
-                               vhost_net_ubuf_put(ubufs);
+                               if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS)
+                                       vhost_net_ubuf_put(ubufs);
                                nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
                                        % UIO_MAXIOV;
                        }
index a483cec..5e78fb7 100644 (file)
 #define VHOST_VSOCK_PKT_WEIGHT 256
 
 enum {
-       VHOST_VSOCK_FEATURES = VHOST_FEATURES,
+       VHOST_VSOCK_FEATURES = VHOST_FEATURES |
+                              (1ULL << VIRTIO_F_ACCESS_PLATFORM)
+};
+
+enum {
+       VHOST_VSOCK_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
 };
 
 /* Used to track all the vhost_vsock instances on the system. */
@@ -94,6 +99,9 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
        if (!vhost_vq_get_backend(vq))
                goto out;
 
+       if (!vq_meta_prefetch(vq))
+               goto out;
+
        /* Avoid further vmexits, we're already processing the virtqueue */
        vhost_disable_notify(&vsock->dev, vq);
 
@@ -449,6 +457,9 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
        if (!vhost_vq_get_backend(vq))
                goto out;
 
+       if (!vq_meta_prefetch(vq))
+               goto out;
+
        vhost_disable_notify(&vsock->dev, vq);
        do {
                u32 len;
@@ -766,8 +777,12 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
        mutex_lock(&vsock->dev.mutex);
        if ((features & (1 << VHOST_F_LOG_ALL)) &&
            !vhost_log_access_ok(&vsock->dev)) {
-               mutex_unlock(&vsock->dev.mutex);
-               return -EFAULT;
+               goto err;
+       }
+
+       if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
+               if (vhost_init_device_iotlb(&vsock->dev, true))
+                       goto err;
        }
 
        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
@@ -778,6 +793,10 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
        }
        mutex_unlock(&vsock->dev.mutex);
        return 0;
+
+err:
+       mutex_unlock(&vsock->dev.mutex);
+       return -EFAULT;
 }
 
 static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
@@ -811,6 +830,18 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
                if (copy_from_user(&features, argp, sizeof(features)))
                        return -EFAULT;
                return vhost_vsock_set_features(vsock, features);
+       case VHOST_GET_BACKEND_FEATURES:
+               features = VHOST_VSOCK_BACKEND_FEATURES;
+               if (copy_to_user(argp, &features, sizeof(features)))
+                       return -EFAULT;
+               return 0;
+       case VHOST_SET_BACKEND_FEATURES:
+               if (copy_from_user(&features, argp, sizeof(features)))
+                       return -EFAULT;
+               if (features & ~VHOST_VSOCK_BACKEND_FEATURES)
+                       return -EOPNOTSUPP;
+               vhost_set_backend_features(&vsock->dev, features);
+               return 0;
        default:
                mutex_lock(&vsock->dev.mutex);
                r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
@@ -823,6 +854,34 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
        }
 }
 
+static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+       struct file *file = iocb->ki_filp;
+       struct vhost_vsock *vsock = file->private_data;
+       struct vhost_dev *dev = &vsock->dev;
+       int noblock = file->f_flags & O_NONBLOCK;
+
+       return vhost_chr_read_iter(dev, to, noblock);
+}
+
+static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb,
+                                       struct iov_iter *from)
+{
+       struct file *file = iocb->ki_filp;
+       struct vhost_vsock *vsock = file->private_data;
+       struct vhost_dev *dev = &vsock->dev;
+
+       return vhost_chr_write_iter(dev, from);
+}
+
+static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait)
+{
+       struct vhost_vsock *vsock = file->private_data;
+       struct vhost_dev *dev = &vsock->dev;
+
+       return vhost_chr_poll(file, dev, wait);
+}
+
 static const struct file_operations vhost_vsock_fops = {
        .owner          = THIS_MODULE,
        .open           = vhost_vsock_dev_open,
@@ -830,6 +889,9 @@ static const struct file_operations vhost_vsock_fops = {
        .llseek         = noop_llseek,
        .unlocked_ioctl = vhost_vsock_dev_ioctl,
        .compat_ioctl   = compat_ptr_ioctl,
+       .read_iter      = vhost_vsock_chr_read_iter,
+       .write_iter     = vhost_vsock_chr_write_iter,
+       .poll           = vhost_vsock_chr_poll,
 };
 
 static struct miscdevice vhost_vsock_misc = {
index a803033..e850f79 100644 (file)
@@ -2060,16 +2060,6 @@ static struct irq_chip xen_percpu_chip __read_mostly = {
        .irq_ack                = ack_dynirq,
 };
 
-int xen_set_callback_via(uint64_t via)
-{
-       struct xen_hvm_param a;
-       a.domid = DOMID_SELF;
-       a.index = HVM_PARAM_CALLBACK_IRQ;
-       a.value = via;
-       return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
-}
-EXPORT_SYMBOL_GPL(xen_set_callback_via);
-
 #ifdef CONFIG_XEN_PVHVM
 /* Vector callbacks are better than PCI interrupts to receive event
  * channel notifications because we can receive vector callbacks on any
index dd911e1..18f0ed8 100644 (file)
@@ -132,6 +132,13 @@ static int platform_pci_probe(struct pci_dev *pdev,
                        dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
                        goto out;
                }
+               /*
+                * It doesn't strictly *have* to run on CPU0 but it sure
+                * as hell better process the event channel ports delivered
+                * to CPU0.
+                */
+               irq_set_affinity(pdev->irq, cpumask_of(0));
+
                callback_via = get_callback_via(pdev);
                ret = xen_set_callback_via(callback_via);
                if (ret) {
@@ -149,7 +156,6 @@ static int platform_pci_probe(struct pci_dev *pdev,
        ret = gnttab_init();
        if (ret)
                goto grant_out;
-       xenbus_probe(NULL);
        return 0;
 grant_out:
        gnttab_free_auto_xlat_frames();
index b0c73c5..720a7b7 100644 (file)
@@ -717,14 +717,15 @@ static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
        return 0;
 }
 
-static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
+static long privcmd_ioctl_mmap_resource(struct file *file,
+                               struct privcmd_mmap_resource __user *udata)
 {
        struct privcmd_data *data = file->private_data;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct privcmd_mmap_resource kdata;
        xen_pfn_t *pfns = NULL;
-       struct xen_mem_acquire_resource xdata;
+       struct xen_mem_acquire_resource xdata = { };
        int rc;
 
        if (copy_from_user(&kdata, udata, sizeof(kdata)))
@@ -734,6 +735,22 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
        if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
                return -EPERM;
 
+       /* Both fields must be set or unset */
+       if (!!kdata.addr != !!kdata.num)
+               return -EINVAL;
+
+       xdata.domid = kdata.dom;
+       xdata.type = kdata.type;
+       xdata.id = kdata.id;
+
+       if (!kdata.addr && !kdata.num) {
+               /* Query the size of the resource. */
+               rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
+               if (rc)
+                       return rc;
+               return __put_user(xdata.nr_frames, &udata->num);
+       }
+
        mmap_write_lock(mm);
 
        vma = find_vma(mm, kdata.addr);
@@ -768,10 +785,6 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
        } else
                vma->vm_private_data = PRIV_VMA_LOCKED;
 
-       memset(&xdata, 0, sizeof(xdata));
-       xdata.domid = kdata.dom;
-       xdata.type = kdata.type;
-       xdata.id = kdata.id;
        xdata.frame = kdata.idx;
        xdata.nr_frames = kdata.num;
        set_xen_guest_handle(xdata.frame_list, pfns);
index 2a93b7c..dc15373 100644 (file)
@@ -115,6 +115,7 @@ int xenbus_probe_node(struct xen_bus_type *bus,
                      const char *type,
                      const char *nodename);
 int xenbus_probe_devices(struct xen_bus_type *bus);
+void xenbus_probe(void);
 
 void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
 
index eb5151f..e5fda02 100644 (file)
@@ -57,16 +57,8 @@ DEFINE_MUTEX(xs_response_mutex);
 static int xenbus_irq;
 static struct task_struct *xenbus_task;
 
-static DECLARE_WORK(probe_work, xenbus_probe);
-
-
 static irqreturn_t wake_waiting(int irq, void *unused)
 {
-       if (unlikely(xenstored_ready == 0)) {
-               xenstored_ready = 1;
-               schedule_work(&probe_work);
-       }
-
        wake_up(&xb_waitq);
        return IRQ_HANDLED;
 }
index 44634d9..c8f0282 100644 (file)
@@ -683,29 +683,76 @@ void unregister_xenstore_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
 
-void xenbus_probe(struct work_struct *unused)
+void xenbus_probe(void)
 {
        xenstored_ready = 1;
 
+       /*
+        * In the HVM case, xenbus_init() deferred its call to
+        * xs_init() in case callbacks were not operational yet.
+        * So do it now.
+        */
+       if (xen_store_domain_type == XS_HVM)
+               xs_init();
+
        /* Notify others that xenstore is up */
        blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
 }
-EXPORT_SYMBOL_GPL(xenbus_probe);
 
-static int __init xenbus_probe_initcall(void)
+/*
+ * Returns true when XenStore init must be deferred in order to
+ * allow the PCI platform device to be initialised, before we
+ * can actually have event channel interrupts working.
+ */
+static bool xs_hvm_defer_init_for_callback(void)
 {
-       if (!xen_domain())
-               return -ENODEV;
+#ifdef CONFIG_XEN_PVHVM
+       return xen_store_domain_type == XS_HVM &&
+               !xen_have_vector_callback;
+#else
+       return false;
+#endif
+}
 
-       if (xen_initial_domain() || xen_hvm_domain())
-               return 0;
+static int __init xenbus_probe_initcall(void)
+{
+       /*
+        * Probe XenBus here in the XS_PV case, and also XS_HVM unless we
+        * need to wait for the platform PCI device to come up.
+        */
+       if (xen_store_domain_type == XS_PV ||
+           (xen_store_domain_type == XS_HVM &&
+            !xs_hvm_defer_init_for_callback()))
+               xenbus_probe();
 
-       xenbus_probe(NULL);
        return 0;
 }
-
 device_initcall(xenbus_probe_initcall);
 
+int xen_set_callback_via(uint64_t via)
+{
+       struct xen_hvm_param a;
+       int ret;
+
+       a.domid = DOMID_SELF;
+       a.index = HVM_PARAM_CALLBACK_IRQ;
+       a.value = via;
+
+       ret = HYPERVISOR_hvm_op(HVMOP_set_param, &a);
+       if (ret)
+               return ret;
+
+       /*
+        * If xenbus_probe_initcall() deferred the xenbus_probe()
+        * due to the callback not functioning yet, we can do it now.
+        */
+       if (!xenstored_ready && xs_hvm_defer_init_for_callback())
+               xenbus_probe();
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(xen_set_callback_via);
+
 /* Set up event channel for xenstored which is run as a local process
  * (this is normally used only in dom0)
  */
@@ -818,11 +865,17 @@ static int __init xenbus_init(void)
                break;
        }
 
-       /* Initialize the interface to xenstore. */
-       err = xs_init();
-       if (err) {
-               pr_warn("Error initializing xenstore comms: %i\n", err);
-               goto out_error;
+       /*
+        * HVM domains may not have a functional callback yet. In that
+        * case let xs_init() be called from xenbus_probe(), which will
+        * get invoked at an appropriate time.
+        */
+       if (xen_store_domain_type != XS_HVM) {
+               err = xs_init();
+               if (err) {
+                       pr_warn("Error initializing xenstore comms: %i\n", err);
+                       goto out_error;
+               }
        }
 
        if ((xen_store_domain_type != XS_LOCAL) &&
index 9068d55..7bd659a 100644 (file)
@@ -350,7 +350,7 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
                                 unsigned blkoff)
 {
        union afs_xdr_dirent *dire;
-       unsigned offset, next, curr;
+       unsigned offset, next, curr, nr_slots;
        size_t nlen;
        int tmp;
 
@@ -363,13 +363,12 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
             offset < AFS_DIR_SLOTS_PER_BLOCK;
             offset = next
             ) {
-               next = offset + 1;
-
                /* skip entries marked unused in the bitmap */
                if (!(block->hdr.bitmap[offset / 8] &
                      (1 << (offset % 8)))) {
                        _debug("ENT[%zu.%u]: unused",
                               blkoff / sizeof(union afs_xdr_dir_block), offset);
+                       next = offset + 1;
                        if (offset >= curr)
                                ctx->pos = blkoff +
                                        next * sizeof(union afs_xdr_dirent);
@@ -381,35 +380,39 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
                nlen = strnlen(dire->u.name,
                               sizeof(*block) -
                               offset * sizeof(union afs_xdr_dirent));
+               if (nlen > AFSNAMEMAX - 1) {
+                       _debug("ENT[%zu]: name too long (len %u/%zu)",
+                              blkoff / sizeof(union afs_xdr_dir_block),
+                              offset, nlen);
+                       return afs_bad(dvnode, afs_file_error_dir_name_too_long);
+               }
 
                _debug("ENT[%zu.%u]: %s %zu \"%s\"",
                       blkoff / sizeof(union afs_xdr_dir_block), offset,
                       (offset < curr ? "skip" : "fill"),
                       nlen, dire->u.name);
 
-               /* work out where the next possible entry is */
-               for (tmp = nlen; tmp > 15; tmp -= sizeof(union afs_xdr_dirent)) {
-                       if (next >= AFS_DIR_SLOTS_PER_BLOCK) {
-                               _debug("ENT[%zu.%u]:"
-                                      " %u travelled beyond end dir block"
-                                      " (len %u/%zu)",
-                                      blkoff / sizeof(union afs_xdr_dir_block),
-                                      offset, next, tmp, nlen);
-                               return afs_bad(dvnode, afs_file_error_dir_over_end);
-                       }
-                       if (!(block->hdr.bitmap[next / 8] &
-                             (1 << (next % 8)))) {
-                               _debug("ENT[%zu.%u]:"
-                                      " %u unmarked extension (len %u/%zu)",
+               nr_slots = afs_dir_calc_slots(nlen);
+               next = offset + nr_slots;
+               if (next > AFS_DIR_SLOTS_PER_BLOCK) {
+                       _debug("ENT[%zu.%u]:"
+                              " %u extends beyond end dir block"
+                              " (len %zu)",
+                              blkoff / sizeof(union afs_xdr_dir_block),
+                              offset, next, nlen);
+                       return afs_bad(dvnode, afs_file_error_dir_over_end);
+               }
+
+               /* Check that the name-extension dirents are all allocated */
+               for (tmp = 1; tmp < nr_slots; tmp++) {
+                       unsigned int ix = offset + tmp;
+                       if (!(block->hdr.bitmap[ix / 8] & (1 << (ix % 8)))) {
+                               _debug("ENT[%zu.%u]:"
+                                      " %u unmarked extension (%u/%u)",
                                       blkoff / sizeof(union afs_xdr_dir_block),
-                                      offset, next, tmp, nlen);
+                                      offset, tmp, nr_slots);
                                return afs_bad(dvnode, afs_file_error_dir_unmarked_ext);
                        }
-
-                       _debug("ENT[%zu.%u]: ext %u/%zu",
-                              blkoff / sizeof(union afs_xdr_dir_block),
-                              next, tmp, nlen);
-                       next++;
                }
 
                /* skip if starts before the current position */
index 2ffe09a..f4600c1 100644 (file)
@@ -215,8 +215,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
        }
 
        /* Work out how many slots we're going to need. */
-       need_slots = round_up(12 + name->len + 1 + 4, AFS_DIR_DIRENT_SIZE);
-       need_slots /= AFS_DIR_DIRENT_SIZE;
+       need_slots = afs_dir_calc_slots(name->len);
 
        meta_page = kmap(page0);
        meta = &meta_page->blocks[0];
@@ -393,8 +392,7 @@ void afs_edit_dir_remove(struct afs_vnode *vnode,
        }
 
        /* Work out how many slots we're going to discard. */
-       need_slots = round_up(12 + name->len + 1 + 4, AFS_DIR_DIRENT_SIZE);
-       need_slots /= AFS_DIR_DIRENT_SIZE;
+       need_slots = afs_dir_calc_slots(name->len);
 
        meta_page = kmap(page0);
        meta = &meta_page->blocks[0];
index 94f1f39..8ca8681 100644 (file)
@@ -54,10 +54,16 @@ union afs_xdr_dirent {
                __be16          hash_next;
                __be32          vnode;
                __be32          unique;
-               u8              name[16];
-               u8              overflow[4];    /* if any char of the name (inc
-                                                * NUL) reaches here, consume
-                                                * the next dirent too */
+               u8              name[];
+               /* When determining the number of dirent slots needed to
+                * represent a directory entry, name should be assumed to be 16
+                * bytes, due to a now-standardised (mis)calculation, but it is
+                * in fact 20 bytes in size.  afs_dir_calc_slots() should be
+                * used for this.
+                *
+                * For names longer than (16 or) 20 bytes, extra slots should
+                * be annexed to this one using the extended_name format.
+                */
        } u;
        u8                      extended_name[32];
 } __packed;
@@ -96,4 +102,15 @@ struct afs_xdr_dir_page {
        union afs_xdr_dir_block blocks[AFS_DIR_BLOCKS_PER_PAGE];
 };
 
+/*
+ * Calculate the number of dirent slots required for any given name length.
+ * The calculation is made assuming the part of the name in the first slot is
+ * 16 bytes, rather than 20, but this miscalculation is now standardised.
+ */
+static inline unsigned int afs_dir_calc_slots(size_t name_len)
+{
+       name_len++; /* NUL-terminated */
+       return 1 + ((name_len + 15) / AFS_DIR_DIRENT_SIZE);
+}
+
 #endif /* XDR_FS_H */
index 3e5b02f..3b8963e 100644 (file)
@@ -605,6 +605,8 @@ int thaw_bdev(struct block_device *bdev)
                error = thaw_super(sb);
        if (error)
                bdev->bd_fsfreeze_count++;
+       else
+               bdev->bd_fsfreeze_sb = NULL;
 out:
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        return error;
@@ -774,8 +776,11 @@ static struct kmem_cache * bdev_cachep __read_mostly;
 static struct inode *bdev_alloc_inode(struct super_block *sb)
 {
        struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
+
        if (!ei)
                return NULL;
+       memset(&ei->bdev, 0, sizeof(ei->bdev));
+       ei->bdev.bd_bdi = &noop_backing_dev_info;
        return &ei->vfs_inode;
 }
 
@@ -869,14 +874,12 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
        mapping_set_gfp_mask(&inode->i_data, GFP_USER);
 
        bdev = I_BDEV(inode);
-       memset(bdev, 0, sizeof(*bdev));
        mutex_init(&bdev->bd_mutex);
        mutex_init(&bdev->bd_fsfreeze_mutex);
        spin_lock_init(&bdev->bd_size_lock);
        bdev->bd_disk = disk;
        bdev->bd_partno = partno;
        bdev->bd_inode = inode;
-       bdev->bd_bdi = &noop_backing_dev_info;
 #ifdef CONFIG_SYSFS
        INIT_LIST_HEAD(&bdev->bd_holder_disks);
 #endif
index 02d7d7b..9cadacf 100644 (file)
@@ -3117,7 +3117,7 @@ void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
                list_del_init(&lower->list);
                if (lower == node)
                        node = NULL;
-               btrfs_backref_free_node(cache, lower);
+               btrfs_backref_drop_node(cache, lower);
        }
 
        btrfs_backref_cleanup_node(cache, node);
index 52f2198..0886e81 100644 (file)
@@ -2669,7 +2669,8 @@ again:
         * Go through delayed refs for all the stuff we've just kicked off
         * and then loop back (just once)
         */
-       ret = btrfs_run_delayed_refs(trans, 0);
+       if (!ret)
+               ret = btrfs_run_delayed_refs(trans, 0);
        if (!ret && loops == 0) {
                loops++;
                spin_lock(&cur_trans->dirty_bgs_lock);
index 555cbce..d9bf53d 100644 (file)
@@ -42,6 +42,15 @@ enum {
         * to an inode.
         */
        BTRFS_INODE_NO_XATTRS,
+       /*
+        * Set when we are in a context where we need to start a transaction and
+        * have dirty pages with the respective file range locked. This is to
+        * ensure that when reserving space for the transaction, if we are low
+        * on available space and need to flush delalloc, we will not flush
+        * delalloc for this inode, because that could result in a deadlock (on
+        * the file range, inode's io_tree).
+        */
+       BTRFS_INODE_NO_DELALLOC_FLUSH,
 };
 
 /* in memory btrfs inode */
index 0781089..cc89b63 100644 (file)
@@ -2555,8 +2555,14 @@ out:
  * @p:         Holds all btree nodes along the search path
  * @root:      The root node of the tree
  * @key:       The key we are looking for
- * @ins_len:   Indicates purpose of search, for inserts it is 1, for
- *             deletions it's -1. 0 for plain searches
+ * @ins_len:   Indicates purpose of search:
+ *              >0  for inserts it's size of item inserted (*)
+ *              <0  for deletions
+ *               0  for plain searches, not modifying the tree
+ *
+ *              (*) If size of item inserted doesn't include
+ *              sizeof(struct btrfs_item), then p->search_for_extension must
+ *              be set.
  * @cow:       boolean should CoW operations be performed. Must always be 1
  *             when modifying the tree.
  *
@@ -2717,6 +2723,20 @@ cow_done:
 
                if (level == 0) {
                        p->slots[level] = slot;
+                       /*
+                        * Item key already exists. In this case, if we are
+                        * allowed to insert the item (for example, in dir_item
+                        * case, item key collision is allowed), it will be
+                        * merged with the original item. Only the item size
+                        * grows, no new btrfs item will be added. If
+                        * search_for_extension is not set, ins_len already
+                        * accounts the size btrfs_item, deduct it here so leaf
+                        * space check will be correct.
+                        */
+                       if (ret == 0 && ins_len > 0 && !p->search_for_extension) {
+                               ASSERT(ins_len >= sizeof(struct btrfs_item));
+                               ins_len -= sizeof(struct btrfs_item);
+                       }
                        if (ins_len > 0 &&
                            btrfs_leaf_free_space(b) < ins_len) {
                                if (write_lock_level < 1) {
index 1d3c1e4..e6e3759 100644 (file)
@@ -131,6 +131,8 @@ enum {
         * defrag
         */
        BTRFS_FS_STATE_REMOUNTING,
+       /* Filesystem in RO mode */
+       BTRFS_FS_STATE_RO,
        /* Track if a transaction abort has been reported on this filesystem */
        BTRFS_FS_STATE_TRANS_ABORTED,
        /*
@@ -367,6 +369,12 @@ struct btrfs_path {
        unsigned int search_commit_root:1;
        unsigned int need_commit_sem:1;
        unsigned int skip_release_on_error:1;
+       /*
+        * Indicate that new item (btrfs_search_slot) is extending already
+        * existing item and ins_len contains only the data size and not item
+        * header (ie. sizeof(struct btrfs_item) is not included).
+        */
+       unsigned int search_for_extension:1;
 };
 #define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \
                                        sizeof(struct btrfs_item))
@@ -2885,10 +2893,26 @@ static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
  * If we remount the fs to be R/O or umount the fs, the cleaner needn't do
  * anything except sleeping. This function is used to check the status of
  * the fs.
+ * We check for BTRFS_FS_STATE_RO to avoid races with a concurrent remount,
+ * since setting and checking for SB_RDONLY in the superblock's flags is not
+ * atomic.
  */
 static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info)
 {
-       return fs_info->sb->s_flags & SB_RDONLY || btrfs_fs_closing(fs_info);
+       return test_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state) ||
+               btrfs_fs_closing(fs_info);
+}
+
+static inline void btrfs_set_sb_rdonly(struct super_block *sb)
+{
+       sb->s_flags |= SB_RDONLY;
+       set_bit(BTRFS_FS_STATE_RO, &btrfs_sb(sb)->fs_state);
+}
+
+static inline void btrfs_clear_sb_rdonly(struct super_block *sb)
+{
+       sb->s_flags &= ~SB_RDONLY;
+       clear_bit(BTRFS_FS_STATE_RO, &btrfs_sb(sb)->fs_state);
 }
 
 /* tree mod log functions from ctree.c */
@@ -3073,7 +3097,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
                               u32 min_type);
 
 int btrfs_start_delalloc_snapshot(struct btrfs_root *root);
-int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr);
+int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr,
+                              bool in_reclaim_context);
 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
                              unsigned int extra_bits,
                              struct extent_state **cached_state);
index a98e33f..324f646 100644 (file)
@@ -715,7 +715,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
         * flush all outstanding I/O and inode extent mappings before the
         * copy operation is declared as being finished
         */
-       ret = btrfs_start_delalloc_roots(fs_info, U64_MAX);
+       ret = btrfs_start_delalloc_roots(fs_info, U64_MAX, false);
        if (ret) {
                mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
                return ret;
index 1db966b..2b8383d 100644 (file)
@@ -199,16 +199,15 @@ static struct btrfs_block_group *find_next_block_group(
 static struct btrfs_block_group *peek_discard_list(
                                        struct btrfs_discard_ctl *discard_ctl,
                                        enum btrfs_discard_state *discard_state,
-                                       int *discard_index)
+                                       int *discard_index, u64 now)
 {
        struct btrfs_block_group *block_group;
-       const u64 now = ktime_get_ns();
 
        spin_lock(&discard_ctl->lock);
 again:
        block_group = find_next_block_group(discard_ctl, now);
 
-       if (block_group && now > block_group->discard_eligible_time) {
+       if (block_group && now >= block_group->discard_eligible_time) {
                if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED &&
                    block_group->used != 0) {
                        if (btrfs_is_block_group_data_only(block_group))
@@ -222,12 +221,11 @@ again:
                        block_group->discard_state = BTRFS_DISCARD_EXTENTS;
                }
                discard_ctl->block_group = block_group;
+       }
+       if (block_group) {
                *discard_state = block_group->discard_state;
                *discard_index = block_group->discard_index;
-       } else {
-               block_group = NULL;
        }
-
        spin_unlock(&discard_ctl->lock);
 
        return block_group;
@@ -330,28 +328,15 @@ void btrfs_discard_queue_work(struct btrfs_discard_ctl *discard_ctl,
                btrfs_discard_schedule_work(discard_ctl, false);
 }
 
-/**
- * btrfs_discard_schedule_work - responsible for scheduling the discard work
- * @discard_ctl: discard control
- * @override: override the current timer
- *
- * Discards are issued by a delayed workqueue item.  @override is used to
- * update the current delay as the baseline delay interval is reevaluated on
- * transaction commit.  This is also maxed with any other rate limit.
- */
-void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
-                                bool override)
+static void __btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
+                                         u64 now, bool override)
 {
        struct btrfs_block_group *block_group;
-       const u64 now = ktime_get_ns();
-
-       spin_lock(&discard_ctl->lock);
 
        if (!btrfs_run_discard_work(discard_ctl))
-               goto out;
-
+               return;
        if (!override && delayed_work_pending(&discard_ctl->work))
-               goto out;
+               return;
 
        block_group = find_next_block_group(discard_ctl, now);
        if (block_group) {
@@ -393,7 +378,24 @@ void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
                mod_delayed_work(discard_ctl->discard_workers,
                                 &discard_ctl->work, nsecs_to_jiffies(delay));
        }
-out:
+}
+
+/*
+ * btrfs_discard_schedule_work - responsible for scheduling the discard work
+ * @discard_ctl:  discard control
+ * @override:     override the current timer
+ *
+ * Discards are issued by a delayed workqueue item.  @override is used to
+ * update the current delay as the baseline delay interval is reevaluated on
+ * transaction commit.  This is also maxed with any other rate limit.
+ */
+void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
+                                bool override)
+{
+       const u64 now = ktime_get_ns();
+
+       spin_lock(&discard_ctl->lock);
+       __btrfs_discard_schedule_work(discard_ctl, now, override);
        spin_unlock(&discard_ctl->lock);
 }
 
@@ -438,13 +440,18 @@ static void btrfs_discard_workfn(struct work_struct *work)
        int discard_index = 0;
        u64 trimmed = 0;
        u64 minlen = 0;
+       u64 now = ktime_get_ns();
 
        discard_ctl = container_of(work, struct btrfs_discard_ctl, work.work);
 
        block_group = peek_discard_list(discard_ctl, &discard_state,
-                                       &discard_index);
+                                       &discard_index, now);
        if (!block_group || !btrfs_run_discard_work(discard_ctl))
                return;
+       if (now < block_group->discard_eligible_time) {
+               btrfs_discard_schedule_work(discard_ctl, false);
+               return;
+       }
 
        /* Perform discarding */
        minlen = discard_minlen[discard_index];
@@ -474,13 +481,6 @@ static void btrfs_discard_workfn(struct work_struct *work)
                discard_ctl->discard_extent_bytes += trimmed;
        }
 
-       /*
-        * Updated without locks as this is inside the workfn and nothing else
-        * is reading the values
-        */
-       discard_ctl->prev_discard = trimmed;
-       discard_ctl->prev_discard_time = ktime_get_ns();
-
        /* Determine next steps for a block_group */
        if (block_group->discard_cursor >= btrfs_block_group_end(block_group)) {
                if (discard_state == BTRFS_DISCARD_BITMAPS) {
@@ -496,11 +496,13 @@ static void btrfs_discard_workfn(struct work_struct *work)
                }
        }
 
+       now = ktime_get_ns();
        spin_lock(&discard_ctl->lock);
+       discard_ctl->prev_discard = trimmed;
+       discard_ctl->prev_discard_time = now;
        discard_ctl->block_group = NULL;
+       __btrfs_discard_schedule_work(discard_ctl, now, false);
        spin_unlock(&discard_ctl->lock);
-
-       btrfs_discard_schedule_work(discard_ctl, false);
 }
 
 /**
index 765deef..6b35b7e 100644 (file)
@@ -1457,7 +1457,7 @@ void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info)
                root = list_first_entry(&fs_info->allocated_roots,
                                        struct btrfs_root, leak_list);
                btrfs_err(fs_info, "leaked root %s refcount %d",
-                         btrfs_root_name(root->root_key.objectid, buf),
+                         btrfs_root_name(&root->root_key, buf),
                          refcount_read(&root->refs));
                while (refcount_read(&root->refs) > 1)
                        btrfs_put_root(root);
@@ -1729,7 +1729,7 @@ static int cleaner_kthread(void *arg)
                 */
                btrfs_delete_unused_bgs(fs_info);
 sleep:
-               clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
+               clear_and_wake_up_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
                if (kthread_should_park())
                        kthread_parkme();
                if (kthread_should_stop())
@@ -2830,6 +2830,9 @@ static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block
                return -ENOMEM;
        btrfs_init_delayed_root(fs_info->delayed_root);
 
+       if (sb_rdonly(sb))
+               set_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);
+
        return btrfs_alloc_stripe_hash_table(fs_info);
 }
 
@@ -2969,6 +2972,7 @@ int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
                }
        }
 
+       ret = btrfs_find_orphan_roots(fs_info);
 out:
        return ret;
 }
@@ -3383,10 +3387,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
                }
        }
 
-       ret = btrfs_find_orphan_roots(fs_info);
-       if (ret)
-               goto fail_qgroup;
-
        fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true);
        if (IS_ERR(fs_info->fs_root)) {
                err = PTR_ERR(fs_info->fs_root);
@@ -4181,6 +4181,9 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
        invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
        btrfs_stop_all_workers(fs_info);
 
+       /* We shouldn't have any transaction open at this point */
+       ASSERT(list_empty(&fs_info->trans_list));
+
        clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
        free_root_pointers(fs_info, true);
        btrfs_free_fs_roots(fs_info);
index 56ea380..30b1a63 100644 (file)
@@ -844,6 +844,7 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
        want = extent_ref_type(parent, owner);
        if (insert) {
                extra_size = btrfs_extent_inline_ref_size(want);
+               path->search_for_extension = 1;
                path->keep_locks = 1;
        } else
                extra_size = -1;
@@ -996,6 +997,7 @@ again:
 out:
        if (insert) {
                path->keep_locks = 0;
+               path->search_for_extension = 0;
                btrfs_unlock_up_safe(path, 1);
        }
        return err;
@@ -5547,7 +5549,15 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
                                goto out_free;
                        }
 
-                       trans = btrfs_start_transaction(tree_root, 0);
+                      /*
+                       * Use join to avoid potential EINTR from transaction
+                       * start. See wait_reserve_ticket and the whole
+                       * reservation callchain.
+                       */
+                       if (for_reloc)
+                               trans = btrfs_join_transaction(tree_root);
+                       else
+                               trans = btrfs_start_transaction(tree_root, 0);
                        if (IS_ERR(trans)) {
                                err = PTR_ERR(trans);
                                goto out_free;
index 6e3b72e..c9cee45 100644 (file)
@@ -676,9 +676,7 @@ alloc_extent_state_atomic(struct extent_state *prealloc)
 
 static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
 {
-       struct inode *inode = tree->private_data;
-
-       btrfs_panic(btrfs_sb(inode->i_sb), err,
+       btrfs_panic(tree->fs_info, err,
        "locking error: extent tree was modified by another thread while locked");
 }
 
index 1545c22..6ccfc01 100644 (file)
@@ -1016,8 +1016,10 @@ again:
        }
 
        btrfs_release_path(path);
+       path->search_for_extension = 1;
        ret = btrfs_search_slot(trans, root, &file_key, path,
                                csum_size, 1);
+       path->search_for_extension = 0;
        if (ret < 0)
                goto out;
 
index 8e23780..a8e0a6b 100644 (file)
@@ -9390,7 +9390,9 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode
  * some fairly slow code that needs optimization. This walks the list
  * of all the inodes with pending delalloc and forces them to disk.
  */
-static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot)
+static int start_delalloc_inodes(struct btrfs_root *root,
+                                struct writeback_control *wbc, bool snapshot,
+                                bool in_reclaim_context)
 {
        struct btrfs_inode *binode;
        struct inode *inode;
@@ -9398,6 +9400,7 @@ static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot
        struct list_head works;
        struct list_head splice;
        int ret = 0;
+       bool full_flush = wbc->nr_to_write == LONG_MAX;
 
        INIT_LIST_HEAD(&works);
        INIT_LIST_HEAD(&splice);
@@ -9411,6 +9414,11 @@ static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot
 
                list_move_tail(&binode->delalloc_inodes,
                               &root->delalloc_inodes);
+
+               if (in_reclaim_context &&
+                   test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags))
+                       continue;
+
                inode = igrab(&binode->vfs_inode);
                if (!inode) {
                        cond_resched_lock(&root->delalloc_lock);
@@ -9421,18 +9429,24 @@ static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot
                if (snapshot)
                        set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
                                &binode->runtime_flags);
-               work = btrfs_alloc_delalloc_work(inode);
-               if (!work) {
-                       iput(inode);
-                       ret = -ENOMEM;
-                       goto out;
-               }
-               list_add_tail(&work->list, &works);
-               btrfs_queue_work(root->fs_info->flush_workers,
-                                &work->work);
-               if (*nr != U64_MAX) {
-                       (*nr)--;
-                       if (*nr == 0)
+               if (full_flush) {
+                       work = btrfs_alloc_delalloc_work(inode);
+                       if (!work) {
+                               iput(inode);
+                               ret = -ENOMEM;
+                               goto out;
+                       }
+                       list_add_tail(&work->list, &works);
+                       btrfs_queue_work(root->fs_info->flush_workers,
+                                        &work->work);
+               } else {
+                       ret = sync_inode(inode, wbc);
+                       if (!ret &&
+                           test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+                                    &BTRFS_I(inode)->runtime_flags))
+                               ret = sync_inode(inode, wbc);
+                       btrfs_add_delayed_iput(inode);
+                       if (ret || wbc->nr_to_write <= 0)
                                goto out;
                }
                cond_resched();
@@ -9458,17 +9472,29 @@ out:
 
 int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
 {
+       struct writeback_control wbc = {
+               .nr_to_write = LONG_MAX,
+               .sync_mode = WB_SYNC_NONE,
+               .range_start = 0,
+               .range_end = LLONG_MAX,
+       };
        struct btrfs_fs_info *fs_info = root->fs_info;
-       u64 nr = U64_MAX;
 
        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
                return -EROFS;
 
-       return start_delalloc_inodes(root, &nr, true);
+       return start_delalloc_inodes(root, &wbc, true, false);
 }
 
-int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr)
+int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr,
+                              bool in_reclaim_context)
 {
+       struct writeback_control wbc = {
+               .nr_to_write = (nr == U64_MAX) ? LONG_MAX : (unsigned long)nr,
+               .sync_mode = WB_SYNC_NONE,
+               .range_start = 0,
+               .range_end = LLONG_MAX,
+       };
        struct btrfs_root *root;
        struct list_head splice;
        int ret;
@@ -9482,6 +9508,13 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr)
        spin_lock(&fs_info->delalloc_root_lock);
        list_splice_init(&fs_info->delalloc_roots, &splice);
        while (!list_empty(&splice) && nr) {
+               /*
+                * Reset nr_to_write here so we know that we're doing a full
+                * flush.
+                */
+               if (nr == U64_MAX)
+                       wbc.nr_to_write = LONG_MAX;
+
                root = list_first_entry(&splice, struct btrfs_root,
                                        delalloc_root);
                root = btrfs_grab_root(root);
@@ -9490,9 +9523,9 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr)
                               &fs_info->delalloc_roots);
                spin_unlock(&fs_info->delalloc_root_lock);
 
-               ret = start_delalloc_inodes(root, &nr, false);
+               ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context);
                btrfs_put_root(root);
-               if (ret < 0)
+               if (ret < 0 || wbc.nr_to_write <= 0)
                        goto out;
                spin_lock(&fs_info->delalloc_root_lock);
        }
index 703212f..dde49a7 100644 (file)
@@ -4951,7 +4951,7 @@ long btrfs_ioctl(struct file *file, unsigned int
        case BTRFS_IOC_SYNC: {
                int ret;
 
-               ret = btrfs_start_delalloc_roots(fs_info, U64_MAX);
+               ret = btrfs_start_delalloc_roots(fs_info, U64_MAX, false);
                if (ret)
                        return ret;
                ret = btrfs_sync_fs(inode->i_sb, 1);
index fe5e002..aae1027 100644 (file)
@@ -26,22 +26,22 @@ static const struct root_name_map root_map[] = {
        { BTRFS_DATA_RELOC_TREE_OBJECTID,       "DATA_RELOC_TREE"       },
 };
 
-const char *btrfs_root_name(u64 objectid, char *buf)
+const char *btrfs_root_name(const struct btrfs_key *key, char *buf)
 {
        int i;
 
-       if (objectid == BTRFS_TREE_RELOC_OBJECTID) {
+       if (key->objectid == BTRFS_TREE_RELOC_OBJECTID) {
                snprintf(buf, BTRFS_ROOT_NAME_BUF_LEN,
-                        "TREE_RELOC offset=%llu", objectid);
+                        "TREE_RELOC offset=%llu", key->offset);
                return buf;
        }
 
        for (i = 0; i < ARRAY_SIZE(root_map); i++) {
-               if (root_map[i].id == objectid)
+               if (root_map[i].id == key->objectid)
                        return root_map[i].name;
        }
 
-       snprintf(buf, BTRFS_ROOT_NAME_BUF_LEN, "%llu", objectid);
+       snprintf(buf, BTRFS_ROOT_NAME_BUF_LEN, "%llu", key->objectid);
        return buf;
 }
 
index 78b9938..8c3e931 100644 (file)
@@ -11,6 +11,6 @@
 
 void btrfs_print_leaf(struct extent_buffer *l);
 void btrfs_print_tree(struct extent_buffer *c, bool follow);
-const char *btrfs_root_name(u64 objectid, char *buf);
+const char *btrfs_root_name(const struct btrfs_key *key, char *buf);
 
 #endif
index fe30460..808370a 100644 (file)
@@ -3190,6 +3190,12 @@ out:
        return ret;
 }
 
+static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
+{
+       return btrfs_fs_closing(fs_info) ||
+               test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
+}
+
 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
 {
        struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
@@ -3198,6 +3204,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
        struct btrfs_trans_handle *trans = NULL;
        int err = -ENOMEM;
        int ret = 0;
+       bool stopped = false;
 
        path = btrfs_alloc_path();
        if (!path)
@@ -3210,7 +3217,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
        path->skip_locking = 1;
 
        err = 0;
-       while (!err && !btrfs_fs_closing(fs_info)) {
+       while (!err && !(stopped = rescan_should_stop(fs_info))) {
                trans = btrfs_start_transaction(fs_info->fs_root, 0);
                if (IS_ERR(trans)) {
                        err = PTR_ERR(trans);
@@ -3253,7 +3260,7 @@ out:
        }
 
        mutex_lock(&fs_info->qgroup_rescan_lock);
-       if (!btrfs_fs_closing(fs_info))
+       if (!stopped)
                fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
        if (trans) {
                ret = update_qgroup_status_item(trans);
@@ -3272,7 +3279,7 @@ out:
 
        btrfs_end_transaction(trans);
 
-       if (btrfs_fs_closing(fs_info)) {
+       if (stopped) {
                btrfs_info(fs_info, "qgroup scan paused");
        } else if (err >= 0) {
                btrfs_info(fs_info, "qgroup scan completed%s",
@@ -3530,16 +3537,6 @@ static int try_flush_qgroup(struct btrfs_root *root)
        int ret;
        bool can_commit = true;
 
-       /*
-        * We don't want to run flush again and again, so if there is a running
-        * one, we won't try to start a new flush, but exit directly.
-        */
-       if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
-               wait_event(root->qgroup_flush_wait,
-                       !test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
-               return 0;
-       }
-
        /*
         * If current process holds a transaction, we shouldn't flush, as we
         * assume all space reservation happens before a transaction handle is
@@ -3554,6 +3551,26 @@ static int try_flush_qgroup(struct btrfs_root *root)
            current->journal_info != BTRFS_SEND_TRANS_STUB)
                can_commit = false;
 
+       /*
+        * We don't want to run flush again and again, so if there is a running
+        * one, we won't try to start a new flush, but exit directly.
+        */
+       if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
+               /*
+                * We are already holding a transaction, thus we can block other
+                * threads from flushing.  So exit right now. This increases
+                * the chance of EDQUOT for heavy load and near limit cases.
+                * But we can argue that if we're already near limit, EDQUOT is
+                * unavoidable anyway.
+                */
+               if (!can_commit)
+                       return 0;
+
+               wait_event(root->qgroup_flush_wait,
+                       !test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
+               return 0;
+       }
+
        ret = btrfs_start_delalloc_snapshot(root);
        if (ret < 0)
                goto out;
index ab80896..b03e789 100644 (file)
@@ -89,6 +89,19 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
        if (ret)
                goto out_unlock;
 
+       /*
+        * After dirtying the page our caller will need to start a transaction,
+        * and if we are low on metadata free space, that can cause flushing of
+        * delalloc for all inodes in order to get metadata space released.
+        * However we are holding the range locked for the whole duration of
+        * the clone/dedupe operation, so we may deadlock if that happens and no
+        * other task releases enough space. So mark this inode as not being
+        * possible to flush to avoid such deadlock. We will clear that flag
+        * when we finish cloning all extents, since a transaction is started
+        * after finding each extent to clone.
+        */
+       set_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags);
+
        if (comp_type == BTRFS_COMPRESS_NONE) {
                char *map;
 
@@ -549,6 +562,8 @@ process_slot:
 out:
        btrfs_free_path(path);
        kvfree(buf);
+       clear_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &BTRFS_I(inode)->runtime_flags);
+
        return ret;
 }
 
index 19b7db8..df63ef6 100644 (file)
@@ -2975,11 +2975,16 @@ static int delete_v1_space_cache(struct extent_buffer *leaf,
                return 0;
 
        for (i = 0; i < btrfs_header_nritems(leaf); i++) {
+               u8 type;
+
                btrfs_item_key_to_cpu(leaf, &key, i);
                if (key.type != BTRFS_EXTENT_DATA_KEY)
                        continue;
                ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
-               if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_REG &&
+               type = btrfs_file_extent_type(leaf, ei);
+
+               if ((type == BTRFS_FILE_EXTENT_REG ||
+                    type == BTRFS_FILE_EXTENT_PREALLOC) &&
                    btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) {
                        found = true;
                        space_cache_ino = key.objectid;
index d719a27..78a3537 100644 (file)
@@ -236,6 +236,7 @@ struct waiting_dir_move {
         * after this directory is moved, we can try to rmdir the ino rmdir_ino.
         */
        u64 rmdir_ino;
+       u64 rmdir_gen;
        bool orphanized;
 };
 
@@ -316,7 +317,7 @@ static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
 static struct waiting_dir_move *
 get_waiting_dir_move(struct send_ctx *sctx, u64 ino);
 
-static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);
+static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen);
 
 static int need_send_hole(struct send_ctx *sctx)
 {
@@ -2299,7 +2300,7 @@ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
 
                fs_path_reset(name);
 
-               if (is_waiting_for_rm(sctx, ino)) {
+               if (is_waiting_for_rm(sctx, ino, gen)) {
                        ret = gen_unique_name(sctx, ino, gen, name);
                        if (ret < 0)
                                goto out;
@@ -2858,8 +2859,8 @@ out:
        return ret;
 }
 
-static struct orphan_dir_info *
-add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
+static struct orphan_dir_info *add_orphan_dir_info(struct send_ctx *sctx,
+                                                  u64 dir_ino, u64 dir_gen)
 {
        struct rb_node **p = &sctx->orphan_dirs.rb_node;
        struct rb_node *parent = NULL;
@@ -2868,20 +2869,23 @@ add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct orphan_dir_info, node);
-               if (dir_ino < entry->ino) {
+               if (dir_ino < entry->ino)
                        p = &(*p)->rb_left;
-               } else if (dir_ino > entry->ino) {
+               else if (dir_ino > entry->ino)
                        p = &(*p)->rb_right;
-               } else {
+               else if (dir_gen < entry->gen)
+                       p = &(*p)->rb_left;
+               else if (dir_gen > entry->gen)
+                       p = &(*p)->rb_right;
+               else
                        return entry;
-               }
        }
 
        odi = kmalloc(sizeof(*odi), GFP_KERNEL);
        if (!odi)
                return ERR_PTR(-ENOMEM);
        odi->ino = dir_ino;
-       odi->gen = 0;
+       odi->gen = dir_gen;
        odi->last_dir_index_offset = 0;
 
        rb_link_node(&odi->node, parent, p);
@@ -2889,8 +2893,8 @@ add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
        return odi;
 }
 
-static struct orphan_dir_info *
-get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
+static struct orphan_dir_info *get_orphan_dir_info(struct send_ctx *sctx,
+                                                  u64 dir_ino, u64 gen)
 {
        struct rb_node *n = sctx->orphan_dirs.rb_node;
        struct orphan_dir_info *entry;
@@ -2901,15 +2905,19 @@ get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
                        n = n->rb_left;
                else if (dir_ino > entry->ino)
                        n = n->rb_right;
+               else if (gen < entry->gen)
+                       n = n->rb_left;
+               else if (gen > entry->gen)
+                       n = n->rb_right;
                else
                        return entry;
        }
        return NULL;
 }
 
-static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino)
+static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen)
 {
-       struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino);
+       struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino, gen);
 
        return odi != NULL;
 }
@@ -2954,7 +2962,7 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
        key.type = BTRFS_DIR_INDEX_KEY;
        key.offset = 0;
 
-       odi = get_orphan_dir_info(sctx, dir);
+       odi = get_orphan_dir_info(sctx, dir, dir_gen);
        if (odi)
                key.offset = odi->last_dir_index_offset;
 
@@ -2985,7 +2993,7 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
 
                dm = get_waiting_dir_move(sctx, loc.objectid);
                if (dm) {
-                       odi = add_orphan_dir_info(sctx, dir);
+                       odi = add_orphan_dir_info(sctx, dir, dir_gen);
                        if (IS_ERR(odi)) {
                                ret = PTR_ERR(odi);
                                goto out;
@@ -2993,12 +3001,13 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
                        odi->gen = dir_gen;
                        odi->last_dir_index_offset = found_key.offset;
                        dm->rmdir_ino = dir;
+                       dm->rmdir_gen = dir_gen;
                        ret = 0;
                        goto out;
                }
 
                if (loc.objectid > send_progress) {
-                       odi = add_orphan_dir_info(sctx, dir);
+                       odi = add_orphan_dir_info(sctx, dir, dir_gen);
                        if (IS_ERR(odi)) {
                                ret = PTR_ERR(odi);
                                goto out;
@@ -3038,6 +3047,7 @@ static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
                return -ENOMEM;
        dm->ino = ino;
        dm->rmdir_ino = 0;
+       dm->rmdir_gen = 0;
        dm->orphanized = orphanized;
 
        while (*p) {
@@ -3183,7 +3193,7 @@ static int path_loop(struct send_ctx *sctx, struct fs_path *name,
        while (ino != BTRFS_FIRST_FREE_OBJECTID) {
                fs_path_reset(name);
 
-               if (is_waiting_for_rm(sctx, ino))
+               if (is_waiting_for_rm(sctx, ino, gen))
                        break;
                if (is_waiting_for_move(sctx, ino)) {
                        if (*ancestor_ino == 0)
@@ -3223,6 +3233,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
        u64 parent_ino, parent_gen;
        struct waiting_dir_move *dm = NULL;
        u64 rmdir_ino = 0;
+       u64 rmdir_gen;
        u64 ancestor;
        bool is_orphan;
        int ret;
@@ -3237,6 +3248,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
        dm = get_waiting_dir_move(sctx, pm->ino);
        ASSERT(dm);
        rmdir_ino = dm->rmdir_ino;
+       rmdir_gen = dm->rmdir_gen;
        is_orphan = dm->orphanized;
        free_waiting_dir_move(sctx, dm);
 
@@ -3273,6 +3285,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
                        dm = get_waiting_dir_move(sctx, pm->ino);
                        ASSERT(dm);
                        dm->rmdir_ino = rmdir_ino;
+                       dm->rmdir_gen = rmdir_gen;
                }
                goto out;
        }
@@ -3291,7 +3304,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
                struct orphan_dir_info *odi;
                u64 gen;
 
-               odi = get_orphan_dir_info(sctx, rmdir_ino);
+               odi = get_orphan_dir_info(sctx, rmdir_ino, rmdir_gen);
                if (!odi) {
                        /* already deleted */
                        goto finish;
@@ -5499,6 +5512,21 @@ static int clone_range(struct send_ctx *sctx,
                        break;
                offset += clone_len;
                clone_root->offset += clone_len;
+
+               /*
+                * If we are cloning from the file we are currently processing,
+                * and using the send root as the clone root, we must stop once
+                * the current clone offset reaches the current eof of the file
+                * at the receiver, otherwise we would issue an invalid clone
+                * operation (source range going beyond eof) and cause the
+                * receiver to fail. So if we reach the current eof, bail out
+                * and fallback to a regular write.
+                */
+               if (clone_root->root == sctx->send_root &&
+                   clone_root->ino == sctx->cur_ino &&
+                   clone_root->offset >= sctx->cur_inode_next_write_offset)
+                       break;
+
                data_offset += clone_len;
 next:
                path->slots[0]++;
index 6409956..e834746 100644 (file)
@@ -532,7 +532,9 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
 
        loops = 0;
        while ((delalloc_bytes || dio_bytes) && loops < 3) {
-               btrfs_start_delalloc_roots(fs_info, items);
+               u64 nr_pages = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
+
+               btrfs_start_delalloc_roots(fs_info, nr_pages, true);
 
                loops++;
                if (wait_ordered && !trans) {
index 022f208..12d7d3b 100644 (file)
@@ -175,7 +175,7 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
        btrfs_discard_stop(fs_info);
 
        /* btrfs handle error by forcing the filesystem readonly */
-       sb->s_flags |= SB_RDONLY;
+       btrfs_set_sb_rdonly(sb);
        btrfs_info(fs_info, "forced readonly");
        /*
         * Note that a running device replace operation is not canceled here
@@ -1953,7 +1953,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                /* avoid complains from lockdep et al. */
                up(&fs_info->uuid_tree_rescan_sem);
 
-               sb->s_flags |= SB_RDONLY;
+               btrfs_set_sb_rdonly(sb);
 
                /*
                 * Setting SB_RDONLY will put the cleaner thread to
@@ -1964,10 +1964,42 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                 */
                btrfs_delete_unused_bgs(fs_info);
 
+               /*
+                * The cleaner task could be already running before we set the
+                * flag BTRFS_FS_STATE_RO (and SB_RDONLY in the superblock).
+                * We must make sure that after we finish the remount, i.e. after
+                * we call btrfs_commit_super(), the cleaner can no longer start
+                * a transaction - either because it was dropping a dead root,
+                * running delayed iputs or deleting an unused block group (the
+                * cleaner picked a block group from the list of unused block
+                * groups before we were able to in the previous call to
+                * btrfs_delete_unused_bgs()).
+                */
+               wait_on_bit(&fs_info->flags, BTRFS_FS_CLEANER_RUNNING,
+                           TASK_UNINTERRUPTIBLE);
+
+               /*
+                * We've set the superblock to RO mode, so we might have made
+                * the cleaner task sleep without running all pending delayed
+                * iputs. Go through all the delayed iputs here, so that if an
+                * unmount happens without remounting RW we don't end up at
+                * finishing close_ctree() with a non-empty list of delayed
+                * iputs.
+                */
+               btrfs_run_delayed_iputs(fs_info);
+
                btrfs_dev_replace_suspend_for_unmount(fs_info);
                btrfs_scrub_cancel(fs_info);
                btrfs_pause_balance(fs_info);
 
+               /*
+                * Pause the qgroup rescan worker if it is running. We don't want
+                * it to be still running after we are in RO mode, as after that,
+                * by the time we unmount, it might have left a transaction open,
+                * so we would leak the transaction and/or crash.
+                */
+               btrfs_qgroup_wait_for_completion(fs_info, false);
+
                ret = btrfs_commit_super(fs_info);
                if (ret)
                        goto restore;
@@ -2006,7 +2038,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                if (ret)
                        goto restore;
 
-               sb->s_flags &= ~SB_RDONLY;
+               btrfs_clear_sb_rdonly(sb);
 
                set_bit(BTRFS_FS_OPEN, &fs_info->flags);
        }
@@ -2028,6 +2060,8 @@ restore:
        /* We've hit an error - don't reset SB_RDONLY */
        if (sb_rdonly(sb))
                old_flags |= SB_RDONLY;
+       if (!(old_flags & SB_RDONLY))
+               clear_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);
        sb->s_flags = old_flags;
        fs_info->mount_opt = old_opts;
        fs_info->compress_type = old_compress_type;
index 8ca334d..6bd97bd 100644 (file)
@@ -55,8 +55,14 @@ struct inode *btrfs_new_test_inode(void)
        struct inode *inode;
 
        inode = new_inode(test_mnt->mnt_sb);
-       if (inode)
-               inode_init_owner(inode, NULL, S_IFREG);
+       if (!inode)
+               return NULL;
+
+       inode->i_mode = S_IFREG;
+       BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
+       BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
+       BTRFS_I(inode)->location.offset = 0;
+       inode_init_owner(inode, NULL, S_IFREG);
 
        return inode;
 }
index 0402206..c9874b1 100644 (file)
@@ -232,11 +232,6 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
                return ret;
        }
 
-       inode->i_mode = S_IFREG;
-       BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
-       BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
-       BTRFS_I(inode)->location.offset = 0;
-
        fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
        if (!fs_info) {
                test_std_err(TEST_ALLOC_FS_INFO);
@@ -835,10 +830,6 @@ static int test_hole_first(u32 sectorsize, u32 nodesize)
                return ret;
        }
 
-       BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
-       BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
-       BTRFS_I(inode)->location.offset = 0;
-
        fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
        if (!fs_info) {
                test_std_err(TEST_ALLOC_FS_INFO);
index 8e0f7a1..6af7f2b 100644 (file)
@@ -2264,14 +2264,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
         */
        btrfs_free_log_root_tree(trans, fs_info);
 
-       /*
-        * commit_fs_roots() can call btrfs_save_ino_cache(), which generates
-        * new delayed refs. Must handle them or qgroup can be wrong.
-        */
-       ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
-       if (ret)
-               goto unlock_tree_log;
-
        /*
         * Since fs roots are all committed, we can get a quite accurate
         * new_roots. So let's do quota accounting.
index 028e733..582061c 100644 (file)
@@ -760,6 +760,7 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
 {
        struct btrfs_fs_info *fs_info = leaf->fs_info;
        u64 length;
+       u64 chunk_end;
        u64 stripe_len;
        u16 num_stripes;
        u16 sub_stripes;
@@ -814,6 +815,12 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
                          "invalid chunk length, have %llu", length);
                return -EUCLEAN;
        }
+       if (unlikely(check_add_overflow(logical, length, &chunk_end))) {
+               chunk_err(leaf, chunk, logical,
+"invalid chunk logical start and length, have logical start %llu length %llu",
+                         logical, length);
+               return -EUCLEAN;
+       }
        if (unlikely(!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN)) {
                chunk_err(leaf, chunk, logical,
                          "invalid chunk stripe length: %llu",
index ee086fc..0a6de85 100644 (file)
@@ -2592,7 +2592,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
        set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
 
        if (seeding_dev) {
-               sb->s_flags &= ~SB_RDONLY;
+               btrfs_clear_sb_rdonly(sb);
                ret = btrfs_prepare_sprout(fs_info);
                if (ret) {
                        btrfs_abort_transaction(trans, ret);
@@ -2728,7 +2728,7 @@ error_sysfs:
        mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 error_trans:
        if (seeding_dev)
-               sb->s_flags |= SB_RDONLY;
+               btrfs_set_sb_rdonly(sb);
        if (trans)
                btrfs_end_transaction(trans);
 error_free_zone:
@@ -4317,6 +4317,8 @@ int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
                btrfs_warn(fs_info,
        "balance: cannot set exclusive op status, resume manually");
 
+       btrfs_release_path(path);
+
        mutex_lock(&fs_info->balance_mutex);
        BUG_ON(fs_info->balance_ctl);
        spin_lock(&fs_info->balance_lock);
index 8bda092..e027c71 100644 (file)
@@ -413,7 +413,6 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
 
        inode = d_backing_inode(object->backer);
        ASSERT(S_ISREG(inode->i_mode));
-       ASSERT(inode->i_mapping->a_ops->readpages);
 
        /* calculate the shift required to use bmap */
        shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
@@ -713,7 +712,6 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
 
        inode = d_backing_inode(object->backer);
        ASSERT(S_ISREG(inode->i_mode));
-       ASSERT(inode->i_mapping->a_ops->readpages);
 
        /* calculate the shift required to use bmap */
        shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
index 8405870..d87bd85 100644 (file)
@@ -5038,7 +5038,7 @@ bad:
        return;
 }
 
-static struct ceph_connection *con_get(struct ceph_connection *con)
+static struct ceph_connection *mds_get_con(struct ceph_connection *con)
 {
        struct ceph_mds_session *s = con->private;
 
@@ -5047,7 +5047,7 @@ static struct ceph_connection *con_get(struct ceph_connection *con)
        return NULL;
 }
 
-static void con_put(struct ceph_connection *con)
+static void mds_put_con(struct ceph_connection *con)
 {
        struct ceph_mds_session *s = con->private;
 
@@ -5058,7 +5058,7 @@ static void con_put(struct ceph_connection *con)
  * if the client is unresponsive for long enough, the mds will kill
  * the session entirely.
  */
-static void peer_reset(struct ceph_connection *con)
+static void mds_peer_reset(struct ceph_connection *con)
 {
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
@@ -5067,7 +5067,7 @@ static void peer_reset(struct ceph_connection *con)
        send_mds_reconnect(mdsc, s);
 }
 
-static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
+static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
 {
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
@@ -5125,8 +5125,8 @@ out:
  * Note: returned pointer is the address of a structure that's
  * managed separately.  Caller must *not* attempt to free it.
  */
-static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
-                                       int *proto, int force_new)
+static struct ceph_auth_handshake *
+mds_get_authorizer(struct ceph_connection *con, int *proto, int force_new)
 {
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
@@ -5142,7 +5142,7 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
        return auth;
 }
 
-static int add_authorizer_challenge(struct ceph_connection *con,
+static int mds_add_authorizer_challenge(struct ceph_connection *con,
                                    void *challenge_buf, int challenge_buf_len)
 {
        struct ceph_mds_session *s = con->private;
@@ -5153,7 +5153,7 @@ static int add_authorizer_challenge(struct ceph_connection *con,
                                            challenge_buf, challenge_buf_len);
 }
 
-static int verify_authorizer_reply(struct ceph_connection *con)
+static int mds_verify_authorizer_reply(struct ceph_connection *con)
 {
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
@@ -5165,7 +5165,7 @@ static int verify_authorizer_reply(struct ceph_connection *con)
                NULL, NULL, NULL, NULL);
 }
 
-static int invalidate_authorizer(struct ceph_connection *con)
+static int mds_invalidate_authorizer(struct ceph_connection *con)
 {
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
@@ -5288,15 +5288,15 @@ static int mds_check_message_signature(struct ceph_msg *msg)
 }
 
 static const struct ceph_connection_operations mds_con_ops = {
-       .get = con_get,
-       .put = con_put,
-       .dispatch = dispatch,
-       .get_authorizer = get_authorizer,
-       .add_authorizer_challenge = add_authorizer_challenge,
-       .verify_authorizer_reply = verify_authorizer_reply,
-       .invalidate_authorizer = invalidate_authorizer,
-       .peer_reset = peer_reset,
+       .get = mds_get_con,
+       .put = mds_put_con,
        .alloc_msg = mds_alloc_msg,
+       .dispatch = mds_dispatch,
+       .peer_reset = mds_peer_reset,
+       .get_authorizer = mds_get_authorizer,
+       .add_authorizer_challenge = mds_add_authorizer_challenge,
+       .verify_authorizer_reply = mds_verify_authorizer_reply,
+       .invalidate_authorizer = mds_invalidate_authorizer,
        .sign_message = mds_sign_message,
        .check_message_signature = mds_check_message_signature,
        .get_auth_request = mds_get_auth_request,
index b9df855..c8ef24b 100644 (file)
@@ -2195,7 +2195,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
        if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
                tcon->nohandlecache = ctx->nohandlecache;
        else
-               tcon->nohandlecache = 1;
+               tcon->nohandlecache = true;
        tcon->nodelete = ctx->nodelete;
        tcon->local_lease = ctx->local_lease;
        INIT_LIST_HEAD(&tcon->pending_opens);
@@ -2628,7 +2628,7 @@ void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
        } else if (ctx)
                tcon->unix_ext = 1; /* Unix Extensions supported */
 
-       if (tcon->unix_ext == 0) {
+       if (!tcon->unix_ext) {
                cifs_dbg(FYI, "Unix extensions disabled so not set on reconnect\n");
                return;
        }
@@ -3740,7 +3740,7 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
 
        if (!ses->binding) {
                ses->capabilities = server->capabilities;
-               if (linuxExtEnabled == 0)
+               if (!linuxExtEnabled)
                        ses->capabilities &= (~server->vals->cap_unix);
 
                if (ses->auth_key.response) {
index 6ad6ba5..0fdb0de 100644 (file)
@@ -1260,7 +1260,8 @@ void dfs_cache_del_vol(const char *fullpath)
        vi = find_vol(fullpath);
        spin_unlock(&vol_list_lock);
 
-       kref_put(&vi->refcnt, vol_release);
+       if (!IS_ERR(vi))
+               kref_put(&vi->refcnt, vol_release);
 }
 
 /**
index 0afccbb..076bcad 100644 (file)
@@ -303,8 +303,6 @@ do {                                                                        \
 int
 smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx)
 {
-       int rc = 0;
-
        memcpy(new_ctx, ctx, sizeof(*ctx));
        new_ctx->prepath = NULL;
        new_ctx->mount_options = NULL;
@@ -327,7 +325,7 @@ smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx
        DUP_CTX_STR(nodename);
        DUP_CTX_STR(iocharset);
 
-       return rc;
+       return 0;
 }
 
 static int
index 067eb44..794fc3b 100644 (file)
@@ -3248,7 +3248,7 @@ close_exit:
        free_rsp_buf(resp_buftype, rsp);
 
        /* retry close in a worker thread if this one is interrupted */
-       if (rc == -EINTR) {
+       if (is_interrupt_error(rc)) {
                int tmp_rc;
 
                tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid,
index 204a622..d85edf5 100644 (file)
@@ -424,7 +424,7 @@ struct smb2_rdma_transform_capabilities_context {
        __le16  TransformCount;
        __u16   Reserved1;
        __u32   Reserved2;
-       __le16  RDMATransformIds[1];
+       __le16  RDMATransformIds[];
 } __packed;
 
 /* Signing algorithms */
index e9abb41..95ef26b 100644 (file)
@@ -338,7 +338,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
        if (ssocket == NULL)
                return -EAGAIN;
 
-       if (signal_pending(current)) {
+       if (fatal_signal_pending(current)) {
                cifs_dbg(FYI, "signal pending before send request\n");
                return -ERESTARTSYS;
        }
@@ -429,7 +429,7 @@ unmask:
 
        if (signal_pending(current) && (total_len != send_length)) {
                cifs_dbg(FYI, "signal is pending after attempt to send\n");
-               rc = -EINTR;
+               rc = -ERESTARTSYS;
        }
 
        /* uncork it */
index 1a0a827..be79904 100644 (file)
@@ -372,20 +372,3 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
        }
        return err;
 }
-
-int __ext4_handle_dirty_super(const char *where, unsigned int line,
-                             handle_t *handle, struct super_block *sb)
-{
-       struct buffer_head *bh = EXT4_SB(sb)->s_sbh;
-       int err = 0;
-
-       ext4_superblock_csum_set(sb);
-       if (ext4_handle_valid(handle)) {
-               err = jbd2_journal_dirty_metadata(handle, bh);
-               if (err)
-                       ext4_journal_abort_handle(where, line, __func__,
-                                                 bh, handle, err);
-       } else
-               mark_buffer_dirty(bh);
-       return err;
-}
index a124c68..0d2fa42 100644 (file)
@@ -244,9 +244,6 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
                                 handle_t *handle, struct inode *inode,
                                 struct buffer_head *bh);
 
-int __ext4_handle_dirty_super(const char *where, unsigned int line,
-                             handle_t *handle, struct super_block *sb);
-
 #define ext4_journal_get_write_access(handle, bh) \
        __ext4_journal_get_write_access(__func__, __LINE__, (handle), (bh))
 #define ext4_forget(handle, is_metadata, inode, bh, block_nr) \
@@ -257,8 +254,6 @@ int __ext4_handle_dirty_super(const char *where, unsigned int line,
 #define ext4_handle_dirty_metadata(handle, inode, bh) \
        __ext4_handle_dirty_metadata(__func__, __LINE__, (handle), (inode), \
                                     (bh))
-#define ext4_handle_dirty_super(handle, sb) \
-       __ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb))
 
 handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
                                  int type, int blocks, int rsv_blocks,
index 4fcc21c..0a14a7c 100644 (file)
@@ -604,13 +604,13 @@ void ext4_fc_track_range(handle_t *handle, struct inode *inode, ext4_lblk_t star
        trace_ext4_fc_track_range(inode, start, end, ret);
 }
 
-static void ext4_fc_submit_bh(struct super_block *sb)
+static void ext4_fc_submit_bh(struct super_block *sb, bool is_tail)
 {
        int write_flags = REQ_SYNC;
        struct buffer_head *bh = EXT4_SB(sb)->s_fc_bh;
 
-       /* TODO: REQ_FUA | REQ_PREFLUSH is unnecessarily expensive. */
-       if (test_opt(sb, BARRIER))
+       /* Add REQ_FUA | REQ_PREFLUSH only its tail */
+       if (test_opt(sb, BARRIER) && is_tail)
                write_flags |= REQ_FUA | REQ_PREFLUSH;
        lock_buffer(bh);
        set_buffer_dirty(bh);
@@ -684,7 +684,7 @@ static u8 *ext4_fc_reserve_space(struct super_block *sb, int len, u32 *crc)
                *crc = ext4_chksum(sbi, *crc, tl, sizeof(*tl));
        if (pad_len > 0)
                ext4_fc_memzero(sb, tl + 1, pad_len, crc);
-       ext4_fc_submit_bh(sb);
+       ext4_fc_submit_bh(sb, false);
 
        ret = jbd2_fc_get_buf(EXT4_SB(sb)->s_journal, &bh);
        if (ret)
@@ -741,7 +741,7 @@ static int ext4_fc_write_tail(struct super_block *sb, u32 crc)
        tail.fc_crc = cpu_to_le32(crc);
        ext4_fc_memcpy(sb, dst, &tail.fc_crc, sizeof(tail.fc_crc), NULL);
 
-       ext4_fc_submit_bh(sb);
+       ext4_fc_submit_bh(sb, true);
 
        return 0;
 }
@@ -1268,7 +1268,7 @@ static void ext4_fc_cleanup(journal_t *journal, int full)
        list_splice_init(&sbi->s_fc_dentry_q[FC_Q_STAGING],
                                &sbi->s_fc_dentry_q[FC_Q_MAIN]);
        list_splice_init(&sbi->s_fc_q[FC_Q_STAGING],
-                               &sbi->s_fc_q[FC_Q_STAGING]);
+                               &sbi->s_fc_q[FC_Q_MAIN]);
 
        ext4_clear_mount_flag(sb, EXT4_MF_FC_COMMITTING);
        ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
@@ -1318,14 +1318,14 @@ static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl)
        entry.len = darg.dname_len;
        inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL);
 
-       if (IS_ERR_OR_NULL(inode)) {
+       if (IS_ERR(inode)) {
                jbd_debug(1, "Inode %d not found", darg.ino);
                return 0;
        }
 
        old_parent = ext4_iget(sb, darg.parent_ino,
                                EXT4_IGET_NORMAL);
-       if (IS_ERR_OR_NULL(old_parent)) {
+       if (IS_ERR(old_parent)) {
                jbd_debug(1, "Dir with inode  %d not found", darg.parent_ino);
                iput(inode);
                return 0;
@@ -1410,7 +1410,7 @@ static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl)
                        darg.parent_ino, darg.dname_len);
 
        inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL);
-       if (IS_ERR_OR_NULL(inode)) {
+       if (IS_ERR(inode)) {
                jbd_debug(1, "Inode not found.");
                return 0;
        }
@@ -1466,10 +1466,11 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
        trace_ext4_fc_replay(sb, tag, ino, 0, 0);
 
        inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
-       if (!IS_ERR_OR_NULL(inode)) {
+       if (!IS_ERR(inode)) {
                ext4_ext_clear_bb(inode);
                iput(inode);
        }
+       inode = NULL;
 
        ext4_fc_record_modified_inode(sb, ino);
 
@@ -1512,7 +1513,7 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
 
        /* Given that we just wrote the inode on disk, this SHOULD succeed. */
        inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
-       if (IS_ERR_OR_NULL(inode)) {
+       if (IS_ERR(inode)) {
                jbd_debug(1, "Inode not found.");
                return -EFSCORRUPTED;
        }
@@ -1564,7 +1565,7 @@ static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl)
                goto out;
 
        inode = ext4_iget(sb, darg.ino, EXT4_IGET_NORMAL);
-       if (IS_ERR_OR_NULL(inode)) {
+       if (IS_ERR(inode)) {
                jbd_debug(1, "inode %d not found.", darg.ino);
                inode = NULL;
                ret = -EINVAL;
@@ -1577,7 +1578,7 @@ static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl)
                 * dot and dot dot dirents are setup properly.
                 */
                dir = ext4_iget(sb, darg.parent_ino, EXT4_IGET_NORMAL);
-               if (IS_ERR_OR_NULL(dir)) {
+               if (IS_ERR(dir)) {
                        jbd_debug(1, "Dir %d not found.", darg.ino);
                        goto out;
                }
@@ -1653,7 +1654,7 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
 
        inode = ext4_iget(sb, le32_to_cpu(fc_add_ex->fc_ino),
                                EXT4_IGET_NORMAL);
-       if (IS_ERR_OR_NULL(inode)) {
+       if (IS_ERR(inode)) {
                jbd_debug(1, "Inode not found.");
                return 0;
        }
@@ -1777,7 +1778,7 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl)
                le32_to_cpu(lrange->fc_ino), cur, remaining);
 
        inode = ext4_iget(sb, le32_to_cpu(lrange->fc_ino), EXT4_IGET_NORMAL);
-       if (IS_ERR_OR_NULL(inode)) {
+       if (IS_ERR(inode)) {
                jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange->fc_ino));
                return 0;
        }
@@ -1832,7 +1833,7 @@ static void ext4_fc_set_bitmaps_and_counters(struct super_block *sb)
        for (i = 0; i < state->fc_modified_inodes_used; i++) {
                inode = ext4_iget(sb, state->fc_modified_inodes[i],
                        EXT4_IGET_NORMAL);
-               if (IS_ERR_OR_NULL(inode)) {
+               if (IS_ERR(inode)) {
                        jbd_debug(1, "Inode %d not found.",
                                state->fc_modified_inodes[i]);
                        continue;
@@ -1849,7 +1850,7 @@ static void ext4_fc_set_bitmaps_and_counters(struct super_block *sb)
 
                        if (ret > 0) {
                                path = ext4_find_extent(inode, map.m_lblk, NULL, 0);
-                               if (!IS_ERR_OR_NULL(path)) {
+                               if (!IS_ERR(path)) {
                                        for (j = 0; j < path->p_depth; j++)
                                                ext4_mb_mark_bb(inode->i_sb,
                                                        path[j].p_block, 1, 1);
index 3ed8c04..349b27f 100644 (file)
@@ -809,9 +809,12 @@ static int ext4_sample_last_mounted(struct super_block *sb,
        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
        if (err)
                goto out_journal;
-       strlcpy(sbi->s_es->s_last_mounted, cp,
+       lock_buffer(sbi->s_sbh);
+       strncpy(sbi->s_es->s_last_mounted, cp,
                sizeof(sbi->s_es->s_last_mounted));
-       ext4_handle_dirty_super(handle, sb);
+       ext4_superblock_csum_set(sb);
+       unlock_buffer(sbi->s_sbh);
+       ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
 out_journal:
        ext4_journal_stop(handle);
 out:
index 2794688..c173c84 100644 (file)
@@ -5150,9 +5150,13 @@ static int ext4_do_update_inode(handle_t *handle,
                err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
                if (err)
                        goto out_brelse;
+               lock_buffer(EXT4_SB(sb)->s_sbh);
                ext4_set_feature_large_file(sb);
+               ext4_superblock_csum_set(sb);
+               unlock_buffer(EXT4_SB(sb)->s_sbh);
                ext4_handle_sync(handle);
-               err = ext4_handle_dirty_super(handle, sb);
+               err = ext4_handle_dirty_metadata(handle, NULL,
+                                                EXT4_SB(sb)->s_sbh);
        }
        ext4_update_inode_fsync_trans(handle, inode, need_datasync);
 out_brelse:
index 524e134..d9665d2 100644 (file)
@@ -1157,7 +1157,10 @@ resizefs_out:
                        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
                        if (err)
                                goto pwsalt_err_journal;
+                       lock_buffer(sbi->s_sbh);
                        generate_random_uuid(sbi->s_es->s_encrypt_pw_salt);
+                       ext4_superblock_csum_set(sb);
+                       unlock_buffer(sbi->s_sbh);
                        err = ext4_handle_dirty_metadata(handle, NULL,
                                                         sbi->s_sbh);
                pwsalt_err_journal:
index b17a082..cf652ba 100644 (file)
@@ -2976,14 +2976,17 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
            (le32_to_cpu(sbi->s_es->s_inodes_count))) {
                /* Insert this inode at the head of the on-disk orphan list */
                NEXT_ORPHAN(inode) = le32_to_cpu(sbi->s_es->s_last_orphan);
+               lock_buffer(sbi->s_sbh);
                sbi->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
+               ext4_superblock_csum_set(sb);
+               unlock_buffer(sbi->s_sbh);
                dirty = true;
        }
        list_add(&EXT4_I(inode)->i_orphan, &sbi->s_orphan);
        mutex_unlock(&sbi->s_orphan_lock);
 
        if (dirty) {
-               err = ext4_handle_dirty_super(handle, sb);
+               err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
                rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
                if (!err)
                        err = rc;
@@ -3059,9 +3062,12 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
                        mutex_unlock(&sbi->s_orphan_lock);
                        goto out_brelse;
                }
+               lock_buffer(sbi->s_sbh);
                sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
+               ext4_superblock_csum_set(inode->i_sb);
+               unlock_buffer(sbi->s_sbh);
                mutex_unlock(&sbi->s_orphan_lock);
-               err = ext4_handle_dirty_super(handle, inode->i_sb);
+               err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
        } else {
                struct ext4_iloc iloc2;
                struct inode *i_prev =
@@ -3593,9 +3599,6 @@ static int ext4_setent(handle_t *handle, struct ext4_renament *ent,
                        return retval2;
                }
        }
-       brelse(ent->bh);
-       ent->bh = NULL;
-
        return retval;
 }
 
@@ -3794,6 +3797,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
                }
        }
 
+       old_file_type = old.de->file_type;
        if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir))
                ext4_handle_sync(handle);
 
@@ -3821,7 +3825,6 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
        force_reread = (new.dir->i_ino == old.dir->i_ino &&
                        ext4_test_inode_flag(new.dir, EXT4_INODE_INLINE_DATA));
 
-       old_file_type = old.de->file_type;
        if (whiteout) {
                /*
                 * Do this before adding a new entry, so the old entry is sure
@@ -3919,15 +3922,19 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
        retval = 0;
 
 end_rename:
-       brelse(old.dir_bh);
-       brelse(old.bh);
-       brelse(new.bh);
        if (whiteout) {
-               if (retval)
+               if (retval) {
+                       ext4_setent(handle, &old,
+                               old.inode->i_ino, old_file_type);
                        drop_nlink(whiteout);
+               }
                unlock_new_inode(whiteout);
                iput(whiteout);
+
        }
+       brelse(old.dir_bh);
+       brelse(old.bh);
+       brelse(new.bh);
        if (handle)
                ext4_journal_stop(handle);
        return retval;
index 928700d..bd0d185 100644 (file)
@@ -899,8 +899,11 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
        EXT4_SB(sb)->s_gdb_count++;
        ext4_kvfree_array_rcu(o_group_desc);
 
+       lock_buffer(EXT4_SB(sb)->s_sbh);
        le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
-       err = ext4_handle_dirty_super(handle, sb);
+       ext4_superblock_csum_set(sb);
+       unlock_buffer(EXT4_SB(sb)->s_sbh);
+       err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
        if (err)
                ext4_std_error(sb, err);
        return err;
@@ -1384,6 +1387,7 @@ static void ext4_update_super(struct super_block *sb,
        reserved_blocks *= blocks_count;
        do_div(reserved_blocks, 100);
 
+       lock_buffer(sbi->s_sbh);
        ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count);
        ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks);
        le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) *
@@ -1421,6 +1425,8 @@ static void ext4_update_super(struct super_block *sb,
         * active. */
        ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
                                reserved_blocks);
+       ext4_superblock_csum_set(sb);
+       unlock_buffer(sbi->s_sbh);
 
        /* Update the free space counts */
        percpu_counter_add(&sbi->s_freeclusters_counter,
@@ -1515,7 +1521,7 @@ static int ext4_flex_group_add(struct super_block *sb,
 
        ext4_update_super(sb, flex_gd);
 
-       err = ext4_handle_dirty_super(handle, sb);
+       err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
 
 exit_journal:
        err2 = ext4_journal_stop(handle);
@@ -1717,15 +1723,18 @@ static int ext4_group_extend_no_check(struct super_block *sb,
                goto errout;
        }
 
+       lock_buffer(EXT4_SB(sb)->s_sbh);
        ext4_blocks_count_set(es, o_blocks_count + add);
        ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add);
+       ext4_superblock_csum_set(sb);
+       unlock_buffer(EXT4_SB(sb)->s_sbh);
        ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
                   o_blocks_count + add);
        /* We add the blocks to the bitmap and set the group need init bit */
        err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
        if (err)
                goto errout;
-       ext4_handle_dirty_super(handle, sb);
+       ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
        ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
                   o_blocks_count + add);
 errout:
@@ -1874,12 +1883,15 @@ static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
        if (err)
                goto errout;
 
+       lock_buffer(sbi->s_sbh);
        ext4_clear_feature_resize_inode(sb);
        ext4_set_feature_meta_bg(sb);
        sbi->s_es->s_first_meta_bg =
                cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));
+       ext4_superblock_csum_set(sb);
+       unlock_buffer(sbi->s_sbh);
 
-       err = ext4_handle_dirty_super(handle, sb);
+       err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
        if (err) {
                ext4_std_error(sb, err);
                goto errout;
index 2112178..9a6f987 100644 (file)
@@ -65,7 +65,8 @@ static struct ratelimit_state ext4_mount_msg_ratelimit;
 static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
                             unsigned long journal_devnum);
 static int ext4_show_options(struct seq_file *seq, struct dentry *root);
-static int ext4_commit_super(struct super_block *sb, int sync);
+static void ext4_update_super(struct super_block *sb);
+static int ext4_commit_super(struct super_block *sb);
 static int ext4_mark_recovery_complete(struct super_block *sb,
                                        struct ext4_super_block *es);
 static int ext4_clear_journal_err(struct super_block *sb,
@@ -586,15 +587,12 @@ static int ext4_errno_to_code(int errno)
        return EXT4_ERR_UNKNOWN;
 }
 
-static void __save_error_info(struct super_block *sb, int error,
-                             __u32 ino, __u64 block,
-                             const char *func, unsigned int line)
+static void save_error_info(struct super_block *sb, int error,
+                           __u32 ino, __u64 block,
+                           const char *func, unsigned int line)
 {
        struct ext4_sb_info *sbi = EXT4_SB(sb);
 
-       EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
-       if (bdev_read_only(sb->s_bdev))
-               return;
        /* We default to EFSCORRUPTED error... */
        if (error == 0)
                error = EFSCORRUPTED;
@@ -618,15 +616,6 @@ static void __save_error_info(struct super_block *sb, int error,
        spin_unlock(&sbi->s_error_lock);
 }
 
-static void save_error_info(struct super_block *sb, int error,
-                           __u32 ino, __u64 block,
-                           const char *func, unsigned int line)
-{
-       __save_error_info(sb, error, ino, block, func, line);
-       if (!bdev_read_only(sb->s_bdev))
-               ext4_commit_super(sb, 1);
-}
-
 /* Deal with the reporting of failure conditions on a filesystem such as
  * inconsistencies detected or read IO failures.
  *
@@ -647,19 +636,40 @@ static void save_error_info(struct super_block *sb, int error,
  * used to deal with unrecoverable failures such as journal IO errors or ENOMEM
  * at a critical moment in log management.
  */
-static void ext4_handle_error(struct super_block *sb, bool force_ro)
+static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
+                             __u32 ino, __u64 block,
+                             const char *func, unsigned int line)
 {
        journal_t *journal = EXT4_SB(sb)->s_journal;
+       bool continue_fs = !force_ro && test_opt(sb, ERRORS_CONT);
 
+       EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
        if (test_opt(sb, WARN_ON_ERROR))
                WARN_ON_ONCE(1);
 
-       if (sb_rdonly(sb) || (!force_ro && test_opt(sb, ERRORS_CONT)))
+       if (!continue_fs && !sb_rdonly(sb)) {
+               ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
+               if (journal)
+                       jbd2_journal_abort(journal, -EIO);
+       }
+
+       if (!bdev_read_only(sb->s_bdev)) {
+               save_error_info(sb, error, ino, block, func, line);
+               /*
+                * In case the fs should keep running, we need to writeout
+                * superblock through the journal. Due to lock ordering
+                * constraints, it may not be safe to do it right here so we
+                * defer superblock flushing to a workqueue.
+                */
+               if (continue_fs)
+                       schedule_work(&EXT4_SB(sb)->s_error_work);
+               else
+                       ext4_commit_super(sb);
+       }
+
+       if (sb_rdonly(sb) || continue_fs)
                return;
 
-       ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
-       if (journal)
-               jbd2_journal_abort(journal, -EIO);
        /*
         * We force ERRORS_RO behavior when system is rebooting. Otherwise we
         * could panic during 'reboot -f' as the underlying device got already
@@ -682,8 +692,39 @@ static void flush_stashed_error_work(struct work_struct *work)
 {
        struct ext4_sb_info *sbi = container_of(work, struct ext4_sb_info,
                                                s_error_work);
+       journal_t *journal = sbi->s_journal;
+       handle_t *handle;
 
-       ext4_commit_super(sbi->s_sb, 1);
+       /*
+        * If the journal is still running, we have to write out superblock
+        * through the journal to avoid collisions of other journalled sb
+        * updates.
+        *
+        * We use directly jbd2 functions here to avoid recursing back into
+        * ext4 error handling code during handling of previous errors.
+        */
+       if (!sb_rdonly(sbi->s_sb) && journal) {
+               handle = jbd2_journal_start(journal, 1);
+               if (IS_ERR(handle))
+                       goto write_directly;
+               if (jbd2_journal_get_write_access(handle, sbi->s_sbh)) {
+                       jbd2_journal_stop(handle);
+                       goto write_directly;
+               }
+               ext4_update_super(sbi->s_sb);
+               if (jbd2_journal_dirty_metadata(handle, sbi->s_sbh)) {
+                       jbd2_journal_stop(handle);
+                       goto write_directly;
+               }
+               jbd2_journal_stop(handle);
+               return;
+       }
+write_directly:
+       /*
+        * Write through journal failed. Write sb directly to get error info
+        * out and hope for the best.
+        */
+       ext4_commit_super(sbi->s_sb);
 }
 
 #define ext4_error_ratelimit(sb)                                       \
@@ -710,8 +751,7 @@ void __ext4_error(struct super_block *sb, const char *function,
                       sb->s_id, function, line, current->comm, &vaf);
                va_end(args);
        }
-       save_error_info(sb, error, 0, block, function, line);
-       ext4_handle_error(sb, force_ro);
+       ext4_handle_error(sb, force_ro, error, 0, block, function, line);
 }
 
 void __ext4_error_inode(struct inode *inode, const char *function,
@@ -741,9 +781,8 @@ void __ext4_error_inode(struct inode *inode, const char *function,
                               current->comm, &vaf);
                va_end(args);
        }
-       save_error_info(inode->i_sb, error, inode->i_ino, block,
-                       function, line);
-       ext4_handle_error(inode->i_sb, false);
+       ext4_handle_error(inode->i_sb, false, error, inode->i_ino, block,
+                         function, line);
 }
 
 void __ext4_error_file(struct file *file, const char *function,
@@ -780,9 +819,8 @@ void __ext4_error_file(struct file *file, const char *function,
                               current->comm, path, &vaf);
                va_end(args);
        }
-       save_error_info(inode->i_sb, EFSCORRUPTED, inode->i_ino, block,
-                       function, line);
-       ext4_handle_error(inode->i_sb, false);
+       ext4_handle_error(inode->i_sb, false, EFSCORRUPTED, inode->i_ino, block,
+                         function, line);
 }
 
 const char *ext4_decode_error(struct super_block *sb, int errno,
@@ -849,8 +887,7 @@ void __ext4_std_error(struct super_block *sb, const char *function,
                       sb->s_id, function, line, errstr);
        }
 
-       save_error_info(sb, -errno, 0, 0, function, line);
-       ext4_handle_error(sb, false);
+       ext4_handle_error(sb, false, -errno, 0, 0, function, line);
 }
 
 void __ext4_msg(struct super_block *sb,
@@ -944,13 +981,16 @@ __acquires(bitlock)
        if (test_opt(sb, ERRORS_CONT)) {
                if (test_opt(sb, WARN_ON_ERROR))
                        WARN_ON_ONCE(1);
-               __save_error_info(sb, EFSCORRUPTED, ino, block, function, line);
-               schedule_work(&EXT4_SB(sb)->s_error_work);
+               EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
+               if (!bdev_read_only(sb->s_bdev)) {
+                       save_error_info(sb, EFSCORRUPTED, ino, block, function,
+                                       line);
+                       schedule_work(&EXT4_SB(sb)->s_error_work);
+               }
                return;
        }
        ext4_unlock_group(sb, grp);
-       save_error_info(sb, EFSCORRUPTED, ino, block, function, line);
-       ext4_handle_error(sb, false);
+       ext4_handle_error(sb, false, EFSCORRUPTED, ino, block, function, line);
        /*
         * We only get here in the ERRORS_RO case; relocking the group
         * may be dangerous, but nothing bad will happen since the
@@ -1152,7 +1192,7 @@ static void ext4_put_super(struct super_block *sb)
                es->s_state = cpu_to_le16(sbi->s_mount_state);
        }
        if (!sb_rdonly(sb))
-               ext4_commit_super(sb, 1);
+               ext4_commit_super(sb);
 
        rcu_read_lock();
        group_desc = rcu_dereference(sbi->s_group_desc);
@@ -2642,7 +2682,7 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
        if (sbi->s_journal)
                ext4_set_feature_journal_needs_recovery(sb);
 
-       err = ext4_commit_super(sb, 1);
+       err = ext4_commit_super(sb);
 done:
        if (test_opt(sb, DEBUG))
                printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
@@ -4868,7 +4908,7 @@ no_journal:
        if (DUMMY_ENCRYPTION_ENABLED(sbi) && !sb_rdonly(sb) &&
            !ext4_has_feature_encrypt(sb)) {
                ext4_set_feature_encrypt(sb);
-               ext4_commit_super(sb, 1);
+               ext4_commit_super(sb);
        }
 
        /*
@@ -5418,7 +5458,7 @@ static int ext4_load_journal(struct super_block *sb,
                es->s_journal_dev = cpu_to_le32(journal_devnum);
 
                /* Make sure we flush the recovery flag to disk. */
-               ext4_commit_super(sb, 1);
+               ext4_commit_super(sb);
        }
 
        return 0;
@@ -5428,16 +5468,14 @@ err_out:
        return err;
 }
 
-static int ext4_commit_super(struct super_block *sb, int sync)
+/* Copy state of EXT4_SB(sb) into buffer for on-disk superblock */
+static void ext4_update_super(struct super_block *sb)
 {
        struct ext4_sb_info *sbi = EXT4_SB(sb);
-       struct ext4_super_block *es = EXT4_SB(sb)->s_es;
-       struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
-       int error = 0;
-
-       if (!sbh || block_device_ejected(sb))
-               return error;
+       struct ext4_super_block *es = sbi->s_es;
+       struct buffer_head *sbh = sbi->s_sbh;
 
+       lock_buffer(sbh);
        /*
         * If the file system is mounted read-only, don't update the
         * superblock write time.  This avoids updating the superblock
@@ -5451,17 +5489,17 @@ static int ext4_commit_super(struct super_block *sb, int sync)
        if (!(sb->s_flags & SB_RDONLY))
                ext4_update_tstamp(es, s_wtime);
        es->s_kbytes_written =
-               cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
+               cpu_to_le64(sbi->s_kbytes_written +
                    ((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
-                     EXT4_SB(sb)->s_sectors_written_start) >> 1));
-       if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
+                     sbi->s_sectors_written_start) >> 1));
+       if (percpu_counter_initialized(&sbi->s_freeclusters_counter))
                ext4_free_blocks_count_set(es,
-                       EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
-                               &EXT4_SB(sb)->s_freeclusters_counter)));
-       if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
+                       EXT4_C2B(sbi, percpu_counter_sum_positive(
+                               &sbi->s_freeclusters_counter)));
+       if (percpu_counter_initialized(&sbi->s_freeinodes_counter))
                es->s_free_inodes_count =
                        cpu_to_le32(percpu_counter_sum_positive(
-                               &EXT4_SB(sb)->s_freeinodes_counter));
+                               &sbi->s_freeinodes_counter));
        /* Copy error information to the on-disk superblock */
        spin_lock(&sbi->s_error_lock);
        if (sbi->s_add_error_count > 0) {
@@ -5502,10 +5540,20 @@ static int ext4_commit_super(struct super_block *sb, int sync)
        }
        spin_unlock(&sbi->s_error_lock);
 
-       BUFFER_TRACE(sbh, "marking dirty");
        ext4_superblock_csum_set(sb);
-       if (sync)
-               lock_buffer(sbh);
+       unlock_buffer(sbh);
+}
+
+static int ext4_commit_super(struct super_block *sb)
+{
+       struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
+       int error = 0;
+
+       if (!sbh || block_device_ejected(sb))
+               return error;
+
+       ext4_update_super(sb);
+
        if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
                /*
                 * Oh, dear.  A previous attempt to write the
@@ -5520,17 +5568,15 @@ static int ext4_commit_super(struct super_block *sb, int sync)
                clear_buffer_write_io_error(sbh);
                set_buffer_uptodate(sbh);
        }
+       BUFFER_TRACE(sbh, "marking dirty");
        mark_buffer_dirty(sbh);
-       if (sync) {
-               unlock_buffer(sbh);
-               error = __sync_dirty_buffer(sbh,
-                       REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0));
-               if (buffer_write_io_error(sbh)) {
-                       ext4_msg(sb, KERN_ERR, "I/O error while writing "
-                              "superblock");
-                       clear_buffer_write_io_error(sbh);
-                       set_buffer_uptodate(sbh);
-               }
+       error = __sync_dirty_buffer(sbh,
+               REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0));
+       if (buffer_write_io_error(sbh)) {
+               ext4_msg(sb, KERN_ERR, "I/O error while writing "
+                      "superblock");
+               clear_buffer_write_io_error(sbh);
+               set_buffer_uptodate(sbh);
        }
        return error;
 }
@@ -5561,7 +5607,7 @@ static int ext4_mark_recovery_complete(struct super_block *sb,
 
        if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) {
                ext4_clear_feature_journal_needs_recovery(sb);
-               ext4_commit_super(sb, 1);
+               ext4_commit_super(sb);
        }
 out:
        jbd2_journal_unlock_updates(journal);
@@ -5603,7 +5649,7 @@ static int ext4_clear_journal_err(struct super_block *sb,
 
                EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
                es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
-               ext4_commit_super(sb, 1);
+               ext4_commit_super(sb);
 
                jbd2_journal_clear_err(journal);
                jbd2_journal_update_sb_errno(journal);
@@ -5705,7 +5751,7 @@ static int ext4_freeze(struct super_block *sb)
                ext4_clear_feature_journal_needs_recovery(sb);
        }
 
-       error = ext4_commit_super(sb, 1);
+       error = ext4_commit_super(sb);
 out:
        if (journal)
                /* we rely on upper layer to stop further updates */
@@ -5727,7 +5773,7 @@ static int ext4_unfreeze(struct super_block *sb)
                ext4_set_feature_journal_needs_recovery(sb);
        }
 
-       ext4_commit_super(sb, 1);
+       ext4_commit_super(sb);
        return 0;
 }
 
@@ -5987,7 +6033,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
        }
 
        if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) {
-               err = ext4_commit_super(sb, 1);
+               err = ext4_commit_super(sb);
                if (err)
                        goto restore_opts;
        }
index 4e3b1f8..3722085 100644 (file)
@@ -792,8 +792,11 @@ static void ext4_xattr_update_super_block(handle_t *handle,
 
        BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
        if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
+               lock_buffer(EXT4_SB(sb)->s_sbh);
                ext4_set_feature_xattr(sb);
-               ext4_handle_dirty_super(handle, sb);
+               ext4_superblock_csum_set(sb);
+               unlock_buffer(EXT4_SB(sb)->s_sbh);
+               ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
        }
 }
 
index acfb558..c41cb88 100644 (file)
@@ -1474,21 +1474,25 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
        }
 
        /*
-        * Some filesystems may redirty the inode during the writeback
-        * due to delalloc, clear dirty metadata flags right before
-        * write_inode()
+        * If the inode has dirty timestamps and we need to write them, call
+        * mark_inode_dirty_sync() to notify the filesystem about it and to
+        * change I_DIRTY_TIME into I_DIRTY_SYNC.
         */
-       spin_lock(&inode->i_lock);
-
-       dirty = inode->i_state & I_DIRTY;
        if ((inode->i_state & I_DIRTY_TIME) &&
-           ((dirty & I_DIRTY_INODE) ||
-            wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync ||
+           (wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync ||
             time_after(jiffies, inode->dirtied_time_when +
                        dirtytime_expire_interval * HZ))) {
-               dirty |= I_DIRTY_TIME;
                trace_writeback_lazytime(inode);
+               mark_inode_dirty_sync(inode);
        }
+
+       /*
+        * Some filesystems may redirty the inode during the writeback
+        * due to delalloc, clear dirty metadata flags right before
+        * write_inode()
+        */
+       spin_lock(&inode->i_lock);
+       dirty = inode->i_state & I_DIRTY;
        inode->i_state &= ~dirty;
 
        /*
@@ -1509,8 +1513,6 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 
        spin_unlock(&inode->i_lock);
 
-       if (dirty & I_DIRTY_TIME)
-               mark_inode_dirty_sync(inode);
        /* Don't write the inode if only I_DIRTY_PAGES was set */
        if (dirty & ~I_DIRTY_PAGES) {
                int err = write_inode(inode, wbc);
index ca46f31..985a9e3 100644 (file)
@@ -262,6 +262,7 @@ struct io_ring_ctx {
                unsigned int            drain_next: 1;
                unsigned int            eventfd_async: 1;
                unsigned int            restricted: 1;
+               unsigned int            sqo_dead: 1;
 
                /*
                 * Ring buffer of indices into array of io_uring_sqe, which is
@@ -353,6 +354,7 @@ struct io_ring_ctx {
                unsigned                cq_entries;
                unsigned                cq_mask;
                atomic_t                cq_timeouts;
+               unsigned                cq_last_tm_flush;
                unsigned long           cq_check_overflow;
                struct wait_queue_head  cq_wait;
                struct fasync_struct    *cq_fasync;
@@ -992,6 +994,9 @@ enum io_mem_account {
        ACCT_PINNED,
 };
 
+static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
+                                           struct task_struct *task);
+
 static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node);
 static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
                        struct io_ring_ctx *ctx);
@@ -1102,6 +1107,9 @@ static void io_sq_thread_drop_mm_files(void)
 
 static int __io_sq_thread_acquire_files(struct io_ring_ctx *ctx)
 {
+       if (current->flags & PF_EXITING)
+               return -EFAULT;
+
        if (!current->files) {
                struct files_struct *files;
                struct nsproxy *nsproxy;
@@ -1129,6 +1137,8 @@ static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
 {
        struct mm_struct *mm;
 
+       if (current->flags & PF_EXITING)
+               return -EFAULT;
        if (current->mm)
                return 0;
 
@@ -1342,11 +1352,6 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
 
        /* order cqe stores with ring update */
        smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
-
-       if (wq_has_sleeper(&ctx->cq_wait)) {
-               wake_up_interruptible(&ctx->cq_wait);
-               kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
-       }
 }
 
 static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req)
@@ -1520,10 +1525,8 @@ static void io_prep_async_work(struct io_kiocb *req)
 {
        const struct io_op_def *def = &io_op_defs[req->opcode];
        struct io_ring_ctx *ctx = req->ctx;
-       struct io_identity *id;
 
        io_req_init_async(req);
-       id = req->work.identity;
 
        if (req->flags & REQ_F_FORCE_ASYNC)
                req->work.flags |= IO_WQ_WORK_CONCURRENT;
@@ -1637,19 +1640,38 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx)
 
 static void io_flush_timeouts(struct io_ring_ctx *ctx)
 {
-       while (!list_empty(&ctx->timeout_list)) {
+       u32 seq;
+
+       if (list_empty(&ctx->timeout_list))
+               return;
+
+       seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
+
+       do {
+               u32 events_needed, events_got;
                struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
                                                struct io_kiocb, timeout.list);
 
                if (io_is_timeout_noseq(req))
                        break;
-               if (req->timeout.target_seq != ctx->cached_cq_tail
-                                       - atomic_read(&ctx->cq_timeouts))
+
+               /*
+                * Since seq can easily wrap around over time, subtract
+                * the last seq at which timeouts were flushed before comparing.
+                * Assuming not more than 2^31-1 events have happened since,
+                * these subtractions won't have wrapped, so we can check if
+                * target is in [last_seq, current_seq] by comparing the two.
+                */
+               events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
+               events_got = seq - ctx->cq_last_tm_flush;
+               if (events_got < events_needed)
                        break;
 
                list_del_init(&req->timeout.list);
                io_kill_timeout(req);
-       }
+       } while (!list_empty(&ctx->timeout_list));
+
+       ctx->cq_last_tm_flush = seq;
 }
 
 static void io_commit_cqring(struct io_ring_ctx *ctx)
@@ -1704,18 +1726,42 @@ static inline unsigned __io_cqring_events(struct io_ring_ctx *ctx)
 
 static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 {
+       /* see waitqueue_active() comment */
+       smp_mb();
+
        if (waitqueue_active(&ctx->wait))
                wake_up(&ctx->wait);
        if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
                wake_up(&ctx->sq_data->wait);
        if (io_should_trigger_evfd(ctx))
                eventfd_signal(ctx->cq_ev_fd, 1);
+       if (waitqueue_active(&ctx->cq_wait)) {
+               wake_up_interruptible(&ctx->cq_wait);
+               kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
+       }
+}
+
+static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
+{
+       /* see waitqueue_active() comment */
+       smp_mb();
+
+       if (ctx->flags & IORING_SETUP_SQPOLL) {
+               if (waitqueue_active(&ctx->wait))
+                       wake_up(&ctx->wait);
+       }
+       if (io_should_trigger_evfd(ctx))
+               eventfd_signal(ctx->cq_ev_fd, 1);
+       if (waitqueue_active(&ctx->cq_wait)) {
+               wake_up_interruptible(&ctx->cq_wait);
+               kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
+       }
 }
 
 /* Returns true if there are no backlogged entries after the flush */
-static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
-                                    struct task_struct *tsk,
-                                    struct files_struct *files)
+static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
+                                      struct task_struct *tsk,
+                                      struct files_struct *files)
 {
        struct io_rings *rings = ctx->rings;
        struct io_kiocb *req, *tmp;
@@ -1768,6 +1814,20 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
        return all_flushed;
 }
 
+static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
+                                    struct task_struct *tsk,
+                                    struct files_struct *files)
+{
+       if (test_bit(0, &ctx->cq_check_overflow)) {
+               /* iopoll syncs against uring_lock, not completion_lock */
+               if (ctx->flags & IORING_SETUP_IOPOLL)
+                       mutex_lock(&ctx->uring_lock);
+               __io_cqring_overflow_flush(ctx, force, tsk, files);
+               if (ctx->flags & IORING_SETUP_IOPOLL)
+                       mutex_unlock(&ctx->uring_lock);
+       }
+}
+
 static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
 {
        struct io_ring_ctx *ctx = req->ctx;
@@ -2127,14 +2187,14 @@ static void __io_req_task_submit(struct io_kiocb *req)
 {
        struct io_ring_ctx *ctx = req->ctx;
 
-       if (!__io_sq_thread_acquire_mm(ctx) &&
-           !__io_sq_thread_acquire_files(ctx)) {
-               mutex_lock(&ctx->uring_lock);
+       mutex_lock(&ctx->uring_lock);
+       if (!ctx->sqo_dead &&
+           !__io_sq_thread_acquire_mm(ctx) &&
+           !__io_sq_thread_acquire_files(ctx))
                __io_queue_sqe(req, NULL);
-               mutex_unlock(&ctx->uring_lock);
-       } else {
+       else
                __io_req_task_cancel(req, -EFAULT);
-       }
+       mutex_unlock(&ctx->uring_lock);
 }
 
 static void io_req_task_submit(struct callback_head *cb)
@@ -2313,20 +2373,8 @@ static void io_double_put_req(struct io_kiocb *req)
                io_free_req(req);
 }
 
-static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
+static unsigned io_cqring_events(struct io_ring_ctx *ctx)
 {
-       if (test_bit(0, &ctx->cq_check_overflow)) {
-               /*
-                * noflush == true is from the waitqueue handler, just ensure
-                * we wake up the task, and the next invocation will flush the
-                * entries. We cannot safely to it from here.
-                */
-               if (noflush)
-                       return -1U;
-
-               io_cqring_overflow_flush(ctx, false, NULL, NULL);
-       }
-
        /* See comment at the top of this file */
        smp_rmb();
        return __io_cqring_events(ctx);
@@ -2424,8 +2472,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
        }
 
        io_commit_cqring(ctx);
-       if (ctx->flags & IORING_SETUP_SQPOLL)
-               io_cqring_ev_posted(ctx);
+       io_cqring_ev_posted_iopoll(ctx);
        io_req_free_batch_finish(ctx, &rb);
 
        if (!list_empty(&again))
@@ -2551,7 +2598,9 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
                 * If we do, we can potentially be spinning for commands that
                 * already triggered a CQE (eg in error).
                 */
-               if (io_cqring_events(ctx, false))
+               if (test_bit(0, &ctx->cq_check_overflow))
+                       __io_cqring_overflow_flush(ctx, false, NULL, NULL);
+               if (io_cqring_events(ctx))
                        break;
 
                /*
@@ -2668,6 +2717,8 @@ static bool io_rw_reissue(struct io_kiocb *req, long res)
        if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
                return false;
 
+       lockdep_assert_held(&req->ctx->uring_lock);
+
        ret = io_sq_thread_acquire_mm_files(req->ctx, req);
 
        if (io_resubmit_prep(req, ret)) {
@@ -5806,6 +5857,12 @@ static int io_timeout(struct io_kiocb *req)
        tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
        req->timeout.target_seq = tail + off;
 
+       /* Update the last seq here in case io_flush_timeouts() hasn't.
+        * This is safe because ->completion_lock is held, and submissions
+        * and completions are never mixed in the same ->completion_lock section.
+        */
+       ctx->cq_last_tm_flush = tail;
+
        /*
         * Insertion sort, ensuring the first entry in the list is always
         * the one we need first.
@@ -6826,7 +6883,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 
        /* if we have a backlog and couldn't flush it all, return BUSY */
        if (test_bit(0, &ctx->sq_check_overflow)) {
-               if (!io_cqring_overflow_flush(ctx, false, NULL, NULL))
+               if (!__io_cqring_overflow_flush(ctx, false, NULL, NULL))
                        return -EBUSY;
        }
 
@@ -6928,7 +6985,8 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
                if (!list_empty(&ctx->iopoll_list))
                        io_do_iopoll(ctx, &nr_events, 0);
 
-               if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)))
+               if (to_submit && !ctx->sqo_dead &&
+                   likely(!percpu_ref_is_dying(&ctx->refs)))
                        ret = io_submit_sqes(ctx, to_submit);
                mutex_unlock(&ctx->uring_lock);
        }
@@ -7029,6 +7087,7 @@ static int io_sq_thread(void *data)
 
                if (sqt_spin || !time_after(jiffies, timeout)) {
                        io_run_task_work();
+                       io_sq_thread_drop_mm_files();
                        cond_resched();
                        if (sqt_spin)
                                timeout = jiffies + sqd->sq_thread_idle;
@@ -7066,6 +7125,7 @@ static int io_sq_thread(void *data)
        }
 
        io_run_task_work();
+       io_sq_thread_drop_mm_files();
 
        if (cur_css)
                io_sq_thread_unassociate_blkcg();
@@ -7089,7 +7149,7 @@ struct io_wait_queue {
        unsigned nr_timeouts;
 };
 
-static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
+static inline bool io_should_wake(struct io_wait_queue *iowq)
 {
        struct io_ring_ctx *ctx = iowq->ctx;
 
@@ -7098,7 +7158,7 @@ static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
         * started waiting. For timeouts, we always want to return to userspace,
         * regardless of event count.
         */
-       return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
+       return io_cqring_events(ctx) >= iowq->to_wait ||
                        atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
 }
 
@@ -7108,11 +7168,13 @@ static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
        struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
                                                        wq);
 
-       /* use noflush == true, as we can't safely rely on locking context */
-       if (!io_should_wake(iowq, true))
-               return -1;
-
-       return autoremove_wake_function(curr, mode, wake_flags, key);
+       /*
+        * Cannot safely flush overflowed CQEs from here, ensure we wake up
+        * the task, and the next invocation will do it.
+        */
+       if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->cq_check_overflow))
+               return autoremove_wake_function(curr, mode, wake_flags, key);
+       return -1;
 }
 
 static int io_run_task_work_sig(void)
@@ -7149,7 +7211,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
        int ret = 0;
 
        do {
-               if (io_cqring_events(ctx, false) >= min_events)
+               io_cqring_overflow_flush(ctx, false, NULL, NULL);
+               if (io_cqring_events(ctx) >= min_events)
                        return 0;
                if (!io_run_task_work())
                        break;
@@ -7177,6 +7240,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
        iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
        trace_io_uring_cqring_wait(ctx, min_events);
        do {
+               io_cqring_overflow_flush(ctx, false, NULL, NULL);
                prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
                                                TASK_INTERRUPTIBLE);
                /* make sure we run task_work before checking for signals */
@@ -7185,8 +7249,10 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                        continue;
                else if (ret < 0)
                        break;
-               if (io_should_wake(&iowq, false))
+               if (io_should_wake(&iowq))
                        break;
+               if (test_bit(0, &ctx->cq_check_overflow))
+                       continue;
                if (uts) {
                        timeout = schedule_timeout(timeout);
                        if (timeout == 0) {
@@ -7684,12 +7750,12 @@ static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
 
        ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
        if (!ref_node)
-               return ERR_PTR(-ENOMEM);
+               return NULL;
 
        if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero,
                            0, GFP_KERNEL)) {
                kfree(ref_node);
-               return ERR_PTR(-ENOMEM);
+               return NULL;
        }
        INIT_LIST_HEAD(&ref_node->node);
        INIT_LIST_HEAD(&ref_node->file_list);
@@ -7783,9 +7849,9 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
        }
 
        ref_node = alloc_fixed_file_ref_node(ctx);
-       if (IS_ERR(ref_node)) {
+       if (!ref_node) {
                io_sqe_files_unregister(ctx);
-               return PTR_ERR(ref_node);
+               return -ENOMEM;
        }
 
        io_sqe_files_set_node(file_data, ref_node);
@@ -7885,8 +7951,8 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
                return -EINVAL;
 
        ref_node = alloc_fixed_file_ref_node(ctx);
-       if (IS_ERR(ref_node))
-               return PTR_ERR(ref_node);
+       if (!ref_node)
+               return -ENOMEM;
 
        done = 0;
        fds = u64_to_user_ptr(up->fds);
@@ -8624,7 +8690,8 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
        smp_rmb();
        if (!io_sqring_full(ctx))
                mask |= EPOLLOUT | EPOLLWRNORM;
-       if (io_cqring_events(ctx, false))
+       io_cqring_overflow_flush(ctx, false, NULL, NULL);
+       if (io_cqring_events(ctx))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        return mask;
@@ -8663,7 +8730,7 @@ static void io_ring_exit_work(struct work_struct *work)
         * as nobody else will be looking for them.
         */
        do {
-               io_iopoll_try_reap_events(ctx);
+               __io_uring_cancel_task_requests(ctx, NULL);
        } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
        io_ring_ctx_free(ctx);
 }
@@ -8679,10 +8746,14 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 {
        mutex_lock(&ctx->uring_lock);
        percpu_ref_kill(&ctx->refs);
+
+       if (WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) && !ctx->sqo_dead))
+               ctx->sqo_dead = 1;
+
        /* if force is set, the ring is going away. always drop after that */
        ctx->cq_overflow_flushed = 1;
        if (ctx->rings)
-               io_cqring_overflow_flush(ctx, true, NULL, NULL);
+               __io_cqring_overflow_flush(ctx, true, NULL, NULL);
        mutex_unlock(&ctx->uring_lock);
 
        io_kill_timeouts(ctx, NULL, NULL);
@@ -8818,9 +8889,11 @@ static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
                enum io_wq_cancel cret;
                bool ret = false;
 
-               cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
-               if (cret != IO_WQ_CANCEL_NOTFOUND)
-                       ret = true;
+               if (ctx->io_wq) {
+                       cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb,
+                                              &cancel, true);
+                       ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
+               }
 
                /* SQPOLL thread does its own polling */
                if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
@@ -8839,6 +8912,19 @@ static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
        }
 }
 
+static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
+{
+       WARN_ON_ONCE(ctx->sqo_task != current);
+
+       mutex_lock(&ctx->uring_lock);
+       ctx->sqo_dead = 1;
+       mutex_unlock(&ctx->uring_lock);
+
+       /* make sure callers enter the ring to get error */
+       if (ctx->rings)
+               io_ring_set_wakeup_flag(ctx);
+}
+
 /*
  * We need to iteratively cancel requests, in case a request has dependent
  * hard links. These persist even for failure of cancelations, hence keep
@@ -8850,15 +8936,15 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
        struct task_struct *task = current;
 
        if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
+               /* for SQPOLL only sqo_task has task notes */
+               io_disable_sqo_submit(ctx);
                task = ctx->sq_data->thread;
                atomic_inc(&task->io_uring->in_idle);
                io_sq_thread_park(ctx->sq_data);
        }
 
        io_cancel_defer_files(ctx, task, files);
-       io_ring_submit_lock(ctx, (ctx->flags & IORING_SETUP_IOPOLL));
        io_cqring_overflow_flush(ctx, true, task, files);
-       io_ring_submit_unlock(ctx, (ctx->flags & IORING_SETUP_IOPOLL));
 
        if (!files)
                __io_uring_cancel_task_requests(ctx, task);
@@ -8931,20 +9017,12 @@ static void io_uring_del_task_file(struct file *file)
                fput(file);
 }
 
-/*
- * Drop task note for this file if we're the only ones that hold it after
- * pending fput()
- */
-static void io_uring_attempt_task_drop(struct file *file)
+static void io_uring_remove_task_files(struct io_uring_task *tctx)
 {
-       if (!current->io_uring)
-               return;
-       /*
-        * fput() is pending, will be 2 if the only other ref is our potential
-        * task file note. If the task is exiting, drop regardless of count.
-        */
-       if (fatal_signal_pending(current) || (current->flags & PF_EXITING) ||
-           atomic_long_read(&file->f_count) == 2)
+       struct file *file;
+       unsigned long index;
+
+       xa_for_each(&tctx->xa, index, file)
                io_uring_del_task_file(file);
 }
 
@@ -8956,16 +9034,12 @@ void __io_uring_files_cancel(struct files_struct *files)
 
        /* make sure overflow events are dropped */
        atomic_inc(&tctx->in_idle);
-
-       xa_for_each(&tctx->xa, index, file) {
-               struct io_ring_ctx *ctx = file->private_data;
-
-               io_uring_cancel_task_requests(ctx, files);
-               if (files)
-                       io_uring_del_task_file(file);
-       }
-
+       xa_for_each(&tctx->xa, index, file)
+               io_uring_cancel_task_requests(file->private_data, files);
        atomic_dec(&tctx->in_idle);
+
+       if (files)
+               io_uring_remove_task_files(tctx);
 }
 
 static s64 tctx_inflight(struct io_uring_task *tctx)
@@ -9027,12 +9101,41 @@ void __io_uring_task_cancel(void)
                finish_wait(&tctx->wait, &wait);
        } while (1);
 
+       finish_wait(&tctx->wait, &wait);
        atomic_dec(&tctx->in_idle);
+
+       io_uring_remove_task_files(tctx);
 }
 
 static int io_uring_flush(struct file *file, void *data)
 {
-       io_uring_attempt_task_drop(file);
+       struct io_uring_task *tctx = current->io_uring;
+       struct io_ring_ctx *ctx = file->private_data;
+
+       if (!tctx)
+               return 0;
+
+       /* we should have cancelled and erased it before PF_EXITING */
+       WARN_ON_ONCE((current->flags & PF_EXITING) &&
+                    xa_load(&tctx->xa, (unsigned long)file));
+
+       /*
+        * fput() is pending, will be 2 if the only other ref is our potential
+        * task file note. If the task is exiting, drop regardless of count.
+        */
+       if (atomic_long_read(&file->f_count) != 2)
+               return 0;
+
+       if (ctx->flags & IORING_SETUP_SQPOLL) {
+               /* there is only one file note, which is owned by sqo_task */
+               WARN_ON_ONCE((ctx->sqo_task == current) ==
+                            !xa_load(&tctx->xa, (unsigned long)file));
+
+               io_disable_sqo_submit(ctx);
+       }
+
+       if (!(ctx->flags & IORING_SETUP_SQPOLL) || ctx->sqo_task == current)
+               io_uring_del_task_file(file);
        return 0;
 }
 
@@ -9106,8 +9209,9 @@ static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
 
 #endif /* !CONFIG_MMU */
 
-static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
+static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
 {
+       int ret = 0;
        DEFINE_WAIT(wait);
 
        do {
@@ -9116,6 +9220,11 @@ static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
 
                prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
 
+               if (unlikely(ctx->sqo_dead)) {
+                       ret = -EOWNERDEAD;
+                       goto out;
+               }
+
                if (!io_sqring_full(ctx))
                        break;
 
@@ -9123,6 +9232,8 @@ static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
        } while (!signal_pending(current));
 
        finish_wait(&ctx->sqo_sq_wait, &wait);
+out:
+       return ret;
 }
 
 static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
@@ -9194,17 +9305,18 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
         */
        ret = 0;
        if (ctx->flags & IORING_SETUP_SQPOLL) {
-               if (!list_empty_careful(&ctx->cq_overflow_list)) {
-                       bool needs_lock = ctx->flags & IORING_SETUP_IOPOLL;
+               io_cqring_overflow_flush(ctx, false, NULL, NULL);
 
-                       io_ring_submit_lock(ctx, needs_lock);
-                       io_cqring_overflow_flush(ctx, false, NULL, NULL);
-                       io_ring_submit_unlock(ctx, needs_lock);
-               }
+               ret = -EOWNERDEAD;
+               if (unlikely(ctx->sqo_dead))
+                       goto out;
                if (flags & IORING_ENTER_SQ_WAKEUP)
                        wake_up(&ctx->sq_data->wait);
-               if (flags & IORING_ENTER_SQ_WAIT)
-                       io_sqpoll_wait_sq(ctx);
+               if (flags & IORING_ENTER_SQ_WAIT) {
+                       ret = io_sqpoll_wait_sq(ctx);
+                       if (ret)
+                               goto out;
+               }
                submitted = to_submit;
        } else if (to_submit) {
                ret = io_uring_add_task_file(ctx, f.file);
@@ -9623,6 +9735,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
         */
        ret = io_uring_install_fd(ctx, file);
        if (ret < 0) {
+               io_disable_sqo_submit(ctx);
                /* fput will clean it up */
                fput(file);
                return ret;
@@ -9631,6 +9744,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
        trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
        return ret;
 err:
+       io_disable_sqo_submit(ctx);
        io_ring_ctx_wait_and_kill(ctx);
        return ret;
 }
index d2db7df..9d33909 100644 (file)
@@ -1713,8 +1713,6 @@ static int can_umount(const struct path *path, int flags)
 {
        struct mount *mnt = real_mount(path->mnt);
 
-       if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
-               return -EINVAL;
        if (!may_mount())
                return -EPERM;
        if (path->dentry != path->mnt->mnt_root)
@@ -1728,6 +1726,7 @@ static int can_umount(const struct path *path, int flags)
        return 0;
 }
 
+// caller is responsible for flags being sane
 int path_umount(struct path *path, int flags)
 {
        struct mount *mnt = real_mount(path->mnt);
@@ -1749,6 +1748,10 @@ static int ksys_umount(char __user *name, int flags)
        struct path path;
        int ret;
 
+       // basic validity checks done first
+       if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
+               return -EINVAL;
+
        if (!(flags & UMOUNT_NOFOLLOW))
                lookup_flags |= LOOKUP_FOLLOW;
        ret = user_path_at(AT_FDCWD, name, lookup_flags, &path);
index 816e142..04bf806 100644 (file)
@@ -1011,22 +1011,24 @@ nfs_delegation_find_inode_server(struct nfs_server *server,
                                 const struct nfs_fh *fhandle)
 {
        struct nfs_delegation *delegation;
-       struct inode *freeme, *res = NULL;
+       struct super_block *freeme = NULL;
+       struct inode *res = NULL;
 
        list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
                spin_lock(&delegation->lock);
                if (delegation->inode != NULL &&
                    !test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) &&
                    nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
-                       freeme = igrab(delegation->inode);
-                       if (freeme && nfs_sb_active(freeme->i_sb))
-                               res = freeme;
+                       if (nfs_sb_active(server->super)) {
+                               freeme = server->super;
+                               res = igrab(delegation->inode);
+                       }
                        spin_unlock(&delegation->lock);
                        if (res != NULL)
                                return res;
                        if (freeme) {
                                rcu_read_unlock();
-                               iput(freeme);
+                               nfs_sb_deactive(freeme);
                                rcu_read_lock();
                        }
                        return ERR_PTR(-EAGAIN);
index b840d0a..62d3189 100644 (file)
@@ -136,9 +136,29 @@ struct nfs_fs_context {
        } clone_data;
 };
 
-#define nfs_errorf(fc, fmt, ...) errorf(fc, fmt, ## __VA_ARGS__)
-#define nfs_invalf(fc, fmt, ...) invalf(fc, fmt, ## __VA_ARGS__)
-#define nfs_warnf(fc, fmt, ...) warnf(fc, fmt, ## __VA_ARGS__)
+#define nfs_errorf(fc, fmt, ...) ((fc)->log.log ?              \
+       errorf(fc, fmt, ## __VA_ARGS__) :                       \
+       ({ dprintk(fmt "\n", ## __VA_ARGS__); }))
+
+#define nfs_ferrorf(fc, fac, fmt, ...) ((fc)->log.log ?                \
+       errorf(fc, fmt, ## __VA_ARGS__) :                       \
+       ({ dfprintk(fac, fmt "\n", ## __VA_ARGS__); }))
+
+#define nfs_invalf(fc, fmt, ...) ((fc)->log.log ?              \
+       invalf(fc, fmt, ## __VA_ARGS__) :                       \
+       ({ dprintk(fmt "\n", ## __VA_ARGS__);  -EINVAL; }))
+
+#define nfs_finvalf(fc, fac, fmt, ...) ((fc)->log.log ?                \
+       invalf(fc, fmt, ## __VA_ARGS__) :                       \
+       ({ dfprintk(fac, fmt "\n", ## __VA_ARGS__);  -EINVAL; }))
+
+#define nfs_warnf(fc, fmt, ...) ((fc)->log.log ?               \
+       warnf(fc, fmt, ## __VA_ARGS__) :                        \
+       ({ dprintk(fmt "\n", ## __VA_ARGS__); }))
+
+#define nfs_fwarnf(fc, fac, fmt, ...) ((fc)->log.log ?         \
+       warnf(fc, fmt, ## __VA_ARGS__) :                        \
+       ({ dfprintk(fac, fmt "\n", ## __VA_ARGS__); }))
 
 static inline struct nfs_fs_context *nfs_fc2context(const struct fs_context *fc)
 {
@@ -579,12 +599,14 @@ extern void nfs4_test_session_trunk(struct rpc_clnt *clnt,
 
 static inline struct inode *nfs_igrab_and_active(struct inode *inode)
 {
-       inode = igrab(inode);
-       if (inode != NULL && !nfs_sb_active(inode->i_sb)) {
-               iput(inode);
-               inode = NULL;
+       struct super_block *sb = inode->i_sb;
+
+       if (sb && nfs_sb_active(sb)) {
+               if (igrab(inode))
+                       return inode;
+               nfs_sb_deactive(sb);
        }
-       return inode;
+       return NULL;
 }
 
 static inline void nfs_iput_and_deactive(struct inode *inode)
index 0ce04e0..2f4679a 100644 (file)
@@ -3536,10 +3536,8 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
        trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
 
        /* Handle Layoutreturn errors */
-       if (pnfs_roc_done(task, calldata->inode,
-                               &calldata->arg.lr_args,
-                               &calldata->res.lr_res,
-                               &calldata->res.lr_ret) == -EAGAIN)
+       if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res,
+                         &calldata->res.lr_ret) == -EAGAIN)
                goto out_restart;
 
        /* hmm. we are done with the inode, and in the process of freeing
@@ -6384,10 +6382,8 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
        trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
 
        /* Handle Layoutreturn errors */
-       if (pnfs_roc_done(task, data->inode,
-                               &data->args.lr_args,
-                               &data->res.lr_res,
-                               &data->res.lr_ret) == -EAGAIN)
+       if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res,
+                         &data->res.lr_ret) == -EAGAIN)
                goto out_restart;
 
        switch (task->tk_status) {
@@ -6441,10 +6437,10 @@ static void nfs4_delegreturn_release(void *calldata)
        struct nfs4_delegreturndata *data = calldata;
        struct inode *inode = data->inode;
 
+       if (data->lr.roc)
+               pnfs_roc_release(&data->lr.arg, &data->lr.res,
+                                data->res.lr_ret);
        if (inode) {
-               if (data->lr.roc)
-                       pnfs_roc_release(&data->lr.arg, &data->lr.res,
-                                       data->res.lr_ret);
                nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
                nfs_iput_and_deactive(inode);
        }
@@ -6520,16 +6516,14 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
        nfs_fattr_init(data->res.fattr);
        data->timestamp = jiffies;
        data->rpc_status = 0;
-       data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res, cred);
        data->inode = nfs_igrab_and_active(inode);
-       if (data->inode) {
+       if (data->inode || issync) {
+               data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res,
+                                       cred);
                if (data->lr.roc) {
                        data->args.lr_args = &data->lr.arg;
                        data->res.lr_res = &data->lr.res;
                }
-       } else if (data->lr.roc) {
-               pnfs_roc_release(&data->lr.arg, &data->lr.res, 0);
-               data->lr.roc = false;
        }
 
        task_setup_data.callback_data = data;
@@ -7111,9 +7105,9 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
                                        data->arg.new_lock_owner, ret);
        } else
                data->cancelled = true;
+       trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
        rpc_put_task(task);
        dprintk("%s: done, ret = %d!\n", __func__, ret);
-       trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
        return ret;
 }
 
index 984cc42..d09bcfd 100644 (file)
@@ -227,7 +227,7 @@ int nfs4_try_get_tree(struct fs_context *fc)
                           fc, ctx->nfs_server.hostname,
                           ctx->nfs_server.export_path);
        if (err) {
-               nfs_errorf(fc, "NFS4: Couldn't follow remote path");
+               nfs_ferrorf(fc, MOUNT, "NFS4: Couldn't follow remote path");
                dfprintk(MOUNT, "<-- nfs4_try_get_tree() = %d [error]\n", err);
        } else {
                dfprintk(MOUNT, "<-- nfs4_try_get_tree() = 0\n");
@@ -250,7 +250,7 @@ int nfs4_get_referral_tree(struct fs_context *fc)
                            fc, ctx->nfs_server.hostname,
                            ctx->nfs_server.export_path);
        if (err) {
-               nfs_errorf(fc, "NFS4: Couldn't follow remote path");
+               nfs_ferrorf(fc, MOUNT, "NFS4: Couldn't follow remote path");
                dfprintk(MOUNT, "<-- nfs4_get_referral_tree() = %d [error]\n", err);
        } else {
                dfprintk(MOUNT, "<-- nfs4_get_referral_tree() = 0\n");
index 07f59dc..4f274f2 100644 (file)
@@ -1152,7 +1152,7 @@ void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
        LIST_HEAD(freeme);
 
        spin_lock(&inode->i_lock);
-       if (!pnfs_layout_is_valid(lo) || !arg_stateid ||
+       if (!pnfs_layout_is_valid(lo) ||
            !nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
                goto out_unlock;
        if (stateid) {
@@ -1509,10 +1509,8 @@ out_noroc:
        return false;
 }
 
-int pnfs_roc_done(struct rpc_task *task, struct inode *inode,
-               struct nfs4_layoutreturn_args **argpp,
-               struct nfs4_layoutreturn_res **respp,
-               int *ret)
+int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp,
+                 struct nfs4_layoutreturn_res **respp, int *ret)
 {
        struct nfs4_layoutreturn_args *arg = *argpp;
        int retval = -EAGAIN;
@@ -1545,7 +1543,7 @@ int pnfs_roc_done(struct rpc_task *task, struct inode *inode,
                return 0;
        case -NFS4ERR_OLD_STATEID:
                if (!nfs4_layout_refresh_old_stateid(&arg->stateid,
-                                       &arg->range, inode))
+                                                    &arg->range, arg->inode))
                        break;
                *ret = -NFS4ERR_NOMATCHING_LAYOUT;
                return -EAGAIN;
@@ -1560,23 +1558,28 @@ void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
                int ret)
 {
        struct pnfs_layout_hdr *lo = args->layout;
-       const nfs4_stateid *arg_stateid = NULL;
+       struct inode *inode = args->inode;
        const nfs4_stateid *res_stateid = NULL;
        struct nfs4_xdr_opaque_data *ld_private = args->ld_private;
 
        switch (ret) {
        case -NFS4ERR_NOMATCHING_LAYOUT:
+               spin_lock(&inode->i_lock);
+               if (pnfs_layout_is_valid(lo) &&
+                   nfs4_stateid_match_other(&args->stateid, &lo->plh_stateid))
+                       pnfs_set_plh_return_info(lo, args->range.iomode, 0);
+               pnfs_clear_layoutreturn_waitbit(lo);
+               spin_unlock(&inode->i_lock);
                break;
        case 0:
                if (res->lrs_present)
                        res_stateid = &res->stateid;
                fallthrough;
        default:
-               arg_stateid = &args->stateid;
+               pnfs_layoutreturn_free_lsegs(lo, &args->stateid, &args->range,
+                                            res_stateid);
        }
        trace_nfs4_layoutreturn_on_close(args->inode, &args->stateid, ret);
-       pnfs_layoutreturn_free_lsegs(lo, arg_stateid, &args->range,
-                       res_stateid);
        if (ld_private && ld_private->ops && ld_private->ops->free)
                ld_private->ops->free(ld_private);
        pnfs_put_layout_hdr(lo);
@@ -2015,6 +2018,27 @@ lookup_again:
                goto lookup_again;
        }
 
+       /*
+        * Because we free lsegs when sending LAYOUTRETURN, we need to wait
+        * for LAYOUTRETURN.
+        */
+       if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
+               spin_unlock(&ino->i_lock);
+               dprintk("%s wait for layoutreturn\n", __func__);
+               lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo));
+               if (!IS_ERR(lseg)) {
+                       pnfs_put_layout_hdr(lo);
+                       dprintk("%s retrying\n", __func__);
+                       trace_pnfs_update_layout(ino, pos, count, iomode, lo,
+                                                lseg,
+                                                PNFS_UPDATE_LAYOUT_RETRY);
+                       goto lookup_again;
+               }
+               trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
+                                        PNFS_UPDATE_LAYOUT_RETURN);
+               goto out_put_layout_hdr;
+       }
+
        lseg = pnfs_find_lseg(lo, &arg, strict_iomode);
        if (lseg) {
                trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
@@ -2067,28 +2091,6 @@ lookup_again:
                nfs4_stateid_copy(&stateid, &lo->plh_stateid);
        }
 
-       /*
-        * Because we free lsegs before sending LAYOUTRETURN, we need to wait
-        * for LAYOUTRETURN even if first is true.
-        */
-       if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
-               spin_unlock(&ino->i_lock);
-               dprintk("%s wait for layoutreturn\n", __func__);
-               lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo));
-               if (!IS_ERR(lseg)) {
-                       if (first)
-                               pnfs_clear_first_layoutget(lo);
-                       pnfs_put_layout_hdr(lo);
-                       dprintk("%s retrying\n", __func__);
-                       trace_pnfs_update_layout(ino, pos, count, iomode, lo,
-                                       lseg, PNFS_UPDATE_LAYOUT_RETRY);
-                       goto lookup_again;
-               }
-               trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
-                               PNFS_UPDATE_LAYOUT_RETURN);
-               goto out_put_layout_hdr;
-       }
-
        if (pnfs_layoutgets_blocked(lo)) {
                trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
                                PNFS_UPDATE_LAYOUT_BLOCKED);
@@ -2242,6 +2244,7 @@ static void _lgopen_prepare_attached(struct nfs4_opendata *data,
                                             &rng, GFP_KERNEL);
        if (!lgp) {
                pnfs_clear_first_layoutget(lo);
+               nfs_layoutget_end(lo);
                pnfs_put_layout_hdr(lo);
                return;
        }
index bbd3de1..d810ae6 100644 (file)
@@ -297,10 +297,8 @@ bool pnfs_roc(struct inode *ino,
                struct nfs4_layoutreturn_args *args,
                struct nfs4_layoutreturn_res *res,
                const struct cred *cred);
-int pnfs_roc_done(struct rpc_task *task, struct inode *inode,
-               struct nfs4_layoutreturn_args **argpp,
-               struct nfs4_layoutreturn_res **respp,
-               int *ret);
+int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp,
+                 struct nfs4_layoutreturn_res **respp, int *ret);
 void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
                struct nfs4_layoutreturn_res *res,
                int ret);
@@ -772,7 +770,7 @@ pnfs_roc(struct inode *ino,
 }
 
 static inline int
-pnfs_roc_done(struct rpc_task *task, struct inode *inode,
+pnfs_roc_done(struct rpc_task *task,
                struct nfs4_layoutreturn_args **argpp,
                struct nfs4_layoutreturn_res **respp,
                int *ret)
index 2efcfdd..49d3389 100644 (file)
@@ -78,22 +78,18 @@ void
 pnfs_generic_clear_request_commit(struct nfs_page *req,
                                  struct nfs_commit_info *cinfo)
 {
-       struct pnfs_layout_segment *freeme = NULL;
+       struct pnfs_commit_bucket *bucket = NULL;
 
        if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
                goto out;
        cinfo->ds->nwritten--;
-       if (list_is_singular(&req->wb_list)) {
-               struct pnfs_commit_bucket *bucket;
-
+       if (list_is_singular(&req->wb_list))
                bucket = list_first_entry(&req->wb_list,
-                                         struct pnfs_commit_bucket,
-                                         written);
-               freeme = pnfs_free_bucket_lseg(bucket);
-       }
+                                         struct pnfs_commit_bucket, written);
 out:
        nfs_request_remove_commit_list(req, cinfo);
-       pnfs_put_lseg(freeme);
+       if (bucket)
+               pnfs_put_lseg(pnfs_free_bucket_lseg(bucket));
 }
 EXPORT_SYMBOL_GPL(pnfs_generic_clear_request_commit);
 
@@ -407,12 +403,16 @@ pnfs_bucket_get_committing(struct list_head *head,
                           struct pnfs_commit_bucket *bucket,
                           struct nfs_commit_info *cinfo)
 {
+       struct pnfs_layout_segment *lseg;
        struct list_head *pos;
 
        list_for_each(pos, &bucket->committing)
                cinfo->ds->ncommitting--;
        list_splice_init(&bucket->committing, head);
-       return pnfs_free_bucket_lseg(bucket);
+       lseg = pnfs_free_bucket_lseg(bucket);
+       if (!lseg)
+               lseg = pnfs_get_lseg(bucket->lseg);
+       return lseg;
 }
 
 static struct nfs_commit_data *
@@ -424,8 +424,6 @@ pnfs_bucket_fetch_commitdata(struct pnfs_commit_bucket *bucket,
        if (!data)
                return NULL;
        data->lseg = pnfs_bucket_get_committing(&data->pages, bucket, cinfo);
-       if (!data->lseg)
-               data->lseg = pnfs_get_lseg(bucket->lseg);
        return data;
 }
 
index 821db21..34b8802 100644 (file)
@@ -865,9 +865,14 @@ compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp,
        if (isdotent(name, namlen)) {
                if (namlen == 2) {
                        dchild = dget_parent(dparent);
-                       /* filesystem root - cannot return filehandle for ".." */
+                       /*
+                        * Don't return filehandle for ".." if we're at
+                        * the filesystem or export root:
+                        */
                        if (dchild == dparent)
                                goto out;
+                       if (dparent == exp->ex_path.dentry)
+                               goto out;
                } else
                        dchild = dget(dparent);
        } else
index 4727b7f..8d6d267 100644 (file)
 #include "pnfs.h"
 #include "trace.h"
 
+static bool inter_copy_offload_enable;
+module_param(inter_copy_offload_enable, bool, 0644);
+MODULE_PARM_DESC(inter_copy_offload_enable,
+                "Enable inter server to server copy offload. Default: false");
+
 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
 #include <linux/security.h>
 
index 45ee6b1..eaaa160 100644 (file)
@@ -147,6 +147,25 @@ svcxdr_dupstr(struct nfsd4_compoundargs *argp, void *buf, u32 len)
        return p;
 }
 
+static void *
+svcxdr_savemem(struct nfsd4_compoundargs *argp, __be32 *p, u32 len)
+{
+       __be32 *tmp;
+
+       /*
+        * The location of the decoded data item is stable,
+        * so @p is OK to use. This is the common case.
+        */
+       if (p != argp->xdr->scratch.iov_base)
+               return p;
+
+       tmp = svcxdr_tmpalloc(argp, len);
+       if (!tmp)
+               return NULL;
+       memcpy(tmp, p, len);
+       return tmp;
+}
+
 /*
  * NFSv4 basic data type decoders
  */
@@ -183,11 +202,10 @@ nfsd4_decode_opaque(struct nfsd4_compoundargs *argp, struct xdr_netobj *o)
        p = xdr_inline_decode(argp->xdr, len);
        if (!p)
                return nfserr_bad_xdr;
-       o->data = svcxdr_tmpalloc(argp, len);
+       o->data = svcxdr_savemem(argp, p, len);
        if (!o->data)
                return nfserr_jukebox;
        o->len = len;
-       memcpy(o->data, p, len);
 
        return nfs_ok;
 }
@@ -205,10 +223,9 @@ nfsd4_decode_component4(struct nfsd4_compoundargs *argp, char **namp, u32 *lenp)
        status = check_filename((char *)p, *lenp);
        if (status)
                return status;
-       *namp = svcxdr_tmpalloc(argp, *lenp);
+       *namp = svcxdr_savemem(argp, p, *lenp);
        if (!*namp)
                return nfserr_jukebox;
-       memcpy(*namp, p, *lenp);
 
        return nfs_ok;
 }
@@ -1200,10 +1217,9 @@ nfsd4_decode_putfh(struct nfsd4_compoundargs *argp, struct nfsd4_putfh *putfh)
        p = xdr_inline_decode(argp->xdr, putfh->pf_fhlen);
        if (!p)
                return nfserr_bad_xdr;
-       putfh->pf_fhval = svcxdr_tmpalloc(argp, putfh->pf_fhlen);
+       putfh->pf_fhval = svcxdr_savemem(argp, p, putfh->pf_fhlen);
        if (!putfh->pf_fhval)
                return nfserr_jukebox;
-       memcpy(putfh->pf_fhval, p, putfh->pf_fhlen);
 
        return nfs_ok;
 }
@@ -1318,24 +1334,20 @@ nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclient
        p = xdr_inline_decode(argp->xdr, setclientid->se_callback_netid_len);
        if (!p)
                return nfserr_bad_xdr;
-       setclientid->se_callback_netid_val = svcxdr_tmpalloc(argp,
+       setclientid->se_callback_netid_val = svcxdr_savemem(argp, p,
                                                setclientid->se_callback_netid_len);
        if (!setclientid->se_callback_netid_val)
                return nfserr_jukebox;
-       memcpy(setclientid->se_callback_netid_val, p,
-              setclientid->se_callback_netid_len);
 
        if (xdr_stream_decode_u32(argp->xdr, &setclientid->se_callback_addr_len) < 0)
                return nfserr_bad_xdr;
        p = xdr_inline_decode(argp->xdr, setclientid->se_callback_addr_len);
        if (!p)
                return nfserr_bad_xdr;
-       setclientid->se_callback_addr_val = svcxdr_tmpalloc(argp,
+       setclientid->se_callback_addr_val = svcxdr_savemem(argp, p,
                                                setclientid->se_callback_addr_len);
        if (!setclientid->se_callback_addr_val)
                return nfserr_jukebox;
-       memcpy(setclientid->se_callback_addr_val, p,
-              setclientid->se_callback_addr_len);
        if (xdr_stream_decode_u32(argp->xdr, &setclientid->se_callback_ident) < 0)
                return nfserr_bad_xdr;
 
@@ -1375,10 +1387,9 @@ nfsd4_decode_verify(struct nfsd4_compoundargs *argp, struct nfsd4_verify *verify
        p = xdr_inline_decode(argp->xdr, verify->ve_attrlen);
        if (!p)
                return nfserr_bad_xdr;
-       verify->ve_attrval = svcxdr_tmpalloc(argp, verify->ve_attrlen);
+       verify->ve_attrval = svcxdr_savemem(argp, p, verify->ve_attrlen);
        if (!verify->ve_attrval)
                return nfserr_jukebox;
-       memcpy(verify->ve_attrval, p, verify->ve_attrlen);
 
        return nfs_ok;
 }
@@ -2333,10 +2344,9 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
                p = xdr_inline_decode(argp->xdr, argp->taglen);
                if (!p)
                        return 0;
-               argp->tag = svcxdr_tmpalloc(argp, argp->taglen);
+               argp->tag = svcxdr_savemem(argp, p, argp->taglen);
                if (!argp->tag)
                        return 0;
-               memcpy(argp->tag, p, argp->taglen);
                max_reply += xdr_align_size(argp->taglen);
        }
 
@@ -4756,6 +4766,7 @@ nfsd4_encode_read_plus_data(struct nfsd4_compoundres *resp,
                            resp->rqstp->rq_vec, read->rd_vlen, maxcount, eof);
        if (nfserr)
                return nfserr;
+       xdr_truncate_encode(xdr, starting_len + 16 + xdr_align_size(*maxcount));
 
        tmp = htonl(NFS4_CONTENT_DATA);
        write_bytes_to_xdr_buf(xdr->buf, starting_len,      &tmp,   4);
@@ -4763,6 +4774,10 @@ nfsd4_encode_read_plus_data(struct nfsd4_compoundres *resp,
        write_bytes_to_xdr_buf(xdr->buf, starting_len + 4,  &tmp64, 8);
        tmp = htonl(*maxcount);
        write_bytes_to_xdr_buf(xdr->buf, starting_len + 12, &tmp,   4);
+
+       tmp = xdr_zero;
+       write_bytes_to_xdr_buf(xdr->buf, starting_len + 16 + *maxcount, &tmp,
+                              xdr_pad_size(*maxcount));
        return nfs_ok;
 }
 
@@ -4855,14 +4870,15 @@ out:
        if (nfserr && segments == 0)
                xdr_truncate_encode(xdr, starting_len);
        else {
-               tmp = htonl(eof);
-               write_bytes_to_xdr_buf(xdr->buf, starting_len,     &tmp, 4);
-               tmp = htonl(segments);
-               write_bytes_to_xdr_buf(xdr->buf, starting_len + 4, &tmp, 4);
                if (nfserr) {
                        xdr_truncate_encode(xdr, last_segment);
                        nfserr = nfs_ok;
+                       eof = 0;
                }
+               tmp = htonl(eof);
+               write_bytes_to_xdr_buf(xdr->buf, starting_len,     &tmp, 4);
+               tmp = htonl(segments);
+               write_bytes_to_xdr_buf(xdr->buf, starting_len + 4, &tmp, 4);
        }
 
        return nfserr;
index 00384c3..f9c9f4c 100644 (file)
 
 #define NFSDDBG_FACILITY       NFSDDBG_SVC
 
-bool inter_copy_offload_enable;
-EXPORT_SYMBOL_GPL(inter_copy_offload_enable);
-module_param(inter_copy_offload_enable, bool, 0644);
-MODULE_PARM_DESC(inter_copy_offload_enable,
-                "Enable inter server to server copy offload. Default: false");
-
 extern struct svc_program      nfsd_program;
 static int                     nfsd(void *vrqstp);
 #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
index a60ff5c..c300885 100644 (file)
@@ -568,7 +568,6 @@ struct nfsd4_copy {
        struct nfs_fh           c_fh;
        nfs4_stateid            stateid;
 };
-extern bool inter_copy_offload_enable;
 
 struct nfsd4_seek {
        /* request */
index 3e01d8f..dcab112 100644 (file)
@@ -1285,26 +1285,23 @@ fput_and_out:
        return ret;
 }
 
+#ifndef CONFIG_ARCH_SPLIT_ARG64
 SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
                              __u64, mask, int, dfd,
                              const char  __user *, pathname)
 {
        return do_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname);
 }
+#endif
 
-#ifdef CONFIG_COMPAT
-COMPAT_SYSCALL_DEFINE6(fanotify_mark,
+#if defined(CONFIG_ARCH_SPLIT_ARG64) || defined(CONFIG_COMPAT)
+SYSCALL32_DEFINE6(fanotify_mark,
                                int, fanotify_fd, unsigned int, flags,
-                               __u32, mask0, __u32, mask1, int, dfd,
+                               SC_ARG64(mask), int, dfd,
                                const char  __user *, pathname)
 {
-       return do_fanotify_mark(fanotify_fd, flags,
-#ifdef __BIG_ENDIAN
-                               ((__u64)mask0 << 32) | mask1,
-#else
-                               ((__u64)mask1 << 32) | mask0,
-#endif
-                                dfd, pathname);
+       return do_fanotify_mark(fanotify_fd, flags, SC_VAL64(__u64, mask),
+                               dfd, pathname);
 }
 #endif
 
index ee5a235..602e3a5 100644 (file)
@@ -1035,6 +1035,25 @@ struct clear_refs_private {
 };
 
 #ifdef CONFIG_MEM_SOFT_DIRTY
+
+#define is_cow_mapping(flags) (((flags) & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE)
+
+static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+       struct page *page;
+
+       if (!pte_write(pte))
+               return false;
+       if (!is_cow_mapping(vma->vm_flags))
+               return false;
+       if (likely(!atomic_read(&vma->vm_mm->has_pinned)))
+               return false;
+       page = vm_normal_page(vma, addr, pte);
+       if (!page)
+               return false;
+       return page_maybe_dma_pinned(page);
+}
+
 static inline void clear_soft_dirty(struct vm_area_struct *vma,
                unsigned long addr, pte_t *pte)
 {
@@ -1049,6 +1068,8 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
        if (pte_present(ptent)) {
                pte_t old_pte;
 
+               if (pte_is_pinned(vma, addr, ptent))
+                       return;
                old_pte = ptep_modify_prot_start(vma, addr, pte);
                ptent = pte_wrprotect(old_pte);
                ptent = pte_clear_soft_dirty(ptent);
@@ -1215,41 +1236,26 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                        .type = type,
                };
 
+               if (mmap_write_lock_killable(mm)) {
+                       count = -EINTR;
+                       goto out_mm;
+               }
                if (type == CLEAR_REFS_MM_HIWATER_RSS) {
-                       if (mmap_write_lock_killable(mm)) {
-                               count = -EINTR;
-                               goto out_mm;
-                       }
-
                        /*
                         * Writing 5 to /proc/pid/clear_refs resets the peak
                         * resident set size to this mm's current rss value.
                         */
                        reset_mm_hiwater_rss(mm);
-                       mmap_write_unlock(mm);
-                       goto out_mm;
+                       goto out_unlock;
                }
 
-               if (mmap_read_lock_killable(mm)) {
-                       count = -EINTR;
-                       goto out_mm;
-               }
                tlb_gather_mmu(&tlb, mm, 0, -1);
                if (type == CLEAR_REFS_SOFT_DIRTY) {
                        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                if (!(vma->vm_flags & VM_SOFTDIRTY))
                                        continue;
-                               mmap_read_unlock(mm);
-                               if (mmap_write_lock_killable(mm)) {
-                                       count = -EINTR;
-                                       goto out_mm;
-                               }
-                               for (vma = mm->mmap; vma; vma = vma->vm_next) {
-                                       vma->vm_flags &= ~VM_SOFTDIRTY;
-                                       vma_set_page_prot(vma);
-                               }
-                               mmap_write_downgrade(mm);
-                               break;
+                               vma->vm_flags &= ~VM_SOFTDIRTY;
+                               vma_set_page_prot(vma);
                        }
 
                        mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
@@ -1261,7 +1267,8 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
                if (type == CLEAR_REFS_SOFT_DIRTY)
                        mmu_notifier_invalidate_range_end(&range);
                tlb_finish_mmu(&tlb, 0, -1);
-               mmap_read_unlock(mm);
+out_unlock:
+               mmap_write_unlock(mm);
 out_mm:
                mmput(mm);
        }
index ebfebdf..37aaa83 100644 (file)
@@ -1011,14 +1011,17 @@ static int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
        fdcount = do_poll(head, &table, end_time);
        poll_freewait(&table);
 
+       if (!user_write_access_begin(ufds, nfds * sizeof(*ufds)))
+               goto out_fds;
+
        for (walk = head; walk; walk = walk->next) {
                struct pollfd *fds = walk->entries;
                int j;
 
-               for (j = 0; j < walk->len; j++, ufds++)
-                       if (__put_user(fds[j].revents, &ufds->revents))
-                               goto out_fds;
+               for (j = walk->len; j; fds++, ufds++, j--)
+                       unsafe_put_user(fds->revents, &ufds->revents, Efault);
        }
+       user_write_access_end();
 
        err = fdcount;
 out_fds:
@@ -1030,6 +1033,11 @@ out_fds:
        }
 
        return err;
+
+Efault:
+       user_write_access_end();
+       err = -EFAULT;
+       goto out_fds;
 }
 
 static long do_restart_poll(struct restart_block *restart_block)
index 5bef3a6..d0df217 100644 (file)
@@ -705,6 +705,7 @@ static int udf_check_vsd(struct super_block *sb)
        struct buffer_head *bh = NULL;
        int nsr = 0;
        struct udf_sb_info *sbi;
+       loff_t session_offset;
 
        sbi = UDF_SB(sb);
        if (sb->s_blocksize < sizeof(struct volStructDesc))
@@ -712,7 +713,8 @@ static int udf_check_vsd(struct super_block *sb)
        else
                sectorsize = sb->s_blocksize;
 
-       sector += (((loff_t)sbi->s_session) << sb->s_blocksize_bits);
+       session_offset = (loff_t)sbi->s_session << sb->s_blocksize_bits;
+       sector += session_offset;
 
        udf_debug("Starting at sector %u (%lu byte sectors)\n",
                  (unsigned int)(sector >> sb->s_blocksize_bits),
@@ -757,8 +759,7 @@ static int udf_check_vsd(struct super_block *sb)
 
        if (nsr > 0)
                return 1;
-       else if (!bh && sector - (sbi->s_session << sb->s_blocksize_bits) ==
-                       VSD_FIRST_SECTOR_OFFSET)
+       else if (!bh && sector - session_offset == VSD_FIRST_SECTOR_OFFSET)
                return -1;
        else
                return 0;
index ef2697b..827278f 100644 (file)
@@ -3,6 +3,7 @@ config ZONEFS_FS
        depends on BLOCK
        depends on BLK_DEV_ZONED
        select FS_IOMAP
+       select CRC32
        help
          zonefs is a simple file system which exposes zones of a zoned block
          device (e.g. host-managed or host-aware SMR disk drives) as files.
index dd90c97..0e7316a 100644 (file)
  * See Documentation/atomic_bitops.txt for details.
  */
 
-static inline void set_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void set_bit(unsigned int nr, volatile unsigned long *p)
 {
        p += BIT_WORD(nr);
        atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
-static inline void clear_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void clear_bit(unsigned int nr, volatile unsigned long *p)
 {
        p += BIT_WORD(nr);
        atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
-static inline void change_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void change_bit(unsigned int nr, volatile unsigned long *p)
 {
        p += BIT_WORD(nr);
        atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
index fc85f50..8dcb3e1 100644 (file)
@@ -13,7 +13,7 @@
 #define ARMV8_PMU_CYCLE_IDX            (ARMV8_PMU_MAX_COUNTERS - 1)
 #define ARMV8_PMU_MAX_COUNTER_PAIRS    ((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)
 
-#ifdef CONFIG_KVM_ARM_PMU
+#ifdef CONFIG_HW_PERF_EVENTS
 
 struct kvm_pmc {
        u8 idx; /* index into the pmu->pmc array */
index 2630c2e..053bf05 100644 (file)
@@ -885,6 +885,13 @@ static inline int acpi_device_modalias(struct device *dev,
        return -ENODEV;
 }
 
+static inline struct platform_device *
+acpi_create_platform_device(struct acpi_device *adev,
+                           struct property_entry *properties)
+{
+       return NULL;
+}
+
 static inline bool acpi_dma_supported(struct acpi_device *adev)
 {
        return false;
index 74c6c04..555ab0f 100644 (file)
 /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 */
 #if GCC_VERSION < 40900
 # error Sorry, your version of GCC is too old - please use 4.9 or newer.
+#elif defined(CONFIG_ARM64) && GCC_VERSION < 50100
+/*
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63293
+ * https://lore.kernel.org/r/20210107111841.GN1551@shell.armlinux.org.uk
+ */
+# error Sorry, your version of GCC is too old - please use 5.1 or newer.
 #endif
 
 /*
index b2a3f4f..ea5e04e 100644 (file)
  */
 #define __used                          __attribute__((__used__))
 
+/*
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-warn_005funused_005fresult-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#nodiscard-warn-unused-result
+ */
+#define __must_check                    __attribute__((__warn_unused_result__))
+
 /*
  *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-weak-function-attribute
  *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-weak-variable-attribute
index bbaa39e..e5dd5a4 100644 (file)
@@ -121,12 +121,6 @@ struct ftrace_likely_data {
        unsigned long                   constant;
 };
 
-#ifdef CONFIG_ENABLE_MUST_CHECK
-#define __must_check           __attribute__((__warn_unused_result__))
-#else
-#define __must_check
-#endif
-
 #if defined(CC_USING_HOTPATCH)
 #define notrace                        __attribute__((hotpatch(0, 0)))
 #elif defined(CC_USING_PATCHABLE_FUNCTION_ENTRY)
index dbe78e8..20874db 100644 (file)
@@ -186,12 +186,9 @@ extern int braille_register_console(struct console *, int index,
 extern int braille_unregister_console(struct console *);
 #ifdef CONFIG_TTY
 extern void console_sysfs_notify(void);
-extern void register_ttynull_console(void);
 #else
 static inline void console_sysfs_notify(void)
 { }
-static inline void register_ttynull_console(void)
-{ }
 #endif
 extern bool console_suspend_enabled;
 
index 29d255f..90bd558 100644 (file)
@@ -150,6 +150,7 @@ void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n);
 
 unsigned dm_bufio_get_block_size(struct dm_bufio_client *c);
 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
+struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c);
 sector_t dm_bufio_get_block_number(struct dm_buffer *b);
 void *dm_bufio_get_block_data(struct dm_buffer *b);
 void *dm_bufio_get_aux_data(struct dm_buffer *b);
index d956987..09c6a0b 100644 (file)
@@ -533,11 +533,10 @@ struct dmar_domain {
                                        /* Domain ids per IOMMU. Use u16 since
                                         * domain ids are 16 bit wide according
                                         * to VT-d spec, section 9.3 */
-       unsigned int    auxd_refcnt;    /* Refcount of auxiliary attaching */
 
        bool has_iotlb_device;
        struct list_head devices;       /* all devices' list */
-       struct list_head auxd;          /* link to device's auxiliary list */
+       struct list_head subdevices;    /* all subdevices' list */
        struct iova_domain iovad;       /* iova's that belong to this domain */
 
        struct dma_pte  *pgd;           /* virtual address */
@@ -610,14 +609,21 @@ struct intel_iommu {
        struct dmar_drhd_unit *drhd;
 };
 
+/* Per subdevice private data */
+struct subdev_domain_info {
+       struct list_head link_phys;     /* link to phys device siblings */
+       struct list_head link_domain;   /* link to domain siblings */
+       struct device *pdev;            /* physical device derived from */
+       struct dmar_domain *domain;     /* aux-domain */
+       int users;                      /* user count */
+};
+
 /* PCI domain-device relationship */
 struct device_domain_info {
        struct list_head link;  /* link to domain siblings */
        struct list_head global; /* link to global list */
        struct list_head table; /* link to pasid table */
-       struct list_head auxiliary_domains; /* auxiliary domains
-                                            * attached to this device
-                                            */
+       struct list_head subdevices; /* subdevices sibling */
        u32 segment;            /* PCI segment number */
        u8 bus;                 /* PCI bus number */
        u8 devfn;               /* PCI devfn number */
@@ -758,6 +764,7 @@ struct intel_svm_dev {
        struct list_head list;
        struct rcu_head rcu;
        struct device *dev;
+       struct intel_iommu *iommu;
        struct svm_dev_ops *ops;
        struct iommu_sva sva;
        u32 pasid;
@@ -771,7 +778,6 @@ struct intel_svm {
        struct mmu_notifier notifier;
        struct mm_struct *mm;
 
-       struct intel_iommu *iommu;
        unsigned int flags;
        u32 pasid;
        int gpasid; /* In case that guest PASID is different from host PASID */
index 5e0655f..fe1ae73 100644 (file)
@@ -35,8 +35,12 @@ struct kunit_kasan_expectation {
 #define KASAN_SHADOW_INIT 0
 #endif
 
+#ifndef PTE_HWTABLE_PTRS
+#define PTE_HWTABLE_PTRS 0
+#endif
+
 extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
-extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
+extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS];
 extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
 extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
 extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
index a10e847..4e3037d 100644 (file)
@@ -52,6 +52,25 @@ static inline void kcov_remote_start_usb(u64 id)
        kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_USB, id));
 }
 
+/*
+ * The softirq flavor of kcov_remote_*() functions is introduced as a temporary
+ * workaround for kcov's lack of nested remote coverage sections support in
+ * task context. Adding support for nested sections is tracked in:
+ * https://bugzilla.kernel.org/show_bug.cgi?id=210337
+ */
+
+static inline void kcov_remote_start_usb_softirq(u64 id)
+{
+       if (in_serving_softirq())
+               kcov_remote_start_usb(id);
+}
+
+static inline void kcov_remote_stop_softirq(void)
+{
+       if (in_serving_softirq())
+               kcov_remote_stop();
+}
+
 #else
 
 static inline void kcov_task_init(struct task_struct *t) {}
@@ -66,6 +85,8 @@ static inline u64 kcov_common_handle(void)
 }
 static inline void kcov_remote_start_common(u64 id) {}
 static inline void kcov_remote_start_usb(u64 id) {}
+static inline void kcov_remote_start_usb_softirq(u64 id) {}
+static inline void kcov_remote_stop_softirq(void) {}
 
 #endif /* CONFIG_KCOV */
 #endif /* _LINUX_KCOV_H */
index 65b81e0..2484ed9 100644 (file)
@@ -33,6 +33,9 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
                                          unsigned int cpu,
                                          const char *namefmt);
 
+void kthread_set_per_cpu(struct task_struct *k, int cpu);
+bool kthread_is_per_cpu(struct task_struct *k);
+
 /**
  * kthread_run - create and wake a thread.
  * @threadfn: the function to run until signal_pending(current).
index a12b552..73f20de 100644 (file)
@@ -230,6 +230,5 @@ static inline ktime_t ms_to_ktime(u64 ms)
 }
 
 # include <linux/timekeeping.h>
-# include <linux/timekeeping32.h>
 
 #endif
index 5d71e8a..aca4dc0 100644 (file)
@@ -35,6 +35,9 @@ struct mdiobb_ctrl {
        const struct mdiobb_ops *ops;
 };
 
+int mdiobb_read(struct mii_bus *bus, int phy, int reg);
+int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val);
+
 /* The returned bus is not yet registered with the phy layer. */
 struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl);
 
index d827bd7..eeb0b52 100644 (file)
@@ -665,7 +665,7 @@ static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
 {
        struct mem_cgroup *memcg = page_memcg(page);
 
-       VM_WARN_ON_ONCE_PAGE(!memcg, page);
+       VM_WARN_ON_ONCE_PAGE(!memcg && !mem_cgroup_disabled(), page);
        return mem_cgroup_lruvec(memcg, pgdat);
 }
 
index 8fbddec..442c016 100644 (file)
@@ -1280,7 +1280,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         ece_support[0x1];
        u8         reserved_at_a4[0x7];
        u8         log_max_srq[0x5];
-       u8         reserved_at_b0[0x2];
+       u8         reserved_at_b0[0x1];
+       u8         uplink_follow[0x1];
        u8         ts_cqe_to_dest_cqn[0x1];
        u8         reserved_at_b3[0xd];
 
index bf79667..5054802 100644 (file)
@@ -163,8 +163,6 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
 static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
 #endif
 
-bool arm_pmu_irq_is_nmi(void);
-
 /* Internal functions only for core arm_pmu code */
 struct arm_pmu *armpmu_alloc(void);
 struct arm_pmu *armpmu_alloc_atomic(void);
index de08264..fd02c5f 100644 (file)
@@ -86,6 +86,12 @@ void rcu_sched_clock_irq(int user);
 void rcu_report_dead(unsigned int cpu);
 void rcutree_migrate_callbacks(int cpu);
 
+#ifdef CONFIG_TASKS_RCU_GENERIC
+void rcu_init_tasks_generic(void);
+#else
+static inline void rcu_init_tasks_generic(void) { }
+#endif
+
 #ifdef CONFIG_RCU_STALL_COMMON
 void rcu_sysrq_start(void);
 void rcu_sysrq_end(void);
index 333bcdc..5f60c9e 100644 (file)
@@ -366,7 +366,7 @@ static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
 static inline bool skb_frag_must_loop(struct page *p)
 {
 #if defined(CONFIG_HIGHMEM)
-       if (PageHighMem(p))
+       if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) || PageHighMem(p))
                return true;
 #endif
        return false;
@@ -1203,6 +1203,7 @@ struct skb_seq_state {
        struct sk_buff  *root_skb;
        struct sk_buff  *cur_skb;
        __u8            *frag_data;
+       __u32           frag_off;
 };
 
 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
index f3929af..7688bc9 100644 (file)
@@ -251,6 +251,30 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
        static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
 #endif /* __SYSCALL_DEFINEx */
 
+/* For split 64-bit arguments on 32-bit architectures */
+#ifdef __LITTLE_ENDIAN
+#define SC_ARG64(name) u32, name##_lo, u32, name##_hi
+#else
+#define SC_ARG64(name) u32, name##_hi, u32, name##_lo
+#endif
+#define SC_VAL64(type, name) ((type) name##_hi << 32 | name##_lo)
+
+#ifdef CONFIG_COMPAT
+#define SYSCALL32_DEFINE1 COMPAT_SYSCALL_DEFINE1
+#define SYSCALL32_DEFINE2 COMPAT_SYSCALL_DEFINE2
+#define SYSCALL32_DEFINE3 COMPAT_SYSCALL_DEFINE3
+#define SYSCALL32_DEFINE4 COMPAT_SYSCALL_DEFINE4
+#define SYSCALL32_DEFINE5 COMPAT_SYSCALL_DEFINE5
+#define SYSCALL32_DEFINE6 COMPAT_SYSCALL_DEFINE6
+#else
+#define SYSCALL32_DEFINE1 SYSCALL_DEFINE1
+#define SYSCALL32_DEFINE2 SYSCALL_DEFINE2
+#define SYSCALL32_DEFINE3 SYSCALL_DEFINE3
+#define SYSCALL32_DEFINE4 SYSCALL_DEFINE4
+#define SYSCALL32_DEFINE5 SYSCALL_DEFINE5
+#define SYSCALL32_DEFINE6 SYSCALL_DEFINE6
+#endif
+
 /*
  * Called before coming back to user-mode. Returning to user-mode with an
  * address limit different than USER_DS can allow to overwrite kernel memory.
diff --git a/include/linux/timekeeping32.h b/include/linux/timekeeping32.h
deleted file mode 100644 (file)
index 266017f..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef _LINUX_TIMEKEEPING32_H
-#define _LINUX_TIMEKEEPING32_H
-/*
- * These interfaces are all based on the old timespec type
- * and should get replaced with the timespec64 based versions
- * over time so we can remove the file here.
- */
-
-static inline unsigned long get_seconds(void)
-{
-       return ktime_get_real_seconds();
-}
-
-#endif
index 88a7673..cfbfd6f 100644 (file)
@@ -81,6 +81,8 @@ struct usbnet {
 #              define EVENT_LINK_CHANGE        11
 #              define EVENT_SET_RX_MODE        12
 #              define EVENT_NO_IP_ALIGN        13
+       u32                     rx_speed;       /* in bps - NOT Mbps */
+       u32                     tx_speed;       /* in bps - NOT Mbps */
 };
 
 static inline struct usb_driver *driver_of(struct usb_interface *intf)
index 9a4bbcc..0d6f7ec 100644 (file)
@@ -1756,7 +1756,7 @@ struct cfg80211_sar_specs {
 
 
 /**
- * @struct cfg80211_sar_chan_ranges - sar frequency ranges
+ * struct cfg80211_sar_freq_ranges - sar frequency ranges
  * @start_freq:  start range edge frequency
  * @end_freq:    end range edge frequency
  */
@@ -3972,6 +3972,8 @@ struct mgmt_frame_regs {
  *     This callback may sleep.
  * @reset_tid_config: Reset TID specific configuration for the peer, for the
  *     given TIDs. This callback may sleep.
+ *
+ * @set_sar_specs: Update the SAR (TX power) settings.
  */
 struct cfg80211_ops {
        int     (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -4929,6 +4931,7 @@ struct wiphy_iftype_akm_suites {
  * @max_data_retry_count: maximum supported per TID retry count for
  *     configuration through the %NL80211_TID_CONFIG_ATTR_RETRY_SHORT and
  *     %NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes
+ * @sar_capa: SAR control capabilities
  */
 struct wiphy {
        /* assign these fields before you register the wiphy */
index 7338b38..111d777 100644 (file)
@@ -76,6 +76,8 @@ struct inet_connection_sock_af_ops {
  * @icsk_ext_hdr_len:     Network protocol overhead (IP/IPv6 options)
  * @icsk_ack:             Delayed ACK control data
  * @icsk_mtup;            MTU probing control data
+ * @icsk_probes_tstamp:    Probe timestamp (cleared by non-zero window ack)
+ * @icsk_user_timeout:    TCP_USER_TIMEOUT value
  */
 struct inet_connection_sock {
        /* inet_sock has to be the first member! */
@@ -129,6 +131,7 @@ struct inet_connection_sock {
 
                u32               probe_timestamp;
        } icsk_mtup;
+       u32                       icsk_probes_tstamp;
        u32                       icsk_user_timeout;
 
        u64                       icsk_ca_priv[104 / sizeof(u64)];
index d315740..2bdbf62 100644 (file)
@@ -3880,6 +3880,7 @@ enum ieee80211_reconfig_type {
  *     This callback may sleep.
  * @sta_set_4addr: Called to notify the driver when a station starts/stops using
  *     4-address mode
+ * @set_sar_specs: Update the SAR (TX power) settings.
  */
 struct ieee80211_ops {
        void (*tx)(struct ieee80211_hw *hw,
index fc45544..932f0d7 100644 (file)
@@ -168,12 +168,14 @@ static inline void red_set_vars(struct red_vars *v)
        v->qcount       = -1;
 }
 
-static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog)
+static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, u8 Scell_log)
 {
        if (fls(qth_min) + Wlog > 32)
                return false;
        if (fls(qth_max) + Wlog > 32)
                return false;
+       if (Scell_log >= 32)
+               return false;
        if (qth_max < qth_min)
                return false;
        return true;
index bdc4323..129d200 100644 (file)
@@ -1921,10 +1921,13 @@ static inline void sk_set_txhash(struct sock *sk)
        sk->sk_txhash = net_tx_rndhash();
 }
 
-static inline void sk_rethink_txhash(struct sock *sk)
+static inline bool sk_rethink_txhash(struct sock *sk)
 {
-       if (sk->sk_txhash)
+       if (sk->sk_txhash) {
                sk_set_txhash(sk);
+               return true;
+       }
+       return false;
 }
 
 static inline struct dst_entry *
@@ -1947,12 +1950,10 @@ sk_dst_get(struct sock *sk)
        return dst;
 }
 
-static inline void dst_negative_advice(struct sock *sk)
+static inline void __dst_negative_advice(struct sock *sk)
 {
        struct dst_entry *ndst, *dst = __sk_dst_get(sk);
 
-       sk_rethink_txhash(sk);
-
        if (dst && dst->ops->negative_advice) {
                ndst = dst->ops->negative_advice(dst);
 
@@ -1964,6 +1965,12 @@ static inline void dst_negative_advice(struct sock *sk)
        }
 }
 
+static inline void dst_negative_advice(struct sock *sk)
+{
+       sk_rethink_txhash(sk);
+       __dst_negative_advice(sk);
+}
+
 static inline void
 __sk_dst_set(struct sock *sk, struct dst_entry *dst)
 {
index 4f4e93b..cc17bc9 100644 (file)
@@ -58,10 +58,6 @@ struct xdp_sock {
 
        struct xsk_queue *tx ____cacheline_aligned_in_smp;
        struct list_head tx_list;
-       /* Mutual exclusion of NAPI TX thread and sendmsg error paths
-        * in the SKB destructor callback.
-        */
-       spinlock_t tx_completion_lock;
        /* Protects generic receive. */
        spinlock_t rx_lock;
 
index 01755b8..eaa8386 100644 (file)
@@ -73,6 +73,11 @@ struct xsk_buff_pool {
        bool dma_need_sync;
        bool unaligned;
        void *addrs;
+       /* Mutual exclusion of the completion ring in the SKB mode. Two cases to protect:
+        * NAPI TX thread and sendmsg error paths in the SKB destructor callback and when
+        * sockets share a single cq when the same netdev and queue id is shared.
+        */
+       spinlock_t cq_lock;
        struct xdp_buff_xsk *free_heads[];
 };
 
diff --git a/include/soc/nps/common.h b/include/soc/nps/common.h
deleted file mode 100644 (file)
index 8c18dc6..0000000
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef SOC_NPS_COMMON_H
-#define SOC_NPS_COMMON_H
-
-#ifdef CONFIG_SMP
-#define NPS_IPI_IRQ                                    5
-#endif
-
-#define NPS_HOST_REG_BASE                      0xF6000000
-
-#define NPS_MSU_BLKID                          0x018
-
-#define CTOP_INST_RSPI_GIC_0_R12               0x3C56117E
-#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST     0x5B60
-#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM     0x00010422
-
-#ifndef AUX_IENABLE
-#define AUX_IENABLE                            0x40c
-#endif
-
-#define CTOP_AUX_IACK                          (0xFFFFF800 + 0x088)
-
-#ifndef __ASSEMBLY__
-
-/* In order to increase compilation test coverage */
-#ifdef CONFIG_ARC
-static inline void nps_ack_gic(void)
-{
-       __asm__ __volatile__ (
-       "       .word %0\n"
-       :
-       : "i"(CTOP_INST_RSPI_GIC_0_R12)
-       : "memory");
-}
-#else
-static inline void nps_ack_gic(void) { }
-#define write_aux_reg(r, v)
-#define read_aux_reg(r) 0
-#endif
-
-/* CPU global ID */
-struct global_id {
-       union {
-               struct {
-#ifdef CONFIG_EZNPS_MTM_EXT
-                       u32 __reserved:20, cluster:4, core:4, thread:4;
-#else
-                       u32 __reserved:24, cluster:4, core:4;
-#endif
-               };
-               u32 value;
-       };
-};
-
-/*
- * Convert logical to physical CPU IDs
- *
- * The conversion swap bits 1 and 2 of cluster id (out of 4 bits)
- * Now quad of logical clusters id's are adjacent physically,
- * and not like the id's physically came with each cluster.
- * Below table is 4x4 mesh of core clusters as it layout on chip.
- * Cluster ids are in format: logical (physical)
- *
- *    -----------------   ------------------
- * 3 |  5 (3)   7 (7)  | | 13 (11)   15 (15)|
- *
- * 2 |  4 (2)   6 (6)  | | 12 (10)   14 (14)|
- *    -----------------   ------------------
- * 1 |  1 (1)   3 (5)  | |  9  (9)   11 (13)|
- *
- * 0 |  0 (0)   2 (4)  | |  8  (8)   10 (12)|
- *    -----------------   ------------------
- *       0       1            2        3
- */
-static inline int nps_cluster_logic_to_phys(int cluster)
-{
-#ifdef __arc__
-        __asm__ __volatile__(
-       "       mov r3,%0\n"
-       "       .short %1\n"
-       "       .word %2\n"
-       "       mov %0,r3\n"
-       : "+r"(cluster)
-       : "i"(CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST),
-         "i"(CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM)
-       : "r3");
-#endif
-
-       return cluster;
-}
-
-#define NPS_CPU_TO_CLUSTER_NUM(cpu) \
-       ({ struct global_id gid; gid.value = cpu; \
-               nps_cluster_logic_to_phys(gid.cluster); })
-
-struct nps_host_reg_address {
-       union {
-               struct {
-                       u32 base:8, cl_x:4, cl_y:4,
-                       blkid:6, reg:8, __reserved:2;
-               };
-               u32 value;
-       };
-};
-
-struct nps_host_reg_address_non_cl {
-       union {
-               struct {
-                       u32 base:7, blkid:11, reg:12, __reserved:2;
-               };
-               u32 value;
-       };
-};
-
-static inline void *nps_host_reg_non_cl(u32 blkid, u32 reg)
-{
-       struct nps_host_reg_address_non_cl reg_address;
-
-       reg_address.value = NPS_HOST_REG_BASE;
-       reg_address.blkid = blkid;
-       reg_address.reg = reg;
-
-       return (void *)reg_address.value;
-}
-
-static inline void *nps_host_reg(u32 cpu, u32 blkid, u32 reg)
-{
-       struct nps_host_reg_address reg_address;
-       u32 cl = NPS_CPU_TO_CLUSTER_NUM(cpu);
-
-       reg_address.value = NPS_HOST_REG_BASE;
-       reg_address.cl_x  = (cl >> 2) & 0x3;
-       reg_address.cl_y  = cl & 0x3;
-       reg_address.blkid = blkid;
-       reg_address.reg   = reg;
-
-       return (void *)reg_address.value;
-}
-#endif /* __ASSEMBLY__ */
-
-#endif /* SOC_NPS_COMMON_H */
diff --git a/include/soc/nps/mtm.h b/include/soc/nps/mtm.h
deleted file mode 100644 (file)
index d2f5e7e..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef SOC_NPS_MTM_H
-#define SOC_NPS_MTM_H
-
-#define CTOP_INST_HWSCHD_OFF_R3                 0x3B6F00BF
-#define CTOP_INST_HWSCHD_RESTORE_R3             0x3E6F70C3
-
-static inline void hw_schd_save(unsigned int *flags)
-{
-       __asm__ __volatile__(
-       "       .word %1\n"
-       "       st r3,[%0]\n"
-       :
-       : "r"(flags), "i"(CTOP_INST_HWSCHD_OFF_R3)
-       : "r3", "memory");
-}
-
-static inline void hw_schd_restore(unsigned int flags)
-{
-       __asm__ __volatile__(
-       "       mov r3, %0\n"
-       "       .word %1\n"
-       :
-       : "r"(flags), "i"(CTOP_INST_HWSCHD_RESTORE_R3)
-       : "r3");
-}
-
-#endif /* SOC_NPS_MTM_H */
index 4eef374..4a5cc8c 100644 (file)
@@ -231,6 +231,7 @@ enum afs_file_error {
        afs_file_error_dir_bad_magic,
        afs_file_error_dir_big,
        afs_file_error_dir_missing_page,
+       afs_file_error_dir_name_too_long,
        afs_file_error_dir_over_end,
        afs_file_error_dir_small,
        afs_file_error_dir_unmarked_ext,
@@ -488,6 +489,7 @@ enum afs_cb_break_reason {
        EM(afs_file_error_dir_bad_magic,        "DIR_BAD_MAGIC")        \
        EM(afs_file_error_dir_big,              "DIR_BIG")              \
        EM(afs_file_error_dir_missing_page,     "DIR_MISSING_PAGE")     \
+       EM(afs_file_error_dir_name_too_long,    "DIR_NAME_TOO_LONG")    \
        EM(afs_file_error_dir_over_end,         "DIR_ENT_OVER_END")     \
        EM(afs_file_error_dir_small,            "DIR_SMALL")            \
        EM(afs_file_error_dir_unmarked_ext,     "DIR_UNMARKED_EXT")     \
index 5039af6..cbe3e15 100644 (file)
@@ -366,7 +366,7 @@ TRACE_EVENT(sched_process_wait,
 );
 
 /*
- * Tracepoint for do_fork:
+ * Tracepoint for kernel_clone:
  */
 TRACE_EVENT(sched_process_fork,
 
index 58994e0..6f89c27 100644 (file)
@@ -1424,13 +1424,61 @@ TRACE_EVENT(rpcb_unregister,
        )
 );
 
+/* Record an xdr_buf containing a fully-formed RPC message */
+DECLARE_EVENT_CLASS(svc_xdr_msg_class,
+       TP_PROTO(
+               const struct xdr_buf *xdr
+       ),
+
+       TP_ARGS(xdr),
+
+       TP_STRUCT__entry(
+               __field(u32, xid)
+               __field(const void *, head_base)
+               __field(size_t, head_len)
+               __field(const void *, tail_base)
+               __field(size_t, tail_len)
+               __field(unsigned int, page_len)
+               __field(unsigned int, msg_len)
+       ),
+
+       TP_fast_assign(
+               __be32 *p = (__be32 *)xdr->head[0].iov_base;
+
+               __entry->xid = be32_to_cpu(*p);
+               __entry->head_base = p;
+               __entry->head_len = xdr->head[0].iov_len;
+               __entry->tail_base = xdr->tail[0].iov_base;
+               __entry->tail_len = xdr->tail[0].iov_len;
+               __entry->page_len = xdr->page_len;
+               __entry->msg_len = xdr->len;
+       ),
+
+       TP_printk("xid=0x%08x head=[%p,%zu] page=%u tail=[%p,%zu] len=%u",
+               __entry->xid,
+               __entry->head_base, __entry->head_len, __entry->page_len,
+               __entry->tail_base, __entry->tail_len, __entry->msg_len
+       )
+);
+
+#define DEFINE_SVCXDRMSG_EVENT(name)                                   \
+               DEFINE_EVENT(svc_xdr_msg_class,                         \
+                               svc_xdr_##name,                         \
+                               TP_PROTO(                               \
+                                       const struct xdr_buf *xdr       \
+                               ),                                      \
+                               TP_ARGS(xdr))
+
+DEFINE_SVCXDRMSG_EVENT(recvfrom);
+
+/* Record an xdr_buf containing arbitrary data, tagged with an XID */
 DECLARE_EVENT_CLASS(svc_xdr_buf_class,
        TP_PROTO(
-               const struct svc_rqst *rqst,
+               __be32 xid,
                const struct xdr_buf *xdr
        ),
 
-       TP_ARGS(rqst, xdr),
+       TP_ARGS(xid, xdr),
 
        TP_STRUCT__entry(
                __field(u32, xid)
@@ -1443,7 +1491,7 @@ DECLARE_EVENT_CLASS(svc_xdr_buf_class,
        ),
 
        TP_fast_assign(
-               __entry->xid = be32_to_cpu(rqst->rq_xid);
+               __entry->xid = be32_to_cpu(xid);
                __entry->head_base = xdr->head[0].iov_base;
                __entry->head_len = xdr->head[0].iov_len;
                __entry->tail_base = xdr->tail[0].iov_base;
@@ -1463,12 +1511,11 @@ DECLARE_EVENT_CLASS(svc_xdr_buf_class,
                DEFINE_EVENT(svc_xdr_buf_class,                         \
                                svc_xdr_##name,                         \
                                TP_PROTO(                               \
-                                       const struct svc_rqst *rqst,    \
+                                       __be32 xid,                     \
                                        const struct xdr_buf *xdr       \
                                ),                                      \
-                               TP_ARGS(rqst, xdr))
+                               TP_ARGS(xid, xdr))
 
-DEFINE_SVCXDRBUF_EVENT(recvfrom);
 DEFINE_SVCXDRBUF_EVENT(sendto);
 
 /*
index 52e8bcb..cf7399f 100644 (file)
@@ -213,7 +213,7 @@ struct cache_sb_disk {
                __le16          keys;
        };
        __le64                  d[SB_JOURNAL_BUCKETS];  /* journal buckets */
-       __le16                  bucket_size_hi;
+       __le16                  obso_bucket_size_hi;    /* obsoleted */
 };
 
 /*
index 874cc12..82708c6 100644 (file)
@@ -75,8 +75,9 @@ struct rtnl_link_stats {
  *
  * @rx_dropped: Number of packets received but not processed,
  *   e.g. due to lack of resources or unsupported protocol.
- *   For hardware interfaces this counter should not include packets
- *   dropped by the device which are counted separately in
+ *   For hardware interfaces this counter may include packets discarded
+ *   due to L2 address filtering but should not include packets dropped
+ *   by the device due to buffer exhaustion which are counted separately in
  *   @rx_missed_errors (since procfs folds those two counters together).
  *
  * @tx_dropped: Number of packets dropped on their way to transmission,
index 886802b..374c678 100644 (file)
@@ -251,6 +251,7 @@ struct kvm_hyperv_exit {
 #define KVM_EXIT_X86_RDMSR        29
 #define KVM_EXIT_X86_WRMSR        30
 #define KVM_EXIT_DIRTY_RING_FULL  31
+#define KVM_EXIT_AP_RESET_HOLD    32
 
 /* For KVM_EXIT_INTERNAL_ERROR */
 /* Emulate instruction failed. */
@@ -573,6 +574,7 @@ struct kvm_vapic_addr {
 #define KVM_MP_STATE_CHECK_STOP        6
 #define KVM_MP_STATE_OPERATING         7
 #define KVM_MP_STATE_LOAD              8
+#define KVM_MP_STATE_AP_RESET_HOLD     9
 
 struct kvm_mp_state {
        __u32 mp_state;
index 28b6ee5..b1633e7 100644 (file)
@@ -293,6 +293,7 @@ enum nft_rule_compat_attributes {
  * @NFT_SET_EVAL: set can be updated from the evaluation path
  * @NFT_SET_OBJECT: set contains stateful objects
  * @NFT_SET_CONCAT: set contains a concatenation
+ * @NFT_SET_EXPR: set contains expressions
  */
 enum nft_set_flags {
        NFT_SET_ANONYMOUS               = 0x1,
@@ -303,6 +304,7 @@ enum nft_set_flags {
        NFT_SET_EVAL                    = 0x20,
        NFT_SET_OBJECT                  = 0x40,
        NFT_SET_CONCAT                  = 0x80,
+       NFT_SET_EXPR                    = 0x100,
 };
 
 /**
@@ -706,6 +708,7 @@ enum nft_dynset_ops {
 
 enum nft_dynset_flags {
        NFT_DYNSET_F_INV        = (1 << 0),
+       NFT_DYNSET_F_EXPR       = (1 << 1),
 };
 
 /**
index 8dbecb3..1cc5ce0 100644 (file)
@@ -116,7 +116,7 @@ struct pppol2tp_ioc_stats {
 #define PPPIOCGCHAN    _IOR('t', 55, int)      /* get ppp channel number */
 #define PPPIOCGL2TPSTATS _IOR('t', 54, struct pppol2tp_ioc_stats)
 #define PPPIOCBRIDGECHAN _IOW('t', 53, int)    /* bridge one channel to another */
-#define PPPIOCUNBRIDGECHAN _IO('t', 54)        /* unbridge channel */
+#define PPPIOCUNBRIDGECHAN _IO('t', 52)        /* unbridge channel */
 
 #define SIOCGPPPSTATS   (SIOCDEVPRIVATE + 0)
 #define SIOCGPPPVER     (SIOCDEVPRIVATE + 1)   /* NEVER change this!! */
index 8c15a7d..dba3827 100644 (file)
@@ -279,6 +279,7 @@ enum hl_device_status {
  * HL_INFO_CLK_THROTTLE_REASON - Retrieve clock throttling reason
  * HL_INFO_SYNC_MANAGER  - Retrieve sync manager info per dcore
  * HL_INFO_TOTAL_ENERGY  - Retrieve total energy consumption
+ * HL_INFO_PLL_FREQUENCY - Retrieve PLL frequency
  */
 #define HL_INFO_HW_IP_INFO             0
 #define HL_INFO_HW_EVENTS              1
@@ -425,6 +426,8 @@ struct hl_info_sync_manager {
  * @ctx_device_in_reset_drop_cnt: context dropped due to device in reset
  * @total_max_cs_in_flight_drop_cnt: total dropped due to maximum CS in-flight
  * @ctx_max_cs_in_flight_drop_cnt: context dropped due to maximum CS in-flight
+ * @total_validation_drop_cnt: total dropped due to validation error
+ * @ctx_validation_drop_cnt: context dropped due to validation error
  */
 struct hl_info_cs_counters {
        __u64 total_out_of_mem_drop_cnt;
@@ -437,6 +440,8 @@ struct hl_info_cs_counters {
        __u64 ctx_device_in_reset_drop_cnt;
        __u64 total_max_cs_in_flight_drop_cnt;
        __u64 ctx_max_cs_in_flight_drop_cnt;
+       __u64 total_validation_drop_cnt;
+       __u64 ctx_validation_drop_cnt;
 };
 
 enum gaudi_dcores {
index 00c7235..2c43b0e 100644 (file)
@@ -192,7 +192,7 @@ void xs_suspend_cancel(void);
 
 struct work_struct;
 
-void xenbus_probe(struct work_struct *);
+void xenbus_probe(void);
 
 #define XENBUS_IS_ERR_READ(str) ({                     \
        if (!IS_ERR(str) && strlen(str) == 0) {         \
index 6feee7f..c68d784 100644 (file)
@@ -1480,14 +1480,8 @@ void __init console_on_rootfs(void)
        struct file *file = filp_open("/dev/console", O_RDWR, 0);
 
        if (IS_ERR(file)) {
-               pr_err("Warning: unable to open an initial console. Fallback to ttynull.\n");
-               register_ttynull_console();
-
-               file = filp_open("/dev/console", O_RDWR, 0);
-               if (IS_ERR(file)) {
-                       pr_err("Warning: Failed to add ttynull console. No stdin, stdout, and stderr for the init process!\n");
-                       return;
-               }
+               pr_err("Warning: unable to open an initial console.\n");
+               return;
        }
        init_dup(file);
        init_dup(file);
@@ -1518,6 +1512,7 @@ static noinline void __init kernel_init_freeable(void)
 
        init_mm_internals();
 
+       rcu_init_tasks_generic();
        do_pre_smp_initcalls();
        lockup_detector_init();
 
index 6edff97..2f05973 100644 (file)
@@ -176,14 +176,14 @@ BPF_CALL_4(bpf_inode_storage_get, struct bpf_map *, map, struct inode *, inode,
         * bpf_local_storage_update expects the owner to have a
         * valid storage pointer.
         */
-       if (!inode_storage_ptr(inode))
+       if (!inode || !inode_storage_ptr(inode))
                return (unsigned long)NULL;
 
        sdata = inode_storage_lookup(inode, map, true);
        if (sdata)
                return (unsigned long)sdata->data;
 
-       /* This helper must only called from where the inode is gurranteed
+       /* This helper must only called from where the inode is guaranteed
         * to have a refcount and cannot be freed.
         */
        if (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) {
@@ -200,7 +200,10 @@ BPF_CALL_4(bpf_inode_storage_get, struct bpf_map *, map, struct inode *, inode,
 BPF_CALL_2(bpf_inode_storage_delete,
           struct bpf_map *, map, struct inode *, inode)
 {
-       /* This helper must only called from where the inode is gurranteed
+       if (!inode)
+               return -EINVAL;
+
+       /* This helper must only called from where the inode is guaranteed
         * to have a refcount and cannot be freed.
         */
        return inode_storage_delete(inode, map);
index 4ef1959..e0da025 100644 (file)
@@ -218,7 +218,7 @@ BPF_CALL_4(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
         * bpf_local_storage_update expects the owner to have a
         * valid storage pointer.
         */
-       if (!task_storage_ptr(task))
+       if (!task || !task_storage_ptr(task))
                return (unsigned long)NULL;
 
        sdata = task_storage_lookup(task, map, true);
@@ -243,6 +243,9 @@ BPF_CALL_4(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
 BPF_CALL_2(bpf_task_storage_delete, struct bpf_map *, map, struct task_struct *,
           task)
 {
+       if (!task)
+               return -EINVAL;
+
        /* This helper must only be called from places where the lifetime of the task
         * is guaranteed. Either by being refcounted or by being protected
         * by an RCU read-side critical section.
index 8d6bdb4..84a36ee 100644 (file)
@@ -4172,7 +4172,7 @@ static int btf_parse_hdr(struct btf_verifier_env *env)
                return -ENOTSUPP;
        }
 
-       if (btf_data_size == hdr->hdr_len) {
+       if (!btf->base_btf && btf_data_size == hdr->hdr_len) {
                btf_verifier_log(env, "No data");
                return -EINVAL;
        }
index 6ec088a..96555a8 100644 (file)
@@ -1391,12 +1391,13 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
                if (ctx.optlen != 0) {
                        *optlen = ctx.optlen;
                        *kernel_optval = ctx.optval;
+                       /* export and don't free sockopt buf */
+                       return 0;
                }
        }
 
 out:
-       if (ret)
-               sockopt_free_buf(&ctx);
+       sockopt_free_buf(&ctx);
        return ret;
 }
 
index 7e84820..c1ac7f9 100644 (file)
@@ -152,6 +152,7 @@ static void htab_init_buckets(struct bpf_htab *htab)
                        lockdep_set_class(&htab->buckets[i].lock,
                                          &htab->lockdep_key);
                }
+               cond_resched();
        }
 }
 
index bd8a318..41ca280 100644 (file)
@@ -108,7 +108,7 @@ BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
 }
 
 const struct bpf_func_proto bpf_map_peek_elem_proto = {
-       .func           = bpf_map_pop_elem,
+       .func           = bpf_map_peek_elem,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
index 4caf06f..e5999d8 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/fs.h>
 #include <linux/license.h>
 #include <linux/filter.h>
-#include <linux/version.h>
 #include <linux/kernel.h>
 #include <linux/idr.h>
 #include <linux/cred.h>
@@ -2713,7 +2712,6 @@ out_unlock:
 out_put_prog:
        if (tgt_prog_fd && tgt_prog)
                bpf_prog_put(tgt_prog);
-       bpf_prog_put(prog);
        return err;
 }
 
@@ -2826,7 +2824,10 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
                        tp_name = prog->aux->attach_func_name;
                        break;
                }
-               return bpf_tracing_prog_attach(prog, 0, 0);
+               err = bpf_tracing_prog_attach(prog, 0, 0);
+               if (err >= 0)
+                       return err;
+               goto out_put_prog;
        case BPF_PROG_TYPE_RAW_TRACEPOINT:
        case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
                if (strncpy_from_user(buf,
index e73c075..175b7b4 100644 (file)
@@ -37,7 +37,7 @@ retry:
                if (!task) {
                        ++*tid;
                        goto retry;
-               } else if (skip_if_dup_files && task->tgid != task->pid &&
+               } else if (skip_if_dup_files && !thread_group_leader(task) &&
                           task->files == task->group_leader->files) {
                        put_task_struct(task);
                        task = NULL;
@@ -151,13 +151,14 @@ again:
                curr_task = info->task;
                curr_fd = info->fd;
        } else {
-               curr_task = task_seq_get_next(ns, &curr_tid, true);
-               if (!curr_task) {
-                       info->task = NULL;
-                       return NULL;
-               }
-
-               /* set info->task and info->tid */
+                curr_task = task_seq_get_next(ns, &curr_tid, true);
+                if (!curr_task) {
+                        info->task = NULL;
+                        info->tid = curr_tid;
+                        return NULL;
+                }
+
+                /* set info->task and info->tid */
                info->task = curr_task;
                if (curr_tid == info->tid) {
                        curr_fd = info->fd;
index 17270b8..e7368c5 100644 (file)
@@ -2217,6 +2217,8 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
        case PTR_TO_RDWR_BUF:
        case PTR_TO_RDWR_BUF_OR_NULL:
        case PTR_TO_PERCPU_BTF_ID:
+       case PTR_TO_MEM:
+       case PTR_TO_MEM_OR_NULL:
                return true;
        default:
                return false;
@@ -5311,7 +5313,7 @@ static bool signed_add_overflows(s64 a, s64 b)
        return res < a;
 }
 
-static bool signed_add32_overflows(s64 a, s64 b)
+static bool signed_add32_overflows(s32 a, s32 b)
 {
        /* Do the add in u32, where overflow is well-defined */
        s32 res = (s32)((u32)a + (u32)b);
@@ -5321,7 +5323,7 @@ static bool signed_add32_overflows(s64 a, s64 b)
        return res < a;
 }
 
-static bool signed_sub_overflows(s32 a, s32 b)
+static bool signed_sub_overflows(s64 a, s64 b)
 {
        /* Do the sub in u64, where overflow is well-defined */
        s64 res = (s64)((u64)a - (u64)b);
@@ -5333,7 +5335,7 @@ static bool signed_sub_overflows(s32 a, s32 b)
 
 static bool signed_sub32_overflows(s32 a, s32 b)
 {
-       /* Do the sub in u64, where overflow is well-defined */
+       /* Do the sub in u32, where overflow is well-defined */
        s32 res = (s32)((u32)a - (u32)b);
 
        if (b < 0)
index 53d688b..eb0029c 100644 (file)
@@ -81,7 +81,6 @@ CONFIG_INPUT_JOYSTICK=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_TABLET=y
 CONFIG_INPUT_UINPUT=y
-CONFIG_ION=y
 CONFIG_JOYSTICK_XPAD=y
 CONFIG_JOYSTICK_XPAD_FF=y
 CONFIG_JOYSTICK_XPAD_LEDS=y
index 37720a6..d66cd10 100644 (file)
@@ -819,9 +819,8 @@ void __init fork_init(void)
        init_task.signal->rlim[RLIMIT_SIGPENDING] =
                init_task.signal->rlim[RLIMIT_NPROC];
 
-       for (i = 0; i < UCOUNT_COUNTS; i++) {
+       for (i = 0; i < UCOUNT_COUNTS; i++)
                init_user_ns.ucount_max[i] = max_threads/2;
-       }
 
 #ifdef CONFIG_VMAP_STACK
        cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
@@ -1654,9 +1653,8 @@ static inline void init_task_pid_links(struct task_struct *task)
 {
        enum pid_type type;
 
-       for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
+       for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type)
                INIT_HLIST_NODE(&task->pid_links[type]);
-       }
 }
 
 static inline void
index ab8567f..dec3f73 100644 (file)
@@ -2859,3 +2859,4 @@ bool irq_check_status_bit(unsigned int irq, unsigned int bitmask)
        rcu_read_unlock();
        return res;
 }
+EXPORT_SYMBOL_GPL(irq_check_status_bit);
index 2c0c4d6..dc0e2d7 100644 (file)
@@ -402,7 +402,7 @@ int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
        struct msi_domain_ops *ops = info->ops;
        struct irq_data *irq_data;
        struct msi_desc *desc;
-       msi_alloc_info_t arg;
+       msi_alloc_info_t arg = { };
        int i, ret, virq;
        bool can_reserve;
 
index a5eceec..1578973 100644 (file)
@@ -294,7 +294,7 @@ static int kthread(void *_create)
        do_exit(ret);
 }
 
-/* called from do_fork() to get node information for about to be created task */
+/* called from kernel_clone() to get node information for about to be created task */
 int tsk_fork_get_node(struct task_struct *tsk)
 {
 #ifdef CONFIG_NUMA
@@ -493,11 +493,36 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
                return p;
        kthread_bind(p, cpu);
        /* CPU hotplug need to bind once again when unparking the thread. */
-       set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
        to_kthread(p)->cpu = cpu;
        return p;
 }
 
+void kthread_set_per_cpu(struct task_struct *k, int cpu)
+{
+       struct kthread *kthread = to_kthread(k);
+       if (!kthread)
+               return;
+
+       WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
+
+       if (cpu < 0) {
+               clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+               return;
+       }
+
+       kthread->cpu = cpu;
+       set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+}
+
+bool kthread_is_per_cpu(struct task_struct *k)
+{
+       struct kthread *kthread = to_kthread(k);
+       if (!kthread)
+               return false;
+
+       return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+}
+
 /**
  * kthread_unpark - unpark a thread created by kthread_create().
  * @k:         thread created by kthread_create().
index c1418b4..bdaf482 100644 (file)
@@ -79,7 +79,7 @@ module_param(lock_stat, int, 0644);
 DEFINE_PER_CPU(unsigned int, lockdep_recursion);
 EXPORT_PER_CPU_SYMBOL_GPL(lockdep_recursion);
 
-static inline bool lockdep_enabled(void)
+static __always_inline bool lockdep_enabled(void)
 {
        if (!debug_locks)
                return false;
@@ -5271,12 +5271,15 @@ static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie
 /*
  * Check whether we follow the irq-flags state precisely:
  */
-static void check_flags(unsigned long flags)
+static noinstr void check_flags(unsigned long flags)
 {
 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
        if (!debug_locks)
                return;
 
+       /* Get the warning out..  */
+       instrumentation_begin();
+
        if (irqs_disabled_flags(flags)) {
                if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) {
                        printk("possible reason: unannotated irqs-off.\n");
@@ -5304,6 +5307,8 @@ static void check_flags(unsigned long flags)
 
        if (!debug_locks)
                print_irqtrace_events(current);
+
+       instrumentation_end();
 #endif
 }
 
index ffdd0dc..6639a0c 100644 (file)
@@ -1291,11 +1291,16 @@ static size_t info_print_prefix(const struct printk_info  *info, bool syslog,
  * done:
  *
  *   - Add prefix for each line.
+ *   - Drop truncated lines that no longer fit into the buffer.
  *   - Add the trailing newline that has been removed in vprintk_store().
- *   - Drop truncated lines that do not longer fit into the buffer.
+ *   - Add a string terminator.
+ *
+ * Since the produced string is always terminated, the maximum possible
+ * return value is @r->text_buf_size - 1;
  *
  * Return: The length of the updated/prepared text, including the added
- * prefixes and the newline. The dropped line(s) are not counted.
+ * prefixes and the newline. The terminator is not counted. The dropped
+ * line(s) are not counted.
  */
 static size_t record_print_text(struct printk_record *r, bool syslog,
                                bool time)
@@ -1338,26 +1343,31 @@ static size_t record_print_text(struct printk_record *r, bool syslog,
 
                /*
                 * Truncate the text if there is not enough space to add the
-                * prefix and a trailing newline.
+                * prefix and a trailing newline and a terminator.
                 */
-               if (len + prefix_len + text_len + 1 > buf_size) {
+               if (len + prefix_len + text_len + 1 + 1 > buf_size) {
                        /* Drop even the current line if no space. */
-                       if (len + prefix_len + line_len + 1 > buf_size)
+                       if (len + prefix_len + line_len + 1 + 1 > buf_size)
                                break;
 
-                       text_len = buf_size - len - prefix_len - 1;
+                       text_len = buf_size - len - prefix_len - 1 - 1;
                        truncated = true;
                }
 
                memmove(text + prefix_len, text, text_len);
                memcpy(text, prefix, prefix_len);
 
+               /*
+                * Increment the prepared length to include the text and
+                * prefix that were just moved+copied. Also increment for the
+                * newline at the end of this line. If this is the last line,
+                * there is no newline, but it will be added immediately below.
+                */
                len += prefix_len + line_len + 1;
-
                if (text_len == line_len) {
                        /*
-                        * Add the trailing newline removed in
-                        * vprintk_store().
+                        * This is the last line. Add the trailing newline
+                        * removed in vprintk_store().
                         */
                        text[prefix_len + line_len] = '\n';
                        break;
@@ -1382,6 +1392,14 @@ static size_t record_print_text(struct printk_record *r, bool syslog,
                text_len -= line_len + 1;
        }
 
+       /*
+        * If a buffer was provided, it will be terminated. Space for the
+        * string terminator is guaranteed to be available. The terminator is
+        * not counted in the return value.
+        */
+       if (buf_size > 0)
+               text[len] = 0;
+
        return len;
 }
 
@@ -3427,7 +3445,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
        while (prb_read_valid_info(prb, seq, &info, &line_count)) {
                if (r.info->seq >= dumper->next_seq)
                        break;
-               l += get_record_print_text_size(&info, line_count, true, time);
+               l += get_record_print_text_size(&info, line_count, syslog, time);
                seq = r.info->seq + 1;
        }
 
@@ -3437,7 +3455,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
                                                &info, &line_count)) {
                if (r.info->seq >= dumper->next_seq)
                        break;
-               l -= get_record_print_text_size(&info, line_count, true, time);
+               l -= get_record_print_text_size(&info, line_count, syslog, time);
                seq = r.info->seq + 1;
        }
 
index 6704f06..8a7b736 100644 (file)
@@ -1718,7 +1718,7 @@ static bool copy_data(struct prb_data_ring *data_ring,
 
        /* Caller interested in the line count? */
        if (line_count)
-               *line_count = count_lines(data, data_size);
+               *line_count = count_lines(data, len);
 
        /* Caller interested in the data content? */
        if (!buf || !buf_size)
index 35bdcfd..3660755 100644 (file)
@@ -241,7 +241,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
        }
 }
 
-/* Spawn RCU-tasks grace-period kthread, e.g., at core_initcall() time. */
+/* Spawn RCU-tasks grace-period kthread. */
 static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
 {
        struct task_struct *t;
@@ -564,7 +564,6 @@ static int __init rcu_spawn_tasks_kthread(void)
        rcu_spawn_tasks_kthread_generic(&rcu_tasks);
        return 0;
 }
-core_initcall(rcu_spawn_tasks_kthread);
 
 #if !defined(CONFIG_TINY_RCU)
 void show_rcu_tasks_classic_gp_kthread(void)
@@ -692,7 +691,6 @@ static int __init rcu_spawn_tasks_rude_kthread(void)
        rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
        return 0;
 }
-core_initcall(rcu_spawn_tasks_rude_kthread);
 
 #if !defined(CONFIG_TINY_RCU)
 void show_rcu_tasks_rude_gp_kthread(void)
@@ -968,6 +966,11 @@ static void rcu_tasks_trace_pregp_step(void)
 static void rcu_tasks_trace_pertask(struct task_struct *t,
                                    struct list_head *hop)
 {
+       // During early boot when there is only the one boot CPU, there
+       // is no idle task for the other CPUs. Just return.
+       if (unlikely(t == NULL))
+               return;
+
        WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
        WRITE_ONCE(t->trc_reader_checked, false);
        t->trc_ipi_to_cpu = -1;
@@ -1193,7 +1196,6 @@ static int __init rcu_spawn_tasks_trace_kthread(void)
        rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
        return 0;
 }
-core_initcall(rcu_spawn_tasks_trace_kthread);
 
 #if !defined(CONFIG_TINY_RCU)
 void show_rcu_tasks_trace_gp_kthread(void)
@@ -1222,6 +1224,21 @@ void show_rcu_tasks_gp_kthreads(void)
 }
 #endif /* #ifndef CONFIG_TINY_RCU */
 
+void __init rcu_init_tasks_generic(void)
+{
+#ifdef CONFIG_TASKS_RCU
+       rcu_spawn_tasks_kthread();
+#endif
+
+#ifdef CONFIG_TASKS_RUDE_RCU
+       rcu_spawn_tasks_rude_kthread();
+#endif
+
+#ifdef CONFIG_TASKS_TRACE_RCU
+       rcu_spawn_tasks_trace_kthread();
+#endif
+}
+
 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
 static inline void rcu_tasks_bootup_oddness(void) {}
 void show_rcu_tasks_gp_kthreads(void) {}
index 15d2562..ff74fca 100644 (file)
@@ -1796,13 +1796,28 @@ static inline bool rq_has_pinned_tasks(struct rq *rq)
  */
 static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
 {
+       /* When not in the task's cpumask, no point in looking further. */
        if (!cpumask_test_cpu(cpu, p->cpus_ptr))
                return false;
 
-       if (is_per_cpu_kthread(p) || is_migration_disabled(p))
+       /* migrate_disabled() must be allowed to finish. */
+       if (is_migration_disabled(p))
                return cpu_online(cpu);
 
-       return cpu_active(cpu);
+       /* Non kernel threads are not allowed during either online or offline. */
+       if (!(p->flags & PF_KTHREAD))
+               return cpu_active(cpu);
+
+       /* KTHREAD_IS_PER_CPU is always allowed. */
+       if (kthread_is_per_cpu(p))
+               return cpu_online(cpu);
+
+       /* Regular kernel threads don't get to stay during offline. */
+       if (cpu_rq(cpu)->balance_push)
+               return false;
+
+       /* But are allowed during online. */
+       return cpu_online(cpu);
 }
 
 /*
@@ -2327,7 +2342,9 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 
        if (p->flags & PF_KTHREAD || is_migration_disabled(p)) {
                /*
-                * Kernel threads are allowed on online && !active CPUs.
+                * Kernel threads are allowed on online && !active CPUs,
+                * however, during cpu-hot-unplug, even these might get pushed
+                * away if not KTHREAD_IS_PER_CPU.
                 *
                 * Specifically, migration_disabled() tasks must not fail the
                 * cpumask_any_and_distribute() pick below, esp. so on
@@ -2371,16 +2388,6 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 
        __do_set_cpus_allowed(p, new_mask, flags);
 
-       if (p->flags & PF_KTHREAD) {
-               /*
-                * For kernel threads that do indeed end up on online &&
-                * !active we want to ensure they are strict per-CPU threads.
-                */
-               WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
-                       !cpumask_intersects(new_mask, cpu_active_mask) &&
-                       p->nr_cpus_allowed != 1);
-       }
-
        return affine_move_task(rq, p, &rf, dest_cpu, flags);
 
 out:
@@ -3121,6 +3128,13 @@ bool cpus_share_cache(int this_cpu, int that_cpu)
 
 static inline bool ttwu_queue_cond(int cpu, int wake_flags)
 {
+       /*
+        * Do not complicate things with the async wake_list while the CPU is
+        * in hotplug state.
+        */
+       if (!cpu_active(cpu))
+               return false;
+
        /*
         * If the CPU does not share cache, then queue the task on the
         * remote rqs wakelist to avoid accessing remote data.
@@ -7276,8 +7290,14 @@ static void balance_push(struct rq *rq)
        /*
         * Both the cpu-hotplug and stop task are in this case and are
         * required to complete the hotplug process.
+        *
+        * XXX: the idle task does not match kthread_is_per_cpu() due to
+        * histerical raisins.
         */
-       if (is_per_cpu_kthread(push_task) || is_migration_disabled(push_task)) {
+       if (rq->idle == push_task ||
+           ((push_task->flags & PF_KTHREAD) && kthread_is_per_cpu(push_task)) ||
+           is_migration_disabled(push_task)) {
+
                /*
                 * If this is the idle task on the outgoing CPU try to wake
                 * up the hotplug control thread which might wait for the
@@ -7309,7 +7329,7 @@ static void balance_push(struct rq *rq)
        /*
         * At this point need_resched() is true and we'll take the loop in
         * schedule(). The next pick is obviously going to be the stop task
-        * which is_per_cpu_kthread() and will push this task away.
+        * which kthread_is_per_cpu() and will push this task away.
         */
        raw_spin_lock(&rq->lock);
 }
@@ -7320,10 +7340,13 @@ static void balance_push_set(int cpu, bool on)
        struct rq_flags rf;
 
        rq_lock_irqsave(rq, &rf);
-       if (on)
+       rq->balance_push = on;
+       if (on) {
+               WARN_ON_ONCE(rq->balance_callback);
                rq->balance_callback = &balance_push_callback;
-       else
+       } else if (rq->balance_callback == &balance_push_callback) {
                rq->balance_callback = NULL;
+       }
        rq_unlock_irqrestore(rq, &rf);
 }
 
@@ -7441,6 +7464,10 @@ int sched_cpu_activate(unsigned int cpu)
        struct rq *rq = cpu_rq(cpu);
        struct rq_flags rf;
 
+       /*
+        * Make sure that when the hotplug state machine does a roll-back
+        * we clear balance_push. Ideally that would happen earlier...
+        */
        balance_push_set(cpu, false);
 
 #ifdef CONFIG_SCHED_SMT
@@ -7483,17 +7510,27 @@ int sched_cpu_deactivate(unsigned int cpu)
        int ret;
 
        set_cpu_active(cpu, false);
+
+       /*
+        * From this point forward, this CPU will refuse to run any task that
+        * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
+        * push those tasks away until this gets cleared, see
+        * sched_cpu_dying().
+        */
+       balance_push_set(cpu, true);
+
        /*
-        * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
-        * users of this state to go away such that all new such users will
-        * observe it.
+        * We've cleared cpu_active_mask / set balance_push, wait for all
+        * preempt-disabled and RCU users of this state to go away such that
+        * all new such users will observe it.
+        *
+        * Specifically, we rely on ttwu to no longer target this CPU, see
+        * ttwu_queue_cond() and is_cpu_allowed().
         *
         * Do sync before park smpboot threads to take care the rcu boost case.
         */
        synchronize_rcu();
 
-       balance_push_set(cpu, true);
-
        rq_lock_irqsave(rq, &rf);
        if (rq->rd) {
                update_rq_clock(rq);
@@ -7574,6 +7611,25 @@ static void calc_load_migrate(struct rq *rq)
                atomic_long_add(delta, &calc_load_tasks);
 }
 
+static void dump_rq_tasks(struct rq *rq, const char *loglvl)
+{
+       struct task_struct *g, *p;
+       int cpu = cpu_of(rq);
+
+       lockdep_assert_held(&rq->lock);
+
+       printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
+       for_each_process_thread(g, p) {
+               if (task_cpu(p) != cpu)
+                       continue;
+
+               if (!task_on_rq_queued(p))
+                       continue;
+
+               printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
+       }
+}
+
 int sched_cpu_dying(unsigned int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
@@ -7583,9 +7639,18 @@ int sched_cpu_dying(unsigned int cpu)
        sched_tick_stop(cpu);
 
        rq_lock_irqsave(rq, &rf);
-       BUG_ON(rq->nr_running != 1 || rq_has_pinned_tasks(rq));
+       if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
+               WARN(true, "Dying CPU not properly vacated!");
+               dump_rq_tasks(rq, KERN_WARNING);
+       }
        rq_unlock_irqrestore(rq, &rf);
 
+       /*
+        * Now that the CPU is offline, make sure we're welcome
+        * to new tasks once we come back up.
+        */
+       balance_push_set(cpu, false);
+
        calc_load_migrate(rq);
        update_max_interval();
        nohz_balance_exit_idle(rq);
index 12ada79..bb09988 100644 (file)
@@ -975,6 +975,7 @@ struct rq {
        unsigned long           cpu_capacity_orig;
 
        struct callback_head    *balance_callback;
+       unsigned char           balance_push;
 
        unsigned char           nohz_idle_balance;
        unsigned char           idle_balance;
index 5736c55..5ad8566 100644 (file)
@@ -2550,6 +2550,9 @@ bool get_signal(struct ksignal *ksig)
        struct signal_struct *signal = current->signal;
        int signr;
 
+       if (unlikely(current->task_works))
+               task_work_run();
+
        /*
         * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
         * that the arch handlers don't all have to do it. If we get here
@@ -3701,7 +3704,8 @@ static bool access_pidfd_pidns(struct pid *pid)
        return true;
 }
 
-static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
+static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
+               siginfo_t __user *info)
 {
 #ifdef CONFIG_COMPAT
        /*
index 2efe1e2..f25208e 100644 (file)
@@ -188,6 +188,7 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
                kfree(td);
                return PTR_ERR(tsk);
        }
+       kthread_set_per_cpu(tsk, cpu);
        /*
         * Park the thread so that it could start right on the CPU
         * when it is available.
index 7404d38..87389b9 100644 (file)
@@ -498,7 +498,7 @@ out:
 static void sync_hw_clock(struct work_struct *work);
 static DECLARE_WORK(sync_work, sync_hw_clock);
 static struct hrtimer sync_hrtimer;
-#define SYNC_PERIOD_NS (11UL * 60 * NSEC_PER_SEC)
+#define SYNC_PERIOD_NS (11ULL * 60 * NSEC_PER_SEC)
 
 static enum hrtimer_restart sync_timer_callback(struct hrtimer *timer)
 {
@@ -512,7 +512,7 @@ static void sched_sync_hw_clock(unsigned long offset_nsec, bool retry)
        ktime_t exp = ktime_set(ktime_get_real_seconds(), 0);
 
        if (retry)
-               exp = ktime_add_ns(exp, 2 * NSEC_PER_SEC - offset_nsec);
+               exp = ktime_add_ns(exp, 2ULL * NSEC_PER_SEC - offset_nsec);
        else
                exp = ktime_add_ns(exp, SYNC_PERIOD_NS - offset_nsec);
 
index a45cedd..6aee576 100644 (file)
@@ -991,8 +991,7 @@ EXPORT_SYMBOL_GPL(ktime_get_seconds);
 /**
  * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
  *
- * Returns the wall clock seconds since 1970. This replaces the
- * get_seconds() interface which is not y2038 safe on 32bit systems.
+ * Returns the wall clock seconds since 1970.
  *
  * For 64bit systems the fast access to tk->xtime_sec is preserved. On
  * 32bit systems the access must be protected with the sequence
index d5a1941..c1a62ae 100644 (file)
@@ -538,7 +538,7 @@ config KPROBE_EVENTS
 config KPROBE_EVENTS_ON_NOTRACE
        bool "Do NOT protect notrace function from kprobe events"
        depends on KPROBE_EVENTS
-       depends on KPROBES_ON_FTRACE
+       depends on DYNAMIC_FTRACE
        default n
        help
          This is only for the developers who want to debug ftrace itself
index 9c31f42..e6fba17 100644 (file)
@@ -434,7 +434,7 @@ static int disable_trace_kprobe(struct trace_event_call *call,
        return 0;
 }
 
-#if defined(CONFIG_KPROBES_ON_FTRACE) && \
+#if defined(CONFIG_DYNAMIC_FTRACE) && \
        !defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
 static bool __within_notrace_func(unsigned long addr)
 {
index 9880b6c..894bb88 100644 (file)
@@ -1848,12 +1848,6 @@ static void worker_attach_to_pool(struct worker *worker,
 {
        mutex_lock(&wq_pool_attach_mutex);
 
-       /*
-        * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
-        * online CPUs.  It'll be re-applied when any of the CPUs come up.
-        */
-       set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
-
        /*
         * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
         * stable across this function.  See the comments above the flag
@@ -1861,6 +1855,11 @@ static void worker_attach_to_pool(struct worker *worker,
         */
        if (pool->flags & POOL_DISASSOCIATED)
                worker->flags |= WORKER_UNBOUND;
+       else
+               kthread_set_per_cpu(worker->task, pool->cpu);
+
+       if (worker->rescue_wq)
+               set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
 
        list_add_tail(&worker->node, &pool->workers);
        worker->pool = pool;
@@ -1883,6 +1882,7 @@ static void worker_detach_from_pool(struct worker *worker)
 
        mutex_lock(&wq_pool_attach_mutex);
 
+       kthread_set_per_cpu(worker->task, -1);
        list_del(&worker->node);
        worker->pool = NULL;
 
@@ -4919,8 +4919,10 @@ static void unbind_workers(int cpu)
 
                raw_spin_unlock_irq(&pool->lock);
 
-               for_each_pool_worker(worker, pool)
-                       WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_active_mask) < 0);
+               for_each_pool_worker(worker, pool) {
+                       kthread_set_per_cpu(worker->task, -1);
+                       WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
+               }
 
                mutex_unlock(&wq_pool_attach_mutex);
 
@@ -4972,9 +4974,11 @@ static void rebind_workers(struct worker_pool *pool)
         * of all workers first and then clear UNBOUND.  As we're called
         * from CPU_ONLINE, the following shouldn't fail.
         */
-       for_each_pool_worker(worker, pool)
+       for_each_pool_worker(worker, pool) {
+               kthread_set_per_cpu(worker->task, pool->cpu);
                WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
                                                  pool->attrs->cpumask) < 0);
+       }
 
        raw_spin_lock_irq(&pool->lock);
 
index e6e58b2..7937265 100644 (file)
@@ -295,14 +295,6 @@ config GDB_SCRIPTS
 
 endif # DEBUG_INFO
 
-config ENABLE_MUST_CHECK
-       bool "Enable __must_check logic"
-       default y
-       help
-         Enable the __must_check logic in the kernel build.  Disable this to
-         suppress the "warning: ignoring return value of 'foo', declared with
-         attribute warn_unused_result" messages.
-
 config FRAME_WARN
        int "Warn for stack frames larger than"
        range 0 8192
index 1955d62..5baedc5 100644 (file)
@@ -774,8 +774,8 @@ static const struct font_data fontdata_ter16x32 = {
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc,
        0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 95 */
-       0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00, 0x07, 0x00,
-       0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+       0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00,
+       0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -1169,7 +1169,7 @@ static const struct font_data fontdata_ter16x32 = {
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x7f, 0xf8, 0x7f, 0xfc, 0x03, 0x9e, 0x03, 0x8e,
+       0x7e, 0xf8, 0x7f, 0xfc, 0x03, 0x9e, 0x03, 0x8e,
        0x03, 0x8e, 0x3f, 0x8e, 0x7f, 0xfe, 0xf3, 0xfe,
        0xe3, 0x80, 0xe3, 0x80, 0xe3, 0x80, 0xf3, 0xce,
        0x7f, 0xfe, 0x3e, 0xfc, 0x00, 0x00, 0x00, 0x00,
index 1635111..a21e6a5 100644 (file)
@@ -1658,7 +1658,7 @@ static int copy_compat_iovec_from_user(struct iovec *iov,
                (const struct compat_iovec __user *)uvec;
        int ret = -EFAULT, i;
 
-       if (!user_access_begin(uvec, nr_segs * sizeof(*uvec)))
+       if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
                return -EFAULT;
 
        for (i = 0; i < nr_segs; i++) {
index b4c0df6..c770570 100644 (file)
@@ -48,7 +48,7 @@ endif
 endif
 
 quiet_cmd_unroll = UNROLL  $@
-      cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$* < $< > $@
+      cmd_unroll = $(AWK) -v N=$* -f $(srctree)/$(src)/unroll.awk < $< > $@
 
 targets += int1.c int2.c int4.c int8.c int16.c int32.c
 $(obj)/int%.c: $(src)/int.uc $(src)/unroll.awk FORCE
index a260296..18f6ee3 100644 (file)
@@ -4371,7 +4371,7 @@ retry:
                 * So we need to block hugepage fault by PG_hwpoison bit check.
                 */
                if (unlikely(PageHWPoison(page))) {
-                       ret = VM_FAULT_HWPOISON |
+                       ret = VM_FAULT_HWPOISON_LARGE |
                                VM_FAULT_SET_HINDEX(hstate_index(h));
                        goto backout_unlocked;
                }
index bc0ad20..7ca0b92 100644 (file)
@@ -64,7 +64,8 @@ static inline bool kasan_pmd_table(pud_t pud)
        return false;
 }
 #endif
-pte_t kasan_early_shadow_pte[PTRS_PER_PTE] __page_aligned_bss;
+pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
+       __page_aligned_bss;
 
 static inline bool kasan_pte_table(pmd_t pmd)
 {
index d24bcfa..1eaaec1 100644 (file)
@@ -1427,7 +1427,7 @@ phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
 }
 
 /**
- * memblock_phys_alloc_try_nid - allocate a memory block from specified MUMA node
+ * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
  * @size: size of memory block to be allocated in bytes
  * @align: alignment of the region and block's size
  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
index 5a38e9e..04d9f15 100644 (file)
@@ -1940,7 +1940,7 @@ retry:
                        goto retry;
                }
        } else if (ret == -EIO) {
-               pr_info("%s: %#lx: unknown page type: %lx (%pGP)\n",
+               pr_info("%s: %#lx: unknown page type: %lx (%pGp)\n",
                         __func__, pfn, page->flags, &page->flags);
        }
 
index 8cf96bd..2c3a865 100644 (file)
@@ -1111,7 +1111,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                     const nodemask_t *to, int flags)
 {
        int busy = 0;
-       int err;
+       int err = 0;
        nodemask_t tmp;
 
        migrate_prep();
index 5860424..eb34d20 100644 (file)
@@ -2826,7 +2826,7 @@ EXPORT_SYMBOL(__test_set_page_writeback);
  */
 void wait_on_page_writeback(struct page *page)
 {
-       if (PageWriteback(page)) {
+       while (PageWriteback(page)) {
                trace_wait_on_page_writeback(page, page_mapping(page));
                wait_on_page_bit(page, PG_writeback);
        }
index bdbec4c..027f648 100644 (file)
@@ -2862,20 +2862,20 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 {
        struct page *page;
 
-#ifdef CONFIG_CMA
-       /*
-        * Balance movable allocations between regular and CMA areas by
-        * allocating from CMA when over half of the zone's free memory
-        * is in the CMA area.
-        */
-       if (alloc_flags & ALLOC_CMA &&
-           zone_page_state(zone, NR_FREE_CMA_PAGES) >
-           zone_page_state(zone, NR_FREE_PAGES) / 2) {
-               page = __rmqueue_cma_fallback(zone, order);
-               if (page)
-                       return page;
+       if (IS_ENABLED(CONFIG_CMA)) {
+               /*
+                * Balance movable allocations between regular and CMA areas by
+                * allocating from CMA when over half of the zone's free memory
+                * is in the CMA area.
+                */
+               if (alloc_flags & ALLOC_CMA &&
+                   zone_page_state(zone, NR_FREE_CMA_PAGES) >
+                   zone_page_state(zone, NR_FREE_PAGES) / 2) {
+                       page = __rmqueue_cma_fallback(zone, order);
+                       if (page)
+                               goto out;
+               }
        }
-#endif
 retry:
        page = __rmqueue_smallest(zone, order, migratetype);
        if (unlikely(!page)) {
@@ -2886,8 +2886,9 @@ retry:
                                                                alloc_flags))
                        goto retry;
        }
-
-       trace_mm_page_alloc_zone_locked(page, order, migratetype);
+out:
+       if (page)
+               trace_mm_page_alloc_zone_locked(page, order, migratetype);
        return page;
 }
 
index 4bcc119..f5fee9c 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/mm.h>
 #include <linux/uio.h>
 #include <linux/sched.h>
+#include <linux/compat.h>
 #include <linux/sched/mm.h>
 #include <linux/highmem.h>
 #include <linux/ptrace.h>
index dc5b42e..d9e4e10 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1973,7 +1973,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
 
                t = acquire_slab(s, n, page, object == NULL, &objects);
                if (!t)
-                       break;
+                       continue; /* cmpxchg raced */
 
                available += objects;
                if (!object) {
index 4d88fe5..e6f352b 100644 (file)
@@ -2420,8 +2420,10 @@ void *vmap(struct page **pages, unsigned int count,
                return NULL;
        }
 
-       if (flags & VM_MAP_PUT_PAGES)
+       if (flags & VM_MAP_PUT_PAGES) {
                area->pages = pages;
+               area->nr_pages = count;
+       }
        return area->addr;
 }
 EXPORT_SYMBOL(vmap);
index 257cba7..b1b574a 100644 (file)
@@ -1238,6 +1238,8 @@ static unsigned int shrink_page_list(struct list_head *page_list,
                        if (!PageSwapCache(page)) {
                                if (!(sc->gfp_mask & __GFP_IO))
                                        goto keep_locked;
+                               if (page_maybe_dma_pinned(page))
+                                       goto keep_locked;
                                if (PageTransHuge(page)) {
                                        /* cannot split THP, skip it */
                                        if (!can_split_huge_page(page, NULL))
index f292e02..8b64411 100644 (file)
@@ -284,8 +284,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
        return 0;
 
 out_free_newdev:
-       if (new_dev->reg_state == NETREG_UNINITIALIZED)
-               free_netdev(new_dev);
+       free_netdev(new_dev);
        return err;
 }
 
index c1c30a9..8b796c4 100644 (file)
@@ -272,7 +272,8 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
            kattr->test.repeat)
                return -EINVAL;
 
-       if (ctx_size_in < prog->aux->max_ctx_offset)
+       if (ctx_size_in < prog->aux->max_ctx_offset ||
+           ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
                return -EINVAL;
 
        if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
index 7839c3b..3ef7f78 100644 (file)
@@ -1155,6 +1155,7 @@ static int isotp_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
        if (peer)
                return -EOPNOTSUPP;
 
+       memset(addr, 0, sizeof(*addr));
        addr->can_family = AF_CAN;
        addr->can_ifindex = so->ifindex;
        addr->can_addr.tp.rx_id = so->rxid;
index 9815cfe..ca44c32 100644 (file)
@@ -569,6 +569,34 @@ e_range:
        return -ERANGE;
 }
 
+static int decode_con_secret(void **p, void *end, u8 *con_secret,
+                            int *con_secret_len)
+{
+       int len;
+
+       ceph_decode_32_safe(p, end, len, bad);
+       ceph_decode_need(p, end, len, bad);
+
+       dout("%s len %d\n", __func__, len);
+       if (con_secret) {
+               if (len > CEPH_MAX_CON_SECRET_LEN) {
+                       pr_err("connection secret too big %d\n", len);
+                       goto bad_memzero;
+               }
+               memcpy(con_secret, *p, len);
+               *con_secret_len = len;
+       }
+       memzero_explicit(*p, len);
+       *p += len;
+       return 0;
+
+bad_memzero:
+       memzero_explicit(*p, len);
+bad:
+       pr_err("failed to decode connection secret\n");
+       return -EINVAL;
+}
+
 static int handle_auth_session_key(struct ceph_auth_client *ac,
                                   void **p, void *end,
                                   u8 *session_key, int *session_key_len,
@@ -612,17 +640,9 @@ static int handle_auth_session_key(struct ceph_auth_client *ac,
                dout("%s decrypted %d bytes\n", __func__, ret);
                dend = dp + ret;
 
-               ceph_decode_32_safe(&dp, dend, len, e_inval);
-               if (len > CEPH_MAX_CON_SECRET_LEN) {
-                       pr_err("connection secret too big %d\n", len);
-                       return -EINVAL;
-               }
-
-               dout("%s connection secret len %d\n", __func__, len);
-               if (con_secret) {
-                       memcpy(con_secret, dp, len);
-                       *con_secret_len = len;
-               }
+               ret = decode_con_secret(&dp, dend, con_secret, con_secret_len);
+               if (ret)
+                       return ret;
        }
 
        /* service tickets */
@@ -828,7 +848,6 @@ static int decrypt_authorizer_reply(struct ceph_crypto_key *secret,
 {
        void *dp, *dend;
        u8 struct_v;
-       int len;
        int ret;
 
        dp = *p + ceph_x_encrypt_offset();
@@ -843,17 +862,9 @@ static int decrypt_authorizer_reply(struct ceph_crypto_key *secret,
        ceph_decode_64_safe(&dp, dend, *nonce_plus_one, e_inval);
        dout("%s nonce_plus_one %llu\n", __func__, *nonce_plus_one);
        if (struct_v >= 2) {
-               ceph_decode_32_safe(&dp, dend, len, e_inval);
-               if (len > CEPH_MAX_CON_SECRET_LEN) {
-                       pr_err("connection secret too big %d\n", len);
-                       return -EINVAL;
-               }
-
-               dout("%s connection secret len %d\n", __func__, len);
-               if (con_secret) {
-                       memcpy(con_secret, dp, len);
-                       *con_secret_len = len;
-               }
+               ret = decode_con_secret(&dp, dend, con_secret, con_secret_len);
+               if (ret)
+                       return ret;
        }
 
        return 0;
index 4f75df4..92d89b3 100644 (file)
@@ -96,6 +96,7 @@ int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
        key->len = ceph_decode_16(p);
        ceph_decode_need(p, end, key->len, bad);
        ret = set_secret(key, *p);
+       memzero_explicit(*p, key->len);
        *p += key->len;
        return ret;
 
@@ -134,7 +135,7 @@ int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
 void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
 {
        if (key) {
-               kfree(key->key);
+               kfree_sensitive(key->key);
                key->key = NULL;
                if (key->tfm) {
                        crypto_free_sync_skcipher(key->tfm);
index 04f653b..2cb5ffd 100644 (file)
@@ -1100,7 +1100,7 @@ static int read_partial_message(struct ceph_connection *con)
                if (ret < 0)
                        return ret;
 
-               BUG_ON(!con->in_msg ^ skip);
+               BUG_ON((!con->in_msg) ^ skip);
                if (skip) {
                        /* skip this message */
                        dout("alloc_msg said skip message\n");
index c38d8de..cc40ce4 100644 (file)
@@ -689,11 +689,10 @@ static int verify_epilogue_crcs(struct ceph_connection *con, u32 front_crc,
 }
 
 static int setup_crypto(struct ceph_connection *con,
-                       u8 *session_key, int session_key_len,
-                       u8 *con_secret, int con_secret_len)
+                       const u8 *session_key, int session_key_len,
+                       const u8 *con_secret, int con_secret_len)
 {
        unsigned int noio_flag;
-       void *p;
        int ret;
 
        dout("%s con %p con_mode %d session_key_len %d con_secret_len %d\n",
@@ -751,15 +750,14 @@ static int setup_crypto(struct ceph_connection *con,
                return ret;
        }
 
-       p = con_secret;
-       WARN_ON((unsigned long)p & crypto_aead_alignmask(con->v2.gcm_tfm));
-       ret = crypto_aead_setkey(con->v2.gcm_tfm, p, CEPH_GCM_KEY_LEN);
+       WARN_ON((unsigned long)con_secret &
+               crypto_aead_alignmask(con->v2.gcm_tfm));
+       ret = crypto_aead_setkey(con->v2.gcm_tfm, con_secret, CEPH_GCM_KEY_LEN);
        if (ret) {
                pr_err("failed to set gcm key: %d\n", ret);
                return ret;
        }
 
-       p += CEPH_GCM_KEY_LEN;
        WARN_ON(crypto_aead_ivsize(con->v2.gcm_tfm) != CEPH_GCM_IV_LEN);
        ret = crypto_aead_setauthsize(con->v2.gcm_tfm, CEPH_GCM_TAG_LEN);
        if (ret) {
@@ -777,8 +775,11 @@ static int setup_crypto(struct ceph_connection *con,
        aead_request_set_callback(con->v2.gcm_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  crypto_req_done, &con->v2.gcm_wait);
 
-       memcpy(&con->v2.in_gcm_nonce, p, CEPH_GCM_IV_LEN);
-       memcpy(&con->v2.out_gcm_nonce, p + CEPH_GCM_IV_LEN, CEPH_GCM_IV_LEN);
+       memcpy(&con->v2.in_gcm_nonce, con_secret + CEPH_GCM_KEY_LEN,
+              CEPH_GCM_IV_LEN);
+       memcpy(&con->v2.out_gcm_nonce,
+              con_secret + CEPH_GCM_KEY_LEN + CEPH_GCM_IV_LEN,
+              CEPH_GCM_IV_LEN);
        return 0;  /* auth_x, secure mode */
 }
 
@@ -800,7 +801,7 @@ static int hmac_sha256(struct ceph_connection *con, const struct kvec *kvecs,
        desc->tfm = con->v2.hmac_tfm;
        ret = crypto_shash_init(desc);
        if (ret)
-               return ret;
+               goto out;
 
        for (i = 0; i < kvec_cnt; i++) {
                WARN_ON((unsigned long)kvecs[i].iov_base &
@@ -808,15 +809,14 @@ static int hmac_sha256(struct ceph_connection *con, const struct kvec *kvecs,
                ret = crypto_shash_update(desc, kvecs[i].iov_base,
                                          kvecs[i].iov_len);
                if (ret)
-                       return ret;
+                       goto out;
        }
 
        ret = crypto_shash_final(desc, hmac);
-       if (ret)
-               return ret;
 
+out:
        shash_desc_zero(desc);
-       return 0;  /* auth_x, both plain and secure modes */
+       return ret;  /* auth_x, both plain and secure modes */
 }
 
 static void gcm_inc_nonce(struct ceph_gcm_nonce *nonce)
@@ -2072,27 +2072,32 @@ static int process_auth_done(struct ceph_connection *con, void *p, void *end)
        if (con->state != CEPH_CON_S_V2_AUTH) {
                dout("%s con %p state changed to %d\n", __func__, con,
                     con->state);
-               return -EAGAIN;
+               ret = -EAGAIN;
+               goto out;
        }
 
        dout("%s con %p handle_auth_done ret %d\n", __func__, con, ret);
        if (ret)
-               return ret;
+               goto out;
 
        ret = setup_crypto(con, session_key, session_key_len, con_secret,
                           con_secret_len);
        if (ret)
-               return ret;
+               goto out;
 
        reset_out_kvecs(con);
        ret = prepare_auth_signature(con);
        if (ret) {
                pr_err("prepare_auth_signature failed: %d\n", ret);
-               return ret;
+               goto out;
        }
 
        con->state = CEPH_CON_S_V2_AUTH_SIGNATURE;
-       return 0;
+
+out:
+       memzero_explicit(session_key_buf, sizeof(session_key_buf));
+       memzero_explicit(con_secret_buf, sizeof(con_secret_buf));
+       return ret;
 
 bad:
        pr_err("failed to decode auth_done\n");
@@ -3436,6 +3441,8 @@ void ceph_con_v2_reset_protocol(struct ceph_connection *con)
        }
 
        con->v2.con_mode = CEPH_CON_MODE_UNKNOWN;
+       memzero_explicit(&con->v2.in_gcm_nonce, CEPH_GCM_IV_LEN);
+       memzero_explicit(&con->v2.out_gcm_nonce, CEPH_GCM_IV_LEN);
 
        if (con->v2.hmac_tfm) {
                crypto_free_shash(con->v2.hmac_tfm);
index b9d54ed..195ceb8 100644 (file)
@@ -1433,7 +1433,7 @@ static int mon_handle_auth_bad_method(struct ceph_connection *con,
 /*
  * handle incoming message
  */
-static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
+static void mon_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
 {
        struct ceph_mon_client *monc = con->private;
        int type = le16_to_cpu(msg->hdr.type);
@@ -1565,21 +1565,21 @@ static void mon_fault(struct ceph_connection *con)
  * will come from the messenger workqueue, which is drained prior to
  * mon_client destruction.
  */
-static struct ceph_connection *con_get(struct ceph_connection *con)
+static struct ceph_connection *mon_get_con(struct ceph_connection *con)
 {
        return con;
 }
 
-static void con_put(struct ceph_connection *con)
+static void mon_put_con(struct ceph_connection *con)
 {
 }
 
 static const struct ceph_connection_operations mon_con_ops = {
-       .get = con_get,
-       .put = con_put,
-       .dispatch = dispatch,
-       .fault = mon_fault,
+       .get = mon_get_con,
+       .put = mon_put_con,
        .alloc_msg = mon_alloc_msg,
+       .dispatch = mon_dispatch,
+       .fault = mon_fault,
        .get_auth_request = mon_get_auth_request,
        .handle_auth_reply_more = mon_handle_auth_reply_more,
        .handle_auth_done = mon_handle_auth_done,
index 61229c5..ff8624a 100644 (file)
@@ -5412,7 +5412,7 @@ void ceph_osdc_cleanup(void)
 /*
  * handle incoming message
  */
-static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
+static void osd_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
 {
        struct ceph_osd *osd = con->private;
        struct ceph_osd_client *osdc = osd->o_osdc;
@@ -5534,9 +5534,9 @@ static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
        return m;
 }
 
-static struct ceph_msg *alloc_msg(struct ceph_connection *con,
-                                 struct ceph_msg_header *hdr,
-                                 int *skip)
+static struct ceph_msg *osd_alloc_msg(struct ceph_connection *con,
+                                     struct ceph_msg_header *hdr,
+                                     int *skip)
 {
        struct ceph_osd *osd = con->private;
        int type = le16_to_cpu(hdr->type);
@@ -5560,7 +5560,7 @@ static struct ceph_msg *alloc_msg(struct ceph_connection *con,
 /*
  * Wrappers to refcount containing ceph_osd struct
  */
-static struct ceph_connection *get_osd_con(struct ceph_connection *con)
+static struct ceph_connection *osd_get_con(struct ceph_connection *con)
 {
        struct ceph_osd *osd = con->private;
        if (get_osd(osd))
@@ -5568,7 +5568,7 @@ static struct ceph_connection *get_osd_con(struct ceph_connection *con)
        return NULL;
 }
 
-static void put_osd_con(struct ceph_connection *con)
+static void osd_put_con(struct ceph_connection *con)
 {
        struct ceph_osd *osd = con->private;
        put_osd(osd);
@@ -5582,8 +5582,8 @@ static void put_osd_con(struct ceph_connection *con)
  * Note: returned pointer is the address of a structure that's
  * managed separately.  Caller must *not* attempt to free it.
  */
-static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
-                                       int *proto, int force_new)
+static struct ceph_auth_handshake *
+osd_get_authorizer(struct ceph_connection *con, int *proto, int force_new)
 {
        struct ceph_osd *o = con->private;
        struct ceph_osd_client *osdc = o->o_osdc;
@@ -5599,7 +5599,7 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
        return auth;
 }
 
-static int add_authorizer_challenge(struct ceph_connection *con,
+static int osd_add_authorizer_challenge(struct ceph_connection *con,
                                    void *challenge_buf, int challenge_buf_len)
 {
        struct ceph_osd *o = con->private;
@@ -5610,7 +5610,7 @@ static int add_authorizer_challenge(struct ceph_connection *con,
                                            challenge_buf, challenge_buf_len);
 }
 
-static int verify_authorizer_reply(struct ceph_connection *con)
+static int osd_verify_authorizer_reply(struct ceph_connection *con)
 {
        struct ceph_osd *o = con->private;
        struct ceph_osd_client *osdc = o->o_osdc;
@@ -5622,7 +5622,7 @@ static int verify_authorizer_reply(struct ceph_connection *con)
                NULL, NULL, NULL, NULL);
 }
 
-static int invalidate_authorizer(struct ceph_connection *con)
+static int osd_invalidate_authorizer(struct ceph_connection *con)
 {
        struct ceph_osd *o = con->private;
        struct ceph_osd_client *osdc = o->o_osdc;
@@ -5731,18 +5731,18 @@ static int osd_check_message_signature(struct ceph_msg *msg)
 }
 
 static const struct ceph_connection_operations osd_con_ops = {
-       .get = get_osd_con,
-       .put = put_osd_con,
-       .dispatch = dispatch,
-       .get_authorizer = get_authorizer,
-       .add_authorizer_challenge = add_authorizer_challenge,
-       .verify_authorizer_reply = verify_authorizer_reply,
-       .invalidate_authorizer = invalidate_authorizer,
-       .alloc_msg = alloc_msg,
+       .get = osd_get_con,
+       .put = osd_put_con,
+       .alloc_msg = osd_alloc_msg,
+       .dispatch = osd_dispatch,
+       .fault = osd_fault,
        .reencode_message = osd_reencode_message,
+       .get_authorizer = osd_get_authorizer,
+       .add_authorizer_challenge = osd_add_authorizer_challenge,
+       .verify_authorizer_reply = osd_verify_authorizer_reply,
+       .invalidate_authorizer = osd_invalidate_authorizer,
        .sign_message = osd_sign_message,
        .check_message_signature = osd_check_message_signature,
-       .fault = osd_fault,
        .get_auth_request = osd_get_auth_request,
        .handle_auth_reply_more = osd_handle_auth_reply_more,
        .handle_auth_done = osd_handle_auth_done,
index 8fa7392..a979b86 100644 (file)
@@ -9661,9 +9661,20 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
                }
        }
 
-       if ((features & NETIF_F_HW_TLS_TX) && !(features & NETIF_F_HW_CSUM)) {
-               netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
-               features &= ~NETIF_F_HW_TLS_TX;
+       if (features & NETIF_F_HW_TLS_TX) {
+               bool ip_csum = (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) ==
+                       (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+               bool hw_csum = features & NETIF_F_HW_CSUM;
+
+               if (!ip_csum && !hw_csum) {
+                       netdev_dbg(dev, "Dropping TLS TX HW offload feature since no CSUM feature.\n");
+                       features &= ~NETIF_F_HW_TLS_TX;
+               }
+       }
+
+       if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
+               netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
+               features &= ~NETIF_F_HW_TLS_RX;
        }
 
        return features;
@@ -10077,17 +10088,11 @@ int register_netdevice(struct net_device *dev)
        ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
        ret = notifier_to_errno(ret);
        if (ret) {
+               /* Expect explicit free_netdev() on failure */
+               dev->needs_free_netdev = false;
                rollback_registered(dev);
-               rcu_barrier();
-
-               dev->reg_state = NETREG_UNREGISTERED;
-               /* We should put the kobject that hold in
-                * netdev_unregister_kobject(), otherwise
-                * the net device cannot be freed when
-                * driver calls free_netdev(), because the
-                * kobject is being hold.
-                */
-               kobject_put(&dev->dev.kobj);
+               net_set_todo(dev);
+               goto out;
        }
        /*
         *      Prevent userspace races by waiting until the network
@@ -10631,6 +10636,17 @@ void free_netdev(struct net_device *dev)
        struct napi_struct *p, *n;
 
        might_sleep();
+
+       /* When called immediately after register_netdevice() failed the unwind
+        * handling may still be dismantling the device. Handle that case by
+        * deferring the free.
+        */
+       if (dev->reg_state == NETREG_UNREGISTERING) {
+               ASSERT_RTNL();
+               dev->needs_free_netdev = true;
+               return;
+       }
+
        netif_free_tx_queues(dev);
        netif_free_rx_queues(dev);
 
index ee828e4..738d434 100644 (file)
@@ -4146,7 +4146,7 @@ out:
 static int devlink_nl_cmd_port_param_get_doit(struct sk_buff *skb,
                                              struct genl_info *info)
 {
-       struct devlink_port *devlink_port = info->user_ptr[0];
+       struct devlink_port *devlink_port = info->user_ptr[1];
        struct devlink_param_item *param_item;
        struct sk_buff *msg;
        int err;
@@ -4175,7 +4175,7 @@ static int devlink_nl_cmd_port_param_get_doit(struct sk_buff *skb,
 static int devlink_nl_cmd_port_param_set_doit(struct sk_buff *skb,
                                              struct genl_info *info)
 {
-       struct devlink_port *devlink_port = info->user_ptr[0];
+       struct devlink_port *devlink_port = info->user_ptr[1];
 
        return __devlink_nl_cmd_param_set_doit(devlink_port->devlink,
                                               devlink_port->index,
index 80dbf2f..8e582e2 100644 (file)
@@ -80,11 +80,11 @@ static void est_timer(struct timer_list *t)
        u64 rate, brate;
 
        est_fetch_counters(est, &b);
-       brate = (b.bytes - est->last_bytes) << (10 - est->ewma_log - est->intvl_log);
-       brate -= (est->avbps >> est->ewma_log);
+       brate = (b.bytes - est->last_bytes) << (10 - est->intvl_log);
+       brate = (brate >> est->ewma_log) - (est->avbps >> est->ewma_log);
 
-       rate = (b.packets - est->last_packets) << (10 - est->ewma_log - est->intvl_log);
-       rate -= (est->avpps >> est->ewma_log);
+       rate = (b.packets - est->last_packets) << (10 - est->intvl_log);
+       rate = (rate >> est->ewma_log) - (est->avpps >> est->ewma_log);
 
        write_seqcount_begin(&est->seq);
        est->avbps += brate;
@@ -143,6 +143,9 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
        if (parm->interval < -2 || parm->interval > 3)
                return -EINVAL;
 
+       if (parm->ewma_log == 0 || parm->ewma_log >= 31)
+               return -EINVAL;
+
        est = kzalloc(sizeof(*est), GFP_KERNEL);
        if (!est)
                return -ENOBUFS;
index 9500d28..277ed85 100644 (file)
@@ -1569,10 +1569,8 @@ static void neigh_proxy_process(struct timer_list *t)
 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
                    struct sk_buff *skb)
 {
-       unsigned long now = jiffies;
-
-       unsigned long sched_next = now + (prandom_u32() %
-                                         NEIGH_VAR(p, PROXY_DELAY));
+       unsigned long sched_next = jiffies +
+                       prandom_u32_max(NEIGH_VAR(p, PROXY_DELAY));
 
        if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
                kfree_skb(skb);
index 999b70c..daf502c 100644 (file)
@@ -1317,8 +1317,8 @@ static const struct attribute_group dql_group = {
 static ssize_t xps_cpus_show(struct netdev_queue *queue,
                             char *buf)
 {
+       int cpu, len, ret, num_tc = 1, tc = 0;
        struct net_device *dev = queue->dev;
-       int cpu, len, num_tc = 1, tc = 0;
        struct xps_dev_maps *dev_maps;
        cpumask_var_t mask;
        unsigned long index;
@@ -1328,22 +1328,31 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
 
        index = get_netdev_queue_index(queue);
 
+       if (!rtnl_trylock())
+               return restart_syscall();
+
        if (dev->num_tc) {
                /* Do not allow XPS on subordinate device directly */
                num_tc = dev->num_tc;
-               if (num_tc < 0)
-                       return -EINVAL;
+               if (num_tc < 0) {
+                       ret = -EINVAL;
+                       goto err_rtnl_unlock;
+               }
 
                /* If queue belongs to subordinate dev use its map */
                dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
 
                tc = netdev_txq_to_tc(dev, index);
-               if (tc < 0)
-                       return -EINVAL;
+               if (tc < 0) {
+                       ret = -EINVAL;
+                       goto err_rtnl_unlock;
+               }
        }
 
-       if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
-               return -ENOMEM;
+       if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
+               ret = -ENOMEM;
+               goto err_rtnl_unlock;
+       }
 
        rcu_read_lock();
        dev_maps = rcu_dereference(dev->xps_cpus_map);
@@ -1366,9 +1375,15 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
        }
        rcu_read_unlock();
 
+       rtnl_unlock();
+
        len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
        free_cpumask_var(mask);
        return len < PAGE_SIZE ? len : -EINVAL;
+
+err_rtnl_unlock:
+       rtnl_unlock();
+       return ret;
 }
 
 static ssize_t xps_cpus_store(struct netdev_queue *queue,
@@ -1396,7 +1411,13 @@ static ssize_t xps_cpus_store(struct netdev_queue *queue,
                return err;
        }
 
+       if (!rtnl_trylock()) {
+               free_cpumask_var(mask);
+               return restart_syscall();
+       }
+
        err = netif_set_xps_queue(dev, mask, index);
+       rtnl_unlock();
 
        free_cpumask_var(mask);
 
@@ -1408,22 +1429,29 @@ static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init
 
 static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
 {
+       int j, len, ret, num_tc = 1, tc = 0;
        struct net_device *dev = queue->dev;
        struct xps_dev_maps *dev_maps;
        unsigned long *mask, index;
-       int j, len, num_tc = 1, tc = 0;
 
        index = get_netdev_queue_index(queue);
 
+       if (!rtnl_trylock())
+               return restart_syscall();
+
        if (dev->num_tc) {
                num_tc = dev->num_tc;
                tc = netdev_txq_to_tc(dev, index);
-               if (tc < 0)
-                       return -EINVAL;
+               if (tc < 0) {
+                       ret = -EINVAL;
+                       goto err_rtnl_unlock;
+               }
        }
        mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
-       if (!mask)
-               return -ENOMEM;
+       if (!mask) {
+               ret = -ENOMEM;
+               goto err_rtnl_unlock;
+       }
 
        rcu_read_lock();
        dev_maps = rcu_dereference(dev->xps_rxqs_map);
@@ -1449,10 +1477,16 @@ static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
 out_no_maps:
        rcu_read_unlock();
 
+       rtnl_unlock();
+
        len = bitmap_print_to_pagebuf(false, buf, mask, dev->num_rx_queues);
        bitmap_free(mask);
 
        return len < PAGE_SIZE ? len : -EINVAL;
+
+err_rtnl_unlock:
+       rtnl_unlock();
+       return ret;
 }
 
 static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
@@ -1478,10 +1512,17 @@ static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
                return err;
        }
 
+       if (!rtnl_trylock()) {
+               bitmap_free(mask);
+               return restart_syscall();
+       }
+
        cpus_read_lock();
        err = __netif_set_xps_queue(dev, mask, index, true);
        cpus_read_unlock();
 
+       rtnl_unlock();
+
        bitmap_free(mask);
        return err ? : len;
 }
index bb0596c..3d6ab19 100644 (file)
@@ -3439,26 +3439,15 @@ replay:
 
        dev->ifindex = ifm->ifi_index;
 
-       if (ops->newlink) {
+       if (ops->newlink)
                err = ops->newlink(link_net ? : net, dev, tb, data, extack);
-               /* Drivers should call free_netdev() in ->destructor
-                * and unregister it on failure after registration
-                * so that device could be finally freed in rtnl_unlock.
-                */
-               if (err < 0) {
-                       /* If device is not registered at all, free it now */
-                       if (dev->reg_state == NETREG_UNINITIALIZED ||
-                           dev->reg_state == NETREG_UNREGISTERED)
-                               free_netdev(dev);
-                       goto out;
-               }
-       } else {
+       else
                err = register_netdevice(dev);
-               if (err < 0) {
-                       free_netdev(dev);
-                       goto out;
-               }
+       if (err < 0) {
+               free_netdev(dev);
+               goto out;
        }
+
        err = rtnl_configure_link(dev, ifm);
        if (err < 0)
                goto out_unregister;
index f62cae3..785daff 100644 (file)
@@ -437,7 +437,11 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
 
        len += NET_SKB_PAD;
 
-       if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
+       /* If requested length is either too small or too big,
+        * we use kmalloc() for skb->head allocation.
+        */
+       if (len <= SKB_WITH_OVERHEAD(1024) ||
+           len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
            (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
                skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
                if (!skb)
@@ -501,13 +505,17 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
                                 gfp_t gfp_mask)
 {
-       struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+       struct napi_alloc_cache *nc;
        struct sk_buff *skb;
        void *data;
 
        len += NET_SKB_PAD + NET_IP_ALIGN;
 
-       if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
+       /* If requested length is either too small or too big,
+        * we use kmalloc() for skb->head allocation.
+        */
+       if (len <= SKB_WITH_OVERHEAD(1024) ||
+           len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
            (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
                skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
                if (!skb)
@@ -515,6 +523,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
                goto skb_success;
        }
 
+       nc = this_cpu_ptr(&napi_alloc_cache);
        len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        len = SKB_DATA_ALIGN(len);
 
@@ -3442,6 +3451,7 @@ void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
        st->root_skb = st->cur_skb = skb;
        st->frag_idx = st->stepped_offset = 0;
        st->frag_data = NULL;
+       st->frag_off = 0;
 }
 EXPORT_SYMBOL(skb_prepare_seq_read);
 
@@ -3496,14 +3506,27 @@ next_skb:
                st->stepped_offset += skb_headlen(st->cur_skb);
 
        while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
+               unsigned int pg_idx, pg_off, pg_sz;
+
                frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
-               block_limit = skb_frag_size(frag) + st->stepped_offset;
 
+               pg_idx = 0;
+               pg_off = skb_frag_off(frag);
+               pg_sz = skb_frag_size(frag);
+
+               if (skb_frag_must_loop(skb_frag_page(frag))) {
+                       pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT;
+                       pg_off = offset_in_page(pg_off + st->frag_off);
+                       pg_sz = min_t(unsigned int, pg_sz - st->frag_off,
+                                                   PAGE_SIZE - pg_off);
+               }
+
+               block_limit = pg_sz + st->stepped_offset;
                if (abs_offset < block_limit) {
                        if (!st->frag_data)
-                               st->frag_data = kmap_atomic(skb_frag_page(frag));
+                               st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx);
 
-                       *data = (u8 *) st->frag_data + skb_frag_off(frag) +
+                       *data = (u8 *)st->frag_data + pg_off +
                                (abs_offset - st->stepped_offset);
 
                        return block_limit - abs_offset;
@@ -3514,8 +3537,12 @@ next_skb:
                        st->frag_data = NULL;
                }
 
-               st->frag_idx++;
-               st->stepped_offset += skb_frag_size(frag);
+               st->stepped_offset += pg_sz;
+               st->frag_off += pg_sz;
+               if (st->frag_off == skb_frag_size(frag)) {
+                       st->frag_off = 0;
+                       st->frag_idx++;
+               }
        }
 
        if (st->frag_data) {
@@ -3655,7 +3682,8 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
        unsigned int delta_truesize = 0;
        unsigned int delta_len = 0;
        struct sk_buff *tail = NULL;
-       struct sk_buff *nskb;
+       struct sk_buff *nskb, *tmp;
+       int err;
 
        skb_push(skb, -skb_network_offset(skb) + offset);
 
@@ -3665,11 +3693,28 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
                nskb = list_skb;
                list_skb = list_skb->next;
 
+               err = 0;
+               if (skb_shared(nskb)) {
+                       tmp = skb_clone(nskb, GFP_ATOMIC);
+                       if (tmp) {
+                               consume_skb(nskb);
+                               nskb = tmp;
+                               err = skb_unclone(nskb, GFP_ATOMIC);
+                       } else {
+                               err = -ENOMEM;
+                       }
+               }
+
                if (!tail)
                        skb->next = nskb;
                else
                        tail->next = nskb;
 
+               if (unlikely(err)) {
+                       nskb->next = list_skb;
+                       goto err_linearize;
+               }
+
                tail = nskb;
 
                delta_len += nskb->len;
index bbdd3c7..b065f0a 100644 (file)
@@ -293,7 +293,7 @@ select_by_hash:
                        i = j = reciprocal_scale(hash, socks);
                        while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
                                i++;
-                               if (i >= reuse->num_socks)
+                               if (i >= socks)
                                        i = 0;
                                if (i == j)
                                        goto out;
index 084e159..653e3bc 100644 (file)
@@ -1765,6 +1765,8 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
        fn = &reply_funcs[dcb->cmd];
        if (!fn->cb)
                return -EOPNOTSUPP;
+       if (fn->type == RTM_SETDCB && !netlink_capable(skb, CAP_NET_ADMIN))
+               return -EPERM;
 
        if (!tb[DCB_ATTR_IFNAME])
                return -EINVAL;
index 183003e..a47e0f9 100644 (file)
@@ -353,9 +353,13 @@ static int dsa_port_devlink_setup(struct dsa_port *dp)
 
 static void dsa_port_teardown(struct dsa_port *dp)
 {
+       struct devlink_port *dlp = &dp->devlink_port;
+
        if (!dp->setup)
                return;
 
+       devlink_port_type_clear(dlp);
+
        switch (dp->type) {
        case DSA_PORT_TYPE_UNUSED:
                break;
index 5a0f6fe..cb3a5cf 100644 (file)
@@ -309,8 +309,18 @@ static struct lock_class_key dsa_master_addr_list_lock_key;
 int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
 {
        int mtu = ETH_DATA_LEN + cpu_dp->tag_ops->overhead;
+       struct dsa_switch *ds = cpu_dp->ds;
+       struct device_link *consumer_link;
        int ret;
 
+       /* The DSA master must use SET_NETDEV_DEV for this to work. */
+       consumer_link = device_link_add(ds->dev, dev->dev.parent,
+                                       DL_FLAG_AUTOREMOVE_CONSUMER);
+       if (!consumer_link)
+               netdev_err(dev,
+                          "Failed to create a device link to DSA switch %s\n",
+                          dev_name(ds->dev));
+
        rtnl_lock();
        ret = dev_set_mtu(dev, mtu);
        rtnl_unlock();
index 8b07f3a..a3271ec 100644 (file)
@@ -443,7 +443,6 @@ static int esp_output_encap(struct xfrm_state *x, struct sk_buff *skb,
 int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
 {
        u8 *tail;
-       u8 *vaddr;
        int nfrags;
        int esph_offset;
        struct page *page;
@@ -485,14 +484,10 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
                        page = pfrag->page;
                        get_page(page);
 
-                       vaddr = kmap_atomic(page);
-
-                       tail = vaddr + pfrag->offset;
+                       tail = page_address(page) + pfrag->offset;
 
                        esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
 
-                       kunmap_atomic(vaddr);
-
                        nfrags = skb_shinfo(skb)->nr_frags;
 
                        __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
index cdf6ec5..84bb707 100644 (file)
@@ -292,7 +292,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
                        .flowi4_iif = LOOPBACK_IFINDEX,
                        .flowi4_oif = l3mdev_master_ifindex_rcu(dev),
                        .daddr = ip_hdr(skb)->saddr,
-                       .flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
+                       .flowi4_tos = ip_hdr(skb)->tos & IPTOS_RT_MASK,
                        .flowi4_scope = scope,
                        .flowi4_mark = vmark ? skb->mark : 0,
                };
index 66fdbfe..5d1e6fe 100644 (file)
@@ -128,7 +128,7 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
         * to 0 and sets the configured key in the
         * inner erspan header field
         */
-       if (greh->protocol == htons(ETH_P_ERSPAN) ||
+       if ((greh->protocol == htons(ETH_P_ERSPAN) && hdr_len != 4) ||
            greh->protocol == htons(ETH_P_ERSPAN2)) {
                struct erspan_base_hdr *ershdr;
 
index fd8b880..6bd7ca0 100644 (file)
@@ -851,6 +851,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
                newicsk->icsk_retransmits = 0;
                newicsk->icsk_backoff     = 0;
                newicsk->icsk_probes_out  = 0;
+               newicsk->icsk_probes_tstamp = 0;
 
                /* Deinitialize accept_queue to trap illegal accesses. */
                memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
index 89fff5f..2ed0b01 100644 (file)
@@ -302,7 +302,7 @@ static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *
        if (skb_is_gso(skb))
                return ip_finish_output_gso(net, sk, skb, mtu);
 
-       if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU))
+       if (skb->len > mtu || IPCB(skb)->frag_max_size)
                return ip_fragment(net, sk, skb, mtu, ip_finish_output2);
 
        return ip_finish_output2(net, sk, skb);
index ee65c92..64594aa 100644 (file)
@@ -759,8 +759,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                goto tx_error;
        }
 
-       if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph,
-                           0, 0, false)) {
+       df = tnl_params->frag_off;
+       if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
+               df |= (inner_iph->frag_off & htons(IP_DF));
+
+       if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, 0, 0, false)) {
                ip_rt_put(rt);
                goto tx_error;
        }
@@ -788,10 +791,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                        ttl = ip4_dst_hoplimit(&rt->dst);
        }
 
-       df = tnl_params->frag_off;
-       if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df)
-               df |= (inner_iph->frag_off&htons(IP_DF));
-
        max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
                        + rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
        if (max_headroom > dev->needed_headroom)
index 563b62b..c576a63 100644 (file)
@@ -1379,7 +1379,7 @@ static int compat_get_entries(struct net *net,
        xt_compat_lock(NFPROTO_ARP);
        t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
        if (!IS_ERR(t)) {
-               const struct xt_table_info *private = t->private;
+               const struct xt_table_info *private = xt_table_get_private_protected(t);
                struct xt_table_info info;
 
                ret = compat_table_info(private, &info);
index 6e2851f..e8f6f9d 100644 (file)
@@ -1589,7 +1589,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
        xt_compat_lock(AF_INET);
        t = xt_find_table_lock(net, AF_INET, get.name);
        if (!IS_ERR(t)) {
-               const struct xt_table_info *private = t->private;
+               const struct xt_table_info *private = xt_table_get_private_protected(t);
                struct xt_table_info info;
                ret = compat_table_info(private, &info);
                if (!ret && get.size == info.size)
index cc23f1c..8cd3224 100644 (file)
@@ -76,7 +76,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
        flow.daddr = iph->saddr;
        flow.saddr = rpfilter_get_saddr(iph->daddr);
        flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
-       flow.flowi4_tos = RT_TOS(iph->tos);
+       flow.flowi4_tos = iph->tos & IPTOS_RT_MASK;
        flow.flowi4_scope = RT_SCOPE_UNIVERSE;
        flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par));
 
index 5e1b22d..e53e43a 100644 (file)
@@ -627,7 +627,7 @@ static int nh_check_attr_group(struct net *net, struct nlattr *tb[],
        for (i = NHA_GROUP_TYPE + 1; i < __NHA_MAX; ++i) {
                if (!tb[i])
                        continue;
-               if (tb[NHA_FDB])
+               if (i == NHA_FDB)
                        continue;
                NL_SET_ERR_MSG(extack,
                               "No other attributes can be set in nexthop groups");
@@ -1459,8 +1459,10 @@ static struct nexthop *nexthop_create_group(struct net *net,
        return nh;
 
 out_no_nh:
-       for (; i >= 0; --i)
+       for (i--; i >= 0; --i) {
+               list_del(&nhg->nh_entries[i].nh_list);
                nexthop_put(nhg->nh_entries[i].nh);
+       }
 
        kfree(nhg->spare);
        kfree(nhg);
index ed42d21..32545ec 100644 (file)
@@ -2937,6 +2937,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 
        icsk->icsk_backoff = 0;
        icsk->icsk_probes_out = 0;
+       icsk->icsk_probes_tstamp = 0;
        icsk->icsk_rto = TCP_TIMEOUT_INIT;
        icsk->icsk_rto_min = TCP_RTO_MIN;
        icsk->icsk_delack_max = TCP_DELACK_MAX;
index c7e16b0..a7dfca0 100644 (file)
@@ -3384,6 +3384,7 @@ static void tcp_ack_probe(struct sock *sk)
                return;
        if (!after(TCP_SKB_CB(head)->end_seq, tcp_wnd_end(tp))) {
                icsk->icsk_backoff = 0;
+               icsk->icsk_probes_tstamp = 0;
                inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
                /* Socket must be waked up by subsequent tcp_data_snd_check().
                 * This function is not for random using!
@@ -4396,10 +4397,9 @@ static void tcp_rcv_spurious_retrans(struct sock *sk, const struct sk_buff *skb)
         * The receiver remembers and reflects via DSACKs. Leverage the
         * DSACK state and change the txhash to re-route speculatively.
         */
-       if (TCP_SKB_CB(skb)->seq == tcp_sk(sk)->duplicate_sack[0].start_seq) {
-               sk_rethink_txhash(sk);
+       if (TCP_SKB_CB(skb)->seq == tcp_sk(sk)->duplicate_sack[0].start_seq &&
+           sk_rethink_txhash(sk))
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDUPLICATEDATAREHASH);
-       }
 }
 
 static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
index 58207c7..777306b 100644 (file)
@@ -1595,6 +1595,8 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                tcp_move_syn(newtp, req);
                ireq->ireq_opt = NULL;
        } else {
+               newinet->inet_opt = NULL;
+
                if (!req_unhash && found_dup_sk) {
                        /* This code path should only be executed in the
                         * syncookie case only
@@ -1602,8 +1604,6 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                        bh_unlock_sock(newsk);
                        sock_put(newsk);
                        newsk = NULL;
-               } else {
-                       newinet->inet_opt = NULL;
                }
        }
        return newsk;
@@ -1760,6 +1760,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
        u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
+       u32 tail_gso_size, tail_gso_segs;
        struct skb_shared_info *shinfo;
        const struct tcphdr *th;
        struct tcphdr *thtail;
@@ -1767,6 +1768,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
        unsigned int hdrlen;
        bool fragstolen;
        u32 gso_segs;
+       u32 gso_size;
        int delta;
 
        /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
@@ -1792,13 +1794,6 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
         */
        th = (const struct tcphdr *)skb->data;
        hdrlen = th->doff * 4;
-       shinfo = skb_shinfo(skb);
-
-       if (!shinfo->gso_size)
-               shinfo->gso_size = skb->len - hdrlen;
-
-       if (!shinfo->gso_segs)
-               shinfo->gso_segs = 1;
 
        tail = sk->sk_backlog.tail;
        if (!tail)
@@ -1821,6 +1816,15 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
                goto no_coalesce;
 
        __skb_pull(skb, hdrlen);
+
+       shinfo = skb_shinfo(skb);
+       gso_size = shinfo->gso_size ?: skb->len;
+       gso_segs = shinfo->gso_segs ?: 1;
+
+       shinfo = skb_shinfo(tail);
+       tail_gso_size = shinfo->gso_size ?: (tail->len - hdrlen);
+       tail_gso_segs = shinfo->gso_segs ?: 1;
+
        if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
                TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
 
@@ -1847,11 +1851,8 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
                }
 
                /* Not as strict as GRO. We only need to carry mss max value */
-               skb_shinfo(tail)->gso_size = max(shinfo->gso_size,
-                                                skb_shinfo(tail)->gso_size);
-
-               gso_segs = skb_shinfo(tail)->gso_segs + shinfo->gso_segs;
-               skb_shinfo(tail)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
+               shinfo->gso_size = max(gso_size, tail_gso_size);
+               shinfo->gso_segs = min_t(u32, gso_segs + tail_gso_segs, 0xFFFF);
 
                sk->sk_backlog.len += delta;
                __NET_INC_STATS(sock_net(sk),
index f322e79..ab45869 100644 (file)
@@ -4084,6 +4084,7 @@ void tcp_send_probe0(struct sock *sk)
                /* Cancel probe timer, if it is not required. */
                icsk->icsk_probes_out = 0;
                icsk->icsk_backoff = 0;
+               icsk->icsk_probes_tstamp = 0;
                return;
        }
 
index 6c62b9e..faa9294 100644 (file)
@@ -219,14 +219,8 @@ static int tcp_write_timeout(struct sock *sk)
        int retry_until;
 
        if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
-               if (icsk->icsk_retransmits) {
-                       dst_negative_advice(sk);
-               } else {
-                       sk_rethink_txhash(sk);
-                       tp->timeout_rehash++;
-                       __NET_INC_STATS(sock_net(sk),
-                                       LINUX_MIB_TCPTIMEOUTREHASH);
-               }
+               if (icsk->icsk_retransmits)
+                       __dst_negative_advice(sk);
                retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
                expired = icsk->icsk_retransmits >= retry_until;
        } else {
@@ -234,12 +228,7 @@ static int tcp_write_timeout(struct sock *sk)
                        /* Black hole detection */
                        tcp_mtu_probing(icsk, sk);
 
-                       dst_negative_advice(sk);
-               } else {
-                       sk_rethink_txhash(sk);
-                       tp->timeout_rehash++;
-                       __NET_INC_STATS(sock_net(sk),
-                                       LINUX_MIB_TCPTIMEOUTREHASH);
+                       __dst_negative_advice(sk);
                }
 
                retry_until = net->ipv4.sysctl_tcp_retries2;
@@ -270,6 +259,11 @@ static int tcp_write_timeout(struct sock *sk)
                return 1;
        }
 
+       if (sk_rethink_txhash(sk)) {
+               tp->timeout_rehash++;
+               __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH);
+       }
+
        return 0;
 }
 
@@ -349,6 +343,7 @@ static void tcp_probe_timer(struct sock *sk)
 
        if (tp->packets_out || !skb) {
                icsk->icsk_probes_out = 0;
+               icsk->icsk_probes_tstamp = 0;
                return;
        }
 
@@ -360,13 +355,12 @@ static void tcp_probe_timer(struct sock *sk)
         * corresponding system limit. We also implement similar policy when
         * we use RTO to probe window in tcp_retransmit_timer().
         */
-       if (icsk->icsk_user_timeout) {
-               u32 elapsed = tcp_model_timeout(sk, icsk->icsk_probes_out,
-                                               tcp_probe0_base(sk));
-
-               if (elapsed >= icsk->icsk_user_timeout)
-                       goto abort;
-       }
+       if (!icsk->icsk_probes_tstamp)
+               icsk->icsk_probes_tstamp = tcp_jiffies32;
+       else if (icsk->icsk_user_timeout &&
+                (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >=
+                msecs_to_jiffies(icsk->icsk_user_timeout))
+               goto abort;
 
        max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
        if (sock_flag(sk, SOCK_DEAD)) {
index 7103b0a..69ea765 100644 (file)
@@ -2555,7 +2555,8 @@ int udp_v4_early_demux(struct sk_buff *skb)
                 */
                if (!inet_sk(sk)->inet_daddr && in_dev)
                        return ip_mc_validate_source(skb, iph->daddr,
-                                                    iph->saddr, iph->tos,
+                                                    iph->saddr,
+                                                    iph->tos & IPTOS_RT_MASK,
                                                     skb->dev, in_dev, &itag);
        }
        return 0;
index eff2cac..9edc5bb 100644 (file)
@@ -2467,8 +2467,9 @@ static void addrconf_add_mroute(struct net_device *dev)
                .fc_ifindex = dev->ifindex,
                .fc_dst_len = 8,
                .fc_flags = RTF_UP,
-               .fc_type = RTN_UNICAST,
+               .fc_type = RTN_MULTICAST,
                .fc_nlinfo.nl_net = dev_net(dev),
+               .fc_protocol = RTPROT_KERNEL,
        };
 
        ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
index 52c2f06..2b804fc 100644 (file)
@@ -478,7 +478,6 @@ static int esp6_output_encap(struct xfrm_state *x, struct sk_buff *skb,
 int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
 {
        u8 *tail;
-       u8 *vaddr;
        int nfrags;
        int esph_offset;
        struct page *page;
@@ -519,14 +518,10 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
                        page = pfrag->page;
                        get_page(page);
 
-                       vaddr = kmap_atomic(page);
-
-                       tail = vaddr + pfrag->offset;
+                       tail = page_address(page) + pfrag->offset;
 
                        esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
 
-                       kunmap_atomic(vaddr);
-
                        nfrags = skb_shinfo(skb)->nr_frags;
 
                        __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
index 605cdd3..f43e275 100644 (file)
@@ -1025,6 +1025,8 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn,
 {
        struct fib6_table *table = rt->fib6_table;
 
+       /* Flush all cached dst in exception table */
+       rt6_flush_exceptions(rt);
        fib6_drop_pcpu_from(rt, table);
 
        if (rt->nh && !list_empty(&rt->nh_list))
@@ -1927,9 +1929,6 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
        net->ipv6.rt6_stats->fib_rt_entries--;
        net->ipv6.rt6_stats->fib_discarded_routes++;
 
-       /* Flush all cached dst in exception table */
-       rt6_flush_exceptions(rt);
-
        /* Reset round-robin state, if necessary */
        if (rcu_access_pointer(fn->rr_ptr) == rt)
                fn->rr_ptr = NULL;
index 749ad72..077d43a 100644 (file)
@@ -125,8 +125,43 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
        return -EINVAL;
 }
 
+static int
+ip6_finish_output_gso_slowpath_drop(struct net *net, struct sock *sk,
+                                   struct sk_buff *skb, unsigned int mtu)
+{
+       struct sk_buff *segs, *nskb;
+       netdev_features_t features;
+       int ret = 0;
+
+       /* Please see corresponding comment in ip_finish_output_gso
+        * describing the cases where GSO segment length exceeds the
+        * egress MTU.
+        */
+       features = netif_skb_features(skb);
+       segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+       if (IS_ERR_OR_NULL(segs)) {
+               kfree_skb(skb);
+               return -ENOMEM;
+       }
+
+       consume_skb(skb);
+
+       skb_list_walk_safe(segs, segs, nskb) {
+               int err;
+
+               skb_mark_not_on_list(segs);
+               err = ip6_fragment(net, sk, segs, ip6_finish_output2);
+               if (err && ret == 0)
+                       ret = err;
+       }
+
+       return ret;
+}
+
 static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+       unsigned int mtu;
+
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
        /* Policy lookup after SNAT yielded a new policy */
        if (skb_dst(skb)->xfrm) {
@@ -135,7 +170,11 @@ static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff
        }
 #endif
 
-       if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
+       mtu = ip6_skb_dst_mtu(skb);
+       if (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))
+               return ip6_finish_output_gso_slowpath_drop(net, sk, skb, mtu);
+
+       if ((skb->len > mtu && !skb_is_gso(skb)) ||
            dst_allfrag(skb_dst(skb)) ||
            (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
                return ip6_fragment(net, sk, skb, ip6_finish_output2);
index c4f532f..0d453fa 100644 (file)
@@ -1598,7 +1598,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
        xt_compat_lock(AF_INET6);
        t = xt_find_table_lock(net, AF_INET6, get.name);
        if (!IS_ERR(t)) {
-               const struct xt_table_info *private = t->private;
+               const struct xt_table_info *private = xt_table_get_private_protected(t);
                struct xt_table_info info;
                ret = compat_table_info(private, &info);
                if (!ret && get.size == info.size)
index 2da0ee7..9363686 100644 (file)
@@ -1645,8 +1645,11 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev,
        }
 
 #ifdef CONFIG_IPV6_SIT_6RD
-       if (ipip6_netlink_6rd_parms(data, &ip6rd))
+       if (ipip6_netlink_6rd_parms(data, &ip6rd)) {
                err = ipip6_tunnel_update_6rd(nt, &ip6rd);
+               if (err < 0)
+                       unregister_netdevice_queue(dev, NULL);
+       }
 #endif
 
        return err;
index 213ea7a..4096188 100644 (file)
@@ -489,6 +489,7 @@ static int lapb_device_event(struct notifier_block *this, unsigned long event,
                break;
        }
 
+       lapb_put(lapb);
        return NOTIFY_DONE;
 }
 
index 48f144f..9e723d9 100644 (file)
@@ -120,18 +120,17 @@ static ssize_t aqm_write(struct file *file,
 {
        struct ieee80211_local *local = file->private_data;
        char buf[100];
-       size_t len;
 
-       if (count > sizeof(buf))
+       if (count >= sizeof(buf))
                return -EINVAL;
 
        if (copy_from_user(buf, user_buf, count))
                return -EFAULT;
 
-       buf[sizeof(buf) - 1] = '\0';
-       len = strlen(buf);
-       if (len > 0 && buf[len-1] == '\n')
-               buf[len-1] = 0;
+       if (count && buf[count - 1] == '\n')
+               buf[count - 1] = '\0';
+       else
+               buf[count] = '\0';
 
        if (sscanf(buf, "fq_limit %u", &local->fq.limit) == 1)
                return count;
@@ -177,18 +176,17 @@ static ssize_t airtime_flags_write(struct file *file,
 {
        struct ieee80211_local *local = file->private_data;
        char buf[16];
-       size_t len;
 
-       if (count > sizeof(buf))
+       if (count >= sizeof(buf))
                return -EINVAL;
 
        if (copy_from_user(buf, user_buf, count))
                return -EFAULT;
 
-       buf[sizeof(buf) - 1] = 0;
-       len = strlen(buf);
-       if (len > 0 && buf[len - 1] == '\n')
-               buf[len - 1] = 0;
+       if (count && buf[count - 1] == '\n')
+               buf[count - 1] = '\0';
+       else
+               buf[count] = '\0';
 
        if (kstrtou16(buf, 0, &local->airtime_flags))
                return -EINVAL;
@@ -237,20 +235,19 @@ static ssize_t aql_txq_limit_write(struct file *file,
 {
        struct ieee80211_local *local = file->private_data;
        char buf[100];
-       size_t len;
        u32 ac, q_limit_low, q_limit_high, q_limit_low_old, q_limit_high_old;
        struct sta_info *sta;
 
-       if (count > sizeof(buf))
+       if (count >= sizeof(buf))
                return -EINVAL;
 
        if (copy_from_user(buf, user_buf, count))
                return -EFAULT;
 
-       buf[sizeof(buf) - 1] = 0;
-       len = strlen(buf);
-       if (len > 0 && buf[len - 1] == '\n')
-               buf[len - 1] = 0;
+       if (count && buf[count - 1] == '\n')
+               buf[count - 1] = '\0';
+       else
+               buf[count] = '\0';
 
        if (sscanf(buf, "%u %u %u", &ac, &q_limit_low, &q_limit_high) != 3)
                return -EINVAL;
@@ -306,18 +303,17 @@ static ssize_t force_tx_status_write(struct file *file,
 {
        struct ieee80211_local *local = file->private_data;
        char buf[3];
-       size_t len;
 
-       if (count > sizeof(buf))
+       if (count >= sizeof(buf))
                return -EINVAL;
 
        if (copy_from_user(buf, user_buf, count))
                return -EFAULT;
 
-       buf[sizeof(buf) - 1] = '\0';
-       len = strlen(buf);
-       if (len > 0 && buf[len - 1] == '\n')
-               buf[len - 1] = 0;
+       if (count && buf[count - 1] == '\n')
+               buf[count - 1] = '\0';
+       else
+               buf[count] = '\0';
 
        if (buf[0] == '0' && buf[1] == '\0')
                local->force_tx_status = 0;
index 13b9bcc..972895e 100644 (file)
@@ -4176,6 +4176,8 @@ void ieee80211_check_fast_rx(struct sta_info *sta)
 
        rcu_read_lock();
        key = rcu_dereference(sta->ptk[sta->ptk_idx]);
+       if (!key)
+               key = rcu_dereference(sdata->default_unicast_key);
        if (key) {
                switch (key->conf.cipher) {
                case WLAN_CIPHER_SUITE_TKIP:
index 6422da6..ebb3228 100644 (file)
@@ -649,7 +649,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
                if (!skip_hw && tx->key &&
                    tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
                        info->control.hw_key = &tx->key->conf;
-       } else if (!ieee80211_is_mgmt(hdr->frame_control) && tx->sta &&
+       } else if (ieee80211_is_data_present(hdr->frame_control) && tx->sta &&
                   test_sta_flag(tx->sta, WLAN_STA_USES_ENCRYPTION)) {
                return TX_DROP;
        }
@@ -3809,7 +3809,7 @@ void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
                 * get immediately moved to the back of the list on the next
                 * call to ieee80211_next_txq().
                 */
-               if (txqi->txq.sta &&
+               if (txqi->txq.sta && local->airtime_flags &&
                    wiphy_ext_feature_isset(local->hw.wiphy,
                                            NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
                        list_add(&txqi->schedule_order,
@@ -4251,7 +4251,6 @@ netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
        struct ethhdr *ehdr = (struct ethhdr *)skb->data;
        struct ieee80211_key *key;
        struct sta_info *sta;
-       bool offload = true;
 
        if (unlikely(skb->len < ETH_HLEN)) {
                kfree_skb(skb);
@@ -4267,18 +4266,22 @@ netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
 
        if (unlikely(IS_ERR_OR_NULL(sta) || !sta->uploaded ||
            !test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
-               sdata->control_port_protocol == ehdr->h_proto))
-               offload = false;
-       else if ((key = rcu_dereference(sta->ptk[sta->ptk_idx])) &&
-                (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
-                 key->conf.cipher == WLAN_CIPHER_SUITE_TKIP))
-               offload = false;
-
-       if (offload)
-               ieee80211_8023_xmit(sdata, dev, sta, key, skb);
-       else
-               ieee80211_subif_start_xmit(skb, dev);
+           sdata->control_port_protocol == ehdr->h_proto))
+               goto skip_offload;
+
+       key = rcu_dereference(sta->ptk[sta->ptk_idx]);
+       if (!key)
+               key = rcu_dereference(sdata->default_unicast_key);
+
+       if (key && (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
+                   key->conf.cipher == WLAN_CIPHER_SUITE_TKIP))
+               goto skip_offload;
+
+       ieee80211_8023_xmit(sdata, dev, sta, key, skb);
+       goto out;
 
+skip_offload:
+       ieee80211_subif_start_xmit(skb, dev);
 out:
        rcu_read_unlock();
 
index 09b19aa..f998a07 100644 (file)
@@ -427,7 +427,7 @@ static bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
 static bool tcp_can_send_ack(const struct sock *ssk)
 {
        return !((1 << inet_sk_state_load(ssk)) &
-              (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE));
+              (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT | TCPF_CLOSE | TCPF_LISTEN));
 }
 
 static void mptcp_send_ack(struct mptcp_sock *msk)
@@ -877,6 +877,9 @@ static void __mptcp_wmem_reserve(struct sock *sk, int size)
        struct mptcp_sock *msk = mptcp_sk(sk);
 
        WARN_ON_ONCE(msk->wmem_reserved);
+       if (WARN_ON_ONCE(amount < 0))
+               amount = 0;
+
        if (amount <= sk->sk_forward_alloc)
                goto reserve;
 
@@ -1587,7 +1590,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
                return -EOPNOTSUPP;
 
-       mptcp_lock_sock(sk, __mptcp_wmem_reserve(sk, len));
+       mptcp_lock_sock(sk, __mptcp_wmem_reserve(sk, min_t(size_t, 1 << 20, len)));
 
        timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 
@@ -2639,11 +2642,17 @@ static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
 
 static int mptcp_disconnect(struct sock *sk, int flags)
 {
-       /* Should never be called.
-        * inet_stream_connect() calls ->disconnect, but that
-        * refers to the subflow socket, not the mptcp one.
-        */
-       WARN_ON_ONCE(1);
+       struct mptcp_subflow_context *subflow;
+       struct mptcp_sock *msk = mptcp_sk(sk);
+
+       __mptcp_flush_join_list(msk);
+       mptcp_for_each_subflow(msk, subflow) {
+               struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+               lock_sock(ssk);
+               tcp_disconnect(ssk, flags);
+               release_sock(ssk);
+       }
        return 0;
 }
 
@@ -3086,6 +3095,14 @@ bool mptcp_finish_join(struct sock *ssk)
        return true;
 }
 
+static void mptcp_shutdown(struct sock *sk, int how)
+{
+       pr_debug("sk=%p, how=%d", sk, how);
+
+       if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
+               __mptcp_wr_shutdown(sk);
+}
+
 static struct proto mptcp_prot = {
        .name           = "MPTCP",
        .owner          = THIS_MODULE,
@@ -3095,7 +3112,7 @@ static struct proto mptcp_prot = {
        .accept         = mptcp_accept,
        .setsockopt     = mptcp_setsockopt,
        .getsockopt     = mptcp_getsockopt,
-       .shutdown       = tcp_shutdown,
+       .shutdown       = mptcp_shutdown,
        .destroy        = mptcp_destroy,
        .sendmsg        = mptcp_sendmsg,
        .recvmsg        = mptcp_recvmsg,
@@ -3341,43 +3358,6 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
        return mask;
 }
 
-static int mptcp_shutdown(struct socket *sock, int how)
-{
-       struct mptcp_sock *msk = mptcp_sk(sock->sk);
-       struct sock *sk = sock->sk;
-       int ret = 0;
-
-       pr_debug("sk=%p, how=%d", msk, how);
-
-       lock_sock(sk);
-
-       how++;
-       if ((how & ~SHUTDOWN_MASK) || !how) {
-               ret = -EINVAL;
-               goto out_unlock;
-       }
-
-       if (sock->state == SS_CONNECTING) {
-               if ((1 << sk->sk_state) &
-                   (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
-                       sock->state = SS_DISCONNECTING;
-               else
-                       sock->state = SS_CONNECTED;
-       }
-
-       sk->sk_shutdown |= how;
-       if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
-               __mptcp_wr_shutdown(sk);
-
-       /* Wake up anyone sleeping in poll. */
-       sk->sk_state_change(sk);
-
-out_unlock:
-       release_sock(sk);
-
-       return ret;
-}
-
 static const struct proto_ops mptcp_stream_ops = {
        .family            = PF_INET,
        .owner             = THIS_MODULE,
@@ -3391,7 +3371,7 @@ static const struct proto_ops mptcp_stream_ops = {
        .ioctl             = inet_ioctl,
        .gettstamp         = sock_gettstamp,
        .listen            = mptcp_listen,
-       .shutdown          = mptcp_shutdown,
+       .shutdown          = inet_shutdown,
        .setsockopt        = sock_common_setsockopt,
        .getsockopt        = sock_common_getsockopt,
        .sendmsg           = inet_sendmsg,
@@ -3441,7 +3421,7 @@ static const struct proto_ops mptcp_v6_stream_ops = {
        .ioctl             = inet6_ioctl,
        .gettstamp         = sock_gettstamp,
        .listen            = mptcp_listen,
-       .shutdown          = mptcp_shutdown,
+       .shutdown          = inet_shutdown,
        .setsockopt        = sock_common_setsockopt,
        .getsockopt        = sock_common_getsockopt,
        .sendmsg           = inet6_sendmsg,
index 5b1f4ec..888ccc2 100644 (file)
@@ -1120,7 +1120,7 @@ int ncsi_rcv_rsp(struct sk_buff *skb, struct net_device *dev,
        int payload, i, ret;
 
        /* Find the NCSI device */
-       nd = ncsi_find_dev(dev);
+       nd = ncsi_find_dev(orig_dev);
        ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
        if (!ndp)
                return -ENODEV;
index 5f1208a..6186358 100644 (file)
@@ -141,20 +141,6 @@ htable_size(u8 hbits)
        return hsize * sizeof(struct hbucket *) + sizeof(struct htable);
 }
 
-/* Compute htable_bits from the user input parameter hashsize */
-static u8
-htable_bits(u32 hashsize)
-{
-       /* Assume that hashsize == 2^htable_bits */
-       u8 bits = fls(hashsize - 1);
-
-       if (jhash_size(bits) != hashsize)
-               /* Round up to the first 2^n value */
-               bits = fls(hashsize);
-
-       return bits;
-}
-
 #ifdef IP_SET_HASH_WITH_NETS
 #if IPSET_NET_COUNT > 1
 #define __CIDR(cidr, i)                (cidr[i])
@@ -640,7 +626,7 @@ mtype_resize(struct ip_set *set, bool retried)
        struct htype *h = set->data;
        struct htable *t, *orig;
        u8 htable_bits;
-       size_t dsize = set->dsize;
+       size_t hsize, dsize = set->dsize;
 #ifdef IP_SET_HASH_WITH_NETS
        u8 flags;
        struct mtype_elem *tmp;
@@ -664,14 +650,12 @@ mtype_resize(struct ip_set *set, bool retried)
 retry:
        ret = 0;
        htable_bits++;
-       if (!htable_bits) {
-               /* In case we have plenty of memory :-) */
-               pr_warn("Cannot increase the hashsize of set %s further\n",
-                       set->name);
-               ret = -IPSET_ERR_HASH_FULL;
-               goto out;
-       }
-       t = ip_set_alloc(htable_size(htable_bits));
+       if (!htable_bits)
+               goto hbwarn;
+       hsize = htable_size(htable_bits);
+       if (!hsize)
+               goto hbwarn;
+       t = ip_set_alloc(hsize);
        if (!t) {
                ret = -ENOMEM;
                goto out;
@@ -813,6 +797,12 @@ cleanup:
        if (ret == -EAGAIN)
                goto retry;
        goto out;
+
+hbwarn:
+       /* In case we have plenty of memory :-) */
+       pr_warn("Cannot increase the hashsize of set %s further\n", set->name);
+       ret = -IPSET_ERR_HASH_FULL;
+       goto out;
 }
 
 /* Get the current number of elements and ext_size in the set  */
@@ -1521,7 +1511,11 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
        if (!h)
                return -ENOMEM;
 
-       hbits = htable_bits(hashsize);
+       /* Compute htable_bits from the user input parameter hashsize.
+        * Assume that hashsize == 2^htable_bits,
+        * otherwise round up to the first 2^n value.
+        */
+       hbits = fls(hashsize - 1);
        hsize = htable_size(hbits);
        if (hsize == 0) {
                kfree(h);
index 46c5557..0ee702d 100644 (file)
@@ -523,6 +523,9 @@ nf_conntrack_hash_sysctl(struct ctl_table *table, int write,
 {
        int ret;
 
+       /* module_param hashsize could have changed value */
+       nf_conntrack_htable_size_user = nf_conntrack_htable_size;
+
        ret = proc_dointvec(table, write, buffer, lenp, ppos);
        if (ret < 0 || !write)
                return ret;
index ea923f8..b7c3c90 100644 (file)
@@ -1174,6 +1174,7 @@ static int __init nf_nat_init(void)
        ret = register_pernet_subsys(&nat_net_ops);
        if (ret < 0) {
                nf_ct_extend_unregister(&nat_extend);
+               kvfree(nf_nat_bysource);
                return ret;
        }
 
index 8d5aa0a..15c467f 100644 (file)
@@ -4162,7 +4162,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
                if (flags & ~(NFT_SET_ANONYMOUS | NFT_SET_CONSTANT |
                              NFT_SET_INTERVAL | NFT_SET_TIMEOUT |
                              NFT_SET_MAP | NFT_SET_EVAL |
-                             NFT_SET_OBJECT | NFT_SET_CONCAT))
+                             NFT_SET_OBJECT | NFT_SET_CONCAT | NFT_SET_EXPR))
                        return -EOPNOTSUPP;
                /* Only one of these operations is supported */
                if ((flags & (NFT_SET_MAP | NFT_SET_OBJECT)) ==
@@ -4304,6 +4304,10 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
                struct nlattr *tmp;
                int left;
 
+               if (!(flags & NFT_SET_EXPR)) {
+                       err = -EINVAL;
+                       goto err_set_alloc_name;
+               }
                i = 0;
                nla_for_each_nested(tmp, nla[NFTA_SET_EXPRESSIONS], left) {
                        if (i == NFT_SET_EXPR_MAX) {
@@ -5254,8 +5258,8 @@ static int nft_set_elem_expr_clone(const struct nft_ctx *ctx,
        return 0;
 
 err_expr:
-       for (k = i - 1; k >= 0; k++)
-               nft_expr_destroy(ctx, expr_array[i]);
+       for (k = i - 1; k >= 0; k--)
+               nft_expr_destroy(ctx, expr_array[k]);
 
        return -ENOMEM;
 }
index 983a1d5..0b053f7 100644 (file)
@@ -19,6 +19,7 @@ struct nft_dynset {
        enum nft_registers              sreg_key:8;
        enum nft_registers              sreg_data:8;
        bool                            invert;
+       bool                            expr;
        u8                              num_exprs;
        u64                             timeout;
        struct nft_expr                 *expr_array[NFT_SET_EXPR_MAX];
@@ -175,11 +176,12 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
 
        if (tb[NFTA_DYNSET_FLAGS]) {
                u32 flags = ntohl(nla_get_be32(tb[NFTA_DYNSET_FLAGS]));
-
-               if (flags & ~NFT_DYNSET_F_INV)
-                       return -EINVAL;
+               if (flags & ~(NFT_DYNSET_F_INV | NFT_DYNSET_F_EXPR))
+                       return -EOPNOTSUPP;
                if (flags & NFT_DYNSET_F_INV)
                        priv->invert = true;
+               if (flags & NFT_DYNSET_F_EXPR)
+                       priv->expr = true;
        }
 
        set = nft_set_lookup_global(ctx->net, ctx->table,
@@ -210,7 +212,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
        timeout = 0;
        if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
                if (!(set->flags & NFT_SET_TIMEOUT))
-                       return -EINVAL;
+                       return -EOPNOTSUPP;
 
                err = nf_msecs_to_jiffies64(tb[NFTA_DYNSET_TIMEOUT], &timeout);
                if (err)
@@ -224,7 +226,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
 
        if (tb[NFTA_DYNSET_SREG_DATA] != NULL) {
                if (!(set->flags & NFT_SET_MAP))
-                       return -EINVAL;
+                       return -EOPNOTSUPP;
                if (set->dtype == NFT_DATA_VERDICT)
                        return -EOPNOTSUPP;
 
@@ -261,6 +263,9 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
                struct nlattr *tmp;
                int left;
 
+               if (!priv->expr)
+                       return -EINVAL;
+
                i = 0;
                nla_for_each_nested(tmp, tb[NFTA_DYNSET_EXPRESSIONS], left) {
                        if (i == NFT_SET_EXPR_MAX) {
index 37253d3..0d5c422 100644 (file)
@@ -115,6 +115,9 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
        } cfg;
        int ret;
 
+       if (strnlen(info->name, sizeof(est->name)) >= sizeof(est->name))
+               return -ENAMETOOLONG;
+
        net_get_random_once(&jhash_rnd, sizeof(jhash_rnd));
 
        mutex_lock(&xn->hash_lock);
index e64727e..02a1f13 100644 (file)
@@ -508,7 +508,7 @@ static int nci_open_device(struct nci_dev *ndev)
                };
                unsigned long opt = 0;
 
-               if (!(ndev->nci_ver & NCI_VER_2_MASK))
+               if (ndev->nci_ver & NCI_VER_2_MASK)
                        opt = (unsigned long)&nci_init_v2_cmd;
 
                rc = __nci_request(ndev, nci_init_req, opt,
index de8e8db..6bbc7a4 100644 (file)
@@ -4595,7 +4595,9 @@ static void packet_seq_stop(struct seq_file *seq, void *v)
 static int packet_seq_show(struct seq_file *seq, void *v)
 {
        if (v == SEQ_START_TOKEN)
-               seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
+               seq_printf(seq,
+                          "%*sRefCnt Type Proto  Iface R Rmem   User   Inode\n",
+                          IS_ENABLED(CONFIG_64BIT) ? -17 : -9, "sk");
        else {
                struct sock *s = sk_entry(v);
                const struct packet_sock *po = pkt_sk(s);
index 56aaf8c..8d00dfe 100644 (file)
@@ -755,7 +755,7 @@ static void qrtr_ns_data_ready(struct sock *sk)
        queue_work(qrtr_ns.workqueue, &qrtr_ns.work);
 }
 
-void qrtr_ns_init(void)
+int qrtr_ns_init(void)
 {
        struct sockaddr_qrtr sq;
        int ret;
@@ -766,7 +766,7 @@ void qrtr_ns_init(void)
        ret = sock_create_kern(&init_net, AF_QIPCRTR, SOCK_DGRAM,
                               PF_QIPCRTR, &qrtr_ns.sock);
        if (ret < 0)
-               return;
+               return ret;
 
        ret = kernel_getsockname(qrtr_ns.sock, (struct sockaddr *)&sq);
        if (ret < 0) {
@@ -797,12 +797,13 @@ void qrtr_ns_init(void)
        if (ret < 0)
                goto err_wq;
 
-       return;
+       return 0;
 
 err_wq:
        destroy_workqueue(qrtr_ns.workqueue);
 err_sock:
        sock_release(qrtr_ns.sock);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(qrtr_ns_init);
 
index f4ab3ca..b343582 100644 (file)
@@ -1287,13 +1287,19 @@ static int __init qrtr_proto_init(void)
                return rc;
 
        rc = sock_register(&qrtr_family);
-       if (rc) {
-               proto_unregister(&qrtr_proto);
-               return rc;
-       }
+       if (rc)
+               goto err_proto;
 
-       qrtr_ns_init();
+       rc = qrtr_ns_init();
+       if (rc)
+               goto err_sock;
 
+       return 0;
+
+err_sock:
+       sock_unregister(qrtr_family.family);
+err_proto:
+       proto_unregister(&qrtr_proto);
        return rc;
 }
 postcore_initcall(qrtr_proto_init);
index dc2b67f..3f2d286 100644 (file)
@@ -29,7 +29,7 @@ void qrtr_endpoint_unregister(struct qrtr_endpoint *ep);
 
 int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len);
 
-void qrtr_ns_init(void);
+int qrtr_ns_init(void);
 
 void qrtr_ns_remove(void);
 
index 667c44a..dc20136 100644 (file)
@@ -430,7 +430,7 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
                return;
        }
 
-       if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) {
+       if (state == RXRPC_CALL_SERVER_RECV_REQUEST) {
                unsigned long timo = READ_ONCE(call->next_req_timo);
                unsigned long now, expect_req_by;
 
index 9631aa8..8d2073e 100644 (file)
@@ -598,7 +598,7 @@ static long rxrpc_read(const struct key *key,
                default: /* we have a ticket we can't encode */
                        pr_err("Unsupported key token type (%u)\n",
                               token->security_index);
-                       continue;
+                       return -ENOPKG;
                }
 
                _debug("token[%u]: toksize=%u", ntoks, toksize);
@@ -674,7 +674,9 @@ static long rxrpc_read(const struct key *key,
                        break;
 
                default:
-                       break;
+                       pr_err("Unsupported key token type (%u)\n",
+                              token->security_index);
+                       return -ENOPKG;
                }
 
                ASSERTCMP((unsigned long)xdr - (unsigned long)oldxdr, ==,
index 1319986..84f9325 100644 (file)
@@ -1272,6 +1272,10 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
 
                nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
                msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
+               if (!nla_ok(nla_opt_msk, msk_depth)) {
+                       NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
+                       return -EINVAL;
+               }
        }
 
        nla_for_each_attr(nla_opt_key, nla_enc_key,
@@ -1307,9 +1311,6 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
                                NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
                                return -EINVAL;
                        }
-
-                       if (msk_depth)
-                               nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
                        break;
                case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
                        if (key->enc_opts.dst_opt_type) {
@@ -1340,9 +1341,6 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
                                NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
                                return -EINVAL;
                        }
-
-                       if (msk_depth)
-                               nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
                        break;
                case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
                        if (key->enc_opts.dst_opt_type) {
@@ -1373,14 +1371,20 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
                                NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
                                return -EINVAL;
                        }
-
-                       if (msk_depth)
-                               nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
                        break;
                default:
                        NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
                        return -EINVAL;
                }
+
+               if (!msk_depth)
+                       continue;
+
+               if (!nla_ok(nla_opt_msk, msk_depth)) {
+                       NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
+                       return -EINVAL;
+               }
+               nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
        }
 
        return 0;
index 78bec34..c4007b9 100644 (file)
@@ -366,9 +366,13 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
        if (tb[TCA_TCINDEX_MASK])
                cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
 
-       if (tb[TCA_TCINDEX_SHIFT])
+       if (tb[TCA_TCINDEX_SHIFT]) {
                cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
-
+               if (cp->shift > 16) {
+                       err = -EINVAL;
+                       goto errout;
+               }
+       }
        if (!cp->hash) {
                /* Hash not specified, use perfect hash if the upper limit
                 * of the hashing index is below the threshold.
index 51cb553..6fe4e5c 100644 (file)
@@ -412,7 +412,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
 {
        struct qdisc_rate_table *rtab;
 
-       if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
+       if (tab == NULL || r->rate == 0 ||
+           r->cell_log == 0 || r->cell_log >= 32 ||
            nla_len(tab) != TC_RTAB_SIZE) {
                NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
                return NULL;
index bd618b0..50f680f 100644 (file)
@@ -362,7 +362,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
 
        ctl = nla_data(tb[TCA_CHOKE_PARMS]);
 
-       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
+       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
                return -EINVAL;
 
        if (ctl->limit > CHOKE_MAX_QUEUE)
index 8599c6f..e0bc775 100644 (file)
@@ -480,7 +480,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
        struct gred_sched *table = qdisc_priv(sch);
        struct gred_sched_data *q = table->tab[dp];
 
-       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog)) {
+       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log)) {
                NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
                return -EINVAL;
        }
index e89fab6..b4ae34d 100644 (file)
@@ -250,7 +250,7 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb,
        max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
 
        ctl = nla_data(tb[TCA_RED_PARMS]);
-       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
+       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
                return -EINVAL;
 
        err = red_get_flags(ctl->flags, TC_RED_HISTORIC_FLAGS,
index bca2be5..b25e514 100644 (file)
@@ -647,7 +647,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
        }
 
        if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
-                                       ctl_v1->Wlog))
+                                       ctl_v1->Wlog, ctl_v1->Scell_log))
                return -EINVAL;
        if (ctl_v1 && ctl_v1->qth_min) {
                p = kmalloc(sizeof(*p), GFP_KERNEL);
index c74817e..6f77527 100644 (file)
@@ -1605,8 +1605,9 @@ static void taprio_reset(struct Qdisc *sch)
 
        hrtimer_cancel(&q->advance_timer);
        if (q->qdiscs) {
-               for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
-                       qdisc_reset(q->qdiscs[i]);
+               for (i = 0; i < dev->num_tx_queues; i++)
+                       if (q->qdiscs[i])
+                               qdisc_reset(q->qdiscs[i]);
        }
        sch->qstats.backlog = 0;
        sch->q.qlen = 0;
@@ -1626,7 +1627,7 @@ static void taprio_destroy(struct Qdisc *sch)
        taprio_disable_offload(dev, q, NULL);
 
        if (q->qdiscs) {
-               for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
+               for (i = 0; i < dev->num_tx_queues; i++)
                        qdisc_put(q->qdiscs[i]);
 
                kfree(q->qdiscs);
index 59342b5..0df85a1 100644 (file)
@@ -246,7 +246,8 @@ int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb)
                goto errattr;
        smc_clc_get_hostname(&host);
        if (host) {
-               snprintf(hostname, sizeof(hostname), "%s", host);
+               memcpy(hostname, host, SMC_MAX_HOSTNAME_LEN);
+               hostname[SMC_MAX_HOSTNAME_LEN] = 0;
                if (nla_put_string(skb, SMC_NLA_SYS_LOCAL_HOST, hostname))
                        goto errattr;
        }
@@ -257,7 +258,8 @@ int smc_nl_get_sys_info(struct sk_buff *skb, struct netlink_callback *cb)
                smc_ism_get_system_eid(smcd_dev, &seid);
        mutex_unlock(&smcd_dev_list.mutex);
        if (seid && smc_ism_is_v2_capable()) {
-               snprintf(smc_seid, sizeof(smc_seid), "%s", seid);
+               memcpy(smc_seid, seid, SMC_MAX_EID_LEN);
+               smc_seid[SMC_MAX_EID_LEN] = 0;
                if (nla_put_string(skb, SMC_NLA_SYS_SEID, smc_seid))
                        goto errattr;
        }
@@ -295,7 +297,8 @@ static int smc_nl_fill_lgr(struct smc_link_group *lgr,
                goto errattr;
        if (nla_put_u8(skb, SMC_NLA_LGR_R_VLAN_ID, lgr->vlan_id))
                goto errattr;
-       snprintf(smc_target, sizeof(smc_target), "%s", lgr->pnet_id);
+       memcpy(smc_target, lgr->pnet_id, SMC_MAX_PNETID_LEN);
+       smc_target[SMC_MAX_PNETID_LEN] = 0;
        if (nla_put_string(skb, SMC_NLA_LGR_R_PNETID, smc_target))
                goto errattr;
 
@@ -312,7 +315,7 @@ static int smc_nl_fill_lgr_link(struct smc_link_group *lgr,
                                struct sk_buff *skb,
                                struct netlink_callback *cb)
 {
-       char smc_ibname[IB_DEVICE_NAME_MAX + 1];
+       char smc_ibname[IB_DEVICE_NAME_MAX];
        u8 smc_gid_target[41];
        struct nlattr *attrs;
        u32 link_uid = 0;
@@ -461,7 +464,8 @@ static int smc_nl_fill_smcd_lgr(struct smc_link_group *lgr,
                goto errattr;
        if (nla_put_u32(skb, SMC_NLA_LGR_D_CHID, smc_ism_get_chid(lgr->smcd)))
                goto errattr;
-       snprintf(smc_pnet, sizeof(smc_pnet), "%s", lgr->smcd->pnetid);
+       memcpy(smc_pnet, lgr->smcd->pnetid, SMC_MAX_PNETID_LEN);
+       smc_pnet[SMC_MAX_PNETID_LEN] = 0;
        if (nla_put_string(skb, SMC_NLA_LGR_D_PNETID, smc_pnet))
                goto errattr;
 
@@ -474,10 +478,12 @@ static int smc_nl_fill_smcd_lgr(struct smc_link_group *lgr,
                goto errv2attr;
        if (nla_put_u8(skb, SMC_NLA_LGR_V2_OS, lgr->peer_os))
                goto errv2attr;
-       snprintf(smc_host, sizeof(smc_host), "%s", lgr->peer_hostname);
+       memcpy(smc_host, lgr->peer_hostname, SMC_MAX_HOSTNAME_LEN);
+       smc_host[SMC_MAX_HOSTNAME_LEN] = 0;
        if (nla_put_string(skb, SMC_NLA_LGR_V2_PEER_HOST, smc_host))
                goto errv2attr;
-       snprintf(smc_eid, sizeof(smc_eid), "%s", lgr->negotiated_eid);
+       memcpy(smc_eid, lgr->negotiated_eid, SMC_MAX_EID_LEN);
+       smc_eid[SMC_MAX_EID_LEN] = 0;
        if (nla_put_string(skb, SMC_NLA_LGR_V2_NEG_EID, smc_eid))
                goto errv2attr;
 
index ddd7fac..7d7ba03 100644 (file)
@@ -371,8 +371,8 @@ static int smc_nl_handle_dev_port(struct sk_buff *skb,
        if (nla_put_u8(skb, SMC_NLA_DEV_PORT_PNET_USR,
                       smcibdev->pnetid_by_user[port]))
                goto errattr;
-       snprintf(smc_pnet, sizeof(smc_pnet), "%s",
-                (char *)&smcibdev->pnetid[port]);
+       memcpy(smc_pnet, &smcibdev->pnetid[port], SMC_MAX_PNETID_LEN);
+       smc_pnet[SMC_MAX_PNETID_LEN] = 0;
        if (nla_put_string(skb, SMC_NLA_DEV_PORT_PNETID, smc_pnet))
                goto errattr;
        if (nla_put_u32(skb, SMC_NLA_DEV_PORT_NETDEV,
@@ -414,7 +414,7 @@ static int smc_nl_handle_smcr_dev(struct smc_ib_device *smcibdev,
                                  struct sk_buff *skb,
                                  struct netlink_callback *cb)
 {
-       char smc_ibname[IB_DEVICE_NAME_MAX + 1];
+       char smc_ibname[IB_DEVICE_NAME_MAX];
        struct smc_pci_dev smc_pci_dev;
        struct pci_dev *pci_dev;
        unsigned char is_crit;
index 524ef64..9c6e958 100644 (file)
@@ -250,7 +250,8 @@ static int smc_nl_handle_smcd_dev(struct smcd_dev *smcd,
                goto errattr;
        if (nla_put_u8(skb, SMC_NLA_DEV_PORT_PNET_USR, smcd->pnetid_by_user))
                goto errportattr;
-       snprintf(smc_pnet, sizeof(smc_pnet), "%s", smcd->pnetid);
+       memcpy(smc_pnet, smcd->pnetid, SMC_MAX_PNETID_LEN);
+       smc_pnet[SMC_MAX_PNETID_LEN] = 0;
        if (nla_put_string(skb, SMC_NLA_DEV_PORT_PNETID, smc_pnet))
                goto errportattr;
 
index 010dcb8..6e4dbd5 100644 (file)
@@ -185,7 +185,7 @@ static int rpc_parse_scope_id(struct net *net, const char *buf,
                        scope_id = dev->ifindex;
                        dev_put(dev);
                } else {
-                       if (kstrtou32(p, 10, &scope_id) == 0) {
+                       if (kstrtou32(p, 10, &scope_id) != 0) {
                                kfree(p);
                                return 0;
                        }
index 5fb9164..dcc50ae 100644 (file)
@@ -857,6 +857,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
        err = -EAGAIN;
        if (len <= 0)
                goto out_release;
+       trace_svc_xdr_recvfrom(&rqstp->rq_arg);
 
        clear_bit(XPT_OLD, &xprt->xpt_flags);
 
@@ -866,7 +867,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 
        if (serv->sv_stats)
                serv->sv_stats->netcnt++;
-       trace_svc_xdr_recvfrom(rqstp, &rqstp->rq_arg);
        return len;
 out_release:
        rqstp->rq_res.len = 0;
@@ -904,7 +904,7 @@ int svc_send(struct svc_rqst *rqstp)
        xb->len = xb->head[0].iov_len +
                xb->page_len +
                xb->tail[0].iov_len;
-       trace_svc_xdr_sendto(rqstp, xb);
+       trace_svc_xdr_sendto(rqstp->rq_xid, xb);
        trace_svc_stats_latency(rqstp);
 
        len = xprt->xpt_ops->xpo_sendto(rqstp);
index b248f23..c9766d0 100644 (file)
@@ -1062,6 +1062,90 @@ err_noclose:
        return 0;       /* record not complete */
 }
 
+static int svc_tcp_send_kvec(struct socket *sock, const struct kvec *vec,
+                             int flags)
+{
+       return kernel_sendpage(sock, virt_to_page(vec->iov_base),
+                              offset_in_page(vec->iov_base),
+                              vec->iov_len, flags);
+}
+
+/*
+ * kernel_sendpage() is used exclusively to reduce the number of
+ * copy operations in this path. Therefore the caller must ensure
+ * that the pages backing @xdr are unchanging.
+ *
+ * In addition, the logic assumes that * .bv_len is never larger
+ * than PAGE_SIZE.
+ */
+static int svc_tcp_sendmsg(struct socket *sock, struct msghdr *msg,
+                          struct xdr_buf *xdr, rpc_fraghdr marker,
+                          unsigned int *sentp)
+{
+       const struct kvec *head = xdr->head;
+       const struct kvec *tail = xdr->tail;
+       struct kvec rm = {
+               .iov_base       = &marker,
+               .iov_len        = sizeof(marker),
+       };
+       int flags, ret;
+
+       *sentp = 0;
+       xdr_alloc_bvec(xdr, GFP_KERNEL);
+
+       msg->msg_flags = MSG_MORE;
+       ret = kernel_sendmsg(sock, msg, &rm, 1, rm.iov_len);
+       if (ret < 0)
+               return ret;
+       *sentp += ret;
+       if (ret != rm.iov_len)
+               return -EAGAIN;
+
+       flags = head->iov_len < xdr->len ? MSG_MORE | MSG_SENDPAGE_NOTLAST : 0;
+       ret = svc_tcp_send_kvec(sock, head, flags);
+       if (ret < 0)
+               return ret;
+       *sentp += ret;
+       if (ret != head->iov_len)
+               goto out;
+
+       if (xdr->page_len) {
+               unsigned int offset, len, remaining;
+               struct bio_vec *bvec;
+
+               bvec = xdr->bvec;
+               offset = xdr->page_base;
+               remaining = xdr->page_len;
+               flags = MSG_MORE | MSG_SENDPAGE_NOTLAST;
+               while (remaining > 0) {
+                       if (remaining <= PAGE_SIZE && tail->iov_len == 0)
+                               flags = 0;
+                       len = min(remaining, bvec->bv_len);
+                       ret = kernel_sendpage(sock, bvec->bv_page,
+                                             bvec->bv_offset + offset,
+                                             len, flags);
+                       if (ret < 0)
+                               return ret;
+                       *sentp += ret;
+                       if (ret != len)
+                               goto out;
+                       remaining -= len;
+                       offset = 0;
+                       bvec++;
+               }
+       }
+
+       if (tail->iov_len) {
+               ret = svc_tcp_send_kvec(sock, tail, 0);
+               if (ret < 0)
+                       return ret;
+               *sentp += ret;
+       }
+
+out:
+       return 0;
+}
+
 /**
  * svc_tcp_sendto - Send out a reply on a TCP socket
  * @rqstp: completed svc_rqst
@@ -1089,7 +1173,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
        mutex_lock(&xprt->xpt_mutex);
        if (svc_xprt_is_dead(xprt))
                goto out_notconn;
-       err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, marker, &sent);
+       err = svc_tcp_sendmsg(svsk->sk_sock, &msg, xdr, marker, &sent);
        xdr_free_bvec(xdr);
        trace_svcsock_tcp_send(xprt, err < 0 ? err : sent);
        if (err < 0 || sent != (xdr->len + sizeof(marker)))
index 6ae2140..1151092 100644 (file)
@@ -1030,7 +1030,6 @@ void tipc_link_reset(struct tipc_link *l)
 int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
                   struct sk_buff_head *xmitq)
 {
-       struct tipc_msg *hdr = buf_msg(skb_peek(list));
        struct sk_buff_head *backlogq = &l->backlogq;
        struct sk_buff_head *transmq = &l->transmq;
        struct sk_buff *skb, *_skb;
@@ -1038,13 +1037,18 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
        u16 ack = l->rcv_nxt - 1;
        u16 seqno = l->snd_nxt;
        int pkt_cnt = skb_queue_len(list);
-       int imp = msg_importance(hdr);
        unsigned int mss = tipc_link_mss(l);
        unsigned int cwin = l->window;
        unsigned int mtu = l->mtu;
+       struct tipc_msg *hdr;
        bool new_bundle;
        int rc = 0;
+       int imp;
+
+       if (pkt_cnt <= 0)
+               return 0;
 
+       hdr = buf_msg(skb_peek(list));
        if (unlikely(msg_size(hdr) > mtu)) {
                pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n",
                        skb_queue_len(list), msg_user(hdr),
@@ -1053,6 +1057,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
                return -EMSGSIZE;
        }
 
+       imp = msg_importance(hdr);
        /* Allow oversubscription of one data msg per source at congestion */
        if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
                if (imp == TIPC_SYSTEM_IMPORTANCE) {
@@ -2539,7 +2544,7 @@ void tipc_link_set_queue_limits(struct tipc_link *l, u32 min_win, u32 max_win)
 }
 
 /**
- * link_reset_stats - reset link statistics
+ * tipc_link_reset_stats - reset link statistics
  * @l: pointer to link
  */
 void tipc_link_reset_stats(struct tipc_link *l)
index 83d9eb8..008670d 100644 (file)
@@ -1665,7 +1665,7 @@ static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
 }
 
 /**
- * tipc_node_xmit() is the general link level function for message sending
+ * tipc_node_xmit() - general link level function for message sending
  * @net: the applicable net namespace
  * @list: chain of buffers containing message
  * @dnode: address of destination node
index 27026f5..f620acd 100644 (file)
@@ -21,6 +21,7 @@ config CFG80211
        tristate "cfg80211 - wireless configuration API"
        depends on RFKILL || !RFKILL
        select FW_LOADER
+       select CRC32
        # may need to update this when certificates are changed and are
        # using a different algorithm, though right now they shouldn't
        # (this is here rather than below to allow it to be a module)
index bb72447..8114bba 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright 2008-2011 Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright      2017  Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2021 Intel Corporation
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -139,6 +139,11 @@ static const struct ieee80211_regdomain *get_cfg80211_regdom(void)
        return rcu_dereference_rtnl(cfg80211_regdomain);
 }
 
+/*
+ * Returns the regulatory domain associated with the wiphy.
+ *
+ * Requires either RTNL or RCU protection
+ */
 const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy)
 {
        return rcu_dereference_rtnl(wiphy->regd);
@@ -2571,9 +2576,13 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
        if (IS_ERR(new_regd))
                return;
 
+       rtnl_lock();
+
        tmp = get_wiphy_regdom(wiphy);
        rcu_assign_pointer(wiphy->regd, new_regd);
        rcu_free_regdom(tmp);
+
+       rtnl_unlock();
 }
 EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
 
index ac4a317..4a83117 100644 (file)
@@ -108,9 +108,9 @@ EXPORT_SYMBOL(xsk_get_pool_from_qid);
 
 void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
 {
-       if (queue_id < dev->real_num_rx_queues)
+       if (queue_id < dev->num_rx_queues)
                dev->_rx[queue_id].pool = NULL;
-       if (queue_id < dev->real_num_tx_queues)
+       if (queue_id < dev->num_tx_queues)
                dev->_tx[queue_id].pool = NULL;
 }
 
@@ -423,9 +423,9 @@ static void xsk_destruct_skb(struct sk_buff *skb)
        struct xdp_sock *xs = xdp_sk(skb->sk);
        unsigned long flags;
 
-       spin_lock_irqsave(&xs->tx_completion_lock, flags);
+       spin_lock_irqsave(&xs->pool->cq_lock, flags);
        xskq_prod_submit_addr(xs->pool->cq, addr);
-       spin_unlock_irqrestore(&xs->tx_completion_lock, flags);
+       spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
 
        sock_wfree(skb);
 }
@@ -437,6 +437,7 @@ static int xsk_generic_xmit(struct sock *sk)
        bool sent_frame = false;
        struct xdp_desc desc;
        struct sk_buff *skb;
+       unsigned long flags;
        int err = 0;
 
        mutex_lock(&xs->mutex);
@@ -468,10 +469,13 @@ static int xsk_generic_xmit(struct sock *sk)
                 * if there is space in it. This avoids having to implement
                 * any buffering in the Tx path.
                 */
+               spin_lock_irqsave(&xs->pool->cq_lock, flags);
                if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) {
+                       spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
                        kfree_skb(skb);
                        goto out;
                }
+               spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
 
                skb->dev = xs->dev;
                skb->priority = sk->sk_priority;
@@ -483,6 +487,9 @@ static int xsk_generic_xmit(struct sock *sk)
                if  (err == NETDEV_TX_BUSY) {
                        /* Tell user-space to retry the send */
                        skb->destructor = sock_wfree;
+                       spin_lock_irqsave(&xs->pool->cq_lock, flags);
+                       xskq_prod_cancel(xs->pool->cq);
+                       spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
                        /* Free skb without triggering the perf drop trace */
                        consume_skb(skb);
                        err = -EAGAIN;
@@ -878,6 +885,10 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
                }
        }
 
+       /* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
+       xs->fq_tmp = NULL;
+       xs->cq_tmp = NULL;
+
        xs->dev = dev;
        xs->zc = xs->umem->zc;
        xs->queue_id = qid;
@@ -1299,7 +1310,6 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
        xs->state = XSK_READY;
        mutex_init(&xs->mutex);
        spin_lock_init(&xs->rx_lock);
-       spin_lock_init(&xs->tx_completion_lock);
 
        INIT_LIST_HEAD(&xs->map_list);
        spin_lock_init(&xs->map_list_lock);
index 67a4494..20598ee 100644 (file)
@@ -71,12 +71,11 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
        INIT_LIST_HEAD(&pool->free_list);
        INIT_LIST_HEAD(&pool->xsk_tx_list);
        spin_lock_init(&pool->xsk_tx_list_lock);
+       spin_lock_init(&pool->cq_lock);
        refcount_set(&pool->users, 1);
 
        pool->fq = xs->fq_tmp;
        pool->cq = xs->cq_tmp;
-       xs->fq_tmp = NULL;
-       xs->cq_tmp = NULL;
 
        for (i = 0; i < pool->free_heads_cnt; i++) {
                xskb = &pool->heads[i];
index 4a9663a..2823b7c 100644 (file)
@@ -334,6 +334,11 @@ static inline bool xskq_prod_is_full(struct xsk_queue *q)
        return xskq_prod_nb_free(q, 1) ? false : true;
 }
 
+static inline void xskq_prod_cancel(struct xsk_queue *q)
+{
+       q->cached_prod--;
+}
+
 static inline int xskq_prod_reserve(struct xsk_queue *q)
 {
        if (xskq_prod_is_full(q))
index 8c8d7c3..ff88e2f 100755 (executable)
@@ -223,6 +223,7 @@ while [ "$1" != "" ] ; do
                ;;
 
        *)
+               echo "bad command: $CMD" >&2
                usage
                ;;
        esac
index d66949b..b5487cc 100644 (file)
@@ -22,9 +22,9 @@ always-y += $(GCC_PLUGIN)
 GCC_PLUGINS_DIR = $(shell $(CC) -print-file-name=plugin)
 
 plugin_cxxflags        = -Wp,-MMD,$(depfile) $(KBUILD_HOSTCXXFLAGS) -fPIC \
-                  -I $(GCC_PLUGINS_DIR)/include -I $(obj) -std=gnu++98 \
+                  -I $(GCC_PLUGINS_DIR)/include -I $(obj) -std=gnu++11 \
                   -fno-rtti -fno-exceptions -fasynchronous-unwind-tables \
-                  -ggdb -Wno-narrowing -Wno-unused-variable -Wno-c++11-compat \
+                  -ggdb -Wno-narrowing -Wno-unused-variable \
                   -Wno-format-diag
 
 plugin_ldflags = -shared
index e46df0a..2c40e68 100644 (file)
@@ -94,16 +94,6 @@ configfiles=$(wildcard $(srctree)/kernel/configs/$@ $(srctree)/arch/$(SRCARCH)/c
        $(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh -m .config $(configfiles)
        $(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig
 
-PHONY += kvmconfig
-kvmconfig: kvm_guest.config
-       @echo >&2 "WARNING: 'make $@' will be removed after Linux 5.10"
-       @echo >&2 "         Please use 'make $<' instead."
-
-PHONY += xenconfig
-xenconfig: xen.config
-       @echo >&2 "WARNING: 'make $@' will be removed after Linux 5.10"
-       @echo >&2 "         Please use 'make $<' instead."
-
 PHONY += tinyconfig
 tinyconfig:
        $(Q)$(MAKE) -f $(srctree)/Makefile allnoconfig tiny.config
index aa68ec9..fcd4acd 100755 (executable)
@@ -33,7 +33,9 @@ if [ -f /usr/include/ncurses/ncurses.h ]; then
        exit 0
 fi
 
-if [ -f /usr/include/ncurses.h ]; then
+# As a final fallback before giving up, check if $HOSTCC knows of a default
+# ncurses installation (e.g. from a vendor-specific sysroot).
+if echo '#include <ncurses.h>' | "${HOSTCC}" -E - >/dev/null 2>&1; then
        echo cflags=\"-D_GNU_SOURCE\"
        echo libs=\"-lncurses\"
        exit 0
index 7d8026f..a0cd28c 100644 (file)
@@ -275,7 +275,9 @@ static void dump_common_audit_data(struct audit_buffer *ab,
                struct inode *inode;
 
                audit_log_format(ab, " name=");
+               spin_lock(&a->u.dentry->d_lock);
                audit_log_untrustedstring(ab, a->u.dentry->d_name.name);
+               spin_unlock(&a->u.dentry->d_lock);
 
                inode = d_backing_inode(a->u.dentry);
                if (inode) {
@@ -293,8 +295,9 @@ static void dump_common_audit_data(struct audit_buffer *ab,
                dentry = d_find_alias(inode);
                if (dentry) {
                        audit_log_format(ab, " name=");
-                       audit_log_untrustedstring(ab,
-                                        dentry->d_name.name);
+                       spin_lock(&dentry->d_lock);
+                       audit_log_untrustedstring(ab, dentry->d_name.name);
+                       spin_unlock(&dentry->d_lock);
                        dput(dentry);
                }
                audit_log_format(ab, " dev=");
index 11554d0..1b8409e 100644 (file)
@@ -611,7 +611,8 @@ snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_in
 
        if (info->is_midi) {
                struct midi_info minf;
-               snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf);
+               if (snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf))
+                       return -ENXIO;
                inf->synth_type = SYNTH_TYPE_MIDI;
                inf->synth_subtype = 0;
                inf->nr_voices = 16;
index 7f82762..ee7122c 100644 (file)
@@ -88,7 +88,7 @@ static void transmit_midi_msg(struct snd_ff *ff, unsigned int port)
 
        /* Set interval to next transaction. */
        ff->next_ktime[port] = ktime_add_ns(ktime_get(),
-                               ff->rx_bytes[port] * 8 * NSEC_PER_SEC / 31250);
+                       ff->rx_bytes[port] * 8 * (NSEC_PER_SEC / 31250));
 
        if (quad_count == 1)
                tcode = TCODE_WRITE_QUADLET_REQUEST;
index 90288b4..a073cec 100644 (file)
@@ -209,7 +209,7 @@ static void midi_port_work(struct work_struct *work)
 
        /* Set interval to next transaction. */
        port->next_ktime = ktime_add_ns(ktime_get(),
-                               port->consume_bytes * 8 * NSEC_PER_SEC / 31250);
+                       port->consume_bytes * 8 * (NSEC_PER_SEC / 31250));
 
        /* Start this transaction. */
        port->idling = false;
index 687216e..eec1775 100644 (file)
@@ -2934,7 +2934,7 @@ static void hda_call_codec_resume(struct hda_codec *codec)
        snd_hdac_leave_pm(&codec->core);
 }
 
-static int hda_codec_suspend(struct device *dev)
+static int hda_codec_runtime_suspend(struct device *dev)
 {
        struct hda_codec *codec = dev_to_hda_codec(dev);
        unsigned int state;
@@ -2953,7 +2953,7 @@ static int hda_codec_suspend(struct device *dev)
        return 0;
 }
 
-static int hda_codec_resume(struct device *dev)
+static int hda_codec_runtime_resume(struct device *dev)
 {
        struct hda_codec *codec = dev_to_hda_codec(dev);
 
@@ -2968,16 +2968,6 @@ static int hda_codec_resume(struct device *dev)
        return 0;
 }
 
-static int hda_codec_runtime_suspend(struct device *dev)
-{
-       return hda_codec_suspend(dev);
-}
-
-static int hda_codec_runtime_resume(struct device *dev)
-{
-       return hda_codec_resume(dev);
-}
-
 #endif /* CONFIG_PM */
 
 #ifdef CONFIG_PM_SLEEP
@@ -2998,31 +2988,31 @@ static void hda_codec_pm_complete(struct device *dev)
 static int hda_codec_pm_suspend(struct device *dev)
 {
        dev->power.power_state = PMSG_SUSPEND;
-       return hda_codec_suspend(dev);
+       return pm_runtime_force_suspend(dev);
 }
 
 static int hda_codec_pm_resume(struct device *dev)
 {
        dev->power.power_state = PMSG_RESUME;
-       return hda_codec_resume(dev);
+       return pm_runtime_force_resume(dev);
 }
 
 static int hda_codec_pm_freeze(struct device *dev)
 {
        dev->power.power_state = PMSG_FREEZE;
-       return hda_codec_suspend(dev);
+       return pm_runtime_force_suspend(dev);
 }
 
 static int hda_codec_pm_thaw(struct device *dev)
 {
        dev->power.power_state = PMSG_THAW;
-       return hda_codec_resume(dev);
+       return pm_runtime_force_resume(dev);
 }
 
 static int hda_codec_pm_restore(struct device *dev)
 {
        dev->power.power_state = PMSG_RESTORE;
-       return hda_codec_resume(dev);
+       return pm_runtime_force_resume(dev);
 }
 #endif /* CONFIG_PM_SLEEP */
 
index 6852668..5a50d3a 100644 (file)
@@ -2220,8 +2220,6 @@ static const struct snd_pci_quirk power_save_denylist[] = {
        SND_PCI_QUIRK(0x1849, 0x7662, "Asrock H81M-HDS", 0),
        /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
        SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
-       /* https://bugzilla.redhat.com/show_bug.cgi?id=1581607 */
-       SND_PCI_QUIRK(0x1558, 0x3501, "Clevo W35xSS_370SS", 0),
        /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
        SND_PCI_QUIRK(0x1558, 0x6504, "Clevo W65_67SB", 0),
        /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
@@ -2486,6 +2484,9 @@ static const struct pci_device_id azx_ids[] = {
        /* CometLake-S */
        { PCI_DEVICE(0x8086, 0xa3f0),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* CometLake-R */
+       { PCI_DEVICE(0x8086, 0xf0c8),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Icelake */
        { PCI_DEVICE(0x8086, 0x34c8),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
@@ -2509,6 +2510,9 @@ static const struct pci_device_id azx_ids[] = {
        /* Alderlake-S */
        { PCI_DEVICE(0x8086, 0x7ad0),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* Alderlake-P */
+       { PCI_DEVICE(0x8086, 0x51c8),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Elkhart Lake */
        { PCI_DEVICE(0x8086, 0x4b55),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
@@ -2600,7 +2604,8 @@ static const struct pci_device_id azx_ids[] = {
          .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
        /* ATI HDMI */
        { PCI_DEVICE(0x1002, 0x0002),
-         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
+         AZX_DCAPS_PM_RUNTIME },
        { PCI_DEVICE(0x1002, 0x1308),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0x157a),
@@ -2662,9 +2667,11 @@ static const struct pci_device_id azx_ids[] = {
        { PCI_DEVICE(0x1002, 0xaab0),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0xaac0),
-         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
+         AZX_DCAPS_PM_RUNTIME },
        { PCI_DEVICE(0x1002, 0xaac8),
-         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
+         AZX_DCAPS_PM_RUNTIME },
        { PCI_DEVICE(0x1002, 0xaad8),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
          AZX_DCAPS_PM_RUNTIME },
index 70164d1..361cf20 100644 (file)
@@ -388,7 +388,7 @@ static int hda_tegra_first_init(struct azx *chip, struct platform_device *pdev)
         * in powers of 2, next available ratio is 16 which can be
         * used as a limiting factor here.
         */
-       if (of_device_is_compatible(np, "nvidia,tegra194-hda"))
+       if (of_device_is_compatible(np, "nvidia,tegra30-hda"))
                chip->bus.core.sdo_limit = 16;
 
        /* codec detection */
index be5000d..d49cc44 100644 (file)
@@ -1070,6 +1070,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
 static const struct hda_device_id snd_hda_id_conexant[] = {
        HDA_CODEC_ENTRY(0x14f11f86, "CX8070", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto),
+       HDA_CODEC_ENTRY(0x14f120d0, "CX11970", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto),
        HDA_CODEC_ENTRY(0x14f15051, "CX20561 (Hermosa)", patch_conexant_auto),
index 1e4a4b8..97adff0 100644 (file)
@@ -1733,7 +1733,7 @@ static void silent_stream_disable(struct hda_codec *codec,
        per_pin->silent_stream = false;
 
  unlock_out:
-       mutex_unlock(&spec->pcm_lock);
+       mutex_unlock(&per_pin->lock);
 }
 
 /* update ELD and jack state via audio component */
@@ -4346,6 +4346,7 @@ HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI",       patch_i915_icl_hdmi),
 HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI",  patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x80862814, "DG1 HDMI",        patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x80862815, "Alderlake HDMI",  patch_i915_tgl_hdmi),
+HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x80862816, "Rocketlake HDMI", patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi),
 HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI",        patch_i915_icl_hdmi),
index dde5ba2..ed5b6b8 100644 (file)
@@ -6289,6 +6289,7 @@ enum {
        ALC221_FIXUP_HP_FRONT_MIC,
        ALC292_FIXUP_TPT460,
        ALC298_FIXUP_SPK_VOLUME,
+       ALC298_FIXUP_LENOVO_SPK_VOLUME,
        ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER,
        ALC269_FIXUP_ATIV_BOOK_8,
        ALC221_FIXUP_HP_MIC_NO_PRESENCE,
@@ -6370,6 +6371,7 @@ enum {
        ALC256_FIXUP_HP_HEADSET_MIC,
        ALC236_FIXUP_DELL_AIO_HEADSET_MIC,
        ALC282_FIXUP_ACER_DISABLE_LINEOUT,
+       ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -7119,6 +7121,10 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
        },
+       [ALC298_FIXUP_LENOVO_SPK_VOLUME] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc298_fixup_speaker_volume,
+       },
        [ALC295_FIXUP_DISABLE_DAC3] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc295_fixup_disable_dac3,
@@ -7803,6 +7809,12 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_HEADSET_MODE
        },
+       [ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc269_fixup_limit_int_mic_boost,
+               .chained = true,
+               .chain_id = ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7821,6 +7833,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
+       SND_PCI_QUIRK(0x1025, 0x1094, "Acer Aspire E5-575T", ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x1166, "Acer Veriton N4640G", ALC269_FIXUP_LIFEBOOK),
@@ -7885,7 +7898,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1028, 0x0a30, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
-       SND_PCI_QUIRK(0x1028, 0x0a58, "Dell Precision 3650 Tower", ALC255_FIXUP_DELL_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1028, 0x0a58, "Dell", ALC255_FIXUP_DELL_HEADSET_MIC),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -7959,11 +7972,17 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+       SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+       SND_PCI_QUIRK(0x103c, 0x8780, "HP ZBook Fury 17 G7 Mobile Workstation",
+                     ALC285_FIXUP_HP_GPIO_AMP_INIT),
+       SND_PCI_QUIRK(0x103c, 0x8783, "HP ZBook Fury 15 G7 Mobile Workstation",
+                     ALC285_FIXUP_HP_GPIO_AMP_INIT),
+       SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
@@ -8021,6 +8040,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
        SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
        SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+       SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+       SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
        SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
        SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
@@ -8126,6 +8147,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+       SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
        SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
        SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
index 7ef8f31..834367d 100644 (file)
@@ -113,6 +113,7 @@ static struct via_spec *via_new_spec(struct hda_codec *codec)
                spec->codec_type = VT1708S;
        spec->gen.indep_hp = 1;
        spec->gen.keep_eapd_on = 1;
+       spec->gen.dac_min_mute = 1;
        spec->gen.pcm_playback_hook = via_playback_pcm_hook;
        spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO;
        codec->power_save_node = 1;
@@ -1002,6 +1003,7 @@ static const struct hda_verb vt1802_init_verbs[] = {
 enum {
        VIA_FIXUP_INTMIC_BOOST,
        VIA_FIXUP_ASUS_G75,
+       VIA_FIXUP_POWER_SAVE,
 };
 
 static void via_fixup_intmic_boost(struct hda_codec *codec,
@@ -1011,6 +1013,13 @@ static void via_fixup_intmic_boost(struct hda_codec *codec,
                override_mic_boost(codec, 0x30, 0, 2, 40);
 }
 
+static void via_fixup_power_save(struct hda_codec *codec,
+                                const struct hda_fixup *fix, int action)
+{
+       if (action == HDA_FIXUP_ACT_PRE_PROBE)
+               codec->power_save_node = 0;
+}
+
 static const struct hda_fixup via_fixups[] = {
        [VIA_FIXUP_INTMIC_BOOST] = {
                .type = HDA_FIXUP_FUNC,
@@ -1025,11 +1034,16 @@ static const struct hda_fixup via_fixups[] = {
                        { }
                }
        },
+       [VIA_FIXUP_POWER_SAVE] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = via_fixup_power_save,
+       },
 };
 
 static const struct snd_pci_quirk vt2002p_fixups[] = {
        SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75),
        SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST),
+       SND_PCI_QUIRK(0x1558, 0x3501, "Clevo W35xSS_370SS", VIA_FIXUP_POWER_SAVE),
        {}
 };
 
index 8c138e4..d3536fd 100644 (file)
@@ -140,21 +140,14 @@ static int snd_acp3x_probe(struct pci_dev *pci,
                goto release_regions;
        }
 
-       /* check for msi interrupt support */
-       ret = pci_enable_msi(pci);
-       if (ret)
-               /* msi is not enabled */
-               irqflags = IRQF_SHARED;
-       else
-               /* msi is enabled */
-               irqflags = 0;
+       irqflags = IRQF_SHARED;
 
        addr = pci_resource_start(pci, 0);
        adata->acp3x_base = devm_ioremap(&pci->dev, addr,
                                        pci_resource_len(pci, 0));
        if (!adata->acp3x_base) {
                ret = -ENOMEM;
-               goto disable_msi;
+               goto release_regions;
        }
        pci_set_master(pci);
        pci_set_drvdata(pci, adata);
@@ -162,7 +155,7 @@ static int snd_acp3x_probe(struct pci_dev *pci,
        adata->pme_en = rv_readl(adata->acp3x_base + mmACP_PME_EN);
        ret = acp3x_init(adata);
        if (ret)
-               goto disable_msi;
+               goto release_regions;
 
        val = rv_readl(adata->acp3x_base + mmACP_I2S_PIN_CONFIG);
        switch (val) {
@@ -251,8 +244,6 @@ unregister_devs:
 de_init:
        if (acp3x_deinit(adata->acp3x_base))
                dev_err(&pci->dev, "ACP de-init failed\n");
-disable_msi:
-       pci_disable_msi(pci);
 release_regions:
        pci_release_regions(pci);
 disable_pci:
@@ -311,7 +302,6 @@ static void snd_acp3x_remove(struct pci_dev *pci)
                dev_err(&pci->dev, "ACP de-init failed\n");
        pm_runtime_forbid(&pci->dev);
        pm_runtime_get_noresume(&pci->dev);
-       pci_disable_msi(pci);
        pci_release_regions(pci);
        pci_disable_device(pci);
 }
index fa169bf..deca8c7 100644 (file)
@@ -171,6 +171,20 @@ static const struct dmi_system_id rn_acp_quirk_table[] = {
                        DMI_EXACT_MATCH(DMI_BOARD_NAME, "LNVNB161216"),
                }
        },
+       {
+               /* Lenovo ThinkPad E14 Gen 2 */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_EXACT_MATCH(DMI_BOARD_NAME, "20T6CTO1WW"),
+               }
+       },
+       {
+               /* Lenovo ThinkPad X395 */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_EXACT_MATCH(DMI_BOARD_NAME, "20NLCTO1WW"),
+               }
+       },
        {}
 };
 
index 142373e..9fe9471 100644 (file)
@@ -143,7 +143,7 @@ config SND_MCHP_SOC_SPDIFTX
          - sama7g5
 
          This S/PDIF TX driver is compliant with IEC-60958 standard and
-         includes programable User Data and Channel Status fields.
+         includes programmable User Data and Channel Status fields.
 
 config SND_MCHP_SOC_SPDIFRX
        tristate "Microchip ASoC driver for boards using S/PDIF RX"
@@ -157,5 +157,5 @@ config SND_MCHP_SOC_SPDIFRX
          - sama7g5
 
          This S/PDIF RX driver is compliant with IEC-60958 standard and
-         includes programable User Data and Channel Status fields.
+         includes programmable User Data and Channel Status fields.
 endif
index ba4eb54..9bf6bfd 100644 (file)
@@ -457,7 +457,7 @@ config SND_SOC_ADAU7118_HW
        help
          Enable support for the Analog Devices ADAU7118 8 Channel PDM-to-I2S/TDM
          Converter. In this mode, the device works in standalone mode which
-         means that there is no bus to comunicate with it. Stereo mode is not
+         means that there is no bus to communicate with it. Stereo mode is not
          supported in this mode.
 
          To compile this driver as a module, choose M here: the module
index d5fcc4d..0f3ac22 100644 (file)
@@ -717,7 +717,7 @@ static int hdmi_codec_set_jack(struct snd_soc_component *component,
                               void *data)
 {
        struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
-       int ret = -EOPNOTSUPP;
+       int ret = -ENOTSUPP;
 
        if (hcp->hcd.ops->hook_plugged_cb) {
                hcp->jack = jack;
index 92921e3..85f6865 100644 (file)
 #include <sound/tlv.h>
 #include "max98373.h"
 
+static const u32 max98373_i2c_cache_reg[] = {
+       MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK,
+       MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK,
+       MAX98373_R20B6_BDE_CUR_STATE_READBACK,
+};
+
 static struct reg_default max98373_reg[] = {
        {MAX98373_R2000_SW_RESET, 0x00},
        {MAX98373_R2001_INT_RAW1, 0x00},
@@ -472,6 +478,11 @@ static struct snd_soc_dai_driver max98373_dai[] = {
 static int max98373_suspend(struct device *dev)
 {
        struct max98373_priv *max98373 = dev_get_drvdata(dev);
+       int i;
+
+       /* cache feedback register values before suspend */
+       for (i = 0; i < max98373->cache_num; i++)
+               regmap_read(max98373->regmap, max98373->cache[i].reg, &max98373->cache[i].val);
 
        regcache_cache_only(max98373->regmap, true);
        regcache_mark_dirty(max98373->regmap);
@@ -509,6 +520,7 @@ static int max98373_i2c_probe(struct i2c_client *i2c,
 {
        int ret = 0;
        int reg = 0;
+       int i;
        struct max98373_priv *max98373 = NULL;
 
        max98373 = devm_kzalloc(&i2c->dev, sizeof(*max98373), GFP_KERNEL);
@@ -534,6 +546,14 @@ static int max98373_i2c_probe(struct i2c_client *i2c,
                return ret;
        }
 
+       max98373->cache_num = ARRAY_SIZE(max98373_i2c_cache_reg);
+       max98373->cache = devm_kcalloc(&i2c->dev, max98373->cache_num,
+                                      sizeof(*max98373->cache),
+                                      GFP_KERNEL);
+
+       for (i = 0; i < max98373->cache_num; i++)
+               max98373->cache[i].reg = max98373_i2c_cache_reg[i];
+
        /* voltage/current slot & gpio configuration */
        max98373_slot_config(&i2c->dev, max98373);
 
index ec2e79c..b8d471d 100644 (file)
@@ -23,6 +23,12 @@ struct sdw_stream_data {
        struct sdw_stream_runtime *sdw_stream;
 };
 
+static const u32 max98373_sdw_cache_reg[] = {
+       MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK,
+       MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK,
+       MAX98373_R20B6_BDE_CUR_STATE_READBACK,
+};
+
 static struct reg_default max98373_reg[] = {
        {MAX98373_R0040_SCP_INIT_STAT_1, 0x00},
        {MAX98373_R0041_SCP_INIT_MASK_1, 0x00},
@@ -245,6 +251,11 @@ static const struct regmap_config max98373_sdw_regmap = {
 static __maybe_unused int max98373_suspend(struct device *dev)
 {
        struct max98373_priv *max98373 = dev_get_drvdata(dev);
+       int i;
+
+       /* cache feedback register values before suspend */
+       for (i = 0; i < max98373->cache_num; i++)
+               regmap_read(max98373->regmap, max98373->cache[i].reg, &max98373->cache[i].val);
 
        regcache_cache_only(max98373->regmap, true);
 
@@ -757,6 +768,7 @@ static int max98373_init(struct sdw_slave *slave, struct regmap *regmap)
 {
        struct max98373_priv *max98373;
        int ret;
+       int i;
        struct device *dev = &slave->dev;
 
        /*  Allocate and assign private driver data structure  */
@@ -768,6 +780,14 @@ static int max98373_init(struct sdw_slave *slave, struct regmap *regmap)
        max98373->regmap = regmap;
        max98373->slave = slave;
 
+       max98373->cache_num = ARRAY_SIZE(max98373_sdw_cache_reg);
+       max98373->cache = devm_kcalloc(dev, max98373->cache_num,
+                                      sizeof(*max98373->cache),
+                                      GFP_KERNEL);
+
+       for (i = 0; i < max98373->cache_num; i++)
+               max98373->cache[i].reg = max98373_sdw_cache_reg[i];
+
        /* Read voltage and slot configuration */
        max98373_slot_config(dev, max98373);
 
index 929bb17..31d571d 100644 (file)
@@ -168,6 +168,31 @@ static SOC_ENUM_SINGLE_DECL(max98373_adc_samplerate_enum,
                            MAX98373_R2051_MEAS_ADC_SAMPLING_RATE, 0,
                            max98373_ADC_samplerate_text);
 
+static int max98373_feedback_get(struct snd_kcontrol *kcontrol,
+                                struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+       struct soc_mixer_control *mc =
+               (struct soc_mixer_control *)kcontrol->private_value;
+       struct max98373_priv *max98373 = snd_soc_component_get_drvdata(component);
+       int i;
+
+       if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF) {
+               /*
+                * Register values will be cached before suspend. The cached value
+                * will be a valid value and userspace will happy with that.
+                */
+               for (i = 0; i < max98373->cache_num; i++) {
+                       if (mc->reg == max98373->cache[i].reg) {
+                               ucontrol->value.integer.value[0] = max98373->cache[i].val;
+                               return 0;
+                       }
+               }
+       }
+
+       return snd_soc_put_volsw(kcontrol, ucontrol);
+}
+
 static const struct snd_kcontrol_new max98373_snd_controls[] = {
 SOC_SINGLE("Digital Vol Sel Switch", MAX98373_R203F_AMP_DSP_CFG,
        MAX98373_AMP_VOL_SEL_SHIFT, 1, 0),
@@ -209,8 +234,10 @@ SOC_SINGLE("ADC PVDD FLT Switch", MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG,
        MAX98373_FLT_EN_SHIFT, 1, 0),
 SOC_SINGLE("ADC TEMP FLT Switch", MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG,
        MAX98373_FLT_EN_SHIFT, 1, 0),
-SOC_SINGLE("ADC PVDD", MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK, 0, 0xFF, 0),
-SOC_SINGLE("ADC TEMP", MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK, 0, 0xFF, 0),
+SOC_SINGLE_EXT("ADC PVDD", MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK, 0, 0xFF, 0,
+       max98373_feedback_get, NULL),
+SOC_SINGLE_EXT("ADC TEMP", MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK, 0, 0xFF, 0,
+       max98373_feedback_get, NULL),
 SOC_SINGLE("ADC PVDD FLT Coeff", MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG,
        0, 0x3, 0),
 SOC_SINGLE("ADC TEMP FLT Coeff", MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG,
@@ -226,7 +253,8 @@ SOC_SINGLE("BDE LVL1 Thresh", MAX98373_R2097_BDE_L1_THRESH, 0, 0xFF, 0),
 SOC_SINGLE("BDE LVL2 Thresh", MAX98373_R2098_BDE_L2_THRESH, 0, 0xFF, 0),
 SOC_SINGLE("BDE LVL3 Thresh", MAX98373_R2099_BDE_L3_THRESH, 0, 0xFF, 0),
 SOC_SINGLE("BDE LVL4 Thresh", MAX98373_R209A_BDE_L4_THRESH, 0, 0xFF, 0),
-SOC_SINGLE("BDE Active Level", MAX98373_R20B6_BDE_CUR_STATE_READBACK, 0, 8, 0),
+SOC_SINGLE_EXT("BDE Active Level", MAX98373_R20B6_BDE_CUR_STATE_READBACK, 0, 8, 0,
+       max98373_feedback_get, NULL),
 SOC_SINGLE("BDE Clip Mode Switch", MAX98373_R2092_BDE_CLIPPER_MODE, 0, 1, 0),
 SOC_SINGLE("BDE Thresh Hysteresis", MAX98373_R209B_BDE_THRESH_HYST, 0, 0xFF, 0),
 SOC_SINGLE("BDE Hold Time", MAX98373_R2090_BDE_LVL_HOLD, 0, 0xFF, 0),
index 4ab29b9..71f5a52 100644 (file)
 /* MAX98373_R2000_SW_RESET */
 #define MAX98373_SOFT_RESET (0x1 << 0)
 
+struct max98373_cache {
+       u32 reg;
+       u32 val;
+};
+
 struct max98373_priv {
        struct regmap *regmap;
        int reset_gpio;
@@ -212,6 +217,9 @@ struct max98373_priv {
        bool interleave_mode;
        unsigned int ch_size;
        bool tdm_mode;
+       /* cache for reading a valid fake feedback value */
+       struct max98373_cache *cache;
+       int cache_num;
        /* variables to support soundwire */
        struct sdw_slave *slave;
        bool hw_init;
index 5771c02..85f7441 100644 (file)
@@ -462,6 +462,8 @@ static int rt711_set_amp_gain_put(struct snd_kcontrol *kcontrol,
        unsigned int read_ll, read_rl;
        int i;
 
+       mutex_lock(&rt711->calibrate_mutex);
+
        /* Can't use update bit function, so read the original value first */
        addr_h = mc->reg;
        addr_l = mc->rreg;
@@ -547,6 +549,8 @@ static int rt711_set_amp_gain_put(struct snd_kcontrol *kcontrol,
        if (dapm->bias_level <= SND_SOC_BIAS_STANDBY)
                regmap_write(rt711->regmap,
                                RT711_SET_AUDIO_POWER_STATE, AC_PWRST_D3);
+
+       mutex_unlock(&rt711->calibrate_mutex);
        return 0;
 }
 
@@ -859,9 +863,11 @@ static int rt711_set_bias_level(struct snd_soc_component *component,
                break;
 
        case SND_SOC_BIAS_STANDBY:
+               mutex_lock(&rt711->calibrate_mutex);
                regmap_write(rt711->regmap,
                        RT711_SET_AUDIO_POWER_STATE,
                        AC_PWRST_D3);
+               mutex_unlock(&rt711->calibrate_mutex);
                break;
 
        default:
index 2c2a76a..dbbb761 100644 (file)
@@ -90,7 +90,7 @@ static int imx_hdmi_init(struct snd_soc_pcm_runtime *rtd)
        }
 
        ret = snd_soc_component_set_jack(component, &data->hdmi_jack, NULL);
-       if (ret && ret != -EOPNOTSUPP) {
+       if (ret && ret != -ENOTSUPP) {
                dev_err(card->dev, "Can't set HDMI Jack %d\n", ret);
                return ret;
        }
@@ -164,6 +164,7 @@ static int imx_hdmi_probe(struct platform_device *pdev)
 
        if ((hdmi_out && hdmi_in) || (!hdmi_out && !hdmi_in)) {
                dev_err(&pdev->dev, "Invalid HDMI DAI link\n");
+               ret = -EINVAL;
                goto fail;
        }
 
index c55d123..c763bfe 100644 (file)
@@ -189,6 +189,7 @@ static struct platform_driver haswell_audio = {
        .probe = haswell_audio_probe,
        .driver = {
                .name = "haswell-audio",
+               .pm = &snd_soc_pm_ops,
        },
 };
 
index fcd8dff..1275c14 100644 (file)
@@ -224,6 +224,7 @@ static int cnl_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
                                "dsp boot timeout, status=%#x error=%#x\n",
                                sst_dsp_shim_read(ctx, CNL_ADSP_FW_STATUS),
                                sst_dsp_shim_read(ctx, CNL_ADSP_ERROR_CODE));
+                       ret = -ETIMEDOUT;
                        goto err;
                }
        } else {
index c8664ab..87cac44 100644 (file)
@@ -467,8 +467,20 @@ static int axg_tdm_iface_set_bias_level(struct snd_soc_component *component,
        return ret;
 }
 
+static const struct snd_soc_dapm_widget axg_tdm_iface_dapm_widgets[] = {
+       SND_SOC_DAPM_SIGGEN("Playback Signal"),
+};
+
+static const struct snd_soc_dapm_route axg_tdm_iface_dapm_routes[] = {
+       { "Loopback", NULL, "Playback Signal" },
+};
+
 static const struct snd_soc_component_driver axg_tdm_iface_component_drv = {
-       .set_bias_level = axg_tdm_iface_set_bias_level,
+       .dapm_widgets           = axg_tdm_iface_dapm_widgets,
+       .num_dapm_widgets       = ARRAY_SIZE(axg_tdm_iface_dapm_widgets),
+       .dapm_routes            = axg_tdm_iface_dapm_routes,
+       .num_dapm_routes        = ARRAY_SIZE(axg_tdm_iface_dapm_routes),
+       .set_bias_level         = axg_tdm_iface_set_bias_level,
 };
 
 static const struct of_device_id axg_tdm_iface_of_match[] = {
index 88ed95a..b4faf9d 100644 (file)
@@ -224,15 +224,6 @@ static const struct axg_tdm_formatter_ops axg_tdmin_ops = {
 };
 
 static const struct axg_tdm_formatter_driver axg_tdmin_drv = {
-       .component_drv  = &axg_tdmin_component_drv,
-       .regmap_cfg     = &axg_tdmin_regmap_cfg,
-       .ops            = &axg_tdmin_ops,
-       .quirks         = &(const struct axg_tdm_formatter_hw) {
-               .skew_offset    = 2,
-       },
-};
-
-static const struct axg_tdm_formatter_driver g12a_tdmin_drv = {
        .component_drv  = &axg_tdmin_component_drv,
        .regmap_cfg     = &axg_tdmin_regmap_cfg,
        .ops            = &axg_tdmin_ops,
@@ -247,10 +238,10 @@ static const struct of_device_id axg_tdmin_of_match[] = {
                .data = &axg_tdmin_drv,
        }, {
                .compatible = "amlogic,g12a-tdmin",
-               .data = &g12a_tdmin_drv,
+               .data = &axg_tdmin_drv,
        }, {
                .compatible = "amlogic,sm1-tdmin",
-               .data = &g12a_tdmin_drv,
+               .data = &axg_tdmin_drv,
        }, {}
 };
 MODULE_DEVICE_TABLE(of, axg_tdmin_of_match);
index af684fd..c5e99c2 100644 (file)
@@ -270,18 +270,6 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
        struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
        unsigned int id = dai->driver->id;
        int ret = -EINVAL;
-       unsigned int val = 0;
-
-       ret = regmap_read(drvdata->lpaif_map,
-                               LPAIF_I2SCTL_REG(drvdata->variant, dai->driver->id), &val);
-       if (ret) {
-               dev_err(dai->dev, "error reading from i2sctl reg: %d\n", ret);
-               return ret;
-       }
-       if (val == LPAIF_I2SCTL_RESET_STATE) {
-               dev_err(dai->dev, "error in i2sctl register state\n");
-               return -ENOTRECOVERABLE;
-       }
 
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
@@ -454,20 +442,16 @@ static bool lpass_cpu_regmap_volatile(struct device *dev, unsigned int reg)
        struct lpass_variant *v = drvdata->variant;
        int i;
 
-       for (i = 0; i < v->i2s_ports; ++i)
-               if (reg == LPAIF_I2SCTL_REG(v, i))
-                       return true;
        for (i = 0; i < v->irq_ports; ++i)
                if (reg == LPAIF_IRQSTAT_REG(v, i))
                        return true;
 
        for (i = 0; i < v->rdma_channels; ++i)
-               if (reg == LPAIF_RDMACURR_REG(v, i) || reg == LPAIF_RDMACTL_REG(v, i))
+               if (reg == LPAIF_RDMACURR_REG(v, i))
                        return true;
 
        for (i = 0; i < v->wrdma_channels; ++i)
-               if (reg == LPAIF_WRDMACURR_REG(v, i + v->wrdma_channel_start) ||
-                       reg == LPAIF_WRDMACTL_REG(v, i + v->wrdma_channel_start))
+               if (reg == LPAIF_WRDMACURR_REG(v, i + v->wrdma_channel_start))
                        return true;
 
        return false;
index 80b09de..d1c2485 100644 (file)
@@ -452,7 +452,6 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
        unsigned int reg_irqclr = 0, val_irqclr = 0;
        unsigned int  reg_irqen = 0, val_irqen = 0, val_mask = 0;
        unsigned int dai_id = cpu_dai->driver->id;
-       unsigned int dma_ctrl_reg = 0;
 
        ch = pcm_data->dma_ch;
        if (dir ==  SNDRV_PCM_STREAM_PLAYBACK) {
@@ -469,17 +468,7 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
                id = pcm_data->dma_ch - v->wrdma_channel_start;
                map = drvdata->lpaif_map;
        }
-       ret = regmap_read(map, LPAIF_DMACTL_REG(v, ch, dir, dai_id), &dma_ctrl_reg);
-       if (ret) {
-               dev_err(soc_runtime->dev, "error reading from rdmactl reg: %d\n", ret);
-               return ret;
-       }
 
-       if (dma_ctrl_reg == LPAIF_DMACTL_RESET_STATE ||
-               dma_ctrl_reg == LPAIF_DMACTL_RESET_STATE + 1) {
-               dev_err(soc_runtime->dev, "error in rdmactl register state\n");
-               return -ENOTRECOVERABLE;
-       }
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
        case SNDRV_PCM_TRIGGER_RESUME:
@@ -500,7 +489,6 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
                                        "error writing to rdmactl reg: %d\n", ret);
                                return ret;
                        }
-                       map = drvdata->hdmiif_map;
                        reg_irqclr = LPASS_HDMITX_APP_IRQCLEAR_REG(v);
                        val_irqclr = (LPAIF_IRQ_ALL(ch) |
                                        LPAIF_IRQ_HDMI_REQ_ON_PRELOAD(ch) |
@@ -519,7 +507,6 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
                        break;
                case MI2S_PRIMARY:
                case MI2S_SECONDARY:
-                       map = drvdata->lpaif_map;
                        reg_irqclr = LPAIF_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
                        val_irqclr = LPAIF_IRQ_ALL(ch);
 
@@ -563,7 +550,6 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
                                        "error writing to rdmactl reg: %d\n", ret);
                                return ret;
                        }
-                       map = drvdata->hdmiif_map;
                        reg_irqen = LPASS_HDMITX_APP_IRQEN_REG(v);
                        val_mask = (LPAIF_IRQ_ALL(ch) |
                                        LPAIF_IRQ_HDMI_REQ_ON_PRELOAD(ch) |
@@ -573,7 +559,6 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
                        break;
                case MI2S_PRIMARY:
                case MI2S_SECONDARY:
-                       map = drvdata->lpaif_map;
                        reg_irqen = LPAIF_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST);
                        val_mask = LPAIF_IRQ_ALL(ch);
                        val_irqen = 0;
@@ -838,6 +823,39 @@ static void lpass_platform_pcm_free(struct snd_soc_component *component,
        }
 }
 
+static int lpass_platform_pcmops_suspend(struct snd_soc_component *component)
+{
+       struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
+       struct regmap *map;
+       unsigned int dai_id = component->id;
+
+       if (dai_id == LPASS_DP_RX)
+               map = drvdata->hdmiif_map;
+       else
+               map = drvdata->lpaif_map;
+
+       regcache_cache_only(map, true);
+       regcache_mark_dirty(map);
+
+       return 0;
+}
+
+static int lpass_platform_pcmops_resume(struct snd_soc_component *component)
+{
+       struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
+       struct regmap *map;
+       unsigned int dai_id = component->id;
+
+       if (dai_id == LPASS_DP_RX)
+               map = drvdata->hdmiif_map;
+       else
+               map = drvdata->lpaif_map;
+
+       regcache_cache_only(map, false);
+       return regcache_sync(map);
+}
+
+
 static const struct snd_soc_component_driver lpass_component_driver = {
        .name           = DRV_NAME,
        .open           = lpass_platform_pcmops_open,
@@ -850,6 +868,8 @@ static const struct snd_soc_component_driver lpass_component_driver = {
        .mmap           = lpass_platform_pcmops_mmap,
        .pcm_construct  = lpass_platform_pcm_new,
        .pcm_destruct   = lpass_platform_pcm_free,
+       .suspend                = lpass_platform_pcmops_suspend,
+       .resume                 = lpass_platform_pcmops_resume,
 
 };
 
index b9aacf3..abdfd9c 100644 (file)
@@ -366,25 +366,27 @@ void rsnd_adg_clk_control(struct rsnd_priv *priv, int enable)
        struct rsnd_adg *adg = rsnd_priv_to_adg(priv);
        struct device *dev = rsnd_priv_to_dev(priv);
        struct clk *clk;
-       int i, ret;
+       int i;
 
        for_each_rsnd_clk(clk, adg, i) {
-               ret = 0;
                if (enable) {
-                       ret = clk_prepare_enable(clk);
+                       int ret = clk_prepare_enable(clk);
 
                        /*
                         * We shouldn't use clk_get_rate() under
                         * atomic context. Let's keep it when
                         * rsnd_adg_clk_enable() was called
                         */
-                       adg->clk_rate[i] = clk_get_rate(adg->clk[i]);
+                       adg->clk_rate[i] = 0;
+                       if (ret < 0)
+                               dev_warn(dev, "can't use clk %d\n", i);
+                       else
+                               adg->clk_rate[i] = clk_get_rate(clk);
                } else {
-                       clk_disable_unprepare(clk);
+                       if (adg->clk_rate[i])
+                               clk_disable_unprepare(clk);
+                       adg->clk_rate[i] = 0;
                }
-
-               if (ret < 0)
-                       dev_warn(dev, "can't use clk %d\n", i);
        }
 }
 
index 9f0c86c..2b75d01 100644 (file)
@@ -2486,6 +2486,7 @@ void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w)
        enum snd_soc_dapm_direction dir;
 
        list_del(&w->list);
+       list_del(&w->dirty);
        /*
         * remove source and sink paths associated to this widget.
         * While removing the path, remove reference to it from both
index 031dad5..3e8b6c0 100644 (file)
@@ -122,7 +122,7 @@ config SND_SOC_SOF_DEBUG_XRUN_STOP
        bool "SOF stop on XRUN"
        help
          This option forces PCMs to stop on any XRUN event. This is useful to
-         preserve any trace data ond pipeline status prior to the XRUN.
+         preserve any trace data and pipeline status prior to the XRUN.
          Say Y if you are debugging SOF FW pipeline XRUNs.
          If unsure select "N".
 
index 6875fa5..6744318 100644 (file)
@@ -63,16 +63,18 @@ static int hda_codec_load_module(struct hda_codec *codec)
 }
 
 /* enable controller wake up event for all codecs with jack connectors */
-void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev)
+void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev, bool enable)
 {
        struct hda_bus *hbus = sof_to_hbus(sdev);
        struct hdac_bus *bus = sof_to_bus(sdev);
        struct hda_codec *codec;
        unsigned int mask = 0;
 
-       list_for_each_codec(codec, hbus)
-               if (codec->jacktbl.used)
-                       mask |= BIT(codec->core.addr);
+       if (enable) {
+               list_for_each_codec(codec, hbus)
+                       if (codec->jacktbl.used)
+                               mask |= BIT(codec->core.addr);
+       }
 
        snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, mask);
 }
@@ -81,23 +83,18 @@ void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev)
 void hda_codec_jack_check(struct snd_sof_dev *sdev)
 {
        struct hda_bus *hbus = sof_to_hbus(sdev);
-       struct hdac_bus *bus = sof_to_bus(sdev);
        struct hda_codec *codec;
 
-       /* disable controller Wake Up event*/
-       snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, 0);
-
        list_for_each_codec(codec, hbus)
                /*
                 * Wake up all jack-detecting codecs regardless whether an event
                 * has been recorded in STATESTS
                 */
                if (codec->jacktbl.used)
-                       schedule_delayed_work(&codec->jackpoll_work,
-                                             codec->jackpoll_interval);
+                       pm_request_resume(&codec->core.dev);
 }
 #else
-void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev) {}
+void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev, bool enable) {}
 void hda_codec_jack_check(struct snd_sof_dev *sdev) {}
 #endif /* CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC */
 EXPORT_SYMBOL_NS(hda_codec_jack_wake_enable, SND_SOC_SOF_HDA_AUDIO_CODEC);
@@ -156,7 +153,8 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
                if (!hdev->bus->audio_component) {
                        dev_dbg(sdev->dev,
                                "iDisp hw present but no driver\n");
-                       goto error;
+                       ret = -ENOENT;
+                       goto out;
                }
                hda_priv->need_display_power = true;
        }
@@ -173,24 +171,23 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
                 * other return codes without modification
                 */
                if (ret == 0)
-                       goto error;
+                       ret = -ENOENT;
        }
 
-       return ret;
-
-error:
-       snd_hdac_ext_bus_device_exit(hdev);
-       return -ENOENT;
-
+out:
+       if (ret < 0) {
+               snd_hdac_device_unregister(hdev);
+               put_device(&hdev->dev);
+       }
 #else
        hdev = devm_kzalloc(sdev->dev, sizeof(*hdev), GFP_KERNEL);
        if (!hdev)
                return -ENOMEM;
 
        ret = snd_hdac_ext_bus_device_init(&hbus->core, address, hdev, HDA_DEV_ASOC);
+#endif
 
        return ret;
-#endif
 }
 
 /* Codec initialization */
index 2b00115..1c5e05b 100644 (file)
@@ -617,7 +617,7 @@ static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
 
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
        if (runtime_suspend)
-               hda_codec_jack_wake_enable(sdev);
+               hda_codec_jack_wake_enable(sdev, true);
 
        /* power down all hda link */
        snd_hdac_ext_bus_link_power_down_all(bus);
@@ -683,8 +683,11 @@ static int hda_resume(struct snd_sof_dev *sdev, bool runtime_resume)
 
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
        /* check jack status */
-       if (runtime_resume)
-               hda_codec_jack_check(sdev);
+       if (runtime_resume) {
+               hda_codec_jack_wake_enable(sdev, false);
+               if (sdev->system_suspend_target == SOF_SUSPEND_NONE)
+                       hda_codec_jack_check(sdev);
+       }
 
        /* turn off the links that were off before suspend */
        list_for_each_entry(hlink, &bus->hlink_list, list) {
index 9ec8ae0..a3b6f3e 100644 (file)
@@ -650,7 +650,7 @@ void sof_hda_bus_init(struct hdac_bus *bus, struct device *dev);
  */
 void hda_codec_probe_bus(struct snd_sof_dev *sdev,
                         bool hda_codec_use_common_hdmi);
-void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev);
+void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev, bool enable);
 void hda_codec_jack_check(struct snd_sof_dev *sdev);
 
 #endif /* CONFIG_SND_SOC_SOF_HDA */
index d731ca6..e08fbf8 100644 (file)
@@ -450,10 +450,8 @@ lookup_device_name(u32 id)
 static void snd_usb_audio_free(struct snd_card *card)
 {
        struct snd_usb_audio *chip = card->private_data;
-       struct snd_usb_endpoint *ep, *n;
 
-       list_for_each_entry_safe(ep, n, &chip->ep_list, list)
-               snd_usb_endpoint_free(ep);
+       snd_usb_endpoint_free_all(chip);
 
        mutex_destroy(&chip->mutex);
        if (!atomic_read(&chip->shutdown))
@@ -611,6 +609,7 @@ static int snd_usb_audio_create(struct usb_interface *intf,
        chip->usb_id = usb_id;
        INIT_LIST_HEAD(&chip->pcm_list);
        INIT_LIST_HEAD(&chip->ep_list);
+       INIT_LIST_HEAD(&chip->iface_ref_list);
        INIT_LIST_HEAD(&chip->midi_list);
        INIT_LIST_HEAD(&chip->mixer_list);
 
index 6a027c3..37091b1 100644 (file)
@@ -18,6 +18,7 @@ struct audioformat {
        unsigned int frame_size;        /* samples per frame for non-audio */
        unsigned char iface;            /* interface number */
        unsigned char altsetting;       /* corresponding alternate setting */
+       unsigned char ep_idx;           /* endpoint array index */
        unsigned char altset_idx;       /* array index of altenate setting */
        unsigned char attributes;       /* corresponding attributes of cs endpoint */
        unsigned char endpoint;         /* endpoint */
@@ -42,6 +43,7 @@ struct audioformat {
 };
 
 struct snd_usb_substream;
+struct snd_usb_iface_ref;
 struct snd_usb_endpoint;
 struct snd_usb_power_domain;
 
@@ -58,6 +60,7 @@ struct snd_urb_ctx {
 
 struct snd_usb_endpoint {
        struct snd_usb_audio *chip;
+       struct snd_usb_iface_ref *iface_ref;
 
        int opened;             /* open refcount; protect with chip->mutex */
        atomic_t running;       /* running status */
index 31051f2..dc68ed6 100644 (file)
@@ -485,18 +485,9 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip,
                              const struct audioformat *fmt, int rate)
 {
        struct usb_device *dev = chip->dev;
-       struct usb_host_interface *alts;
-       unsigned int ep;
        unsigned char data[3];
        int err, crate;
 
-       alts = snd_usb_get_host_interface(chip, fmt->iface, fmt->altsetting);
-       if (!alts)
-               return -EINVAL;
-       if (get_iface_desc(alts)->bNumEndpoints < 1)
-               return -EINVAL;
-       ep = get_endpoint(alts, 0)->bEndpointAddress;
-
        /* if endpoint doesn't have sampling rate control, bail out */
        if (!(fmt->attributes & UAC_EP_CS_ATTR_SAMPLE_RATE))
                return 0;
@@ -506,11 +497,11 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip,
        data[2] = rate >> 16;
        err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC_SET_CUR,
                              USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_OUT,
-                             UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep,
-                             data, sizeof(data));
+                             UAC_EP_CS_ATTR_SAMPLE_RATE << 8,
+                             fmt->endpoint, data, sizeof(data));
        if (err < 0) {
                dev_err(&dev->dev, "%d:%d: cannot set freq %d to ep %#x\n",
-                       fmt->iface, fmt->altsetting, rate, ep);
+                       fmt->iface, fmt->altsetting, rate, fmt->endpoint);
                return err;
        }
 
@@ -524,11 +515,11 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip,
 
        err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC_GET_CUR,
                              USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_IN,
-                             UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep,
-                             data, sizeof(data));
+                             UAC_EP_CS_ATTR_SAMPLE_RATE << 8,
+                             fmt->endpoint, data, sizeof(data));
        if (err < 0) {
                dev_err(&dev->dev, "%d:%d: cannot get freq at ep %#x\n",
-                       fmt->iface, fmt->altsetting, ep);
+                       fmt->iface, fmt->altsetting, fmt->endpoint);
                chip->sample_rate_read_error++;
                return 0; /* some devices don't support reading */
        }
index 162da7a..8e56882 100644 (file)
 #define EP_FLAG_RUNNING                1
 #define EP_FLAG_STOPPING       2
 
+/* interface refcounting */
+struct snd_usb_iface_ref {
+       unsigned char iface;
+       bool need_setup;
+       int opened;
+       struct list_head list;
+};
+
 /*
  * snd_usb_endpoint is a model that abstracts everything related to an
  * USB endpoint and its streaming.
@@ -488,6 +496,28 @@ exit_clear:
        clear_bit(ctx->index, &ep->active_mask);
 }
 
+/*
+ * Find or create a refcount object for the given interface
+ *
+ * The objects are released altogether in snd_usb_endpoint_free_all()
+ */
+static struct snd_usb_iface_ref *
+iface_ref_find(struct snd_usb_audio *chip, int iface)
+{
+       struct snd_usb_iface_ref *ip;
+
+       list_for_each_entry(ip, &chip->iface_ref_list, list)
+               if (ip->iface == iface)
+                       return ip;
+
+       ip = kzalloc(sizeof(*ip), GFP_KERNEL);
+       if (!ip)
+               return NULL;
+       ip->iface = iface;
+       list_add_tail(&ip->list, &chip->iface_ref_list);
+       return ip;
+}
+
 /*
  * Get the existing endpoint object corresponding EP
  * Returns NULL if not present.
@@ -520,8 +550,8 @@ snd_usb_get_endpoint(struct snd_usb_audio *chip, int ep_num)
  *
  * Returns zero on success or a negative error code.
  *
- * New endpoints will be added to chip->ep_list and must be freed by
- * calling snd_usb_endpoint_free().
+ * New endpoints will be added to chip->ep_list and freed by
+ * calling snd_usb_endpoint_free_all().
  *
  * For SND_USB_ENDPOINT_TYPE_SYNC, the caller needs to guarantee that
  * bNumEndpoints > 1 beforehand.
@@ -653,11 +683,17 @@ snd_usb_endpoint_open(struct snd_usb_audio *chip,
                } else {
                        ep->iface = fp->iface;
                        ep->altsetting = fp->altsetting;
-                       ep->ep_idx = 0;
+                       ep->ep_idx = fp->ep_idx;
                }
                usb_audio_dbg(chip, "Open EP 0x%x, iface=%d:%d, idx=%d\n",
                              ep_num, ep->iface, ep->altsetting, ep->ep_idx);
 
+               ep->iface_ref = iface_ref_find(chip, ep->iface);
+               if (!ep->iface_ref) {
+                       ep = NULL;
+                       goto unlock;
+               }
+
                ep->cur_audiofmt = fp;
                ep->cur_channels = fp->channels;
                ep->cur_rate = params_rate(params);
@@ -681,6 +717,11 @@ snd_usb_endpoint_open(struct snd_usb_audio *chip,
                              ep->implicit_fb_sync);
 
        } else {
+               if (WARN_ON(!ep->iface_ref)) {
+                       ep = NULL;
+                       goto unlock;
+               }
+
                if (!endpoint_compatible(ep, fp, params)) {
                        usb_audio_err(chip, "Incompatible EP setup for 0x%x\n",
                                      ep_num);
@@ -692,6 +733,9 @@ snd_usb_endpoint_open(struct snd_usb_audio *chip,
                              ep_num, ep->opened);
        }
 
+       if (!ep->iface_ref->opened++)
+               ep->iface_ref->need_setup = true;
+
        ep->opened++;
 
  unlock:
@@ -760,12 +804,16 @@ void snd_usb_endpoint_close(struct snd_usb_audio *chip,
        mutex_lock(&chip->mutex);
        usb_audio_dbg(chip, "Closing EP 0x%x (count %d)\n",
                      ep->ep_num, ep->opened);
-       if (!--ep->opened) {
+
+       if (!--ep->iface_ref->opened)
                endpoint_set_interface(chip, ep, false);
+
+       if (!--ep->opened) {
                ep->iface = 0;
                ep->altsetting = 0;
                ep->cur_audiofmt = NULL;
                ep->cur_rate = 0;
+               ep->iface_ref = NULL;
                usb_audio_dbg(chip, "EP 0x%x closed\n", ep->ep_num);
        }
        mutex_unlock(&chip->mutex);
@@ -775,6 +823,8 @@ void snd_usb_endpoint_close(struct snd_usb_audio *chip,
 void snd_usb_endpoint_suspend(struct snd_usb_endpoint *ep)
 {
        ep->need_setup = true;
+       if (ep->iface_ref)
+               ep->iface_ref->need_setup = true;
 }
 
 /*
@@ -1195,11 +1245,22 @@ int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
        int err = 0;
 
        mutex_lock(&chip->mutex);
+       if (WARN_ON(!ep->iface_ref))
+               goto unlock;
        if (!ep->need_setup)
                goto unlock;
 
-       /* No need to (re-)configure the sync EP belonging to the same altset */
-       if (ep->ep_idx) {
+       /* If the interface has been already set up, just set EP parameters */
+       if (!ep->iface_ref->need_setup) {
+               /* sample rate setup of UAC1 is per endpoint, and we need
+                * to update at each EP configuration
+                */
+               if (ep->cur_audiofmt->protocol == UAC_VERSION_1) {
+                       err = snd_usb_init_sample_rate(chip, ep->cur_audiofmt,
+                                                      ep->cur_rate);
+                       if (err < 0)
+                               goto unlock;
+               }
                err = snd_usb_endpoint_set_params(chip, ep);
                if (err < 0)
                        goto unlock;
@@ -1242,6 +1303,8 @@ int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
                        goto unlock;
        }
 
+       ep->iface_ref->need_setup = false;
+
  done:
        ep->need_setup = false;
        err = 1;
@@ -1387,15 +1450,21 @@ void snd_usb_endpoint_release(struct snd_usb_endpoint *ep)
 }
 
 /**
- * snd_usb_endpoint_free: Free the resources of an snd_usb_endpoint
+ * snd_usb_endpoint_free_all: Free the resources of an snd_usb_endpoint
+ * @card: The chip
  *
- * @ep: the endpoint to free
- *
- * This free all resources of the given ep.
+ * This free all endpoints and those resources
  */
-void snd_usb_endpoint_free(struct snd_usb_endpoint *ep)
+void snd_usb_endpoint_free_all(struct snd_usb_audio *chip)
 {
-       kfree(ep);
+       struct snd_usb_endpoint *ep, *en;
+       struct snd_usb_iface_ref *ip, *in;
+
+       list_for_each_entry_safe(ep, en, &chip->ep_list, list)
+               kfree(ep);
+
+       list_for_each_entry_safe(ip, in, &chip->iface_ref_list, list)
+               kfree(ip);
 }
 
 /*
index 11e3bb8..eea4ca4 100644 (file)
@@ -42,7 +42,7 @@ void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep);
 void snd_usb_endpoint_suspend(struct snd_usb_endpoint *ep);
 int  snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
 void snd_usb_endpoint_release(struct snd_usb_endpoint *ep);
-void snd_usb_endpoint_free(struct snd_usb_endpoint *ep);
+void snd_usb_endpoint_free_all(struct snd_usb_audio *chip);
 
 int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep);
 int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep,
index eb3a4c4..521cc84 100644 (file)
@@ -58,8 +58,6 @@ static const struct snd_usb_implicit_fb_match playback_implicit_fb_quirks[] = {
        IMPLICIT_FB_FIXED_DEV(0x0499, 0x172f, 0x81, 2), /* Steinberg UR22C */
        IMPLICIT_FB_FIXED_DEV(0x0d9a, 0x00df, 0x81, 2), /* RTX6001 */
        IMPLICIT_FB_FIXED_DEV(0x22f0, 0x0006, 0x81, 3), /* Allen&Heath Qu-16 */
-       IMPLICIT_FB_FIXED_DEV(0x2b73, 0x000a, 0x82, 0), /* Pioneer DJ DJM-900NXS2 */
-       IMPLICIT_FB_FIXED_DEV(0x2b73, 0x0017, 0x82, 0), /* Pioneer DJ DJM-250MK2 */
        IMPLICIT_FB_FIXED_DEV(0x1686, 0xf029, 0x82, 2), /* Zoom UAC-2 */
        IMPLICIT_FB_FIXED_DEV(0x2466, 0x8003, 0x86, 2), /* Fractal Audio Axe-Fx II */
        IMPLICIT_FB_FIXED_DEV(0x0499, 0x172a, 0x86, 2), /* Yamaha MODX */
@@ -74,10 +72,12 @@ static const struct snd_usb_implicit_fb_match playback_implicit_fb_quirks[] = {
 
        /* No quirk for playback but with capture quirk (see below) */
        IMPLICIT_FB_SKIP_DEV(0x0582, 0x0130),   /* BOSS BR-80 */
+       IMPLICIT_FB_SKIP_DEV(0x0582, 0x0171),   /* BOSS RC-505 */
        IMPLICIT_FB_SKIP_DEV(0x0582, 0x0189),   /* BOSS GT-100v2 */
        IMPLICIT_FB_SKIP_DEV(0x0582, 0x01d6),   /* BOSS GT-1 */
        IMPLICIT_FB_SKIP_DEV(0x0582, 0x01d8),   /* BOSS Katana */
        IMPLICIT_FB_SKIP_DEV(0x0582, 0x01e5),   /* BOSS GT-001 */
+       IMPLICIT_FB_SKIP_DEV(0x0582, 0x0203),   /* BOSS AD-10 */
 
        {} /* terminator */
 };
@@ -85,10 +85,12 @@ static const struct snd_usb_implicit_fb_match playback_implicit_fb_quirks[] = {
 /* Implicit feedback quirk table for capture: only FIXED type */
 static const struct snd_usb_implicit_fb_match capture_implicit_fb_quirks[] = {
        IMPLICIT_FB_FIXED_DEV(0x0582, 0x0130, 0x0d, 0x01), /* BOSS BR-80 */
+       IMPLICIT_FB_FIXED_DEV(0x0582, 0x0171, 0x0d, 0x01), /* BOSS RC-505 */
        IMPLICIT_FB_FIXED_DEV(0x0582, 0x0189, 0x0d, 0x01), /* BOSS GT-100v2 */
        IMPLICIT_FB_FIXED_DEV(0x0582, 0x01d6, 0x0d, 0x01), /* BOSS GT-1 */
        IMPLICIT_FB_FIXED_DEV(0x0582, 0x01d8, 0x0d, 0x01), /* BOSS Katana */
        IMPLICIT_FB_FIXED_DEV(0x0582, 0x01e5, 0x0d, 0x01), /* BOSS GT-001 */
+       IMPLICIT_FB_FIXED_DEV(0x0582, 0x0203, 0x0d, 0x01), /* BOSS AD-10 */
 
        {} /* terminator */
 };
@@ -96,7 +98,7 @@ static const struct snd_usb_implicit_fb_match capture_implicit_fb_quirks[] = {
 /* set up sync EP information on the audioformat */
 static int add_implicit_fb_sync_ep(struct snd_usb_audio *chip,
                                   struct audioformat *fmt,
-                                  int ep, int ifnum,
+                                  int ep, int ep_idx, int ifnum,
                                   const struct usb_host_interface *alts)
 {
        struct usb_interface *iface;
@@ -111,7 +113,7 @@ static int add_implicit_fb_sync_ep(struct snd_usb_audio *chip,
        fmt->sync_ep = ep;
        fmt->sync_iface = ifnum;
        fmt->sync_altsetting = alts->desc.bAlternateSetting;
-       fmt->sync_ep_idx = 0;
+       fmt->sync_ep_idx = ep_idx;
        fmt->implicit_fb = 1;
        usb_audio_dbg(chip,
                      "%d:%d: added %s implicit_fb sync_ep %x, iface %d:%d\n",
@@ -143,7 +145,7 @@ static int add_generic_uac2_implicit_fb(struct snd_usb_audio *chip,
            (epd->bmAttributes & USB_ENDPOINT_USAGE_MASK) !=
                                        USB_ENDPOINT_USAGE_IMPLICIT_FB)
                return 0;
-       return add_implicit_fb_sync_ep(chip, fmt, epd->bEndpointAddress,
+       return add_implicit_fb_sync_ep(chip, fmt, epd->bEndpointAddress, 0,
                                       ifnum, alts);
 }
 
@@ -169,10 +171,33 @@ static int add_roland_implicit_fb(struct snd_usb_audio *chip,
            (epd->bmAttributes & USB_ENDPOINT_USAGE_MASK) !=
                                        USB_ENDPOINT_USAGE_IMPLICIT_FB)
                return 0;
-       return add_implicit_fb_sync_ep(chip, fmt, epd->bEndpointAddress,
+       return add_implicit_fb_sync_ep(chip, fmt, epd->bEndpointAddress, 0,
                                       ifnum, alts);
 }
 
+/* Playback and capture EPs on Pioneer devices share the same iface/altset,
+ * but they don't seem working with the implicit fb mode well, hence we
+ * just return as if the sync were already set up.
+ */
+static int skip_pioneer_sync_ep(struct snd_usb_audio *chip,
+                               struct audioformat *fmt,
+                               struct usb_host_interface *alts)
+{
+       struct usb_endpoint_descriptor *epd;
+
+       if (alts->desc.bNumEndpoints != 2)
+               return 0;
+
+       epd = get_endpoint(alts, 1);
+       if (!usb_endpoint_is_isoc_in(epd) ||
+           (epd->bmAttributes & USB_ENDPOINT_SYNCTYPE) != USB_ENDPOINT_SYNC_ASYNC ||
+           ((epd->bmAttributes & USB_ENDPOINT_USAGE_MASK) !=
+            USB_ENDPOINT_USAGE_DATA &&
+            (epd->bmAttributes & USB_ENDPOINT_USAGE_MASK) !=
+            USB_ENDPOINT_USAGE_IMPLICIT_FB))
+               return 0;
+       return 1; /* don't handle with the implicit fb, just skip sync EP */
+}
 
 static int __add_generic_implicit_fb(struct snd_usb_audio *chip,
                                     struct audioformat *fmt,
@@ -193,7 +218,7 @@ static int __add_generic_implicit_fb(struct snd_usb_audio *chip,
        if (!usb_endpoint_is_isoc_in(epd) ||
            (epd->bmAttributes & USB_ENDPOINT_SYNCTYPE) != USB_ENDPOINT_SYNC_ASYNC)
                return 0;
-       return add_implicit_fb_sync_ep(chip, fmt, epd->bEndpointAddress,
+       return add_implicit_fb_sync_ep(chip, fmt, epd->bEndpointAddress, 0,
                                       iface, alts);
 }
 
@@ -246,7 +271,7 @@ static int audioformat_implicit_fb_quirk(struct snd_usb_audio *chip,
                case IMPLICIT_FB_NONE:
                        return 0; /* No quirk */
                case IMPLICIT_FB_FIXED:
-                       return add_implicit_fb_sync_ep(chip, fmt, p->ep_num,
+                       return add_implicit_fb_sync_ep(chip, fmt, p->ep_num, 0,
                                                       p->iface, NULL);
                }
        }
@@ -274,6 +299,14 @@ static int audioformat_implicit_fb_quirk(struct snd_usb_audio *chip,
                        return 1;
        }
 
+       /* Pioneer devices with vendor spec class */
+       if (attr == USB_ENDPOINT_SYNC_ASYNC &&
+           alts->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC &&
+           USB_ID_VENDOR(chip->usb_id) == 0x2b73 /* Pioneer */) {
+               if (skip_pioneer_sync_ep(chip, fmt, alts))
+                       return 1;
+       }
+
        /* Try the generic implicit fb if available */
        if (chip->generic_implicit_fb)
                return add_generic_implicit_fb(chip, fmt, alts);
@@ -291,8 +324,8 @@ static int audioformat_capture_quirk(struct snd_usb_audio *chip,
 
        p = find_implicit_fb_entry(chip, capture_implicit_fb_quirks, alts);
        if (p && p->type == IMPLICIT_FB_FIXED)
-               return add_implicit_fb_sync_ep(chip, fmt, p->ep_num, p->iface,
-                                              NULL);
+               return add_implicit_fb_sync_ep(chip, fmt, p->ep_num, 0,
+                                              p->iface, NULL);
        return 0;
 }
 
@@ -374,20 +407,19 @@ snd_usb_find_implicit_fb_sync_format(struct snd_usb_audio *chip,
                                     int stream)
 {
        struct snd_usb_substream *subs;
-       const struct audioformat *fp, *sync_fmt;
+       const struct audioformat *fp, *sync_fmt = NULL;
        int score, high_score;
 
-       /* When sharing the same altset, use the original audioformat */
+       /* Use the original audioformat as fallback for the shared altset */
        if (target->iface == target->sync_iface &&
            target->altsetting == target->sync_altsetting)
-               return target;
+               sync_fmt = target;
 
        subs = find_matching_substream(chip, stream, target->sync_ep,
                                       target->fmt_type);
        if (!subs)
-               return NULL;
+               return sync_fmt;
 
-       sync_fmt = NULL;
        high_score = 0;
        list_for_each_entry(fp, &subs->fmt_list, list) {
                score = match_endpoint_audioformats(subs, fp,
index c821365..0c23fa6 100644 (file)
@@ -1889,6 +1889,8 @@ static int snd_usbmidi_get_ms_info(struct snd_usb_midi *umidi,
                ms_ep = find_usb_ms_endpoint_descriptor(hostep);
                if (!ms_ep)
                        continue;
+               if (ms_ep->bNumEmbMIDIJack > 0x10)
+                       continue;
                if (usb_endpoint_dir_out(ep)) {
                        if (endpoints[epidx].out_ep) {
                                if (++epidx >= MIDI_MAX_ENDPOINTS) {
@@ -2141,6 +2143,8 @@ static int snd_usbmidi_detect_roland(struct snd_usb_midi *umidi,
                    cs_desc[1] == USB_DT_CS_INTERFACE &&
                    cs_desc[2] == 0xf1 &&
                    cs_desc[3] == 0x02) {
+                       if (cs_desc[4] > 0x10 || cs_desc[5] > 0x10)
+                               continue;
                        endpoint->in_cables  = (1 << cs_desc[4]) - 1;
                        endpoint->out_cables = (1 << cs_desc[5]) - 1;
                        return snd_usbmidi_detect_endpoints(umidi, endpoint, 1);
index 5607990..078bb4c 100644 (file)
@@ -663,7 +663,7 @@ static int hw_check_valid_format(struct snd_usb_substream *subs,
        check_fmts.bits[1] = (u32)(fp->formats >> 32);
        snd_mask_intersect(&check_fmts, fmts);
        if (snd_mask_empty(&check_fmts)) {
-               hwc_debug("   > check: no supported format %d\n", fp->format);
+               hwc_debug("   > check: no supported format 0x%llx\n", fp->formats);
                return 0;
        }
        /* check the channels */
@@ -775,24 +775,11 @@ static int hw_rule_channels(struct snd_pcm_hw_params *params,
        return apply_hw_params_minmax(it, rmin, rmax);
 }
 
-static int hw_rule_format(struct snd_pcm_hw_params *params,
-                         struct snd_pcm_hw_rule *rule)
+static int apply_hw_params_format_bits(struct snd_mask *fmt, u64 fbits)
 {
-       struct snd_usb_substream *subs = rule->private;
-       const struct audioformat *fp;
-       struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
-       u64 fbits;
        u32 oldbits[2];
        int changed;
 
-       hwc_debug("hw_rule_format: %x:%x\n", fmt->bits[0], fmt->bits[1]);
-       fbits = 0;
-       list_for_each_entry(fp, &subs->fmt_list, list) {
-               if (!hw_check_valid_format(subs, params, fp))
-                       continue;
-               fbits |= fp->formats;
-       }
-
        oldbits[0] = fmt->bits[0];
        oldbits[1] = fmt->bits[1];
        fmt->bits[0] &= (u32)fbits;
@@ -806,6 +793,24 @@ static int hw_rule_format(struct snd_pcm_hw_params *params,
        return changed;
 }
 
+static int hw_rule_format(struct snd_pcm_hw_params *params,
+                         struct snd_pcm_hw_rule *rule)
+{
+       struct snd_usb_substream *subs = rule->private;
+       const struct audioformat *fp;
+       struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+       u64 fbits;
+
+       hwc_debug("hw_rule_format: %x:%x\n", fmt->bits[0], fmt->bits[1]);
+       fbits = 0;
+       list_for_each_entry(fp, &subs->fmt_list, list) {
+               if (!hw_check_valid_format(subs, params, fp))
+                       continue;
+               fbits |= fp->formats;
+       }
+       return apply_hw_params_format_bits(fmt, fbits);
+}
+
 static int hw_rule_period_time(struct snd_pcm_hw_params *params,
                               struct snd_pcm_hw_rule *rule)
 {
@@ -833,64 +838,92 @@ static int hw_rule_period_time(struct snd_pcm_hw_params *params,
        return apply_hw_params_minmax(it, pmin, UINT_MAX);
 }
 
-/* apply PCM hw constraints from the concurrent sync EP */
-static int apply_hw_constraint_from_sync(struct snd_pcm_runtime *runtime,
-                                        struct snd_usb_substream *subs)
+/* get the EP or the sync EP for implicit fb when it's already set up */
+static const struct snd_usb_endpoint *
+get_sync_ep_from_substream(struct snd_usb_substream *subs)
 {
        struct snd_usb_audio *chip = subs->stream->chip;
-       struct snd_usb_endpoint *ep;
        const struct audioformat *fp;
-       int err;
+       const struct snd_usb_endpoint *ep;
 
        list_for_each_entry(fp, &subs->fmt_list, list) {
                ep = snd_usb_get_endpoint(chip, fp->endpoint);
                if (ep && ep->cur_rate)
-                       goto found;
+                       return ep;
                if (!fp->implicit_fb)
                        continue;
                /* for the implicit fb, check the sync ep as well */
                ep = snd_usb_get_endpoint(chip, fp->sync_ep);
                if (ep && ep->cur_rate)
-                       goto found;
+                       return ep;
        }
-       return 0;
+       return NULL;
+}
 
- found:
-       if (!find_format(&subs->fmt_list, ep->cur_format, ep->cur_rate,
-                        ep->cur_channels, false, NULL)) {
-               usb_audio_dbg(chip, "EP 0x%x being used, but not applicable\n",
-                             ep->ep_num);
+/* additional hw constraints for implicit feedback mode */
+static int hw_rule_format_implicit_fb(struct snd_pcm_hw_params *params,
+                                     struct snd_pcm_hw_rule *rule)
+{
+       struct snd_usb_substream *subs = rule->private;
+       const struct snd_usb_endpoint *ep;
+       struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+
+       ep = get_sync_ep_from_substream(subs);
+       if (!ep)
                return 0;
-       }
 
-       usb_audio_dbg(chip, "EP 0x%x being used, using fixed params:\n",
-                     ep->ep_num);
-       usb_audio_dbg(chip, "rate=%d, period_size=%d, periods=%d\n",
-                     ep->cur_rate, ep->cur_period_frames,
-                     ep->cur_buffer_periods);
+       hwc_debug("applying %s\n", __func__);
+       return apply_hw_params_format_bits(fmt, pcm_format_to_bits(ep->cur_format));
+}
 
-       runtime->hw.formats = subs->formats;
-       runtime->hw.rate_min = runtime->hw.rate_max = ep->cur_rate;
-       runtime->hw.rates = SNDRV_PCM_RATE_KNOT;
-       runtime->hw.periods_min = runtime->hw.periods_max =
-               ep->cur_buffer_periods;
+static int hw_rule_rate_implicit_fb(struct snd_pcm_hw_params *params,
+                                   struct snd_pcm_hw_rule *rule)
+{
+       struct snd_usb_substream *subs = rule->private;
+       const struct snd_usb_endpoint *ep;
+       struct snd_interval *it;
 
-       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
-                                 hw_rule_channels, subs,
-                                 SNDRV_PCM_HW_PARAM_FORMAT,
-                                 SNDRV_PCM_HW_PARAM_RATE,
-                                 -1);
-       if (err < 0)
-               return err;
+       ep = get_sync_ep_from_substream(subs);
+       if (!ep)
+               return 0;
 
-       err = snd_pcm_hw_constraint_minmax(runtime,
-                                          SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
-                                          ep->cur_period_frames,
-                                          ep->cur_period_frames);
-       if (err < 0)
-               return err;
+       hwc_debug("applying %s\n", __func__);
+       it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+       return apply_hw_params_minmax(it, ep->cur_rate, ep->cur_rate);
+}
 
-       return 1; /* notify the finding */
+static int hw_rule_period_size_implicit_fb(struct snd_pcm_hw_params *params,
+                                          struct snd_pcm_hw_rule *rule)
+{
+       struct snd_usb_substream *subs = rule->private;
+       const struct snd_usb_endpoint *ep;
+       struct snd_interval *it;
+
+       ep = get_sync_ep_from_substream(subs);
+       if (!ep)
+               return 0;
+
+       hwc_debug("applying %s\n", __func__);
+       it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
+       return apply_hw_params_minmax(it, ep->cur_period_frames,
+                                     ep->cur_period_frames);
+}
+
+static int hw_rule_periods_implicit_fb(struct snd_pcm_hw_params *params,
+                                      struct snd_pcm_hw_rule *rule)
+{
+       struct snd_usb_substream *subs = rule->private;
+       const struct snd_usb_endpoint *ep;
+       struct snd_interval *it;
+
+       ep = get_sync_ep_from_substream(subs);
+       if (!ep)
+               return 0;
+
+       hwc_debug("applying %s\n", __func__);
+       it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIODS);
+       return apply_hw_params_minmax(it, ep->cur_buffer_periods,
+                                     ep->cur_buffer_periods);
 }
 
 /*
@@ -899,20 +932,11 @@ static int apply_hw_constraint_from_sync(struct snd_pcm_runtime *runtime,
 
 static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substream *subs)
 {
-       struct snd_usb_audio *chip = subs->stream->chip;
        const struct audioformat *fp;
        unsigned int pt, ptmin;
        int param_period_time_if_needed = -1;
        int err;
 
-       mutex_lock(&chip->mutex);
-       err = apply_hw_constraint_from_sync(runtime, subs);
-       mutex_unlock(&chip->mutex);
-       if (err < 0)
-               return err;
-       if (err > 0) /* found the matching? */
-               goto add_extra_rules;
-
        runtime->hw.formats = subs->formats;
 
        runtime->hw.rate_min = 0x7fffffff;
@@ -957,6 +981,7 @@ static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substre
 
        err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
                                  hw_rule_rate, subs,
+                                 SNDRV_PCM_HW_PARAM_RATE,
                                  SNDRV_PCM_HW_PARAM_FORMAT,
                                  SNDRV_PCM_HW_PARAM_CHANNELS,
                                  param_period_time_if_needed,
@@ -964,9 +989,9 @@ static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substre
        if (err < 0)
                return err;
 
-add_extra_rules:
        err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
                                  hw_rule_channels, subs,
+                                 SNDRV_PCM_HW_PARAM_CHANNELS,
                                  SNDRV_PCM_HW_PARAM_FORMAT,
                                  SNDRV_PCM_HW_PARAM_RATE,
                                  param_period_time_if_needed,
@@ -975,6 +1000,7 @@ add_extra_rules:
                return err;
        err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
                                  hw_rule_format, subs,
+                                 SNDRV_PCM_HW_PARAM_FORMAT,
                                  SNDRV_PCM_HW_PARAM_RATE,
                                  SNDRV_PCM_HW_PARAM_CHANNELS,
                                  param_period_time_if_needed,
@@ -993,6 +1019,28 @@ add_extra_rules:
                        return err;
        }
 
+       /* additional hw constraints for implicit fb */
+       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
+                                 hw_rule_format_implicit_fb, subs,
+                                 SNDRV_PCM_HW_PARAM_FORMAT, -1);
+       if (err < 0)
+               return err;
+       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+                                 hw_rule_rate_implicit_fb, subs,
+                                 SNDRV_PCM_HW_PARAM_RATE, -1);
+       if (err < 0)
+               return err;
+       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+                                 hw_rule_period_size_implicit_fb, subs,
+                                 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
+       if (err < 0)
+               return err;
+       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS,
+                                 hw_rule_periods_implicit_fb, subs,
+                                 SNDRV_PCM_HW_PARAM_PERIODS, -1);
+       if (err < 0)
+               return err;
+
        return 0;
 }
 
index 0e11cb9..c8a4bdf 100644 (file)
@@ -3362,6 +3362,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
                                        .altsetting = 1,
                                        .altset_idx = 1,
                                        .endpoint = 0x86,
+                                       .ep_idx = 1,
                                        .ep_attr = USB_ENDPOINT_XFER_ISOC|
                                                 USB_ENDPOINT_SYNC_ASYNC|
                                                 USB_ENDPOINT_USAGE_IMPLICIT_FB,
@@ -3450,6 +3451,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
                                        .altsetting = 1,
                                        .altset_idx = 1,
                                        .endpoint = 0x82,
+                                       .ep_idx = 1,
                                        .ep_attr = USB_ENDPOINT_XFER_ISOC|
                                                USB_ENDPOINT_SYNC_ASYNC|
                                                USB_ENDPOINT_USAGE_IMPLICIT_FB,
@@ -3506,6 +3508,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
                                        .altsetting = 1,
                                        .altset_idx = 1,
                                        .endpoint = 0x82,
+                                       .ep_idx = 1,
                                        .ep_attr = USB_ENDPOINT_XFER_ISOC|
                                                 USB_ENDPOINT_SYNC_ASYNC|
                                                 USB_ENDPOINT_USAGE_IMPLICIT_FB,
@@ -3562,6 +3565,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
                                        .altsetting = 1,
                                        .altset_idx = 1,
                                        .endpoint = 0x82,
+                                       .ep_idx = 1,
                                        .ep_attr = USB_ENDPOINT_XFER_ISOC|
                                                 USB_ENDPOINT_SYNC_ASYNC|
                                                 USB_ENDPOINT_USAGE_IMPLICIT_FB,
@@ -3619,6 +3623,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
                                        .altsetting = 1,
                                        .altset_idx = 1,
                                        .endpoint = 0x82,
+                                       .ep_idx = 1,
                                        .ep_attr = USB_ENDPOINT_XFER_ISOC|
                                                USB_ENDPOINT_SYNC_ASYNC|
                                        USB_ENDPOINT_USAGE_IMPLICIT_FB,
@@ -3679,6 +3684,7 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
                                        .altsetting = 1,
                                        .altset_idx = 1,
                                        .endpoint = 0x82,
+                                       .ep_idx = 1,
                                        .ep_attr = USB_ENDPOINT_XFER_ISOC|
                                            USB_ENDPOINT_SYNC_ASYNC|
                                            USB_ENDPOINT_USAGE_IMPLICIT_FB,
index e4a690b..e196e36 100644 (file)
@@ -120,6 +120,40 @@ static int create_standard_audio_quirk(struct snd_usb_audio *chip,
        return 0;
 }
 
+/* create the audio stream and the corresponding endpoints from the fixed
+ * audioformat object; this is used for quirks with the fixed EPs
+ */
+static int add_audio_stream_from_fixed_fmt(struct snd_usb_audio *chip,
+                                          struct audioformat *fp)
+{
+       int stream, err;
+
+       stream = (fp->endpoint & USB_DIR_IN) ?
+               SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
+
+       snd_usb_audioformat_set_sync_ep(chip, fp);
+
+       err = snd_usb_add_audio_stream(chip, stream, fp);
+       if (err < 0)
+               return err;
+
+       err = snd_usb_add_endpoint(chip, fp->endpoint,
+                                  SND_USB_ENDPOINT_TYPE_DATA);
+       if (err < 0)
+               return err;
+
+       if (fp->sync_ep) {
+               err = snd_usb_add_endpoint(chip, fp->sync_ep,
+                                          fp->implicit_fb ?
+                                          SND_USB_ENDPOINT_TYPE_DATA :
+                                          SND_USB_ENDPOINT_TYPE_SYNC);
+               if (err < 0)
+                       return err;
+       }
+
+       return 0;
+}
+
 /*
  * create a stream for an endpoint/altsetting without proper descriptors
  */
@@ -131,8 +165,8 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
        struct audioformat *fp;
        struct usb_host_interface *alts;
        struct usb_interface_descriptor *altsd;
-       int stream, err;
        unsigned *rate_table = NULL;
+       int err;
 
        fp = kmemdup(quirk->data, sizeof(*fp), GFP_KERNEL);
        if (!fp)
@@ -153,11 +187,6 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
                fp->rate_table = rate_table;
        }
 
-       stream = (fp->endpoint & USB_DIR_IN)
-               ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
-       err = snd_usb_add_audio_stream(chip, stream, fp);
-       if (err < 0)
-               goto error;
        if (fp->iface != get_iface_desc(&iface->altsetting[0])->bInterfaceNumber ||
            fp->altset_idx >= iface->num_altsetting) {
                err = -EINVAL;
@@ -165,7 +194,7 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
        }
        alts = &iface->altsetting[fp->altset_idx];
        altsd = get_iface_desc(alts);
-       if (altsd->bNumEndpoints < 1) {
+       if (altsd->bNumEndpoints <= fp->ep_idx) {
                err = -EINVAL;
                goto error;
        }
@@ -175,7 +204,14 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
        if (fp->datainterval == 0)
                fp->datainterval = snd_usb_parse_datainterval(chip, alts);
        if (fp->maxpacksize == 0)
-               fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
+               fp->maxpacksize = le16_to_cpu(get_endpoint(alts, fp->ep_idx)->wMaxPacketSize);
+       if (!fp->fmt_type)
+               fp->fmt_type = UAC_FORMAT_TYPE_I;
+
+       err = add_audio_stream_from_fixed_fmt(chip, fp);
+       if (err < 0)
+               goto error;
+
        usb_set_interface(chip->dev, fp->iface, 0);
        snd_usb_init_pitch(chip, fp);
        snd_usb_init_sample_rate(chip, fp, fp->rate_max);
@@ -417,7 +453,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
        struct usb_host_interface *alts;
        struct usb_interface_descriptor *altsd;
        struct audioformat *fp;
-       int stream, err;
+       int err;
 
        /* both PCM and MIDI interfaces have 2 or more altsettings */
        if (iface->num_altsetting < 2)
@@ -482,9 +518,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
                return -ENXIO;
        }
 
-       stream = (fp->endpoint & USB_DIR_IN)
-               ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
-       err = snd_usb_add_audio_stream(chip, stream, fp);
+       err = add_audio_stream_from_fixed_fmt(chip, fp);
        if (err < 0) {
                list_del(&fp->list); /* unlink for avoiding double-free */
                kfree(fp);
@@ -1436,30 +1470,6 @@ static void set_format_emu_quirk(struct snd_usb_substream *subs,
        subs->pkt_offset_adj = (emu_samplerate_id >= EMU_QUIRK_SR_176400HZ) ? 4 : 0;
 }
 
-
-/*
- * Pioneer DJ DJM-900NXS2
- * Device needs to know the sample rate each time substream is started
- */
-static int pioneer_djm_set_format_quirk(struct snd_usb_substream *subs)
-{
-       unsigned int cur_rate = subs->data_endpoint->cur_rate;
-       /* Convert sample rate value to little endian */
-       u8 sr[3];
-
-       sr[0] = cur_rate & 0xff;
-       sr[1] = (cur_rate >> 8) & 0xff;
-       sr[2] = (cur_rate >> 16) & 0xff;
-
-       /* Configure device */
-       usb_set_interface(subs->dev, 0, 1);
-       snd_usb_ctl_msg(subs->stream->chip->dev,
-               usb_rcvctrlpipe(subs->stream->chip->dev, 0),
-               0x01, 0x22, 0x0100, 0x0082, &sr, 0x0003);
-
-       return 0;
-}
-
 void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
                              const struct audioformat *fmt)
 {
@@ -1470,10 +1480,6 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
        case USB_ID(0x041e, 0x3f19): /* E-Mu 0204 USB */
                set_format_emu_quirk(subs, fmt);
                break;
-       case USB_ID(0x2b73, 0x000a): /* Pioneer DJ DJM-900NXS2 */
-       case USB_ID(0x2b73, 0x0017): /* Pioneer DJ DJM-250MK2 */
-               pioneer_djm_set_format_quirk(subs);
-               break;
        case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */
                subs->stream_offset_adj = 2;
                break;
index 980287a..215c177 100644 (file)
@@ -44,6 +44,7 @@ struct snd_usb_audio {
 
        struct list_head pcm_list;      /* list of pcm streams */
        struct list_head ep_list;       /* list of audio-related endpoints */
+       struct list_head iface_ref_list; /* list of interface refcounts */
        int pcm_devs;
 
        struct list_head midi_list;     /* list of midi interfaces */
index 595e164..feb30c2 100755 (executable)
@@ -152,6 +152,7 @@ setup_instance() { # [instance]
        set_array_of ${instance}.options ${instancedir}/trace_options
        set_value_of ${instance}.trace_clock ${instancedir}/trace_clock
        set_value_of ${instance}.cpumask ${instancedir}/tracing_cpumask
+       set_value_of ${instance}.tracing_on ${instancedir}/tracing_on
        set_value_of ${instance}.tracer ${instancedir}/current_tracer
        set_array_of ${instance}.ftrace.filters \
                ${instancedir}/set_ftrace_filter
index 6c0d4b6..a0c3bcc 100755 (executable)
@@ -221,6 +221,10 @@ instance_options() { # [instance-name]
        if [ `echo $val | sed -e s/f//g`x != x ]; then
                emit_kv $PREFIX.cpumask = $val
        fi
+       val=`cat $INSTANCE/tracing_on`
+       if [ `echo $val | sed -e s/f//g`x != x ]; then
+               emit_kv $PREFIX.tracing_on = $val
+       fi
 
        val=
        for i in `cat $INSTANCE/set_event`; do
index 3fae61e..ff3aa0c 100644 (file)
@@ -11,7 +11,6 @@
 #include <bpf/bpf.h>
 #include <bpf/libbpf.h>
 #include <net/if.h>
-#include <linux/if.h>
 #include <linux/rtnetlink.h>
 #include <linux/socket.h>
 #include <linux/tc_act/tc_bpf.h>
index e3ea569..7409d78 100644 (file)
@@ -139,6 +139,8 @@ int eprintf(int level, int var, const char *fmt, ...)
 #define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_err(fmt, ...) \
        eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info(fmt, ...) \
+       eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__)
 
 static bool is_btf_id(const char *name)
 {
@@ -472,7 +474,7 @@ static int symbols_resolve(struct object *obj)
        int nr_funcs    = obj->nr_funcs;
        int err, type_id;
        struct btf *btf;
-       __u32 nr;
+       __u32 nr_types;
 
        btf = btf__parse(obj->btf ?: obj->path, NULL);
        err = libbpf_get_error(btf);
@@ -483,12 +485,12 @@ static int symbols_resolve(struct object *obj)
        }
 
        err = -1;
-       nr  = btf__get_nr_types(btf);
+       nr_types = btf__get_nr_types(btf);
 
        /*
         * Iterate all the BTF types and search for collected symbol IDs.
         */
-       for (type_id = 1; type_id <= nr; type_id++) {
+       for (type_id = 1; type_id <= nr_types; type_id++) {
                const struct btf_type *type;
                struct rb_root *root;
                struct btf_id *id;
@@ -526,8 +528,13 @@ static int symbols_resolve(struct object *obj)
 
                id = btf_id__find(root, str);
                if (id) {
-                       id->id = type_id;
-                       (*nr)--;
+                       if (id->id) {
+                               pr_info("WARN: multiple IDs found for '%s': %d, %d - using %d\n",
+                                       str, id->id, type_id, id->id);
+                       } else {
+                               id->id = type_id;
+                               (*nr)--;
+                       }
                }
        }
 
index cacd66a..a2b233f 100644 (file)
@@ -107,8 +107,8 @@ int monitor_device(const char *device_name,
                        ret = -EIO;
                        break;
                }
-               fprintf(stdout, "GPIO EVENT at %llu on line %d (%d|%d) ",
-                       event.timestamp_ns, event.offset, event.line_seqno,
+               fprintf(stdout, "GPIO EVENT at %" PRIu64 " on line %d (%d|%d) ",
+                       (uint64_t)event.timestamp_ns, event.offset, event.line_seqno,
                        event.seqno);
                switch (event.id) {
                case GPIO_V2_LINE_EVENT_RISING_EDGE:
index f229ec6..41e76d2 100644 (file)
@@ -10,6 +10,7 @@
 #include <ctype.h>
 #include <errno.h>
 #include <fcntl.h>
+#include <inttypes.h>
 #include <linux/gpio.h>
 #include <poll.h>
 #include <stdbool.h>
@@ -86,8 +87,8 @@ int main(int argc, char **argv)
                                return EXIT_FAILURE;
                        }
 
-                       printf("line %u: %s at %llu\n",
-                              chg.info.offset, event, chg.timestamp_ns);
+                       printf("line %u: %s at %" PRIu64 "\n",
+                              chg.info.offset, event, (uint64_t)chg.timestamp_ns);
                }
        }
 
index ce365d2..cc7070c 100644 (file)
@@ -79,9 +79,4 @@
 #define __static_assert(expr, msg, ...) _Static_assert(expr, msg)
 #endif // static_assert
 
-#ifdef __GENKSYMS__
-/* genksyms gets confused by _Static_assert */
-#define _Static_assert(expr, ...)
-#endif
-
 #endif /* _LINUX_BUILD_BUG_H */
index 886802b..374c678 100644 (file)
@@ -251,6 +251,7 @@ struct kvm_hyperv_exit {
 #define KVM_EXIT_X86_RDMSR        29
 #define KVM_EXIT_X86_WRMSR        30
 #define KVM_EXIT_DIRTY_RING_FULL  31
+#define KVM_EXIT_AP_RESET_HOLD    32
 
 /* For KVM_EXIT_INTERNAL_ERROR */
 /* Emulate instruction failed. */
@@ -573,6 +574,7 @@ struct kvm_vapic_addr {
 #define KVM_MP_STATE_CHECK_STOP        6
 #define KVM_MP_STATE_OPERATING         7
 #define KVM_MP_STATE_LOAD              8
+#define KVM_MP_STATE_AP_RESET_HOLD     9
 
 struct kvm_mp_state {
        __u32 mp_state;
index 3c3f2bc..9970a28 100644 (file)
@@ -240,11 +240,6 @@ static int btf_parse_hdr(struct btf *btf)
        }
 
        meta_left = btf->raw_size - sizeof(*hdr);
-       if (!meta_left) {
-               pr_debug("BTF has no data\n");
-               return -EINVAL;
-       }
-
        if (meta_left < hdr->str_off + hdr->str_len) {
                pr_debug("Invalid BTF total size:%u\n", btf->raw_size);
                return -EINVAL;
index cfcdbd7..17465d4 100644 (file)
@@ -367,21 +367,13 @@ static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, boo
        return map;
 }
 
-static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
-                                    struct perf_evsel *evsel, int idx, int cpu,
-                                    int thread)
+static void perf_evsel__set_sid_idx(struct perf_evsel *evsel, int idx, int cpu, int thread)
 {
        struct perf_sample_id *sid = SID(evsel, cpu, thread);
 
        sid->idx = idx;
-       if (evlist->cpus && cpu >= 0)
-               sid->cpu = evlist->cpus->map[cpu];
-       else
-               sid->cpu = -1;
-       if (!evsel->system_wide && evlist->threads && thread >= 0)
-               sid->tid = perf_thread_map__pid(evlist->threads, thread);
-       else
-               sid->tid = -1;
+       sid->cpu = perf_cpu_map__cpu(evsel->cpus, cpu);
+       sid->tid = perf_thread_map__pid(evsel->threads, thread);
 }
 
 static struct perf_mmap*
@@ -500,8 +492,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
                        if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
                                                   fd) < 0)
                                return -1;
-                       perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
-                                                thread);
+                       perf_evsel__set_sid_idx(evsel, idx, cpu, thread);
                }
        }
 
index c8d4509..c70e9e0 100644 (file)
@@ -27,5 +27,5 @@ int main(int argc, char **argv)
        perf_cpu_map__put(cpus);
 
        __T_END;
-       return 0;
+       return tests_failed == 0 ? 0 : -1;
 }
index 6d8ebe0..e2ac0b7 100644 (file)
@@ -208,13 +208,13 @@ static int test_mmap_thread(void)
        char path[PATH_MAX];
        int id, err, pid, go_pipe[2];
        union perf_event *event;
-       char bf;
        int count = 0;
 
        snprintf(path, PATH_MAX, "%s/kernel/debug/tracing/events/syscalls/sys_enter_prctl/id",
                 sysfs__mountpoint());
 
        if (filename__read_int(path, &id)) {
+               tests_failed++;
                fprintf(stderr, "error: failed to get tracepoint id: %s\n", path);
                return -1;
        }
@@ -229,6 +229,7 @@ static int test_mmap_thread(void)
        pid = fork();
        if (!pid) {
                int i;
+               char bf;
 
                read(go_pipe[0], &bf, 1);
 
@@ -266,7 +267,7 @@ static int test_mmap_thread(void)
        perf_evlist__enable(evlist);
 
        /* kick the child and wait for it to finish */
-       write(go_pipe[1], &bf, 1);
+       write(go_pipe[1], "A", 1);
        waitpid(pid, NULL, 0);
 
        /*
@@ -409,5 +410,5 @@ int main(int argc, char **argv)
        test_mmap_cpus();
 
        __T_END;
-       return 0;
+       return tests_failed == 0 ? 0 : -1;
 }
index 135722a..0ad82d7 100644 (file)
@@ -131,5 +131,5 @@ int main(int argc, char **argv)
        test_stat_thread_enable();
 
        __T_END;
-       return 0;
+       return tests_failed == 0 ? 0 : -1;
 }
index 7dc4d6f..3844714 100644 (file)
@@ -27,5 +27,5 @@ int main(int argc, char **argv)
        perf_thread_map__put(threads);
 
        __T_END;
-       return 0;
+       return tests_failed == 0 ? 0 : -1;
 }
index 5f8d3ee..4bd3031 100644 (file)
@@ -2928,14 +2928,10 @@ int check(struct objtool_file *file)
        warnings += ret;
 
 out:
-       if (ret < 0) {
-               /*
-                *  Fatal error.  The binary is corrupt or otherwise broken in
-                *  some way, or objtool itself is broken.  Fail the kernel
-                *  build.
-                */
-               return ret;
-       }
-
+       /*
+        *  For now, don't fail the kernel build on fatal warnings.  These
+        *  errors are still fairly common due to the growing matrix of
+        *  supported toolchains and their recent pace of change.
+        */
        return 0;
 }
index be89c74..d8421e1 100644 (file)
@@ -380,8 +380,11 @@ static int read_symbols(struct elf *elf)
 
        symtab = find_section_by_name(elf, ".symtab");
        if (!symtab) {
-               WARN("missing symbol table");
-               return -1;
+               /*
+                * A missing symbol table is actually possible if it's an empty
+                * .o file.  This can happen for thunk_64.o.
+                */
+               return 0;
        }
 
        symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
@@ -448,6 +451,13 @@ static int read_symbols(struct elf *elf)
                list_add(&sym->list, entry);
                elf_hash_add(elf->symbol_hash, &sym->hash, sym->idx);
                elf_hash_add(elf->symbol_name_hash, &sym->name_hash, str_hash(sym->name));
+
+               /*
+                * Don't store empty STT_NOTYPE symbols in the rbtree.  They
+                * can exist within a function, confusing the sorting.
+                */
+               if (!sym->len)
+                       rb_erase(&sym->node, &sym->sec->symbol_tree);
        }
 
        if (stats)
index edacfa9..42dad4a 100644 (file)
@@ -186,6 +186,7 @@ struct output_option {
 
 enum {
        OUTPUT_TYPE_SYNTH = PERF_TYPE_MAX,
+       OUTPUT_TYPE_OTHER,
        OUTPUT_TYPE_MAX
 };
 
@@ -283,6 +284,18 @@ static struct {
 
                .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
        },
+
+       [OUTPUT_TYPE_OTHER] = {
+               .user_set = false,
+
+               .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
+                             PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
+                             PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
+                             PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
+                             PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
+
+               .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
+       },
 };
 
 struct evsel_script {
@@ -343,8 +356,11 @@ static inline int output_type(unsigned int type)
        case PERF_TYPE_SYNTH:
                return OUTPUT_TYPE_SYNTH;
        default:
-               return type;
+               if (type < PERF_TYPE_MAX)
+                       return type;
        }
+
+       return OUTPUT_TYPE_OTHER;
 }
 
 static inline unsigned int attr_type(unsigned int type)
index 65c4ff6..e6b6181 100644 (file)
@@ -39,7 +39,7 @@
    Copyright (C) 2018 Red Hat, Inc., Arnaldo Carvalho de Melo <acme@redhat.com>
 */
 
-#include <bpf/bpf.h>
+#include <bpf.h>
 
 #define NSEC_PER_SEC   1000000000L
 
index 249dfe4..ebebd35 100755 (executable)
@@ -9,31 +9,29 @@ perf stat -a true > /dev/null 2>&1 || exit 2
 
 test_global_aggr()
 {
-       local cyc
-
        perf stat -a --no-big-num -e cycles,instructions sleep 1  2>&1 | \
        grep -e cycles -e instructions | \
        while read num evt hash ipc rest
        do
                # skip not counted events
-               if [[ $num == "<not" ]]; then
+               if [ "$num" = "<not" ]; then
                        continue
                fi
 
                # save cycles count
-               if [[ $evt == "cycles" ]]; then
+               if [ "$evt" = "cycles" ]; then
                        cyc=$num
                        continue
                fi
 
                # skip if no cycles
-               if [[ -z $cyc ]]; then
+               if [ -z "$cyc" ]; then
                        continue
                fi
 
                # use printf for rounding and a leading zero
-               local res=`printf "%.2f" $(echo "scale=6; $num / $cyc" | bc -q)`
-               if [[ $ipc != $res ]]; then
+               res=`printf "%.2f" $(echo "scale=6; $num / $cyc" | bc -q)`
+               if [ "$ipc" != "$res" ]; then
                        echo "IPC is different: $res != $ipc  ($num / $cyc)"
                        exit 1
                fi
@@ -42,32 +40,32 @@ test_global_aggr()
 
 test_no_aggr()
 {
-       declare -A results
-
        perf stat -a -A --no-big-num -e cycles,instructions sleep 1  2>&1 | \
        grep ^CPU | \
        while read cpu num evt hash ipc rest
        do
                # skip not counted events
-               if [[ $num == "<not" ]]; then
+               if [ "$num" = "<not" ]; then
                        continue
                fi
 
                # save cycles count
-               if [[ $evt == "cycles" ]]; then
-                       results[$cpu]=$num
+               if [ "$evt" = "cycles" ]; then
+                       results="$results $cpu:$num"
                        continue
                fi
 
+               cyc=${results##* $cpu:}
+               cyc=${cyc%% *}
+
                # skip if no cycles
-               local cyc=${results[$cpu]}
-               if [[ -z $cyc ]]; then
+               if [ -z "$cyc" ]; then
                        continue
                fi
 
                # use printf for rounding and a leading zero
-               local res=`printf "%.2f" $(echo "scale=6; $num / $cyc" | bc -q)`
-               if [[ $ipc != $res ]]; then
+               res=`printf "%.2f" $(echo "scale=6; $num / $cyc" | bc -q)`
+               if [ "$ipc" != "$res" ]; then
                        echo "IPC is different for $cpu: $res != $ipc  ($num / $cyc)"
                        exit 1
                fi
index 062383e..c4ed3dc 100644 (file)
@@ -3323,6 +3323,14 @@ int perf_session__write_header(struct perf_session *session,
        attr_offset = lseek(ff.fd, 0, SEEK_CUR);
 
        evlist__for_each_entry(evlist, evsel) {
+               if (evsel->core.attr.size < sizeof(evsel->core.attr)) {
+                       /*
+                        * We are likely in "perf inject" and have read
+                        * from an older file. Update attr size so that
+                        * reader gets the right offset to the ids.
+                        */
+                       evsel->core.attr.size = sizeof(evsel->core.attr);
+               }
                f_attr = (struct perf_file_attr){
                        .attr = evsel->core.attr,
                        .ids  = {
index f841f35..1e9d3f9 100644 (file)
@@ -2980,7 +2980,7 @@ int machines__for_each_thread(struct machines *machines,
 
 pid_t machine__get_current_tid(struct machine *machine, int cpu)
 {
-       int nr_cpus = min(machine->env->nr_cpus_online, MAX_NR_CPUS);
+       int nr_cpus = min(machine->env->nr_cpus_avail, MAX_NR_CPUS);
 
        if (cpu < 0 || cpu >= nr_cpus || !machine->current_tid)
                return -1;
@@ -2992,7 +2992,7 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
                             pid_t tid)
 {
        struct thread *thread;
-       int nr_cpus = min(machine->env->nr_cpus_online, MAX_NR_CPUS);
+       int nr_cpus = min(machine->env->nr_cpus_avail, MAX_NR_CPUS);
 
        if (cpu < 0)
                return -EINVAL;
index ee94d3e..e6d3452 100644 (file)
@@ -162,6 +162,14 @@ static bool contains_event(struct evsel **metric_events, int num_events,
        return false;
 }
 
+static bool evsel_same_pmu(struct evsel *ev1, struct evsel *ev2)
+{
+       if (!ev1->pmu_name || !ev2->pmu_name)
+               return false;
+
+       return !strcmp(ev1->pmu_name, ev2->pmu_name);
+}
+
 /**
  * Find a group of events in perf_evlist that correspond to those from a parsed
  * metric expression. Note, as find_evsel_group is called in the same order as
@@ -280,8 +288,7 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
                         */
                        if (!has_constraint &&
                            ev->leader != metric_events[i]->leader &&
-                           !strcmp(ev->leader->pmu_name,
-                                   metric_events[i]->leader->pmu_name))
+                           evsel_same_pmu(ev->leader, metric_events[i]->leader))
                                break;
                        if (!strcmp(metric_events[i]->name, ev->name)) {
                                set_bit(ev->idx, evlist_used);
@@ -766,7 +773,6 @@ int __weak arch_get_runtimeparam(struct pmu_event *pe __maybe_unused)
 struct metricgroup_add_iter_data {
        struct list_head *metric_list;
        const char *metric;
-       struct metric **m;
        struct expr_ids *ids;
        int *ret;
        bool *has_match;
@@ -1058,12 +1064,13 @@ static int metricgroup__add_metric_sys_event_iter(struct pmu_event *pe,
                                                  void *data)
 {
        struct metricgroup_add_iter_data *d = data;
+       struct metric *m = NULL;
        int ret;
 
        if (!match_pe_metric(pe, d->metric))
                return 0;
 
-       ret = add_metric(d->metric_list, pe, d->metric_no_group, d->m, NULL, d->ids);
+       ret = add_metric(d->metric_list, pe, d->metric_no_group, &m, NULL, d->ids);
        if (ret)
                return ret;
 
@@ -1114,7 +1121,6 @@ static int metricgroup__add_metric(const char *metric, bool metric_no_group,
                                .metric_list = &list,
                                .metric = metric,
                                .metric_no_group = metric_no_group,
-                               .m = &m,
                                .ids = &ids,
                                .has_match = &has_match,
                                .ret = &ret,
index 50ff979..25adbcc 100644 (file)
@@ -2404,7 +2404,7 @@ int perf_session__cpu_bitmap(struct perf_session *session,
 {
        int i, err = -1;
        struct perf_cpu_map *map;
-       int nr_cpus = min(session->header.env.nr_cpus_online, MAX_NR_CPUS);
+       int nr_cpus = min(session->header.env.nr_cpus_avail, MAX_NR_CPUS);
 
        for (i = 0; i < PERF_TYPE_MAX; ++i) {
                struct evsel *evsel;
index 9012651..12eafd1 100644 (file)
@@ -8,6 +8,7 @@
 #include "evlist.h"
 #include "expr.h"
 #include "metricgroup.h"
+#include "cgroup.h"
 #include <linux/zalloc.h>
 
 /*
@@ -28,6 +29,7 @@ struct saved_value {
        enum stat_type type;
        int ctx;
        int cpu;
+       struct cgroup *cgrp;
        struct runtime_stat *stat;
        struct stats stats;
        u64 metric_total;
@@ -57,6 +59,9 @@ static int saved_value_cmp(struct rb_node *rb_node, const void *entry)
        if (a->ctx != b->ctx)
                return a->ctx - b->ctx;
 
+       if (a->cgrp != b->cgrp)
+               return (char *)a->cgrp < (char *)b->cgrp ? -1 : +1;
+
        if (a->evsel == NULL && b->evsel == NULL) {
                if (a->stat == b->stat)
                        return 0;
@@ -100,7 +105,8 @@ static struct saved_value *saved_value_lookup(struct evsel *evsel,
                                              bool create,
                                              enum stat_type type,
                                              int ctx,
-                                             struct runtime_stat *st)
+                                             struct runtime_stat *st,
+                                             struct cgroup *cgrp)
 {
        struct rblist *rblist;
        struct rb_node *nd;
@@ -110,10 +116,15 @@ static struct saved_value *saved_value_lookup(struct evsel *evsel,
                .type = type,
                .ctx = ctx,
                .stat = st,
+               .cgrp = cgrp,
        };
 
        rblist = &st->value_list;
 
+       /* don't use context info for clock events */
+       if (type == STAT_NSECS)
+               dm.ctx = 0;
+
        nd = rblist__find(rblist, &dm);
        if (nd)
                return container_of(nd, struct saved_value, rb_node);
@@ -191,12 +202,18 @@ void perf_stat__reset_shadow_per_stat(struct runtime_stat *st)
        reset_stat(st);
 }
 
+struct runtime_stat_data {
+       int ctx;
+       struct cgroup *cgrp;
+};
+
 static void update_runtime_stat(struct runtime_stat *st,
                                enum stat_type type,
-                               int ctx, int cpu, u64 count)
+                               int cpu, u64 count,
+                               struct runtime_stat_data *rsd)
 {
-       struct saved_value *v = saved_value_lookup(NULL, cpu, true,
-                                                  type, ctx, st);
+       struct saved_value *v = saved_value_lookup(NULL, cpu, true, type,
+                                                  rsd->ctx, st, rsd->cgrp);
 
        if (v)
                update_stats(&v->stats, count);
@@ -210,82 +227,86 @@ static void update_runtime_stat(struct runtime_stat *st,
 void perf_stat__update_shadow_stats(struct evsel *counter, u64 count,
                                    int cpu, struct runtime_stat *st)
 {
-       int ctx = evsel_context(counter);
        u64 count_ns = count;
        struct saved_value *v;
+       struct runtime_stat_data rsd = {
+               .ctx = evsel_context(counter),
+               .cgrp = counter->cgrp,
+       };
 
        count *= counter->scale;
 
        if (evsel__is_clock(counter))
-               update_runtime_stat(st, STAT_NSECS, 0, cpu, count_ns);
+               update_runtime_stat(st, STAT_NSECS, cpu, count_ns, &rsd);
        else if (evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
-               update_runtime_stat(st, STAT_CYCLES, ctx, cpu, count);
+               update_runtime_stat(st, STAT_CYCLES, cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
-               update_runtime_stat(st, STAT_CYCLES_IN_TX, ctx, cpu, count);
+               update_runtime_stat(st, STAT_CYCLES_IN_TX, cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TRANSACTION_START))
-               update_runtime_stat(st, STAT_TRANSACTION, ctx, cpu, count);
+               update_runtime_stat(st, STAT_TRANSACTION, cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, ELISION_START))
-               update_runtime_stat(st, STAT_ELISION, ctx, cpu, count);
+               update_runtime_stat(st, STAT_ELISION, cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS))
                update_runtime_stat(st, STAT_TOPDOWN_TOTAL_SLOTS,
-                                   ctx, cpu, count);
+                                   cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED))
                update_runtime_stat(st, STAT_TOPDOWN_SLOTS_ISSUED,
-                                   ctx, cpu, count);
+                                   cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_RETIRED))
                update_runtime_stat(st, STAT_TOPDOWN_SLOTS_RETIRED,
-                                   ctx, cpu, count);
+                                   cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_BUBBLES))
                update_runtime_stat(st, STAT_TOPDOWN_FETCH_BUBBLES,
-                                   ctx, cpu, count);
+                                   cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES))
                update_runtime_stat(st, STAT_TOPDOWN_RECOVERY_BUBBLES,
-                                   ctx, cpu, count);
+                                   cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_RETIRING))
                update_runtime_stat(st, STAT_TOPDOWN_RETIRING,
-                                   ctx, cpu, count);
+                                   cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_BAD_SPEC))
                update_runtime_stat(st, STAT_TOPDOWN_BAD_SPEC,
-                                   ctx, cpu, count);
+                                   cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_FE_BOUND))
                update_runtime_stat(st, STAT_TOPDOWN_FE_BOUND,
-                                   ctx, cpu, count);
+                                   cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_BE_BOUND))
                update_runtime_stat(st, STAT_TOPDOWN_BE_BOUND,
-                                   ctx, cpu, count);
+                                   cpu, count, &rsd);
        else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
                update_runtime_stat(st, STAT_STALLED_CYCLES_FRONT,
-                                   ctx, cpu, count);
+                                   cpu, count, &rsd);
        else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
                update_runtime_stat(st, STAT_STALLED_CYCLES_BACK,
-                                   ctx, cpu, count);
+                                   cpu, count, &rsd);
        else if (evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
-               update_runtime_stat(st, STAT_BRANCHES, ctx, cpu, count);
+               update_runtime_stat(st, STAT_BRANCHES, cpu, count, &rsd);
        else if (evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
-               update_runtime_stat(st, STAT_CACHEREFS, ctx, cpu, count);
+               update_runtime_stat(st, STAT_CACHEREFS, cpu, count, &rsd);
        else if (evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
-               update_runtime_stat(st, STAT_L1_DCACHE, ctx, cpu, count);
+               update_runtime_stat(st, STAT_L1_DCACHE, cpu, count, &rsd);
        else if (evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
-               update_runtime_stat(st, STAT_L1_ICACHE, ctx, cpu, count);
+               update_runtime_stat(st, STAT_L1_ICACHE, cpu, count, &rsd);
        else if (evsel__match(counter, HW_CACHE, HW_CACHE_LL))
-               update_runtime_stat(st, STAT_LL_CACHE, ctx, cpu, count);
+               update_runtime_stat(st, STAT_LL_CACHE, cpu, count, &rsd);
        else if (evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
-               update_runtime_stat(st, STAT_DTLB_CACHE, ctx, cpu, count);
+               update_runtime_stat(st, STAT_DTLB_CACHE, cpu, count, &rsd);
        else if (evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
-               update_runtime_stat(st, STAT_ITLB_CACHE, ctx, cpu, count);
+               update_runtime_stat(st, STAT_ITLB_CACHE, cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, SMI_NUM))
-               update_runtime_stat(st, STAT_SMI_NUM, ctx, cpu, count);
+               update_runtime_stat(st, STAT_SMI_NUM, cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, APERF))
-               update_runtime_stat(st, STAT_APERF, ctx, cpu, count);
+               update_runtime_stat(st, STAT_APERF, cpu, count, &rsd);
 
        if (counter->collect_stat) {
-               v = saved_value_lookup(counter, cpu, true, STAT_NONE, 0, st);
+               v = saved_value_lookup(counter, cpu, true, STAT_NONE, 0, st,
+                                      rsd.cgrp);
                update_stats(&v->stats, count);
                if (counter->metric_leader)
                        v->metric_total += count;
        } else if (counter->metric_leader) {
                v = saved_value_lookup(counter->metric_leader,
-                                      cpu, true, STAT_NONE, 0, st);
+                                      cpu, true, STAT_NONE, 0, st, rsd.cgrp);
                v->metric_total += count;
                v->metric_other++;
        }
@@ -422,11 +443,12 @@ void perf_stat__collect_metric_expr(struct evlist *evsel_list)
 }
 
 static double runtime_stat_avg(struct runtime_stat *st,
-                              enum stat_type type, int ctx, int cpu)
+                              enum stat_type type, int cpu,
+                              struct runtime_stat_data *rsd)
 {
        struct saved_value *v;
 
-       v = saved_value_lookup(NULL, cpu, false, type, ctx, st);
+       v = saved_value_lookup(NULL, cpu, false, type, rsd->ctx, st, rsd->cgrp);
        if (!v)
                return 0.0;
 
@@ -434,11 +456,12 @@ static double runtime_stat_avg(struct runtime_stat *st,
 }
 
 static double runtime_stat_n(struct runtime_stat *st,
-                            enum stat_type type, int ctx, int cpu)
+                            enum stat_type type, int cpu,
+                            struct runtime_stat_data *rsd)
 {
        struct saved_value *v;
 
-       v = saved_value_lookup(NULL, cpu, false, type, ctx, st);
+       v = saved_value_lookup(NULL, cpu, false, type, rsd->ctx, st, rsd->cgrp);
        if (!v)
                return 0.0;
 
@@ -446,16 +469,15 @@ static double runtime_stat_n(struct runtime_stat *st,
 }
 
 static void print_stalled_cycles_frontend(struct perf_stat_config *config,
-                                         int cpu,
-                                         struct evsel *evsel, double avg,
+                                         int cpu, double avg,
                                          struct perf_stat_output_ctx *out,
-                                         struct runtime_stat *st)
+                                         struct runtime_stat *st,
+                                         struct runtime_stat_data *rsd)
 {
        double total, ratio = 0.0;
        const char *color;
-       int ctx = evsel_context(evsel);
 
-       total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
+       total = runtime_stat_avg(st, STAT_CYCLES, cpu, rsd);
 
        if (total)
                ratio = avg / total * 100.0;
@@ -470,16 +492,15 @@ static void print_stalled_cycles_frontend(struct perf_stat_config *config,
 }
 
 static void print_stalled_cycles_backend(struct perf_stat_config *config,
-                                        int cpu,
-                                        struct evsel *evsel, double avg,
+                                        int cpu, double avg,
                                         struct perf_stat_output_ctx *out,
-                                        struct runtime_stat *st)
+                                        struct runtime_stat *st,
+                                        struct runtime_stat_data *rsd)
 {
        double total, ratio = 0.0;
        const char *color;
-       int ctx = evsel_context(evsel);
 
-       total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
+       total = runtime_stat_avg(st, STAT_CYCLES, cpu, rsd);
 
        if (total)
                ratio = avg / total * 100.0;
@@ -490,17 +511,15 @@ static void print_stalled_cycles_backend(struct perf_stat_config *config,
 }
 
 static void print_branch_misses(struct perf_stat_config *config,
-                               int cpu,
-                               struct evsel *evsel,
-                               double avg,
+                               int cpu, double avg,
                                struct perf_stat_output_ctx *out,
-                               struct runtime_stat *st)
+                               struct runtime_stat *st,
+                               struct runtime_stat_data *rsd)
 {
        double total, ratio = 0.0;
        const char *color;
-       int ctx = evsel_context(evsel);
 
-       total = runtime_stat_avg(st, STAT_BRANCHES, ctx, cpu);
+       total = runtime_stat_avg(st, STAT_BRANCHES, cpu, rsd);
 
        if (total)
                ratio = avg / total * 100.0;
@@ -511,18 +530,15 @@ static void print_branch_misses(struct perf_stat_config *config,
 }
 
 static void print_l1_dcache_misses(struct perf_stat_config *config,
-                                  int cpu,
-                                  struct evsel *evsel,
-                                  double avg,
+                                  int cpu, double avg,
                                   struct perf_stat_output_ctx *out,
-                                  struct runtime_stat *st)
-
+                                  struct runtime_stat *st,
+                                  struct runtime_stat_data *rsd)
 {
        double total, ratio = 0.0;
        const char *color;
-       int ctx = evsel_context(evsel);
 
-       total = runtime_stat_avg(st, STAT_L1_DCACHE, ctx, cpu);
+       total = runtime_stat_avg(st, STAT_L1_DCACHE, cpu, rsd);
 
        if (total)
                ratio = avg / total * 100.0;
@@ -533,18 +549,15 @@ static void print_l1_dcache_misses(struct perf_stat_config *config,
 }
 
 static void print_l1_icache_misses(struct perf_stat_config *config,
-                                  int cpu,
-                                  struct evsel *evsel,
-                                  double avg,
+                                  int cpu, double avg,
                                   struct perf_stat_output_ctx *out,
-                                  struct runtime_stat *st)
-
+                                  struct runtime_stat *st,
+                                  struct runtime_stat_data *rsd)
 {
        double total, ratio = 0.0;
        const char *color;
-       int ctx = evsel_context(evsel);
 
-       total = runtime_stat_avg(st, STAT_L1_ICACHE, ctx, cpu);
+       total = runtime_stat_avg(st, STAT_L1_ICACHE, cpu, rsd);
 
        if (total)
                ratio = avg / total * 100.0;
@@ -554,17 +567,15 @@ static void print_l1_icache_misses(struct perf_stat_config *config,
 }
 
 static void print_dtlb_cache_misses(struct perf_stat_config *config,
-                                   int cpu,
-                                   struct evsel *evsel,
-                                   double avg,
+                                   int cpu, double avg,
                                    struct perf_stat_output_ctx *out,
-                                   struct runtime_stat *st)
+                                   struct runtime_stat *st,
+                                   struct runtime_stat_data *rsd)
 {
        double total, ratio = 0.0;
        const char *color;
-       int ctx = evsel_context(evsel);
 
-       total = runtime_stat_avg(st, STAT_DTLB_CACHE, ctx, cpu);
+       total = runtime_stat_avg(st, STAT_DTLB_CACHE, cpu, rsd);
 
        if (total)
                ratio = avg / total * 100.0;
@@ -574,17 +585,15 @@ static void print_dtlb_cache_misses(struct perf_stat_config *config,
 }
 
 static void print_itlb_cache_misses(struct perf_stat_config *config,
-                                   int cpu,
-                                   struct evsel *evsel,
-                                   double avg,
+                                   int cpu, double avg,
                                    struct perf_stat_output_ctx *out,
-                                   struct runtime_stat *st)
+                                   struct runtime_stat *st,
+                                   struct runtime_stat_data *rsd)
 {
        double total, ratio = 0.0;
        const char *color;
-       int ctx = evsel_context(evsel);
 
-       total = runtime_stat_avg(st, STAT_ITLB_CACHE, ctx, cpu);
+       total = runtime_stat_avg(st, STAT_ITLB_CACHE, cpu, rsd);
 
        if (total)
                ratio = avg / total * 100.0;
@@ -594,17 +603,15 @@ static void print_itlb_cache_misses(struct perf_stat_config *config,
 }
 
 static void print_ll_cache_misses(struct perf_stat_config *config,
-                                 int cpu,
-                                 struct evsel *evsel,
-                                 double avg,
+                                 int cpu, double avg,
                                  struct perf_stat_output_ctx *out,
-                                 struct runtime_stat *st)
+                                 struct runtime_stat *st,
+                                 struct runtime_stat_data *rsd)
 {
        double total, ratio = 0.0;
        const char *color;
-       int ctx = evsel_context(evsel);
 
-       total = runtime_stat_avg(st, STAT_LL_CACHE, ctx, cpu);
+       total = runtime_stat_avg(st, STAT_LL_CACHE, cpu, rsd);
 
        if (total)
                ratio = avg / total * 100.0;
@@ -662,56 +669,61 @@ static double sanitize_val(double x)
        return x;
 }
 
-static double td_total_slots(int ctx, int cpu, struct runtime_stat *st)
+static double td_total_slots(int cpu, struct runtime_stat *st,
+                            struct runtime_stat_data *rsd)
 {
-       return runtime_stat_avg(st, STAT_TOPDOWN_TOTAL_SLOTS, ctx, cpu);
+       return runtime_stat_avg(st, STAT_TOPDOWN_TOTAL_SLOTS, cpu, rsd);
 }
 
-static double td_bad_spec(int ctx, int cpu, struct runtime_stat *st)
+static double td_bad_spec(int cpu, struct runtime_stat *st,
+                         struct runtime_stat_data *rsd)
 {
        double bad_spec = 0;
        double total_slots;
        double total;
 
-       total = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_ISSUED, ctx, cpu) -
-               runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED, ctx, cpu) +
-               runtime_stat_avg(st, STAT_TOPDOWN_RECOVERY_BUBBLES, ctx, cpu);
+       total = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_ISSUED, cpu, rsd) -
+               runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED, cpu, rsd) +
+               runtime_stat_avg(st, STAT_TOPDOWN_RECOVERY_BUBBLES, cpu, rsd);
 
-       total_slots = td_total_slots(ctx, cpu, st);
+       total_slots = td_total_slots(cpu, st, rsd);
        if (total_slots)
                bad_spec = total / total_slots;
        return sanitize_val(bad_spec);
 }
 
-static double td_retiring(int ctx, int cpu, struct runtime_stat *st)
+static double td_retiring(int cpu, struct runtime_stat *st,
+                         struct runtime_stat_data *rsd)
 {
        double retiring = 0;
-       double total_slots = td_total_slots(ctx, cpu, st);
+       double total_slots = td_total_slots(cpu, st, rsd);
        double ret_slots = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED,
-                                           ctx, cpu);
+                                           cpu, rsd);
 
        if (total_slots)
                retiring = ret_slots / total_slots;
        return retiring;
 }
 
-static double td_fe_bound(int ctx, int cpu, struct runtime_stat *st)
+static double td_fe_bound(int cpu, struct runtime_stat *st,
+                         struct runtime_stat_data *rsd)
 {
        double fe_bound = 0;
-       double total_slots = td_total_slots(ctx, cpu, st);
+       double total_slots = td_total_slots(cpu, st, rsd);
        double fetch_bub = runtime_stat_avg(st, STAT_TOPDOWN_FETCH_BUBBLES,
-                                           ctx, cpu);
+                                           cpu, rsd);
 
        if (total_slots)
                fe_bound = fetch_bub / total_slots;
        return fe_bound;
 }
 
-static double td_be_bound(int ctx, int cpu, struct runtime_stat *st)
+static double td_be_bound(int cpu, struct runtime_stat *st,
+                         struct runtime_stat_data *rsd)
 {
-       double sum = (td_fe_bound(ctx, cpu, st) +
-                     td_bad_spec(ctx, cpu, st) +
-                     td_retiring(ctx, cpu, st));
+       double sum = (td_fe_bound(cpu, st, rsd) +
+                     td_bad_spec(cpu, st, rsd) +
+                     td_retiring(cpu, st, rsd));
        if (sum == 0)
                return 0;
        return sanitize_val(1.0 - sum);
@@ -722,15 +734,15 @@ static double td_be_bound(int ctx, int cpu, struct runtime_stat *st)
  * the ratios we need to recreate the sum.
  */
 
-static double td_metric_ratio(int ctx, int cpu,
-                             enum stat_type type,
-                             struct runtime_stat *stat)
+static double td_metric_ratio(int cpu, enum stat_type type,
+                             struct runtime_stat *stat,
+                             struct runtime_stat_data *rsd)
 {
-       double sum = runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, ctx, cpu) +
-               runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, ctx, cpu) +
-               runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, ctx, cpu) +
-               runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, ctx, cpu);
-       double d = runtime_stat_avg(stat, type, ctx, cpu);
+       double sum = runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, cpu, rsd) +
+               runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, cpu, rsd) +
+               runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, cpu, rsd) +
+               runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, cpu, rsd);
+       double d = runtime_stat_avg(stat, type, cpu, rsd);
 
        if (sum)
                return d / sum;
@@ -742,34 +754,33 @@ static double td_metric_ratio(int ctx, int cpu,
  * We allow two missing.
  */
 
-static bool full_td(int ctx, int cpu,
-                   struct runtime_stat *stat)
+static bool full_td(int cpu, struct runtime_stat *stat,
+                   struct runtime_stat_data *rsd)
 {
        int c = 0;
 
-       if (runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, ctx, cpu) > 0)
+       if (runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, cpu, rsd) > 0)
                c++;
-       if (runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, ctx, cpu) > 0)
+       if (runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, cpu, rsd) > 0)
                c++;
-       if (runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, ctx, cpu) > 0)
+       if (runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, cpu, rsd) > 0)
                c++;
-       if (runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, ctx, cpu) > 0)
+       if (runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, cpu, rsd) > 0)
                c++;
        return c >= 2;
 }
 
-static void print_smi_cost(struct perf_stat_config *config,
-                          int cpu, struct evsel *evsel,
+static void print_smi_cost(struct perf_stat_config *config, int cpu,
                           struct perf_stat_output_ctx *out,
-                          struct runtime_stat *st)
+                          struct runtime_stat *st,
+                          struct runtime_stat_data *rsd)
 {
        double smi_num, aperf, cycles, cost = 0.0;
-       int ctx = evsel_context(evsel);
        const char *color = NULL;
 
-       smi_num = runtime_stat_avg(st, STAT_SMI_NUM, ctx, cpu);
-       aperf = runtime_stat_avg(st, STAT_APERF, ctx, cpu);
-       cycles = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
+       smi_num = runtime_stat_avg(st, STAT_SMI_NUM, cpu, rsd);
+       aperf = runtime_stat_avg(st, STAT_APERF, cpu, rsd);
+       cycles = runtime_stat_avg(st, STAT_CYCLES, cpu, rsd);
 
        if ((cycles == 0) || (aperf == 0))
                return;
@@ -804,7 +815,8 @@ static int prepare_metric(struct evsel **metric_events,
                        scale = 1e-9;
                } else {
                        v = saved_value_lookup(metric_events[i], cpu, false,
-                                              STAT_NONE, 0, st);
+                                              STAT_NONE, 0, st,
+                                              metric_events[i]->cgrp);
                        if (!v)
                                break;
                        stats = &v->stats;
@@ -930,12 +942,15 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
        print_metric_t print_metric = out->print_metric;
        double total, ratio = 0.0, total2;
        const char *color = NULL;
-       int ctx = evsel_context(evsel);
+       struct runtime_stat_data rsd = {
+               .ctx = evsel_context(evsel),
+               .cgrp = evsel->cgrp,
+       };
        struct metric_event *me;
        int num = 1;
 
        if (evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
-               total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
+               total = runtime_stat_avg(st, STAT_CYCLES, cpu, &rsd);
 
                if (total) {
                        ratio = avg / total;
@@ -945,12 +960,11 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                        print_metric(config, ctxp, NULL, NULL, "insn per cycle", 0);
                }
 
-               total = runtime_stat_avg(st, STAT_STALLED_CYCLES_FRONT,
-                                        ctx, cpu);
+               total = runtime_stat_avg(st, STAT_STALLED_CYCLES_FRONT, cpu, &rsd);
 
                total = max(total, runtime_stat_avg(st,
                                                    STAT_STALLED_CYCLES_BACK,
-                                                   ctx, cpu));
+                                                   cpu, &rsd));
 
                if (total && avg) {
                        out->new_line(config, ctxp);
@@ -960,8 +974,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                                        ratio);
                }
        } else if (evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
-               if (runtime_stat_n(st, STAT_BRANCHES, ctx, cpu) != 0)
-                       print_branch_misses(config, cpu, evsel, avg, out, st);
+               if (runtime_stat_n(st, STAT_BRANCHES, cpu, &rsd) != 0)
+                       print_branch_misses(config, cpu, avg, out, st, &rsd);
                else
                        print_metric(config, ctxp, NULL, NULL, "of all branches", 0);
        } else if (
@@ -970,8 +984,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                                        ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
                                         ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
 
-               if (runtime_stat_n(st, STAT_L1_DCACHE, ctx, cpu) != 0)
-                       print_l1_dcache_misses(config, cpu, evsel, avg, out, st);
+               if (runtime_stat_n(st, STAT_L1_DCACHE, cpu, &rsd) != 0)
+                       print_l1_dcache_misses(config, cpu, avg, out, st, &rsd);
                else
                        print_metric(config, ctxp, NULL, NULL, "of all L1-dcache accesses", 0);
        } else if (
@@ -980,8 +994,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                                        ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
                                         ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
 
-               if (runtime_stat_n(st, STAT_L1_ICACHE, ctx, cpu) != 0)
-                       print_l1_icache_misses(config, cpu, evsel, avg, out, st);
+               if (runtime_stat_n(st, STAT_L1_ICACHE, cpu, &rsd) != 0)
+                       print_l1_icache_misses(config, cpu, avg, out, st, &rsd);
                else
                        print_metric(config, ctxp, NULL, NULL, "of all L1-icache accesses", 0);
        } else if (
@@ -990,8 +1004,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                                        ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
                                         ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
 
-               if (runtime_stat_n(st, STAT_DTLB_CACHE, ctx, cpu) != 0)
-                       print_dtlb_cache_misses(config, cpu, evsel, avg, out, st);
+               if (runtime_stat_n(st, STAT_DTLB_CACHE, cpu, &rsd) != 0)
+                       print_dtlb_cache_misses(config, cpu, avg, out, st, &rsd);
                else
                        print_metric(config, ctxp, NULL, NULL, "of all dTLB cache accesses", 0);
        } else if (
@@ -1000,8 +1014,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                                        ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
                                         ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
 
-               if (runtime_stat_n(st, STAT_ITLB_CACHE, ctx, cpu) != 0)
-                       print_itlb_cache_misses(config, cpu, evsel, avg, out, st);
+               if (runtime_stat_n(st, STAT_ITLB_CACHE, cpu, &rsd) != 0)
+                       print_itlb_cache_misses(config, cpu, avg, out, st, &rsd);
                else
                        print_metric(config, ctxp, NULL, NULL, "of all iTLB cache accesses", 0);
        } else if (
@@ -1010,27 +1024,27 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                                        ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
                                         ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {
 
-               if (runtime_stat_n(st, STAT_LL_CACHE, ctx, cpu) != 0)
-                       print_ll_cache_misses(config, cpu, evsel, avg, out, st);
+               if (runtime_stat_n(st, STAT_LL_CACHE, cpu, &rsd) != 0)
+                       print_ll_cache_misses(config, cpu, avg, out, st, &rsd);
                else
                        print_metric(config, ctxp, NULL, NULL, "of all LL-cache accesses", 0);
        } else if (evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
-               total = runtime_stat_avg(st, STAT_CACHEREFS, ctx, cpu);
+               total = runtime_stat_avg(st, STAT_CACHEREFS, cpu, &rsd);
 
                if (total)
                        ratio = avg * 100 / total;
 
-               if (runtime_stat_n(st, STAT_CACHEREFS, ctx, cpu) != 0)
+               if (runtime_stat_n(st, STAT_CACHEREFS, cpu, &rsd) != 0)
                        print_metric(config, ctxp, NULL, "%8.3f %%",
                                     "of all cache refs", ratio);
                else
                        print_metric(config, ctxp, NULL, NULL, "of all cache refs", 0);
        } else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
-               print_stalled_cycles_frontend(config, cpu, evsel, avg, out, st);
+               print_stalled_cycles_frontend(config, cpu, avg, out, st, &rsd);
        } else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
-               print_stalled_cycles_backend(config, cpu, evsel, avg, out, st);
+               print_stalled_cycles_backend(config, cpu, avg, out, st, &rsd);
        } else if (evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
-               total = runtime_stat_avg(st, STAT_NSECS, 0, cpu);
+               total = runtime_stat_avg(st, STAT_NSECS, cpu, &rsd);
 
                if (total) {
                        ratio = avg / total;
@@ -1039,7 +1053,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                        print_metric(config, ctxp, NULL, NULL, "Ghz", 0);
                }
        } else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) {
-               total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
+               total = runtime_stat_avg(st, STAT_CYCLES, cpu, &rsd);
 
                if (total)
                        print_metric(config, ctxp, NULL,
@@ -1049,8 +1063,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                        print_metric(config, ctxp, NULL, NULL, "transactional cycles",
                                     0);
        } else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) {
-               total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
-               total2 = runtime_stat_avg(st, STAT_CYCLES_IN_TX, ctx, cpu);
+               total = runtime_stat_avg(st, STAT_CYCLES, cpu, &rsd);
+               total2 = runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu, &rsd);
 
                if (total2 < avg)
                        total2 = avg;
@@ -1060,21 +1074,19 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                else
                        print_metric(config, ctxp, NULL, NULL, "aborted cycles", 0);
        } else if (perf_stat_evsel__is(evsel, TRANSACTION_START)) {
-               total = runtime_stat_avg(st, STAT_CYCLES_IN_TX,
-                                        ctx, cpu);
+               total = runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu, &rsd);
 
                if (avg)
                        ratio = total / avg;
 
-               if (runtime_stat_n(st, STAT_CYCLES_IN_TX, ctx, cpu) != 0)
+               if (runtime_stat_n(st, STAT_CYCLES_IN_TX, cpu, &rsd) != 0)
                        print_metric(config, ctxp, NULL, "%8.0f",
                                     "cycles / transaction", ratio);
                else
                        print_metric(config, ctxp, NULL, NULL, "cycles / transaction",
                                      0);
        } else if (perf_stat_evsel__is(evsel, ELISION_START)) {
-               total = runtime_stat_avg(st, STAT_CYCLES_IN_TX,
-                                        ctx, cpu);
+               total = runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu, &rsd);
 
                if (avg)
                        ratio = total / avg;
@@ -1087,28 +1099,28 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                else
                        print_metric(config, ctxp, NULL, NULL, "CPUs utilized", 0);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) {
-               double fe_bound = td_fe_bound(ctx, cpu, st);
+               double fe_bound = td_fe_bound(cpu, st, &rsd);
 
                if (fe_bound > 0.2)
                        color = PERF_COLOR_RED;
                print_metric(config, ctxp, color, "%8.1f%%", "frontend bound",
                                fe_bound * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_RETIRED)) {
-               double retiring = td_retiring(ctx, cpu, st);
+               double retiring = td_retiring(cpu, st, &rsd);
 
                if (retiring > 0.7)
                        color = PERF_COLOR_GREEN;
                print_metric(config, ctxp, color, "%8.1f%%", "retiring",
                                retiring * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_RECOVERY_BUBBLES)) {
-               double bad_spec = td_bad_spec(ctx, cpu, st);
+               double bad_spec = td_bad_spec(cpu, st, &rsd);
 
                if (bad_spec > 0.1)
                        color = PERF_COLOR_RED;
                print_metric(config, ctxp, color, "%8.1f%%", "bad speculation",
                                bad_spec * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_ISSUED)) {
-               double be_bound = td_be_bound(ctx, cpu, st);
+               double be_bound = td_be_bound(cpu, st, &rsd);
                const char *name = "backend bound";
                static int have_recovery_bubbles = -1;
 
@@ -1121,43 +1133,43 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
 
                if (be_bound > 0.2)
                        color = PERF_COLOR_RED;
-               if (td_total_slots(ctx, cpu, st) > 0)
+               if (td_total_slots(cpu, st, &rsd) > 0)
                        print_metric(config, ctxp, color, "%8.1f%%", name,
                                        be_bound * 100.);
                else
                        print_metric(config, ctxp, NULL, NULL, name, 0);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_RETIRING) &&
-                       full_td(ctx, cpu, st)) {
-               double retiring = td_metric_ratio(ctx, cpu,
-                                                 STAT_TOPDOWN_RETIRING, st);
-
+                  full_td(cpu, st, &rsd)) {
+               double retiring = td_metric_ratio(cpu,
+                                                 STAT_TOPDOWN_RETIRING, st,
+                                                 &rsd);
                if (retiring > 0.7)
                        color = PERF_COLOR_GREEN;
                print_metric(config, ctxp, color, "%8.1f%%", "retiring",
                                retiring * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_FE_BOUND) &&
-                       full_td(ctx, cpu, st)) {
-               double fe_bound = td_metric_ratio(ctx, cpu,
-                                                 STAT_TOPDOWN_FE_BOUND, st);
-
+                  full_td(cpu, st, &rsd)) {
+               double fe_bound = td_metric_ratio(cpu,
+                                                 STAT_TOPDOWN_FE_BOUND, st,
+                                                 &rsd);
                if (fe_bound > 0.2)
                        color = PERF_COLOR_RED;
                print_metric(config, ctxp, color, "%8.1f%%", "frontend bound",
                                fe_bound * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_BE_BOUND) &&
-                       full_td(ctx, cpu, st)) {
-               double be_bound = td_metric_ratio(ctx, cpu,
-                                                 STAT_TOPDOWN_BE_BOUND, st);
-
+                  full_td(cpu, st, &rsd)) {
+               double be_bound = td_metric_ratio(cpu,
+                                                 STAT_TOPDOWN_BE_BOUND, st,
+                                                 &rsd);
                if (be_bound > 0.2)
                        color = PERF_COLOR_RED;
                print_metric(config, ctxp, color, "%8.1f%%", "backend bound",
                                be_bound * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_BAD_SPEC) &&
-                       full_td(ctx, cpu, st)) {
-               double bad_spec = td_metric_ratio(ctx, cpu,
-                                                 STAT_TOPDOWN_BAD_SPEC, st);
-
+                  full_td(cpu, st, &rsd)) {
+               double bad_spec = td_metric_ratio(cpu,
+                                                 STAT_TOPDOWN_BAD_SPEC, st,
+                                                 &rsd);
                if (bad_spec > 0.1)
                        color = PERF_COLOR_RED;
                print_metric(config, ctxp, color, "%8.1f%%", "bad speculation",
@@ -1165,11 +1177,11 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
        } else if (evsel->metric_expr) {
                generic_metric(config, evsel->metric_expr, evsel->metric_events, NULL,
                                evsel->name, evsel->metric_name, NULL, 1, cpu, out, st);
-       } else if (runtime_stat_n(st, STAT_NSECS, 0, cpu) != 0) {
+       } else if (runtime_stat_n(st, STAT_NSECS, cpu, &rsd) != 0) {
                char unit = 'M';
                char unit_buf[10];
 
-               total = runtime_stat_avg(st, STAT_NSECS, 0, cpu);
+               total = runtime_stat_avg(st, STAT_NSECS, cpu, &rsd);
 
                if (total)
                        ratio = 1000.0 * avg / total;
@@ -1180,7 +1192,7 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
                print_metric(config, ctxp, NULL, "%8.3f", unit_buf, ratio);
        } else if (perf_stat_evsel__is(evsel, SMI_NUM)) {
-               print_smi_cost(config, cpu, evsel, out, st);
+               print_smi_cost(config, cpu, out, st, &rsd);
        } else {
                num = 0;
        }
index 5390158..09cb3a6 100644 (file)
@@ -1249,6 +1249,8 @@ static void dump_isst_config(int arg)
        isst_ctdp_display_information_end(outf);
 }
 
+static void adjust_scaling_max_from_base_freq(int cpu);
+
 static void set_tdp_level_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
                                  void *arg4)
 {
@@ -1267,6 +1269,9 @@ static void set_tdp_level_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
                        int pkg_id = get_physical_package_id(cpu);
                        int die_id = get_physical_die_id(cpu);
 
+                       /* Wait for updated base frequencies */
+                       usleep(2000);
+
                        fprintf(stderr, "Option is set to online/offline\n");
                        ctdp_level.core_cpumask_size =
                                alloc_cpu_set(&ctdp_level.core_cpumask);
@@ -1283,6 +1288,7 @@ static void set_tdp_level_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
                                        if (CPU_ISSET_S(i, ctdp_level.core_cpumask_size, ctdp_level.core_cpumask)) {
                                                fprintf(stderr, "online cpu %d\n", i);
                                                set_cpu_online_offline(i, 1);
+                                               adjust_scaling_max_from_base_freq(i);
                                        } else {
                                                fprintf(stderr, "offline cpu %d\n", i);
                                                set_cpu_online_offline(i, 0);
@@ -1440,6 +1446,31 @@ static int set_cpufreq_scaling_min_max(int cpu, int max, int freq)
        return 0;
 }
 
+static int no_turbo(void)
+{
+       return parse_int_file(0, "/sys/devices/system/cpu/intel_pstate/no_turbo");
+}
+
+static void adjust_scaling_max_from_base_freq(int cpu)
+{
+       int base_freq, scaling_max_freq;
+
+       scaling_max_freq = parse_int_file(0, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_max_freq", cpu);
+       base_freq = get_cpufreq_base_freq(cpu);
+       if (scaling_max_freq < base_freq || no_turbo())
+               set_cpufreq_scaling_min_max(cpu, 1, base_freq);
+}
+
+static void adjust_scaling_min_from_base_freq(int cpu)
+{
+       int base_freq, scaling_min_freq;
+
+       scaling_min_freq = parse_int_file(0, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_min_freq", cpu);
+       base_freq = get_cpufreq_base_freq(cpu);
+       if (scaling_min_freq < base_freq)
+               set_cpufreq_scaling_min_max(cpu, 0, base_freq);
+}
+
 static int set_clx_pbf_cpufreq_scaling_min_max(int cpu)
 {
        struct isst_pkg_ctdp_level_info *ctdp_level;
@@ -1537,6 +1568,7 @@ static void set_scaling_min_to_cpuinfo_max(int cpu)
                        continue;
 
                set_cpufreq_scaling_min_max_from_cpuinfo(i, 1, 0);
+               adjust_scaling_min_from_base_freq(i);
        }
 }
 
index 21516e2..e808a47 100755 (executable)
@@ -43,9 +43,9 @@ class KunitStatus(Enum):
        BUILD_FAILURE = auto()
        TEST_FAILURE = auto()
 
-def get_kernel_root_path():
-       parts = sys.argv[0] if not __file__ else __file__
-       parts = os.path.realpath(parts).split('tools/testing/kunit')
+def get_kernel_root_path() -> str:
+       path = sys.argv[0] if not __file__ else __file__
+       parts = os.path.realpath(path).split('tools/testing/kunit')
        if len(parts) != 2:
                sys.exit(1)
        return parts[0]
@@ -171,7 +171,7 @@ def run_tests(linux: kunit_kernel.LinuxSourceTree,
                                exec_result.elapsed_time))
        return parse_result
 
-def add_common_opts(parser):
+def add_common_opts(parser) -> None:
        parser.add_argument('--build_dir',
                            help='As in the make command, it specifies the build '
                            'directory.',
@@ -183,13 +183,13 @@ def add_common_opts(parser):
                            help='Run all KUnit tests through allyesconfig',
                            action='store_true')
 
-def add_build_opts(parser):
+def add_build_opts(parser) -> None:
        parser.add_argument('--jobs',
                            help='As in the make command, "Specifies  the number of '
                            'jobs (commands) to run simultaneously."',
                            type=int, default=8, metavar='jobs')
 
-def add_exec_opts(parser):
+def add_exec_opts(parser) -> None:
        parser.add_argument('--timeout',
                            help='maximum number of seconds to allow for all tests '
                            'to run. This does not include time taken to build the '
@@ -198,7 +198,7 @@ def add_exec_opts(parser):
                            default=300,
                            metavar='timeout')
 
-def add_parse_opts(parser):
+def add_parse_opts(parser) -> None:
        parser.add_argument('--raw_output', help='don\'t format output from kernel',
                            action='store_true')
        parser.add_argument('--json',
@@ -256,10 +256,7 @@ def main(argv, linux=None):
                        os.mkdir(cli_args.build_dir)
 
                if not linux:
-                       linux = kunit_kernel.LinuxSourceTree()
-
-               linux.create_kunitconfig(cli_args.build_dir)
-               linux.read_kunitconfig(cli_args.build_dir)
+                       linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir)
 
                request = KunitRequest(cli_args.raw_output,
                                       cli_args.timeout,
@@ -277,10 +274,7 @@ def main(argv, linux=None):
                        os.mkdir(cli_args.build_dir)
 
                if not linux:
-                       linux = kunit_kernel.LinuxSourceTree()
-
-               linux.create_kunitconfig(cli_args.build_dir)
-               linux.read_kunitconfig(cli_args.build_dir)
+                       linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir)
 
                request = KunitConfigRequest(cli_args.build_dir,
                                             cli_args.make_options)
@@ -292,10 +286,7 @@ def main(argv, linux=None):
                        sys.exit(1)
        elif cli_args.subcommand == 'build':
                if not linux:
-                       linux = kunit_kernel.LinuxSourceTree()
-
-               linux.create_kunitconfig(cli_args.build_dir)
-               linux.read_kunitconfig(cli_args.build_dir)
+                       linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir)
 
                request = KunitBuildRequest(cli_args.jobs,
                                            cli_args.build_dir,
@@ -309,10 +300,7 @@ def main(argv, linux=None):
                        sys.exit(1)
        elif cli_args.subcommand == 'exec':
                if not linux:
-                       linux = kunit_kernel.LinuxSourceTree()
-
-               linux.create_kunitconfig(cli_args.build_dir)
-               linux.read_kunitconfig(cli_args.build_dir)
+                       linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir)
 
                exec_request = KunitExecRequest(cli_args.timeout,
                                                cli_args.build_dir,
index 02ffc3a..bdd6023 100644 (file)
@@ -8,6 +8,7 @@
 
 import collections
 import re
+from typing import List, Set
 
 CONFIG_IS_NOT_SET_PATTERN = r'^# CONFIG_(\w+) is not set$'
 CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+|".*")$'
@@ -30,10 +31,10 @@ class KconfigParseError(Exception):
 class Kconfig(object):
        """Represents defconfig or .config specified using the Kconfig language."""
 
-       def __init__(self):
-               self._entries = []
+       def __init__(self) -> None:
+               self._entries = []  # type: List[KconfigEntry]
 
-       def entries(self):
+       def entries(self) -> Set[KconfigEntry]:
                return set(self._entries)
 
        def add_entry(self, entry: KconfigEntry) -> None:
index 624b31b..f5cca5c 100644 (file)
@@ -13,7 +13,7 @@ import kunit_parser
 
 from kunit_parser import TestStatus
 
-def get_json_result(test_result, def_config, build_dir, json_path):
+def get_json_result(test_result, def_config, build_dir, json_path) -> str:
        sub_groups = []
 
        # Each test suite is mapped to a KernelCI sub_group
index 57c1724..2076a5a 100644 (file)
@@ -11,6 +11,7 @@ import subprocess
 import os
 import shutil
 import signal
+from typing import Iterator
 
 from contextlib import ExitStack
 
@@ -39,7 +40,7 @@ class BuildError(Exception):
 class LinuxSourceTreeOperations(object):
        """An abstraction over command line operations performed on a source tree."""
 
-       def make_mrproper(self):
+       def make_mrproper(self) -> None:
                try:
                        subprocess.check_output(['make', 'mrproper'], stderr=subprocess.STDOUT)
                except OSError as e:
@@ -47,7 +48,7 @@ class LinuxSourceTreeOperations(object):
                except subprocess.CalledProcessError as e:
                        raise ConfigError(e.output.decode())
 
-       def make_olddefconfig(self, build_dir, make_options):
+       def make_olddefconfig(self, build_dir, make_options) -> None:
                command = ['make', 'ARCH=um', 'olddefconfig']
                if make_options:
                        command.extend(make_options)
@@ -60,7 +61,7 @@ class LinuxSourceTreeOperations(object):
                except subprocess.CalledProcessError as e:
                        raise ConfigError(e.output.decode())
 
-       def make_allyesconfig(self, build_dir, make_options):
+       def make_allyesconfig(self, build_dir, make_options) -> None:
                kunit_parser.print_with_timestamp(
                        'Enabling all CONFIGs for UML...')
                command = ['make', 'ARCH=um', 'allyesconfig']
@@ -82,7 +83,7 @@ class LinuxSourceTreeOperations(object):
                kunit_parser.print_with_timestamp(
                        'Starting Kernel with all configs takes a few minutes...')
 
-       def make(self, jobs, build_dir, make_options):
+       def make(self, jobs, build_dir, make_options) -> None:
                command = ['make', 'ARCH=um', '--jobs=' + str(jobs)]
                if make_options:
                        command.extend(make_options)
@@ -100,7 +101,7 @@ class LinuxSourceTreeOperations(object):
                if stderr:  # likely only due to build warnings
                        print(stderr.decode())
 
-       def linux_bin(self, params, timeout, build_dir):
+       def linux_bin(self, params, timeout, build_dir) -> None:
                """Runs the Linux UML binary. Must be named 'linux'."""
                linux_bin = get_file_path(build_dir, 'linux')
                outfile = get_outfile_path(build_dir)
@@ -110,41 +111,42 @@ class LinuxSourceTreeOperations(object):
                                                   stderr=subprocess.STDOUT)
                        process.wait(timeout)
 
-def get_kconfig_path(build_dir):
+def get_kconfig_path(build_dir) -> str:
        return get_file_path(build_dir, KCONFIG_PATH)
 
-def get_kunitconfig_path(build_dir):
+def get_kunitconfig_path(build_dir) -> str:
        return get_file_path(build_dir, KUNITCONFIG_PATH)
 
-def get_outfile_path(build_dir):
+def get_outfile_path(build_dir) -> str:
        return get_file_path(build_dir, OUTFILE_PATH)
 
 class LinuxSourceTree(object):
        """Represents a Linux kernel source tree with KUnit tests."""
 
-       def __init__(self):
-               self._ops = LinuxSourceTreeOperations()
+       def __init__(self, build_dir: str, load_config=True, defconfig=DEFAULT_KUNITCONFIG_PATH) -> None:
                signal.signal(signal.SIGINT, self.signal_handler)
 
-       def clean(self):
-               try:
-                       self._ops.make_mrproper()
-               except ConfigError as e:
-                       logging.error(e)
-                       return False
-               return True
+               self._ops = LinuxSourceTreeOperations()
+
+               if not load_config:
+                       return
 
-       def create_kunitconfig(self, build_dir, defconfig=DEFAULT_KUNITCONFIG_PATH):
                kunitconfig_path = get_kunitconfig_path(build_dir)
                if not os.path.exists(kunitconfig_path):
                        shutil.copyfile(defconfig, kunitconfig_path)
 
-       def read_kunitconfig(self, build_dir):
-               kunitconfig_path = get_kunitconfig_path(build_dir)
                self._kconfig = kunit_config.Kconfig()
                self._kconfig.read_from_file(kunitconfig_path)
 
-       def validate_config(self, build_dir):
+       def clean(self) -> bool:
+               try:
+                       self._ops.make_mrproper()
+               except ConfigError as e:
+                       logging.error(e)
+                       return False
+               return True
+
+       def validate_config(self, build_dir) -> bool:
                kconfig_path = get_kconfig_path(build_dir)
                validated_kconfig = kunit_config.Kconfig()
                validated_kconfig.read_from_file(kconfig_path)
@@ -158,7 +160,7 @@ class LinuxSourceTree(object):
                        return False
                return True
 
-       def build_config(self, build_dir, make_options):
+       def build_config(self, build_dir, make_options) -> bool:
                kconfig_path = get_kconfig_path(build_dir)
                if build_dir and not os.path.exists(build_dir):
                        os.mkdir(build_dir)
@@ -170,7 +172,7 @@ class LinuxSourceTree(object):
                        return False
                return self.validate_config(build_dir)
 
-       def build_reconfig(self, build_dir, make_options):
+       def build_reconfig(self, build_dir, make_options) -> bool:
                """Creates a new .config if it is not a subset of the .kunitconfig."""
                kconfig_path = get_kconfig_path(build_dir)
                if os.path.exists(kconfig_path):
@@ -186,7 +188,7 @@ class LinuxSourceTree(object):
                        print('Generating .config ...')
                        return self.build_config(build_dir, make_options)
 
-       def build_um_kernel(self, alltests, jobs, build_dir, make_options):
+       def build_um_kernel(self, alltests, jobs, build_dir, make_options) -> bool:
                try:
                        if alltests:
                                self._ops.make_allyesconfig(build_dir, make_options)
@@ -197,8 +199,8 @@ class LinuxSourceTree(object):
                        return False
                return self.validate_config(build_dir)
 
-       def run_kernel(self, args=[], build_dir='', timeout=None):
-               args.extend(['mem=1G'])
+       def run_kernel(self, args=[], build_dir='', timeout=None) -> Iterator[str]:
+               args.extend(['mem=1G', 'console=tty'])
                self._ops.linux_bin(args, timeout, build_dir)
                outfile = get_outfile_path(build_dir)
                subprocess.call(['stty', 'sane'])
@@ -206,6 +208,6 @@ class LinuxSourceTree(object):
                        for line in file:
                                yield line
 
-       def signal_handler(self, sig, frame):
+       def signal_handler(self, sig, frame) -> None:
                logging.error('Build interruption occurred. Cleaning console.')
                subprocess.call(['stty', 'sane'])
index 6614ec4..e8bcc13 100644 (file)
@@ -12,32 +12,32 @@ from collections import namedtuple
 from datetime import datetime
 from enum import Enum, auto
 from functools import reduce
-from typing import List, Optional, Tuple
+from typing import Iterable, Iterator, List, Optional, Tuple
 
 TestResult = namedtuple('TestResult', ['status','suites','log'])
 
 class TestSuite(object):
-       def __init__(self):
-               self.status = None
-               self.name = None
-               self.cases = []
+       def __init__(self) -> None:
+               self.status = TestStatus.SUCCESS
+               self.name = ''
+               self.cases = []  # type: List[TestCase]
 
-       def __str__(self):
-               return 'TestSuite(' + self.status + ',' + self.name + ',' + str(self.cases) + ')'
+       def __str__(self) -> str:
+               return 'TestSuite(' + str(self.status) + ',' + self.name + ',' + str(self.cases) + ')'
 
-       def __repr__(self):
+       def __repr__(self) -> str:
                return str(self)
 
 class TestCase(object):
-       def __init__(self):
-               self.status = None
+       def __init__(self) -> None:
+               self.status = TestStatus.SUCCESS
                self.name = ''
-               self.log = []
+               self.log = []  # type: List[str]
 
-       def __str__(self):
-               return 'TestCase(' + self.status + ',' + self.name + ',' + str(self.log) + ')'
+       def __str__(self) -> str:
+               return 'TestCase(' + str(self.status) + ',' + self.name + ',' + str(self.log) + ')'
 
-       def __repr__(self):
+       def __repr__(self) -> str:
                return str(self)
 
 class TestStatus(Enum):
@@ -51,7 +51,7 @@ kunit_start_re = re.compile(r'TAP version [0-9]+$')
 kunit_end_re = re.compile('(List of all partitions:|'
                          'Kernel panic - not syncing: VFS:)')
 
-def isolate_kunit_output(kernel_output):
+def isolate_kunit_output(kernel_output) -> Iterator[str]:
        started = False
        for line in kernel_output:
                line = line.rstrip()  # line always has a trailing \n
@@ -64,7 +64,7 @@ def isolate_kunit_output(kernel_output):
                elif started:
                        yield line[prefix_len:] if prefix_len > 0 else line
 
-def raw_output(kernel_output):
+def raw_output(kernel_output) -> None:
        for line in kernel_output:
                print(line.rstrip())
 
@@ -72,36 +72,36 @@ DIVIDER = '=' * 60
 
 RESET = '\033[0;0m'
 
-def red(text):
+def red(text) -> str:
        return '\033[1;31m' + text + RESET
 
-def yellow(text):
+def yellow(text) -> str:
        return '\033[1;33m' + text + RESET
 
-def green(text):
+def green(text) -> str:
        return '\033[1;32m' + text + RESET
 
-def print_with_timestamp(message):
+def print_with_timestamp(message) -> None:
        print('[%s] %s' % (datetime.now().strftime('%H:%M:%S'), message))
 
-def format_suite_divider(message):
+def format_suite_divider(message) -> str:
        return '======== ' + message + ' ========'
 
-def print_suite_divider(message):
+def print_suite_divider(message) -> None:
        print_with_timestamp(DIVIDER)
        print_with_timestamp(format_suite_divider(message))
 
-def print_log(log):
+def print_log(log) -> None:
        for m in log:
                print_with_timestamp(m)
 
 TAP_ENTRIES = re.compile(r'^(TAP|[\s]*ok|[\s]*not ok|[\s]*[0-9]+\.\.[0-9]+|[\s]*#).*$')
 
-def consume_non_diagnositic(lines: List[str]) -> None:
+def consume_non_diagnostic(lines: List[str]) -> None:
        while lines and not TAP_ENTRIES.match(lines[0]):
                lines.pop(0)
 
-def save_non_diagnositic(lines: List[str], test_case: TestCase) -> None:
+def save_non_diagnostic(lines: List[str], test_case: TestCase) -> None:
        while lines and not TAP_ENTRIES.match(lines[0]):
                test_case.log.append(lines[0])
                lines.pop(0)
@@ -113,7 +113,7 @@ OK_NOT_OK_SUBTEST = re.compile(r'^[\s]+(ok|not ok) [0-9]+ - (.*)$')
 OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) ([0-9]+) - (.*)$')
 
 def parse_ok_not_ok_test_case(lines: List[str], test_case: TestCase) -> bool:
-       save_non_diagnositic(lines, test_case)
+       save_non_diagnostic(lines, test_case)
        if not lines:
                test_case.status = TestStatus.TEST_CRASHED
                return True
@@ -139,7 +139,7 @@ SUBTEST_DIAGNOSTIC = re.compile(r'^[\s]+# (.*)$')
 DIAGNOSTIC_CRASH_MESSAGE = re.compile(r'^[\s]+# .*?: kunit test case crashed!$')
 
 def parse_diagnostic(lines: List[str], test_case: TestCase) -> bool:
-       save_non_diagnositic(lines, test_case)
+       save_non_diagnostic(lines, test_case)
        if not lines:
                return False
        line = lines[0]
@@ -155,7 +155,7 @@ def parse_diagnostic(lines: List[str], test_case: TestCase) -> bool:
 
 def parse_test_case(lines: List[str]) -> Optional[TestCase]:
        test_case = TestCase()
-       save_non_diagnositic(lines, test_case)
+       save_non_diagnostic(lines, test_case)
        while parse_diagnostic(lines, test_case):
                pass
        if parse_ok_not_ok_test_case(lines, test_case):
@@ -166,7 +166,7 @@ def parse_test_case(lines: List[str]) -> Optional[TestCase]:
 SUBTEST_HEADER = re.compile(r'^[\s]+# Subtest: (.*)$')
 
 def parse_subtest_header(lines: List[str]) -> Optional[str]:
-       consume_non_diagnositic(lines)
+       consume_non_diagnostic(lines)
        if not lines:
                return None
        match = SUBTEST_HEADER.match(lines[0])
@@ -179,7 +179,7 @@ def parse_subtest_header(lines: List[str]) -> Optional[str]:
 SUBTEST_PLAN = re.compile(r'[\s]+[0-9]+\.\.([0-9]+)')
 
 def parse_subtest_plan(lines: List[str]) -> Optional[int]:
-       consume_non_diagnositic(lines)
+       consume_non_diagnostic(lines)
        match = SUBTEST_PLAN.match(lines[0])
        if match:
                lines.pop(0)
@@ -202,7 +202,7 @@ def max_status(left: TestStatus, right: TestStatus) -> TestStatus:
 def parse_ok_not_ok_test_suite(lines: List[str],
                               test_suite: TestSuite,
                               expected_suite_index: int) -> bool:
-       consume_non_diagnositic(lines)
+       consume_non_diagnostic(lines)
        if not lines:
                test_suite.status = TestStatus.TEST_CRASHED
                return False
@@ -224,18 +224,17 @@ def parse_ok_not_ok_test_suite(lines: List[str],
        else:
                return False
 
-def bubble_up_errors(to_status, status_container_list) -> TestStatus:
-       status_list = map(to_status, status_container_list)
-       return reduce(max_status, status_list, TestStatus.SUCCESS)
+def bubble_up_errors(statuses: Iterable[TestStatus]) -> TestStatus:
+       return reduce(max_status, statuses, TestStatus.SUCCESS)
 
 def bubble_up_test_case_errors(test_suite: TestSuite) -> TestStatus:
-       max_test_case_status = bubble_up_errors(lambda x: x.status, test_suite.cases)
+       max_test_case_status = bubble_up_errors(x.status for x in test_suite.cases)
        return max_status(max_test_case_status, test_suite.status)
 
 def parse_test_suite(lines: List[str], expected_suite_index: int) -> Optional[TestSuite]:
        if not lines:
                return None
-       consume_non_diagnositic(lines)
+       consume_non_diagnostic(lines)
        test_suite = TestSuite()
        test_suite.status = TestStatus.SUCCESS
        name = parse_subtest_header(lines)
@@ -264,7 +263,7 @@ def parse_test_suite(lines: List[str], expected_suite_index: int) -> Optional[Te
 TAP_HEADER = re.compile(r'^TAP version 14$')
 
 def parse_tap_header(lines: List[str]) -> bool:
-       consume_non_diagnositic(lines)
+       consume_non_diagnostic(lines)
        if TAP_HEADER.match(lines[0]):
                lines.pop(0)
                return True
@@ -274,7 +273,7 @@ def parse_tap_header(lines: List[str]) -> bool:
 TEST_PLAN = re.compile(r'[0-9]+\.\.([0-9]+)')
 
 def parse_test_plan(lines: List[str]) -> Optional[int]:
-       consume_non_diagnositic(lines)
+       consume_non_diagnostic(lines)
        match = TEST_PLAN.match(lines[0])
        if match:
                lines.pop(0)
@@ -282,11 +281,11 @@ def parse_test_plan(lines: List[str]) -> Optional[int]:
        else:
                return None
 
-def bubble_up_suite_errors(test_suite_list: List[TestSuite]) -> TestStatus:
-       return bubble_up_errors(lambda x: x.status, test_suite_list)
+def bubble_up_suite_errors(test_suites: Iterable[TestSuite]) -> TestStatus:
+       return bubble_up_errors(x.status for x in test_suites)
 
 def parse_test_result(lines: List[str]) -> TestResult:
-       consume_non_diagnositic(lines)
+       consume_non_diagnostic(lines)
        if not lines or not parse_tap_header(lines):
                return TestResult(TestStatus.NO_TESTS, [], lines)
        expected_test_suite_num = parse_test_plan(lines)
index afbab4a..8a917cb 100644 (file)
@@ -77,8 +77,10 @@ TARGETS += zram
 TARGETS_HOTPLUG = cpu-hotplug
 TARGETS_HOTPLUG += memory-hotplug
 
-# User can optionally provide a TARGETS skiplist.
-SKIP_TARGETS ?=
+# User can optionally provide a TARGETS skiplist.  By default we skip
+# BPF since it has cutting edge build time dependencies which require
+# more effort to install.
+SKIP_TARGETS ?= bpf
 ifneq ($(SKIP_TARGETS),)
        TMP := $(filter-out $(SKIP_TARGETS), $(TARGETS))
        override TARGETS := $(TMP)
index 1c5556b..0dbd594 100644 (file)
@@ -457,7 +457,7 @@ function barf
        mov     x11, x1 // actual data
        mov     x12, x2 // data size
 
-       puts    "Mistatch: PID="
+       puts    "Mismatch: PID="
        mov     x0, x20
        bl      putdec
        puts    ", iteration="
index f95074c..9210691 100644 (file)
@@ -625,7 +625,7 @@ function barf
        mov     x11, x1 // actual data
        mov     x12, x2 // data size
 
-       puts    "Mistatch: PID="
+       puts    "Mismatch: PID="
        mov     x0, x20
        bl      putdec
        puts    ", iteration="
index 8c33e99..c51df6b 100644 (file)
@@ -121,6 +121,9 @@ VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux)                                \
                     /sys/kernel/btf/vmlinux                            \
                     /boot/vmlinux-$(shell uname -r)
 VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
+ifeq ($(VMLINUX_BTF),)
+$(error Cannot find a vmlinux for VMLINUX_BTF at any of "$(VMLINUX_BTF_PATHS)")
+endif
 
 # Define simple and short `make test_progs`, `make test_sysctl`, etc targets
 # to build individual tests.
index c0fe73a..3bfcf00 100644 (file)
@@ -34,61 +34,6 @@ struct storage {
        struct bpf_spin_lock lock;
 };
 
-/* Copies an rm binary to a temp file. dest is a mkstemp template */
-static int copy_rm(char *dest)
-{
-       int fd_in, fd_out = -1, ret = 0;
-       struct stat stat;
-       char *buf = NULL;
-
-       fd_in = open("/bin/rm", O_RDONLY);
-       if (fd_in < 0)
-               return -errno;
-
-       fd_out = mkstemp(dest);
-       if (fd_out < 0) {
-               ret = -errno;
-               goto out;
-       }
-
-       ret = fstat(fd_in, &stat);
-       if (ret == -1) {
-               ret = -errno;
-               goto out;
-       }
-
-       buf = malloc(stat.st_blksize);
-       if (!buf) {
-               ret = -errno;
-               goto out;
-       }
-
-       while (ret = read(fd_in, buf, stat.st_blksize), ret > 0) {
-               ret = write(fd_out, buf, ret);
-               if (ret < 0) {
-                       ret = -errno;
-                       goto out;
-
-               }
-       }
-       if (ret < 0) {
-               ret = -errno;
-               goto out;
-
-       }
-
-       /* Set executable permission on the copied file */
-       ret = chmod(dest, 0100);
-       if (ret == -1)
-               ret = -errno;
-
-out:
-       free(buf);
-       close(fd_in);
-       close(fd_out);
-       return ret;
-}
-
 /* Fork and exec the provided rm binary and return the exit code of the
  * forked process and its pid.
  */
@@ -168,9 +113,11 @@ static bool check_syscall_operations(int map_fd, int obj_fd)
 
 void test_test_local_storage(void)
 {
-       char tmp_exec_path[PATH_MAX] = "/tmp/copy_of_rmXXXXXX";
+       char tmp_dir_path[64] = "/tmp/local_storageXXXXXX";
        int err, serv_sk = -1, task_fd = -1, rm_fd = -1;
        struct local_storage *skel = NULL;
+       char tmp_exec_path[64];
+       char cmd[256];
 
        skel = local_storage__open_and_load();
        if (CHECK(!skel, "skel_load", "lsm skeleton failed\n"))
@@ -189,18 +136,24 @@ void test_test_local_storage(void)
                                      task_fd))
                goto close_prog;
 
-       err = copy_rm(tmp_exec_path);
-       if (CHECK(err < 0, "copy_rm", "err %d errno %d\n", err, errno))
+       if (CHECK(!mkdtemp(tmp_dir_path), "mkdtemp",
+                 "unable to create tmpdir: %d\n", errno))
                goto close_prog;
 
+       snprintf(tmp_exec_path, sizeof(tmp_exec_path), "%s/copy_of_rm",
+                tmp_dir_path);
+       snprintf(cmd, sizeof(cmd), "cp /bin/rm %s", tmp_exec_path);
+       if (CHECK_FAIL(system(cmd)))
+               goto close_prog_rmdir;
+
        rm_fd = open(tmp_exec_path, O_RDONLY);
        if (CHECK(rm_fd < 0, "open", "failed to open %s err:%d, errno:%d",
                  tmp_exec_path, rm_fd, errno))
-               goto close_prog;
+               goto close_prog_rmdir;
 
        if (!check_syscall_operations(bpf_map__fd(skel->maps.inode_storage_map),
                                      rm_fd))
-               goto close_prog;
+               goto close_prog_rmdir;
 
        /* Sets skel->bss->monitored_pid to the pid of the forked child
         * forks a child process that executes tmp_exec_path and tries to
@@ -209,33 +162,36 @@ void test_test_local_storage(void)
         */
        err = run_self_unlink(&skel->bss->monitored_pid, tmp_exec_path);
        if (CHECK(err != EPERM, "run_self_unlink", "err %d want EPERM\n", err))
-               goto close_prog_unlink;
+               goto close_prog_rmdir;
 
        /* Set the process being monitored to be the current process */
        skel->bss->monitored_pid = getpid();
 
-       /* Remove the temporary created executable */
-       err = unlink(tmp_exec_path);
-       if (CHECK(err != 0, "unlink", "unable to unlink %s: %d", tmp_exec_path,
-                 errno))
-               goto close_prog_unlink;
+       /* Move copy_of_rm to a new location so that it triggers the
+        * inode_rename LSM hook with a new_dentry that has a NULL inode ptr.
+        */
+       snprintf(cmd, sizeof(cmd), "mv %s/copy_of_rm %s/check_null_ptr",
+                tmp_dir_path, tmp_dir_path);
+       if (CHECK_FAIL(system(cmd)))
+               goto close_prog_rmdir;
 
        CHECK(skel->data->inode_storage_result != 0, "inode_storage_result",
              "inode_local_storage not set\n");
 
        serv_sk = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
        if (CHECK(serv_sk < 0, "start_server", "failed to start server\n"))
-               goto close_prog;
+               goto close_prog_rmdir;
 
        CHECK(skel->data->sk_storage_result != 0, "sk_storage_result",
              "sk_local_storage not set\n");
 
        if (!check_syscall_operations(bpf_map__fd(skel->maps.sk_storage_map),
                                      serv_sk))
-               goto close_prog;
+               goto close_prog_rmdir;
 
-close_prog_unlink:
-       unlink(tmp_exec_path);
+close_prog_rmdir:
+       snprintf(cmd, sizeof(cmd), "rm -rf %s", tmp_dir_path);
+       system(cmd);
 close_prog:
        close(serv_sk);
        close(rm_fd);
index 5bfef28..418d9c6 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright 2020 Google LLC.
  */
 
-#include "vmlinux.h"
+#include <linux/bpf.h>
 #include <errno.h>
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
index 3e3de13..95868bc 100644 (file)
@@ -50,7 +50,6 @@ int BPF_PROG(unlink_hook, struct inode *dir, struct dentry *victim)
        __u32 pid = bpf_get_current_pid_tgid() >> 32;
        struct local_storage *storage;
        bool is_self_unlink;
-       int err;
 
        if (pid != monitored_pid)
                return 0;
@@ -66,8 +65,27 @@ int BPF_PROG(unlink_hook, struct inode *dir, struct dentry *victim)
                        return -EPERM;
        }
 
-       storage = bpf_inode_storage_get(&inode_storage_map, victim->d_inode, 0,
-                                       BPF_LOCAL_STORAGE_GET_F_CREATE);
+       return 0;
+}
+
+SEC("lsm/inode_rename")
+int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry,
+            struct inode *new_dir, struct dentry *new_dentry,
+            unsigned int flags)
+{
+       __u32 pid = bpf_get_current_pid_tgid() >> 32;
+       struct local_storage *storage;
+       int err;
+
+       /* new_dentry->d_inode can be NULL when the inode is renamed to a file
+        * that did not exist before. The helper should be able to handle this
+        * NULL pointer.
+        */
+       bpf_inode_storage_get(&inode_storage_map, new_dentry->d_inode, 0,
+                             BPF_LOCAL_STORAGE_GET_F_CREATE);
+
+       storage = bpf_inode_storage_get(&inode_storage_map, old_dentry->d_inode,
+                                       0, 0);
        if (!storage)
                return 0;
 
@@ -76,7 +94,7 @@ int BPF_PROG(unlink_hook, struct inode *dir, struct dentry *victim)
                inode_storage_result = -1;
        bpf_spin_unlock(&storage->lock);
 
-       err = bpf_inode_storage_delete(&inode_storage_map, victim->d_inode);
+       err = bpf_inode_storage_delete(&inode_storage_map, old_dentry->d_inode);
        if (!err)
                inode_storage_result = err;
 
@@ -133,37 +151,18 @@ int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
        return 0;
 }
 
-SEC("lsm/file_open")
-int BPF_PROG(file_open, struct file *file)
-{
-       __u32 pid = bpf_get_current_pid_tgid() >> 32;
-       struct local_storage *storage;
-
-       if (pid != monitored_pid)
-               return 0;
-
-       if (!file->f_inode)
-               return 0;
-
-       storage = bpf_inode_storage_get(&inode_storage_map, file->f_inode, 0,
-                                       BPF_LOCAL_STORAGE_GET_F_CREATE);
-       if (!storage)
-               return 0;
-
-       bpf_spin_lock(&storage->lock);
-       storage->value = DUMMY_STORAGE_VALUE;
-       bpf_spin_unlock(&storage->lock);
-       return 0;
-}
-
 /* This uses the local storage to remember the inode of the binary that a
  * process was originally executing.
  */
 SEC("lsm/bprm_committed_creds")
 void BPF_PROG(exec, struct linux_binprm *bprm)
 {
+       __u32 pid = bpf_get_current_pid_tgid() >> 32;
        struct local_storage *storage;
 
+       if (pid != monitored_pid)
+               return;
+
        storage = bpf_task_storage_get(&task_storage_map,
                                       bpf_get_current_task_btf(), 0,
                                       BPF_LOCAL_STORAGE_GET_F_CREATE);
@@ -172,4 +171,13 @@ void BPF_PROG(exec, struct linux_binprm *bprm)
                storage->exec_inode = bprm->file->f_inode;
                bpf_spin_unlock(&storage->lock);
        }
+
+       storage = bpf_inode_storage_get(&inode_storage_map, bprm->file->f_inode,
+                                       0, BPF_LOCAL_STORAGE_GET_F_CREATE);
+       if (!storage)
+               return;
+
+       bpf_spin_lock(&storage->lock);
+       storage->value = DUMMY_STORAGE_VALUE;
+       bpf_spin_unlock(&storage->lock);
 }
index 0ad3e63..51adc42 100644 (file)
@@ -1312,22 +1312,58 @@ static void test_map_stress(void)
 #define DO_UPDATE 1
 #define DO_DELETE 0
 
+#define MAP_RETRIES 20
+
+static int map_update_retriable(int map_fd, const void *key, const void *value,
+                               int flags, int attempts)
+{
+       while (bpf_map_update_elem(map_fd, key, value, flags)) {
+               if (!attempts || (errno != EAGAIN && errno != EBUSY))
+                       return -errno;
+
+               usleep(1);
+               attempts--;
+       }
+
+       return 0;
+}
+
+static int map_delete_retriable(int map_fd, const void *key, int attempts)
+{
+       while (bpf_map_delete_elem(map_fd, key)) {
+               if (!attempts || (errno != EAGAIN && errno != EBUSY))
+                       return -errno;
+
+               usleep(1);
+               attempts--;
+       }
+
+       return 0;
+}
+
 static void test_update_delete(unsigned int fn, void *data)
 {
        int do_update = ((int *)data)[1];
        int fd = ((int *)data)[0];
-       int i, key, value;
+       int i, key, value, err;
 
        for (i = fn; i < MAP_SIZE; i += TASKS) {
                key = value = i;
 
                if (do_update) {
-                       assert(bpf_map_update_elem(fd, &key, &value,
-                                                  BPF_NOEXIST) == 0);
-                       assert(bpf_map_update_elem(fd, &key, &value,
-                                                  BPF_EXIST) == 0);
+                       err = map_update_retriable(fd, &key, &value, BPF_NOEXIST, MAP_RETRIES);
+                       if (err)
+                               printf("error %d %d\n", err, errno);
+                       assert(err == 0);
+                       err = map_update_retriable(fd, &key, &value, BPF_EXIST, MAP_RETRIES);
+                       if (err)
+                               printf("error %d %d\n", err, errno);
+                       assert(err == 0);
                } else {
-                       assert(bpf_map_delete_elem(fd, &key) == 0);
+                       err = map_delete_retriable(fd, &key, MAP_RETRIES);
+                       if (err)
+                               printf("error %d %d\n", err, errno);
+                       assert(err == 0);
                }
        }
 }
index 777a814..f8569f0 100644 (file)
@@ -50,7 +50,7 @@
 #define MAX_INSNS      BPF_MAXINSNS
 #define MAX_TEST_INSNS 1000000
 #define MAX_FIXUPS     8
-#define MAX_NR_MAPS    20
+#define MAX_NR_MAPS    21
 #define MAX_TEST_RUNS  8
 #define POINTER_VALUE  0xcafe4all
 #define TEST_DATA_LEN  64
@@ -87,6 +87,7 @@ struct bpf_test {
        int fixup_sk_storage_map[MAX_FIXUPS];
        int fixup_map_event_output[MAX_FIXUPS];
        int fixup_map_reuseport_array[MAX_FIXUPS];
+       int fixup_map_ringbuf[MAX_FIXUPS];
        const char *errstr;
        const char *errstr_unpriv;
        uint32_t insn_processed;
@@ -640,6 +641,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
        int *fixup_sk_storage_map = test->fixup_sk_storage_map;
        int *fixup_map_event_output = test->fixup_map_event_output;
        int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
+       int *fixup_map_ringbuf = test->fixup_map_ringbuf;
 
        if (test->fill_helper) {
                test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
@@ -817,6 +819,14 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
                        fixup_map_reuseport_array++;
                } while (*fixup_map_reuseport_array);
        }
+       if (*fixup_map_ringbuf) {
+               map_fds[20] = create_map(BPF_MAP_TYPE_RINGBUF, 0,
+                                          0, 4096);
+               do {
+                       prog[*fixup_map_ringbuf].imm = map_fds[20];
+                       fixup_map_ringbuf++;
+               } while (*fixup_map_ringbuf);
+       }
 }
 
 struct libcap {
index 45d43bf..0b94389 100644 (file)
        .result = ACCEPT,
        .result_unpriv = ACCEPT,
 },
+{
+       "check valid spill/fill, ptr to mem",
+       .insns = {
+       /* reserve 8 byte ringbuf memory */
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_MOV64_IMM(BPF_REG_2, 8),
+       BPF_MOV64_IMM(BPF_REG_3, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
+       /* store a pointer to the reserved memory in R6 */
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       /* check whether the reservation was successful */
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+       /* spill R6(mem) into the stack */
+       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+       /* fill it back in R7 */
+       BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, -8),
+       /* should be able to access *(R7) = 0 */
+       BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 0),
+       /* submit the reserved ringbuf memory */
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+       BPF_MOV64_IMM(BPF_REG_2, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_ringbuf = { 1 },
+       .result = ACCEPT,
+       .result_unpriv = ACCEPT,
+},
 {
        "check corrupted spill/fill",
        .insns = {
index 014deda..1e722ee 100644 (file)
@@ -715,7 +715,7 @@ static void worker_pkt_dump(void)
                int payload = *((uint32_t *)(pkt_buf[iter]->payload + PKT_HDR_SIZE));
 
                if (payload == EOT) {
-                       ksft_print_msg("End-of-tranmission frame received\n");
+                       ksft_print_msg("End-of-transmission frame received\n");
                        fprintf(stdout, "---------------------------------------\n");
                        break;
                }
@@ -747,7 +747,7 @@ static void worker_pkt_validate(void)
                        }
 
                        if (payloadseqnum == EOT) {
-                               ksft_print_msg("End-of-tranmission frame received: PASS\n");
+                               ksft_print_msg("End-of-transmission frame received: PASS\n");
                                sigvar = 1;
                                break;
                        }
index 4d900bc..5c77002 100755 (executable)
@@ -230,7 +230,7 @@ switch_create()
        __mlnx_qos -i $swp4 --pfc=0,1,0,0,0,0,0,0 >/dev/null
        # PG0 will get autoconfigured to Xoff, give PG1 arbitrarily 100K, which
        # is (-2*MTU) about 80K of delay provision.
-       __mlnx_qos -i $swp3 --buffer_size=0,$_100KB,0,0,0,0,0,0 >/dev/null
+       __mlnx_qos -i $swp4 --buffer_size=0,$_100KB,0,0,0,0,0,0 >/dev/null
 
        # bridges
        # -------
index c7ca4fa..fe41c6a 100644 (file)
@@ -33,7 +33,7 @@ ifeq ($(ARCH),s390)
        UNAME_M := s390x
 endif
 
-LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c lib/test_util.c
+LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c
 LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c lib/x86_64/handlers.S
 LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c
 LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c
index 3d96a7b..cdad1ec 100644 (file)
@@ -7,23 +7,20 @@
  * Copyright (C) 2019, Google, Inc.
  */
 
-#define _GNU_SOURCE /* for program_invocation_name */
+#define _GNU_SOURCE /* for pipe2 */
 
 #include <stdio.h>
 #include <stdlib.h>
-#include <sys/syscall.h>
-#include <unistd.h>
-#include <asm/unistd.h>
 #include <time.h>
 #include <poll.h>
 #include <pthread.h>
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
 #include <linux/userfaultfd.h>
+#include <sys/syscall.h>
 
-#include "perf_test_util.h"
-#include "processor.h"
+#include "kvm_util.h"
 #include "test_util.h"
+#include "perf_test_util.h"
+#include "guest_modes.h"
 
 #ifdef __NR_userfaultfd
 
 #define PER_VCPU_DEBUG(...) _no_printf(__VA_ARGS__)
 #endif
 
+static int nr_vcpus = 1;
+static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
 static char *guest_data_prototype;
 
 static void *vcpu_worker(void *data)
 {
        int ret;
-       struct vcpu_args *vcpu_args = (struct vcpu_args *)data;
+       struct perf_test_vcpu_args *vcpu_args = (struct perf_test_vcpu_args *)data;
        int vcpu_id = vcpu_args->vcpu_id;
        struct kvm_vm *vm = perf_test_args.vm;
        struct kvm_run *run;
@@ -248,9 +247,14 @@ static int setup_demand_paging(struct kvm_vm *vm,
        return 0;
 }
 
-static void run_test(enum vm_guest_mode mode, bool use_uffd,
-                    useconds_t uffd_delay)
+struct test_params {
+       bool use_uffd;
+       useconds_t uffd_delay;
+};
+
+static void run_test(enum vm_guest_mode mode, void *arg)
 {
+       struct test_params *p = arg;
        pthread_t *vcpu_threads;
        pthread_t *uffd_handler_threads = NULL;
        struct uffd_handler_args *uffd_args = NULL;
@@ -261,7 +265,7 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
        int vcpu_id;
        int r;
 
-       vm = create_vm(mode, nr_vcpus, guest_percpu_mem_size);
+       vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size);
 
        perf_test_args.wr_fract = 1;
 
@@ -273,9 +277,9 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
        vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
        TEST_ASSERT(vcpu_threads, "Memory allocation failed");
 
-       add_vcpus(vm, nr_vcpus, guest_percpu_mem_size);
+       perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size);
 
-       if (use_uffd) {
+       if (p->use_uffd) {
                uffd_handler_threads =
                        malloc(nr_vcpus * sizeof(*uffd_handler_threads));
                TEST_ASSERT(uffd_handler_threads, "Memory allocation failed");
@@ -308,7 +312,7 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
                        r = setup_demand_paging(vm,
                                                &uffd_handler_threads[vcpu_id],
                                                pipefds[vcpu_id * 2],
-                                               uffd_delay, &uffd_args[vcpu_id],
+                                               p->uffd_delay, &uffd_args[vcpu_id],
                                                vcpu_hva, guest_percpu_mem_size);
                        if (r < 0)
                                exit(-r);
@@ -339,7 +343,7 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
 
        pr_info("All vCPU threads joined\n");
 
-       if (use_uffd) {
+       if (p->use_uffd) {
                char c;
 
                /* Tell the user fault fd handler threads to quit */
@@ -357,43 +361,23 @@ static void run_test(enum vm_guest_mode mode, bool use_uffd,
                perf_test_args.vcpu_args[0].pages * nr_vcpus /
                ((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 100000000.0));
 
-       ucall_uninit(vm);
-       kvm_vm_free(vm);
+       perf_test_destroy_vm(vm);
 
        free(guest_data_prototype);
        free(vcpu_threads);
-       if (use_uffd) {
+       if (p->use_uffd) {
                free(uffd_handler_threads);
                free(uffd_args);
                free(pipefds);
        }
 }
 
-struct guest_mode {
-       bool supported;
-       bool enabled;
-};
-static struct guest_mode guest_modes[NUM_VM_MODES];
-
-#define guest_mode_init(mode, supported, enabled) ({ \
-       guest_modes[mode] = (struct guest_mode){ supported, enabled }; \
-})
-
 static void help(char *name)
 {
-       int i;
-
        puts("");
        printf("usage: %s [-h] [-m mode] [-u] [-d uffd_delay_usec]\n"
               "          [-b memory] [-v vcpus]\n", name);
-       printf(" -m: specify the guest mode ID to test\n"
-              "     (default: test all supported modes)\n"
-              "     This option may be used multiple times.\n"
-              "     Guest mode IDs:\n");
-       for (i = 0; i < NUM_VM_MODES; ++i) {
-               printf("         %d:    %s%s\n", i, vm_guest_mode_string(i),
-                      guest_modes[i].supported ? " (supported)" : "");
-       }
+       guest_modes_help();
        printf(" -u: use User Fault FD to handle vCPU page\n"
               "     faults.\n");
        printf(" -d: add a delay in usec to the User Fault\n"
@@ -410,53 +394,22 @@ static void help(char *name)
 int main(int argc, char *argv[])
 {
        int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
-       bool mode_selected = false;
-       unsigned int mode;
-       int opt, i;
-       bool use_uffd = false;
-       useconds_t uffd_delay = 0;
-
-#ifdef __x86_64__
-       guest_mode_init(VM_MODE_PXXV48_4K, true, true);
-#endif
-#ifdef __aarch64__
-       guest_mode_init(VM_MODE_P40V48_4K, true, true);
-       guest_mode_init(VM_MODE_P40V48_64K, true, true);
-       {
-               unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
-
-               if (limit >= 52)
-                       guest_mode_init(VM_MODE_P52V48_64K, true, true);
-               if (limit >= 48) {
-                       guest_mode_init(VM_MODE_P48V48_4K, true, true);
-                       guest_mode_init(VM_MODE_P48V48_64K, true, true);
-               }
-       }
-#endif
-#ifdef __s390x__
-       guest_mode_init(VM_MODE_P40V48_4K, true, true);
-#endif
+       struct test_params p = {};
+       int opt;
+
+       guest_modes_append_default();
 
        while ((opt = getopt(argc, argv, "hm:ud:b:v:")) != -1) {
                switch (opt) {
                case 'm':
-                       if (!mode_selected) {
-                               for (i = 0; i < NUM_VM_MODES; ++i)
-                                       guest_modes[i].enabled = false;
-                               mode_selected = true;
-                       }
-                       mode = strtoul(optarg, NULL, 10);
-                       TEST_ASSERT(mode < NUM_VM_MODES,
-                                   "Guest mode ID %d too big", mode);
-                       guest_modes[mode].enabled = true;
+                       guest_modes_cmdline(optarg);
                        break;
                case 'u':
-                       use_uffd = true;
+                       p.use_uffd = true;
                        break;
                case 'd':
-                       uffd_delay = strtoul(optarg, NULL, 0);
-                       TEST_ASSERT(uffd_delay >= 0,
-                                   "A negative UFFD delay is not supported.");
+                       p.uffd_delay = strtoul(optarg, NULL, 0);
+                       TEST_ASSERT(p.uffd_delay >= 0, "A negative UFFD delay is not supported.");
                        break;
                case 'b':
                        guest_percpu_mem_size = parse_size(optarg);
@@ -473,14 +426,7 @@ int main(int argc, char *argv[])
                }
        }
 
-       for (i = 0; i < NUM_VM_MODES; ++i) {
-               if (!guest_modes[i].enabled)
-                       continue;
-               TEST_ASSERT(guest_modes[i].supported,
-                           "Guest mode ID %d (%s) not supported.",
-                           i, vm_guest_mode_string(i));
-               run_test(i, use_uffd, uffd_delay);
-       }
+       for_each_guest_mode(run_test, &p);
 
        return 0;
 }
index 9c6a7be..2283a0e 100644 (file)
@@ -8,29 +8,28 @@
  * Copyright (C) 2020, Google, Inc.
  */
 
-#define _GNU_SOURCE /* for program_invocation_name */
-
 #include <stdio.h>
 #include <stdlib.h>
-#include <unistd.h>
 #include <time.h>
 #include <pthread.h>
 #include <linux/bitmap.h>
-#include <linux/bitops.h>
 
 #include "kvm_util.h"
-#include "perf_test_util.h"
-#include "processor.h"
 #include "test_util.h"
+#include "perf_test_util.h"
+#include "guest_modes.h"
 
 /* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop)*/
 #define TEST_HOST_LOOP_N               2UL
 
+static int nr_vcpus = 1;
+static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
+
 /* Host variables */
 static u64 dirty_log_manual_caps;
 static bool host_quit;
 static uint64_t iteration;
-static uint64_t vcpu_last_completed_iteration[MAX_VCPUS];
+static uint64_t vcpu_last_completed_iteration[KVM_MAX_VCPUS];
 
 static void *vcpu_worker(void *data)
 {
@@ -42,7 +41,7 @@ static void *vcpu_worker(void *data)
        struct timespec ts_diff;
        struct timespec total = (struct timespec){0};
        struct timespec avg;
-       struct vcpu_args *vcpu_args = (struct vcpu_args *)data;
+       struct perf_test_vcpu_args *vcpu_args = (struct perf_test_vcpu_args *)data;
        int vcpu_id = vcpu_args->vcpu_id;
 
        vcpu_args_set(vm, vcpu_id, 1, vcpu_id);
@@ -89,9 +88,15 @@ static void *vcpu_worker(void *data)
        return NULL;
 }
 
-static void run_test(enum vm_guest_mode mode, unsigned long iterations,
-                    uint64_t phys_offset, int wr_fract)
+struct test_params {
+       unsigned long iterations;
+       uint64_t phys_offset;
+       int wr_fract;
+};
+
+static void run_test(enum vm_guest_mode mode, void *arg)
 {
+       struct test_params *p = arg;
        pthread_t *vcpu_threads;
        struct kvm_vm *vm;
        unsigned long *bmap;
@@ -106,9 +111,9 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
        struct kvm_enable_cap cap = {};
        struct timespec clear_dirty_log_total = (struct timespec){0};
 
-       vm = create_vm(mode, nr_vcpus, guest_percpu_mem_size);
+       vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size);
 
-       perf_test_args.wr_fract = wr_fract;
+       perf_test_args.wr_fract = p->wr_fract;
 
        guest_num_pages = (nr_vcpus * guest_percpu_mem_size) >> vm_get_page_shift(vm);
        guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
@@ -124,7 +129,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
        vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
        TEST_ASSERT(vcpu_threads, "Memory allocation failed");
 
-       add_vcpus(vm, nr_vcpus, guest_percpu_mem_size);
+       perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size);
 
        sync_global_to_guest(vm, perf_test_args);
 
@@ -150,13 +155,13 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 
        /* Enable dirty logging */
        clock_gettime(CLOCK_MONOTONIC, &start);
-       vm_mem_region_set_flags(vm, TEST_MEM_SLOT_INDEX,
+       vm_mem_region_set_flags(vm, PERF_TEST_MEM_SLOT_INDEX,
                                KVM_MEM_LOG_DIRTY_PAGES);
        ts_diff = timespec_diff_now(start);
        pr_info("Enabling dirty logging time: %ld.%.9lds\n\n",
                ts_diff.tv_sec, ts_diff.tv_nsec);
 
-       while (iteration < iterations) {
+       while (iteration < p->iterations) {
                /*
                 * Incrementing the iteration number will start the vCPUs
                 * dirtying memory again.
@@ -177,7 +182,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
                        iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
 
                clock_gettime(CLOCK_MONOTONIC, &start);
-               kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
+               kvm_vm_get_dirty_log(vm, PERF_TEST_MEM_SLOT_INDEX, bmap);
 
                ts_diff = timespec_diff_now(start);
                get_dirty_log_total = timespec_add(get_dirty_log_total,
@@ -187,7 +192,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 
                if (dirty_log_manual_caps) {
                        clock_gettime(CLOCK_MONOTONIC, &start);
-                       kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap, 0,
+                       kvm_vm_clear_dirty_log(vm, PERF_TEST_MEM_SLOT_INDEX, bmap, 0,
                                               host_num_pages);
 
                        ts_diff = timespec_diff_now(start);
@@ -205,43 +210,30 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 
        /* Disable dirty logging */
        clock_gettime(CLOCK_MONOTONIC, &start);
-       vm_mem_region_set_flags(vm, TEST_MEM_SLOT_INDEX, 0);
+       vm_mem_region_set_flags(vm, PERF_TEST_MEM_SLOT_INDEX, 0);
        ts_diff = timespec_diff_now(start);
        pr_info("Disabling dirty logging time: %ld.%.9lds\n",
                ts_diff.tv_sec, ts_diff.tv_nsec);
 
-       avg = timespec_div(get_dirty_log_total, iterations);
+       avg = timespec_div(get_dirty_log_total, p->iterations);
        pr_info("Get dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
-               iterations, get_dirty_log_total.tv_sec,
+               p->iterations, get_dirty_log_total.tv_sec,
                get_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
 
        if (dirty_log_manual_caps) {
-               avg = timespec_div(clear_dirty_log_total, iterations);
+               avg = timespec_div(clear_dirty_log_total, p->iterations);
                pr_info("Clear dirty log over %lu iterations took %ld.%.9lds. (Avg %ld.%.9lds/iteration)\n",
-                       iterations, clear_dirty_log_total.tv_sec,
+                       p->iterations, clear_dirty_log_total.tv_sec,
                        clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
        }
 
        free(bmap);
        free(vcpu_threads);
-       ucall_uninit(vm);
-       kvm_vm_free(vm);
+       perf_test_destroy_vm(vm);
 }
 
-struct guest_mode {
-       bool supported;
-       bool enabled;
-};
-static struct guest_mode guest_modes[NUM_VM_MODES];
-
-#define guest_mode_init(mode, supported, enabled) ({ \
-       guest_modes[mode] = (struct guest_mode){ supported, enabled }; \
-})
-
 static void help(char *name)
 {
-       int i;
-
        puts("");
        printf("usage: %s [-h] [-i iterations] [-p offset] "
               "[-m mode] [-b vcpu bytes] [-v vcpus]\n", name);
@@ -250,14 +242,7 @@ static void help(char *name)
               TEST_HOST_LOOP_N);
        printf(" -p: specify guest physical test memory offset\n"
               "     Warning: a low offset can conflict with the loaded test code.\n");
-       printf(" -m: specify the guest mode ID to test "
-              "(default: test all supported modes)\n"
-              "     This option may be used multiple times.\n"
-              "     Guest mode IDs:\n");
-       for (i = 0; i < NUM_VM_MODES; ++i) {
-               printf("         %d:    %s%s\n", i, vm_guest_mode_string(i),
-                      guest_modes[i].supported ? " (supported)" : "");
-       }
+       guest_modes_help();
        printf(" -b: specify the size of the memory region which should be\n"
               "     dirtied by each vCPU. e.g. 10M or 3G.\n"
               "     (default: 1G)\n");
@@ -272,74 +257,43 @@ static void help(char *name)
 
 int main(int argc, char *argv[])
 {
-       unsigned long iterations = TEST_HOST_LOOP_N;
-       bool mode_selected = false;
-       uint64_t phys_offset = 0;
-       unsigned int mode;
-       int opt, i;
-       int wr_fract = 1;
+       int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
+       struct test_params p = {
+               .iterations = TEST_HOST_LOOP_N,
+               .wr_fract = 1,
+       };
+       int opt;
 
        dirty_log_manual_caps =
                kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
        dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
                                  KVM_DIRTY_LOG_INITIALLY_SET);
 
-#ifdef __x86_64__
-       guest_mode_init(VM_MODE_PXXV48_4K, true, true);
-#endif
-#ifdef __aarch64__
-       guest_mode_init(VM_MODE_P40V48_4K, true, true);
-       guest_mode_init(VM_MODE_P40V48_64K, true, true);
-
-       {
-               unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
-
-               if (limit >= 52)
-                       guest_mode_init(VM_MODE_P52V48_64K, true, true);
-               if (limit >= 48) {
-                       guest_mode_init(VM_MODE_P48V48_4K, true, true);
-                       guest_mode_init(VM_MODE_P48V48_64K, true, true);
-               }
-       }
-#endif
-#ifdef __s390x__
-       guest_mode_init(VM_MODE_P40V48_4K, true, true);
-#endif
+       guest_modes_append_default();
 
        while ((opt = getopt(argc, argv, "hi:p:m:b:f:v:")) != -1) {
                switch (opt) {
                case 'i':
-                       iterations = strtol(optarg, NULL, 10);
+                       p.iterations = strtol(optarg, NULL, 10);
                        break;
                case 'p':
-                       phys_offset = strtoull(optarg, NULL, 0);
+                       p.phys_offset = strtoull(optarg, NULL, 0);
                        break;
                case 'm':
-                       if (!mode_selected) {
-                               for (i = 0; i < NUM_VM_MODES; ++i)
-                                       guest_modes[i].enabled = false;
-                               mode_selected = true;
-                       }
-                       mode = strtoul(optarg, NULL, 10);
-                       TEST_ASSERT(mode < NUM_VM_MODES,
-                                   "Guest mode ID %d too big", mode);
-                       guest_modes[mode].enabled = true;
+                       guest_modes_cmdline(optarg);
                        break;
                case 'b':
                        guest_percpu_mem_size = parse_size(optarg);
                        break;
                case 'f':
-                       wr_fract = atoi(optarg);
-                       TEST_ASSERT(wr_fract >= 1,
+                       p.wr_fract = atoi(optarg);
+                       TEST_ASSERT(p.wr_fract >= 1,
                                    "Write fraction cannot be less than one");
                        break;
                case 'v':
                        nr_vcpus = atoi(optarg);
-                       TEST_ASSERT(nr_vcpus > 0,
-                                   "Must have a positive number of vCPUs");
-                       TEST_ASSERT(nr_vcpus <= MAX_VCPUS,
-                                   "This test does not currently support\n"
-                                   "more than %d vCPUs.", MAX_VCPUS);
+                       TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
+                                   "Invalid number of vcpus, must be between 1 and %d", max_vcpus);
                        break;
                case 'h':
                default:
@@ -348,18 +302,11 @@ int main(int argc, char *argv[])
                }
        }
 
-       TEST_ASSERT(iterations >= 2, "The test should have at least two iterations");
+       TEST_ASSERT(p.iterations >= 2, "The test should have at least two iterations");
 
-       pr_info("Test iterations: %"PRIu64"\n", iterations);
+       pr_info("Test iterations: %"PRIu64"\n", p.iterations);
 
-       for (i = 0; i < NUM_VM_MODES; ++i) {
-               if (!guest_modes[i].enabled)
-                       continue;
-               TEST_ASSERT(guest_modes[i].supported,
-                           "Guest mode ID %d (%s) not supported.",
-                           i, vm_guest_mode_string(i));
-               run_test(i, iterations, phys_offset, wr_fract);
-       }
+       for_each_guest_mode(run_test, &p);
 
        return 0;
 }
index 471baec..bb2752d 100644 (file)
@@ -9,8 +9,6 @@
 
 #include <stdio.h>
 #include <stdlib.h>
-#include <unistd.h>
-#include <time.h>
 #include <pthread.h>
 #include <semaphore.h>
 #include <sys/types.h>
@@ -20,8 +18,9 @@
 #include <linux/bitops.h>
 #include <asm/barrier.h>
 
-#include "test_util.h"
 #include "kvm_util.h"
+#include "test_util.h"
+#include "guest_modes.h"
 #include "processor.h"
 
 #define VCPU_ID                                1
@@ -673,9 +672,15 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
 #define DIRTY_MEM_BITS 30 /* 1G */
 #define PAGE_SHIFT_4K  12
 
-static void run_test(enum vm_guest_mode mode, unsigned long iterations,
-                    unsigned long interval, uint64_t phys_offset)
+struct test_params {
+       unsigned long iterations;
+       unsigned long interval;
+       uint64_t phys_offset;
+};
+
+static void run_test(enum vm_guest_mode mode, void *arg)
 {
+       struct test_params *p = arg;
        struct kvm_vm *vm;
        unsigned long *bmap;
 
@@ -709,12 +714,12 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
        host_page_size = getpagesize();
        host_num_pages = vm_num_host_pages(mode, guest_num_pages);
 
-       if (!phys_offset) {
+       if (!p->phys_offset) {
                guest_test_phys_mem = (vm_get_max_gfn(vm) -
                                       guest_num_pages) * guest_page_size;
                guest_test_phys_mem &= ~(host_page_size - 1);
        } else {
-               guest_test_phys_mem = phys_offset;
+               guest_test_phys_mem = p->phys_offset;
        }
 
 #ifdef __s390x__
@@ -758,9 +763,9 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
 
        pthread_create(&vcpu_thread, NULL, vcpu_worker, vm);
 
-       while (iteration < iterations) {
+       while (iteration < p->iterations) {
                /* Give the vcpu thread some time to dirty some pages */
-               usleep(interval * 1000);
+               usleep(p->interval * 1000);
                log_mode_collect_dirty_pages(vm, TEST_MEM_SLOT_INDEX,
                                             bmap, host_num_pages);
                vm_dirty_log_verify(mode, bmap);
@@ -783,20 +788,8 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
        kvm_vm_free(vm);
 }
 
-struct guest_mode {
-       bool supported;
-       bool enabled;
-};
-static struct guest_mode guest_modes[NUM_VM_MODES];
-
-#define guest_mode_init(mode, supported, enabled) ({ \
-       guest_modes[mode] = (struct guest_mode){ supported, enabled }; \
-})
-
 static void help(char *name)
 {
-       int i;
-
        puts("");
        printf("usage: %s [-h] [-i iterations] [-I interval] "
               "[-p offset] [-m mode]\n", name);
@@ -813,51 +806,23 @@ static void help(char *name)
        printf(" -M: specify the host logging mode "
               "(default: run all log modes).  Supported modes: \n\t");
        log_modes_dump();
-       printf(" -m: specify the guest mode ID to test "
-              "(default: test all supported modes)\n"
-              "     This option may be used multiple times.\n"
-              "     Guest mode IDs:\n");
-       for (i = 0; i < NUM_VM_MODES; ++i) {
-               printf("         %d:    %s%s\n", i, vm_guest_mode_string(i),
-                      guest_modes[i].supported ? " (supported)" : "");
-       }
+       guest_modes_help();
        puts("");
        exit(0);
 }
 
 int main(int argc, char *argv[])
 {
-       unsigned long iterations = TEST_HOST_LOOP_N;
-       unsigned long interval = TEST_HOST_LOOP_INTERVAL;
-       bool mode_selected = false;
-       uint64_t phys_offset = 0;
-       unsigned int mode;
-       int opt, i, j;
+       struct test_params p = {
+               .iterations = TEST_HOST_LOOP_N,
+               .interval = TEST_HOST_LOOP_INTERVAL,
+       };
+       int opt, i;
 
        sem_init(&dirty_ring_vcpu_stop, 0, 0);
        sem_init(&dirty_ring_vcpu_cont, 0, 0);
 
-#ifdef __x86_64__
-       guest_mode_init(VM_MODE_PXXV48_4K, true, true);
-#endif
-#ifdef __aarch64__
-       guest_mode_init(VM_MODE_P40V48_4K, true, true);
-       guest_mode_init(VM_MODE_P40V48_64K, true, true);
-
-       {
-               unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
-
-               if (limit >= 52)
-                       guest_mode_init(VM_MODE_P52V48_64K, true, true);
-               if (limit >= 48) {
-                       guest_mode_init(VM_MODE_P48V48_4K, true, true);
-                       guest_mode_init(VM_MODE_P48V48_64K, true, true);
-               }
-       }
-#endif
-#ifdef __s390x__
-       guest_mode_init(VM_MODE_P40V48_4K, true, true);
-#endif
+       guest_modes_append_default();
 
        while ((opt = getopt(argc, argv, "c:hi:I:p:m:M:")) != -1) {
                switch (opt) {
@@ -865,24 +830,16 @@ int main(int argc, char *argv[])
                        test_dirty_ring_count = strtol(optarg, NULL, 10);
                        break;
                case 'i':
-                       iterations = strtol(optarg, NULL, 10);
+                       p.iterations = strtol(optarg, NULL, 10);
                        break;
                case 'I':
-                       interval = strtol(optarg, NULL, 10);
+                       p.interval = strtol(optarg, NULL, 10);
                        break;
                case 'p':
-                       phys_offset = strtoull(optarg, NULL, 0);
+                       p.phys_offset = strtoull(optarg, NULL, 0);
                        break;
                case 'm':
-                       if (!mode_selected) {
-                               for (i = 0; i < NUM_VM_MODES; ++i)
-                                       guest_modes[i].enabled = false;
-                               mode_selected = true;
-                       }
-                       mode = strtoul(optarg, NULL, 10);
-                       TEST_ASSERT(mode < NUM_VM_MODES,
-                                   "Guest mode ID %d too big", mode);
-                       guest_modes[mode].enabled = true;
+                       guest_modes_cmdline(optarg);
                        break;
                case 'M':
                        if (!strcmp(optarg, "all")) {
@@ -911,32 +868,24 @@ int main(int argc, char *argv[])
                }
        }
 
-       TEST_ASSERT(iterations > 2, "Iterations must be greater than two");
-       TEST_ASSERT(interval > 0, "Interval must be greater than zero");
+       TEST_ASSERT(p.iterations > 2, "Iterations must be greater than two");
+       TEST_ASSERT(p.interval > 0, "Interval must be greater than zero");
 
        pr_info("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
-               iterations, interval);
+               p.iterations, p.interval);
 
        srandom(time(0));
 
-       for (i = 0; i < NUM_VM_MODES; ++i) {
-               if (!guest_modes[i].enabled)
-                       continue;
-               TEST_ASSERT(guest_modes[i].supported,
-                           "Guest mode ID %d (%s) not supported.",
-                           i, vm_guest_mode_string(i));
-               if (host_log_mode_option == LOG_MODE_ALL) {
-                       /* Run each log mode */
-                       for (j = 0; j < LOG_MODE_NUM; j++) {
-                               pr_info("Testing Log Mode '%s'\n",
-                                       log_modes[j].name);
-                               host_log_mode = j;
-                               run_test(i, iterations, interval, phys_offset);
-                       }
-               } else {
-                       host_log_mode = host_log_mode_option;
-                       run_test(i, iterations, interval, phys_offset);
+       if (host_log_mode_option == LOG_MODE_ALL) {
+               /* Run each log mode */
+               for (i = 0; i < LOG_MODE_NUM; i++) {
+                       pr_info("Testing Log Mode '%s'\n", log_modes[i].name);
+                       host_log_mode = i;
+                       for_each_guest_mode(run_test, &p);
                }
+       } else {
+               host_log_mode = host_log_mode_option;
+               for_each_guest_mode(run_test, &p);
        }
 
        return 0;
diff --git a/tools/testing/selftests/kvm/include/guest_modes.h b/tools/testing/selftests/kvm/include/guest_modes.h
new file mode 100644 (file)
index 0000000..b691df3
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020, Red Hat, Inc.
+ */
+#include "kvm_util.h" /* NOTE(review): header has no include guard -- TODO confirm single inclusion per TU */
+
+struct guest_mode {
+       bool supported; /* mode can actually run on this host */
+       bool enabled; /* mode selected for this test run */
+};
+
+extern struct guest_mode guest_modes[NUM_VM_MODES];
+
+#define guest_mode_append(mode, supported, enabled) ({ \
+       guest_modes[mode] = (struct guest_mode){ supported, enabled }; \
+})
+
+void guest_modes_append_default(void); /* enable every mode this host supports */
+void for_each_guest_mode(void (*func)(enum vm_guest_mode, void *), void *arg); /* run func(mode, arg) per enabled mode */
+void guest_modes_help(void); /* print the -m option help text */
+void guest_modes_cmdline(const char *arg); /* parse one -m optarg (mode ID) */
index dfa9d36..5cbb861 100644 (file)
@@ -16,6 +16,7 @@
 
 #include "sparsebit.h"
 
+#define KVM_MAX_VCPUS 512
 
 /*
  * Callers of kvm_util only have an incomplete/opaque description of the
@@ -70,6 +71,14 @@ enum vm_guest_mode {
 #define vm_guest_mode_string(m) vm_guest_mode_string[m]
 extern const char * const vm_guest_mode_string[];
 
+struct vm_guest_mode_params {
+       unsigned int pa_bits;
+       unsigned int va_bits;
+       unsigned int page_size;
+       unsigned int page_shift;
+};
+extern const struct vm_guest_mode_params vm_guest_mode_params[];
+
 enum vm_mem_backing_src_type {
        VM_MEM_SRC_ANONYMOUS,
        VM_MEM_SRC_ANONYMOUS_THP,
index 239421e..b118882 100644 (file)
@@ -9,38 +9,15 @@
 #define SELFTEST_KVM_PERF_TEST_UTIL_H
 
 #include "kvm_util.h"
-#include "processor.h"
-
-#define MAX_VCPUS 512
-
-#define PAGE_SHIFT_4K  12
-#define PTES_PER_4K_PT 512
-
-#define TEST_MEM_SLOT_INDEX            1
 
 /* Default guest test virtual memory offset */
 #define DEFAULT_GUEST_TEST_MEM         0xc0000000
 
 #define DEFAULT_PER_VCPU_MEM_SIZE      (1 << 30) /* 1G */
 
-/*
- * Guest physical memory offset of the testing memory slot.
- * This will be set to the topmost valid physical address minus
- * the test memory size.
- */
-static uint64_t guest_test_phys_mem;
-
-/*
- * Guest virtual memory offset of the testing memory slot.
- * Must not conflict with identity mapped test code.
- */
-static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
-static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
-
-/* Number of VCPUs for the test */
-static int nr_vcpus = 1;
+#define PERF_TEST_MEM_SLOT_INDEX       1
 
-struct vcpu_args {
+struct perf_test_vcpu_args {
        uint64_t gva;
        uint64_t pages;
 
@@ -54,141 +31,21 @@ struct perf_test_args {
        uint64_t guest_page_size;
        int wr_fract;
 
-       struct vcpu_args vcpu_args[MAX_VCPUS];
+       struct perf_test_vcpu_args vcpu_args[KVM_MAX_VCPUS];
 };
 
-static struct perf_test_args perf_test_args;
+extern struct perf_test_args perf_test_args;
 
 /*
- * Continuously write to the first 8 bytes of each page in the
- * specified region.
+ * Guest physical memory offset of the testing memory slot.
+ * This will be set to the topmost valid physical address minus
+ * the test memory size.
  */
-static void guest_code(uint32_t vcpu_id)
-{
-       struct vcpu_args *vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
-       uint64_t gva;
-       uint64_t pages;
-       int i;
-
-       /* Make sure vCPU args data structure is not corrupt. */
-       GUEST_ASSERT(vcpu_args->vcpu_id == vcpu_id);
-
-       gva = vcpu_args->gva;
-       pages = vcpu_args->pages;
-
-       while (true) {
-               for (i = 0; i < pages; i++) {
-                       uint64_t addr = gva + (i * perf_test_args.guest_page_size);
-
-                       if (i % perf_test_args.wr_fract == 0)
-                               *(uint64_t *)addr = 0x0123456789ABCDEF;
-                       else
-                               READ_ONCE(*(uint64_t *)addr);
-               }
-
-               GUEST_SYNC(1);
-       }
-}
-
-static struct kvm_vm *create_vm(enum vm_guest_mode mode, int vcpus,
-                               uint64_t vcpu_memory_bytes)
-{
-       struct kvm_vm *vm;
-       uint64_t pages = DEFAULT_GUEST_PHY_PAGES;
-       uint64_t guest_num_pages;
-
-       /* Account for a few pages per-vCPU for stacks */
-       pages += DEFAULT_STACK_PGS * vcpus;
-
-       /*
-        * Reserve twice the ammount of memory needed to map the test region and
-        * the page table / stacks region, at 4k, for page tables. Do the
-        * calculation with 4K page size: the smallest of all archs. (e.g., 64K
-        * page size guest will need even less memory for page tables).
-        */
-       pages += (2 * pages) / PTES_PER_4K_PT;
-       pages += ((2 * vcpus * vcpu_memory_bytes) >> PAGE_SHIFT_4K) /
-                PTES_PER_4K_PT;
-       pages = vm_adjust_num_guest_pages(mode, pages);
-
-       pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
-
-       vm = vm_create(mode, pages, O_RDWR);
-       kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
-#ifdef __x86_64__
-       vm_create_irqchip(vm);
-#endif
-
-       perf_test_args.vm = vm;
-       perf_test_args.guest_page_size = vm_get_page_size(vm);
-       perf_test_args.host_page_size = getpagesize();
-
-       TEST_ASSERT(vcpu_memory_bytes % perf_test_args.guest_page_size == 0,
-                   "Guest memory size is not guest page size aligned.");
-
-       guest_num_pages = (vcpus * vcpu_memory_bytes) /
-                         perf_test_args.guest_page_size;
-       guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
-
-       /*
-        * If there should be more memory in the guest test region than there
-        * can be pages in the guest, it will definitely cause problems.
-        */
-       TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm),
-                   "Requested more guest memory than address space allows.\n"
-                   "    guest pages: %lx max gfn: %x vcpus: %d wss: %lx]\n",
-                   guest_num_pages, vm_get_max_gfn(vm), vcpus,
-                   vcpu_memory_bytes);
-
-       TEST_ASSERT(vcpu_memory_bytes % perf_test_args.host_page_size == 0,
-                   "Guest memory size is not host page size aligned.");
-
-       guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) *
-                             perf_test_args.guest_page_size;
-       guest_test_phys_mem &= ~(perf_test_args.host_page_size - 1);
-
-#ifdef __s390x__
-       /* Align to 1M (segment size) */
-       guest_test_phys_mem &= ~((1 << 20) - 1);
-#endif
-
-       pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
-
-       /* Add an extra memory slot for testing */
-       vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
-                                   guest_test_phys_mem,
-                                   TEST_MEM_SLOT_INDEX,
-                                   guest_num_pages, 0);
-
-       /* Do mapping for the demand paging memory slot */
-       virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
-
-       ucall_init(vm, NULL);
-
-       return vm;
-}
-
-static void add_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_bytes)
-{
-       vm_paddr_t vcpu_gpa;
-       struct vcpu_args *vcpu_args;
-       int vcpu_id;
-
-       for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
-               vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
-
-               vm_vcpu_add_default(vm, vcpu_id, guest_code);
-
-               vcpu_args->vcpu_id = vcpu_id;
-               vcpu_args->gva = guest_test_virt_mem +
-                                (vcpu_id * vcpu_memory_bytes);
-               vcpu_args->pages = vcpu_memory_bytes /
-                                  perf_test_args.guest_page_size;
+extern uint64_t guest_test_phys_mem;
 
-               vcpu_gpa = guest_test_phys_mem + (vcpu_id * vcpu_memory_bytes);
-               pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
-                        vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_memory_bytes);
-       }
-}
+struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
+                               uint64_t vcpu_memory_bytes);
+void perf_test_destroy_vm(struct kvm_vm *vm);
+void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_bytes);
 
 #endif /* SELFTEST_KVM_PERF_TEST_UTIL_H */
diff --git a/tools/testing/selftests/kvm/lib/guest_modes.c b/tools/testing/selftests/kvm/lib/guest_modes.c
new file mode 100644 (file)
index 0000000..25bff30
--- /dev/null
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020, Red Hat, Inc.
+ */
+#include "guest_modes.h"
+
+struct guest_mode guest_modes[NUM_VM_MODES];
+
+void guest_modes_append_default(void)
+{
+       guest_mode_append(VM_MODE_DEFAULT, true, true);
+
+#ifdef __aarch64__
+       guest_mode_append(VM_MODE_P40V48_64K, true, true);
+       {
+               unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
+               if (limit >= 52)
+                       guest_mode_append(VM_MODE_P52V48_64K, true, true);
+               if (limit >= 48) {
+                       guest_mode_append(VM_MODE_P48V48_4K, true, true);
+                       guest_mode_append(VM_MODE_P48V48_64K, true, true);
+               }
+       }
+#endif
+}
+
+void for_each_guest_mode(void (*func)(enum vm_guest_mode, void *), void *arg)
+{
+       int i;
+
+       for (i = 0; i < NUM_VM_MODES; ++i) {
+               if (!guest_modes[i].enabled)
+                       continue;
+               TEST_ASSERT(guest_modes[i].supported,
+                           "Guest mode ID %d (%s) not supported.",
+                           i, vm_guest_mode_string(i));
+               func(i, arg);
+       }
+}
+
+void guest_modes_help(void)
+{
+       int i;
+
+       printf(" -m: specify the guest mode ID to test\n"
+              "     (default: test all supported modes)\n"
+              "     This option may be used multiple times.\n"
+              "     Guest mode IDs:\n");
+       for (i = 0; i < NUM_VM_MODES; ++i) {
+               printf("         %d:    %s%s\n", i, vm_guest_mode_string(i),
+                      guest_modes[i].supported ? " (supported)" : "");
+       }
+}
+
+void guest_modes_cmdline(const char *arg)
+{
+       static bool mode_selected;
+       unsigned int mode;
+       int i;
+
+       if (!mode_selected) {
+               for (i = 0; i < NUM_VM_MODES; ++i)
+                       guest_modes[i].enabled = false;
+               mode_selected = true;
+       }
+
+       mode = strtoul(optarg, NULL, 10);
+       TEST_ASSERT(mode < NUM_VM_MODES, "Guest mode ID %d too big", mode);
+       guest_modes[mode].enabled = true;
+}
index 88ef706..fa5a90e 100644 (file)
@@ -153,14 +153,7 @@ const char * const vm_guest_mode_string[] = {
 _Static_assert(sizeof(vm_guest_mode_string)/sizeof(char *) == NUM_VM_MODES,
               "Missing new mode strings?");
 
-struct vm_guest_mode_params {
-       unsigned int pa_bits;
-       unsigned int va_bits;
-       unsigned int page_size;
-       unsigned int page_shift;
-};
-
-static const struct vm_guest_mode_params vm_guest_mode_params[] = {
+const struct vm_guest_mode_params vm_guest_mode_params[] = {
        { 52, 48,  0x1000, 12 },
        { 52, 48, 0x10000, 16 },
        { 48, 48,  0x1000, 12 },
diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c
new file mode 100644 (file)
index 0000000..9be1944
--- /dev/null
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020, Google LLC.
+ */
+
+#include "kvm_util.h"
+#include "perf_test_util.h"
+#include "processor.h"
+
+struct perf_test_args perf_test_args;
+
+uint64_t guest_test_phys_mem;
+
+/*
+ * Guest virtual memory offset of the testing memory slot.
+ * Must not conflict with identity mapped test code.
+ */
+static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
+
+/*
+ * Continuously write to the first 8 bytes of each page in the
+ * specified region.
+ */
+static void guest_code(uint32_t vcpu_id)
+{
+       struct perf_test_vcpu_args *vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
+       uint64_t gva;
+       uint64_t pages;
+       int i;
+
+       /* Make sure vCPU args data structure is not corrupt. */
+       GUEST_ASSERT(vcpu_args->vcpu_id == vcpu_id);
+
+       gva = vcpu_args->gva;
+       pages = vcpu_args->pages;
+
+       while (true) {
+               for (i = 0; i < pages; i++) {
+                       uint64_t addr = gva + (i * perf_test_args.guest_page_size);
+
+                       if (i % perf_test_args.wr_fract == 0)
+                               *(uint64_t *)addr = 0x0123456789ABCDEF;
+                       else
+                               READ_ONCE(*(uint64_t *)addr);
+               }
+
+               GUEST_SYNC(1);
+       }
+}
+
+struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
+                                  uint64_t vcpu_memory_bytes)
+{
+       struct kvm_vm *vm;
+       uint64_t guest_num_pages;
+
+       pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
+
+       perf_test_args.host_page_size = getpagesize();
+       perf_test_args.guest_page_size = vm_guest_mode_params[mode].page_size;
+
+       guest_num_pages = vm_adjust_num_guest_pages(mode,
+                               (vcpus * vcpu_memory_bytes) / perf_test_args.guest_page_size);
+
+       TEST_ASSERT(vcpu_memory_bytes % perf_test_args.host_page_size == 0,
+                   "Guest memory size is not host page size aligned.");
+       TEST_ASSERT(vcpu_memory_bytes % perf_test_args.guest_page_size == 0,
+                   "Guest memory size is not guest page size aligned.");
+
+       vm = vm_create_with_vcpus(mode, vcpus,
+                                 (vcpus * vcpu_memory_bytes) / perf_test_args.guest_page_size,
+                                 0, guest_code, NULL);
+
+       perf_test_args.vm = vm;
+
+       /*
+        * If there should be more memory in the guest test region than there
+        * can be pages in the guest, it will definitely cause problems.
+        */
+       TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm),
+                   "Requested more guest memory than address space allows.\n"
+                   "    guest pages: %lx max gfn: %x vcpus: %d wss: %lx]\n",
+                   guest_num_pages, vm_get_max_gfn(vm), vcpus,
+                   vcpu_memory_bytes);
+
+       guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) *
+                             perf_test_args.guest_page_size;
+       guest_test_phys_mem &= ~(perf_test_args.host_page_size - 1);
+#ifdef __s390x__
+       /* Align to 1M (segment size) */
+       guest_test_phys_mem &= ~((1 << 20) - 1);
+#endif
+       pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
+
+       /* Add an extra memory slot for testing */
+       vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
+                                   guest_test_phys_mem,
+                                   PERF_TEST_MEM_SLOT_INDEX,
+                                   guest_num_pages, 0);
+
+       /* Do mapping for the demand paging memory slot */
+       virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0);
+
+       ucall_init(vm, NULL);
+
+       return vm;
+}
+
+void perf_test_destroy_vm(struct kvm_vm *vm)
+{
+       ucall_uninit(vm);
+       kvm_vm_free(vm);
+}
+
+void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_bytes)
+{
+       vm_paddr_t vcpu_gpa;
+       struct perf_test_vcpu_args *vcpu_args;
+       int vcpu_id;
+
+       for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
+               vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
+
+               vcpu_args->vcpu_id = vcpu_id;
+               vcpu_args->gva = guest_test_virt_mem +
+                                (vcpu_id * vcpu_memory_bytes);
+               vcpu_args->pages = vcpu_memory_bytes /
+                                  perf_test_args.guest_page_size;
+
+               vcpu_gpa = guest_test_phys_mem + (vcpu_id * vcpu_memory_bytes);
+               pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
+                        vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_memory_bytes);
+       }
+}
index eb693a3..4c7d336 100755 (executable)
@@ -869,7 +869,7 @@ ipv6_torture()
        pid3=$!
        ip netns exec me ping -f 2001:db8:101::2 >/dev/null 2>&1 &
        pid4=$!
-       ip netns exec me mausezahn veth1 -B 2001:db8:101::2 -A 2001:db8:91::1 -c 0 -t tcp "dp=1-1023, flags=syn" >/dev/null 2>&1 &
+       ip netns exec me mausezahn -6 veth1 -B 2001:db8:101::2 -A 2001:db8:91::1 -c 0 -t tcp "dp=1-1023, flags=syn" >/dev/null 2>&1 &
        pid5=$!
 
        sleep 300
index 84205c3..2b57077 100755 (executable)
@@ -1055,7 +1055,6 @@ ipv6_addr_metric_test()
 
        check_route6 "2001:db8:104::1 dev dummy2 proto kernel metric 260"
        log_test $? 0 "Set metric with peer route on local side"
-       log_test $? 0 "User specified metric on local address"
        check_route6 "2001:db8:104::2 dev dummy2 proto kernel metric 260"
        log_test $? 0 "Set metric with peer route on peer side"
 
index 464e31e..64cd2e2 100755 (executable)
 # - list_flush_ipv6_exception
 #      Using the same topology as in pmtu_ipv6, create exceptions, and check
 #      they are shown when listing exception caches, gone after flushing them
-
+#
+# - pmtu_ipv4_route_change
+#      Use the same topology as in pmtu_ipv4, but issue a route replacement
+#      command and delete the corresponding device afterward. This tests for
+#      proper cleanup of the PMTU exceptions by the route replacement path.
+#      Device unregistration should complete successfully
+#
+# - pmtu_ipv6_route_change
+#      Same as above but with IPv6
 
 # Kselftest framework requirement - SKIP code is 4.
 ksft_skip=4
@@ -224,7 +232,9 @@ tests="
        cleanup_ipv4_exception          ipv4: cleanup of cached exceptions      1
        cleanup_ipv6_exception          ipv6: cleanup of cached exceptions      1
        list_flush_ipv4_exception       ipv4: list and flush cached exceptions  1
-       list_flush_ipv6_exception       ipv6: list and flush cached exceptions  1"
+       list_flush_ipv6_exception       ipv6: list and flush cached exceptions  1
+       pmtu_ipv4_route_change          ipv4: PMTU exception w/route replace    1
+       pmtu_ipv6_route_change          ipv6: PMTU exception w/route replace    1"
 
 NS_A="ns-A"
 NS_B="ns-B"
@@ -1782,6 +1792,63 @@ test_list_flush_ipv6_exception() {
        return ${fail}
 }
 
+test_pmtu_ipvX_route_change() {
+       family=${1}
+
+       setup namespaces routing || return 2
+       trace "${ns_a}"  veth_A-R1    "${ns_r1}" veth_R1-A \
+             "${ns_r1}" veth_R1-B    "${ns_b}"  veth_B-R1 \
+             "${ns_a}"  veth_A-R2    "${ns_r2}" veth_R2-A \
+             "${ns_r2}" veth_R2-B    "${ns_b}"  veth_B-R2
+
+       if [ ${family} -eq 4 ]; then
+               ping=ping
+               dst1="${prefix4}.${b_r1}.1"
+               dst2="${prefix4}.${b_r2}.1"
+               gw="${prefix4}.${a_r1}.2"
+       else
+               ping=${ping6}
+               dst1="${prefix6}:${b_r1}::1"
+               dst2="${prefix6}:${b_r2}::1"
+               gw="${prefix6}:${a_r1}::2"
+       fi
+
+       # Set up initial MTU values
+       mtu "${ns_a}"  veth_A-R1 2000
+       mtu "${ns_r1}" veth_R1-A 2000
+       mtu "${ns_r1}" veth_R1-B 1400
+       mtu "${ns_b}"  veth_B-R1 1400
+
+       mtu "${ns_a}"  veth_A-R2 2000
+       mtu "${ns_r2}" veth_R2-A 2000
+       mtu "${ns_r2}" veth_R2-B 1500
+       mtu "${ns_b}"  veth_B-R2 1500
+
+       # Create route exceptions
+       run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst1}
+       run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst2}
+
+       # Check that exceptions have been created with the correct PMTU
+       pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst1})"
+       check_pmtu_value "1400" "${pmtu_1}" "exceeding MTU" || return 1
+       pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})"
+       check_pmtu_value "1500" "${pmtu_2}" "exceeding MTU" || return 1
+
+       # Replace the route from A to R1
+       run_cmd ${ns_a} ip route change default via ${gw}
+
+       # Delete the device in A
+       run_cmd ${ns_a} ip link del "veth_A-R1"
+}
+
+test_pmtu_ipv4_route_change() {
+       test_pmtu_ipvX_route_change 4
+}
+
+test_pmtu_ipv6_route_change() {
+       test_pmtu_ipvX_route_change 6
+}
+
 usage() {
        echo
        echo "$0 [OPTIONS] [TEST]..."
index cb0d189..e0088c2 100644 (file)
@@ -103,8 +103,8 @@ FIXTURE(tls)
 
 FIXTURE_VARIANT(tls)
 {
-       u16 tls_version;
-       u16 cipher_type;
+       uint16_t tls_version;
+       uint16_t cipher_type;
 };
 
 FIXTURE_VARIANT_ADD(tls, 12_gcm)
index ac2a30b..f8a19f5 100755 (executable)
@@ -5,6 +5,14 @@
 
 readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
 
+# set global exit status, but never reset nonzero one.
+check_err()
+{
+       if [ $ret -eq 0 ]; then
+               ret=$1
+       fi
+}
+
 cleanup() {
        local -r jobs="$(jobs -p)"
        local -r ns="$(ip netns list|grep $PEER_NS)"
@@ -44,7 +52,9 @@ run_one() {
        # Hack: let bg programs complete the startup
        sleep 0.1
        ./udpgso_bench_tx ${tx_args}
+       ret=$?
        wait $(jobs -p)
+       return $ret
 }
 
 run_test() {
@@ -87,8 +97,10 @@ run_one_nat() {
 
        sleep 0.1
        ./udpgso_bench_tx ${tx_args}
+       ret=$?
        kill -INT $pid
        wait $(jobs -p)
+       return $ret
 }
 
 run_one_2sock() {
@@ -110,7 +122,9 @@ run_one_2sock() {
        sleep 0.1
        # first UDP GSO socket should be closed at this point
        ./udpgso_bench_tx ${tx_args}
+       ret=$?
        wait $(jobs -p)
+       return $ret
 }
 
 run_nat_test() {
@@ -131,36 +145,54 @@ run_all() {
        local -r core_args="-l 4"
        local -r ipv4_args="${core_args} -4 -D 192.168.1.1"
        local -r ipv6_args="${core_args} -6 -D 2001:db8::1"
+       ret=0
 
        echo "ipv4"
        run_test "no GRO" "${ipv4_args} -M 10 -s 1400" "-4 -n 10 -l 1400"
+       check_err $?
 
        # explicitly check we are not receiving UDP_SEGMENT cmsg (-S -1)
        # when GRO does not take place
        run_test "no GRO chk cmsg" "${ipv4_args} -M 10 -s 1400" "-4 -n 10 -l 1400 -S -1"
+       check_err $?
 
        # the GSO packets are aggregated because:
        # * veth schedule napi after each xmit
        # * segmentation happens in BH context, veth napi poll is delayed after
        #   the transmission of the last segment
        run_test "GRO" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720"
+       check_err $?
        run_test "GRO chk cmsg" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720 -S 1472"
+       check_err $?
        run_test "GRO with custom segment size" "${ipv4_args} -M 1 -s 14720 -S 500 " "-4 -n 1 -l 14720"
+       check_err $?
        run_test "GRO with custom segment size cmsg" "${ipv4_args} -M 1 -s 14720 -S 500 " "-4 -n 1 -l 14720 -S 500"
+       check_err $?
 
        run_nat_test "bad GRO lookup" "${ipv4_args} -M 1 -s 14720 -S 0" "-n 10 -l 1472"
+       check_err $?
        run_2sock_test "multiple GRO socks" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720 -S 1472"
+       check_err $?
 
        echo "ipv6"
        run_test "no GRO" "${ipv6_args} -M 10 -s 1400" "-n 10 -l 1400"
+       check_err $?
        run_test "no GRO chk cmsg" "${ipv6_args} -M 10 -s 1400" "-n 10 -l 1400 -S -1"
+       check_err $?
        run_test "GRO" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 1 -l 14520"
+       check_err $?
        run_test "GRO chk cmsg" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 1 -l 14520 -S 1452"
+       check_err $?
        run_test "GRO with custom segment size" "${ipv6_args} -M 1 -s 14520 -S 500" "-n 1 -l 14520"
+       check_err $?
        run_test "GRO with custom segment size cmsg" "${ipv6_args} -M 1 -s 14520 -S 500" "-n 1 -l 14520 -S 500"
+       check_err $?
 
        run_nat_test "bad GRO lookup" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 10 -l 1452"
+       check_err $?
        run_2sock_test "multiple GRO socks" "${ipv6_args} -M 1 -s 14520 -S 0 " "-n 1 -l 14520 -S 1452"
+       check_err $?
+       return $ret
 }
 
 if [ ! -f ../bpf/xdp_dummy.o ]; then
@@ -180,3 +212,5 @@ elif [[ $1 == "__subprocess_2sock" ]]; then
        shift
        run_one_2sock $@
 fi
+
+exit $?
index a374e10..3006a8e 100644 (file)
@@ -4,7 +4,8 @@
 TEST_PROGS := nft_trans_stress.sh nft_nat.sh bridge_brouter.sh \
        conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \
        nft_concat_range.sh nft_conntrack_helper.sh \
-       nft_queue.sh nft_meta.sh
+       nft_queue.sh nft_meta.sh \
+       ipip-conntrack-mtu.sh
 
 LDLIBS = -lmnl
 TEST_GEN_FILES =  nf-queue
diff --git a/tools/testing/selftests/netfilter/ipip-conntrack-mtu.sh b/tools/testing/selftests/netfilter/ipip-conntrack-mtu.sh
new file mode 100755 (executable)
index 0000000..4a6f5c3
--- /dev/null
@@ -0,0 +1,206 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+# Conntrack needs to reassemble fragments in order to have complete
+# packets for rule matching.  Reassembly can lead to packet loss.
+
+# Consider the following setup:
+#            +--------+       +---------+       +--------+
+#            |Router A|-------|Wanrouter|-------|Router B|
+#            |        |.IPIP..|         |..IPIP.|        |
+#            +--------+       +---------+       +--------+
+#           /                  mtu 1400                   \
+#          /                                               \
+#+--------+                                                 +--------+
+#|Client A|                                                 |Client B|
+#|        |                                                 |        |
+#+--------+                                                 +--------+
+
+# Router A and Router B use IPIP tunnel interfaces to tunnel traffic
+# between Client A and Client B over WAN. Wanrouter has MTU 1400 set
+# on its interfaces.
+
+rnd=$(mktemp -u XXXXXXXX)
+rx=$(mktemp)
+
+r_a="ns-ra-$rnd"
+r_b="ns-rb-$rnd"
+r_w="ns-rw-$rnd"
+c_a="ns-ca-$rnd"
+c_b="ns-cb-$rnd"
+
+checktool (){
+       if ! $1 > /dev/null 2>&1; then
+               echo "SKIP: Could not $2"
+               exit $ksft_skip
+       fi
+}
+
+checktool "iptables --version" "run test without iptables"
+checktool "ip -Version" "run test without ip tool"
+checktool "which nc" "run test without nc (netcat)"
+checktool "ip netns add ${r_a}" "create net namespace"
+
+for n in ${r_b} ${r_w} ${c_a} ${c_b};do
+       ip netns add ${n}
+done
+
+cleanup() {
+       for n in ${r_a} ${r_b} ${r_w} ${c_a} ${c_b};do
+               ip netns del ${n}
+       done
+       rm -f ${rx}
+}
+
+trap cleanup EXIT
+
+test_path() {
+       msg="$1"
+
+       ip netns exec ${c_b} nc -n -w 3 -q 3 -u -l -p 5000 > ${rx} < /dev/null &
+
+       sleep 1
+       for i in 1 2 3; do
+               head -c1400 /dev/zero | tr "\000" "a" | ip netns exec ${c_a} nc -n -w 1 -u 192.168.20.2 5000
+       done
+
+       wait
+
+       bytes=$(wc -c < ${rx})
+
+       if [ $bytes -eq 1400 ];then
+               echo "OK: PMTU $msg connection tracking"
+       else
+               echo "FAIL: PMTU $msg connection tracking: got $bytes, expected 1400"
+               exit 1
+       fi
+}
+
+# Detailed setup for Router A
+# ---------------------------
+# Interfaces:
+# eth0: 10.2.2.1/24
+# eth1: 192.168.10.1/24
+# ipip0: No IP address, local 10.2.2.1 remote 10.4.4.1
+# Routes:
+# 192.168.20.0/24 dev ipip0    (192.168.20.0/24 is subnet of Client B)
+# 10.4.4.1 via 10.2.2.254      (Router B via Wanrouter)
+# No iptables rules at all.
+
+ip link add veth0 netns ${r_a} type veth peer name veth0 netns ${r_w}
+ip link add veth1 netns ${r_a} type veth peer name veth0 netns ${c_a}
+
+l_addr="10.2.2.1"
+r_addr="10.4.4.1"
+ip netns exec ${r_a} ip link add ipip0 type ipip local ${l_addr} remote ${r_addr} mode ipip || exit $ksft_skip
+
+for dev in lo veth0 veth1 ipip0; do
+    ip -net ${r_a} link set $dev up
+done
+
+ip -net ${r_a} addr add 10.2.2.1/24 dev veth0
+ip -net ${r_a} addr add 192.168.10.1/24 dev veth1
+
+ip -net ${r_a} route add 192.168.20.0/24 dev ipip0
+ip -net ${r_a} route add 10.4.4.0/24 via 10.2.2.254
+
+ip netns exec ${r_a} sysctl -q net.ipv4.conf.all.forwarding=1 > /dev/null
+
+# Detailed setup for Router B
+# ---------------------------
+# Interfaces:
+# eth0: 10.4.4.1/24
+# eth1: 192.168.20.1/24
+# ipip0: No IP address, local 10.4.4.1 remote 10.2.2.1
+# Routes:
+# 192.168.10.0/24 dev ipip0    (192.168.10.0/24 is subnet of Client A)
+# 10.2.2.1 via 10.4.4.254      (Router A via Wanrouter)
+# No iptables rules at all.
+
+ip link add veth0 netns ${r_b} type veth peer name veth1 netns ${r_w}
+ip link add veth1 netns ${r_b} type veth peer name veth0 netns ${c_b}
+
+l_addr="10.4.4.1"
+r_addr="10.2.2.1"
+
+ip netns exec ${r_b} ip link add ipip0 type ipip local ${l_addr} remote ${r_addr} mode ipip || exit $ksft_skip
+
+for dev in lo veth0 veth1 ipip0; do
+       ip -net ${r_b} link set $dev up
+done
+
+ip -net ${r_b} addr add 10.4.4.1/24 dev veth0
+ip -net ${r_b} addr add 192.168.20.1/24 dev veth1
+
+ip -net ${r_b} route add 192.168.10.0/24 dev ipip0
+ip -net ${r_b} route add 10.2.2.0/24 via 10.4.4.254
+ip netns exec ${r_b} sysctl -q net.ipv4.conf.all.forwarding=1 > /dev/null
+
+# Client A
+ip -net ${c_a} addr add 192.168.10.2/24 dev veth0
+ip -net ${c_a} link set dev lo up
+ip -net ${c_a} link set dev veth0 up
+ip -net ${c_a} route add default via 192.168.10.1
+
+# Client B
+ip -net ${c_b} addr add 192.168.20.2/24 dev veth0
+ip -net ${c_b} link set dev veth0 up
+ip -net ${c_b} link set dev lo up
+ip -net ${c_b} route add default via 192.168.20.1
+
+# Wan
+ip -net ${r_w} addr add 10.2.2.254/24 dev veth0
+ip -net ${r_w} addr add 10.4.4.254/24 dev veth1
+
+ip -net ${r_w} link set dev lo up
+ip -net ${r_w} link set dev veth0 up mtu 1400
+ip -net ${r_w} link set dev veth1 up mtu 1400
+
+ip -net ${r_a} link set dev veth0 mtu 1400
+ip -net ${r_b} link set dev veth0 mtu 1400
+
+ip netns exec ${r_w} sysctl -q net.ipv4.conf.all.forwarding=1 > /dev/null
+
+# Path MTU discovery
+# ------------------
+# Running tracepath from Client A to Client B shows PMTU discovery is working
+# as expected:
+#
+# clienta:~# tracepath 192.168.20.2
+# 1?: [LOCALHOST]                      pmtu 1500
+# 1:  192.168.10.1                                          0.867ms
+# 1:  192.168.10.1                                          0.302ms
+# 2:  192.168.10.1                                          0.312ms pmtu 1480
+# 2:  no reply
+# 3:  192.168.10.1                                          0.510ms pmtu 1380
+# 3:  192.168.20.2                                          2.320ms reached
+# Resume: pmtu 1380 hops 3 back 3
+
+# ip netns exec ${c_a} traceroute --mtu 192.168.20.2
+
+# Router A has learned PMTU (1400) to Router B from Wanrouter.
+# Client A has learned PMTU (1400 - IPIP overhead = 1380) to Client B
+# from Router A.
+
+#Send large UDP packet
+#---------------------
+#Now we send a 1400 bytes UDP packet from Client A to Client B:
+
+# clienta:~# head -c1400 /dev/zero | tr "\000" "a" | nc -u 192.168.20.2 5000
+test_path "without"
+
+# The IPv4 stack on Client A already knows the PMTU to Client B, so the
+# UDP packet is sent as two fragments (1380 + 20). Router A forwards the
+# fragments between eth1 and ipip0. The fragments fit into the tunnel and
+# reach their destination.
+
+#When sending the large UDP packet again, Router A now reassembles the
+#fragments before routing the packet over ipip0. The resulting IPIP
+#packet is too big (1400) for the tunnel PMTU (1380) to Router B, it is
+#dropped on Router A before sending.
+
+ip netns exec ${r_a} iptables -A FORWARD -m conntrack --ctstate NEW
+test_path "with"
index edf0a48..bf6b962 100755 (executable)
@@ -94,7 +94,13 @@ check_for_helper()
        local message=$2
        local port=$3
 
-       ip netns exec ${netns} conntrack -L -p tcp --dport $port 2> /dev/null |grep -q 'helper=ftp'
+       if echo $message |grep -q 'ipv6';then
+               local family="ipv6"
+       else
+               local family="ipv4"
+       fi
+
+       ip netns exec ${netns} conntrack -L -f $family -p tcp --dport $port 2> /dev/null |grep -q 'helper=ftp'
        if [ $? -ne 0 ] ; then
                echo "FAIL: ${netns} did not show attached helper $message" 1>&2
                ret=1
@@ -111,8 +117,8 @@ test_helper()
 
        sleep 3 | ip netns exec ${ns2} nc -w 2 -l -p $port > /dev/null &
 
-       sleep 1
        sleep 1 | ip netns exec ${ns1} nc -w 2 10.0.1.2 $port > /dev/null &
+       sleep 1
 
        check_for_helper "$ns1" "ip $msg" $port
        check_for_helper "$ns2" "ip $msg" $port
@@ -128,8 +134,8 @@ test_helper()
 
        sleep 3 | ip netns exec ${ns2} nc -w 2 -6 -l -p $port > /dev/null &
 
-       sleep 1
        sleep 1 | ip netns exec ${ns1} nc -w 2 -6 dead:1::2 $port > /dev/null &
+       sleep 1
 
        check_for_helper "$ns1" "ipv6 $msg" $port
        check_for_helper "$ns2" "ipv6 $msg" $port
index cb53a8b..c25cf7c 100644 (file)
@@ -443,7 +443,6 @@ int test_alignment_handler_integer(void)
        LOAD_DFORM_TEST(ldu);
        LOAD_XFORM_TEST(ldx);
        LOAD_XFORM_TEST(ldux);
-       LOAD_DFORM_TEST(lmw);
        STORE_DFORM_TEST(stb);
        STORE_XFORM_TEST(stbx);
        STORE_DFORM_TEST(stbu);
@@ -462,7 +461,11 @@ int test_alignment_handler_integer(void)
        STORE_XFORM_TEST(stdx);
        STORE_DFORM_TEST(stdu);
        STORE_XFORM_TEST(stdux);
+
+#ifdef __BIG_ENDIAN__
+       LOAD_DFORM_TEST(lmw);
        STORE_DFORM_TEST(stmw);
+#endif
 
        return rc;
 }
index 9e5c7f3..0af4f02 100644 (file)
@@ -290,5 +290,5 @@ static int test(void)
 
 int main(void)
 {
-       test_harness(test, "pkey_exec_prot");
+       return test_harness(test, "pkey_exec_prot");
 }
index 4f815d7..2db76e5 100644 (file)
@@ -329,5 +329,5 @@ static int test(void)
 
 int main(void)
 {
-       test_harness(test, "pkey_siginfo");
+       return test_harness(test, "pkey_siginfo");
 }
index 5eb64d4..a8dc51a 100644 (file)
@@ -1,5 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0-only
 vdso_test
+vdso_test_abi
+vdso_test_clock_getres
+vdso_test_correctness
 vdso_test_gettimeofday
 vdso_test_getcpu
 vdso_standalone_test_x86
index 5029ef9..c4aea79 100644 (file)
@@ -349,7 +349,7 @@ static void test_one_clock_gettime64(int clock, const char *name)
                return;
        }
 
-       printf("\t%llu.%09ld %llu.%09ld %llu.%09ld\n",
+       printf("\t%llu.%09lld %llu.%09lld %llu.%09lld\n",
               (unsigned long long)start.tv_sec, start.tv_nsec,
               (unsigned long long)vdso.tv_sec, vdso.tv_nsec,
               (unsigned long long)end.tv_sec, end.tv_nsec);
index b50c208..fe07d97 100644 (file)
@@ -1,5 +1,4 @@
 CONFIG_LOCALVERSION="-debug"
-CONFIG_ENABLE_MUST_CHECK=y
 CONFIG_FRAME_POINTER=y
 CONFIG_STACK_VALIDATION=y
 CONFIG_DEBUG_KERNEL=y
index 5f26048..fa9e361 100644 (file)
@@ -485,9 +485,8 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
        kvm->mmu_notifier_count++;
        need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end,
                                             range->flags);
-       need_tlb_flush |= kvm->tlbs_dirty;
        /* we've to flush the tlb before the pages can be freed */
-       if (need_tlb_flush)
+       if (need_tlb_flush || kvm->tlbs_dirty)
                kvm_flush_remote_tlbs(kvm);
 
        spin_unlock(&kvm->mmu_lock);