Merge tag 'drm-misc-next-fixes-2021-04-29' of git://anongit.freedesktop.org/drm/drm...
author    Dave Airlie <airlied@redhat.com>    Fri, 30 Apr 2021 00:13:08 +0000 (10:13 +1000)
committer Dave Airlie <airlied@redhat.com>    Fri, 30 Apr 2021 00:13:48 +0000 (10:13 +1000)
Two patches in drm-misc-next-fixes this week: one to fix the error
handling in TTM when a BO can't be swapped out, and one to prevent a
wrong dereference in efifb.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20210429090308.k3fuqvenf6vupfmg@gilmour
1495 files changed:
.mailmap
Documentation/ABI/testing/debugfs-moxtet
Documentation/ABI/testing/debugfs-turris-mox-rwtm
Documentation/ABI/testing/sysfs-bus-moxtet-devices
Documentation/ABI/testing/sysfs-class-led-driver-turris-omnia
Documentation/ABI/testing/sysfs-firmware-turris-mox-rwtm
Documentation/ABI/testing/sysfs-fs-xfs
Documentation/arm64/acpi_object_usage.rst
Documentation/arm64/silicon-errata.rst
Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt
Documentation/devicetree/bindings/i2c/i2c-gpio.yaml
Documentation/devicetree/bindings/i2c/i2c-imx.yaml
Documentation/devicetree/bindings/iio/adc/ingenic,adc.yaml
Documentation/devicetree/bindings/input/adc-joystick.yaml
Documentation/devicetree/bindings/input/touchscreen/resistive-adc-touch.txt
Documentation/devicetree/bindings/leds/cznic,turris-omnia-leds.yaml
Documentation/devicetree/bindings/mfd/ab8500.txt
Documentation/devicetree/bindings/mfd/motorola-cpcap.txt
Documentation/devicetree/bindings/net/brcm,bcm4908-enet.yaml
Documentation/devicetree/bindings/net/ethernet-controller.yaml
Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
Documentation/devicetree/bindings/sound/fsl,spdif.yaml
Documentation/networking/device_drivers/ethernet/amazon/ena.rst
Documentation/networking/devlink/devlink-dpipe.rst
Documentation/networking/devlink/devlink-port.rst
Documentation/networking/ethtool-netlink.rst
Documentation/networking/xfrm_device.rst
Documentation/virt/kvm/api.rst
MAINTAINERS
Makefile
arch/arc/boot/dts/haps_hs.dts
arch/arc/kernel/signal.c
arch/arc/kernel/unwind.c
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/armada-385-turris-omnia.dts
arch/arm/boot/dts/at91-sam9x60ek.dts
arch/arm/boot/dts/at91-sama5d27_som1.dtsi
arch/arm/boot/dts/bcm2711.dtsi
arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts
arch/arm/boot/dts/omap4.dtsi
arch/arm/boot/dts/omap44xx-clocks.dtsi
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/sam9x60.dtsi
arch/arm/mach-imx/avic.c
arch/arm/mach-imx/common.h
arch/arm/mach-imx/mach-imx1.c
arch/arm/mach-imx/mach-imx25.c
arch/arm/mach-imx/mach-imx27.c
arch/arm/mach-imx/mach-imx31.c
arch/arm/mach-imx/mach-imx35.c
arch/arm/mach-imx/mm-imx3.c
arch/arm/mach-keystone/keystone.c
arch/arm/mach-omap1/ams-delta-fiq-handler.S
arch/arm/mach-omap2/omap-secure.c
arch/arm/mach-omap2/omap-secure.h
arch/arm/mach-omap2/pmic-cpcap.c
arch/arm/mach-omap2/sr_device.c
arch/arm/mach-pxa/mainstone.c
arch/arm64/Kconfig
arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts
arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h
arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
arch/arm64/include/asm/checksum.h
arch/arm64/include/asm/cpucaps.h
arch/arm64/include/asm/kvm_arm.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/thread_info.h
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/cpuinfo.c
arch/arm64/kernel/crash_dump.c
arch/arm64/kernel/process.c
arch/arm64/kernel/stacktrace.c
arch/arm64/kvm/debug.c
arch/arm64/kvm/hyp/vgic-v3-sr.c
arch/arm64/mm/mmu.c
arch/csky/kernel/probes/ftrace.c
arch/ia64/include/asm/ptrace.h
arch/ia64/kernel/err_inject.c
arch/ia64/kernel/mca.c
arch/mips/kernel/setup.c
arch/mips/kernel/vmlinux.lds.S
arch/nds32/mm/cacheflush.c
arch/parisc/include/asm/cmpxchg.h
arch/parisc/include/asm/processor.h
arch/parisc/math-emu/fpu.h
arch/powerpc/include/asm/cpu_has_feature.h
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/ptrace/Makefile
arch/powerpc/kernel/ptrace/ptrace-decl.h
arch/powerpc/kernel/ptrace/ptrace-fpu.c
arch/powerpc/kernel/ptrace/ptrace-novsx.c
arch/powerpc/kernel/ptrace/ptrace-view.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/vdso32/gettimeofday.S
arch/powerpc/platforms/pseries/lpar.c
arch/powerpc/platforms/pseries/mobility.c
arch/riscv/Kconfig
arch/riscv/Kconfig.socs
arch/riscv/include/asm/asm-prototypes.h
arch/riscv/include/asm/irq.h
arch/riscv/include/asm/processor.h
arch/riscv/include/asm/ptrace.h
arch/riscv/include/asm/sbi.h
arch/riscv/include/asm/timex.h
arch/riscv/include/asm/uaccess.h
arch/riscv/kernel/Makefile
arch/riscv/kernel/entry.S
arch/riscv/kernel/probes/ftrace.c
arch/riscv/kernel/probes/kprobes.c
arch/riscv/kernel/process.c
arch/riscv/kernel/sbi.c
arch/riscv/kernel/setup.c
arch/riscv/kernel/stacktrace.c
arch/riscv/kernel/time.c
arch/riscv/kernel/traps.c
arch/riscv/mm/kasan_init.c
arch/s390/include/asm/pci.h
arch/s390/include/asm/stacktrace.h
arch/s390/include/asm/vdso/data.h
arch/s390/kernel/cpcmd.c
arch/s390/kernel/dumpstack.c
arch/s390/kernel/irq.c
arch/s390/kernel/perf_cpum_cf_diag.c
arch/s390/kernel/setup.c
arch/s390/kernel/time.c
arch/s390/kernel/vtime.c
arch/s390/pci/pci.c
arch/s390/pci/pci_event.c
arch/x86/Makefile
arch/x86/events/intel/core.c
arch/x86/events/intel/ds.c
arch/x86/include/asm/kfence.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/smp.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/xen/page.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/kprobes/ftrace.c
arch/x86/kernel/kvm.c
arch/x86/kernel/setup.c
arch/x86/kernel/signal.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/traps.c
arch/x86/kvm/Makefile
arch/x86/kvm/hyperv.c
arch/x86/kvm/hyperv.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/mmu_internal.h
arch/x86/kvm/mmu/tdp_iter.c
arch/x86/kvm/mmu/tdp_iter.h
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/mmu/tdp_mmu.h
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/pmu.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/mm/mem_encrypt.c
arch/x86/net/bpf_jit_comp.c
arch/x86/net/bpf_jit_comp32.c
arch/x86/platform/iris/iris.c
arch/x86/xen/p2m.c
arch/x86/xen/setup.c
arch/xtensa/kernel/coprocessor.S
arch/xtensa/mm/fault.c
block/bio.c
block/blk-merge.c
block/blk-mq-debugfs.c
block/partitions/core.c
drivers/acpi/acpica/nsaccess.c
drivers/acpi/internal.h
drivers/acpi/processor_idle.c
drivers/acpi/scan.c
drivers/acpi/tables.c
drivers/acpi/video_detect.c
drivers/atm/fore200e.c
drivers/auxdisplay/charlcd.c
drivers/base/dd.c
drivers/base/power/runtime.c
drivers/block/floppy.c
drivers/block/null_blk/main.c
drivers/block/null_blk/null_blk.h
drivers/block/xen-blkback/blkback.c
drivers/bluetooth/btrsi.c
drivers/bluetooth/btusb.c
drivers/bus/moxtet.c
drivers/bus/mvebu-mbus.c
drivers/bus/omap_l3_noc.c
drivers/bus/ti-sysc.c
drivers/char/agp/Kconfig
drivers/char/applicom.c
drivers/char/toshiba.c
drivers/clk/clk-fixed-factor.c
drivers/clk/clk-mux.c
drivers/clk/clk.c
drivers/clk/qcom/camcc-sc7180.c
drivers/clk/qcom/clk-rcg2.c
drivers/clk/qcom/clk-rpmh.c
drivers/clk/qcom/gcc-sc7180.c
drivers/clk/socfpga/clk-gate.c
drivers/counter/stm32-timer-cnt.c
drivers/cpufreq/freq_table.c
drivers/dma-buf/dma-fence.c
drivers/extcon/extcon.c
drivers/firewire/nosy.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/vars.c
drivers/firmware/turris-mox-rwtm.c
drivers/gpio/gpio-moxtet.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/Makefile
drivers/gpu/drm/amd/amdgpu/aldebaran.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/aldebaran.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
drivers/gpu/drm/amd/amdgpu/df_v3_6.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h
drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
drivers/gpu/drm/amd/amdgpu/nv.c
drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
drivers/gpu/drm/amd/amdgpu/si_dma.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/soc15_common.h
drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
drivers/gpu/drm/amd/amdgpu/umc_v6_1.h
drivers/gpu/drm/amd/amdgpu/umc_v6_7.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/umc_v6_7.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/umc_v8_7.c
drivers/gpu/drm/amd/amdgpu/umc_v8_7.h
drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
drivers/gpu/drm/amd/amdgpu/vega20_ih.c
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
drivers/gpu/drm/amd/display/dc/Makefile
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.h
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_bios_types.h
drivers/gpu/drm/amd/display/dc/dc_link.h
drivers/gpu/drm/amd/display/dc/dc_stream.h
drivers/gpu/drm/amd/display/dc/dc_types.h
drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
drivers/gpu/drm/amd/display/dc/inc/core_types.h
drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
drivers/gpu/drm/amd/display/include/logger_types.h
drivers/gpu/drm/amd/display/modules/color/color_gamma.c
drivers/gpu/drm/amd/display/modules/color/color_gamma.h
drivers/gpu/drm/amd/display/modules/freesync/freesync.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h
drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h
drivers/gpu/drm/amd/include/atombios.h
drivers/gpu/drm/amd/include/atomfirmware.h
drivers/gpu/drm/amd/include/ivsrcid/dcn/irqsrcs_dcn_1_0.h
drivers/gpu/drm/amd/include/kgd_pp_interface.h
drivers/gpu/drm/amd/pm/amdgpu_dpm.c
drivers/gpu/drm/amd/pm/amdgpu_pm.c
drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h
drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h
drivers/gpu/drm/amd/pm/inc/smu_types.h
drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
drivers/gpu/drm/arm/display/include/malidp_utils.h
drivers/gpu/drm/arm/display/komeda/komeda_dev.c
drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
drivers/gpu/drm/bridge/Kconfig
drivers/gpu/drm/bridge/lontium-lt8912b.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_internal.h
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/drm_syncobj.c
drivers/gpu/drm/drm_vblank.c
drivers/gpu/drm/etnaviv/etnaviv_gem.c
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
drivers/gpu/drm/gma500/power.c
drivers/gpu/drm/i915/display/intel_acpi.c
drivers/gpu/drm/imx/imx-drm-core.c
drivers/gpu/drm/imx/imx-ldb.c
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/Makefile
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/a5xx_power.c
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
drivers/gpu/drm/msm/dp/dp_aux.c
drivers/gpu/drm/msm/dp/dp_debug.c
drivers/gpu/drm/msm/dp/dp_hpd.c
drivers/gpu/drm/msm/dp/dp_power.c
drivers/gpu/drm/msm/dsi/dsi.h
drivers/gpu/drm/msm/dsi/dsi_cfg.c
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/msm/dsi/dsi_manager.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
drivers/gpu/drm/msm/dsi/pll/dsi_pll.c [deleted file]
drivers/gpu/drm/msm/dsi/pll/dsi_pll.h [deleted file]
drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c [deleted file]
drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c [deleted file]
drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c [deleted file]
drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c [deleted file]
drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c [deleted file]
drivers/gpu/drm/msm/msm_atomic.c
drivers/gpu/drm/msm/msm_debugfs.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_fb.c
drivers/gpu/drm/msm/msm_fence.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem.h
drivers/gpu/drm/msm/msm_gem_shrinker.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/msm/msm_gpu.h
drivers/gpu/drm/msm/msm_gpu_trace.h
drivers/gpu/drm/msm/msm_kms.h
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/omapdrm/dss/dsi.c
drivers/gpu/drm/panel/panel-dsi-cm.c
drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_cs.c
drivers/gpu/drm/radeon/radeon_dp_mst.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/rcar-du/rcar_du_encoder.c
drivers/gpu/drm/scheduler/sched_main.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/dc.h
drivers/gpu/drm/tegra/dpaux.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/tegra/drm.h
drivers/gpu/drm/tegra/fb.c
drivers/gpu/drm/tegra/gem.h
drivers/gpu/drm/tegra/gr2d.c
drivers/gpu/drm/tegra/gr3d.c
drivers/gpu/drm/tegra/hub.c
drivers/gpu/drm/tegra/plane.c
drivers/gpu/drm/tegra/sor.c
drivers/gpu/drm/tegra/vic.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vc4/vc4_plane.c
drivers/gpu/drm/xen/xen_drm_front.c
drivers/gpu/drm/xen/xen_drm_front_conn.h
drivers/gpu/drm/xlnx/zynqmp_dp.c
drivers/gpu/host1x/bus.c
drivers/gpu/host1x/cdma.c
drivers/gpu/host1x/debug.c
drivers/gpu/host1x/dev.c
drivers/gpu/host1x/dev.h
drivers/gpu/host1x/hw/cdma_hw.c
drivers/gpu/host1x/hw/channel_hw.c
drivers/gpu/host1x/hw/debug_hw.c
drivers/gpu/host1x/hw/hw_host1x07_vm.h
drivers/gpu/host1x/intr.c
drivers/gpu/host1x/intr.h
drivers/gpu/host1x/job.c
drivers/gpu/host1x/syncpt.c
drivers/gpu/host1x/syncpt.h
drivers/i2c/busses/i2c-designware-master.c
drivers/i2c/busses/i2c-exynos5.c
drivers/i2c/busses/i2c-hix5hd2.c
drivers/i2c/busses/i2c-jz4780.c
drivers/i2c/busses/i2c-stm32f4.c
drivers/i2c/i2c-core-base.c
drivers/iio/adc/Kconfig
drivers/iio/adc/ab8500-gpadc.c
drivers/iio/adc/ad7949.c
drivers/iio/adc/qcom-spmi-vadc.c
drivers/iio/gyro/mpu3050-core.c
drivers/iio/humidity/hid-sensor-humidity.c
drivers/iio/imu/adis16400.c
drivers/iio/light/hid-sensor-prox.c
drivers/iio/temperature/hid-sensor-temperature.c
drivers/infiniband/core/addr.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/hfi1/affinity.c
drivers/infiniband/hw/hfi1/hfi.h
drivers/infiniband/hw/hfi1/init.c
drivers/infiniband/hw/hfi1/netdev_rx.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/mlx5/devx.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/qedr/verbs.c
drivers/infiniband/ulp/rtrs/rtrs-clt.c
drivers/input/joydev.c
drivers/interconnect/bulk.c
drivers/interconnect/core.c
drivers/interconnect/qcom/msm8939.c
drivers/iommu/amd/init.c
drivers/iommu/tegra-smmu.c
drivers/isdn/capi/kcapi.c
drivers/isdn/hardware/mISDN/mISDNipac.c
drivers/leds/leds-turris-omnia.c
drivers/mailbox/armada-37xx-rwtm-mailbox.c
drivers/md/dm-ioctl.c
drivers/md/dm-table.c
drivers/md/dm-verity-target.c
drivers/md/dm-zoned-target.c
drivers/md/dm.c
drivers/media/firewire/firedtv-fw.c
drivers/media/pci/cx18/cx18-alsa-main.c
drivers/media/pci/cx18/cx18-driver.c
drivers/media/pci/cx25821/cx25821-alsa.c
drivers/media/pci/cx88/cx88-alsa.c
drivers/media/pci/ivtv/ivtv-alsa-main.c
drivers/media/pci/ivtv/ivtv-driver.c
drivers/media/pci/sta2x11/sta2x11_vip.c
drivers/media/platform/atmel/atmel-isi.c
drivers/media/platform/atmel/atmel-sama5d2-isc.c
drivers/media/platform/marvell-ccic/cafe-driver.c
drivers/media/platform/stm32/stm32-dcmi.c
drivers/media/usb/cpia2/cpia2_v4l.c
drivers/media/usb/tm6000/tm6000-alsa.c
drivers/media/usb/tm6000/tm6000-dvb.c
drivers/mfd/intel_quark_i2c_gpio.c
drivers/misc/mei/client.c
drivers/mtd/maps/sun_uflash.c
drivers/net/arcnet/com20020-pci.c
drivers/net/bonding/bond_main.c
drivers/net/can/c_can/c_can.c
drivers/net/can/c_can/c_can_pci.c
drivers/net/can/c_can/c_can_platform.c
drivers/net/can/dev/netlink.c
drivers/net/can/flexcan.c
drivers/net/can/kvaser_pciefd.c
drivers/net/can/m_can/m_can.c
drivers/net/can/peak_canfd/peak_pciefd_main.c
drivers/net/can/sja1000/ems_pci.c
drivers/net/can/sja1000/ems_pcmcia.c
drivers/net/can/sja1000/kvaser_pci.c
drivers/net/can/sja1000/peak_pci.c
drivers/net/can/sja1000/peak_pcmcia.c
drivers/net/can/sja1000/plx_pci.c
drivers/net/can/spi/mcp251x.c
drivers/net/can/usb/Kconfig
drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
drivers/net/can/usb/peak_usb/pcan_usb.c
drivers/net/can/usb/peak_usb/pcan_usb_core.c
drivers/net/can/usb/peak_usb/pcan_usb_fd.c
drivers/net/can/usb/peak_usb/pcan_usb_pro.c
drivers/net/dsa/b53/b53_common.c
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/lantiq_gswip.c
drivers/net/dsa/mt7530.c
drivers/net/ethernet/amd/pcnet32.c
drivers/net/ethernet/amd/xgbe/xgbe.h
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/bcm4908_enet.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
drivers/net/ethernet/faraday/ftgmac100.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/intel/e1000e/82571.c
drivers/net/ethernet/intel/e1000e/hw.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_base.c
drivers/net/ethernet/intel/ice/ice_common.c
drivers/net/ethernet/intel/ice/ice_controlq.h
drivers/net/ethernet/intel/ice/ice_dcb.c
drivers/net/ethernet/intel/ice/ice_dcb_nl.c
drivers/net/ethernet/intel/ice/ice_ethtool.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_switch.c
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/ice/ice_type.h
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/intel/igb/e1000_hw.h
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/igc/igc.h
drivers/net/ethernet/intel/igc/igc_ethtool.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/intel/igc/igc_ptp.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/marvell/Kconfig
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
drivers/net/ethernet/marvell/octeontx2/af/rvu.h
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
drivers/net/ethernet/marvell/pxa168_eth.c
drivers/net/ethernet/mellanox/mlx5/core/dev.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h
drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
drivers/net/ethernet/microchip/lan743x_main.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
drivers/net/ethernet/netronome/nfp/flower/main.h
drivers/net/ethernet/netronome/nfp/flower/metadata.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/socionext/netsec.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
drivers/net/ethernet/xilinx/xilinx_axienet.h
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/geneve.c
drivers/net/hamradio/scc.c
drivers/net/ieee802154/atusb.c
drivers/net/ipa/ipa_cmd.c
drivers/net/ipa/ipa_qmi.c
drivers/net/phy/bcm-phy-lib.c
drivers/net/phy/broadcom.c
drivers/net/phy/phylink.c
drivers/net/tun.c
drivers/net/usb/cdc-phonet.c
drivers/net/usb/hso.c
drivers/net/usb/r8152.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wan/hdlc_fr.c
drivers/net/wan/hdlc_x25.c
drivers/net/wireless/admtek/adm8211.c
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/atmel/atmel.c
drivers/net/wireless/atmel/atmel_cs.c
drivers/net/wireless/atmel/atmel_pci.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c
drivers/net/wireless/cisco/airo.c
drivers/net/wireless/cisco/airo_cs.c
drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/intersil/hostap/hostap_cs.c
drivers/net/wireless/intersil/hostap/hostap_pci.c
drivers/net/wireless/intersil/hostap/hostap_plx.c
drivers/net/wireless/mediatek/mt76/mt7921/regs.h
drivers/net/wireless/ralink/rt2x00/rt2400pci.c
drivers/net/wireless/ralink/rt2x00/rt2500pci.c
drivers/net/wireless/ralink/rt2x00/rt2500usb.c
drivers/net/wireless/ralink/rt2x00/rt2800pci.c
drivers/net/wireless/ralink/rt2x00/rt2800usb.c
drivers/net/wireless/ralink/rt2x00/rt61pci.c
drivers/net/wireless/ralink/rt2x00/rt73usb.c
drivers/net/wireless/rsi/rsi_91x_main.c
drivers/net/wireless/rsi/rsi_91x_sdio.c
drivers/net/wireless/rsi/rsi_91x_usb.c
drivers/net/wireless/virt_wifi.c
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.h
drivers/nvme/host/fc.c
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/target/core.c
drivers/nvme/target/loop.c
drivers/nvme/target/tcp.c
drivers/of/fdt.c
drivers/of/of_private.h
drivers/of/overlay.c
drivers/of/property.c
drivers/of/unittest.c
drivers/parport/parport_amiga.c
drivers/parport/parport_atari.c
drivers/parport/parport_gsc.c
drivers/parport/parport_mfc3.c
drivers/parport/parport_sunbpp.c
drivers/pci/hotplug/rpadlpar_sysfs.c
drivers/pci/hotplug/s390_pci_hpc.c
drivers/pinctrl/intel/pinctrl-intel.c
drivers/pinctrl/pinctrl-microchip-sgpio.c
drivers/pinctrl/pinctrl-rockchip.c
drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
drivers/pinctrl/qcom/pinctrl-sc7280.c
drivers/pinctrl/qcom/pinctrl-sdx55.c
drivers/platform/x86/Kconfig
drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c
drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c
drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c
drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c
drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
drivers/platform/x86/intel-hid.c
drivers/platform/x86/intel-vbtn.c
drivers/platform/x86/intel_pmc_core.c
drivers/platform/x86/intel_pmt_class.c
drivers/platform/x86/intel_pmt_crashlog.c
drivers/platform/x86/thinkpad_acpi.c
drivers/ptp/ptp_qoriq.c
drivers/ras/cec.c
drivers/regulator/bd9571mwv-regulator.c
drivers/remoteproc/pru_rproc.c
drivers/remoteproc/qcom_pil_info.c
drivers/s390/block/dasd.c
drivers/sbus/char/display7seg.c
drivers/scsi/hpsa.c
drivers/scsi/hpsa_cmd.h
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/myrs.c
drivers/scsi/pcmcia/nsp_cs.c
drivers/scsi/pm8001/pm8001_hwi.c
drivers/scsi/qedi/qedi_main.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/qla_target.h
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/scsi_transport_srp.c
drivers/scsi/sd_zbc.c
drivers/scsi/smartpqi/smartpqi_init.c
drivers/scsi/st.c
drivers/scsi/ufs/ufs-mediatek.c
drivers/scsi/ufs/ufshcd.c
drivers/sh/maple/maple.c
drivers/soc/fsl/qbman/qman.c
drivers/soc/litex/litex_soc_ctrl.c
drivers/soc/qcom/qcom-geni-se.c
drivers/soc/ti/omap_prm.c
drivers/spi/spi-cadence-quadspi.c
drivers/staging/comedi/drivers/cb_pcidas.c
drivers/staging/comedi/drivers/cb_pcidas64.c
drivers/staging/comedi/drivers/vmk80xx.c
drivers/staging/media/tegra-video/vi.c
drivers/staging/rtl8192e/rtllib.h
drivers/staging/rtl8192e/rtllib_rx.c
drivers/staging/vt6655/rxtx.h
drivers/target/iscsi/iscsi_target.c
drivers/target/target_core_pscsi.c
drivers/tee/optee/core.c
drivers/thermal/thermal_sysfs.c
drivers/thunderbolt/retimer.c
drivers/thunderbolt/switch.c
drivers/thunderbolt/tb.c
drivers/tty/serial/icom.c
drivers/tty/serial/jsm/jsm_driver.c
drivers/tty/serial/qcom_geni_serial.c
drivers/usb/cdns3/cdnsp-gadget.c
drivers/usb/cdns3/cdnsp-ring.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/hcd.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/dwc3-qcom.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/configfs.c
drivers/usb/gadget/udc/amd5536udc_pci.c
drivers/usb/host/xhci-mtk.c
drivers/usb/misc/ldusb.c
drivers/usb/musb/musb_core.c
drivers/usb/storage/transport.c
drivers/usb/storage/unusual_devs.h
drivers/usb/typec/tcpm/tcpm.c
drivers/usb/typec/tps6598x.c
drivers/usb/usbip/stub_dev.c
drivers/usb/usbip/usbip_common.h
drivers/usb/usbip/usbip_event.c
drivers/usb/usbip/vhci_hcd.c
drivers/usb/usbip/vhci_sysfs.c
drivers/usb/usbip/vudc_dev.c
drivers/usb/usbip/vudc_sysfs.c
drivers/vdpa/ifcvf/ifcvf_main.c
drivers/vdpa/mlx5/core/mlx5_vdpa.h
drivers/vdpa/mlx5/core/mr.c
drivers/vdpa/mlx5/core/resources.c
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/vdpa/vdpa.c
drivers/vdpa/vdpa_sim/vdpa_sim.c
drivers/vdpa/vdpa_sim/vdpa_sim_net.c
drivers/vfio/Kconfig
drivers/vfio/pci/Kconfig
drivers/vfio/platform/Kconfig
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/vdpa.c
drivers/vhost/vhost.c
drivers/video/fbdev/core/fbcon.c
drivers/video/fbdev/hyperv_fb.c
drivers/virtio/virtio.c
drivers/virtio/virtio_mmio.c
drivers/watchdog/armada_37xx_wdt.c
drivers/watchdog/cpu5wdt.c
drivers/watchdog/cpwd.c
drivers/watchdog/riowd.c
drivers/xen/Kconfig
drivers/xen/events/events_base.c
fs/afs/dir.c
fs/afs/file.c
fs/afs/fs_operation.c
fs/afs/inode.c
fs/afs/internal.h
fs/afs/mntpt.c
fs/afs/write.c
fs/afs/xattr.c
fs/block_dev.c
fs/btrfs/Makefile
fs/btrfs/ctree.c
fs/btrfs/dev-replace.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/inode.c
fs/btrfs/qgroup.c
fs/btrfs/reada.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/btrfs/zoned.c
fs/cachefiles/bind.c
fs/cachefiles/rdwr.c
fs/cifs/Kconfig
fs/cifs/Makefile
fs/cifs/cifs_swn.c
fs/cifs/cifsacl.c
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/cifspdu.h
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/fs_context.c
fs/cifs/inode.c
fs/cifs/smb2glob.h
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2transport.c
fs/cifs/transport.c
fs/direct-io.c
fs/ext4/balloc.c
fs/ext4/ext4.h
fs/ext4/extents.c
fs/ext4/fast_commit.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/namei.c
fs/ext4/super.c
fs/ext4/sysfs.c
fs/ext4/verity.c
fs/ext4/xattr.c
fs/file.c
fs/fuse/dev.c
fs/fuse/fuse_i.h
fs/fuse/virtio_fs.c
fs/gfs2/super.c
fs/hostfs/hostfs_kern.c
fs/io-wq.c
fs/io-wq.h
fs/io_uring.c
fs/iomap/swapfile.c
fs/locks.c
fs/namei.c
fs/nfsd/Kconfig
fs/nfsd/filecache.c
fs/nfsd/nfs4callback.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfs4state.c
fs/ocfs2/aops.c
fs/ocfs2/file.c
fs/reiserfs/xattr.h
fs/select.c
fs/squashfs/export.c
fs/squashfs/id.c
fs/squashfs/squashfs_fs.h
fs/squashfs/xattr_id.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_itable.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_symlink.c
fs/zonefs/super.c
include/acpi/acpi_bus.h
include/drm/drm_dp_helper.h
include/drm/drm_print.h
include/drm/gpu_scheduler.h
include/drm/ttm/ttm_bo_api.h
include/dt-bindings/bus/moxtet.h
include/linux/acpi.h
include/linux/amba/bus.h
include/linux/armada-37xx-rwtm-mailbox.h
include/linux/avf/virtchnl.h
include/linux/blkdev.h
include/linux/bpf.h
include/linux/clk-provider.h
include/linux/device-mapper.h
include/linux/dma-fence.h
include/linux/efi.h
include/linux/ethtool.h
include/linux/extcon.h
include/linux/firmware/intel/stratix10-svc-client.h
include/linux/host1x.h
include/linux/hugetlb_cgroup.h
include/linux/if_macvlan.h
include/linux/io_uring.h
include/linux/memblock.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mlx5/qp.h
include/linux/mm.h
include/linux/mmu_notifier.h
include/linux/module.h
include/linux/moxtet.h
include/linux/mutex.h
include/linux/netdevice.h
include/linux/netfilter/x_tables.h
include/linux/pagemap.h
include/linux/qcom-geni-se.h
include/linux/restart_block.h
include/linux/skbuff.h
include/linux/skmsg.h
include/linux/sunrpc/svc_rdma.h
include/linux/thread_info.h
include/linux/usb_usual.h
include/linux/usermode_driver.h
include/linux/vdpa.h
include/linux/virtio.h
include/linux/virtio_net.h
include/linux/ww_mutex.h
include/linux/xarray.h
include/net/act_api.h
include/net/dst.h
include/net/inet_connection_sock.h
include/net/netfilter/nf_tables.h
include/net/netns/xfrm.h
include/net/nexthop.h
include/net/red.h
include/net/rtnetlink.h
include/net/sock.h
include/net/xfrm.h
include/scsi/scsi_transport_iscsi.h
include/trace/events/workqueue.h
include/uapi/drm/msm_drm.h
include/uapi/linux/blkpg.h
include/uapi/linux/bpf.h
include/uapi/linux/can.h
include/uapi/linux/ethtool.h
include/uapi/linux/fuse.h
include/uapi/linux/psample.h
include/uapi/linux/rfkill.h
kernel/bpf/bpf_inode_storage.c
kernel/bpf/bpf_struct_ops.c
kernel/bpf/core.c
kernel/bpf/disasm.c
kernel/bpf/inode.c
kernel/bpf/preload/bpf_preload_kern.c
kernel/bpf/stackmap.c
kernel/bpf/syscall.c
kernel/bpf/trampoline.c
kernel/bpf/verifier.c
kernel/fork.c
kernel/futex.c
kernel/gcov/clang.c
kernel/irq/irq_sim.c
kernel/irq/manage.c
kernel/jump_label.c
kernel/locking/lockdep.c
kernel/locking/mutex.c
kernel/power/energy_model.c
kernel/ptrace.c
kernel/reboot.c
kernel/signal.c
kernel/static_call.c
kernel/time/alarmtimer.c
kernel/time/hrtimer.c
kernel/time/posix-cpu-timers.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/usermode_driver.c
kernel/watchdog.c
kernel/workqueue.c
lib/Kconfig.debug
lib/math/div64.c
lib/test_kasan_module.c
lib/test_xarray.c
lib/xarray.c
mm/gup.c
mm/highmem.c
mm/hugetlb.c
mm/hugetlb_cgroup.c
mm/internal.h
mm/kfence/core.c
mm/kmemleak.c
mm/memory.c
mm/mmu_notifier.c
mm/page-writeback.c
mm/page_poison.c
mm/percpu-internal.h
mm/percpu-stats.c
mm/percpu.c
mm/z3fold.c
net/batman-adv/main.c
net/batman-adv/translation-table.c
net/bridge/br_switchdev.c
net/can/bcm.c
net/can/isotp.c
net/can/raw.c
net/core/dev.c
net/core/drop_monitor.c
net/core/dst.c
net/core/filter.c
net/core/flow_dissector.c
net/core/neighbour.c
net/core/rtnetlink.c
net/core/skmsg.c
net/core/sock.c
net/core/xdp.c
net/dccp/ipv6.c
net/dsa/dsa2.c
net/dsa/switch.c
net/ethtool/common.c
net/ethtool/eee.c
net/ethtool/ioctl.c
net/hsr/hsr_device.c
net/hsr/hsr_forward.c
net/ieee802154/nl-mac.c
net/ieee802154/nl802154.c
net/ipv4/ah4.c
net/ipv4/devinet.c
net/ipv4/esp4.c
net/ipv4/esp4_offload.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_vti.c
net/ipv4/ipconfig.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/route.c
net/ipv4/tcp_minisocks.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/ah6.c
net/ipv6/esp6.c
net/ipv6/esp6_offload.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_input.c
net/ipv6/ip6_vti.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/mac80211/aead_api.c
net/mac80211/aes_gmac.c
net/mac80211/cfg.c
net/mac80211/ibss.c
net/mac80211/main.c
net/mac80211/mlme.c
net/mac80211/rc80211_minstrel_ht.c
net/mac80211/tx.c
net/mac80211/util.c
net/mac802154/llsec.c
net/mptcp/options.c
net/mptcp/protocol.c
net/mptcp/subflow.c
net/ncsi/ncsi-manage.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_proto_gre.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_tables_api.c
net/netfilter/x_tables.c
net/nfc/llcp_sock.c
net/openvswitch/conntrack.c
net/openvswitch/conntrack.h
net/openvswitch/flow.c
net/qrtr/qrtr.c
net/rds/message.c
net/rds/send.c
net/rfkill/core.c
net/sched/act_api.c
net/sched/act_ct.c
net/sched/cls_api.c
net/sched/cls_flower.c
net/sched/sch_choke.c
net/sched/sch_gred.c
net/sched/sch_htb.c
net/sched/sch_red.c
net/sched/sch_sfq.c
net/sched/sch_teql.c
net/sctp/ipv6.c
net/sctp/output.c
net/sctp/outqueue.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/svc.c
net/sunrpc/svc_xprt.c
net/sunrpc/xprtrdma/svc_rdma_backchannel.c
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
net/tipc/bearer.h
net/tipc/crypto.c
net/tipc/net.c
net/tipc/node.c
net/tipc/socket.c
net/vmw_vsock/af_vsock.c
net/wireless/nl80211.c
net/wireless/scan.c
net/wireless/sme.c
net/xfrm/xfrm_compat.c
net/xfrm/xfrm_device.c
net/xfrm/xfrm_interface.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_state.c
scripts/module.lds.S
security/integrity/iint.c
security/selinux/include/security.h
security/selinux/selinuxfs.c
security/selinux/ss/avtab.c
security/selinux/ss/avtab.h
security/selinux/ss/conditional.c
security/selinux/ss/services.c
security/selinux/ss/sidtab.c
security/selinux/ss/sidtab.h
security/tomoyo/network.c
sound/drivers/aloop.c
sound/drivers/dummy.c
sound/drivers/mtpav.c
sound/drivers/mts64.c
sound/drivers/pcsp/pcsp.c
sound/drivers/portman2x4.c
sound/drivers/serial-u16550.c
sound/drivers/virmidi.c
sound/firewire/dice/dice-stream.c
sound/isa/ad1816a/ad1816a.c
sound/isa/ad1848/ad1848.c
sound/isa/als100.c
sound/isa/azt2320.c
sound/isa/cmi8330.c
sound/isa/cs423x/cs4231.c
sound/isa/cs423x/cs4236.c
sound/isa/es1688/es1688.c
sound/isa/es18xx.c
sound/isa/gus/gusclassic.c
sound/isa/gus/gusextreme.c
sound/isa/gus/gusmax.c
sound/isa/gus/interwave.c
sound/isa/opl3sa2.c
sound/isa/opti9xx/miro.c
sound/isa/opti9xx/opti92x-ad1848.c
sound/isa/sb/jazz16.c
sound/isa/sb/sb16.c
sound/isa/sb/sb8.c
sound/isa/sc6000.c
sound/isa/wavefront/wavefront.c
sound/mips/sgio2audio.c
sound/pci/ad1889.c
sound/pci/ali5451/ali5451.c
sound/pci/als300.c
sound/pci/als4000.c
sound/pci/atiixp.c
sound/pci/atiixp_modem.c
sound/pci/au88x0/au88x0.c
sound/pci/azt3328.c
sound/pci/bt87x.c
sound/pci/ca0106/ca0106_main.c
sound/pci/cmipci.c
sound/pci/cs4281.c
sound/pci/cs46xx/cs46xx.c
sound/pci/cs5535audio/cs5535audio.c
sound/pci/ctxfi/xfi.c
sound/pci/echoaudio/echoaudio.c
sound/pci/emu10k1/emu10k1.c
sound/pci/emu10k1/emu10k1x.c
sound/pci/ens1370.c
sound/pci/es1938.c
sound/pci/es1968.c
sound/pci/fm801.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/pci/ice1712/ice1712.c
sound/pci/ice1712/ice1724.c
sound/pci/intel8x0.c
sound/pci/intel8x0m.c
sound/pci/korg1212/korg1212.c
sound/pci/lola/lola.c
sound/pci/lx6464es/lx6464es.c
sound/pci/maestro3.c
sound/pci/mixart/mixart.c
sound/pci/nm256/nm256.c
sound/pci/oxygen/oxygen.c
sound/pci/oxygen/se6x.c
sound/pci/oxygen/virtuoso.c
sound/pci/pcxhr/pcxhr.c
sound/pci/riptide/riptide.c
sound/pci/rme32.c
sound/pci/rme96.c
sound/pci/rme9652/hdsp.c
sound/pci/rme9652/hdspm.c
sound/pci/rme9652/rme9652.c
sound/pci/sis7019.c
sound/pci/sonicvibes.c
sound/pci/trident/trident.c
sound/pci/via82xx.c
sound/pci/via82xx_modem.c
sound/pci/vx222/vx222.c
sound/pci/ymfpci/ymfpci.c
sound/pcmcia/pdaudiocf/pdaudiocf.c
sound/pcmcia/vx/vxpocket.c
sound/ppc/powermac.c
sound/sh/aica.c
sound/sh/sh_dac_audio.c
sound/soc/bcm/cygnus-ssp.c
sound/soc/codecs/Kconfig
sound/soc/codecs/ak4458.c
sound/soc/codecs/ak5558.c
sound/soc/codecs/cs42l42.c
sound/soc/codecs/cs42l42.h
sound/soc/codecs/es8316.c
sound/soc/codecs/lpass-rx-macro.c
sound/soc/codecs/lpass-tx-macro.c
sound/soc/codecs/lpass-va-macro.c
sound/soc/codecs/lpass-wsa-macro.c
sound/soc/codecs/max98373-i2c.c
sound/soc/codecs/max98373-sdw.c
sound/soc/codecs/max98373.c
sound/soc/codecs/rt1015.c
sound/soc/codecs/rt5640.c
sound/soc/codecs/rt5651.c
sound/soc/codecs/rt5659.c
sound/soc/codecs/rt5670.c
sound/soc/codecs/rt5670.h
sound/soc/codecs/rt711.c
sound/soc/codecs/sgtl5000.c
sound/soc/codecs/sirf-audio-codec.h [deleted file]
sound/soc/codecs/wcd934x.c
sound/soc/codecs/wm8960.c
sound/soc/fsl/fsl_esai.c
sound/soc/fsl/fsl_ssi.c
sound/soc/generic/simple-card-utils.c
sound/soc/intel/atom/sst-mfld-platform-pcm.c
sound/soc/intel/boards/bytcr_rt5640.c
sound/soc/mediatek/mt8192/mt8192-dai-tdm.c
sound/soc/mediatek/mt8192/mt8192-reg.h
sound/soc/qcom/lpass-cpu.c
sound/soc/qcom/sdm845.c
sound/soc/soc-core.c
sound/soc/sof/core.c
sound/soc/sof/intel/apl.c
sound/soc/sof/intel/cnl.c
sound/soc/sof/intel/hda-dsp.c
sound/soc/sof/intel/hda.c
sound/soc/sof/intel/hda.h
sound/soc/sof/intel/icl.c
sound/soc/sof/intel/pci-tgl.c
sound/soc/sof/intel/tgl.c
sound/soc/sunxi/sun4i-codec.c
sound/sparc/amd7930.c
sound/sparc/cs4231.c
sound/sparc/dbri.c
sound/usb/6fire/chip.c
sound/usb/caiaq/device.c
sound/usb/card.c
sound/usb/hiface/chip.c
sound/usb/misc/ua101.c
sound/usb/mixer_quirks.c
sound/usb/quirks.c
sound/usb/usx2y/usbusx2y.c
sound/x86/intel_hdmi_audio.c
sound/xen/xen_snd_front.c
tools/include/uapi/linux/kvm.h
tools/kvm/kvm_stat/kvm_stat.service
tools/lib/bpf/Makefile
tools/lib/bpf/btf_dump.c
tools/lib/bpf/libbpf.c
tools/lib/bpf/netlink.c
tools/lib/bpf/ringbuf.c
tools/lib/bpf/xsk.c
tools/perf/builtin-daemon.c
tools/perf/builtin-inject.c
tools/perf/tests/bpf.c
tools/perf/tests/shell/daemon.sh
tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.c
tools/perf/util/auxtrace.c
tools/perf/util/block-info.c
tools/perf/util/bpf-event.c
tools/perf/util/parse-events.c
tools/perf/util/pmu.c
tools/perf/util/pmu.h
tools/perf/util/synthetic-events.c
tools/perf/util/vdso.c
tools/testing/kunit/configs/broken_on_uml.config
tools/testing/kunit/kunit_config.py
tools/testing/radix-tree/idr-test.c
tools/testing/radix-tree/linux/compiler_types.h [deleted file]
tools/testing/radix-tree/multiorder.c
tools/testing/radix-tree/xarray.c
tools/testing/selftests/arm64/fp/sve-test.S
tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
tools/testing/selftests/bpf/prog_tests/check_mtu.c
tools/testing/selftests/bpf/prog_tests/fexit_sleep.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/bpf_tcp_nogpl.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
tools/testing/selftests/bpf/progs/fexit_sleep.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_check_mtu.c
tools/testing/selftests/bpf/progs/test_tunnel_kern.c
tools/testing/selftests/bpf/verifier/bounds_deduction.c
tools/testing/selftests/bpf/verifier/map_ptr.c
tools/testing/selftests/bpf/verifier/unpriv.c
tools/testing/selftests/bpf/verifier/value_ptr_arith.c
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/hardware_disable_test.c
tools/testing/selftests/kvm/include/kvm_util.h
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/lib/kvm_util_internal.h
tools/testing/selftests/kvm/x86_64/get_msr_index_features.c [new file with mode: 0644]
tools/testing/selftests/kvm/x86_64/hyperv_clock.c [new file with mode: 0644]
tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c [new file with mode: 0644]
tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
tools/testing/selftests/net/mptcp/mptcp_join.sh
tools/testing/selftests/net/reuseaddr_ports_exhausted.c
tools/testing/selftests/vm/Makefile

index 85b93cd..2d93232 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -36,6 +36,7 @@ Andrew Morton <akpm@linux-foundation.org>
 Andrew Murray <amurray@thegoodpenguin.co.uk> <amurray@embedded-bits.co.uk>
 Andrew Murray <amurray@thegoodpenguin.co.uk> <andrew.murray@arm.com>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
+Andrey Konovalov <andreyknvl@gmail.com> <andreyknvl@google.com>
 Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrey Ryabinin <ryabinin.a.a@gmail.com> <aryabinin@virtuozzo.com>
 Andy Adamson <andros@citi.umich.edu>
@@ -65,6 +66,8 @@ Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
 Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
 Chao Yu <chao@kernel.org> <chao2.yu@samsung.com>
 Chao Yu <chao@kernel.org> <yuchao0@huawei.com>
+Chris Chiu <chris.chiu@canonical.com> <chiu@endlessm.com>
+Chris Chiu <chris.chiu@canonical.com> <chiu@endlessos.org>
 Christophe Ricard <christophe.ricard@gmail.com>
 Christoph Hellwig <hch@lst.de>
 Corey Minyard <minyard@acm.org>
@@ -165,6 +168,7 @@ Johan Hovold <johan@kernel.org> <jhovold@gmail.com>
 Johan Hovold <johan@kernel.org> <johan@hovoldconsulting.com>
 John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
 John Stultz <johnstul@us.ibm.com>
+Jordan Crouse <jordan@cosmicpenguin.net> <jcrouse@codeaurora.org>
 <josh@joshtriplett.org> <josh@freedesktop.org>
 <josh@joshtriplett.org> <josh@kernel.org>
 <josh@joshtriplett.org> <josht@linux.vnet.ibm.com>
@@ -250,8 +254,14 @@ Morten Welinder <welinder@anemone.rentec.com>
 Morten Welinder <welinder@darter.rentec.com>
 Morten Welinder <welinder@troll.com>
 Mythri P K <mythripk@ti.com>
+Nadia Yvette Chambers <nyc@holomorphy.com> William Lee Irwin III <wli@holomorphy.com>
 Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
 Nguyen Anh Quynh <aquynh@gmail.com>
+Nicholas Piggin <npiggin@gmail.com> <npiggen@suse.de>
+Nicholas Piggin <npiggin@gmail.com> <npiggin@kernel.dk>
+Nicholas Piggin <npiggin@gmail.com> <npiggin@suse.de>
+Nicholas Piggin <npiggin@gmail.com> <nickpiggin@yahoo.com.au>
+Nicholas Piggin <npiggin@gmail.com> <piggin@cyberone.com.au>
 Nicolas Ferre <nicolas.ferre@microchip.com> <nicolas.ferre@atmel.com>
 Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
 Nicolas Pitre <nico@fluxnic.net> <nico@linaro.org>
index 6eee10c..637d858 100644 (file)
--- a/Documentation/ABI/testing/debugfs-moxtet
+++ b/Documentation/ABI/testing/debugfs-moxtet
@@ -1,7 +1,7 @@
 What:          /sys/kernel/debug/moxtet/input
 Date:          March 2019
 KernelVersion: 5.3
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) Read input from the shift registers, in hexadecimal.
                Returns N+1 bytes, where N is the number of Moxtet connected
                modules. The first byte is from the CPU board itself.
@@ -19,7 +19,7 @@ Description:  (Read) Read input from the shift registers, in hexadecimal.
 What:          /sys/kernel/debug/moxtet/output
 Date:          March 2019
 KernelVersion: 5.3
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (RW) Read last written value to the shift registers, in
                hexadecimal, or write values to the shift registers, also
                in hexadecimal.
index 326df1b..813987d 100644 (file)
--- a/Documentation/ABI/testing/debugfs-turris-mox-rwtm
+++ b/Documentation/ABI/testing/debugfs-turris-mox-rwtm
@@ -1,7 +1,7 @@
 What:          /sys/kernel/debug/turris-mox-rwtm/do_sign
 Date:          Jun 2020
 KernelVersion: 5.8
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:
 
                ======= ===========================================================
index 4a6d61b..32dccc0 100644 (file)
--- a/Documentation/ABI/testing/sysfs-bus-moxtet-devices
+++ b/Documentation/ABI/testing/sysfs-bus-moxtet-devices
@@ -1,17 +1,17 @@
 What:          /sys/bus/moxtet/devices/moxtet-<name>.<addr>/module_description
 Date:          March 2019
 KernelVersion: 5.3
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) Moxtet module description. Format: string
 
 What:          /sys/bus/moxtet/devices/moxtet-<name>.<addr>/module_id
 Date:          March 2019
 KernelVersion: 5.3
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) Moxtet module ID. Format: %x
 
 What:          /sys/bus/moxtet/devices/moxtet-<name>.<addr>/module_name
 Date:          March 2019
 KernelVersion: 5.3
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) Moxtet module name. Format: string
index 795a5de..c4d4697 100644 (file)
--- a/Documentation/ABI/testing/sysfs-class-led-driver-turris-omnia
+++ b/Documentation/ABI/testing/sysfs-class-led-driver-turris-omnia
@@ -1,7 +1,7 @@
 What:          /sys/class/leds/<led>/device/brightness
 Date:          July 2020
 KernelVersion: 5.9
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (RW) On the front panel of the Turris Omnia router there is also
                a button which can be used to control the intensity of all the
                LEDs at once, so that if they are too bright, user can dim them.
index b8631f5..ea5e5b4 100644 (file)
--- a/Documentation/ABI/testing/sysfs-firmware-turris-mox-rwtm
+++ b/Documentation/ABI/testing/sysfs-firmware-turris-mox-rwtm
@@ -1,21 +1,21 @@
 What:          /sys/firmware/turris-mox-rwtm/board_version
 Date:          August 2019
 KernelVersion: 5.4
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) Board version burned into eFuses of this Turris Mox board.
                Format: %i
 
 What:          /sys/firmware/turris-mox-rwtm/mac_address*
 Date:          August 2019
 KernelVersion: 5.4
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) MAC addresses burned into eFuses of this Turris Mox board.
                Format: %pM
 
 What:          /sys/firmware/turris-mox-rwtm/pubkey
 Date:          August 2019
 KernelVersion: 5.4
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) ECDSA public key (in pubkey hex compressed form) computed
                as pair to the ECDSA private key burned into eFuses of this
                Turris Mox Board.
@@ -24,7 +24,7 @@ Description:  (Read) ECDSA public key (in pubkey hex compressed form) computed
 What:          /sys/firmware/turris-mox-rwtm/ram_size
 Date:          August 2019
 KernelVersion: 5.4
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) RAM size in MiB of this Turris Mox board as was detected
                during manufacturing and burned into eFuses. Can be 512 or 1024.
                Format: %i
@@ -32,6 +32,6 @@ Description:  (Read) RAM size in MiB of this Turris Mox board as was detected
 What:          /sys/firmware/turris-mox-rwtm/serial_number
 Date:          August 2019
 KernelVersion: 5.4
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) Serial number burned into eFuses of this Turris Mox device.
                Format: %016X
index ea0cc8c..f704925 100644 (file)
@@ -33,7 +33,7 @@ Contact:      xfs@oss.sgi.com
 Description:
                The current state of the log write grant head. It
                represents the total log reservation of all currently
-               oustanding transactions, including regrants due to
+               outstanding transactions, including regrants due to
                rolling transactions. The grant head is exported in
                "cycle:bytes" format.
 Users:         xfstests
index 377e9d2..0609da7 100644 (file)
@@ -17,12 +17,12 @@ For ACPI on arm64, tables also fall into the following categories:
 
        -  Recommended: BERT, EINJ, ERST, HEST, PCCT, SSDT
 
-       -  Optional: BGRT, CPEP, CSRT, DBG2, DRTM, ECDT, FACS, FPDT, IORT,
-          MCHI, MPST, MSCT, NFIT, PMTT, RASF, SBST, SLIT, SPMI, SRAT, STAO,
-         TCPA, TPM2, UEFI, XENV
+       -  Optional: BGRT, CPEP, CSRT, DBG2, DRTM, ECDT, FACS, FPDT, IBFT,
+          IORT, MCHI, MPST, MSCT, NFIT, PMTT, RASF, SBST, SLIT, SPMI, SRAT,
+          STAO, TCPA, TPM2, UEFI, XENV
 
-       -  Not supported: BOOT, DBGP, DMAR, ETDT, HPET, IBFT, IVRS, LPIT,
-          MSDM, OEMx, PSDT, RSDT, SLIC, WAET, WDAT, WDRT, WPBT
+       -  Not supported: BOOT, DBGP, DMAR, ETDT, HPET, IVRS, LPIT, MSDM, OEMx,
+          PSDT, RSDT, SLIC, WAET, WDAT, WDRT, WPBT
 
 ====== ========================================================================
 Table  Usage for ARMv8 Linux
index 7195102..d410a47 100644 (file)
@@ -130,6 +130,9 @@ stable kernels.
 | Marvell        | ARM-MMU-500     | #582743         | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
+| NVIDIA         | Carmel Core     | N/A             | NVIDIA_CARMEL_CNP_ERRATUM   |
++----------------+-----------------+-----------------+-----------------------------+
++----------------+-----------------+-----------------+-----------------------------+
 | Freescale/NXP  | LS2080A/LS1043A | A-008585        | FSL_ERRATUM_A008585         |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
index 37f18d6..4c5c371 100644 (file)
@@ -32,7 +32,7 @@ Optional node properties:
 - "#thermal-sensor-cells" Used to expose itself to thermal fw.
 
 Read more about iio bindings at
-       Documentation/devicetree/bindings/iio/iio-bindings.txt
+       https://github.com/devicetree-org/dt-schema/blob/master/schemas/iio/
 
 Example:
        ncp15wb473@0 {
index ff99344..fd04028 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Bindings for GPIO bitbanged I2C
 
 maintainers:
-  - Wolfram Sang <wolfram@the-dreams.de>
+  - Wolfram Sang <wsa@kernel.org>
 
 allOf:
   - $ref: /schemas/i2c/i2c-controller.yaml#
index f23966b..3592d49 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Freescale Inter IC (I2C) and High Speed Inter IC (HS-I2C) for i.MX
 
 maintainers:
-  - Wolfram Sang <wolfram@the-dreams.de>
+  - Oleksij Rempel <o.rempel@pengutronix.de>
 
 allOf:
   - $ref: /schemas/i2c/i2c-controller.yaml#
index 9f414db..433a3fb 100644 (file)
@@ -14,8 +14,9 @@ description: >
   Industrial I/O subsystem bindings for ADC controller found in
   Ingenic JZ47xx SoCs.
 
-  ADC clients must use the format described in iio-bindings.txt, giving
-  a phandle and IIO specifier pair ("io-channels") to the ADC controller.
+  ADC clients must use the format described in
+  https://github.com/devicetree-org/dt-schema/blob/master/schemas/iio/iio-consumer.yaml,
+  giving a phandle and IIO specifier pair ("io-channels") to the ADC controller.
 
 properties:
   compatible:
index 054406b..721878d 100644 (file)
@@ -24,7 +24,9 @@ properties:
     description: >
       List of phandle and IIO specifier pairs.
       Each pair defines one ADC channel to which a joystick axis is connected.
-      See Documentation/devicetree/bindings/iio/iio-bindings.txt for details.
+      See
+      https://github.com/devicetree-org/dt-schema/blob/master/schemas/iio/iio-consumer.yaml
+      for details.
 
   '#address-cells':
     const: 1
index 51456c0..af5223b 100644 (file)
@@ -5,7 +5,10 @@ Required properties:
  - compatible: must be "resistive-adc-touch"
 The device must be connected to an ADC device that provides channels for
 position measurement and optional pressure.
-Refer to ../iio/iio-bindings.txt for details
+Refer to
+https://github.com/devicetree-org/dt-schema/blob/master/schemas/iio/iio-consumer.yaml
+for details
+
 - io-channels: must have at least two channels connected to an ADC device.
 These should correspond to the channels exposed by the ADC device and should
 have the right index as the ADC device registers them. These channels
index fe7fa25..c7ed287 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: CZ.NIC's Turris Omnia LEDs driver
 
 maintainers:
-  - Marek Behún <marek.behun@nic.cz>
+  - Marek Behún <kabel@kernel.org>
 
 description:
   This module adds support for the RGB LEDs found on the front panel of the
index d2a6e83..937b3e5 100644 (file)
@@ -72,7 +72,9 @@ Required child device properties:
                                                pwm|regulator|rtc|sysctrl|usb]";
 
   A few child devices require ADC channels from the GPADC node. Those follow the
-  standard bindings from iio/iio-bindings.txt and iio/adc/adc.txt
+  standard bindings from
+  https://github.com/devicetree-org/dt-schema/blob/master/schemas/iio/iio-consumer.yaml
+  and Documentation/devicetree/bindings/iio/adc/adc.yaml
 
   abx500-temp           : io-channels "aux1" and "aux2" for measuring external
                           temperatures.
index 5ddcc8f..b52e7a3 100644 (file)
@@ -16,14 +16,14 @@ Optional subnodes:
 The sub-functions of CPCAP get their own node with their own compatible values,
 which are described in the following files:
 
-- ../power/supply/cpcap-battery.txt
-- ../power/supply/cpcap-charger.txt
-- ../regulator/cpcap-regulator.txt
-- ../phy/phy-cpcap-usb.txt
-- ../input/cpcap-pwrbutton.txt
-- ../rtc/cpcap-rtc.txt
-- ../leds/leds-cpcap.txt
-- ../iio/adc/cpcap-adc.txt
+- Documentation/devicetree/bindings/power/supply/cpcap-battery.txt
+- Documentation/devicetree/bindings/power/supply/cpcap-charger.txt
+- Documentation/devicetree/bindings/regulator/cpcap-regulator.txt
+- Documentation/devicetree/bindings/phy/phy-cpcap-usb.txt
+- Documentation/devicetree/bindings/input/cpcap-pwrbutton.txt
+- Documentation/devicetree/bindings/rtc/cpcap-rtc.txt
+- Documentation/devicetree/bindings/leds/leds-cpcap.txt
+- Documentation/devicetree/bindings/iio/adc/motorola,cpcap-adc.yaml
 
 The only exception is the audio codec. Instead of a compatible value its
 node must be named "audio-codec".
index 79c38ea..13c26f2 100644 (file)
@@ -32,7 +32,7 @@ required:
   - interrupts
   - interrupt-names
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 4b7d1e5..e8f0468 100644 (file)
@@ -49,7 +49,7 @@ properties:
     description:
       Reference to an nvmem node for the MAC address
 
-  nvmem-cells-names:
+  nvmem-cell-names:
     const: mac-address
 
   phy-connection-type:
index b921731..df9e844 100644 (file)
@@ -65,6 +65,71 @@ KSZ9031:
   step is 60ps. The default value is the neutral setting, so setting
   rxc-skew-ps=<0> actually results in -900 picoseconds adjustment.
 
+  The KSZ9031 hardware supports a range of skew values from negative to
+  positive, where the specific range is property dependent. All values
+  specified in the devicetree are offset by the minimum value so they
+  can be represented as positive integers, since negative numbers are
+  awkward to express in the devicetree (a conversion sketch follows the
+  notes at the end of this file).
+
+  The following 5-bit value table applies to rxc-skew-ps and txc-skew-ps.
+
+  Pad Skew Value       Delay (ps)      Devicetree Value
+  ------------------------------------------------------
+  0_0000               -900ps          0
+  0_0001               -840ps          60
+  0_0010               -780ps          120
+  0_0011               -720ps          180
+  0_0100               -660ps          240
+  0_0101               -600ps          300
+  0_0110               -540ps          360
+  0_0111               -480ps          420
+  0_1000               -420ps          480
+  0_1001               -360ps          540
+  0_1010               -300ps          600
+  0_1011               -240ps          660
+  0_1100               -180ps          720
+  0_1101               -120ps          780
+  0_1110               -60ps           840
+  0_1111               0ps             900
+  1_0000               60ps            960
+  1_0001               120ps           1020
+  1_0010               180ps           1080
+  1_0011               240ps           1140
+  1_0100               300ps           1200
+  1_0101               360ps           1260
+  1_0110               420ps           1320
+  1_0111               480ps           1380
+  1_1000               540ps           1440
+  1_1001               600ps           1500
+  1_1010               660ps           1560
+  1_1011               720ps           1620
+  1_1100               780ps           1680
+  1_1101               840ps           1740
+  1_1110               900ps           1800
+  1_1111               960ps           1860
+
+  The following 4-bit value table applies to the txdX-skew-ps and
+  rxdX-skew-ps data pads, and to the rxdv-skew-ps and txen-skew-ps
+  control pads.
+
+  Pad Skew Value       Delay (ps)      Devicetree Value
+  ------------------------------------------------------
+  0000                 -420ps          0
+  0001                 -360ps          60
+  0010                 -300ps          120
+  0011                 -240ps          180
+  0100                 -180ps          240
+  0101                 -120ps          300
+  0110                 -60ps           360
+  0111                 0ps             420
+  1000                 60ps            480
+  1001                 120ps           540
+  1010                 180ps           600
+  1011                 240ps           660
+  1100                 300ps           720
+  1101                 360ps           780
+  1110                 420ps           840
+  1111                 480ps           900
+
   Optional properties:
 
     Maximum value of 1860, default value 900:
@@ -120,11 +185,21 @@ KSZ9131:
 
 Examples:
 
+       /* Attach to an Ethernet device with autodetected PHY */
+       &enet {
+               rxc-skew-ps = <1800>;
+               rxdv-skew-ps = <0>;
+               txc-skew-ps = <1800>;
+               txen-skew-ps = <0>;
+               status = "okay";
+       };
+
+       /* Attach to an explicitly-specified PHY */
        mdio {
                phy0: ethernet-phy@0 {
-                       rxc-skew-ps = <3000>;
+                       rxc-skew-ps = <1800>;
                        rxdv-skew-ps = <0>;
-                       txc-skew-ps = <3000>;
+                       txc-skew-ps = <1800>;
                        txen-skew-ps = <0>;
                        reg = <0>;
                };
@@ -133,3 +208,20 @@ Examples:
                phy = <&phy0>;
                phy-mode = "rgmii-id";
        };
+
+References:
+
+  Micrel ksz9021rl/rn Data Sheet, Revision 1.2. Dated 2/13/2014.
+  http://www.micrel.com/_PDF/Ethernet/datasheets/ksz9021rl-rn_ds.pdf
+
+  Micrel ksz9031rnx Data Sheet, Revision 2.1. Dated 11/20/2014.
+  http://www.micrel.com/_PDF/Ethernet/datasheets/KSZ9031RNX.pdf
+
+Notes:
+
+  Note that a previous version of the Micrel ksz9021rl/rn Data Sheet
+  was missing extended register 106 (transmit data pad skews) and
+  incorrectly specified the step size as 200ps/step instead of
+  120ps/step. This document now follows the latest revision of the
+  Micrel specification, even though usage in the kernel still reflects
+  the older, incorrect data sheet.
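
As a worked illustration of the offset scheme described above, the sketch
below converts a desired delay into the devicetree value and the raw
register field (a hypothetical helper, not part of the micrel driver; the
60 ps step and the -900 ps / -420 ps minimums are taken from the tables):

    #include <stdio.h>

    /* Devicetree value = delay - minimum; register field = value / 60 ps. */
    static unsigned int dt_value(int delay_ps, int min_ps)
    {
            return (unsigned int)(delay_ps - min_ps);
    }

    static unsigned int reg_field(unsigned int dt_val)
    {
            return dt_val / 60;     /* one hardware step per 60 ps */
    }

    int main(void)
    {
            /* 5-bit clock pad: +900 ps -> rxc-skew-ps = <1800> -> 0b1_1110 */
            printf("0x%x\n", reg_field(dt_value(900, -900)));
            /* 4-bit data pad: -420 ps -> rxd0-skew-ps = <0> -> 0b0000 */
            printf("0x%x\n", reg_field(dt_value(-420, -420)));
            return 0;
    }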
index 50449b6..4454aca 100644 (file)
@@ -21,6 +21,10 @@ properties:
       - fsl,vf610-spdif
       - fsl,imx6sx-spdif
       - fsl,imx8qm-spdif
+      - fsl,imx8qxp-spdif
+      - fsl,imx8mq-spdif
+      - fsl,imx8mm-spdif
+      - fsl,imx8mn-spdif
 
   reg:
     maxItems: 1
index 3561a8a..f8c6469 100644 (file)
@@ -267,7 +267,7 @@ DATA PATH
 Tx
 --
 
-end_start_xmit() is called by the stack. This function does the following:
+ena_start_xmit() is called by the stack. This function does the following:
 
 - Maps data buffers (skb->data and frags).
 - Populates ena_buf for the push buffer (if the driver and device are
index 468fe10..af37f25 100644 (file)
@@ -52,7 +52,7 @@ purposes as a standard complementary tool. The system's view from
 ``devlink-dpipe`` should change according to the changes done by the
 standard configuration tools.
 
-For example, it’s quiet common to  implement Access Control Lists (ACL)
+For example, it’s quite common to  implement Access Control Lists (ACL)
 using Ternary Content Addressable Memory (TCAM). The TCAM memory can be
 divided into TCAM regions. Complex TC filters can have multiple rules with
 different priorities and different lookup keys. On the other hand hardware
index e99b415..ab790e7 100644 (file)
@@ -151,7 +151,7 @@ representor netdevice.
 -------------
 A subfunction devlink port is created but it is not active yet. That means the
 entities are created on devlink side, the e-switch port representor is created,
-but the subfunction device itself it not created. A user might use e-switch port
+but the subfunction device itself is not created. A user might use e-switch port
 representor to do settings, putting it into bridge, adding TC rules, etc. A user
 might as well configure the hardware address (such as MAC address) of the
 subfunction while subfunction is inactive.
@@ -173,7 +173,7 @@ Terms and Definitions
    * - Term
      - Definitions
    * - ``PCI device``
-     - A physical PCI device having one or more PCI bus consists of one or
+     - A physical PCI device having one or more PCI buses consists of one or
        more PCI controllers.
    * - ``PCI controller``
      -  A controller consists of potentially multiple physical functions,
index 0507348..dc03ff8 100644 (file)
@@ -976,9 +976,9 @@ constraints on coalescing parameters and their values.
 
 
 PAUSE_GET
-============
+=========
 
-Gets channel counts like ``ETHTOOL_GPAUSE`` ioctl request.
+Gets pause frame settings like ``ETHTOOL_GPAUSEPARAM`` ioctl request.
 
 Request contents:
 
@@ -1007,7 +1007,7 @@ the statistics in the following structure:
 Each member has a corresponding attribute defined.
 
 PAUSE_SET
-============
+=========
 
 Sets pause parameters like ``ETHTOOL_GPAUSEPARAM`` ioctl request.
 
@@ -1024,7 +1024,7 @@ Request contents:
 EEE_GET
 =======
 
-Gets channel counts like ``ETHTOOL_GEEE`` ioctl request.
+Gets Energy Efficient Ethernet settings like ``ETHTOOL_GEEE`` ioctl request.
 
 Request contents:
 
@@ -1054,7 +1054,7 @@ first 32 are provided by the ``ethtool_ops`` callback.
 EEE_SET
 =======
 
-Sets pause parameters like ``ETHTOOL_GEEEPARAM`` ioctl request.
+Sets Energy Efficient Ethernet parameters like ``ETHTOOL_SEEE`` ioctl request.
 
 Request contents:
 
index da1073a..01391df 100644 (file)
@@ -50,7 +50,7 @@ Callbacks to implement
 
 The NIC driver offering ipsec offload will need to implement these
 callbacks to make the offload available to the network stack's
-XFRM subsytem.  Additionally, the feature bits NETIF_F_HW_ESP and
+XFRM subsystem.  Additionally, the feature bits NETIF_F_HW_ESP and
 NETIF_F_HW_ESP_TX_CSUM will signal the availability of the offload.
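
A hedged sketch of how a driver might wire this up (the demo_* names are
hypothetical; the callback names come from struct xfrmdev_ops in
<linux/netdevice.h>):

    #include <linux/netdevice.h>
    #include <net/xfrm.h>

    /* Stub callbacks; a real driver programs its SA tables here. */
    static int demo_xdo_dev_state_add(struct xfrm_state *x)
    {
            return -EOPNOTSUPP;
    }

    static void demo_xdo_dev_state_delete(struct xfrm_state *x)
    {
    }

    static bool demo_xdo_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
    {
            return false;
    }

    static const struct xfrmdev_ops demo_xfrmdev_ops = {
            .xdo_dev_state_add    = demo_xdo_dev_state_add,
            .xdo_dev_state_delete = demo_xdo_dev_state_delete,
            .xdo_dev_offload_ok   = demo_xdo_dev_offload_ok,
    };

    static void demo_setup_ipsec_offload(struct net_device *dev)
    {
            dev->xfrmdev_ops  = &demo_xfrmdev_ops;
            dev->features    |= NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM;
            dev->hw_features |= NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM;
    }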
 
 
index 38e327d..307f2fc 100644 (file)
@@ -1495,7 +1495,8 @@ Fails if any VCPU has already been created.
 
 Define which vcpu is the Bootstrap Processor (BSP).  Values are the same
 as the vcpu id in KVM_CREATE_VCPU.  If this ioctl is not called, the default
-is vcpu 0.
+is vcpu 0. This ioctl has to be called before vcpu creation,
+otherwise it will return an EBUSY error.
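
A minimal userspace sketch of the required ordering (illustrative only;
error handling omitted):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
            int kvm = open("/dev/kvm", O_RDWR);
            int vm = ioctl(kvm, KVM_CREATE_VM, 0);

            /* Must precede KVM_CREATE_VCPU, or the kernel returns EBUSY. */
            ioctl(vm, KVM_SET_BOOT_CPU_ID, 1ul);

            return ioctl(vm, KVM_CREATE_VCPU, 1) < 0;  /* vcpu 1 is the BSP */
    }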
 
 
 4.42 KVM_GET_XSAVE
@@ -4806,8 +4807,10 @@ If an MSR access is not permitted through the filtering, it generates a
 allows user space to deflect and potentially handle various MSR accesses
 into user space.
 
-If a vCPU is in running state while this ioctl is invoked, the vCPU may
-experience inconsistent filtering behavior on MSR accesses.
+Note, invoking this ioctl while a vCPU is running is inherently racy.  However,
+KVM does guarantee that vCPUs will see either the previous filter or the new
+filter, e.g. MSRs with identical settings in both the old and new filter will
+have deterministic behavior.
 
 4.127 KVM_XEN_HVM_SET_ATTR
 --------------------------
index 2086492..f03a198 100644 (file)
@@ -1181,7 +1181,7 @@ M:        Joel Fernandes <joel@joelfernandes.org>
 M:     Christian Brauner <christian@brauner.io>
 M:     Hridya Valsaraju <hridya@google.com>
 M:     Suren Baghdasaryan <surenb@google.com>
-L:     devel@driverdev.osuosl.org
+L:     linux-kernel@vger.kernel.org
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
 F:     drivers/android/
@@ -1790,19 +1790,26 @@ F:      drivers/net/ethernet/cortina/
 F:     drivers/pinctrl/pinctrl-gemini.c
 F:     drivers/rtc/rtc-ftrtc010.c
 
-ARM/CZ.NIC TURRIS MOX SUPPORT
-M:     Marek Behun <marek.behun@nic.cz>
+ARM/CZ.NIC TURRIS SUPPORT
+M:     Marek Behún <kabel@kernel.org>
 S:     Maintained
-W:     http://mox.turris.cz
+W:     https://www.turris.cz/
 F:     Documentation/ABI/testing/debugfs-moxtet
 F:     Documentation/ABI/testing/sysfs-bus-moxtet-devices
 F:     Documentation/ABI/testing/sysfs-firmware-turris-mox-rwtm
 F:     Documentation/devicetree/bindings/bus/moxtet.txt
 F:     Documentation/devicetree/bindings/firmware/cznic,turris-mox-rwtm.txt
 F:     Documentation/devicetree/bindings/gpio/gpio-moxtet.txt
+F:     Documentation/devicetree/bindings/leds/cznic,turris-omnia-leds.yaml
+F:     Documentation/devicetree/bindings/watchdog/armada-37xx-wdt.txt
 F:     drivers/bus/moxtet.c
 F:     drivers/firmware/turris-mox-rwtm.c
+F:     drivers/leds/leds-turris-omnia.c
+F:     drivers/mailbox/armada-37xx-rwtm-mailbox.c
 F:     drivers/gpio/gpio-moxtet.c
+F:     drivers/watchdog/armada_37xx_wdt.c
+F:     include/dt-bindings/bus/moxtet.h
+F:     include/linux/armada-37xx-rwtm-mailbox.h
 F:     include/linux/moxtet.h
 
 ARM/EZX SMARTPHONES (A780, A910, A1200, E680, ROKR E2 and ROKR E6)
@@ -2489,7 +2496,7 @@ N:        sc27xx
 N:     sc2731
 
 ARM/STI ARCHITECTURE
-M:     Patrice Chotard <patrice.chotard@st.com>
+M:     Patrice Chotard <patrice.chotard@foss.st.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 W:     http://www.stlinux.com
@@ -2522,7 +2529,7 @@ F:        include/linux/remoteproc/st_slim_rproc.h
 
 ARM/STM32 ARCHITECTURE
 M:     Maxime Coquelin <mcoquelin.stm32@gmail.com>
-M:     Alexandre Torgue <alexandre.torgue@st.com>
+M:     Alexandre Torgue <alexandre.torgue@foss.st.com>
 L:     linux-stm32@st-md-mailman.stormreply.com (moderated for non-subscribers)
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
@@ -3115,7 +3122,7 @@ C:        irc://irc.oftc.net/bcache
 F:     drivers/md/bcache/
 
 BDISP ST MEDIA DRIVER
-M:     Fabien Dessenne <fabien.dessenne@st.com>
+M:     Fabien Dessenne <fabien.dessenne@foss.st.com>
 L:     linux-media@vger.kernel.org
 S:     Supported
 W:     https://linuxtv.org
@@ -3675,7 +3682,7 @@ M:        bcm-kernel-feedback-list@broadcom.com
 L:     linux-pm@vger.kernel.org
 S:     Maintained
 T:     git git://github.com/broadcom/stblinux.git
-F:     drivers/soc/bcm/bcm-pmb.c
+F:     drivers/soc/bcm/bcm63xx/bcm-pmb.c
 F:     include/dt-bindings/soc/bcm-pmb.h
 
 BROADCOM SPECIFIC AMBA DRIVER (BCMA)
@@ -5080,7 +5087,7 @@ S:        Maintained
 F:     drivers/platform/x86/dell/dell-wmi.c
 
 DELTA ST MEDIA DRIVER
-M:     Hugues Fruchet <hugues.fruchet@st.com>
+M:     Hugues Fruchet <hugues.fruchet@foss.st.com>
 L:     linux-media@vger.kernel.org
 S:     Supported
 W:     https://linuxtv.org
@@ -6022,7 +6029,6 @@ F:        drivers/gpu/drm/rockchip/
 
 DRM DRIVERS FOR STI
 M:     Benjamin Gaignard <benjamin.gaignard@linaro.org>
-M:     Vincent Abriou <vincent.abriou@st.com>
 L:     dri-devel@lists.freedesktop.org
 S:     Maintained
 T:     git git://anongit.freedesktop.org/drm/drm-misc
@@ -6030,10 +6036,9 @@ F:       Documentation/devicetree/bindings/display/st,stih4xx.txt
 F:     drivers/gpu/drm/sti
 
 DRM DRIVERS FOR STM
-M:     Yannick Fertre <yannick.fertre@st.com>
-M:     Philippe Cornu <philippe.cornu@st.com>
+M:     Yannick Fertre <yannick.fertre@foss.st.com>
+M:     Philippe Cornu <philippe.cornu@foss.st.com>
 M:     Benjamin Gaignard <benjamin.gaignard@linaro.org>
-M:     Vincent Abriou <vincent.abriou@st.com>
 L:     dri-devel@lists.freedesktop.org
 S:     Maintained
 T:     git git://anongit.freedesktop.org/drm/drm-misc
@@ -7492,8 +7497,9 @@ F:        include/uapi/asm-generic/
 GENERIC PHY FRAMEWORK
 M:     Kishon Vijay Abraham I <kishon@ti.com>
 M:     Vinod Koul <vkoul@kernel.org>
-L:     linux-kernel@vger.kernel.org
+L:     linux-phy@lists.infradead.org
 S:     Supported
+Q:     https://patchwork.kernel.org/project/linux-phy/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/phy/linux-phy.git
 F:     Documentation/devicetree/bindings/phy/
 F:     drivers/phy/
@@ -8132,7 +8138,6 @@ F:        drivers/crypto/hisilicon/sec2/sec_main.c
 
 HISILICON STAGING DRIVERS FOR HIKEY 960/970
 M:     Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
-L:     devel@driverdev.osuosl.org
 S:     Maintained
 F:     drivers/staging/hikey9xx/
 
@@ -8247,7 +8252,7 @@ F:        include/linux/hugetlb.h
 F:     mm/hugetlb.c
 
 HVA ST MEDIA DRIVER
-M:     Jean-Christophe Trotin <jean-christophe.trotin@st.com>
+M:     Jean-Christophe Trotin <jean-christophe.trotin@foss.st.com>
 L:     linux-media@vger.kernel.org
 S:     Supported
 W:     https://linuxtv.org
@@ -8537,6 +8542,7 @@ IBM Power SRIOV Virtual NIC Device Driver
 M:     Dany Madden <drt@linux.ibm.com>
 M:     Lijun Pan <ljp@linux.ibm.com>
 M:     Sukadev Bhattiprolu <sukadev@linux.ibm.com>
+R:     Thomas Falcon <tlfalcon@linux.ibm.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/ibm/ibmvnic.*
@@ -10046,7 +10052,6 @@ F:      scripts/leaking_addresses.pl
 
 LED SUBSYSTEM
 M:     Pavel Machek <pavel@ucw.cz>
-R:     Dan Murphy <dmurphy@ti.com>
 L:     linux-leds@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pavel/linux-leds.git
@@ -10928,7 +10933,6 @@ T:      git git://linuxtv.org/media_tree.git
 F:     drivers/media/radio/radio-maxiradio*
 
 MCAN MMIO DEVICE DRIVER
-M:     Dan Murphy <dmurphy@ti.com>
 M:     Pankaj Sharma <pankj.sharma@samsung.com>
 L:     linux-can@vger.kernel.org
 S:     Maintained
@@ -11189,7 +11193,7 @@ T:      git git://linuxtv.org/media_tree.git
 F:     drivers/media/dvb-frontends/stv6111*
 
 MEDIA DRIVERS FOR STM32 - DCMI
-M:     Hugues Fruchet <hugues.fruchet@st.com>
+M:     Hugues Fruchet <hugues.fruchet@foss.st.com>
 L:     linux-media@vger.kernel.org
 S:     Supported
 T:     git git://linuxtv.org/media_tree.git
@@ -12560,7 +12564,7 @@ NETWORKING [MPTCP]
 M:     Mat Martineau <mathew.j.martineau@linux.intel.com>
 M:     Matthieu Baerts <matthieu.baerts@tessares.net>
 L:     netdev@vger.kernel.org
-L:     mptcp@lists.01.org
+L:     mptcp@lists.linux.dev
 S:     Maintained
 W:     https://github.com/multipath-tcp/mptcp_net-next/wiki
 B:     https://github.com/multipath-tcp/mptcp_net-next/issues
@@ -14731,15 +14735,11 @@ F:    drivers/net/ethernet/qlogic/qlcnic/
 QLOGIC QLGE 10Gb ETHERNET DRIVER
 M:     Manish Chopra <manishc@marvell.com>
 M:     GR-Linux-NIC-Dev@marvell.com
-L:     netdev@vger.kernel.org
-S:     Supported
-F:     drivers/staging/qlge/
-
-QLOGIC QLGE 10Gb ETHERNET DRIVER
 M:     Coiby Xu <coiby.xu@gmail.com>
 L:     netdev@vger.kernel.org
-S:     Maintained
+S:     Supported
 F:     Documentation/networking/device_drivers/qlogic/qlge.rst
+F:     drivers/staging/qlge/
 
 QM1D1B0004 MEDIA DRIVER
 M:     Akihiro Tsukada <tskd08@gmail.com>
@@ -14879,6 +14879,14 @@ L:     linux-arm-msm@vger.kernel.org
 S:     Maintained
 F:     drivers/iommu/arm/arm-smmu/qcom_iommu.c
 
+QUALCOMM IPC ROUTER (QRTR) DRIVER
+M:     Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+L:     linux-arm-msm@vger.kernel.org
+S:     Maintained
+F:     include/trace/events/qrtr.h
+F:     include/uapi/linux/qrtr.h
+F:     net/qrtr/
+
 QUALCOMM IPCC MAILBOX DRIVER
 M:     Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 L:     linux-arm-msm@vger.kernel.org
@@ -15228,6 +15236,7 @@ F:      fs/reiserfs/
 REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM
 M:     Ohad Ben-Cohen <ohad@wizery.com>
 M:     Bjorn Andersson <bjorn.andersson@linaro.org>
+M:     Mathieu Poirier <mathieu.poirier@linaro.org>
 L:     linux-remoteproc@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/andersson/remoteproc.git rproc-next
@@ -15241,6 +15250,7 @@ F:      include/linux/remoteproc/
 REMOTE PROCESSOR MESSAGING (RPMSG) SUBSYSTEM
 M:     Ohad Ben-Cohen <ohad@wizery.com>
 M:     Bjorn Andersson <bjorn.andersson@linaro.org>
+M:     Mathieu Poirier <mathieu.poirier@linaro.org>
 L:     linux-remoteproc@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/andersson/remoteproc.git rpmsg-next
@@ -15657,8 +15667,8 @@ F:      Documentation/s390/pci.rst
 
 S390 VFIO AP DRIVER
 M:     Tony Krowiak <akrowiak@linux.ibm.com>
-M:     Pierre Morel <pmorel@linux.ibm.com>
 M:     Halil Pasic <pasic@linux.ibm.com>
+M:     Jason Herne <jjherne@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
 W:     http://www.ibm.com/developerworks/linux/linux390/
@@ -15670,6 +15680,7 @@ F:      drivers/s390/crypto/vfio_ap_private.h
 S390 VFIO-CCW DRIVER
 M:     Cornelia Huck <cohuck@redhat.com>
 M:     Eric Farman <farman@linux.ibm.com>
+M:     Matthew Rosato <mjrosato@linux.ibm.com>
 R:     Halil Pasic <pasic@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 L:     kvm@vger.kernel.org
@@ -15680,6 +15691,7 @@ F:      include/uapi/linux/vfio_ccw.h
 
 S390 VFIO-PCI DRIVER
 M:     Matthew Rosato <mjrosato@linux.ibm.com>
+M:     Eric Farman <farman@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 L:     kvm@vger.kernel.org
 S:     Supported
@@ -16909,8 +16921,10 @@ F:     tools/spi/
 
 SPIDERNET NETWORK DRIVER for CELL
 M:     Ishizaki Kou <kou.ishizaki@toshiba.co.jp>
+M:     Geoff Levand <geoff@infradead.org>
 L:     netdev@vger.kernel.org
-S:     Supported
+L:     linuxppc-dev@lists.ozlabs.org
+S:     Maintained
 F:     Documentation/networking/device_drivers/ethernet/toshiba/spider_net.rst
 F:     drivers/net/ethernet/toshiba/spider_net*
 
@@ -16964,7 +16978,8 @@ F:      Documentation/devicetree/bindings/media/i2c/st,st-mipid02.txt
 F:     drivers/media/i2c/st-mipid02.c
 
 ST STM32 I2C/SMBUS DRIVER
-M:     Pierre-Yves MORDRET <pierre-yves.mordret@st.com>
+M:     Pierre-Yves MORDRET <pierre-yves.mordret@foss.st.com>
+M:     Alain Volmat <alain.volmat@foss.st.com>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
 F:     drivers/i2c/busses/i2c-stm32*
@@ -17062,7 +17077,7 @@ F:      drivers/staging/vt665?/
 
 STAGING SUBSYSTEM
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-L:     devel@driverdev.osuosl.org
+L:     linux-staging@lists.linux.dev
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
 F:     drivers/staging/
@@ -17089,7 +17104,7 @@ F:      kernel/jump_label.c
 F:     kernel/static_call.c
 
 STI AUDIO (ASoC) DRIVERS
-M:     Arnaud Pouliquen <arnaud.pouliquen@st.com>
+M:     Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Maintained
 F:     Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
@@ -17109,15 +17124,15 @@ T:    git git://linuxtv.org/media_tree.git
 F:     drivers/media/usb/stk1160/
 
 STM32 AUDIO (ASoC) DRIVERS
-M:     Olivier Moysan <olivier.moysan@st.com>
-M:     Arnaud Pouliquen <arnaud.pouliquen@st.com>
+M:     Olivier Moysan <olivier.moysan@foss.st.com>
+M:     Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Maintained
 F:     Documentation/devicetree/bindings/iio/adc/st,stm32-*.yaml
 F:     sound/soc/stm/
 
 STM32 TIMER/LPTIMER DRIVERS
-M:     Fabrice Gasnier <fabrice.gasnier@st.com>
+M:     Fabrice Gasnier <fabrice.gasnier@foss.st.com>
 S:     Maintained
 F:     Documentation/ABI/testing/*timer-stm32
 F:     Documentation/devicetree/bindings/*/*stm32-*timer*
@@ -17127,7 +17142,7 @@ F:      include/linux/*/stm32-*tim*
 
 STMMAC ETHERNET DRIVER
 M:     Giuseppe Cavallaro <peppe.cavallaro@st.com>
-M:     Alexandre Torgue <alexandre.torgue@st.com>
+M:     Alexandre Torgue <alexandre.torgue@foss.st.com>
 M:     Jose Abreu <joabreu@synopsys.com>
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -17869,7 +17884,6 @@ S:      Maintained
 F:     drivers/thermal/ti-soc-thermal/
 
 TI BQ27XXX POWER SUPPLY DRIVER
-R:     Dan Murphy <dmurphy@ti.com>
 F:     drivers/power/supply/bq27xxx_battery.c
 F:     drivers/power/supply/bq27xxx_battery_i2c.c
 F:     include/linux/power/bq27xxx_battery.h
@@ -18004,7 +18018,6 @@ S:      Odd Fixes
 F:     sound/soc/codecs/tas571x*
 
 TI TCAN4X5X DEVICE DRIVER
-M:     Dan Murphy <dmurphy@ti.com>
 L:     linux-can@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/can/tcan4x5x.txt
@@ -19157,7 +19170,7 @@ VME SUBSYSTEM
 M:     Martyn Welch <martyn@welchs.me.uk>
 M:     Manohar Vanga <manohar.vanga@gmail.com>
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-L:     devel@driverdev.osuosl.org
+L:     linux-kernel@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
 F:     Documentation/driver-api/vme.rst
index a28bb37..4730cf1 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc7
 NAME = Frozen Wasteland
 
 # *DOCUMENTATION*
index 60d578e..76ad527 100644 (file)
@@ -16,7 +16,7 @@
        memory {
                device_type = "memory";
                /* CONFIG_LINUX_RAM_BASE needs to match low mem start */
-               reg = <0x0 0x80000000 0x0 0x20000000    /* 512 MB low mem */
+               reg = <0x0 0x80000000 0x0 0x40000000    /* 1 GB low mem */
                       0x1 0x00000000 0x0 0x40000000>;  /* 1 GB highmem */
        };
 
index a78d8f7..fdbe06c 100644 (file)
@@ -96,7 +96,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
                             sizeof(sf->uc.uc_mcontext.regs.scratch));
        err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
 
-       return err;
+       return err ? -EFAULT : 0;
 }
 
 static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
@@ -110,7 +110,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
                                &(sf->uc.uc_mcontext.regs.scratch),
                                sizeof(sf->uc.uc_mcontext.regs.scratch));
        if (err)
-               return err;
+               return -EFAULT;
 
        set_current_blocked(&set);
        regs->bta       = uregs.scratch.bta;
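
The conversion above matters because the __copy_to_user()/__copy_from_user()
helpers return the number of bytes left uncopied rather than an errno, so a
nonzero result must be translated before it reaches the signal-handling
core. A sketch of the calling convention (u_dst, k_src and len are
placeholders):

    /* Nonzero means "bytes not copied", never a -Exx code. */
    if (__copy_to_user(u_dst, k_src, len))
            return -EFAULT;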
index 74ad425..47bab67 100644 (file)
@@ -187,25 +187,26 @@ static void init_unwind_table(struct unwind_table *table, const char *name,
                              const void *table_start, unsigned long table_size,
                              const u8 *header_start, unsigned long header_size)
 {
-       const u8 *ptr = header_start + 4;
-       const u8 *end = header_start + header_size;
-
        table->core.pc = (unsigned long)core_start;
        table->core.range = core_size;
        table->init.pc = (unsigned long)init_start;
        table->init.range = init_size;
        table->address = table_start;
        table->size = table_size;
-
-       /* See if the linker provided table looks valid. */
-       if (header_size <= 4
-           || header_start[0] != 1
-           || (void *)read_pointer(&ptr, end, header_start[1]) != table_start
-           || header_start[2] == DW_EH_PE_omit
-           || read_pointer(&ptr, end, header_start[2]) <= 0
-           || header_start[3] == DW_EH_PE_omit)
-               header_start = NULL;
-
+       /* Avoid pointer arithmetic on a NULL header pointer. */
+       if (header_start != NULL) {
+               const u8 *ptr = header_start + 4;
+               const u8 *end = header_start + header_size;
+               /* See if the linker provided table looks valid. */
+               if (header_size <= 4
+               || header_start[0] != 1
+               || (void *)read_pointer(&ptr, end, header_start[1])
+                               != table_start
+               || header_start[2] == DW_EH_PE_omit
+               || read_pointer(&ptr, end, header_start[2]) <= 0
+               || header_start[3] == DW_EH_PE_omit)
+                       header_start = NULL;
+       }
        table->hdrsz = header_size;
        smp_wmb();
        table->header = header_start;
index 5b213a1..5e33d0e 100644 (file)
@@ -40,6 +40,9 @@
                ethernet1 = &cpsw_emac1;
                spi0 = &spi0;
                spi1 = &spi1;
+               mmc0 = &mmc1;
+               mmc1 = &mmc2;
+               mmc2 = &mmc3;
        };
 
        cpus {
index 646a064..5bd6a66 100644 (file)
@@ -32,7 +32,8 @@
                ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
                          MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
                          MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000
-                         MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;
+                         MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000
+                         MBUS_ID(0x0c, 0x04) 0 0xf1200000 0x100000>;
 
                internal-regs {
 
        phy1: ethernet-phy@1 {
                compatible = "ethernet-phy-ieee802.3-c22";
                reg = <1>;
+               marvell,reg-init = <3 18 0 0x4985>;
 
                /* irq is connected to &pcawan pin 7 */
        };
index 73b6b1f..775ceb3 100644 (file)
 };
 
 &pinctrl {
-       atmel,mux-mask = <
-                        /*     A       B       C       */
-                        0xFFFFFE7F 0xC0E0397F 0xEF00019D       /* pioA */
-                        0x03FFFFFF 0x02FC7E68 0x00780000       /* pioB */
-                        0xffffffff 0xF83FFFFF 0xB800F3FC       /* pioC */
-                        0x003FFFFF 0x003F8000 0x00000000       /* pioD */
-                        >;
-
        adc {
                pinctrl_adc_default: adc_default {
                        atmel,pins = <AT91_PIOB 15 AT91_PERIPH_A AT91_PINCTRL_NONE>;
index 1b11638..e3251f3 100644 (file)
@@ -84,8 +84,8 @@
                                pinctrl-0 = <&pinctrl_macb0_default>;
                                phy-mode = "rmii";
 
-                               ethernet-phy@0 {
-                                       reg = <0x0>;
+                               ethernet-phy@7 {
+                                       reg = <0x7>;
                                        interrupt-parent = <&pioA>;
                                        interrupts = <PIN_PD31 IRQ_TYPE_LEVEL_LOW>;
                                        pinctrl-names = "default";
index 462b1df..720beec 100644 (file)
                        #reset-cells = <1>;
                };
 
-               bsc_intr: interrupt-controller@7ef00040 {
-                       compatible = "brcm,bcm2711-l2-intc", "brcm,l2-intc";
-                       reg = <0x7ef00040 0x30>;
-                       interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupt-controller;
-                       #interrupt-cells = <1>;
-               };
-
                aon_intr: interrupt-controller@7ef00100 {
                        compatible = "brcm,bcm2711-l2-intc", "brcm,l2-intc";
                        reg = <0x7ef00100 0x30>;
                        reg = <0x7ef04500 0x100>, <0x7ef00b00 0x300>;
                        reg-names = "bsc", "auto-i2c";
                        clock-frequency = <97500>;
-                       interrupt-parent = <&bsc_intr>;
-                       interrupts = <0>;
                        status = "disabled";
                };
 
                        reg = <0x7ef09500 0x100>, <0x7ef05b00 0x300>;
                        reg-names = "bsc", "auto-i2c";
                        clock-frequency = <97500>;
-                       interrupt-parent = <&bsc_intr>;
-                       interrupts = <1>;
                        status = "disabled";
                };
        };
index 7a1e531..f28a96f 100644 (file)
        pinctrl-0 = <&pinctrl_usdhc2>;
        cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+       vmmc-supply = <&vdd_sd1_reg>;
        status = "disabled";
 };
 
                     &pinctrl_usdhc3_cdwp>;
        cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
+       vmmc-supply = <&vdd_sd0_reg>;
        status = "disabled";
 };
index c593597..5a1e10d 100644 (file)
                        micrel,led-mode = <1>;
                        clocks = <&clks IMX6UL_CLK_ENET_REF>;
                        clock-names = "rmii-ref";
-                       reset-gpios = <&gpio_spi 1 GPIO_ACTIVE_LOW>;
-                       reset-assert-us = <10000>;
-                       reset-deassert-us = <100>;
 
                };
 
                        micrel,led-mode = <1>;
                        clocks = <&clks IMX6UL_CLK_ENET2_REF>;
                        clock-names = "rmii-ref";
-                       reset-gpios = <&gpio_spi 2 GPIO_ACTIVE_LOW>;
-                       reset-assert-us = <10000>;
-                       reset-deassert-us = <100>;
                };
        };
 };
        status = "okay";
 };
 
+&gpio_spi {
+       eth0-phy-hog {
+               gpio-hog;
+               gpios = <1 GPIO_ACTIVE_HIGH>;
+               output-high;
+               line-name = "eth0-phy";
+       };
+
+       eth1-phy-hog {
+               gpio-hog;
+               gpios = <2 GPIO_ACTIVE_HIGH>;
+               output-high;
+               line-name = "eth1-phy";
+       };
+};
+
 &i2c1 {
        clock-frequency = <100000>;
        pinctrl-names = "default";
index ecbb2cc..79cc457 100644 (file)
@@ -14,5 +14,6 @@
 };
 
 &gpmi {
+       fsl,use-minimum-ecc;
        status = "okay";
 };
index 72e4f64..4a9f949 100644 (file)
                i2c1 = &i2c2;
                i2c2 = &i2c3;
                i2c3 = &i2c4;
+               mmc0 = &mmc1;
+               mmc1 = &mmc2;
+               mmc2 = &mmc3;
+               mmc3 = &mmc4;
+               mmc4 = &mmc5;
                serial0 = &uart1;
                serial1 = &uart2;
                serial2 = &uart3;
index 5328685..1f1c04d 100644 (file)
                ti,max-div = <2>;
        };
 
-       sha2md5_fck: sha2md5_fck@15c8 {
-               #clock-cells = <0>;
-               compatible = "ti,gate-clock";
-               clocks = <&l3_div_ck>;
-               ti,bit-shift = <1>;
-               reg = <0x15c8>;
-       };
-
        usb_phy_cm_clk32k: usb_phy_cm_clk32k@640 {
                #clock-cells = <0>;
                compatible = "ti,gate-clock";
index e025b7c..ee821d0 100644 (file)
                i2c2 = &i2c3;
                i2c3 = &i2c4;
                i2c4 = &i2c5;
+               mmc0 = &mmc1;
+               mmc1 = &mmc2;
+               mmc2 = &mmc3;
+               mmc3 = &mmc4;
+               mmc4 = &mmc5;
                serial0 = &uart1;
                serial1 = &uart2;
                serial2 = &uart3;
index 84066c1..ec45ced 100644 (file)
                                compatible = "microchip,sam9x60-pinctrl", "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus";
                                ranges = <0xfffff400 0xfffff400 0x800>;
 
+                               /* mux-mask corresponding to sam9x60 SoC in TFBGA228L package */
+                               atmel,mux-mask = <
+                                                /*     A       B       C       */
+                                                0xffffffff 0xffe03fff 0xef00019d       /* pioA */
+                                                0x03ffffff 0x02fc7e7f 0x00780000       /* pioB */
+                                                0xffffffff 0xffffffff 0xf83fffff       /* pioC */
+                                                0x003fffff 0x003f8000 0x00000000       /* pioD */
+                                                >;
+
                                pioA: gpio@fffff400 {
                                        compatible = "microchip,sam9x60-gpio", "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
                                        reg = <0xfffff400 0x200>;
index 322caa2..21bce40 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/module.h>
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
+#include <linux/irqchip.h>
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
@@ -162,7 +163,7 @@ static void __exception_irq_entry avic_handle_irq(struct pt_regs *regs)
  * interrupts. It registers the interrupt enable and disable functions
  * to the kernel for each interrupt source.
  */
-void __init mxc_init_irq(void __iomem *irqbase)
+static void __init mxc_init_irq(void __iomem *irqbase)
 {
        struct device_node *np;
        int irq_base;
@@ -220,3 +221,16 @@ void __init mxc_init_irq(void __iomem *irqbase)
 
        printk(KERN_INFO "MXC IRQ initialized\n");
 }
+
+static int __init imx_avic_init(struct device_node *node,
+                              struct device_node *parent)
+{
+       void __iomem *avic_base;
+
+       avic_base = of_iomap(node, 0);
+       BUG_ON(!avic_base);
+       mxc_init_irq(avic_base);
+       return 0;
+}
+
+IRQCHIP_DECLARE(imx_avic, "fsl,avic", imx_avic_init);
index 2b004cc..474dedb 100644 (file)
@@ -22,7 +22,6 @@ void mx35_map_io(void);
 void imx21_init_early(void);
 void imx31_init_early(void);
 void imx35_init_early(void);
-void mxc_init_irq(void __iomem *);
 void mx31_init_irq(void);
 void mx35_init_irq(void);
 void mxc_set_cpu_type(unsigned int type);
index 32df3b8..8eca92d 100644 (file)
@@ -17,16 +17,6 @@ static void __init imx1_init_early(void)
        mxc_set_cpu_type(MXC_CPU_MX1);
 }
 
-static void __init imx1_init_irq(void)
-{
-       void __iomem *avic_addr;
-
-       avic_addr = ioremap(MX1_AVIC_ADDR, SZ_4K);
-       WARN_ON(!avic_addr);
-
-       mxc_init_irq(avic_addr);
-}
-
 static const char * const imx1_dt_board_compat[] __initconst = {
        "fsl,imx1",
        NULL
@@ -34,7 +24,6 @@ static const char * const imx1_dt_board_compat[] __initconst = {
 
 DT_MACHINE_START(IMX1_DT, "Freescale i.MX1 (Device Tree Support)")
        .init_early     = imx1_init_early,
-       .init_irq       = imx1_init_irq,
        .dt_compat      = imx1_dt_board_compat,
        .restart        = mxc_restart,
 MACHINE_END
index 95de48a..51927bd 100644 (file)
@@ -22,17 +22,6 @@ static void __init imx25_dt_init(void)
        imx_aips_allow_unprivileged_access("fsl,imx25-aips");
 }
 
-static void __init mx25_init_irq(void)
-{
-       struct device_node *np;
-       void __iomem *avic_base;
-
-       np = of_find_compatible_node(NULL, NULL, "fsl,avic");
-       avic_base = of_iomap(np, 0);
-       BUG_ON(!avic_base);
-       mxc_init_irq(avic_base);
-}
-
 static const char * const imx25_dt_board_compat[] __initconst = {
        "fsl,imx25",
        NULL
@@ -42,6 +31,5 @@ DT_MACHINE_START(IMX25_DT, "Freescale i.MX25 (Device Tree Support)")
        .init_early     = imx25_init_early,
        .init_machine   = imx25_dt_init,
        .init_late      = imx25_pm_init,
-       .init_irq       = mx25_init_irq,
        .dt_compat      = imx25_dt_board_compat,
 MACHINE_END
index 262422a..e325c94 100644 (file)
@@ -56,17 +56,6 @@ static void __init imx27_init_early(void)
        mxc_set_cpu_type(MXC_CPU_MX27);
 }
 
-static void __init mx27_init_irq(void)
-{
-       void __iomem *avic_base;
-       struct device_node *np;
-
-       np = of_find_compatible_node(NULL, NULL, "fsl,avic");
-       avic_base = of_iomap(np, 0);
-       BUG_ON(!avic_base);
-       mxc_init_irq(avic_base);
-}
-
 static const char * const imx27_dt_board_compat[] __initconst = {
        "fsl,imx27",
        NULL
@@ -75,7 +64,6 @@ static const char * const imx27_dt_board_compat[] __initconst = {
 DT_MACHINE_START(IMX27_DT, "Freescale i.MX27 (Device Tree Support)")
        .map_io         = mx27_map_io,
        .init_early     = imx27_init_early,
-       .init_irq       = mx27_init_irq,
        .init_late      = imx27_pm_init,
        .dt_compat      = imx27_dt_board_compat,
 MACHINE_END
index dc69dfe..e9a1092 100644 (file)
@@ -14,6 +14,5 @@ static const char * const imx31_dt_board_compat[] __initconst = {
 DT_MACHINE_START(IMX31_DT, "Freescale i.MX31 (Device Tree Support)")
        .map_io         = mx31_map_io,
        .init_early     = imx31_init_early,
-       .init_irq       = mx31_init_irq,
        .dt_compat      = imx31_dt_board_compat,
 MACHINE_END
index ec5c306..0fc0821 100644 (file)
@@ -27,6 +27,5 @@ DT_MACHINE_START(IMX35_DT, "Freescale i.MX35 (Device Tree Support)")
        .l2c_aux_mask   = ~0,
        .map_io         = mx35_map_io,
        .init_early     = imx35_init_early,
-       .init_irq       = mx35_init_irq,
        .dt_compat      = imx35_dt_board_compat,
 MACHINE_END
index 5056438..28db972 100644 (file)
@@ -109,18 +109,6 @@ void __init imx31_init_early(void)
        mx3_ccm_base = of_iomap(np, 0);
        BUG_ON(!mx3_ccm_base);
 }
-
-void __init mx31_init_irq(void)
-{
-       void __iomem *avic_base;
-       struct device_node *np;
-
-       np = of_find_compatible_node(NULL, NULL, "fsl,imx31-avic");
-       avic_base = of_iomap(np, 0);
-       BUG_ON(!avic_base);
-
-       mxc_init_irq(avic_base);
-}
 #endif /* ifdef CONFIG_SOC_IMX31 */
 
 #ifdef CONFIG_SOC_IMX35
@@ -158,16 +146,4 @@ void __init imx35_init_early(void)
        mx3_ccm_base = of_iomap(np, 0);
        BUG_ON(!mx3_ccm_base);
 }
-
-void __init mx35_init_irq(void)
-{
-       void __iomem *avic_base;
-       struct device_node *np;
-
-       np = of_find_compatible_node(NULL, NULL, "fsl,imx35-avic");
-       avic_base = of_iomap(np, 0);
-       BUG_ON(!avic_base);
-
-       mxc_init_irq(avic_base);
-}
 #endif /* ifdef CONFIG_SOC_IMX35 */
index cd711bf..2c647bd 100644 (file)
@@ -65,7 +65,7 @@ static void __init keystone_init(void)
 static long long __init keystone_pv_fixup(void)
 {
        long long offset;
-       phys_addr_t mem_start, mem_end;
+       u64 mem_start, mem_end;
 
        mem_start = memblock_start_of_DRAM();
        mem_end = memblock_end_of_DRAM();
@@ -78,7 +78,7 @@ static long long __init keystone_pv_fixup(void)
        if (mem_start < KEYSTONE_HIGH_PHYS_START ||
            mem_end   > KEYSTONE_HIGH_PHYS_END) {
                pr_crit("Invalid address space for memory (%08llx-%08llx)\n",
-                       (u64)mem_start, (u64)mem_end);
+                       mem_start, mem_end);
                return 0;
        }
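
The switch to u64 matters because phys_addr_t is only 32 bits wide when
CONFIG_ARM_LPAE is disabled, while the Keystone high-memory bounds sit
above 4 GiB; a sketch of the truncation hazard (the constant stands in for
KEYSTONE_HIGH_PHYS_START and is illustrative):

    #include <stdio.h>

    typedef unsigned int phys_addr32_t;  /* phys_addr_t with !LPAE */

    int main(void)
    {
            phys_addr32_t narrow = (phys_addr32_t)0x800000000ULL; /* 0x0 */
            unsigned long long wide = 0x800000000ULL;             /* exact */

            printf("%#x vs %#llx\n", narrow, wide);
            return 0;
    }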
 
index 14a6c3e..f745a65 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/platform_data/gpio-omap.h>
 
 #include <asm/assembler.h>
+#include <asm/irq.h>
 
 #include "ams-delta-fiq.h"
 #include "board-ams-delta.h"
index f70d561..0659ab4 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/arm-smccc.h>
+#include <linux/cpu_pm.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/io.h>
@@ -20,6 +21,7 @@
 
 #include "common.h"
 #include "omap-secure.h"
+#include "soc.h"
 
 static phys_addr_t omap_secure_memblock_base;
 
@@ -213,3 +215,40 @@ void __init omap_secure_init(void)
 {
        omap_optee_init_check();
 }
+
+/*
+ * Dummy dispatcher call after core OSWR and MPU off. Updates the ROM return
+ * address once the MMU has been re-enabled after CPU1 has woken up again.
+ * Otherwise the ROM code will attempt to use the earlier physical return
+ * address that got set with MMU off when waking up CPU1. Only used on secure
+ * devices.
+ */
+static int cpu_notifier(struct notifier_block *nb, unsigned long cmd, void *v)
+{
+       switch (cmd) {
+       case CPU_CLUSTER_PM_EXIT:
+               omap_secure_dispatcher(OMAP4_PPA_SERVICE_0,
+                                      FLAG_START_CRITICAL,
+                                      0, 0, 0, 0, 0);
+               break;
+       default:
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block secure_notifier_block = {
+       .notifier_call = cpu_notifier,
+};
+
+static int __init secure_pm_init(void)
+{
+       if (omap_type() == OMAP2_DEVICE_TYPE_GP || !soc_is_omap44xx())
+               return 0;
+
+       cpu_pm_register_notifier(&secure_notifier_block);
+
+       return 0;
+}
+omap_arch_initcall(secure_pm_init);
index 4aaa957..172069f 100644 (file)
@@ -50,6 +50,7 @@
 #define OMAP5_DRA7_MON_SET_ACR_INDEX   0x107
 
 /* Secure PPA(Primary Protected Application) APIs */
+#define OMAP4_PPA_SERVICE_0            0x21
 #define OMAP4_PPA_L2_POR_INDEX         0x23
 #define OMAP4_PPA_CPU_ACTRL_SMP_INDEX  0x25
 
index 09076ad..668dc84 100644 (file)
@@ -246,10 +246,10 @@ int __init omap4_cpcap_init(void)
        omap_voltage_register_pmic(voltdm, &omap443x_max8952_mpu);
 
        if (of_machine_is_compatible("motorola,droid-bionic")) {
-               voltdm = voltdm_lookup("mpu");
+               voltdm = voltdm_lookup("core");
                omap_voltage_register_pmic(voltdm, &omap_cpcap_core);
 
-               voltdm = voltdm_lookup("mpu");
+               voltdm = voltdm_lookup("iva");
                omap_voltage_register_pmic(voltdm, &omap_cpcap_iva);
        } else {
                voltdm = voltdm_lookup("core");
index 62df666..17b66f0 100644 (file)
@@ -88,34 +88,26 @@ static void __init sr_set_nvalues(struct omap_volt_data *volt_data,
 
 extern struct omap_sr_data omap_sr_pdata[];
 
-static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
+static int __init sr_init_by_name(const char *name, const char *voltdm)
 {
        struct omap_sr_data *sr_data = NULL;
        struct omap_volt_data *volt_data;
-       struct omap_smartreflex_dev_attr *sr_dev_attr;
        static int i;
 
-       if (!strncmp(oh->name, "smartreflex_mpu_iva", 20) ||
-           !strncmp(oh->name, "smartreflex_mpu", 16))
+       if (!strncmp(name, "smartreflex_mpu_iva", 20) ||
+           !strncmp(name, "smartreflex_mpu", 16))
                sr_data = &omap_sr_pdata[OMAP_SR_MPU];
-       else if (!strncmp(oh->name, "smartreflex_core", 17))
+       else if (!strncmp(name, "smartreflex_core", 17))
                sr_data = &omap_sr_pdata[OMAP_SR_CORE];
-       else if (!strncmp(oh->name, "smartreflex_iva", 16))
+       else if (!strncmp(name, "smartreflex_iva", 16))
                sr_data = &omap_sr_pdata[OMAP_SR_IVA];
 
        if (!sr_data) {
-               pr_err("%s: Unknown instance %s\n", __func__, oh->name);
+               pr_err("%s: Unknown instance %s\n", __func__, name);
                return -EINVAL;
        }
 
-       sr_dev_attr = (struct omap_smartreflex_dev_attr *)oh->dev_attr;
-       if (!sr_dev_attr || !sr_dev_attr->sensor_voltdm_name) {
-               pr_err("%s: No voltage domain specified for %s. Cannot initialize\n",
-                      __func__, oh->name);
-               goto exit;
-       }
-
-       sr_data->name = oh->name;
+       sr_data->name = name;
        if (cpu_is_omap343x())
                sr_data->ip_type = 1;
        else
@@ -136,10 +128,10 @@ static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
                }
        }
 
-       sr_data->voltdm = voltdm_lookup(sr_dev_attr->sensor_voltdm_name);
+       sr_data->voltdm = voltdm_lookup(voltdm);
        if (!sr_data->voltdm) {
                pr_err("%s: Unable to get voltage domain pointer for VDD %s\n",
-                       __func__, sr_dev_attr->sensor_voltdm_name);
+                       __func__, voltdm);
                goto exit;
        }
 
@@ -160,6 +152,20 @@ exit:
        return 0;
 }
 
+static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
+{
+       struct omap_smartreflex_dev_attr *sr_dev_attr;
+
+       sr_dev_attr = (struct omap_smartreflex_dev_attr *)oh->dev_attr;
+       if (!sr_dev_attr || !sr_dev_attr->sensor_voltdm_name) {
+               pr_err("%s: No voltage domain specified for %s. Cannot initialize\n",
+                      __func__, oh->name);
+               return 0;
+       }
+
+       return sr_init_by_name(oh->name, sr_dev_attr->sensor_voltdm_name);
+}
+
 /*
  * API to be called from board files to enable smartreflex
  * autocompensation at init.
@@ -169,7 +175,42 @@ void __init omap_enable_smartreflex_on_init(void)
        sr_enable_on_init = true;
 }
 
+static const char * const omap4_sr_instances[] = {
+       "mpu",
+       "iva",
+       "core",
+};
+
+static const char * const dra7_sr_instances[] = {
+       "mpu",
+       "core",
+};
+
 int __init omap_devinit_smartreflex(void)
 {
+       const char * const *sr_inst;
+       int i, nr_sr = 0;
+
+       if (soc_is_omap44xx()) {
+               sr_inst = omap4_sr_instances;
+               nr_sr = ARRAY_SIZE(omap4_sr_instances);
+
+       } else if (soc_is_dra7xx()) {
+               sr_inst = dra7_sr_instances;
+               nr_sr = ARRAY_SIZE(dra7_sr_instances);
+       }
+
+       if (nr_sr) {
+               const char *name, *voltdm;
+
+               for (i = 0; i < nr_sr; i++) {
+                       name = kasprintf(GFP_KERNEL, "smartreflex_%s", sr_inst[i]);
+                       voltdm = sr_inst[i];
+                       sr_init_by_name(name, voltdm);
+               }
+
+               return 0;
+       }
+
        return omap_hwmod_for_each_by_class("smartreflex", sr_dev_init, NULL);
 }
index d1010ec..d237bd0 100644 (file)
@@ -502,16 +502,20 @@ static inline void mainstone_init_keypad(void) {}
 #endif
 
 static int mst_pcmcia0_irqs[11] = {
-       [0 ... 10] = -1,
+       [0 ... 4] = -1,
        [5] = MAINSTONE_S0_CD_IRQ,
+       [6 ... 7] = -1,
        [8] = MAINSTONE_S0_STSCHG_IRQ,
+       [9] = -1,
        [10] = MAINSTONE_S0_IRQ,
 };
 
 static int mst_pcmcia1_irqs[11] = {
-       [0 ... 10] = -1,
+       [0 ... 4] = -1,
        [5] = MAINSTONE_S1_CD_IRQ,
+       [6 ... 7] = -1,
        [8] = MAINSTONE_S1_STSCHG_IRQ,
+       [9] = -1,
        [10] = MAINSTONE_S1_IRQ,
 };
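
The reshuffle above avoids overlapping designated initializers, which are
valid C but silently override earlier entries and trip gcc's
-Woverride-init. A minimal reproduction (hypothetical array; compile with
gcc -Wextra -c):

    /* The [5] entry overrides part of the [0 ... 10] range, producing
     * "warning: initialized field overwritten"; splitting the ranges,
     * as in the tables above, keeps the same contents warning-free. */
    static int demo[11] = {
            [0 ... 10] = -1,
            [5] = 42,
    };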
 
index 5656e7a..e4e1b65 100644 (file)
@@ -810,6 +810,16 @@ config QCOM_FALKOR_ERRATUM_E1041
 
          If unsure, say Y.
 
+config NVIDIA_CARMEL_CNP_ERRATUM
+       bool "NVIDIA Carmel CNP: CNP on Carmel semantically different than ARM cores"
+       default y
+       help
+         If CNP is enabled on Carmel cores, non-shareable TLBIs on a core will
+         not invalidate shared TLB entries installed by a different core, as
+         they would on standard ARM cores.
+
+         If unsure, say Y.
+
 config SOCIONEXT_SYNQUACER_PREITS
        bool "Socionext Synquacer: Workaround for GICv3 pre-ITS"
        default y
index 7de6b37..9058cfa 100644 (file)
                        ranges = <0x0 0x00 0x1700000 0x100000>;
                        reg = <0x00 0x1700000 0x0 0x100000>;
                        interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+                       dma-coherent;
 
                        sec_jr0: jr@10000 {
                                compatible = "fsl,sec-v5.4-job-ring",
index 5a8a1dc..28c51e5 100644 (file)
                        ranges = <0x0 0x00 0x1700000 0x100000>;
                        reg = <0x00 0x1700000 0x0 0x100000>;
                        interrupts = <0 75 0x4>;
+                       dma-coherent;
 
                        sec_jr0: jr@10000 {
                                compatible = "fsl,sec-v5.4-job-ring",
index 1d6dfd1..3945830 100644 (file)
                        ranges = <0x0 0x00 0x1700000 0x100000>;
                        reg = <0x00 0x1700000 0x0 0x100000>;
                        interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+                       dma-coherent;
 
                        sec_jr0: jr@10000 {
                                compatible = "fsl,sec-v5.4-job-ring",
index 5ccc4cc..a003e6a 100644 (file)
 #define MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD                                     0x0A4 0x30C 0x000 0x0 0x0
 #define MX8MM_IOMUXC_SD1_CMD_GPIO2_IO1                                      0x0A4 0x30C 0x000 0x5 0x0
 #define MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0                                 0x0A8 0x310 0x000 0x0 0x0
-#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2                                    0x0A8 0x31  0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2                                    0x0A8 0x310 0x000 0x5 0x0
 #define MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1                                 0x0AC 0x314 0x000 0x0 0x0
 #define MX8MM_IOMUXC_SD1_DATA1_GPIO2_IO3                                    0x0AC 0x314 0x000 0x5 0x0
 #define MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2                                 0x0B0 0x318 0x000 0x0 0x0
index 0e1a6d9..122c95d 100644 (file)
@@ -35,7 +35,7 @@
 
 &i2c2 {
        clock-frequency = <400000>;
-       pinctrl-names = "default";
+       pinctrl-names = "default", "gpio";
        pinctrl-0 = <&pinctrl_i2c2>;
        pinctrl-1 = <&pinctrl_i2c2_gpio>;
        sda-gpios = <&gpio5 17 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
index 44a8c23..f3965ec 100644 (file)
@@ -67,7 +67,7 @@
 
 &i2c1 {
        clock-frequency = <400000>;
-       pinctrl-names = "default";
+       pinctrl-names = "default", "gpio";
        pinctrl-0 = <&pinctrl_i2c1>;
        pinctrl-1 = <&pinctrl_i2c1_gpio>;
        sda-gpios = <&gpio5 15 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
index b94b020..68e8fa1 100644 (file)
 #define MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD                                     0x0A4 0x30C 0x000 0x0 0x0
 #define MX8MQ_IOMUXC_SD1_CMD_GPIO2_IO1                                      0x0A4 0x30C 0x000 0x5 0x0
 #define MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0                                 0x0A8 0x310 0x000 0x0 0x0
-#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2                                    0x0A8 0x31  0x000 0x5 0x0
+#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2                                    0x0A8 0x310 0x000 0x5 0x0
 #define MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1                                 0x0AC 0x314 0x000 0x0 0x0
 #define MX8MQ_IOMUXC_SD1_DATA1_GPIO2_IO3                                    0x0AC 0x314 0x000 0x5 0x0
 #define MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2                                 0x0B0 0x318 0x000 0x0 0x0
index d239ab7..53e817c 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
 /*
  * Device Tree file for CZ.NIC Turris Mox Board
- * 2019 by Marek Behun <marek.behun@nic.cz>
+ * 2019 by Marek Behún <kabel@kernel.org>
  */
 
 /dts-v1/;
index 64179a3..c6f5df2 100644 (file)
                };
 
                CP11X_LABEL(sata0): sata@540000 {
-                       compatible = "marvell,armada-8k-ahci";
-                       compatible = "marvell,armada-8k-ahci";
+                       compatible = "marvell,armada-8k-ahci",
+                                    "generic-ahci";
                        reg = <0x540000 0x30000>;
                        dma-coherent;
+                       interrupts = <107 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&CP11X_LABEL(clk) 1 15>,
                                 <&CP11X_LABEL(clk) 1 16>;
                        #address-cells = <1>;
                        status = "disabled";
 
                        sata-port@0 {
-                               interrupts = <109 IRQ_TYPE_LEVEL_HIGH>;
                                reg = <0>;
                        };
 
                        sata-port@1 {
-                               interrupts = <107 IRQ_TYPE_LEVEL_HIGH>;
                                reg = <1>;
                        };
                };
index 93a161b..dc52b73 100644 (file)
@@ -37,7 +37,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
        } while (--n > 0);
 
        sum += ((sum >> 32) | (sum << 32));
-       return csum_fold((__force u32)(sum >> 32));
+       return csum_fold((__force __wsum)(sum >> 32));
 }
 #define ip_fast_csum ip_fast_csum
 
index b77d997..c40f249 100644 (file)
@@ -66,7 +66,8 @@
 #define ARM64_WORKAROUND_1508412               58
 #define ARM64_HAS_LDAPR                                59
 #define ARM64_KVM_PROTECTED_MODE               60
+#define ARM64_WORKAROUND_NVIDIA_CARMEL_CNP     61
 
-#define ARM64_NCAPS                            61
+#define ARM64_NCAPS                            62
 
 #endif /* __ASM_CPUCAPS_H */
index 4e90c2d..94d4025 100644 (file)
 #define CPTR_EL2_DEFAULT       CPTR_EL2_RES1
 
 /* Hyp Debug Configuration Register bits */
+#define MDCR_EL2_TTRF          (1 << 19)
 #define MDCR_EL2_TPMS          (1 << 14)
 #define MDCR_EL2_E2PB_MASK     (UL(0x3))
 #define MDCR_EL2_E2PB_SHIFT    (UL(12))
index ca2cd75..efc10e9 100644 (file)
@@ -251,6 +251,8 @@ unsigned long get_wchan(struct task_struct *p);
 extern struct task_struct *cpu_switch_to(struct task_struct *prev,
                                         struct task_struct *next);
 
+asmlinkage void arm64_preempt_schedule_irq(void);
+
 #define task_pt_regs(p) \
        ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
 
index 9f4e3b2..6623c99 100644 (file)
@@ -55,6 +55,8 @@ void arch_setup_new_exec(void);
 #define arch_setup_new_exec     arch_setup_new_exec
 
 void arch_release_task_struct(struct task_struct *tsk);
+int arch_dup_task_struct(struct task_struct *dst,
+                               struct task_struct *src);
 
 #endif
 
index 506a1cd..e2c20c0 100644 (file)
@@ -525,6 +525,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                                  0, 0,
                                  1, 0),
        },
+#endif
+#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
+       {
+               /* NVIDIA Carmel */
+               .desc = "NVIDIA Carmel CNP erratum",
+               .capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
+               ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
+       },
 #endif
        {
        }
index 0660307..e5281e1 100644 (file)
@@ -383,7 +383,6 @@ static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
         * of support.
         */
        S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
        ARM64_FTR_END,
 };
@@ -1321,7 +1320,10 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
         * may share TLB entries with a CPU stuck in the crashed
         * kernel.
         */
-        if (is_kdump_kernel())
+       if (is_kdump_kernel())
+               return false;
+
+       if (cpus_have_const_cap(ARM64_WORKAROUND_NVIDIA_CARMEL_CNP))
                return false;
 
        return has_cpuid_feature(entry, scope);
index 77605ae..51fcf99 100644 (file)
@@ -353,7 +353,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
         * with the CLIDR_EL1 fields to avoid triggering false warnings
         * when there is a mismatch across the CPUs. Keep track of the
         * effective value of the CTR_EL0 in our internal records for
-        * acurate sanity check and feature enablement.
+        * accurate sanity check and feature enablement.
         */
        info->reg_ctr = read_cpuid_effective_cachetype();
        info->reg_dczid = read_cpuid(DCZID_EL0);
index e6e2842..58303a9 100644 (file)
@@ -64,5 +64,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 {
        memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count);
+       *ppos += count;
+
        return count;
 }
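
The one-line fix above matters because elfcorehdr_read() is called with an advancing offset; if *ppos never moves, the caller's read loop re-reads the same bytes forever. A sketch of the general read-helper contract (hypothetical name):

    #include <linux/io.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /* Read 'count' bytes at *ppos and, crucially, advance *ppos so the
     * caller's read loop makes forward progress. */
    static ssize_t oldmem_read(char *buf, size_t count, u64 *ppos)
    {
            memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count);
            *ppos += count;
            return count;
    }
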
index 325c83b..6e60aa3 100644 (file)
@@ -57,6 +57,8 @@
 #include <asm/processor.h>
 #include <asm/pointer_auth.h>
 #include <asm/stacktrace.h>
+#include <asm/switch_to.h>
+#include <asm/system_misc.h>
 
 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
 #include <linux/stackprotector.h>
index ad20981..d55bdfb 100644 (file)
@@ -194,8 +194,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
 
 #ifdef CONFIG_STACKTRACE
 
-void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
-                    struct task_struct *task, struct pt_regs *regs)
+noinline void arch_stack_walk(stack_trace_consume_fn consume_entry,
+                             void *cookie, struct task_struct *task,
+                             struct pt_regs *regs)
 {
        struct stackframe frame;
 
@@ -203,8 +204,8 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
                start_backtrace(&frame, regs->regs[29], regs->pc);
        else if (task == current)
                start_backtrace(&frame,
-                               (unsigned long)__builtin_frame_address(0),
-                               (unsigned long)arch_stack_walk);
+                               (unsigned long)__builtin_frame_address(1),
+                               (unsigned long)__builtin_return_address(0));
        else
                start_backtrace(&frame, thread_saved_fp(task),
                                thread_saved_pc(task));
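
The switch above to __builtin_frame_address(1) and __builtin_return_address(0) starts the walk in the caller's frame, so arch_stack_walk() itself is excluded from the trace, and noinline guarantees a real stack frame exists for frame level 1 to refer to. A standalone illustration of the two builtins (hypothetical function):

    #include <linux/printk.h>

    /* Must not be inlined, or frame level 1 would name the wrong
     * caller. */
    static noinline void show_caller(void)
    {
            void *caller_fp = __builtin_frame_address(1);  /* caller's frame */
            void *caller_pc = __builtin_return_address(0); /* caller's PC */

            pr_info("called from %pS (fp=%px)\n", caller_pc, caller_fp);
    }
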
index 7a7e425..dbc8905 100644 (file)
@@ -89,6 +89,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
  *  - Debug ROM Address (MDCR_EL2_TDRA)
  *  - OS related registers (MDCR_EL2_TDOSA)
  *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
+ *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
  *
  * Additionally, KVM only traps guest accesses to the debug registers if
  * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
@@ -112,6 +113,7 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
        vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
        vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
                                MDCR_EL2_TPMS |
+                               MDCR_EL2_TTRF |
                                MDCR_EL2_TPMCR |
                                MDCR_EL2_TDRA |
                                MDCR_EL2_TDOSA);
index ee3682b..39f8f7f 100644 (file)
@@ -429,6 +429,13 @@ u64 __vgic_v3_get_gic_config(void)
        if (has_vhe())
                flags = local_daif_save();
 
+       /*
+        * Table 11-2 "Permitted ICC_SRE_ELx.SRE settings" indicates
+        * that to be able to set ICC_SRE_EL1.SRE to 0, all the
+        * interrupt overrides must be set. You've got to love this.
+        */
+       sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
+       isb();
        write_gicreg(0, ICC_SRE_EL1);
        isb();
 
@@ -436,6 +443,8 @@ u64 __vgic_v3_get_gic_config(void)
 
        write_gicreg(sre, ICC_SRE_EL1);
        isb();
+       sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
+       isb();
 
        if (has_vhe())
                local_daif_restore(flags);
index 7484ea4..5d9550f 100644 (file)
@@ -1448,6 +1448,22 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
 struct range arch_get_mappable_range(void)
 {
        struct range mhp_range;
+       u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
+       u64 end_linear_pa = __pa(PAGE_END - 1);
+
+       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+               /*
+                * Check for a wrap, it is possible because of randomized linear
+                * mapping the start physical address is actually bigger than
+                * the end physical address. In this case set start to zero
+                * because [0, end_linear_pa] range must still be able to cover
+                * all addressable physical addresses.
+                */
+               if (start_linear_pa > end_linear_pa)
+                       start_linear_pa = 0;
+       }
+
+       WARN_ON(start_linear_pa > end_linear_pa);
 
        /*
         * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
@@ -1455,8 +1471,9 @@ struct range arch_get_mappable_range(void)
         * range which can be mapped inside this linear mapping range, must
         * also be derived from its end points.
         */
-       mhp_range.start = __pa(_PAGE_OFFSET(vabits_actual));
-       mhp_range.end =  __pa(PAGE_END - 1);
+       mhp_range.start = start_linear_pa;
+       mhp_range.end =  end_linear_pa;
+
        return mhp_range;
 }
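
A compact restatement of the wrap logic added above, as a pure helper with a hypothetical name: with CONFIG_RANDOMIZE_BASE the linear map may start at a physical address above the one backing PAGE_END - 1, and clamping the start to 0 keeps the returned range a superset of all addressable physical memory.

    #include <linux/types.h>

    static u64 clamp_linear_start(u64 start_pa, u64 end_pa, bool randomized)
    {
            /* Wrapped randomized mapping: cover [0, end_pa] instead. */
            if (randomized && start_pa > end_pa)
                    return 0;
            return start_pa;
    }
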
 
index ae2b1c7..ef2bb9b 100644 (file)
@@ -9,7 +9,7 @@ int arch_check_ftrace_location(struct kprobe *p)
        return 0;
 }
 
-/* Ftrace callback handler for kprobes -- called under preepmt disabed */
+/* Ftrace callback handler for kprobes -- called under preepmt disabled */
+/* Ftrace callback handler for kprobes -- called under preepmt disabled */
 void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                           struct ftrace_ops *ops, struct ftrace_regs *fregs)
 {
index b3aa460..0817913 100644 (file)
@@ -54,8 +54,7 @@
 
 static inline unsigned long user_stack_pointer(struct pt_regs *regs)
 {
-       /* FIXME: should this be bspstore + nr_dirty regs? */
-       return regs->ar_bspstore;
+       return regs->r12;
 }
 
 static inline int is_syscall_success(struct pt_regs *regs)
@@ -79,11 +78,6 @@ static inline long regs_return_value(struct pt_regs *regs)
        unsigned long __ip = instruction_pointer(regs);                 \
        (__ip & ~3UL) + ((__ip & 3UL) << 2);                            \
 })
-/*
- * Why not default?  Because user_stack_pointer() on ia64 gives register
- * stack backing store instead...
- */
-#define current_user_stack_pointer() (current_pt_regs()->r12)
 
   /* given a pointer to a task_struct, return the user's pt_regs */
 # define task_pt_regs(t)               (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
index 8b5b8e6..dd5bfed 100644 (file)
@@ -59,7 +59,7 @@ show_##name(struct device *dev, struct device_attribute *attr,        \
                char *buf)                                              \
 {                                                                      \
        u32 cpu=dev->id;                                                \
-       return sprintf(buf, "%lx\n", name[cpu]);                        \
+       return sprintf(buf, "%llx\n", name[cpu]);                       \
 }
 
 #define store(name)                                                    \
@@ -86,9 +86,9 @@ store_call_start(struct device *dev, struct device_attribute *attr,
 
 #ifdef ERR_INJ_DEBUG
        printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu);
-       printk(KERN_DEBUG "err_type_info=%lx,\n", err_type_info[cpu]);
-       printk(KERN_DEBUG "err_struct_info=%lx,\n", err_struct_info[cpu]);
-       printk(KERN_DEBUG "err_data_buffer=%lx, %lx, %lx.\n",
+       printk(KERN_DEBUG "err_type_info=%llx,\n", err_type_info[cpu]);
+       printk(KERN_DEBUG "err_struct_info=%llx,\n", err_struct_info[cpu]);
+       printk(KERN_DEBUG "err_data_buffer=%llx, %llx, %llx.\n",
                          err_data_buffer[cpu].data1,
                          err_data_buffer[cpu].data2,
                          err_data_buffer[cpu].data3);
@@ -117,8 +117,8 @@ store_call_start(struct device *dev, struct device_attribute *attr,
 
 #ifdef ERR_INJ_DEBUG
        printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]);
-       printk(KERN_DEBUG "capabilities=%lx,\n", capabilities[cpu]);
-       printk(KERN_DEBUG "resources=%lx\n", resources[cpu]);
+       printk(KERN_DEBUG "capabilities=%llx,\n", capabilities[cpu]);
+       printk(KERN_DEBUG "resources=%llx\n", resources[cpu]);
 #endif
        return size;
 }
@@ -131,7 +131,7 @@ show_virtual_to_phys(struct device *dev, struct device_attribute *attr,
                        char *buf)
 {
        unsigned int cpu=dev->id;
-       return sprintf(buf, "%lx\n", phys_addr[cpu]);
+       return sprintf(buf, "%llx\n", phys_addr[cpu]);
 }
 
 static ssize_t
@@ -145,7 +145,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
        ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL);
        if (ret<=0) {
 #ifdef ERR_INJ_DEBUG
-               printk("Virtual address %lx is not existing.\n",virt_addr);
+               printk("Virtual address %llx is not existing.\n", virt_addr);
 #endif
                return -EINVAL;
        }
@@ -163,7 +163,7 @@ show_err_data_buffer(struct device *dev,
 {
        unsigned int cpu=dev->id;
 
-       return sprintf(buf, "%lx, %lx, %lx\n",
+       return sprintf(buf, "%llx, %llx, %llx\n",
                        err_data_buffer[cpu].data1,
                        err_data_buffer[cpu].data2,
                        err_data_buffer[cpu].data3);
@@ -178,13 +178,13 @@ store_err_data_buffer(struct device *dev,
        int ret;
 
 #ifdef ERR_INJ_DEBUG
-       printk("write err_data_buffer=[%lx,%lx,%lx] on cpu%d\n",
+       printk("write err_data_buffer=[%llx,%llx,%llx] on cpu%d\n",
                 err_data_buffer[cpu].data1,
                 err_data_buffer[cpu].data2,
                 err_data_buffer[cpu].data3,
                 cpu);
 #endif
-       ret=sscanf(buf, "%lx, %lx, %lx",
+       ret = sscanf(buf, "%llx, %llx, %llx",
                        &err_data_buffer[cpu].data1,
                        &err_data_buffer[cpu].data2,
                        &err_data_buffer[cpu].data3);
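
All of the %lx to %llx churn above follows from ia64 using asm-generic int-ll64.h, under which u64 is 'unsigned long long' on every architecture; %lx then mismatches the type and trips -Wformat. The safe pattern, sketched:

    #include <linux/printk.h>
    #include <linux/types.h>

    static void print_u64(u64 v)
    {
            /* u64 is unsigned long long under int-ll64.h, so %llx
             * matches; for merely "64-bit-ish" types, cast explicitly. */
            pr_debug("value=%llx\n", v);
            pr_debug("value=%llx\n", (unsigned long long)v);
    }
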
index d4cae2f..adf6521 100644 (file)
@@ -1824,7 +1824,7 @@ ia64_mca_cpu_init(void *cpu_data)
                        data = mca_bootmem();
                        first_time = 0;
                } else
-                       data = (void *)__get_free_pages(GFP_KERNEL,
+                       data = (void *)__get_free_pages(GFP_ATOMIC,
                                                        get_order(sz));
                if (!data)
                        panic("Could not allocate MCA memory for cpu %d\n",
index 279be01..23a1403 100644 (file)
@@ -43,7 +43,7 @@
 #include <asm/prom.h>
 
 #ifdef CONFIG_MIPS_ELF_APPENDED_DTB
-const char __section(".appended_dtb") __appended_dtb[0x100000];
+char __section(".appended_dtb") __appended_dtb[0x100000];
 #endif /* CONFIG_MIPS_ELF_APPENDED_DTB */
 
 struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
index 1234834..1f98947 100644 (file)
@@ -176,7 +176,7 @@ SECTIONS
        .fill : {
                FILL(0);
                BYTE(0);
-               . = ALIGN(8);
+               STRUCT_ALIGN();
        }
        __appended_dtb = .;
        /* leave space for appended DTB */
index 6eb98a7..ad5344e 100644 (file)
@@ -238,7 +238,7 @@ void flush_dcache_page(struct page *page)
 {
        struct address_space *mapping;
 
-       mapping = page_mapping(page);
+       mapping = page_mapping_file(page);
        if (mapping && !mapping_mapped(mapping))
                set_bit(PG_dcache_dirty, &page->flags);
        else {
index cf5ee9b..84ee232 100644 (file)
@@ -72,7 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
 #endif
        case 4: return __cmpxchg_u32((unsigned int *)ptr,
                                     (unsigned int)old, (unsigned int)new_);
-       case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_);
+       case 1: return __cmpxchg_u8((u8 *)ptr, old & 0xff, new_ & 0xff);
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
index 11ece0d..b5fbcd2 100644 (file)
@@ -272,7 +272,6 @@ on downward growing arches, it looks like this:
        regs->gr[23] = 0;                               \
 } while(0)
 
-struct task_struct;
 struct mm_struct;
 
 /* Free all resources held by a thread. */
index 853c19c..dec951d 100644 (file)
@@ -5,34 +5,10 @@
  * Floating-point emulation code
  *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
  */
-/*
- * BEGIN_DESC
- * 
- *  File: 
- *      @(#)   pa/fp/fpu.h             $Revision: 1.1 $
- * 
- *  Purpose:
- *      <<please update with a synopis of the functionality provided by this file>>
- * 
- * 
- * END_DESC  
-*/
-
-#ifdef __NO_PA_HDRS
-    PA header file -- do not include this header file for non-PA builds.
-#endif
-
 
 #ifndef _MACHINE_FPU_INCLUDED /* allows multiple inclusion */
 #define _MACHINE_FPU_INCLUDED
 
-#if 0
-#ifndef _SYS_STDSYMS_INCLUDED
-#    include <sys/stdsyms.h>
-#endif   /* _SYS_STDSYMS_INCLUDED  */
-#include  <machine/pdc/pdc_rqsts.h>
-#endif
-
 #define PA83_FPU_FLAG    0x00000001
 #define PA89_FPU_FLAG    0x00000002
 #define PA2_0_FPU_FLAG   0x00000010
 #define COPR_FP        0x00000080      /* Floating point -- Coprocessor 0 */
 #define SFU_MPY_DIVIDE 0x00008000      /* Multiply/Divide __ SFU 0 */
 
-
 #define EM_FPU_TYPE_OFFSET 272
 
 /* version of EMULATION software for COPR,0,0 instruction */
 #define EMULATION_VERSION 4
 
 /*
- * The only was to differeniate between TIMEX and ROLEX (or PCX-S and PCX-T)
- * is thorough the potential type field from the PDC_MODEL call.  The 
- * following flags are used at assist this differeniation.
+ * The only way to differentiate between TIMEX and ROLEX (or PCX-S and PCX-T)
+ * is through the potential type field from the PDC_MODEL call.
+ * The following flags are used to assist this differentiation.
  */
 
 #define ROLEX_POTENTIAL_KEY_FLAGS      PDC_MODEL_CPU_KEY_WORD_TO_IO
 #define TIMEX_POTENTIAL_KEY_FLAGS      (PDC_MODEL_CPU_KEY_QUAD_STORE | \
                                         PDC_MODEL_CPU_KEY_RECIP_SQRT)
 
-
 #endif /* ! _MACHINE_FPU_INCLUDED */
index 7897d16..727d4b3 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/bug.h>
 #include <asm/cputable.h>
 
-static inline bool early_cpu_has_feature(unsigned long feature)
+static __always_inline bool early_cpu_has_feature(unsigned long feature)
 {
        return !!((CPU_FTRS_ALWAYS & feature) ||
                  (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));
@@ -46,7 +46,7 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
        return static_branch_likely(&cpu_feature_keys[i]);
 }
 #else
-static inline bool cpu_has_feature(unsigned long feature)
+static __always_inline bool cpu_has_feature(unsigned long feature)
 {
        return early_cpu_has_feature(feature);
 }
index 6084fa4..f66b63e 100644 (file)
@@ -191,3 +191,7 @@ $(obj)/prom_init_check: $(src)/prom_init_check.sh $(obj)/prom_init.o FORCE
 targets += prom_init_check
 
 clean-files := vmlinux.lds
+
+# Force dependency (incbin is bad)
+$(obj)/vdso32_wrapper.o : $(obj)/vdso32/vdso32.so.dbg
+$(obj)/vdso64_wrapper.o : $(obj)/vdso64/vdso64.so.dbg
index 8ebc11d..77abd1a 100644 (file)
@@ -6,11 +6,11 @@
 CFLAGS_ptrace-view.o           += -DUTS_MACHINE='"$(UTS_MACHINE)"'
 
 obj-y                          += ptrace.o ptrace-view.o
-obj-$(CONFIG_PPC_FPU_REGS)     += ptrace-fpu.o
+obj-y                          += ptrace-fpu.o
 obj-$(CONFIG_COMPAT)           += ptrace32.o
 obj-$(CONFIG_VSX)              += ptrace-vsx.o
 ifneq ($(CONFIG_VSX),y)
-obj-$(CONFIG_PPC_FPU_REGS)     += ptrace-novsx.o
+obj-y                          += ptrace-novsx.o
 endif
 obj-$(CONFIG_ALTIVEC)          += ptrace-altivec.o
 obj-$(CONFIG_SPE)              += ptrace-spe.o
index 3487f2c..eafe5f0 100644 (file)
@@ -165,22 +165,8 @@ int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data);
 extern const struct user_regset_view user_ppc_native_view;
 
 /* ptrace-fpu */
-#ifdef CONFIG_PPC_FPU_REGS
 int ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data);
 int ptrace_put_fpr(struct task_struct *child, int index, unsigned long data);
-#else
-static inline int
-ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data)
-{
-       return -EIO;
-}
-
-static inline int
-ptrace_put_fpr(struct task_struct *child, int index, unsigned long data)
-{
-       return -EIO;
-}
-#endif
 
 /* ptrace-(no)adv */
 void ppc_gethwdinfo(struct ppc_debug_info *dbginfo);
index 8301cb5..5dca193 100644 (file)
@@ -8,32 +8,42 @@
 
 int ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data)
 {
+#ifdef CONFIG_PPC_FPU_REGS
        unsigned int fpidx = index - PT_FPR0;
+#endif
 
        if (index > PT_FPSCR)
                return -EIO;
 
+#ifdef CONFIG_PPC_FPU_REGS
        flush_fp_to_thread(child);
        if (fpidx < (PT_FPSCR - PT_FPR0))
                memcpy(data, &child->thread.TS_FPR(fpidx), sizeof(long));
        else
                *data = child->thread.fp_state.fpscr;
+#else
+       *data = 0;
+#endif
 
        return 0;
 }
 
 int ptrace_put_fpr(struct task_struct *child, int index, unsigned long data)
 {
+#ifdef CONFIG_PPC_FPU_REGS
        unsigned int fpidx = index - PT_FPR0;
+#endif
 
        if (index > PT_FPSCR)
                return -EIO;
 
+#ifdef CONFIG_PPC_FPU_REGS
        flush_fp_to_thread(child);
        if (fpidx < (PT_FPSCR - PT_FPR0))
                memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long));
        else
                child->thread.fp_state.fpscr = data;
+#endif
 
        return 0;
 }
index b3b3683..7433f3d 100644 (file)
 int fpr_get(struct task_struct *target, const struct user_regset *regset,
            struct membuf to)
 {
+#ifdef CONFIG_PPC_FPU_REGS
        BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
                     offsetof(struct thread_fp_state, fpr[32]));
 
        flush_fp_to_thread(target);
 
        return membuf_write(&to, &target->thread.fp_state, 33 * sizeof(u64));
+#else
+       return membuf_write(&to, &empty_zero_page, 33 * sizeof(u64));
+#endif
 }
 
 /*
@@ -46,6 +50,7 @@ int fpr_set(struct task_struct *target, const struct user_regset *regset,
            unsigned int pos, unsigned int count,
            const void *kbuf, const void __user *ubuf)
 {
+#ifdef CONFIG_PPC_FPU_REGS
        BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
                     offsetof(struct thread_fp_state, fpr[32]));
 
@@ -53,4 +58,7 @@ int fpr_set(struct task_struct *target, const struct user_regset *regset,
 
        return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.fp_state, 0, -1);
+#else
+       return 0;
+#endif
 }
index 2bad806..6ccffc6 100644 (file)
@@ -522,13 +522,11 @@ static const struct user_regset native_regsets[] = {
                .size = sizeof(long), .align = sizeof(long),
                .regset_get = gpr_get, .set = gpr_set
        },
-#ifdef CONFIG_PPC_FPU_REGS
        [REGSET_FPR] = {
                .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
                .size = sizeof(double), .align = sizeof(double),
                .regset_get = fpr_get, .set = fpr_set
        },
-#endif
 #ifdef CONFIG_ALTIVEC
        [REGSET_VMX] = {
                .core_note_type = NT_PPC_VMX, .n = 34,
index 75ee918..f651b99 100644 (file)
@@ -775,7 +775,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
        else
                prepare_save_user_regs(1);
 
-       if (!user_write_access_begin(frame, sizeof(*frame)))
+       if (!user_access_begin(frame, sizeof(*frame)))
                goto badframe;
 
        /* Put the siginfo & fill in most of the ucontext */
@@ -809,17 +809,15 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
                unsafe_put_user(PPC_INST_ADDI + __NR_rt_sigreturn, &mctx->mc_pad[0],
                                failed);
                unsafe_put_user(PPC_INST_SC, &mctx->mc_pad[1], failed);
+               asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
        }
        unsafe_put_sigset_t(&frame->uc.uc_sigmask, oldset, failed);
 
-       user_write_access_end();
+       user_access_end();
 
        if (copy_siginfo_to_user(&frame->info, &ksig->info))
                goto badframe;
 
-       if (tramp == (unsigned long)mctx->mc_pad)
-               flush_icache_range(tramp, tramp + 2 * sizeof(unsigned long));
-
        regs->link = tramp;
 
 #ifdef CONFIG_PPC_FPU_REGS
@@ -844,7 +842,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
        return 0;
 
 failed:
-       user_write_access_end();
+       user_access_end();
 
 badframe:
        signal_fault(tsk, regs, "handle_rt_signal32", frame);
@@ -879,7 +877,7 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
        else
                prepare_save_user_regs(1);
 
-       if (!user_write_access_begin(frame, sizeof(*frame)))
+       if (!user_access_begin(frame, sizeof(*frame)))
                goto badframe;
        sc = (struct sigcontext __user *) &frame->sctx;
 
@@ -908,11 +906,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
                /* Set up the sigreturn trampoline: li r0,sigret; sc */
                unsafe_put_user(PPC_INST_ADDI + __NR_sigreturn, &mctx->mc_pad[0], failed);
                unsafe_put_user(PPC_INST_SC, &mctx->mc_pad[1], failed);
+               asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
        }
-       user_write_access_end();
-
-       if (tramp == (unsigned long)mctx->mc_pad)
-               flush_icache_range(tramp, tramp + 2 * sizeof(unsigned long));
+       user_access_end();
 
        regs->link = tramp;
 
@@ -935,7 +931,7 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
        return 0;
 
 failed:
-       user_write_access_end();
+       user_access_end();
 
 badframe:
        signal_fault(tsk, regs, "handle_signal32", frame);
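
On the trampoline change above: the sigreturn instructions are stored with unsafe_put_user() and the inline dcbst/sync/icbi/sync sequence makes them coherent with the instruction cache on the spot, replacing the later flush_icache_range() call. A hedged sketch of that maintenance sequence as a helper (hypothetical name; the eventual return to user space supplies the final context synchronization):

    /* Push the freshly written instructions out of the data cache and
     * invalidate the stale instruction-cache line for the same address. */
    static inline void sync_trampoline(const void *addr)
    {
            asm volatile("dcbst 0,%0; sync; icbi 0,%0; sync"
                         : : "r" (addr) : "memory");
    }
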
index a6e29f8..d21d081 100644 (file)
@@ -65,3 +65,14 @@ V_FUNCTION_END(__kernel_clock_getres)
 V_FUNCTION_BEGIN(__kernel_time)
        cvdso_call_time __c_kernel_time
 V_FUNCTION_END(__kernel_time)
+
+/* Routines for restoring integer registers, called by the compiler.  */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the integer restore area.  */
+_GLOBAL(_restgpr_31_x)
+_GLOBAL(_rest32gpr_31_x)
+       lwz     r0,4(r11)
+       lwz     r31,-4(r11)
+       mtlr    r0
+       mr      r1,r11
+       blr
index 764170f..3805519 100644 (file)
@@ -887,7 +887,8 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot,
 
        want_v = hpte_encode_avpn(vpn, psize, ssize);
 
-       flags = (newpp & 7) | H_AVPN;
+       flags = (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO)) | H_AVPN;
+       flags |= (newpp & HPTE_R_KEY_HI) >> 48;
        if (mmu_has_feature(MMU_FTR_KERNEL_RO))
                /* Move pp0 into bit 8 (IBM 55) */
                flags |= (newpp & HPTE_R_PP0) >> 55;
index ea4d6a6..e83e089 100644 (file)
@@ -452,12 +452,28 @@ static int do_suspend(void)
        return ret;
 }
 
+/**
+ * struct pseries_suspend_info - State shared between CPUs for join/suspend.
+ * @counter: Threads are to increment this upon resuming from suspend
+ *           or if an error is received from H_JOIN. The thread which performs
+ *           the first increment (i.e. sets it to 1) is responsible for
+ *           waking the other threads.
+ * @done: False if join/suspend is in progress. True if the operation is
+ *        complete (successful or not).
+ */
+struct pseries_suspend_info {
+       atomic_t counter;
+       bool done;
+};
+
 static int do_join(void *arg)
 {
-       atomic_t *counter = arg;
+       struct pseries_suspend_info *info = arg;
+       atomic_t *counter = &info->counter;
        long hvrc;
        int ret;
 
+retry:
        /* Must ensure MSR.EE off for H_JOIN. */
        hard_irq_disable();
        hvrc = plpar_hcall_norets(H_JOIN);
@@ -473,8 +489,20 @@ static int do_join(void *arg)
        case H_SUCCESS:
                /*
                 * The suspend is complete and this cpu has received a
-                * prod.
+                * prod, or we've received a stray prod from unrelated
+                * code (e.g. paravirt spinlocks) and we need to join
+                * again.
+                *
+                * This barrier orders the return from H_JOIN above vs
+                * the load of info->done. It pairs with the barrier
+                * in the wakeup/prod path below.
                 */
+               smp_mb();
+               if (READ_ONCE(info->done) == false) {
+                       pr_info_ratelimited("premature return from H_JOIN on CPU %i, retrying",
+                                           smp_processor_id());
+                       goto retry;
+               }
                ret = 0;
                break;
        case H_BAD_MODE:
@@ -488,6 +516,13 @@ static int do_join(void *arg)
 
        if (atomic_inc_return(counter) == 1) {
                pr_info("CPU %u waking all threads\n", smp_processor_id());
+               WRITE_ONCE(info->done, true);
+               /*
+                * This barrier orders the store to info->done vs subsequent
+                * H_PRODs to wake the other CPUs. It pairs with the barrier
+                * in the H_SUCCESS case above.
+                */
+               smp_mb();
                prod_others();
        }
        /*
@@ -535,11 +570,16 @@ static int pseries_suspend(u64 handle)
        int ret;
 
        while (true) {
-               atomic_t counter = ATOMIC_INIT(0);
+               struct pseries_suspend_info info;
                unsigned long vasi_state;
                int vasi_err;
 
-               ret = stop_machine(do_join, &counter, cpu_online_mask);
+               info = (struct pseries_suspend_info) {
+                       .counter = ATOMIC_INIT(0),
+                       .done = false,
+               };
+
+               ret = stop_machine(do_join, &info, cpu_online_mask);
                if (ret == 0)
                        break;
                /*
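
The barrier pairing introduced above is the interesting part: the waking thread must publish info->done before prodding anyone, and a prodded thread must order its return from H_JOIN before loading info->done, or a final wakeup could be misread as a stray one. A generic sketch of the pairing (illustrative names, not the exact pseries code):

    #include <linux/compiler.h>
    #include <linux/types.h>
    #include <asm/barrier.h>

    struct join_info {
            bool done;
    };

    static void waker(struct join_info *info)
    {
            WRITE_ONCE(info->done, true);
            smp_mb();       /* pairs with the barrier in woken() */
            /* ... prod/wake the other CPUs here ... */
    }

    static bool woken(struct join_info *info)
    {
            smp_mb();       /* pairs with the barrier in waker() */
            /* false means a stray prod: the caller should join again */
            return READ_ONCE(info->done);
    }
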
index 85d626b..0d0cf67 100644 (file)
@@ -93,7 +93,6 @@ config RISCV
        select PCI_MSI if PCI
        select RISCV_INTC
        select RISCV_TIMER if RISCV_SBI
-       select SPARSEMEM_STATIC if 32BIT
        select SPARSE_IRQ
        select SYSCTL_EXCEPTION_TRACE
        select THREAD_INFO_IN_TASK
@@ -154,7 +153,8 @@ config ARCH_FLATMEM_ENABLE
 config ARCH_SPARSEMEM_ENABLE
        def_bool y
        depends on MMU
-       select SPARSEMEM_VMEMMAP_ENABLE
+       select SPARSEMEM_STATIC if 32BIT && SPARSEMEM
+       select SPARSEMEM_VMEMMAP_ENABLE if 64BIT
 
 config ARCH_SELECT_MEMORY_MODEL
        def_bool ARCH_SPARSEMEM_ENABLE
@@ -314,7 +314,7 @@ endchoice
 # Common NUMA Features
 config NUMA
        bool "NUMA Memory Allocation and Scheduler Support"
-       depends on SMP
+       depends on SMP && MMU
        select GENERIC_ARCH_NUMA
        select OF_NUMA
        select ARCH_SUPPORTS_NUMA_BALANCING
index 7efcece..e1b2690 100644 (file)
@@ -31,6 +31,8 @@ config SOC_CANAAN
        select SIFIVE_PLIC
        select ARCH_HAS_RESET_CONTROLLER
        select PINCTRL
+       select COMMON_CLK
+       select COMMON_CLK_K210
        help
          This enables support for Canaan Kendryte K210 SoC platform hardware.
 
index 27e005f..2a652b0 100644 (file)
@@ -9,4 +9,20 @@ long long __lshrti3(long long a, int b);
 long long __ashrti3(long long a, int b);
 long long __ashlti3(long long a, int b);
 
+
+#define DECLARE_DO_ERROR_INFO(name)    asmlinkage void name(struct pt_regs *regs)
+
+DECLARE_DO_ERROR_INFO(do_trap_unknown);
+DECLARE_DO_ERROR_INFO(do_trap_insn_misaligned);
+DECLARE_DO_ERROR_INFO(do_trap_insn_fault);
+DECLARE_DO_ERROR_INFO(do_trap_insn_illegal);
+DECLARE_DO_ERROR_INFO(do_trap_load_fault);
+DECLARE_DO_ERROR_INFO(do_trap_load_misaligned);
+DECLARE_DO_ERROR_INFO(do_trap_store_misaligned);
+DECLARE_DO_ERROR_INFO(do_trap_store_fault);
+DECLARE_DO_ERROR_INFO(do_trap_ecall_u);
+DECLARE_DO_ERROR_INFO(do_trap_ecall_s);
+DECLARE_DO_ERROR_INFO(do_trap_ecall_m);
+DECLARE_DO_ERROR_INFO(do_trap_break);
+
 #endif /* _ASM_RISCV_PROTOTYPES_H */
index 9807ad1..e4c4355 100644 (file)
@@ -12,4 +12,6 @@
 
 #include <asm-generic/irq.h>
 
+extern void __init init_IRQ(void);
+
 #endif /* _ASM_RISCV_IRQ_H */
index 3a24003..021ed64 100644 (file)
@@ -71,6 +71,7 @@ int riscv_of_processor_hartid(struct device_node *node);
 int riscv_of_parent_hartid(struct device_node *node);
 
 extern void riscv_fill_hwcap(void);
+extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 
 #endif /* __ASSEMBLY__ */
 
index cb4abb6..09ad4e9 100644 (file)
@@ -119,6 +119,11 @@ extern int regs_query_register_offset(const char *name);
 extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
                                               unsigned int n);
 
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+                          unsigned long frame_pointer);
+int do_syscall_trace_enter(struct pt_regs *regs);
+void do_syscall_trace_exit(struct pt_regs *regs);
+
 /**
  * regs_get_register() - get register value from its offset
  * @regs:      pt_regs from which register value is gotten
index 99895d9..d702741 100644 (file)
@@ -51,10 +51,10 @@ enum sbi_ext_rfence_fid {
        SBI_EXT_RFENCE_REMOTE_FENCE_I = 0,
        SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
        SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
-       SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
        SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
-       SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
+       SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
        SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,
+       SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
 };
 
 enum sbi_ext_hsm_fid {
index 81de51e..507cae2 100644 (file)
@@ -88,4 +88,6 @@ static inline int read_current_timer(unsigned long *timer_val)
        return 0;
 }
 
+extern void time_init(void);
+
 #endif /* _ASM_RISCV_TIMEX_H */
index 824b2c9..f944062 100644 (file)
@@ -306,7 +306,9 @@ do {                                                                \
  * data types like structures or arrays.
  *
  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
- * to the result of dereferencing @ptr.
+ * to the result of dereferencing @ptr. The value of @x is copied to avoid
+ * re-ordering where @x is evaluated inside the block that enables user-space
+ * access (thus bypassing user space protection if @x is a function).
  *
  * Caller must check the pointer with access_ok() before calling this
  * function.
@@ -316,12 +318,13 @@ do {                                                              \
 #define __put_user(x, ptr)                                     \
 ({                                                             \
        __typeof__(*(ptr)) __user *__gu_ptr = (ptr);            \
+       __typeof__(*__gu_ptr) __val = (x);                      \
        long __pu_err = 0;                                      \
                                                                \
        __chk_user_ptr(__gu_ptr);                               \
                                                                \
        __enable_user_access();                                 \
-       __put_user_nocheck(x, __gu_ptr, __pu_err);              \
+       __put_user_nocheck(__val, __gu_ptr, __pu_err);          \
        __disable_user_access();                                \
                                                                \
        __pu_err;                                               \
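
The point of the new __val temporary above: @x is now evaluated while user access is still disabled, so if @x is (or calls) a function the compiler cannot schedule that call inside the window where the SUM bit leaves user memory accessible. The shape of the hardened macro, sketched with a hypothetical name on the helpers this file already defines:

    #define put_user_sketch(x, ptr, err)                            \
    do {                                                            \
            /* Evaluate x before opening the user-access window. */ \
            __typeof__(*(ptr)) __val = (x);                         \
                                                                    \
            __enable_user_access();                                 \
            __put_user_nocheck(__val, (ptr), (err));                \
            __disable_user_access();                                \
    } while (0)
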
index 3dc0abd..647a47f 100644 (file)
@@ -8,6 +8,7 @@ CFLAGS_REMOVE_ftrace.o  = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_patch.o  = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_sbi.o    = $(CC_FLAGS_FTRACE)
 endif
+CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,)
 
 extra-y += head.o
 extra-y += vmlinux.lds
index 744f320..76274a4 100644 (file)
@@ -447,6 +447,7 @@ ENDPROC(__switch_to)
 #endif
 
        .section ".rodata"
+       .align LGREG
        /* Exception vector table */
 ENTRY(excp_vect_table)
        RISCV_PTR do_trap_insn_misaligned
index e637249..17ca5e9 100644 (file)
@@ -2,39 +2,41 @@
 
 #include <linux/kprobes.h>
 
-/* Ftrace callback handler for kprobes -- called under preepmt disabed */
+/* Ftrace callback handler for kprobes -- called under preepmt disabled */
+/* Ftrace callback handler for kprobes -- called under preepmt disabled */
 void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
-                          struct ftrace_ops *ops, struct ftrace_regs *regs)
+                          struct ftrace_ops *ops, struct ftrace_regs *fregs)
 {
        struct kprobe *p;
+       struct pt_regs *regs;
        struct kprobe_ctlblk *kcb;
 
        p = get_kprobe((kprobe_opcode_t *)ip);
        if (unlikely(!p) || kprobe_disabled(p))
                return;
 
+       regs = ftrace_get_regs(fregs);
        kcb = get_kprobe_ctlblk();
        if (kprobe_running()) {
                kprobes_inc_nmissed_count(p);
        } else {
-               unsigned long orig_ip = instruction_pointer(&(regs->regs));
+               unsigned long orig_ip = instruction_pointer(regs);
 
-               instruction_pointer_set(&(regs->regs), ip);
+               instruction_pointer_set(regs, ip);
 
                __this_cpu_write(current_kprobe, p);
                kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-               if (!p->pre_handler || !p->pre_handler(p, &(regs->regs))) {
+               if (!p->pre_handler || !p->pre_handler(p, regs)) {
                        /*
                         * Emulate singlestep (and also recover regs->pc)
                         * as if there is a nop
                         */
-                       instruction_pointer_set(&(regs->regs),
+                       instruction_pointer_set(regs,
                                (unsigned long)p->addr + MCOUNT_INSN_SIZE);
                        if (unlikely(p->post_handler)) {
                                kcb->kprobe_status = KPROBE_HIT_SSDONE;
-                               p->post_handler(p, &(regs->regs), 0);
+                               p->post_handler(p, regs, 0);
                        }
-                       instruction_pointer_set(&(regs->regs), orig_ip);
+                       instruction_pointer_set(regs, orig_ip);
                }
 
                /*
index a2ec186..7e2c78e 100644 (file)
@@ -256,8 +256,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
                 * normal page fault.
                 */
                regs->epc = (unsigned long) cur->addr;
-               if (!instruction_pointer(regs))
-                       BUG();
+               BUG_ON(!instruction_pointer(regs));
 
                if (kcb->kprobe_status == KPROBE_REENTER)
                        restore_previous_kprobe(kcb);
index 6f728e7..f9cd57c 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/cpu.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
+#include <linux/sched/debug.h>
 #include <linux/sched/task_stack.h>
 #include <linux/tick.h>
 #include <linux/ptrace.h>
index f4a7db3..d3bf756 100644 (file)
@@ -116,7 +116,7 @@ void sbi_clear_ipi(void)
 EXPORT_SYMBOL(sbi_clear_ipi);
 
 /**
- * sbi_set_timer_v01() - Program the timer for next timer event.
+ * __sbi_set_timer_v01() - Program the timer for next timer event.
  * @stime_value: The value after which next timer event should fire.
  *
  * Return: None
index e85bacf..f8f1533 100644 (file)
@@ -147,7 +147,8 @@ static void __init init_resources(void)
        bss_res.end = __pa_symbol(__bss_stop) - 1;
        bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 
-       mem_res_sz = (memblock.memory.cnt + memblock.reserved.cnt) * sizeof(*mem_res);
+       /* + 1 as memblock_alloc() might increase memblock.reserved.cnt */
+       mem_res_sz = (memblock.memory.cnt + memblock.reserved.cnt + 1) * sizeof(*mem_res);
        mem_res = memblock_alloc(mem_res_sz, SMP_CACHE_BYTES);
        if (!mem_res)
                panic("%s: Failed to allocate %zu bytes\n", __func__, mem_res_sz);
index 3f893c9..2b3e0cb 100644 (file)
@@ -14,7 +14,7 @@
 
 #include <asm/stacktrace.h>
 
-register const unsigned long sp_in_global __asm__("sp");
+register unsigned long sp_in_global __asm__("sp");
 
 #ifdef CONFIG_FRAME_POINTER
 
index 8a5cf99..1b43226 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/delay.h>
 #include <asm/sbi.h>
 #include <asm/processor.h>
+#include <asm/timex.h>
 
 unsigned long riscv_timebase;
 EXPORT_SYMBOL_GPL(riscv_timebase);
index 3ed2c23..0879b5d 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/irq.h>
 
+#include <asm/asm-prototypes.h>
 #include <asm/bug.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
index 3fc18f4..937d13c 100644 (file)
@@ -155,7 +155,7 @@ static void __init kasan_populate(void *start, void *end)
        memset(start, KASAN_SHADOW_INIT, end - start);
 }
 
-void __init kasan_shallow_populate(void *start, void *end)
+static void __init kasan_shallow_populate(void *start, void *end)
 {
        unsigned long vaddr = (unsigned long)start & PAGE_MASK;
        unsigned long vend = PAGE_ALIGN((unsigned long)end);
@@ -187,6 +187,8 @@ void __init kasan_shallow_populate(void *start, void *end)
                }
                vaddr += PAGE_SIZE;
        }
+
+       local_flush_tlb_all();
 }
 
 void __init kasan_init(void)
@@ -214,7 +216,7 @@ void __init kasan_init(void)
                        break;
 
                kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
-       };
+       }
 
        for (i = 0; i < PTRS_PER_PTE; i++)
                set_pte(&kasan_early_shadow_pte[i],
index 053fe8b..a75d94a 100644 (file)
@@ -202,7 +202,7 @@ extern unsigned int s390_pci_no_rid;
 ----------------------------------------------------------------------------- */
 /* Base stuff */
 int zpci_create_device(u32 fid, u32 fh, enum zpci_state state);
-void zpci_remove_device(struct zpci_dev *zdev);
+void zpci_remove_device(struct zpci_dev *zdev, bool set_error);
 int zpci_enable_device(struct zpci_dev *);
 int zpci_disable_device(struct zpci_dev *);
 int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
index ee056f4..2b54316 100644 (file)
@@ -12,6 +12,7 @@ enum stack_type {
        STACK_TYPE_IRQ,
        STACK_TYPE_NODAT,
        STACK_TYPE_RESTART,
+       STACK_TYPE_MCCK,
 };
 
 struct stack_info {
index 7b3cdb4..73ee891 100644 (file)
@@ -6,7 +6,7 @@
 #include <vdso/datapage.h>
 
 struct arch_vdso_data {
-       __u64 tod_steering_delta;
+       __s64 tod_steering_delta;
        __u64 tod_steering_end;
 };
 
index af013b4..2da0273 100644 (file)
@@ -37,10 +37,12 @@ static int diag8_noresponse(int cmdlen)
 
 static int diag8_response(int cmdlen, char *response, int *rlen)
 {
+       unsigned long _cmdlen = cmdlen | 0x40000000L;
+       unsigned long _rlen = *rlen;
        register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
        register unsigned long reg3 asm ("3") = (addr_t) response;
-       register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L;
-       register unsigned long reg5 asm ("5") = *rlen;
+       register unsigned long reg4 asm ("4") = _cmdlen;
+       register unsigned long reg5 asm ("5") = _rlen;
 
        asm volatile(
                "       diag    %2,%0,0x8\n"
index 0dc4b25..db1bc00 100644 (file)
@@ -79,6 +79,15 @@ static bool in_nodat_stack(unsigned long sp, struct stack_info *info)
        return in_stack(sp, info, STACK_TYPE_NODAT, top - THREAD_SIZE, top);
 }
 
+static bool in_mcck_stack(unsigned long sp, struct stack_info *info)
+{
+       unsigned long frame_size, top;
+
+       frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
+       top = S390_lowcore.mcck_stack + frame_size;
+       return in_stack(sp, info, STACK_TYPE_MCCK, top - THREAD_SIZE, top);
+}
+
 static bool in_restart_stack(unsigned long sp, struct stack_info *info)
 {
        unsigned long frame_size, top;
@@ -108,7 +117,8 @@ int get_stack_info(unsigned long sp, struct task_struct *task,
        /* Check per-cpu stacks */
        if (!in_irq_stack(sp, info) &&
            !in_nodat_stack(sp, info) &&
-           !in_restart_stack(sp, info))
+           !in_restart_stack(sp, info) &&
+           !in_mcck_stack(sp, info))
                goto unknown;
 
 recursion_check:
index 601c217..714269e 100644 (file)
@@ -174,7 +174,7 @@ void noinstr do_ext_irq(struct pt_regs *regs)
 
        memcpy(&regs->int_code, &S390_lowcore.ext_cpu_addr, 4);
        regs->int_parm = S390_lowcore.ext_params;
-       regs->int_parm_long = *(unsigned long *)S390_lowcore.ext_params2;
+       regs->int_parm_long = S390_lowcore.ext_params2;
 
        from_idle = !user_mode(regs) && regs->psw.addr == (unsigned long)psw_idle_exit;
        if (from_idle)
index bc302b8..2e3e7ed 100644 (file)
@@ -968,7 +968,7 @@ static int cf_diag_all_start(void)
  */
 static size_t cf_diag_needspace(unsigned int sets)
 {
-       struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+       struct cpu_cf_events *cpuhw = get_cpu_ptr(&cpu_cf_events);
        size_t bytes = 0;
        int i;
 
@@ -984,6 +984,7 @@ static size_t cf_diag_needspace(unsigned int sets)
                     sizeof(((struct s390_ctrset_cpudata *)0)->no_sets));
        debug_sprintf_event(cf_diag_dbg, 5, "%s bytes %ld\n", __func__,
                            bytes);
+       put_cpu_ptr(&cpu_cf_events);
        return bytes;
 }
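
The change above fixes a "use of per-CPU data in preemptible context" class of bug: this_cpu_ptr() alone can race with migration, while get_cpu_ptr() disables preemption until the matching put_cpu_ptr(). Minimal sketch with a hypothetical per-CPU variable:

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, demo_counter);

    static unsigned long read_demo_counter(void)
    {
            /* Pins us to the current CPU until put_cpu_ptr(). */
            unsigned long *p = get_cpu_ptr(&demo_counter);
            unsigned long val = *p;

            put_cpu_ptr(&demo_counter);
            return val;
    }
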
 
index 60da976..72134f9 100644 (file)
@@ -354,7 +354,7 @@ static int __init stack_realloc(void)
        if (!new)
                panic("Couldn't allocate machine check stack");
        WRITE_ONCE(S390_lowcore.mcck_stack, new + STACK_INIT_OFFSET);
-       memblock_free(old, THREAD_SIZE);
+       memblock_free_late(old, THREAD_SIZE);
        return 0;
 }
 early_initcall(stack_realloc);
index 165da96..326cb8f 100644 (file)
@@ -80,10 +80,12 @@ void __init time_early_init(void)
 {
        struct ptff_qto qto;
        struct ptff_qui qui;
+       int cs;
 
        /* Initialize TOD steering parameters */
        tod_steering_end = tod_clock_base.tod;
-       vdso_data->arch_data.tod_steering_end = tod_steering_end;
+       for (cs = 0; cs < CS_BASES; cs++)
+               vdso_data[cs].arch_data.tod_steering_end = tod_steering_end;
 
        if (!test_facility(28))
                return;
@@ -366,6 +368,7 @@ static void clock_sync_global(unsigned long delta)
 {
        unsigned long now, adj;
        struct ptff_qto qto;
+       int cs;
 
        /* Fixup the monotonic sched clock. */
        tod_clock_base.eitod += delta;
@@ -381,7 +384,10 @@ static void clock_sync_global(unsigned long delta)
                panic("TOD clock sync offset %li is too large to drift\n",
                      tod_steering_delta);
        tod_steering_end = now + (abs(tod_steering_delta) << 15);
-       vdso_data->arch_data.tod_steering_end = tod_steering_end;
+       for (cs = 0; cs < CS_BASES; cs++) {
+               vdso_data[cs].arch_data.tod_steering_end = tod_steering_end;
+               vdso_data[cs].arch_data.tod_steering_delta = tod_steering_delta;
+       }
 
        /* Update LPAR offset. */
        if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
index 73c7afc..f216a1b 100644 (file)
@@ -214,7 +214,7 @@ void vtime_flush(struct task_struct *tsk)
        avg_steal = S390_lowcore.avg_steal_timer / 2;
        if ((s64) steal > 0) {
                S390_lowcore.steal_timer = 0;
-               account_steal_time(steal);
+               account_steal_time(cputime_to_nsecs(steal));
                avg_steal += steal;
        }
        S390_lowcore.avg_steal_timer = avg_steal;
index 600881d..9106407 100644 (file)
@@ -682,16 +682,36 @@ int zpci_disable_device(struct zpci_dev *zdev)
 }
 EXPORT_SYMBOL_GPL(zpci_disable_device);
 
-void zpci_remove_device(struct zpci_dev *zdev)
+/** zpci_remove_device - Removes the given zdev from the PCI core
+ * @zdev: the zdev to be removed from the PCI core
+ * @set_error: if true the device's error state is set to permanent failure
+ *
+ * Sets a zPCI device to a configured but offline state; the zPCI
+ * device is still accessible through its hotplug slot and the zPCI
+ * API but is removed from the common code PCI bus, making it
+ * no longer available to drivers.
+ */
+void zpci_remove_device(struct zpci_dev *zdev, bool set_error)
 {
        struct zpci_bus *zbus = zdev->zbus;
        struct pci_dev *pdev;
 
+       if (!zdev->zbus->bus)
+               return;
+
        pdev = pci_get_slot(zbus->bus, zdev->devfn);
        if (pdev) {
-               if (pdev->is_virtfn)
-                       return zpci_iov_remove_virtfn(pdev, zdev->vfn);
+               if (set_error)
+                       pdev->error_state = pci_channel_io_perm_failure;
+               if (pdev->is_virtfn) {
+                       zpci_iov_remove_virtfn(pdev, zdev->vfn);
+                       /* balance pci_get_slot */
+                       pci_dev_put(pdev);
+                       return;
+               }
                pci_stop_and_remove_bus_device_locked(pdev);
+               /* balance pci_get_slot */
+               pci_dev_put(pdev);
        }
 }
 
@@ -765,7 +785,7 @@ void zpci_release_device(struct kref *kref)
        struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
 
        if (zdev->zbus->bus)
-               zpci_remove_device(zdev);
+               zpci_remove_device(zdev, false);
 
        switch (zdev->state) {
        case ZPCI_FN_STATE_ONLINE:
index b4162da..ac0c65c 100644 (file)
@@ -76,13 +76,10 @@ void zpci_event_error(void *data)
 static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
 {
        struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
-       struct pci_dev *pdev = NULL;
        enum zpci_state state;
+       struct pci_dev *pdev;
        int ret;
 
-       if (zdev && zdev->zbus->bus)
-               pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
-
        zpci_err("avail CCDF:\n");
        zpci_err_hex(ccdf, sizeof(*ccdf));
 
@@ -124,8 +121,7 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
        case 0x0303: /* Deconfiguration requested */
                if (!zdev)
                        break;
-               if (pdev)
-                       zpci_remove_device(zdev);
+               zpci_remove_device(zdev, false);
 
                ret = zpci_disable_device(zdev);
                if (ret)
@@ -140,12 +136,10 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
        case 0x0304: /* Configured -> Standby|Reserved */
                if (!zdev)
                        break;
-               if (pdev) {
-                       /* Give the driver a hint that the function is
-                        * already unusable. */
-                       pdev->error_state = pci_channel_io_perm_failure;
-                       zpci_remove_device(zdev);
-               }
+               /* Give the driver a hint that the function is
+                * already unusable.
+                */
+               zpci_remove_device(zdev, true);
 
                zdev->fh = ccdf->fh;
                zpci_disable_device(zdev);
index 2d6d5a2..9a85eae 100644 (file)
@@ -27,7 +27,7 @@ endif
 REALMODE_CFLAGS        := -m16 -g -Os -DDISABLE_BRANCH_PROFILING \
                   -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
                   -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
-                  -mno-mmx -mno-sse
+                  -mno-mmx -mno-sse $(call cc-option,-fcf-protection=none)
 
 REALMODE_CFLAGS += -ffreestanding
 REALMODE_CFLAGS += -fno-stack-protector
index 7bbb5bb..37ce384 100644 (file)
@@ -3659,6 +3659,9 @@ static int intel_pmu_hw_config(struct perf_event *event)
                return ret;
 
        if (event->attr.precise_ip) {
+               if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
+                       return -EINVAL;
+
                if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
                        event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
                        if (!(event->attr.sample_type &
index 7ebae18..d32b302 100644 (file)
@@ -2010,7 +2010,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d
                 */
                if (!pebs_status && cpuc->pebs_enabled &&
                        !(cpuc->pebs_enabled & (cpuc->pebs_enabled-1)))
-                       pebs_status = cpuc->pebs_enabled;
+                       pebs_status = p->status = cpuc->pebs_enabled;
 
                bit = find_first_bit((unsigned long *)&pebs_status,
                                        x86_pmu.max_pebs_events);
index 97bbb4a..05b48b3 100644 (file)
@@ -56,8 +56,13 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
        else
                set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
 
-       /* Flush this CPU's TLB. */
+       /*
+        * Flush this CPU's TLB, assuming whoever did the allocation/free is
+        * likely to continue running on this CPU.
+        */
+       preempt_disable();
        flush_tlb_one_kernel(addr);
+       preempt_enable();
        return true;
 }
 
index 9bc091e..3768819 100644 (file)
@@ -884,12 +884,29 @@ struct kvm_hv_syndbg {
        u64 options;
 };
 
+/* Current state of Hyper-V TSC page clocksource */
+enum hv_tsc_page_status {
+       /* TSC page was not set up or disabled */
+       HV_TSC_PAGE_UNSET = 0,
+       /* TSC page MSR was written by the guest, update pending */
+       HV_TSC_PAGE_GUEST_CHANGED,
+       /* TSC page MSR was written by KVM userspace, update pending */
+       HV_TSC_PAGE_HOST_CHANGED,
+       /* TSC page was properly set up and is currently active  */
+       HV_TSC_PAGE_SET,
+       /* TSC page is currently being updated and therefore is inactive */
+       HV_TSC_PAGE_UPDATING,
+       /* TSC page was set up with an inaccessible GPA */
+       HV_TSC_PAGE_BROKEN,
+};
+
 /* Hyper-V emulation context */
 struct kvm_hv {
        struct mutex hv_lock;
        u64 hv_guest_os_id;
        u64 hv_hypercall;
        u64 hv_tsc_page;
+       enum hv_tsc_page_status hv_tsc_page_status;
 
        /* Hyper-v based guest crash (NT kernel bugcheck) parameters */
        u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
@@ -931,6 +948,12 @@ enum kvm_irqchip_mode {
        KVM_IRQCHIP_SPLIT,        /* created with KVM_CAP_SPLIT_IRQCHIP */
 };
 
+struct kvm_x86_msr_filter {
+       u8 count;
+       bool default_allow:1;
+       struct msr_bitmap_range ranges[16];
+};
+
 #define APICV_INHIBIT_REASON_DISABLE    0
 #define APICV_INHIBIT_REASON_HYPERV     1
 #define APICV_INHIBIT_REASON_NESTED     2
@@ -1025,16 +1048,11 @@ struct kvm_arch {
        bool guest_can_read_msr_platform_info;
        bool exception_payload_enabled;
 
+       bool bus_lock_detection_enabled;
+
        /* Deflect RDMSR and WRMSR to user space when they trigger a #GP */
        u32 user_space_msr_mask;
-
-       struct {
-               u8 count;
-               bool default_allow:1;
-               struct msr_bitmap_range ranges[16];
-       } msr_filter;
-
-       bool bus_lock_detection_enabled;
+       struct kvm_x86_msr_filter __rcu *msr_filter;
 
        struct kvm_pmu_event_filter __rcu *pmu_event_filter;
        struct task_struct *nx_lpage_recovery_thread;
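
The new enum replaces implicit deductions from tsc_ref.tsc_sequence with an explicit status word. A minimal self-contained sketch of the same guard structure (the names loosely mirror the enum above; try_update() is a made-up stand-in for the setup/invalidate paths):

#include <stdio.h>

enum page_status {
	PAGE_UNSET = 0,		/* never set up or disabled */
	PAGE_GUEST_CHANGED,	/* guest wrote the MSR, update pending */
	PAGE_HOST_CHANGED,	/* userspace wrote the MSR, update pending */
	PAGE_SET,		/* active and trustworthy */
	PAGE_UPDATING,		/* mid-update, readers must fall back */
	PAGE_BROKEN,		/* backing memory inaccessible */
};

static enum page_status status = PAGE_UNSET;

/* made-up stand-in for the setup/invalidate paths */
static void try_update(int write_ok)
{
	if (status == PAGE_BROKEN || status == PAGE_UNSET)
		return;			/* nothing to do / unrecoverable */

	status = PAGE_UPDATING;		/* readers fall back meanwhile */
	if (write_ok)
		status = PAGE_SET;	/* publish: page usable again */
	else
		status = PAGE_BROKEN;	/* writeback failed */
}

int main(void)
{
	status = PAGE_GUEST_CHANGED;	/* guest wrote the MSR */
	try_update(1);
	printf("usable=%d\n", status == PAGE_SET);
	return 0;
}

Readers treat anything other than the SET state as "use the fallback clock", which matches the new check in get_time_ref_counter() further down.
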
index dc6d149..f1b9ed5 100644 (file)
@@ -551,15 +551,6 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
        *size = fpu_kernel_xstate_size;
 }
 
-/*
- * Thread-synchronous status.
- *
- * This is different from the flags in that nobody else
- * ever touches our thread-synchronous status, so we don't
- * have to worry about atomic accesses.
- */
-#define TS_COMPAT              0x0002  /* 32bit syscall active (64BIT)*/
-
 static inline void
 native_load_sp0(unsigned long sp0)
 {
index c0538f8..630ff08 100644 (file)
@@ -132,6 +132,7 @@ void native_play_dead(void);
 void play_dead_common(void);
 void wbinvd_on_cpu(int cpu);
 int wbinvd_on_all_cpus(void);
+void cond_wakeup_cpu0(void);
 
 void native_smp_send_reschedule(int cpu);
 void native_send_call_func_ipi(const struct cpumask *mask);
index 0d751d5..06b740b 100644 (file)
@@ -205,10 +205,23 @@ static inline int arch_within_stack_frames(const void * const stack,
 
 #endif
 
+/*
+ * Thread-synchronous status.
+ *
+ * This is different from the flags in that nobody else
+ * ever touches our thread-synchronous status, so we don't
+ * have to worry about atomic accesses.
+ */
+#define TS_COMPAT              0x0002  /* 32bit syscall active (64BIT)*/
+
+#ifndef __ASSEMBLY__
 #ifdef CONFIG_COMPAT
 #define TS_I386_REGS_POKED     0x0004  /* regs poked by 32-bit ptracer */
+
+#define arch_set_restart_data(restart) \
+       do { restart->arch_data = current_thread_info()->status; } while (0)
+
 #endif
-#ifndef __ASSEMBLY__
 
 #ifdef CONFIG_X86_32
 #define in_ia32_syscall() true
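
arch_set_restart_data() snapshots the thread's compat status into restart_block at the moment the syscall is armed for restart, so the later ABI decision no longer reads live state that a ptracer may have poked. A toy model of that snapshot-then-decide pattern (all names besides TS_COMPAT are illustrative):

#include <stdio.h>

#define TS_COMPAT 0x0002

struct restart_block {
	unsigned long arch_data;	/* snapshot, not live state */
};

static unsigned long live_status;	/* stand-in for thread_info->status */
static struct restart_block restart;

static void setup_restart(void)
{
	/* taken at -ERESTART time, like arch_set_restart_data() */
	restart.arch_data = live_status;
}

static const char *restart_syscall_abi(void)
{
	/* decided later, from the snapshot only */
	return (restart.arch_data & TS_COMPAT) ? "ia32" : "native";
}

int main(void)
{
	live_status = TS_COMPAT;
	setup_restart();
	live_status = 0;		/* e.g. a tracer changed it since */
	printf("restart via %s ABI\n", restart_syscall_abi());
	return 0;
}
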
index 7068e4b..1a162e5 100644 (file)
@@ -86,18 +86,6 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
 }
 #endif
 
-/*
- * The maximum amount of extra memory compared to the base size.  The
- * main scaling factor is the size of struct page.  At extreme ratios
- * of base:extra, all the base memory can be filled with page
- * structures for the extra memory, leaving no space for anything
- * else.
- *
- * 10x seems like a reasonable balance between scaling flexibility and
- * leaving a practically usable system.
- */
-#define XEN_EXTRA_MEM_RATIO    (10)
-
 /*
  * Helper functions to write or read unsigned long values to/from
  * memory, when the access may fault.
index 7bdc023..14cd318 100644 (file)
@@ -1554,10 +1554,18 @@ void __init acpi_boot_table_init(void)
        /*
         * Initialize the ACPI boot-time table parser.
         */
-       if (acpi_table_init()) {
+       if (acpi_locate_initial_tables())
                disable_acpi();
-               return;
-       }
+       else
+               acpi_reserve_initial_tables();
+}
+
+int __init early_acpi_boot_init(void)
+{
+       if (acpi_disabled)
+               return 1;
+
+       acpi_table_init_complete();
 
        acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
 
@@ -1570,18 +1578,9 @@ void __init acpi_boot_table_init(void)
                } else {
                        printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
                        disable_acpi();
-                       return;
+                       return 1;
                }
        }
-}
-
-int __init early_acpi_boot_init(void)
-{
-       /*
-        * If acpi_disabled, bail out
-        */
-       if (acpi_disabled)
-               return 1;
 
        /*
         * Process the Multiple APIC Description Table (MADT), if present
index bda4f2a..4f26700 100644 (file)
@@ -2342,6 +2342,11 @@ static int cpuid_to_apicid[] = {
        [0 ... NR_CPUS - 1] = -1,
 };
 
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+       return phys_id == cpuid_to_apicid[cpu];
+}
+
 #ifdef CONFIG_SMP
 /**
  * apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread
index c3b60c3..73ff4dd 100644 (file)
@@ -1032,6 +1032,16 @@ static int mp_map_pin_to_irq(u32 gsi, int idx, int ioapic, int pin,
        if (idx >= 0 && test_bit(mp_irqs[idx].srcbus, mp_bus_not_pci)) {
                irq = mp_irqs[idx].srcbusirq;
                legacy = mp_is_legacy_irq(irq);
+               /*
+                * IRQ2 is unusable for historical reasons on systems which
+                * have a legacy PIC. See the comment vs. IRQ2 further down.
+                *
+                * If this gets removed at some point then the related code
+                * in lapic_assign_system_vectors() needs to be adjusted as
+                * well.
+                */
+               if (legacy && irq == PIC_CASCADE_IR)
+                       return -EINVAL;
        }
 
        mutex_lock(&ioapic_mutex);
index 373e5fa..51c7f52 100644 (file)
@@ -12,7 +12,7 @@
 
 #include "common.h"
 
-/* Ftrace callback handler for kprobes -- called under preepmt disabed */
+/* Ftrace callback handler for kprobes -- called with preemption disabled */
 void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                           struct ftrace_ops *ops, struct ftrace_regs *fregs)
 {
index 5e78e01..78bb0fa 100644 (file)
@@ -836,28 +836,25 @@ static void kvm_kick_cpu(int cpu)
 
 static void kvm_wait(u8 *ptr, u8 val)
 {
-       unsigned long flags;
-
        if (in_nmi())
                return;
 
-       local_irq_save(flags);
-
-       if (READ_ONCE(*ptr) != val)
-               goto out;
-
        /*
         * halt until it's our turn and kicked. Note that we do safe halt
         * for irq enabled case to avoid hang when lock info is overwritten
         * in irq spinlock slowpath and no spurious interrupt occur to save us.
         */
-       if (arch_irqs_disabled_flags(flags))
-               halt();
-       else
-               safe_halt();
+       if (irqs_disabled()) {
+               if (READ_ONCE(*ptr) == val)
+                       halt();
+       } else {
+               local_irq_disable();
 
-out:
-       local_irq_restore(flags);
+               if (READ_ONCE(*ptr) == val)
+                       safe_halt();
+
+               local_irq_enable();
+       }
 }
 
 #ifdef CONFIG_X86_32
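
The rework closes a lost-wakeup window: the lock byte is re-checked only after interrupts are disabled, so the kick IPI cannot land between the check and the halt. Userspace signals have the same shape: blocking a signal plays the role of local_irq_disable(), and sigsuspend() plays the role of safe_halt(). A runnable sketch of that analogy (not KVM code):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t kicked;

static void on_kick(int sig)
{
	(void)sig;
	kicked = 1;
}

int main(void)
{
	struct sigaction sa = { .sa_handler = on_kick };
	sigset_t block, old;

	sigaction(SIGUSR1, &sa, NULL);

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &old);	/* ~ local_irq_disable() */

	kill(getpid(), SIGUSR1);	/* the "kick" arrives while masked */

	if (!kicked)			/* re-check under the mask */
		sigsuspend(&old);	/* ~ safe_halt(): unmask and wait atomically */

	sigprocmask(SIG_SETMASK, &old, NULL);
	printf("kicked=%d\n", (int)kicked);
	return 0;
}
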
index d883176..5ecd69a 100644 (file)
@@ -1045,6 +1045,9 @@ void __init setup_arch(char **cmdline_p)
 
        cleanup_highmap();
 
+       /* Look for ACPI tables and reserve memory occupied by them. */
+       acpi_boot_table_init();
+
        memblock_set_current_limit(ISA_END_ADDRESS);
        e820__memblock_setup();
 
@@ -1136,11 +1139,6 @@ void __init setup_arch(char **cmdline_p)
 
        early_platform_quirks();
 
-       /*
-        * Parse the ACPI tables for possible boot-time SMP configuration.
-        */
-       acpi_boot_table_init();
-
        early_acpi_boot_init();
 
        initmem_init();
index ea794a0..f306e85 100644 (file)
@@ -766,30 +766,8 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 
 static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
 {
-       /*
-        * This function is fundamentally broken as currently
-        * implemented.
-        *
-        * The idea is that we want to trigger a call to the
-        * restart_block() syscall and that we want in_ia32_syscall(),
-        * in_x32_syscall(), etc. to match whatever they were in the
-        * syscall being restarted.  We assume that the syscall
-        * instruction at (regs->ip - 2) matches whatever syscall
-        * instruction we used to enter in the first place.
-        *
-        * The problem is that we can get here when ptrace pokes
-        * syscall-like values into regs even if we're not in a syscall
-        * at all.
-        *
-        * For now, we maintain historical behavior and guess based on
-        * stored state.  We could do better by saving the actual
-        * syscall arch in restart_block or (with caveats on x32) by
-        * checking if regs->ip points to 'int $0x80'.  The current
-        * behavior is incorrect if a tracer has a different bitness
-        * than the tracee.
-        */
 #ifdef CONFIG_IA32_EMULATION
-       if (current_thread_info()->status & (TS_COMPAT|TS_I386_REGS_POKED))
+       if (current->restart_block.arch_data & TS_COMPAT)
                return __NR_ia32_restart_syscall;
 #endif
 #ifdef CONFIG_X86_X32_ABI
index 02813a7..16703c3 100644 (file)
@@ -1659,13 +1659,17 @@ void play_dead_common(void)
        local_irq_disable();
 }
 
-static bool wakeup_cpu0(void)
+/**
+ * cond_wakeup_cpu0 - Wake up CPU0 if needed.
+ *
+ * If NMI wants to wake up CPU0, start CPU0.
+ */
+void cond_wakeup_cpu0(void)
 {
        if (smp_processor_id() == 0 && enable_start_cpu0)
-               return true;
-
-       return false;
+               start_cpu0();
 }
+EXPORT_SYMBOL_GPL(cond_wakeup_cpu0);
 
 /*
  * We need to flush the caches before going to sleep, lest we have
@@ -1734,11 +1738,8 @@ static inline void mwait_play_dead(void)
                __monitor(mwait_ptr, 0, 0);
                mb();
                __mwait(eax, 0);
-               /*
-                * If NMI wants to wake up CPU0, start CPU0.
-                */
-               if (wakeup_cpu0())
-                       start_cpu0();
+
+               cond_wakeup_cpu0();
        }
 }
 
@@ -1749,11 +1750,8 @@ void hlt_play_dead(void)
 
        while (1) {
                native_halt();
-               /*
-                * If NMI wants to wake up CPU0, start CPU0.
-                */
-               if (wakeup_cpu0())
-                       start_cpu0();
+
+               cond_wakeup_cpu0();
        }
 }
 
index ac1874a..651e3e5 100644 (file)
@@ -556,7 +556,7 @@ DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
                tsk->thread.trap_nr = X86_TRAP_GP;
 
                if (fixup_vdso_exception(regs, X86_TRAP_GP, error_code, 0))
-                       return;
+                       goto exit;
 
                show_signal(tsk, SIGSEGV, "", desc, regs, error_code);
                force_sig(SIGSEGV);
@@ -1057,7 +1057,7 @@ static void math_error(struct pt_regs *regs, int trapnr)
                goto exit;
 
        if (fixup_vdso_exception(regs, trapnr, 0, 0))
-               return;
+               goto exit;
 
        force_sig_fault(SIGFPE, si_code,
                        (void __user *)uprobe_get_trap_addr(regs));
index 1b4766f..eafc4d6 100644 (file)
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 
-ccflags-y += -Iarch/x86/kvm
+ccflags-y += -I $(srctree)/arch/x86/kvm
 ccflags-$(CONFIG_KVM_WERROR) += -Werror
 
 ifeq ($(CONFIG_FRAME_POINTER),y)
index 58fa8c0..f98370a 100644 (file)
@@ -520,10 +520,10 @@ static u64 get_time_ref_counter(struct kvm *kvm)
        u64 tsc;
 
        /*
-        * The guest has not set up the TSC page or the clock isn't
-        * stable, fall back to get_kvmclock_ns.
+        * Fall back to get_kvmclock_ns() when TSC page hasn't been set up,
+        * is broken, disabled or being updated.
         */
-       if (!hv->tsc_ref.tsc_sequence)
+       if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET)
                return div_u64(get_kvmclock_ns(kvm), 100);
 
        vcpu = kvm_get_vcpu(kvm, 0);
@@ -1077,6 +1077,21 @@ static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
        return true;
 }
 
+/*
+ * Don't touch TSC page values if the guest has opted for TSC emulation after
+ * migration. KVM doesn't fully support reenlightenment notifications and TSC
+ * access emulation and Hyper-V is known to expect the values in TSC page to
+ * stay constant before TSC access emulation is disabled from guest side
+ * (HV_X64_MSR_TSC_EMULATION_STATUS). KVM userspace is expected to preserve TSC
+ * frequency and guest visible TSC value across migration (and prevent it when
+ * TSC scaling is unsupported).
+ */
+static inline bool tsc_page_update_unsafe(struct kvm_hv *hv)
+{
+       return (hv->hv_tsc_page_status != HV_TSC_PAGE_GUEST_CHANGED) &&
+               hv->hv_tsc_emulation_control;
+}
+
 void kvm_hv_setup_tsc_page(struct kvm *kvm,
                           struct pvclock_vcpu_time_info *hv_clock)
 {
@@ -1087,7 +1102,8 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
        BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
        BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);
 
-       if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+       if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
+           hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
                return;
 
        mutex_lock(&hv->hv_lock);
@@ -1101,7 +1117,15 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
         */
        if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
                                    &tsc_seq, sizeof(tsc_seq))))
+               goto out_err;
+
+       if (tsc_seq && tsc_page_update_unsafe(hv)) {
+               if (kvm_read_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
+                       goto out_err;
+
+               hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
                goto out_unlock;
+       }
 
        /*
         * While we're computing and writing the parameters, force the
@@ -1110,15 +1134,15 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
        hv->tsc_ref.tsc_sequence = 0;
        if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
                            &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
-               goto out_unlock;
+               goto out_err;
 
        if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
-               goto out_unlock;
+               goto out_err;
 
        /* Ensure sequence is zero before writing the rest of the struct.  */
        smp_wmb();
        if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
-               goto out_unlock;
+               goto out_err;
 
        /*
         * Now switch to the TSC page mechanism by writing the sequence.
@@ -1131,8 +1155,45 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
        smp_wmb();
 
        hv->tsc_ref.tsc_sequence = tsc_seq;
-       kvm_write_guest(kvm, gfn_to_gpa(gfn),
-                       &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
+       if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
+                           &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
+               goto out_err;
+
+       hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
+       goto out_unlock;
+
+out_err:
+       hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
+out_unlock:
+       mutex_unlock(&hv->hv_lock);
+}
+
+void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
+{
+       struct kvm_hv *hv = to_kvm_hv(kvm);
+       u64 gfn;
+
+       if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
+           hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET ||
+           tsc_page_update_unsafe(hv))
+               return;
+
+       mutex_lock(&hv->hv_lock);
+
+       if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+               goto out_unlock;
+
+       /* Preserve HV_TSC_PAGE_GUEST_CHANGED/HV_TSC_PAGE_HOST_CHANGED states */
+       if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET)
+               hv->hv_tsc_page_status = HV_TSC_PAGE_UPDATING;
+
+       gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
+
+       hv->tsc_ref.tsc_sequence = 0;
+       if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
+                           &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
+               hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
+
 out_unlock:
        mutex_unlock(&hv->hv_lock);
 }
@@ -1193,8 +1254,15 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
        }
        case HV_X64_MSR_REFERENCE_TSC:
                hv->hv_tsc_page = data;
-               if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)
+               if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) {
+                       if (!host)
+                               hv->hv_tsc_page_status = HV_TSC_PAGE_GUEST_CHANGED;
+                       else
+                               hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
                        kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
+               } else {
+                       hv->hv_tsc_page_status = HV_TSC_PAGE_UNSET;
+               }
                break;
        case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
                return kvm_hv_msr_set_crash_data(kvm,
@@ -1229,6 +1297,9 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
                hv->hv_tsc_emulation_control = data;
                break;
        case HV_X64_MSR_TSC_EMULATION_STATUS:
+               if (data && !host)
+                       return 1;
+
                hv->hv_tsc_emulation_status = data;
                break;
        case HV_X64_MSR_TIME_REF_COUNT:
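
Both kvm_hv_setup_tsc_page() and the new kvm_hv_invalidate_tsc_page() drive the same publish protocol: zero the sequence to park readers, rewrite the payload, then make it visible before storing a non-zero sequence. A single-threaded C sketch of that writer/reader contract (toy page layout, not the Hyper-V ABI; the barriers are reduced to release/acquire stores):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct tsc_page {
	_Atomic uint32_t sequence;	/* 0 means "in update, do not use" */
	uint64_t scale;
	int64_t offset;
};

static struct tsc_page page;

static void publish(uint64_t scale, int64_t offset, uint32_t seq)
{
	atomic_store_explicit(&page.sequence, 0, memory_order_release);
	/* readers now fall back; safe to rewrite the payload */
	page.scale = scale;
	page.offset = offset;
	/* payload must be visible before the new sequence (~ smp_wmb()) */
	atomic_store_explicit(&page.sequence, seq, memory_order_release);
}

static int read_page(uint64_t *scale, int64_t *offset)
{
	uint32_t s = atomic_load_explicit(&page.sequence, memory_order_acquire);

	if (s == 0)
		return -1;		/* fall back to the slow clock */
	*scale = page.scale;
	*offset = page.offset;
	/* a real reader re-checks that sequence is still == s here */
	return 0;
}

int main(void)
{
	uint64_t sc;
	int64_t off;

	publish(1000, -5, 1);
	if (!read_page(&sc, &off))
		printf("scale=%llu offset=%lld\n",
		       (unsigned long long)sc, (long long)off);
	return 0;
}

The invalidate path is just the first half of this protocol: store sequence = 0 and stop, leaving readers on the fallback until the next update completes.
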
index e951af1..60547d5 100644 (file)
@@ -133,6 +133,7 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu);
 
 void kvm_hv_setup_tsc_page(struct kvm *kvm,
                           struct pvclock_vcpu_time_info *hv_clock);
+void kvm_hv_invalidate_tsc_page(struct kvm *kvm);
 
 void kvm_hv_init_vm(struct kvm *kvm);
 void kvm_hv_destroy_vm(struct kvm *kvm);
index d75524b..951dae4 100644 (file)
@@ -5884,6 +5884,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
        struct kvm_mmu_page *sp;
        unsigned int ratio;
        LIST_HEAD(invalid_list);
+       bool flush = false;
        ulong to_zap;
 
        rcu_idx = srcu_read_lock(&kvm->srcu);
@@ -5905,19 +5906,19 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
                                      lpage_disallowed_link);
                WARN_ON_ONCE(!sp->lpage_disallowed);
                if (is_tdp_mmu_page(sp)) {
-                       kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn,
-                               sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level));
+                       flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
                } else {
                        kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
                        WARN_ON_ONCE(sp->lpage_disallowed);
                }
 
                if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
-                       kvm_mmu_commit_zap_page(kvm, &invalid_list);
+                       kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
                        cond_resched_rwlock_write(&kvm->mmu_lock);
+                       flush = false;
                }
        }
-       kvm_mmu_commit_zap_page(kvm, &invalid_list);
+       kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
 
        write_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, rcu_idx);
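
The recovery loop now carries a pending-flush bit across batched zaps and must commit it before dropping mmu_lock to reschedule, since another thread could otherwise run on stale translations. Reduced to its control-flow shape (zap() and need_break() are invented stand-ins):

#include <stdbool.h>
#include <stdio.h>

static bool zap(int i)
{
	return i % 2 == 0;	/* pretend only some items need a flush */
}

static void flush_all(void)
{
	printf("flush\n");
}

static bool need_break(int i)
{
	return i == 2;		/* pretend the scheduler wants the CPU */
}

int main(void)
{
	bool flush = false;

	for (int i = 0; i < 5; i++) {
		flush |= zap(i);

		if (need_break(i)) {
			if (flush)	/* commit before dropping the lock */
				flush_all();
			flush = false;
			/* cond_resched_rwlock_write() analogue goes here */
		}
	}

	if (flush)			/* final commit, as after the loop above */
		flush_all();
	return 0;
}
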
index ec4fc28..1f6f98c 100644 (file)
@@ -78,6 +78,11 @@ static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
        return to_shadow_page(__pa(sptep));
 }
 
+static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
+{
+       return sp->role.smm ? 1 : 0;
+}
+
 static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
 {
        /*
index e5f1481..b3ed302 100644 (file)
@@ -20,6 +20,21 @@ static gfn_t round_gfn_for_level(gfn_t gfn, int level)
        return gfn & -KVM_PAGES_PER_HPAGE(level);
 }
 
+/*
+ * Return the TDP iterator to the root PT and allow it to continue its
+ * traversal over the paging structure from there.
+ */
+void tdp_iter_restart(struct tdp_iter *iter)
+{
+       iter->yielded_gfn = iter->next_last_level_gfn;
+       iter->level = iter->root_level;
+
+       iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);
+       tdp_iter_refresh_sptep(iter);
+
+       iter->valid = true;
+}
+
 /*
  * Sets a TDP iterator to walk a pre-order traversal of the paging structure
  * rooted at root_pt, starting with the walk to translate next_last_level_gfn.
@@ -31,16 +46,12 @@ void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
        WARN_ON(root_level > PT64_ROOT_MAX_LEVEL);
 
        iter->next_last_level_gfn = next_last_level_gfn;
-       iter->yielded_gfn = iter->next_last_level_gfn;
        iter->root_level = root_level;
        iter->min_level = min_level;
-       iter->level = root_level;
-       iter->pt_path[iter->level - 1] = (tdp_ptep_t)root_pt;
-
-       iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);
-       tdp_iter_refresh_sptep(iter);
+       iter->pt_path[iter->root_level - 1] = (tdp_ptep_t)root_pt;
+       iter->as_id = kvm_mmu_page_as_id(sptep_to_sp(root_pt));
 
-       iter->valid = true;
+       tdp_iter_restart(iter);
 }
 
 /*
@@ -159,8 +170,3 @@ void tdp_iter_next(struct tdp_iter *iter)
        iter->valid = false;
 }
 
-tdp_ptep_t tdp_iter_root_pt(struct tdp_iter *iter)
-{
-       return iter->pt_path[iter->root_level - 1];
-}
-
index 4cc177d..b1748b9 100644 (file)
@@ -36,6 +36,8 @@ struct tdp_iter {
        int min_level;
        /* The iterator's current level within the paging structure */
        int level;
+       /* The address space ID, i.e. SMM vs. regular. */
+       int as_id;
        /* A snapshot of the value at sptep */
        u64 old_spte;
        /*
@@ -62,6 +64,6 @@ tdp_ptep_t spte_to_child_pt(u64 pte, int level);
 void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
                    int min_level, gfn_t next_last_level_gfn);
 void tdp_iter_next(struct tdp_iter *iter);
-tdp_ptep_t tdp_iter_root_pt(struct tdp_iter *iter);
+void tdp_iter_restart(struct tdp_iter *iter);
 
 #endif /* __KVM_X86_MMU_TDP_ITER_H */
index d789150..018d82e 100644 (file)
@@ -86,7 +86,7 @@ static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
        list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
 
 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-                         gfn_t start, gfn_t end, bool can_yield);
+                         gfn_t start, gfn_t end, bool can_yield, bool flush);
 
 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
 {
@@ -99,7 +99,7 @@ void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
 
        list_del(&root->link);
 
-       zap_gfn_range(kvm, root, 0, max_gfn, false);
+       zap_gfn_range(kvm, root, 0, max_gfn, false, false);
 
        free_page((unsigned long)root->spt);
        kmem_cache_free(mmu_page_header_cache, root);
@@ -203,11 +203,6 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
                                u64 old_spte, u64 new_spte, int level,
                                bool shared);
 
-static int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
-{
-       return sp->role.smm ? 1 : 0;
-}
-
 static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
 {
        bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
@@ -301,11 +296,16 @@ static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp,
  *
  * Given a page table that has been removed from the TDP paging structure,
  * iterates through the page table to clear SPTEs and free child page tables.
+ *
+ * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
+ * protection. Since this thread removed it from the paging structure,
+ * this thread will be responsible for ensuring the page is freed. Hence the
+ * early rcu_dereferences in the function.
  */
-static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt,
+static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
                                        bool shared)
 {
-       struct kvm_mmu_page *sp = sptep_to_sp(pt);
+       struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
        int level = sp->role.level;
        gfn_t base_gfn = sp->gfn;
        u64 old_child_spte;
@@ -318,7 +318,7 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt,
        tdp_mmu_unlink_page(kvm, sp, shared);
 
        for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
-               sptep = pt + i;
+               sptep = rcu_dereference(pt) + i;
                gfn = base_gfn + (i * KVM_PAGES_PER_HPAGE(level - 1));
 
                if (shared) {
@@ -492,10 +492,6 @@ static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
                                           struct tdp_iter *iter,
                                           u64 new_spte)
 {
-       u64 *root_pt = tdp_iter_root_pt(iter);
-       struct kvm_mmu_page *root = sptep_to_sp(root_pt);
-       int as_id = kvm_mmu_page_as_id(root);
-
        lockdep_assert_held_read(&kvm->mmu_lock);
 
        /*
@@ -509,8 +505,8 @@ static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
                      new_spte) != iter->old_spte)
                return false;
 
-       handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
-                           iter->level, true);
+       handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
+                           new_spte, iter->level, true);
 
        return true;
 }
@@ -538,7 +534,7 @@ static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
         * here since the SPTE is going from non-present
         * to non-present.
         */
-       WRITE_ONCE(*iter->sptep, 0);
+       WRITE_ONCE(*rcu_dereference(iter->sptep), 0);
 
        return true;
 }
@@ -564,10 +560,6 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
                                      u64 new_spte, bool record_acc_track,
                                      bool record_dirty_log)
 {
-       tdp_ptep_t root_pt = tdp_iter_root_pt(iter);
-       struct kvm_mmu_page *root = sptep_to_sp(root_pt);
-       int as_id = kvm_mmu_page_as_id(root);
-
        lockdep_assert_held_write(&kvm->mmu_lock);
 
        /*
@@ -581,13 +573,13 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
 
        WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);
 
-       __handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
-                             iter->level, false);
+       __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
+                             new_spte, iter->level, false);
        if (record_acc_track)
                handle_changed_spte_acc_track(iter->old_spte, new_spte,
                                              iter->level);
        if (record_dirty_log)
-               handle_changed_spte_dirty_log(kvm, as_id, iter->gfn,
+               handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
                                              iter->old_spte, new_spte,
                                              iter->level);
 }
@@ -659,9 +651,7 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
 
                WARN_ON(iter->gfn > iter->next_last_level_gfn);
 
-               tdp_iter_start(iter, iter->pt_path[iter->root_level - 1],
-                              iter->root_level, iter->min_level,
-                              iter->next_last_level_gfn);
+               tdp_iter_restart(iter);
 
                return true;
        }
@@ -678,20 +668,21 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
  * scheduler needs the CPU or there is contention on the MMU lock. If this
  * function cannot yield, it will not release the MMU lock or reschedule and
  * the caller must ensure it does not supply too large a GFN range, or the
- * operation can cause a soft lockup.
+ * operation can cause a soft lockup.  Note, in some use cases a flush may be
+ * required by prior actions.  Ensure the pending flush is performed prior to
+ * yielding.
  */
 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-                         gfn_t start, gfn_t end, bool can_yield)
+                         gfn_t start, gfn_t end, bool can_yield, bool flush)
 {
        struct tdp_iter iter;
-       bool flush_needed = false;
 
        rcu_read_lock();
 
        tdp_root_for_each_pte(iter, root, start, end) {
                if (can_yield &&
-                   tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) {
-                       flush_needed = false;
+                   tdp_mmu_iter_cond_resched(kvm, &iter, flush)) {
+                       flush = false;
                        continue;
                }
 
@@ -709,11 +700,11 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                        continue;
 
                tdp_mmu_set_spte(kvm, &iter, 0);
-               flush_needed = true;
+               flush = true;
        }
 
        rcu_read_unlock();
-       return flush_needed;
+       return flush;
 }
 
 /*
@@ -722,13 +713,14 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
  * SPTEs have been cleared and a TLB flush is needed before releasing the
  * MMU lock.
  */
-bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
+bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
+                                bool can_yield)
 {
        struct kvm_mmu_page *root;
        bool flush = false;
 
        for_each_tdp_mmu_root_yield_safe(kvm, root)
-               flush |= zap_gfn_range(kvm, root, start, end, true);
+               flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);
 
        return flush;
 }
@@ -940,7 +932,7 @@ static int zap_gfn_range_hva_wrapper(struct kvm *kvm,
                                     struct kvm_mmu_page *root, gfn_t start,
                                     gfn_t end, unsigned long unused)
 {
-       return zap_gfn_range(kvm, root, start, end, false);
+       return zap_gfn_range(kvm, root, start, end, false, false);
 }
 
 int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
index 3b761c1..31096ec 100644 (file)
@@ -8,7 +8,29 @@
 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);
 
-bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end);
+bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
+                                bool can_yield);
+static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start,
+                                            gfn_t end)
+{
+       return __kvm_tdp_mmu_zap_gfn_range(kvm, start, end, true);
+}
+static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+       gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level);
+
+       /*
+        * Don't allow yielding, as the caller may have a flush pending.  Note,
+        * if mmu_lock is held for write, zapping will never yield in this case,
+        * but explicitly disallow it for safety.  The TDP MMU does not yield
+        * until it has made forward progress (steps sideways), and when zapping
+        * a single shadow page that it's guaranteed to see (thus the mmu_lock
+        * requirement), its "step sideways" will always step beyond the bounds
+        * of the shadow page's gfn range and stop iterating before yielding.
+        */
+       lockdep_assert_held_write(&kvm->mmu_lock);
+       return __kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn, end, false);
+}
 void kvm_tdp_mmu_zap_all(struct kvm *kvm);
 
 int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
index 35891d9..fb204ea 100644 (file)
@@ -246,11 +246,18 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
        return true;
 }
 
-static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
+static bool nested_vmcb_check_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
 {
        struct kvm_vcpu *vcpu = &svm->vcpu;
        bool vmcb12_lma;
 
+       /*
+        * FIXME: these should be done after copying the fields,
+        * to avoid TOC/TOU races.  For these save area checks
+        * the possible damage is limited since kvm_set_cr0 and
+        * kvm_set_cr4 handle failure; EFER_SVME is an exception
+        * so it is force-set later in nested_prepare_vmcb_save.
+        */
        if ((vmcb12->save.efer & EFER_SVME) == 0)
                return false;
 
@@ -271,7 +278,7 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
        if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
                return false;
 
-       return nested_vmcb_check_controls(&vmcb12->control);
+       return true;
 }
 
 static void load_nested_vmcb_control(struct vcpu_svm *svm,
@@ -396,7 +403,14 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
        svm->vmcb->save.gdtr = vmcb12->save.gdtr;
        svm->vmcb->save.idtr = vmcb12->save.idtr;
        kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
-       svm_set_efer(&svm->vcpu, vmcb12->save.efer);
+
+       /*
+        * Force-set EFER_SVME even though it is checked earlier on the
+        * VMCB12, because the guest can flip the bit between the check
+        * and now.  Clearing EFER_SVME would call svm_free_nested.
+        */
+       svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);
+
        svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
        svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
        svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
@@ -468,7 +482,6 @@ int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
 
 
        svm->nested.vmcb12_gpa = vmcb12_gpa;
-       load_nested_vmcb_control(svm, &vmcb12->control);
        nested_prepare_vmcb_control(svm);
        nested_prepare_vmcb_save(svm, vmcb12);
 
@@ -515,7 +528,10 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
        if (WARN_ON_ONCE(!svm->nested.initialized))
                return -EINVAL;
 
-       if (!nested_vmcb_checks(svm, vmcb12)) {
+       load_nested_vmcb_control(svm, &vmcb12->control);
+
+       if (!nested_vmcb_check_save(svm, vmcb12) ||
+           !nested_vmcb_check_controls(&svm->nested.ctl)) {
                vmcb12->control.exit_code    = SVM_EXIT_ERR;
                vmcb12->control.exit_code_hi = 0;
                vmcb12->control.exit_info_1  = 0;
@@ -1209,6 +1225,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
         */
        if (!(save->cr0 & X86_CR0_PG))
                goto out_free;
+       if (!(save->efer & EFER_SVME))
+               goto out_free;
 
        /*
         * All checks done, we can enter guest mode.  L1 control fields
index 035da07..fdf587f 100644 (file)
@@ -98,6 +98,8 @@ static enum index msr_to_index(u32 msr)
 static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
                                             enum pmu_type type)
 {
+       struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
+
        switch (msr) {
        case MSR_F15H_PERF_CTL0:
        case MSR_F15H_PERF_CTL1:
@@ -105,6 +107,9 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
        case MSR_F15H_PERF_CTL3:
        case MSR_F15H_PERF_CTL4:
        case MSR_F15H_PERF_CTL5:
+               if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
+                       return NULL;
+               fallthrough;
        case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
                if (type != PMU_TYPE_EVNTSEL)
                        return NULL;
@@ -115,6 +120,9 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
        case MSR_F15H_PERF_CTR3:
        case MSR_F15H_PERF_CTR4:
        case MSR_F15H_PERF_CTR5:
+               if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
+                       return NULL;
+               fallthrough;
        case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
                if (type != PMU_TYPE_COUNTER)
                        return NULL;
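
The PERFCTR_CORE gating works because the MSR_F15H_* cases add only a capability check and then deliberately fall through into the shared legacy-MSR handling. A minimal sketch of that guarded fallthrough (toy MSR numbers; has_feature stands in for guest_cpuid_has()):

#include <stdbool.h>
#include <stdio.h>

static bool has_feature;

static const char *classify(unsigned int msr)
{
	switch (msr) {
	case 0xc0010200:		/* extended counter, gated */
		if (!has_feature)
			return "blocked";
		/* fallthrough */
	case 0xc0010000:		/* legacy counter, always valid */
		return "counter";
	default:
		return "unknown";
	}
}

int main(void)
{
	has_feature = false;
	printf("%s\n", classify(0xc0010200));	/* blocked */
	has_feature = true;
	printf("%s\n", classify(0xc0010200));	/* counter */
	return 0;
}
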
index 47e021b..eca6362 100644 (file)
@@ -271,8 +271,7 @@ static struct kmem_cache *x86_emulator_cache;
  * When called, it means the previous get/set msr reached an invalid msr.
  * Return true if we want to ignore/silent this failed msr access.
  */
-static bool kvm_msr_ignored_check(struct kvm_vcpu *vcpu, u32 msr,
-                                 u64 data, bool write)
+static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write)
 {
        const char *op = write ? "wrmsr" : "rdmsr";
 
@@ -1445,7 +1444,7 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
        if (r == KVM_MSR_RET_INVALID) {
                /* Unconditionally clear the output for simplicity */
                *data = 0;
-               if (kvm_msr_ignored_check(vcpu, index, 0, false))
+               if (kvm_msr_ignored_check(index, 0, false))
                        r = 0;
        }
 
@@ -1526,35 +1525,44 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
 
 bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
 {
+       struct kvm_x86_msr_filter *msr_filter;
+       struct msr_bitmap_range *ranges;
        struct kvm *kvm = vcpu->kvm;
-       struct msr_bitmap_range *ranges = kvm->arch.msr_filter.ranges;
-       u32 count = kvm->arch.msr_filter.count;
-       u32 i;
-       bool r = kvm->arch.msr_filter.default_allow;
+       bool allowed;
        int idx;
+       u32 i;
 
-       /* MSR filtering not set up or x2APIC enabled, allow everything */
-       if (!count || (index >= 0x800 && index <= 0x8ff))
+       /* x2APIC MSRs do not support filtering. */
+       if (index >= 0x800 && index <= 0x8ff)
                return true;
 
-       /* Prevent collision with set_msr_filter */
        idx = srcu_read_lock(&kvm->srcu);
 
-       for (i = 0; i < count; i++) {
+       msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu);
+       if (!msr_filter) {
+               allowed = true;
+               goto out;
+       }
+
+       allowed = msr_filter->default_allow;
+       ranges = msr_filter->ranges;
+
+       for (i = 0; i < msr_filter->count; i++) {
                u32 start = ranges[i].base;
                u32 end = start + ranges[i].nmsrs;
                u32 flags = ranges[i].flags;
                unsigned long *bitmap = ranges[i].bitmap;
 
                if ((index >= start) && (index < end) && (flags & type)) {
-                       r = !!test_bit(index - start, bitmap);
+                       allowed = !!test_bit(index - start, bitmap);
                        break;
                }
        }
 
+out:
        srcu_read_unlock(&kvm->srcu, idx);
 
-       return r;
+       return allowed;
 }
 EXPORT_SYMBOL_GPL(kvm_msr_allowed);
 
@@ -1611,7 +1619,7 @@ static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
        int ret = __kvm_set_msr(vcpu, index, data, host_initiated);
 
        if (ret == KVM_MSR_RET_INVALID)
-               if (kvm_msr_ignored_check(vcpu, index, data, true))
+               if (kvm_msr_ignored_check(index, data, true))
                        ret = 0;
 
        return ret;
@@ -1649,7 +1657,7 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
        if (ret == KVM_MSR_RET_INVALID) {
                /* Unconditionally clear *data for simplicity */
                *data = 0;
-               if (kvm_msr_ignored_check(vcpu, index, 0, false))
+               if (kvm_msr_ignored_check(index, 0, false))
                        ret = 0;
        }
 
@@ -2320,7 +2328,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
        kvm_vcpu_write_tsc_offset(vcpu, offset);
        raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
-       spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
+       spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
        if (!matched) {
                kvm->arch.nr_vcpus_matched_tsc = 0;
        } else if (!already_matched) {
@@ -2328,7 +2336,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
        }
 
        kvm_track_tsc_matching(vcpu);
-       spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
+       spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
 }
 
 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
@@ -2550,11 +2558,16 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
        int i;
        struct kvm_vcpu *vcpu;
        struct kvm_arch *ka = &kvm->arch;
+       unsigned long flags;
+
+       kvm_hv_invalidate_tsc_page(kvm);
 
-       spin_lock(&ka->pvclock_gtod_sync_lock);
        kvm_make_mclock_inprogress_request(kvm);
+
        /* no guest entries from this point */
+       spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
        pvclock_update_vm_gtod_copy(kvm);
+       spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -2562,8 +2575,6 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
        /* guest entries allowed */
        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
-
-       spin_unlock(&ka->pvclock_gtod_sync_lock);
 #endif
 }
 
@@ -2571,17 +2582,18 @@ u64 get_kvmclock_ns(struct kvm *kvm)
 {
        struct kvm_arch *ka = &kvm->arch;
        struct pvclock_vcpu_time_info hv_clock;
+       unsigned long flags;
        u64 ret;
 
-       spin_lock(&ka->pvclock_gtod_sync_lock);
+       spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
        if (!ka->use_master_clock) {
-               spin_unlock(&ka->pvclock_gtod_sync_lock);
+               spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
                return get_kvmclock_base_ns() + ka->kvmclock_offset;
        }
 
        hv_clock.tsc_timestamp = ka->master_cycle_now;
        hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
-       spin_unlock(&ka->pvclock_gtod_sync_lock);
+       spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
        /* both __this_cpu_read() and rdtsc() should be on the same cpu */
        get_cpu();
@@ -2675,13 +2687,13 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
         * If the host uses TSC clock, then passthrough TSC as stable
         * to the guest.
         */
-       spin_lock(&ka->pvclock_gtod_sync_lock);
+       spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
        use_master_clock = ka->use_master_clock;
        if (use_master_clock) {
                host_tsc = ka->master_cycle_now;
                kernel_ns = ka->master_kernel_ns;
        }
-       spin_unlock(&ka->pvclock_gtod_sync_lock);
+       spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
        /* Keep irq disabled to prevent changes to the clock */
        local_irq_save(flags);
@@ -5352,25 +5364,34 @@ split_irqchip_unlock:
        return r;
 }
 
-static void kvm_clear_msr_filter(struct kvm *kvm)
+static struct kvm_x86_msr_filter *kvm_alloc_msr_filter(bool default_allow)
+{
+       struct kvm_x86_msr_filter *msr_filter;
+
+       msr_filter = kzalloc(sizeof(*msr_filter), GFP_KERNEL_ACCOUNT);
+       if (!msr_filter)
+               return NULL;
+
+       msr_filter->default_allow = default_allow;
+       return msr_filter;
+}
+
+static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter)
 {
        u32 i;
-       u32 count = kvm->arch.msr_filter.count;
-       struct msr_bitmap_range ranges[16];
 
-       mutex_lock(&kvm->lock);
-       kvm->arch.msr_filter.count = 0;
-       memcpy(ranges, kvm->arch.msr_filter.ranges, count * sizeof(ranges[0]));
-       mutex_unlock(&kvm->lock);
-       synchronize_srcu(&kvm->srcu);
+       if (!msr_filter)
+               return;
 
-       for (i = 0; i < count; i++)
-               kfree(ranges[i].bitmap);
+       for (i = 0; i < msr_filter->count; i++)
+               kfree(msr_filter->ranges[i].bitmap);
+
+       kfree(msr_filter);
 }
 
-static int kvm_add_msr_filter(struct kvm *kvm, struct kvm_msr_filter_range *user_range)
+static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,
+                             struct kvm_msr_filter_range *user_range)
 {
-       struct msr_bitmap_range *ranges = kvm->arch.msr_filter.ranges;
        struct msr_bitmap_range range;
        unsigned long *bitmap = NULL;
        size_t bitmap_size;
@@ -5404,11 +5425,9 @@ static int kvm_add_msr_filter(struct kvm *kvm, struct kvm_msr_filter_range *user
                goto err;
        }
 
-       /* Everything ok, add this range identifier to our global pool */
-       ranges[kvm->arch.msr_filter.count] = range;
-       /* Make sure we filled the array before we tell anyone to walk it */
-       smp_wmb();
-       kvm->arch.msr_filter.count++;
+       /* Everything ok, add this range identifier. */
+       msr_filter->ranges[msr_filter->count] = range;
+       msr_filter->count++;
 
        return 0;
 err:
@@ -5419,10 +5438,11 @@ err:
 static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
 {
        struct kvm_msr_filter __user *user_msr_filter = argp;
+       struct kvm_x86_msr_filter *new_filter, *old_filter;
        struct kvm_msr_filter filter;
        bool default_allow;
-       int r = 0;
        bool empty = true;
+       int r = 0;
        u32 i;
 
        if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
@@ -5435,25 +5455,32 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
        if (empty && !default_allow)
                return -EINVAL;
 
-       kvm_clear_msr_filter(kvm);
-
-       kvm->arch.msr_filter.default_allow = default_allow;
+       new_filter = kvm_alloc_msr_filter(default_allow);
+       if (!new_filter)
+               return -ENOMEM;
 
-       /*
-        * Protect from concurrent calls to this function that could trigger
-        * a TOCTOU violation on kvm->arch.msr_filter.count.
-        */
-       mutex_lock(&kvm->lock);
        for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
-               r = kvm_add_msr_filter(kvm, &filter.ranges[i]);
-               if (r)
-                       break;
+               r = kvm_add_msr_filter(new_filter, &filter.ranges[i]);
+               if (r) {
+                       kvm_free_msr_filter(new_filter);
+                       return r;
+               }
        }
 
+       mutex_lock(&kvm->lock);
+
+       /* The per-VM filter is protected by kvm->lock... */
+       old_filter = srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1);
+
+       rcu_assign_pointer(kvm->arch.msr_filter, new_filter);
+       synchronize_srcu(&kvm->srcu);
+
+       kvm_free_msr_filter(old_filter);
+
        kvm_make_all_cpus_request(kvm, KVM_REQ_MSR_FILTER_CHANGED);
        mutex_unlock(&kvm->lock);
 
-       return r;
+       return 0;
 }
 
 long kvm_arch_vm_ioctl(struct file *filp,
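
This is the textbook RCU replacement sequence: build the new filter off to the side, publish it with one pointer store under kvm->lock, wait out readers with synchronize_srcu(), then free the old copy; the live object is never mutated in place. A single-threaded sketch of that ordering (the grace-period wait is stubbed out; synchronize() is a hypothetical stand-in):

#include <stdio.h>
#include <stdlib.h>

struct filter {
	int default_allow;
	int count;
};

static struct filter *current_filter;	/* the published pointer */

static void synchronize(void)
{
	/* stand-in for synchronize_srcu(): wait for all readers
	 * that may still hold the old pointer to finish. */
}

static int replace_filter(int default_allow)
{
	struct filter *new, *old;

	new = calloc(1, sizeof(*new));	/* build off to the side */
	if (!new)
		return -1;
	new->default_allow = default_allow;

	old = current_filter;		/* the swap happens under the lock */
	current_filter = new;

	synchronize();			/* no reader can still see old */
	free(old);
	return 0;
}

int main(void)
{
	replace_filter(1);
	replace_filter(0);
	printf("default_allow=%d\n", current_filter->default_allow);
	free(current_filter);
	return 0;
}
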
@@ -5700,6 +5727,7 @@ set_pit2_out:
        }
 #endif
        case KVM_SET_CLOCK: {
+               struct kvm_arch *ka = &kvm->arch;
                struct kvm_clock_data user_ns;
                u64 now_ns;
 
@@ -5718,8 +5746,22 @@ set_pit2_out:
                 * pvclock_update_vm_gtod_copy().
                 */
                kvm_gen_update_masterclock(kvm);
-               now_ns = get_kvmclock_ns(kvm);
-               kvm->arch.kvmclock_offset += user_ns.clock - now_ns;
+
+               /*
+                * This pairs with kvm_guest_time_update(): when the masterclock
+                * is in use, we use master_kernel_ns + kvmclock_offset to set
+                * unsigned 'system_time'. If we used get_kvmclock_ns() (which
+                * is slightly ahead) here, we would risk going negative on
+                * unsigned 'system_time' when 'user_ns.clock' is very small.
+                */
+               spin_lock_irq(&ka->pvclock_gtod_sync_lock);
+               if (kvm->arch.use_master_clock)
+                       now_ns = ka->master_kernel_ns;
+               else
+                       now_ns = get_kvmclock_base_ns();
+               ka->kvmclock_offset = user_ns.clock - now_ns;
+               spin_unlock_irq(&ka->pvclock_gtod_sync_lock);
+
                kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);
                break;
        }
@@ -6603,7 +6645,7 @@ static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
                int cpu = get_cpu();
 
                cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
-               smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
+               on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask,
                                wbinvd_ipi, NULL, 1);
                put_cpu();
                cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
@@ -7698,6 +7740,7 @@ static void kvm_hyperv_tsc_notifier(void)
        struct kvm *kvm;
        struct kvm_vcpu *vcpu;
        int cpu;
+       unsigned long flags;
 
        mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
@@ -7713,17 +7756,15 @@ static void kvm_hyperv_tsc_notifier(void)
        list_for_each_entry(kvm, &vm_list, vm_list) {
                struct kvm_arch *ka = &kvm->arch;
 
-               spin_lock(&ka->pvclock_gtod_sync_lock);
-
+               spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
                pvclock_update_vm_gtod_copy(kvm);
+               spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
                kvm_for_each_vcpu(cpu, vcpu, kvm)
                        kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 
                kvm_for_each_vcpu(cpu, vcpu, kvm)
                        kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
-
-               spin_unlock(&ka->pvclock_gtod_sync_lock);
        }
        mutex_unlock(&kvm_lock);
 }
@@ -10634,8 +10675,6 @@ void kvm_arch_pre_destroy_vm(struct kvm *kvm)
 
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
-       u32 i;
-
        if (current->mm == kvm->mm) {
                /*
                 * Free memory regions allocated on behalf of userspace,
@@ -10651,8 +10690,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
                mutex_unlock(&kvm->slots_lock);
        }
        static_call_cond(kvm_x86_vm_destroy)(kvm);
-       for (i = 0; i < kvm->arch.msr_filter.count; i++)
-               kfree(kvm->arch.msr_filter.ranges[i].bitmap);
+       kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
        kvm_pic_destroy(kvm);
        kvm_ioapic_destroy(kvm);
        kvm_free_vcpus(kvm);
index 39eb048..9035e34 100644 (file)
@@ -250,7 +250,6 @@ static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
 void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs);
 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
 
-void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
 u64 get_kvmclock_ns(struct kvm *kvm);
 
 int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
index 4b01f7d..ae78cef 100644 (file)
@@ -262,7 +262,7 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
        if (pgprot_val(old_prot) == pgprot_val(new_prot))
                return;
 
-       pa = pfn << page_level_shift(level);
+       pa = pfn << PAGE_SHIFT;
        size = page_level_size(level);
 
        /*
index 6926d0c..7f1b3a8 100644 (file)
@@ -1689,7 +1689,16 @@ emit_jmp:
                }
 
                if (image) {
-                       if (unlikely(proglen + ilen > oldproglen)) {
+                       /*
+                        * When populating the image, assert that:
+                        *
+                        *  i) We do not write beyond the allocated space, and
+                        * ii) addrs[i] did not change from the prior run, in order
+                        *     to validate assumptions made for computing branch
+                        *     displacements.
+                        */
+                       if (unlikely(proglen + ilen > oldproglen ||
+                                    proglen + ilen != addrs[i])) {
                                pr_err("bpf_jit: fatal error\n");
                                return -EFAULT;
                        }
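
The strengthened check asserts the invariant behind the JIT's multi-pass design: addrs[] must have reached a fixed point before the image is populated, or branch displacements computed in earlier passes are wrong. A self-contained toy showing why offsets have to be iterated to convergence (jumps take 2 bytes when the displacement fits in a signed byte, else 5, as with x86 jmp rel8/rel32):

#include <stdio.h>

#define N 4

/* toy program: insn i is "jmp target[i]" */
static const int target[N] = { 3, 0, 0, 1 };

static int len_for(const int *addrs, int i)
{
	/* displacement is relative to the end of insn i */
	int disp = addrs[target[i]] - addrs[i + 1];

	return (disp >= -128 && disp <= 127) ? 2 : 5;	/* rel8 vs rel32 */
}

int main(void)
{
	int addrs[N + 1];
	int changed, passes = 0;

	for (int i = 0; i <= N; i++)	/* pessimistic start: all jumps long */
		addrs[i] = i * 5;

	do {				/* iterate until addrs[] stops moving */
		int off = 0;

		changed = 0;
		passes++;
		for (int i = 0; i < N; i++) {
			off += len_for(addrs, i);
			if (addrs[i + 1] != off) {
				addrs[i + 1] = off;
				changed = 1;
			}
		}
	} while (changed);

	/* "emit" pass: any length change now would corrupt the image */
	int off = 0;
	for (int i = 0; i < N; i++) {
		off += len_for(addrs, i);
		if (off != addrs[i + 1]) {
			printf("fatal: image did not converge\n");
			return 1;
		}
	}
	printf("converged after %d passes, image is %d bytes\n", passes, off);
	return 0;
}

Starting from the pessimistic all-long estimate guarantees lengths only shrink from pass to pass, which is what lets the real JIT bound the number of passes before giving up.
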
@@ -1936,7 +1945,7 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
  * add rsp, 8                      // skip eth_type_trans's frame
  * ret                             // return to its caller
  */
-int arch_prepare_bpf_trampoline(void *image, void *image_end,
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
                                const struct btf_func_model *m, u32 flags,
                                struct bpf_tramp_progs *tprogs,
                                void *orig_call)
@@ -1975,6 +1984,15 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
 
        save_regs(m, &prog, nr_args, stack_size);
 
+       if (flags & BPF_TRAMP_F_CALL_ORIG) {
+               /* arg1: mov rdi, im */
+               emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
+               if (emit_call(&prog, __bpf_tramp_enter, prog)) {
+                       ret = -EINVAL;
+                       goto cleanup;
+               }
+       }
+
        if (fentry->nr_progs)
                if (invoke_bpf(m, &prog, fentry, stack_size))
                        return -EINVAL;
@@ -1993,8 +2011,7 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
        }
 
        if (flags & BPF_TRAMP_F_CALL_ORIG) {
-               if (fentry->nr_progs || fmod_ret->nr_progs)
-                       restore_regs(m, &prog, nr_args, stack_size);
+               restore_regs(m, &prog, nr_args, stack_size);
 
                /* call original function */
                if (emit_call(&prog, orig_call, prog)) {
@@ -2003,6 +2020,9 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
                }
                /* remember return value in a stack for bpf prog to access */
                emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
+               im->ip_after_call = prog;
+               memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
+               prog += X86_PATCH_SIZE;
        }
 
        if (fmod_ret->nr_progs) {
@@ -2033,9 +2053,17 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
         * the return value is only updated on the stack and still needs to be
         * restored to R0.
         */
-       if (flags & BPF_TRAMP_F_CALL_ORIG)
+       if (flags & BPF_TRAMP_F_CALL_ORIG) {
+               im->ip_epilogue = prog;
+               /* arg1: mov rdi, im */
+               emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
+               if (emit_call(&prog, __bpf_tramp_exit, prog)) {
+                       ret = -EINVAL;
+                       goto cleanup;
+               }
                /* restore original return value back into RAX */
                emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
+       }
 
        EMIT1(0x5B); /* pop rbx */
        EMIT1(0xC9); /* leave */
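Taken together with the enter call emitted earlier, a BPF_TRAMP_F_CALL_ORIG trampoline now brackets the original function with image bookkeeping. Roughly, as an illustrative shape rather than the exact emitted bytes:

        /*
         *   mov  rdi, im            ; arg1 = this trampoline's image
         *   call __bpf_tramp_enter  ; pin the image while it runs
         *   ...  fentry programs ...
         *   call <original function>
         *   nop5                    ; im->ip_after_call patch site
         *   ...  fmod_ret / fexit programs ...
         *   mov  rdi, im            ; im->ip_epilogue
         *   call __bpf_tramp_exit   ; drop the image reference
         *   ret
         */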
@@ -2225,7 +2253,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
                padding = true;
                goto skip_init_addrs;
        }
-       addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
+       addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
        if (!addrs) {
                prog = orig_prog;
                goto out_addrs;
@@ -2317,7 +2345,7 @@ out_image:
                if (image)
                        bpf_prog_fill_jited_linfo(prog, addrs + 1);
 out_addrs:
-               kfree(addrs);
+               kvfree(addrs);
                kfree(jit_data);
                prog->aux->jit_data = NULL;
        }
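The kmalloc_array() to kvmalloc_array() switch is what lets very large programs be JITed at all: kvmalloc() falls back to vmalloc() when a physically contiguous allocation would fail, and kvfree() releases either kind of backing. The pairing in isolation, as a minimal sketch:

        int *addrs = kvmalloc_array(len + 1, sizeof(*addrs), GFP_KERNEL);

        if (!addrs)
                return -ENOMEM;
        /* ... fill and use addrs[0..len] ... */
        kvfree(addrs);  /* correct for both kmalloc and vmalloc backing */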
index d17b67c..6a99def 100644
@@ -2276,7 +2276,16 @@ notyet:
                }
 
                if (image) {
-                       if (unlikely(proglen + ilen > oldproglen)) {
+                       /*
+                        * When populating the image, assert that:
+                        *
+                        *  i) We do not write beyond the allocated space, and
+                        * ii) addrs[i] did not change from the prior run, in order
+                        *     to validate assumptions made for computing branch
+                        *     displacements.
+                        */
+                       if (unlikely(proglen + ilen > oldproglen ||
+                                    proglen + ilen != addrs[i])) {
                                pr_err("bpf_jit: fatal error\n");
                                return -EFAULT;
                        }
index 1ac8578..b42bfda 100644
@@ -27,7 +27,6 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Sébastien Hinderer <Sebastien.Hinderer@ens-lyon.org>");
 MODULE_DESCRIPTION("A power_off handler for Iris devices from EuroBraille");
-MODULE_SUPPORTED_DEVICE("Eurobraille/Iris");
 
 static bool force;
 
index 17d80f7..ac06ca3 100644
@@ -98,8 +98,8 @@ EXPORT_SYMBOL_GPL(xen_p2m_size);
 unsigned long xen_max_p2m_pfn __read_mostly;
 EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);
 
-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
-#define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+#ifdef CONFIG_XEN_MEMORY_HOTPLUG_LIMIT
+#define P2M_LIMIT CONFIG_XEN_MEMORY_HOTPLUG_LIMIT
 #else
 #define P2M_LIMIT 0
 #endif
@@ -416,9 +416,6 @@ void __init xen_vmalloc_p2m_tree(void)
        xen_p2m_last_pfn = xen_max_p2m_pfn;
 
        p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
-       if (!p2m_limit && IS_ENABLED(CONFIG_XEN_UNPOPULATED_ALLOC))
-               p2m_limit = xen_start_info->nr_pages * XEN_EXTRA_MEM_RATIO;
-
        vm.flags = VM_ALLOC;
        vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
                        PMD_SIZE * PMDS_PER_MID_PAGE);
index 1a3b756..8bfc103 100644
@@ -59,6 +59,18 @@ static struct {
 } xen_remap_buf __initdata __aligned(PAGE_SIZE);
 static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
 
+/*
+ * The maximum amount of extra memory compared to the base size.  The
+ * main scaling factor is the size of struct page.  At extreme ratios
+ * of base:extra, all the base memory can be filled with page
+ * structures for the extra memory, leaving no space for anything
+ * else.
+ *
+ * 10x seems like a reasonable balance between scaling flexibility and
+ * leaving a practically usable system.
+ */
+#define EXTRA_MEM_RATIO                (10)
+
 static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);
 
 static void __init xen_parse_512gb(void)
@@ -778,13 +790,13 @@ char * __init xen_memory_setup(void)
                extra_pages += max_pages - max_pfn;
 
        /*
-        * Clamp the amount of extra memory to a XEN_EXTRA_MEM_RATIO
+        * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
         * factor of the base size.
         *
         * Make sure we have no memory above max_pages, as this area
         * isn't handled by the p2m management.
         */
-       extra_pages = min3(XEN_EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+       extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
                           extra_pages, max_pages - max_pfn);
        i = 0;
        addr = xen_e820_table.entries[0].addr;
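The 10x figure is easy to sanity-check: assuming 4 KiB pages and the common 64-byte struct page, metadata costs 64/4096 = 1/64, roughly 1.6% of the memory it describes. Extra memory capped at 10x the base therefore costs at most about 16% of base memory in page structures, while a 64x ratio would consume all of it, which is exactly the failure mode the new comment warns about.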
index c426b84..45cc0ae 100644
        LOAD_CP_REGS_TAB(6)
        LOAD_CP_REGS_TAB(7)
 
-/*
- * coprocessor_flush(struct thread_info*, index)
- *                             a2        a3
- *
- * Save coprocessor registers for coprocessor 'index'.
- * The register values are saved to or loaded from the coprocessor area 
- * inside the task_info structure.
- *
- * Note that this function doesn't update the coprocessor_owner information!
- *
- */
-
-ENTRY(coprocessor_flush)
-
-       /* reserve 4 bytes on stack to save a0 */
-       abi_entry(4)
-
-       s32i    a0, a1, 0
-       movi    a0, .Lsave_cp_regs_jump_table
-       addx8   a3, a3, a0
-       l32i    a4, a3, 4
-       l32i    a3, a3, 0
-       add     a2, a2, a4
-       beqz    a3, 1f
-       callx0  a3
-1:     l32i    a0, a1, 0
-
-       abi_ret(4)
-
-ENDPROC(coprocessor_flush)
-
 /*
  * Entry condition:
  *
@@ -245,6 +214,39 @@ ENTRY(fast_coprocessor)
 
 ENDPROC(fast_coprocessor)
 
+       .text
+
+/*
+ * coprocessor_flush(struct thread_info*, index)
+ *                             a2        a3
+ *
+ * Save coprocessor registers for coprocessor 'index'.
+ * The register values are saved to or loaded from the coprocessor area
+ * inside the task_info structure.
+ *
+ * Note that this function doesn't update the coprocessor_owner information!
+ *
+ */
+
+ENTRY(coprocessor_flush)
+
+       /* reserve 4 bytes on stack to save a0 */
+       abi_entry(4)
+
+       s32i    a0, a1, 0
+       movi    a0, .Lsave_cp_regs_jump_table
+       addx8   a3, a3, a0
+       l32i    a4, a3, 4
+       l32i    a3, a3, 0
+       add     a2, a2, a4
+       beqz    a3, 1f
+       callx0  a3
+1:     l32i    a0, a1, 0
+
+       abi_ret(4)
+
+ENDPROC(coprocessor_flush)
+
        .data
 
 ENTRY(coprocessor_owner)
index 7666408..95a7489 100644
@@ -112,8 +112,11 @@ good_area:
         */
        fault = handle_mm_fault(vma, address, flags, regs);
 
-       if (fault_signal_pending(fault, regs))
+       if (fault_signal_pending(fault, regs)) {
+               if (!user_mode(regs))
+                       goto bad_page_fault;
                return;
+       }
 
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
index 26b7f72..50e5790 100644
@@ -277,7 +277,7 @@ static struct bio *__bio_chain_endio(struct bio *bio)
 {
        struct bio *parent = bio->bi_private;
 
-       if (!parent->bi_status)
+       if (bio->bi_status && !parent->bi_status)
                parent->bi_status = bio->bi_status;
        bio_put(bio);
        return parent;
@@ -949,7 +949,7 @@ void bio_release_pages(struct bio *bio, bool mark_dirty)
 }
 EXPORT_SYMBOL_GPL(bio_release_pages);
 
-static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
+static void __bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
 {
        WARN_ON_ONCE(bio->bi_max_vecs);
 
@@ -959,11 +959,26 @@ static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
        bio->bi_iter.bi_size = iter->count;
        bio_set_flag(bio, BIO_NO_PAGE_REF);
        bio_set_flag(bio, BIO_CLONED);
+}
 
+static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
+{
+       __bio_iov_bvec_set(bio, iter);
        iov_iter_advance(iter, iter->count);
        return 0;
 }
 
+static int bio_iov_bvec_set_append(struct bio *bio, struct iov_iter *iter)
+{
+       struct request_queue *q = bio->bi_bdev->bd_disk->queue;
+       struct iov_iter i = *iter;
+
+       iov_iter_truncate(&i, queue_max_zone_append_sectors(q) << 9);
+       __bio_iov_bvec_set(bio, &i);
+       iov_iter_advance(iter, i.count);
+       return 0;
+}
+
 #define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))
 
 /**
@@ -1094,8 +1109,8 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
        int ret = 0;
 
        if (iov_iter_is_bvec(iter)) {
-               if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
-                       return -EINVAL;
+               if (bio_op(bio) == REQ_OP_ZONE_APPEND)
+                       return bio_iov_bvec_set_append(bio, iter);
                return bio_iov_bvec_set(bio, iter);
        }
 
index ffb4aa0..4d97fb6 100644
@@ -382,6 +382,14 @@ unsigned int blk_recalc_rq_segments(struct request *rq)
        switch (bio_op(rq->bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
+               if (queue_max_discard_segments(rq->q) > 1) {
+                       struct bio *bio = rq->bio;
+
+                       for_each_bio(bio)
+                               nr_phys_segs++;
+                       return nr_phys_segs;
+               }
+               return 1;
        case REQ_OP_WRITE_ZEROES:
                return 0;
        case REQ_OP_WRITE_SAME:
index 9ebb344..271f659 100644
@@ -302,7 +302,6 @@ static const char *const rqf_name[] = {
        RQF_NAME(QUIET),
        RQF_NAME(ELVPRIV),
        RQF_NAME(IO_STAT),
-       RQF_NAME(ALLOCED),
        RQF_NAME(PM),
        RQF_NAME(HASHED),
        RQF_NAME(STATS),
index 1a75589..46f055b 100644
@@ -322,6 +322,13 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
        const char *dname;
        int err;
 
+       /*
+        * disk_max_parts() won't be zero, since either GENHD_FL_EXT_DEVT
+        * is set or 'minors' is passed to alloc_disk().
+        */
+       if (partno >= disk_max_parts(disk))
+               return ERR_PTR(-EINVAL);
+
        /*
         * Partitions are not supported on zoned block devices that are used as
         * such.
index 3f045b5..a0c1a66 100644
@@ -99,13 +99,12 @@ acpi_status acpi_ns_root_initialize(void)
                 * just create and link the new node(s) here.
                 */
                new_node =
-                   ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_namespace_node));
+                   acpi_ns_create_node(*ACPI_CAST_PTR(u32, init_val->name));
                if (!new_node) {
                        status = AE_NO_MEMORY;
                        goto unlock_and_exit;
                }
 
-               ACPI_COPY_NAMESEG(new_node->name.ascii, init_val->name);
                new_node->descriptor_type = ACPI_DESC_TYPE_NAMED;
                new_node->type = init_val->type;
 
index e6a5d99..cb8f708 100644
@@ -9,6 +9,8 @@
 #ifndef _ACPI_INTERNAL_H_
 #define _ACPI_INTERNAL_H_
 
+#include <linux/idr.h>
+
 #define PREFIX "ACPI: "
 
 int early_acpi_osi_init(void);
@@ -96,9 +98,11 @@ void acpi_scan_table_handler(u32 event, void *table, void *context);
 
 extern struct list_head acpi_bus_id_list;
 
+#define ACPI_MAX_DEVICE_INSTANCES      4096
+
 struct acpi_device_bus_id {
        const char *bus_id;
-       unsigned int instance_no;
+       struct ida instance_ida;
        struct list_head node;
 };
 
index d93e400..4e2d76b 100644
@@ -29,6 +29,7 @@
  */
 #ifdef CONFIG_X86
 #include <asm/apic.h>
+#include <asm/cpu.h>
 #endif
 
 #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
@@ -541,6 +542,10 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
                        wait_for_freeze();
                } else
                        return -ENODEV;
+
+#if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU)
+               cond_wakeup_cpu0();
+#endif
        }
 
        /* Never reached */
index a184529..6efe7ed 100644
@@ -479,9 +479,8 @@ static void acpi_device_del(struct acpi_device *device)
        list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node)
                if (!strcmp(acpi_device_bus_id->bus_id,
                            acpi_device_hid(device))) {
-                       if (acpi_device_bus_id->instance_no > 0)
-                               acpi_device_bus_id->instance_no--;
-                       else {
+                       ida_simple_remove(&acpi_device_bus_id->instance_ida, device->pnp.instance_no);
+                       if (ida_is_empty(&acpi_device_bus_id->instance_ida)) {
                                list_del(&acpi_device_bus_id->node);
                                kfree_const(acpi_device_bus_id->bus_id);
                                kfree(acpi_device_bus_id);
@@ -631,6 +630,21 @@ static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id)
        return NULL;
 }
 
+static int acpi_device_set_name(struct acpi_device *device,
+                               struct acpi_device_bus_id *acpi_device_bus_id)
+{
+       struct ida *instance_ida = &acpi_device_bus_id->instance_ida;
+       int result;
+
+       result = ida_simple_get(instance_ida, 0, ACPI_MAX_DEVICE_INSTANCES, GFP_KERNEL);
+       if (result < 0)
+               return result;
+
+       device->pnp.instance_no = result;
+       dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, result);
+       return 0;
+}
+
 int acpi_device_add(struct acpi_device *device,
                    void (*release)(struct device *))
 {
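Instance numbers for a given bus_id now come from a per-entry IDA rather than a bare counter, so freed numbers are recycled and allocation fails cleanly at ACPI_MAX_DEVICE_INSTANCES. The allocator pattern in isolation, with a hypothetical example_ida:

        #include <linux/idr.h>

        static DEFINE_IDA(example_ida);         /* hypothetical per-name allocator */

        static int example_assign_instance(void)
        {
                int id = ida_simple_get(&example_ida, 0, 4096, GFP_KERNEL);

                if (id < 0)
                        return id;      /* e.g. -ENOSPC once the range is full */
                /* ... use id as the :%02x instance suffix ... */
                ida_simple_remove(&example_ida, id);    /* on device removal */
                return 0;
        }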
@@ -665,7 +679,9 @@ int acpi_device_add(struct acpi_device *device,
 
        acpi_device_bus_id = acpi_device_bus_id_match(acpi_device_hid(device));
        if (acpi_device_bus_id) {
-               acpi_device_bus_id->instance_no++;
+               result = acpi_device_set_name(device, acpi_device_bus_id);
+               if (result)
+                       goto err_unlock;
        } else {
                acpi_device_bus_id = kzalloc(sizeof(*acpi_device_bus_id),
                                             GFP_KERNEL);
@@ -681,9 +697,16 @@ int acpi_device_add(struct acpi_device *device,
                        goto err_unlock;
                }
 
+               ida_init(&acpi_device_bus_id->instance_ida);
+
+               result = acpi_device_set_name(device, acpi_device_bus_id);
+               if (result) {
+                       kfree(acpi_device_bus_id);
+                       goto err_unlock;
+               }
+
                list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
        }
-       dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no);
 
        if (device->parent)
                list_add_tail(&device->node, &device->parent->children);
@@ -1647,6 +1670,8 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
        device_initialize(&device->dev);
        dev_set_uevent_suppress(&device->dev, true);
        acpi_init_coherency(device);
+       /* Assume there are unmet deps to start with. */
+       device->dep_unmet = 1;
 }
 
 void acpi_device_add_finalize(struct acpi_device *device)
@@ -1910,6 +1935,8 @@ static void acpi_scan_dep_init(struct acpi_device *adev)
 {
        struct acpi_dep_data *dep;
 
+       adev->dep_unmet = 0;
+
        mutex_lock(&acpi_dep_list_lock);
 
        list_for_each_entry(dep, &acpi_dep_list, node) {
@@ -1957,7 +1984,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
                return AE_CTRL_DEPTH;
 
        acpi_scan_init_hotplug(device);
-       if (!check_dep)
+       /*
+        * If check_dep is true at this point, the device has no dependencies,
+        * or the creation of the device object would have been postponed above.
+        */
+       if (check_dep)
+               device->dep_unmet = 0;
+       else
                acpi_scan_dep_init(device);
 
 out:
index e48690a..9d58104 100644
@@ -780,7 +780,7 @@ acpi_status acpi_os_table_override(struct acpi_table_header *existing_table,
 }
 
 /*
- * acpi_table_init()
+ * acpi_locate_initial_tables()
  *
  * find RSDP, find and checksum SDT/XSDT.
  * checksum all tables, print SDT/XSDT
@@ -788,7 +788,7 @@ acpi_status acpi_os_table_override(struct acpi_table_header *existing_table,
  * result: sdt_entry[] is initialized
  */
 
-int __init acpi_table_init(void)
+int __init acpi_locate_initial_tables(void)
 {
        acpi_status status;
 
@@ -803,9 +803,45 @@ int __init acpi_table_init(void)
        status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
        if (ACPI_FAILURE(status))
                return -EINVAL;
-       acpi_table_initrd_scan();
 
+       return 0;
+}
+
+void __init acpi_reserve_initial_tables(void)
+{
+       int i;
+
+       for (i = 0; i < ACPI_MAX_TABLES; i++) {
+               struct acpi_table_desc *table_desc = &initial_tables[i];
+               u64 start = table_desc->address;
+               u64 size = table_desc->length;
+
+               if (!start || !size)
+                       break;
+
+               pr_info("Reserving %4s table memory at [mem 0x%llx-0x%llx]\n",
+                       table_desc->signature.ascii, start, start + size - 1);
+
+               memblock_reserve(start, size);
+       }
+}
+
+void __init acpi_table_init_complete(void)
+{
+       acpi_table_initrd_scan();
        check_multiple_madt();
+}
+
+int __init acpi_table_init(void)
+{
+       int ret;
+
+       ret = acpi_locate_initial_tables();
+       if (ret)
+               return ret;
+
+       acpi_table_init_complete();
+
        return 0;
 }
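Reading the new helpers, the split lets an architecture locate the tables very early, memblock_reserve() their backing memory before the allocator can hand it out, and defer the initrd override scan until later; acpi_table_init() preserves the old single-call behaviour for everyone else.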
 
index 811d298..83cd4c9 100644
@@ -147,6 +147,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
                },
        },
        {
+       .callback = video_detect_force_vendor,
        .ident = "Sony VPCEH3U1E",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
index 9a70bee..495fd0a 100644
@@ -100,8 +100,6 @@ static LIST_HEAD(fore200e_boards);
 
 MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
 MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
-MODULE_SUPPORTED_DEVICE("PCA-200E, SBA-200E");
-
 
 static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
     { BUFFER_S1_NBR, BUFFER_L1_NBR },
index f43430e..24fd6f3 100644
@@ -470,12 +470,14 @@ static ssize_t charlcd_write(struct file *file, const char __user *buf,
        char c;
 
        for (; count-- > 0; (*ppos)++, tmp++) {
-               if (!in_interrupt() && (((count + 1) & 0x1f) == 0))
+               if (((count + 1) & 0x1f) == 0) {
                        /*
-                        * let's be a little nice with other processes
-                        * that need some CPU
+                        * charlcd_write() is invoked as a VFS->write() callback
+                        * and as such always runs from preemptible
+                        * context and may sleep.
                         */
-                       schedule();
+                       cond_resched();
+               }
 
                if (get_user(c, tmp))
                        return -EFAULT;
@@ -537,12 +539,8 @@ static void charlcd_puts(struct charlcd *lcd, const char *s)
        int count = strlen(s);
 
        for (; count-- > 0; tmp++) {
-               if (!in_interrupt() && (((count + 1) & 0x1f) == 0))
-                       /*
-                        * let's be a little nice with other processes
-                        * that need some CPU
-                        */
-                       schedule();
+               if (((count + 1) & 0x1f) == 0)
+                       cond_resched();
 
                charlcd_write_char(lcd, *tmp);
        }
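The rule the new comment states generalizes: a loop that is only ever entered from a sleepable path can yield the CPU voluntarily instead of testing in_interrupt(). A minimal illustrative loop, with handle_one_byte() as a hypothetical helper:

        static void process_buffer(const char *buf, size_t count)
        {
                size_t i;

                for (i = 0; i < count; i++) {
                        handle_one_byte(buf[i]);
                        if ((i & 0x1f) == 0x1f)
                                cond_resched(); /* cheap no-op if nothing else is runnable */
                }
        }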
index 9179825..37a5e5f 100644
@@ -97,6 +97,9 @@ static void deferred_probe_work_func(struct work_struct *work)
 
                get_device(dev);
 
+               kfree(dev->p->deferred_probe_reason);
+               dev->p->deferred_probe_reason = NULL;
+
                /*
                 * Drop the mutex while probing each device; the probe path may
                 * manipulate the deferred list
@@ -289,14 +292,16 @@ int driver_deferred_probe_check_state(struct device *dev)
 
 static void deferred_probe_timeout_work_func(struct work_struct *work)
 {
-       struct device_private *private, *p;
+       struct device_private *p;
 
        driver_deferred_probe_timeout = 0;
        driver_deferred_probe_trigger();
        flush_work(&deferred_probe_work);
 
-       list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe)
-               dev_info(private->device, "deferred probe pending\n");
+       mutex_lock(&deferred_probe_mutex);
+       list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
+               dev_info(p->device, "deferred probe pending\n");
+       mutex_unlock(&deferred_probe_mutex);
        wake_up_all(&probe_timeout_waitqueue);
 }
 static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
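The second hunk also fixes a subtlety: list_for_each_entry_safe() only tolerates the loop body removing the current entry; it offers no protection against concurrent writers. Since other contexts add and remove entries on deferred_probe_pending_list, the walk must hold deferred_probe_mutex like every other accessor, after which plain list_for_each_entry() suffices.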
index 18b8242..fe1dad6 100644
@@ -305,7 +305,7 @@ static int rpm_get_suppliers(struct device *dev)
        return 0;
 }
 
-static void rpm_put_suppliers(struct device *dev)
+static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
 {
        struct device_link *link;
 
@@ -313,10 +313,30 @@ static void rpm_put_suppliers(struct device *dev)
                                device_links_read_lock_held()) {
 
                while (refcount_dec_not_one(&link->rpm_active))
-                       pm_runtime_put(link->supplier);
+                       pm_runtime_put_noidle(link->supplier);
+
+               if (try_to_suspend)
+                       pm_request_idle(link->supplier);
        }
 }
 
+static void rpm_put_suppliers(struct device *dev)
+{
+       __rpm_put_suppliers(dev, true);
+}
+
+static void rpm_suspend_suppliers(struct device *dev)
+{
+       struct device_link *link;
+       int idx = device_links_read_lock();
+
+       list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+                               device_links_read_lock_held())
+               pm_request_idle(link->supplier);
+
+       device_links_read_unlock(idx);
+}
+
 /**
  * __rpm_callback - Run a given runtime PM callback for a given device.
  * @cb: Runtime PM callback to run.
@@ -325,27 +345,29 @@ static void rpm_put_suppliers(struct device *dev)
 static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
        __releases(&dev->power.lock) __acquires(&dev->power.lock)
 {
-       bool use_links = dev->power.links_count > 0;
-       bool get = false;
        int retval, idx;
-       bool put;
+       bool use_links = dev->power.links_count > 0;
 
        if (dev->power.irq_safe) {
                spin_unlock(&dev->power.lock);
-       } else if (!use_links) {
-               spin_unlock_irq(&dev->power.lock);
        } else {
-               get = dev->power.runtime_status == RPM_RESUMING;
-
                spin_unlock_irq(&dev->power.lock);
 
-               /* Resume suppliers if necessary. */
-               if (get) {
+               /*
+                * Resume suppliers if necessary.
+                *
+                * The device's runtime PM status cannot change until this
+                * routine returns, so it is safe to read the status outside of
+                * the lock.
+                */
+               if (use_links && dev->power.runtime_status == RPM_RESUMING) {
                        idx = device_links_read_lock();
 
                        retval = rpm_get_suppliers(dev);
-                       if (retval)
+                       if (retval) {
+                               rpm_put_suppliers(dev);
                                goto fail;
+                       }
 
                        device_links_read_unlock(idx);
                }
@@ -355,36 +377,24 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
 
        if (dev->power.irq_safe) {
                spin_lock(&dev->power.lock);
-               return retval;
-       }
-
-       spin_lock_irq(&dev->power.lock);
-
-       if (!use_links)
-               return retval;
-
-       /*
-        * If the device is suspending and the callback has returned success,
-        * drop the usage counters of the suppliers that have been reference
-        * counted on its resume.
-        *
-        * Do that if the resume fails too.
-        */
-       put = dev->power.runtime_status == RPM_SUSPENDING && !retval;
-       if (put)
-               __update_runtime_status(dev, RPM_SUSPENDED);
-       else
-               put = get && retval;
-
-       if (put) {
-               spin_unlock_irq(&dev->power.lock);
+       } else {
+               /*
+                * If the device is suspending and the callback has returned
+                * success, drop the usage counters of the suppliers that have
+                * been reference counted on its resume.
+                *
+                * Do that if resume fails too.
+                */
+               if (use_links
+                   && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
+                   || (dev->power.runtime_status == RPM_RESUMING && retval))) {
+                       idx = device_links_read_lock();
 
-               idx = device_links_read_lock();
+                       __rpm_put_suppliers(dev, false);
 
 fail:
-               rpm_put_suppliers(dev);
-
-               device_links_read_unlock(idx);
+                       device_links_read_unlock(idx);
+               }
 
                spin_lock_irq(&dev->power.lock);
        }
@@ -654,8 +664,11 @@ static int rpm_suspend(struct device *dev, int rpmflags)
                goto out;
        }
 
+       if (dev->power.irq_safe)
+               goto out;
+
        /* Maybe the parent is now able to suspend. */
-       if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
+       if (parent && !parent->power.ignore_children) {
                spin_unlock(&dev->power.lock);
 
                spin_lock(&parent->power.lock);
@@ -664,6 +677,14 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 
                spin_lock(&dev->power.lock);
        }
+       /* Maybe the suppliers are now able to suspend. */
+       if (dev->power.links_count > 0) {
+               spin_unlock_irq(&dev->power.lock);
+
+               rpm_suspend_suppliers(dev);
+
+               spin_lock_irq(&dev->power.lock);
+       }
 
  out:
        trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
@@ -1669,8 +1690,8 @@ void pm_runtime_get_suppliers(struct device *dev)
                                device_links_read_lock_held())
                if (link->flags & DL_FLAG_PM_RUNTIME) {
                        link->supplier_preactivated = true;
-                       refcount_inc(&link->rpm_active);
                        pm_runtime_get_sync(link->supplier);
+                       refcount_inc(&link->rpm_active);
                }
 
        device_links_read_unlock(idx);
@@ -1683,6 +1704,8 @@ void pm_runtime_get_suppliers(struct device *dev)
 void pm_runtime_put_suppliers(struct device *dev)
 {
        struct device_link *link;
+       unsigned long flags;
+       bool put;
        int idx;
 
        idx = device_links_read_lock();
@@ -1691,7 +1714,11 @@ void pm_runtime_put_suppliers(struct device *dev)
                                device_links_read_lock_held())
                if (link->supplier_preactivated) {
                        link->supplier_preactivated = false;
-                       if (refcount_dec_not_one(&link->rpm_active))
+                       spin_lock_irqsave(&dev->power.lock, flags);
+                       put = pm_runtime_status_suspended(dev) &&
+                             refcount_dec_not_one(&link->rpm_active);
+                       spin_unlock_irqrestore(&dev->power.lock, flags);
+                       if (put)
                                pm_runtime_put(link->supplier);
                }
 
index 0b71292..4aa9683 100644
@@ -5091,7 +5091,6 @@ module_param(floppy, charp, 0);
 module_param(FLOPPY_IRQ, int, 0);
 module_param(FLOPPY_DMA, int, 0);
 MODULE_AUTHOR("Alain L. Knaff");
-MODULE_SUPPORTED_DEVICE("fd");
 MODULE_LICENSE("GPL");
 
 /* This doesn't actually get used other than for module information */
index d6c821d..51bfd77 100644
@@ -1369,10 +1369,13 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
        }
 
        if (dev->zoned)
-               cmd->error = null_process_zoned_cmd(cmd, op,
-                                                   sector, nr_sectors);
+               sts = null_process_zoned_cmd(cmd, op, sector, nr_sectors);
        else
-               cmd->error = null_process_cmd(cmd, op, sector, nr_sectors);
+               sts = null_process_cmd(cmd, op, sector, nr_sectors);
+
+       /* Do not overwrite errors (e.g. timeout errors) */
+       if (cmd->error == BLK_STS_OK)
+               cmd->error = sts;
 
 out:
        nullb_complete_cmd(cmd);
@@ -1451,8 +1454,20 @@ static bool should_requeue_request(struct request *rq)
 
 static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
 {
+       struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
+
        pr_info("rq %p timed out\n", rq);
-       blk_mq_complete_request(rq);
+
+       /*
+        * If the device is marked as blocking (i.e. memory backed or zoned
+        * device), the submission path may be blocked waiting for resources
+        * and cause real timeouts. For these real timeouts, the submission
+        * path will complete the request using blk_mq_complete_request().
+        * Only fake timeouts need to execute blk_mq_complete_request() here.
+        */
+       cmd->error = BLK_STS_TIMEOUT;
+       if (cmd->fake_timeout)
+               blk_mq_complete_request(rq);
        return BLK_EH_DONE;
 }
 
@@ -1473,6 +1488,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
        cmd->rq = bd->rq;
        cmd->error = BLK_STS_OK;
        cmd->nq = nq;
+       cmd->fake_timeout = should_timeout_request(bd->rq);
 
        blk_mq_start_request(bd->rq);
 
@@ -1489,7 +1505,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
                        return BLK_STS_OK;
                }
        }
-       if (should_timeout_request(bd->rq))
+       if (cmd->fake_timeout)
                return BLK_STS_OK;
 
        return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
index 83504f3..4876d5a 100644
@@ -22,6 +22,7 @@ struct nullb_cmd {
        blk_status_t error;
        struct nullb_queue *nq;
        struct hrtimer timer;
+       bool fake_timeout;
 };
 
 struct nullb_queue {
index 1cdf09f..14e4528 100644
@@ -891,7 +891,7 @@ next:
 out:
        for (i = last_map; i < num; i++) {
                /* Don't zap current batch's valid persistent grants. */
-               if(i >= last_map + segs_to_map)
+               if(i >= map_until)
                        pages[i]->persistent_gnt = NULL;
                pages[i]->handle = BLKBACK_INVALID_HANDLE;
        }
index 3951f7b..bea1595 100644
@@ -194,5 +194,4 @@ module_init(rsi_91x_bt_module_init);
 module_exit(rsi_91x_bt_module_exit);
 MODULE_AUTHOR("Redpine Signals Inc");
 MODULE_DESCRIPTION("RSI BT driver");
-MODULE_SUPPORTED_DEVICE("RSI-BT");
 MODULE_LICENSE("Dual BSD/GPL");
index 52683fd..5cbfbd9 100644
@@ -4849,8 +4849,8 @@ static int btusb_probe(struct usb_interface *intf,
                        data->diag = NULL;
        }
 
-       if (!enable_autosuspend)
-               usb_disable_autosuspend(data->udev);
+       if (enable_autosuspend)
+               usb_enable_autosuspend(data->udev);
 
        err = hci_register_dev(hdev);
        if (err < 0)
@@ -4910,9 +4910,6 @@ static void btusb_disconnect(struct usb_interface *intf)
                gpiod_put(data->reset_gpio);
 
        hci_free_dev(hdev);
-
-       if (!enable_autosuspend)
-               usb_enable_autosuspend(data->udev);
 }
 
 #ifdef CONFIG_PM
index b20fdcb..fd87a59 100644
@@ -2,7 +2,7 @@
 /*
  * Turris Mox module configuration bus driver
  *
- * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
+ * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
  */
 
 #include <dt-bindings/bus/moxtet.h>
@@ -879,6 +879,6 @@ static void __exit moxtet_exit(void)
 }
 module_exit(moxtet_exit);
 
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
 MODULE_DESCRIPTION("CZ.NIC's Turris Mox module configuration bus");
 MODULE_LICENSE("GPL v2");
index dd9e734..ea04249 100644
@@ -618,7 +618,7 @@ mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end)
                 * This part of the memory is above 4 GB, so we don't
                 * care for the MBus bridge hole.
                 */
-               if (reg_start >= 0x100000000ULL)
+               if ((u64)reg_start >= 0x100000000ULL)
                        continue;
 
                /*
index b040447..dcfb32e 100644
@@ -285,7 +285,7 @@ static int omap_l3_probe(struct platform_device *pdev)
         */
        l3->debug_irq = platform_get_irq(pdev, 0);
        ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler,
-                              0x0, "l3-dbg-irq", l3);
+                              IRQF_NO_THREAD, "l3-dbg-irq", l3);
        if (ret) {
                dev_err(l3->dev, "request_irq failed for %d\n",
                        l3->debug_irq);
@@ -294,7 +294,7 @@ static int omap_l3_probe(struct platform_device *pdev)
 
        l3->app_irq = platform_get_irq(pdev, 1);
        ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler,
-                              0x0, "l3-app-irq", l3);
+                              IRQF_NO_THREAD, "l3-app-irq", l3);
        if (ret)
                dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq);
 
index a27d751..3d74f23 100644
@@ -3053,7 +3053,9 @@ static int sysc_remove(struct platform_device *pdev)
 
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
-       reset_control_assert(ddata->rsts);
+
+       if (!reset_control_status(ddata->rsts))
+               reset_control_assert(ddata->rsts);
 
 unprepare:
        sysc_unprepare(ddata);
index a086dd3..4f501e4 100644
@@ -125,7 +125,7 @@ config AGP_HP_ZX1
 
 config AGP_PARISC
        tristate "HP Quicksilver AGP support"
-       depends on AGP && PARISC && 64BIT
+       depends on AGP && PARISC && 64BIT && IOMMU_SBA
        help
          This option gives you AGP GART support for the HP Quicksilver
          AGP bus adapter on HP PA-RISC machines (Ok, just on the C8000
index 14b2d80..45ac7ab 100644
@@ -81,9 +81,6 @@ MODULE_DESCRIPTION("Driver for Applicom Profibus card");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_MISCDEV(AC_MINOR);
 
-MODULE_SUPPORTED_DEVICE("ac");
-
-
 static struct applicom_board {
        unsigned long PhysIO;
        void __iomem *RamIO;
index aff0a8e..776abbf 100644
@@ -64,7 +64,6 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jonathan Buzzard <jonathan@buzzard.org.uk>");
 MODULE_DESCRIPTION("Toshiba laptop SMM driver");
-MODULE_SUPPORTED_DEVICE("toshiba");
 
 static DEFINE_MUTEX(tosh_mutex);
 static int tosh_fn;
index 4f7bf39..4e4b6d3 100644
@@ -66,7 +66,14 @@ EXPORT_SYMBOL_GPL(clk_fixed_factor_ops);
 
 static void devm_clk_hw_register_fixed_factor_release(struct device *dev, void *res)
 {
-       clk_hw_unregister_fixed_factor(&((struct clk_fixed_factor *)res)->hw);
+       struct clk_fixed_factor *fix = res;
+
+       /*
+        * We cannot use clk_hw_unregister_fixed_factor(), since it would kfree()
+        * the hw, resulting in a double free. Just unregister the hw and let the
+        * devres code kfree() it.
+        */
+       clk_hw_unregister(&fix->hw);
 }
 
 static struct clk_hw *
index e54e797..20582aa 100644
@@ -8,6 +8,7 @@
  */
 
 #include <linux/clk-provider.h>
+#include <linux/device.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/io.h>
@@ -206,6 +207,40 @@ struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np,
 }
 EXPORT_SYMBOL_GPL(__clk_hw_register_mux);
 
+static void devm_clk_hw_release_mux(struct device *dev, void *res)
+{
+       clk_hw_unregister_mux(*(struct clk_hw **)res);
+}
+
+struct clk_hw *__devm_clk_hw_register_mux(struct device *dev, struct device_node *np,
+               const char *name, u8 num_parents,
+               const char * const *parent_names,
+               const struct clk_hw **parent_hws,
+               const struct clk_parent_data *parent_data,
+               unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
+               u8 clk_mux_flags, u32 *table, spinlock_t *lock)
+{
+       struct clk_hw **ptr, *hw;
+
+       ptr = devres_alloc(devm_clk_hw_release_mux, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return ERR_PTR(-ENOMEM);
+
+       hw = __clk_hw_register_mux(dev, np, name, num_parents, parent_names, parent_hws,
+                                      parent_data, flags, reg, shift, mask,
+                                      clk_mux_flags, table, lock);
+
+       if (!IS_ERR(hw)) {
+               *ptr = hw;
+               devres_add(dev, ptr);
+       } else {
+               devres_free(ptr);
+       }
+
+       return hw;
+}
+EXPORT_SYMBOL_GPL(__devm_clk_hw_register_mux);
+
 struct clk *clk_register_mux_table(struct device *dev, const char *name,
                const char * const *parent_names, u8 num_parents,
                unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
index 5052541..39cfc6c 100644
@@ -4357,20 +4357,19 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
        /* search the list of notifiers for this clk */
        list_for_each_entry(cn, &clk_notifier_list, node)
                if (cn->clk == clk)
-                       break;
+                       goto found;
 
        /* if clk wasn't in the notifier list, allocate new clk_notifier */
-       if (cn->clk != clk) {
-               cn = kzalloc(sizeof(*cn), GFP_KERNEL);
-               if (!cn)
-                       goto out;
+       cn = kzalloc(sizeof(*cn), GFP_KERNEL);
+       if (!cn)
+               goto out;
 
-               cn->clk = clk;
-               srcu_init_notifier_head(&cn->notifier_head);
+       cn->clk = clk;
+       srcu_init_notifier_head(&cn->notifier_head);
 
-               list_add(&cn->node, &clk_notifier_list);
-       }
+       list_add(&cn->node, &clk_notifier_list);
 
+found:
        ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
 
        clk->core->notifier_count++;
@@ -4395,32 +4394,28 @@ EXPORT_SYMBOL_GPL(clk_notifier_register);
  */
 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
 {
-       struct clk_notifier *cn = NULL;
-       int ret = -EINVAL;
+       struct clk_notifier *cn;
+       int ret = -ENOENT;
 
        if (!clk || !nb)
                return -EINVAL;
 
        clk_prepare_lock();
 
-       list_for_each_entry(cn, &clk_notifier_list, node)
-               if (cn->clk == clk)
-                       break;
-
-       if (cn->clk == clk) {
-               ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
+       list_for_each_entry(cn, &clk_notifier_list, node) {
+               if (cn->clk == clk) {
+                       ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
 
-               clk->core->notifier_count--;
+                       clk->core->notifier_count--;
 
-               /* XXX the notifier code should handle this better */
-               if (!cn->notifier_head.head) {
-                       srcu_cleanup_notifier_head(&cn->notifier_head);
-                       list_del(&cn->node);
-                       kfree(cn);
+                       /* XXX the notifier code should handle this better */
+                       if (!cn->notifier_head.head) {
+                               srcu_cleanup_notifier_head(&cn->notifier_head);
+                               list_del(&cn->node);
+                               kfree(cn);
+                       }
+                       break;
                }
-
-       } else {
-               ret = -ENOENT;
        }
 
        clk_prepare_unlock();
index dbac565..9bcf2f8 100644
@@ -304,7 +304,7 @@ static struct clk_rcg2 cam_cc_bps_clk_src = {
                .name = "cam_cc_bps_clk_src",
                .parent_data = cam_cc_parent_data_2,
                .num_parents = 5,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -325,7 +325,7 @@ static struct clk_rcg2 cam_cc_cci_0_clk_src = {
                .name = "cam_cc_cci_0_clk_src",
                .parent_data = cam_cc_parent_data_5,
                .num_parents = 3,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -339,7 +339,7 @@ static struct clk_rcg2 cam_cc_cci_1_clk_src = {
                .name = "cam_cc_cci_1_clk_src",
                .parent_data = cam_cc_parent_data_5,
                .num_parents = 3,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -360,7 +360,7 @@ static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
                .name = "cam_cc_cphy_rx_clk_src",
                .parent_data = cam_cc_parent_data_3,
                .num_parents = 6,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -379,7 +379,7 @@ static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
                .name = "cam_cc_csi0phytimer_clk_src",
                .parent_data = cam_cc_parent_data_0,
                .num_parents = 4,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -393,7 +393,7 @@ static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
                .name = "cam_cc_csi1phytimer_clk_src",
                .parent_data = cam_cc_parent_data_0,
                .num_parents = 4,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -407,7 +407,7 @@ static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
                .name = "cam_cc_csi2phytimer_clk_src",
                .parent_data = cam_cc_parent_data_0,
                .num_parents = 4,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -421,7 +421,7 @@ static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
                .name = "cam_cc_csi3phytimer_clk_src",
                .parent_data = cam_cc_parent_data_0,
                .num_parents = 4,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -443,7 +443,7 @@ static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
                .name = "cam_cc_fast_ahb_clk_src",
                .parent_data = cam_cc_parent_data_0,
                .num_parents = 4,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -466,7 +466,7 @@ static struct clk_rcg2 cam_cc_icp_clk_src = {
                .name = "cam_cc_icp_clk_src",
                .parent_data = cam_cc_parent_data_2,
                .num_parents = 5,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -488,7 +488,7 @@ static struct clk_rcg2 cam_cc_ife_0_clk_src = {
                .name = "cam_cc_ife_0_clk_src",
                .parent_data = cam_cc_parent_data_4,
                .num_parents = 4,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -510,7 +510,7 @@ static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
                .name = "cam_cc_ife_0_csid_clk_src",
                .parent_data = cam_cc_parent_data_3,
                .num_parents = 6,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -524,7 +524,7 @@ static struct clk_rcg2 cam_cc_ife_1_clk_src = {
                .name = "cam_cc_ife_1_clk_src",
                .parent_data = cam_cc_parent_data_4,
                .num_parents = 4,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -538,7 +538,7 @@ static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
                .name = "cam_cc_ife_1_csid_clk_src",
                .parent_data = cam_cc_parent_data_3,
                .num_parents = 6,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -553,7 +553,7 @@ static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
                .parent_data = cam_cc_parent_data_4,
                .num_parents = 4,
                .flags = CLK_SET_RATE_PARENT,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -567,7 +567,7 @@ static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
                .name = "cam_cc_ife_lite_csid_clk_src",
                .parent_data = cam_cc_parent_data_3,
                .num_parents = 6,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -590,7 +590,7 @@ static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
                .name = "cam_cc_ipe_0_clk_src",
                .parent_data = cam_cc_parent_data_2,
                .num_parents = 5,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -613,7 +613,7 @@ static struct clk_rcg2 cam_cc_jpeg_clk_src = {
                .name = "cam_cc_jpeg_clk_src",
                .parent_data = cam_cc_parent_data_2,
                .num_parents = 5,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -635,7 +635,7 @@ static struct clk_rcg2 cam_cc_lrme_clk_src = {
                .name = "cam_cc_lrme_clk_src",
                .parent_data = cam_cc_parent_data_6,
                .num_parents = 5,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -656,7 +656,7 @@ static struct clk_rcg2 cam_cc_mclk0_clk_src = {
                .name = "cam_cc_mclk0_clk_src",
                .parent_data = cam_cc_parent_data_1,
                .num_parents = 3,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -670,7 +670,7 @@ static struct clk_rcg2 cam_cc_mclk1_clk_src = {
                .name = "cam_cc_mclk1_clk_src",
                .parent_data = cam_cc_parent_data_1,
                .num_parents = 3,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -684,7 +684,7 @@ static struct clk_rcg2 cam_cc_mclk2_clk_src = {
                .name = "cam_cc_mclk2_clk_src",
                .parent_data = cam_cc_parent_data_1,
                .num_parents = 3,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -698,7 +698,7 @@ static struct clk_rcg2 cam_cc_mclk3_clk_src = {
                .name = "cam_cc_mclk3_clk_src",
                .parent_data = cam_cc_parent_data_1,
                .num_parents = 3,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -712,7 +712,7 @@ static struct clk_rcg2 cam_cc_mclk4_clk_src = {
                .name = "cam_cc_mclk4_clk_src",
                .parent_data = cam_cc_parent_data_1,
                .num_parents = 3,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -732,7 +732,7 @@ static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
                .parent_data = cam_cc_parent_data_0,
                .num_parents = 4,
                .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
index 42f13a2..05ff3b0 100644
@@ -730,7 +730,8 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw,
        struct clk_rate_request parent_req = { };
        struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
        struct clk_hw *xo, *p0, *p1, *p2;
-       unsigned long request, p0_rate;
+       unsigned long p0_rate;
+       u8 mux_div = cgfx->div;
        int ret;
 
        p0 = cgfx->hws[0];
@@ -750,14 +751,15 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw,
                return 0;
        }
 
-       request = req->rate;
-       if (cgfx->div > 1)
-               parent_req.rate = request = request * cgfx->div;
+       if (mux_div == 0)
+               mux_div = 1;
+
+       parent_req.rate = req->rate * mux_div;
 
        /* This has to be a fixed rate PLL */
        p0_rate = clk_hw_get_rate(p0);
 
-       if (request == p0_rate) {
+       if (parent_req.rate == p0_rate) {
                req->rate = req->best_parent_rate = p0_rate;
                req->best_parent_hw = p0;
                return 0;
@@ -765,7 +767,7 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw,
 
        if (req->best_parent_hw == p0) {
                /* Are we going back to a previously used rate? */
-               if (clk_hw_get_rate(p2) == request)
+               if (clk_hw_get_rate(p2) == parent_req.rate)
                        req->best_parent_hw = p2;
                else
                        req->best_parent_hw = p1;
@@ -780,8 +782,7 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw,
                return ret;
 
        req->rate = req->best_parent_rate = parent_req.rate;
-       if (cgfx->div > 1)
-               req->rate /= cgfx->div;
+       req->rate /= mux_div;
 
        return 0;
 }
index 91dc390..c623ce9 100644
@@ -510,9 +510,12 @@ static const struct clk_rpmh_desc clk_rpmh_sm8350 = {
        .num_clks = ARRAY_SIZE(sm8350_rpmh_clocks),
 };
 
+/* Resource name must match resource id present in cmd-db */
+DEFINE_CLK_RPMH_ARC(sc7280, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 4);
+
 static struct clk_hw *sc7280_rpmh_clocks[] = {
-       [RPMH_CXO_CLK]      = &sdm845_bi_tcxo.hw,
-       [RPMH_CXO_CLK_A]    = &sdm845_bi_tcxo_ao.hw,
+       [RPMH_CXO_CLK]      = &sc7280_bi_tcxo.hw,
+       [RPMH_CXO_CLK_A]    = &sc7280_bi_tcxo_ao.hw,
        [RPMH_LN_BB_CLK2]   = &sdm845_ln_bb_clk2.hw,
        [RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw,
        [RPMH_RF_CLK1]      = &sdm845_rf_clk1.hw,
index 88e896a..da8b627 100644
@@ -620,7 +620,7 @@ static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
                .name = "gcc_sdcc1_apps_clk_src",
                .parent_data = gcc_parent_data_1,
                .num_parents = 5,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_floor_ops,
        },
 };
 
@@ -642,7 +642,7 @@ static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
                .name = "gcc_sdcc1_ice_core_clk_src",
                .parent_data = gcc_parent_data_0,
                .num_parents = 4,
-               .ops = &clk_rcg2_floor_ops,
+               .ops = &clk_rcg2_ops,
        },
 };
 
index 43ecd50..cf94a12 100644
@@ -99,7 +99,7 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
                val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
                val &= GENMASK(socfpgaclk->width - 1, 0);
                /* Check for GPIO_DB_CLK by its offset */
-               if ((int) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET)
+               if ((uintptr_t) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET)
                        div = val + 1;
                else
                        div = (1 << val);
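The cast change matters only on 64-bit builds: masking the low bits gives the same answer either way, but converting a pointer to a 32-bit int discards its upper half and triggers pointer-to-int-cast warnings, whereas uintptr_t is defined to hold a full pointer value. A contrived sketch, with a hypothetical mapping:

        void __iomem *reg = ioremap(res->start, SZ_4K);
        unsigned long bits;

        bits = (int)reg & 0xff;         /* truncating conversion, warns on 64-bit */
        bits = (uintptr_t)reg & 0xff;   /* lossless: uintptr_t spans a pointer */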
index ef2a974..75bc401 100644
@@ -31,7 +31,7 @@ struct stm32_timer_cnt {
        struct counter_device counter;
        struct regmap *regmap;
        struct clk *clk;
-       u32 ceiling;
+       u32 max_arr;
        bool enabled;
        struct stm32_timer_regs bak;
 };
@@ -44,13 +44,14 @@ struct stm32_timer_cnt {
  * @STM32_COUNT_ENCODER_MODE_3: counts on both TI1FP1 and TI2FP2 edges
  */
 enum stm32_count_function {
-       STM32_COUNT_SLAVE_MODE_DISABLED = -1,
+       STM32_COUNT_SLAVE_MODE_DISABLED,
        STM32_COUNT_ENCODER_MODE_1,
        STM32_COUNT_ENCODER_MODE_2,
        STM32_COUNT_ENCODER_MODE_3,
 };
 
 static enum counter_count_function stm32_count_functions[] = {
+       [STM32_COUNT_SLAVE_MODE_DISABLED] = COUNTER_COUNT_FUNCTION_INCREASE,
        [STM32_COUNT_ENCODER_MODE_1] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A,
        [STM32_COUNT_ENCODER_MODE_2] = COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B,
        [STM32_COUNT_ENCODER_MODE_3] = COUNTER_COUNT_FUNCTION_QUADRATURE_X4,
@@ -73,8 +74,10 @@ static int stm32_count_write(struct counter_device *counter,
                             const unsigned long val)
 {
        struct stm32_timer_cnt *const priv = counter->priv;
+       u32 ceiling;
 
-       if (val > priv->ceiling)
+       regmap_read(priv->regmap, TIM_ARR, &ceiling);
+       if (val > ceiling)
                return -EINVAL;
 
        return regmap_write(priv->regmap, TIM_CNT, val);
@@ -90,6 +93,9 @@ static int stm32_count_function_get(struct counter_device *counter,
        regmap_read(priv->regmap, TIM_SMCR, &smcr);
 
        switch (smcr & TIM_SMCR_SMS) {
+       case 0:
+               *function = STM32_COUNT_SLAVE_MODE_DISABLED;
+               return 0;
        case 1:
                *function = STM32_COUNT_ENCODER_MODE_1;
                return 0;
@@ -99,9 +105,9 @@ static int stm32_count_function_get(struct counter_device *counter,
        case 3:
                *function = STM32_COUNT_ENCODER_MODE_3;
                return 0;
+       default:
+               return -EINVAL;
        }
-
-       return -EINVAL;
 }
 
 static int stm32_count_function_set(struct counter_device *counter,
@@ -112,6 +118,9 @@ static int stm32_count_function_set(struct counter_device *counter,
        u32 cr1, sms;
 
        switch (function) {
+       case STM32_COUNT_SLAVE_MODE_DISABLED:
+               sms = 0;
+               break;
        case STM32_COUNT_ENCODER_MODE_1:
                sms = 1;
                break;
@@ -122,8 +131,7 @@ static int stm32_count_function_set(struct counter_device *counter,
                sms = 3;
                break;
        default:
-               sms = 0;
-               break;
+               return -EINVAL;
        }
 
        /* Store enable status */
@@ -131,10 +139,6 @@ static int stm32_count_function_set(struct counter_device *counter,
 
        regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
 
-       /* TIMx_ARR register shouldn't be buffered (ARPE=0) */
-       regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
-       regmap_write(priv->regmap, TIM_ARR, priv->ceiling);
-
        regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms);
 
        /* Make sure that registers are updated */
@@ -185,11 +189,13 @@ static ssize_t stm32_count_ceiling_write(struct counter_device *counter,
        if (ret)
                return ret;
 
+       if (ceiling > priv->max_arr)
+               return -ERANGE;
+
        /* TIMx_ARR register shouldn't be buffered (ARPE=0) */
        regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
        regmap_write(priv->regmap, TIM_ARR, ceiling);
 
-       priv->ceiling = ceiling;
        return len;
 }
 
@@ -274,31 +280,36 @@ static int stm32_action_get(struct counter_device *counter,
        size_t function;
        int err;
 
-       /* Default action mode (e.g. STM32_COUNT_SLAVE_MODE_DISABLED) */
-       *action = STM32_SYNAPSE_ACTION_NONE;
-
        err = stm32_count_function_get(counter, count, &function);
        if (err)
-               return 0;
+               return err;
 
        switch (function) {
+       case STM32_COUNT_SLAVE_MODE_DISABLED:
+               /* counts on internal clock when CEN=1 */
+               *action = STM32_SYNAPSE_ACTION_NONE;
+               return 0;
        case STM32_COUNT_ENCODER_MODE_1:
                /* counts up/down on TI1FP1 edge depending on TI2FP2 level */
                if (synapse->signal->id == count->synapses[0].signal->id)
                        *action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
-               break;
+               else
+                       *action = STM32_SYNAPSE_ACTION_NONE;
+               return 0;
        case STM32_COUNT_ENCODER_MODE_2:
                /* counts up/down on TI2FP2 edge depending on TI1FP1 level */
                if (synapse->signal->id == count->synapses[1].signal->id)
                        *action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
-               break;
+               else
+                       *action = STM32_SYNAPSE_ACTION_NONE;
+               return 0;
        case STM32_COUNT_ENCODER_MODE_3:
                /* counts up/down on both TI1FP1 and TI2FP2 edges */
                *action = STM32_SYNAPSE_ACTION_BOTH_EDGES;
-               break;
+               return 0;
+       default:
+               return -EINVAL;
        }
-
-       return 0;
 }
 
 static const struct counter_ops stm32_timer_cnt_ops = {
@@ -359,7 +370,7 @@ static int stm32_timer_cnt_probe(struct platform_device *pdev)
 
        priv->regmap = ddata->regmap;
        priv->clk = ddata->clk;
-       priv->ceiling = ddata->max_arr;
+       priv->max_arr = ddata->max_arr;
 
        priv->counter.name = dev_name(dev);
        priv->counter.parent = dev;
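
For context, the ceiling path above now validates the requested value against the timer's maximum auto-reload (max_arr) instead of caching a ceiling copy in priv. A minimal, runnable userspace sketch of the same validate-then-write pattern; the struct and function names are illustrative stand-ins, not the driver's regmap API:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver state; not the real regmap API. */
struct cnt_priv {
        uint32_t max_arr;       /* hardware limit of the auto-reload register */
        uint32_t arr;           /* simulated TIM_ARR register */
};

static int cnt_ceiling_write(struct cnt_priv *priv, uint32_t ceiling)
{
        /* Reject values the counter hardware cannot represent. */
        if (ceiling > priv->max_arr)
                return -ERANGE;

        priv->arr = ceiling;    /* regmap_write(..., TIM_ARR, ceiling) in the driver */
        return 0;
}

int main(void)
{
        struct cnt_priv priv = { .max_arr = 0xFFFF };   /* 16-bit timer */

        printf("%d\n", cnt_ceiling_write(&priv, 0x1234));       /* 0 */
        printf("%d\n", cnt_ceiling_write(&priv, 0x10000));      /* -ERANGE */
        return 0;
}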
index d3f756f..67e56cf 100644
@@ -267,7 +267,7 @@ struct freq_attr cpufreq_freq_attr_##_name##_freqs =     \
 __ATTR_RO(_name##_frequencies)
 
 /*
- * show_scaling_available_frequencies - show available normal frequencies for
+ * scaling_available_frequencies_show - show available normal frequencies for
  * the specified CPU
  */
 static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy,
@@ -279,7 +279,7 @@ cpufreq_attr_available_freq(scaling_available);
 EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
 
 /*
- * show_available_boost_freqs - show available boost frequencies for
+ * scaling_boost_frequencies_show - show available boost frequencies for
  * the specified CPU
  */
 static ssize_t scaling_boost_frequencies_show(struct cpufreq_policy *policy,
index d64fc03..ce0f5ef 100644
@@ -123,7 +123,9 @@ static const struct dma_fence_ops dma_fence_stub_ops = {
 /**
  * dma_fence_get_stub - return a signaled fence
  *
- * Return a stub fence which is already signaled.
+ * Return a stub fence which is already signaled. The fence's
+ * timestamp corresponds to the first time this function is
+ * called after boot.
  */
 struct dma_fence *dma_fence_get_stub(void)
 {
@@ -141,6 +143,29 @@ struct dma_fence *dma_fence_get_stub(void)
 }
 EXPORT_SYMBOL(dma_fence_get_stub);
 
+/**
+ * dma_fence_allocate_private_stub - return a private, signaled fence
+ *
+ * Return a newly allocated and signaled stub fence.
+ */
+struct dma_fence *dma_fence_allocate_private_stub(void)
+{
+       struct dma_fence *fence;
+
+       fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+       if (fence == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       dma_fence_init(fence,
+                      &dma_fence_stub_ops,
+                      &dma_fence_stub_lock,
+                      0, 0);
+       dma_fence_signal(fence);
+
+       return fence;
+}
+EXPORT_SYMBOL(dma_fence_allocate_private_stub);
+
 /**
  * dma_fence_context_alloc - allocate an array of fence contexts
  * @num: number of contexts to allocate
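
A hedged usage sketch for the two stub helpers added above (kernel context; this is an illustrative caller, not code from the patch). The global stub is shared and must not be freed, while the private stub is a fresh allocation that can fail, so it is checked with IS_ERR():

#include <linux/dma-fence.h>
#include <linux/err.h>

static int example_use_stub_fences(void)
{
        struct dma_fence *shared, *priv;

        /* Shared, already-signaled stub; timestamp fixed at first use after boot. */
        shared = dma_fence_get_stub();
        dma_fence_put(shared);

        /* Freshly allocated, already-signaled stub; allocation can fail. */
        priv = dma_fence_allocate_private_stub();
        if (IS_ERR(priv))
                return PTR_ERR(priv);
        dma_fence_put(priv);

        return 0;
}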
index 0a6438c..e7a9561 100644
@@ -1241,6 +1241,7 @@ int extcon_dev_register(struct extcon_dev *edev)
                                sizeof(*edev->nh), GFP_KERNEL);
        if (!edev->nh) {
                ret = -ENOMEM;
+               device_unregister(&edev->dev);
                goto err_dev;
        }
 
index 5fd6a60..88ed971 100644
@@ -346,6 +346,7 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        struct client *client = file->private_data;
        spinlock_t *client_list_lock = &client->lynx->client_list_lock;
        struct nosy_stats stats;
+       int ret;
 
        switch (cmd) {
        case NOSY_IOC_GET_STATS:
@@ -360,11 +361,15 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        return 0;
 
        case NOSY_IOC_START:
+               ret = -EBUSY;
                spin_lock_irq(client_list_lock);
-               list_add_tail(&client->link, &client->lynx->client_list);
+               if (list_empty(&client->link)) {
+                       list_add_tail(&client->link, &client->lynx->client_list);
+                       ret = 0;
+               }
                spin_unlock_irq(client_list_lock);
 
-               return 0;
+               return ret;
 
        case NOSY_IOC_STOP:
                spin_lock_irq(client_list_lock);
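
The NOSY_IOC_START change above guards against a second start: calling list_add_tail() on a node that is already linked corrupts the list, so the node is added only when list_empty() says it is unlinked (which holds as long as removal uses list_del_init()). A condensed kernel-style sketch of the guard, with illustrative names:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct example_client {
        struct list_head link;  /* INIT_LIST_HEAD() on open, list_del_init() on stop */
};

static LIST_HEAD(example_client_list);
static DEFINE_SPINLOCK(example_client_list_lock);

static int example_client_start(struct example_client *c)
{
        int ret = -EBUSY;

        spin_lock_irq(&example_client_list_lock);
        if (list_empty(&c->link)) {     /* true only for an unlinked node */
                list_add_tail(&c->link, &example_client_list);
                ret = 0;
        }
        spin_unlock_irq(&example_client_list_lock);

        return ret;
}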
index df3f9bc..4b7ee3f 100644
@@ -927,7 +927,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
        }
 
        /* first try to find a slot in an existing linked list entry */
-       for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) {
+       for (prsv = efi_memreserve_root->next; prsv; ) {
                rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
                index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
                if (index < rsv->size) {
@@ -937,6 +937,7 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
                        memunmap(rsv);
                        return efi_mem_reserve_iomem(addr, size);
                }
+               prsv = rsv->next;
                memunmap(rsv);
        }
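
The efi_mem_reserve_persistent() change above is a use-after-unmap repair: the old for-loop increment prsv = rsv->next dereferenced rsv after the loop body had already called memunmap(rsv). The fix loads the next link while the mapping is still live. A runnable userspace analogue, with malloc/free standing in for memremap/memunmap:

#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *next;
        int payload;
};

static void walk_and_release(struct node *head)
{
        struct node *cur = head;

        while (cur) {
                struct node *next = cur->next;  /* read the link first... */

                printf("payload %d\n", cur->payload);
                free(cur);                      /* ...then release the node */
                cur = next;                     /* cur is never touched again */
        }
}

int main(void)
{
        struct node *b = calloc(1, sizeof(*b));
        struct node *a = calloc(1, sizeof(*a));

        b->payload = 2;
        a->next = b;
        a->payload = 1;
        walk_and_release(a);
        return 0;
}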
 
index 41c1d00..abdc8a6 100644
@@ -484,6 +484,10 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
                                }
                        }
 
+                       break;
+               case EFI_UNSUPPORTED:
+                       err = -EOPNOTSUPP;
+                       status = EFI_NOT_FOUND;
                        break;
                case EFI_NOT_FOUND:
                        break;
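
The new EFI_UNSUPPORTED arm records -EOPNOTSUPP for the caller and then reuses EFI_NOT_FOUND as the enumeration loop's normal stop condition, so the walk terminates cleanly instead of looping on firmware that does not implement GetNextVariableName(). A small runnable sketch of that translate-and-terminate idea, with illustrative status values:

#include <errno.h>
#include <stdio.h>

enum efi_status { EFI_SUCCESS_X, EFI_NOT_FOUND_X, EFI_UNSUPPORTED_X };

/* Translate a terminal firmware status into an errno for the caller while
 * normalizing it to the loop's usual stop condition. */
static int classify(enum efi_status *status)
{
        switch (*status) {
        case EFI_UNSUPPORTED_X:
                *status = EFI_NOT_FOUND_X;      /* make the walk stop cleanly */
                return -EOPNOTSUPP;
        default:
                return 0;
        }
}

int main(void)
{
        enum efi_status s = EFI_UNSUPPORTED_X;
        int err = classify(&s);

        printf("err=%d stopped=%d\n", err, s == EFI_NOT_FOUND_X);
        return 0;
}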
index 50bb2a6..62f0d1a 100644
@@ -2,7 +2,7 @@
 /*
  * Turris Mox rWTM firmware driver
  *
- * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
+ * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
  */
 
 #include <linux/armada-37xx-rwtm-mailbox.h>
@@ -547,4 +547,4 @@ module_platform_driver(turris_mox_rwtm_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Turris Mox rWTM firmware driver");
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
index 8299909..61f9efd 100644
@@ -2,7 +2,7 @@
 /*
  *  Turris Mox Moxtet GPIO expander
  *
- *  Copyright (C) 2018 Marek Behun <marek.behun@nic.cz>
+ *  Copyright (C) 2018 Marek Behún <kabel@kernel.org>
  */
 
 #include <linux/bitops.h>
@@ -174,6 +174,6 @@ static struct moxtet_driver moxtet_gpio_driver = {
 };
 module_moxtet_driver(moxtet_gpio_driver);
 
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
 MODULE_DESCRIPTION("Turris Mox Moxtet GPIO expander");
 MODULE_LICENSE("GPL v2");
index 7ec0822..6367646 100644
@@ -571,6 +571,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
                               struct lock_class_key *lock_key,
                               struct lock_class_key *request_key)
 {
+       struct fwnode_handle *fwnode = gc->parent ? dev_fwnode(gc->parent) : NULL;
        unsigned long   flags;
        int             ret = 0;
        unsigned        i;
@@ -594,6 +595,12 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
 
        of_gpio_dev_init(gc, gdev);
 
+       /*
+        * Assign the fwnode depending on the result of the previous calls;
+        * if none of them set one, fall back to the parent's fwnode.
+        */
+       gdev->dev.fwnode = dev_fwnode(&gdev->dev) ?: fwnode;
+
        gdev->id = ida_alloc(&gpio_ida, GFP_KERNEL);
        if (gdev->id < 0) {
                ret = gdev->id;
@@ -4256,7 +4263,8 @@ static int __init gpiolib_dev_init(void)
                return ret;
        }
 
-       if (driver_register(&gpio_stub_drv) < 0) {
+       ret = driver_register(&gpio_stub_drv);
+       if (ret < 0) {
                pr_err("gpiolib: could not register GPIO stub driver\n");
                bus_unregister(&gpio_bus_type);
                return ret;
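
Two small idioms show up in the gpiolib hunks above: the GNU conditional a ?: b (evaluate a once, use it unless it is 0/NULL, otherwise use b) for the fwnode fallback, and capturing driver_register()'s return value instead of discarding it so the caller sees the real errno rather than a stale ret. A runnable sketch of the fallback operator (gcc/clang extension):

#include <stdio.h>

static const char *pick_fwnode(const char *own, const char *parent)
{
        /* GNU ?: evaluates 'own' exactly once and falls back when it is NULL. */
        return own ?: parent;
}

int main(void)
{
        printf("%s\n", pick_fwnode(NULL, "parent-fwnode"));             /* parent-fwnode */
        printf("%s\n", pick_fwnode("own-fwnode", "parent-fwnode"));     /* own-fwnode */
        return 0;
}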
index 741b688..ee85e8a 100644
@@ -71,7 +71,7 @@ amdgpu-y += \
        vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
        vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
        arct_reg_init.o navi12_reg_init.o mxgpu_nv.o sienna_cichlid_reg_init.o vangogh_reg_init.o \
-       nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o
+       nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o
 
 # add DF block
 amdgpu-y += \
@@ -88,7 +88,7 @@ amdgpu-y += \
 
 # add UMC block
 amdgpu-y += \
-       umc_v6_1.o umc_v6_0.o umc_v8_7.o
+       umc_v6_0.o umc_v6_1.o umc_v6_7.o umc_v8_7.o
 
 # add IH block
 amdgpu-y += \
@@ -179,9 +179,14 @@ amdgpu-y += \
        smuio_v11_0_6.o \
        smuio_v13_0.o
 
+# add reset block
+amdgpu-y += \
+       amdgpu_reset.o
+
 # add amdkfd interfaces
 amdgpu-y += amdgpu_amdkfd.o
 
 ifneq ($(CONFIG_HSA_AMD),)
 AMDKFD_PATH := ../amdkfd
 include $(FULL_AMD_PATH)/amdkfd/Makefile
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
new file mode 100644
index 0000000..65b1dca
--- /dev/null
@@ -0,0 +1,407 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "aldebaran.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_dpm.h"
+#include "amdgpu_job.h"
+#include "amdgpu_ring.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_psp.h"
+#include "amdgpu_xgmi.h"
+
+static struct amdgpu_reset_handler *
+aldebaran_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
+                           struct amdgpu_reset_context *reset_context)
+{
+       struct amdgpu_reset_handler *handler;
+       struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+       if (reset_context->method != AMD_RESET_METHOD_NONE) {
+               dev_dbg(adev->dev, "Getting reset handler for method %d\n",
+                       reset_context->method);
+               list_for_each_entry(handler, &reset_ctl->reset_handlers,
+                                    handler_list) {
+                       if (handler->reset_method == reset_context->method)
+                               return handler;
+               }
+       }
+
+       if (adev->gmc.xgmi.connected_to_cpu) {
+               list_for_each_entry(handler, &reset_ctl->reset_handlers,
+                                    handler_list) {
+                       if (handler->reset_method == AMD_RESET_METHOD_MODE2) {
+                               reset_context->method = AMD_RESET_METHOD_MODE2;
+                               return handler;
+                       }
+               }
+       }
+
+       dev_dbg(adev->dev, "Reset handler not found!\n");
+
+       return NULL;
+}
+
+static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev)
+{
+       int r = 0, i;
+
+       amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+       amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+
+       for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+               if (!(adev->ip_blocks[i].version->type ==
+                             AMD_IP_BLOCK_TYPE_GFX ||
+                     adev->ip_blocks[i].version->type ==
+                             AMD_IP_BLOCK_TYPE_SDMA))
+                       continue;
+
+               r = adev->ip_blocks[i].version->funcs->suspend(adev);
+
+               if (r) {
+                       dev_err(adev->dev,
+                               "suspend of IP block <%s> failed %d\n",
+                               adev->ip_blocks[i].version->funcs->name, r);
+                       return r;
+               }
+
+               adev->ip_blocks[i].status.hw = false;
+       }
+
+       return r;
+}
+
+static int
+aldebaran_mode2_prepare_hwcontext(struct amdgpu_reset_control *reset_ctl,
+                                 struct amdgpu_reset_context *reset_context)
+{
+       int r = 0;
+       struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+       dev_dbg(adev->dev, "Aldebaran prepare hw context\n");
+       /* Suspend IP blocks only on bare metal, where the ASIC is actually HW reset */
+       if (!amdgpu_sriov_vf(adev))
+               r = aldebaran_mode2_suspend_ip(adev);
+
+       return r;
+}
+
+static void aldebaran_async_reset(struct work_struct *work)
+{
+       struct amdgpu_reset_handler *handler;
+       struct amdgpu_reset_control *reset_ctl =
+               container_of(work, struct amdgpu_reset_control, reset_work);
+       struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+       list_for_each_entry(handler, &reset_ctl->reset_handlers,
+                            handler_list) {
+               if (handler->reset_method == reset_ctl->active_reset) {
+                       dev_dbg(adev->dev, "Resetting device\n");
+                       handler->do_reset(adev);
+                       break;
+               }
+       }
+}
+
+static int aldebaran_mode2_reset(struct amdgpu_device *adev)
+{
+       /* disable BM */
+       pci_clear_master(adev->pdev);
+       adev->asic_reset_res = amdgpu_dpm_mode2_reset(adev);
+       return adev->asic_reset_res;
+}
+
+static int
+aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
+                             struct amdgpu_reset_context *reset_context)
+{
+       struct amdgpu_device *tmp_adev = NULL;
+       struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+       int r = 0;
+
+       dev_dbg(adev->dev, "aldebaran perform hw reset\n");
+       if (reset_context->hive == NULL) {
+               /* Wrong context, return error */
+               return -EINVAL;
+       }
+
+       list_for_each_entry(tmp_adev, &reset_context->hive->device_list,
+                            gmc.xgmi.head) {
+               mutex_lock(&tmp_adev->reset_cntl->reset_lock);
+               tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_MODE2;
+       }
+       /*
+        * Mode2 reset doesn't need any sync between nodes in an XGMI hive;
+        * instead, launch the resets together so that they can complete
+        * asynchronously on multiple nodes.
+        */
+       list_for_each_entry(tmp_adev, &reset_context->hive->device_list,
+                            gmc.xgmi.head) {
+               /* For XGMI run all resets in parallel to speed up the process */
+               if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+                       if (!queue_work(system_unbound_wq,
+                                       &tmp_adev->reset_cntl->reset_work))
+                               r = -EALREADY;
+               } else {
+                       r = aldebaran_mode2_reset(tmp_adev);
+               }
+               if (r) {
+                       dev_err(tmp_adev->dev,
+                               "ASIC reset failed with error, %d for drm dev, %s",
+                               r, adev_to_drm(tmp_adev)->unique);
+                       break;
+               }
+       }
+
+       /* For XGMI, wait for all resets to complete before proceeding */
+       if (!r) {
+               list_for_each_entry(tmp_adev,
+                                    &reset_context->hive->device_list,
+                                    gmc.xgmi.head) {
+                       if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+                               flush_work(&tmp_adev->reset_cntl->reset_work);
+                               r = tmp_adev->asic_reset_res;
+                               if (r)
+                                       break;
+                       }
+               }
+       }
+
+       list_for_each_entry(tmp_adev, &reset_context->hive->device_list,
+                            gmc.xgmi.head) {
+               mutex_unlock(&tmp_adev->reset_cntl->reset_lock);
+               tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_NONE;
+       }
+
+       return r;
+}
+
+static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
+{
+       struct amdgpu_firmware_info *ucode_list[AMDGPU_UCODE_ID_MAXIMUM];
+       struct amdgpu_firmware_info *ucode;
+       struct amdgpu_ip_block *cmn_block;
+       int ucode_count = 0;
+       int i, r;
+
+       dev_dbg(adev->dev, "Reloading ucodes after reset\n");
+       for (i = 0; i < adev->firmware.max_ucodes; i++) {
+               ucode = &adev->firmware.ucode[i];
+               if (!ucode->fw)
+                       continue;
+               switch (ucode->ucode_id) {
+               case AMDGPU_UCODE_ID_SDMA0:
+               case AMDGPU_UCODE_ID_SDMA1:
+               case AMDGPU_UCODE_ID_SDMA2:
+               case AMDGPU_UCODE_ID_SDMA3:
+               case AMDGPU_UCODE_ID_SDMA4:
+               case AMDGPU_UCODE_ID_SDMA5:
+               case AMDGPU_UCODE_ID_SDMA6:
+               case AMDGPU_UCODE_ID_SDMA7:
+               case AMDGPU_UCODE_ID_CP_MEC1:
+               case AMDGPU_UCODE_ID_CP_MEC1_JT:
+               case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
+               case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
+               case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
+               case AMDGPU_UCODE_ID_RLC_G:
+                       ucode_list[ucode_count++] = ucode;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       /* Reinit NBIF block */
+       cmn_block =
+               amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_COMMON);
+       if (unlikely(!cmn_block)) {
+               dev_err(adev->dev, "Failed to get BIF handle\n");
+               return -EINVAL;
+       }
+       r = cmn_block->version->funcs->resume(adev);
+       if (r)
+               return r;
+
+       /* Reinit GFXHUB */
+       adev->gfxhub.funcs->init(adev);
+       r = adev->gfxhub.funcs->gart_enable(adev);
+       if (r) {
+               dev_err(adev->dev, "GFXHUB gart reenable failed after reset\n");
+               return r;
+       }
+
+       /* Reload GFX firmware */
+       r = psp_load_fw_list(&adev->psp, ucode_list, ucode_count);
+       if (r) {
+               dev_err(adev->dev, "GFX ucode load failed after reset\n");
+               return r;
+       }
+
+       /* Resume RLC, FW needs RLC alive to complete reset process */
+       adev->gfx.rlc.funcs->resume(adev);
+
+       /* Wait for FW reset event complete */
+       r = smu_wait_for_event(adev, SMU_EVENT_RESET_COMPLETE, 0);
+       if (r) {
+               dev_err(adev->dev,
+                       "Failed to get response from firmware after reset\n");
+               return r;
+       }
+
+       for (i = 0; i < adev->num_ip_blocks; i++) {
+               if (!(adev->ip_blocks[i].version->type ==
+                             AMD_IP_BLOCK_TYPE_GFX ||
+                     adev->ip_blocks[i].version->type ==
+                             AMD_IP_BLOCK_TYPE_SDMA))
+                       continue;
+               r = adev->ip_blocks[i].version->funcs->resume(adev);
+               if (r) {
+                       dev_err(adev->dev,
+                               "resume of IP block <%s> failed %d\n",
+                               adev->ip_blocks[i].version->funcs->name, r);
+                       return r;
+               }
+
+               adev->ip_blocks[i].status.hw = true;
+       }
+
+       for (i = 0; i < adev->num_ip_blocks; i++) {
+               if (!(adev->ip_blocks[i].version->type ==
+                             AMD_IP_BLOCK_TYPE_GFX ||
+                     adev->ip_blocks[i].version->type ==
+                             AMD_IP_BLOCK_TYPE_SDMA ||
+                     adev->ip_blocks[i].version->type ==
+                             AMD_IP_BLOCK_TYPE_COMMON))
+                       continue;
+
+               if (adev->ip_blocks[i].version->funcs->late_init) {
+                       r = adev->ip_blocks[i].version->funcs->late_init(
+                               (void *)adev);
+                       if (r) {
+                               dev_err(adev->dev,
+                                       "late_init of IP block <%s> failed %d after reset\n",
+                                       adev->ip_blocks[i].version->funcs->name,
+                                       r);
+                               return r;
+                       }
+               }
+               adev->ip_blocks[i].status.late_initialized = true;
+       }
+
+       amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
+       amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
+
+       return r;
+}
+
+static int
+aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
+                                 struct amdgpu_reset_context *reset_context)
+{
+       int r = 0;
+       struct amdgpu_device *tmp_adev = NULL;
+
+       if (reset_context->hive == NULL) {
+               /* Wrong context, return error */
+               return -EINVAL;
+       }
+
+       list_for_each_entry(tmp_adev, &reset_context->hive->device_list,
+                            gmc.xgmi.head) {
+               dev_info(tmp_adev->dev,
+                        "GPU reset succeeded, trying to resume\n");
+               r = aldebaran_mode2_restore_ip(tmp_adev);
+               if (r)
+                       goto end;
+
+               /*
+                * Add this ASIC back to the tracked list, as its reset
+                * has already completed successfully.
+                */
+               amdgpu_register_gpu_instance(tmp_adev);
+
+               /* Resume RAS */
+               amdgpu_ras_resume(tmp_adev);
+
+               /* Update PSP FW topology after reset */
+               if (reset_context->hive &&
+                   tmp_adev->gmc.xgmi.num_physical_nodes > 1)
+                       r = amdgpu_xgmi_update_topology(reset_context->hive,
+                                                       tmp_adev);
+
+               if (!r) {
+                       amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
+
+                       r = amdgpu_ib_ring_tests(tmp_adev);
+                       if (r) {
+                               dev_err(tmp_adev->dev,
+                                       "ib ring test failed (%d).\n", r);
+                               r = -EAGAIN;
+                               tmp_adev->asic_reset_res = r;
+                               goto end;
+                       }
+               }
+       }
+
+end:
+       return r;
+}
+
+static struct amdgpu_reset_handler aldebaran_mode2_handler = {
+       .reset_method           = AMD_RESET_METHOD_MODE2,
+       .prepare_env            = NULL,
+       .prepare_hwcontext      = aldebaran_mode2_prepare_hwcontext,
+       .perform_reset          = aldebaran_mode2_perform_reset,
+       .restore_hwcontext      = aldebaran_mode2_restore_hwcontext,
+       .restore_env            = NULL,
+       .do_reset               = aldebaran_mode2_reset,
+};
+
+int aldebaran_reset_init(struct amdgpu_device *adev)
+{
+       struct amdgpu_reset_control *reset_ctl;
+
+       reset_ctl = kzalloc(sizeof(*reset_ctl), GFP_KERNEL);
+       if (!reset_ctl)
+               return -ENOMEM;
+
+       reset_ctl->handle = adev;
+       reset_ctl->async_reset = aldebaran_async_reset;
+       reset_ctl->active_reset = AMD_RESET_METHOD_NONE;
+       reset_ctl->get_reset_handler = aldebaran_get_reset_handler;
+
+       INIT_LIST_HEAD(&reset_ctl->reset_handlers);
+       INIT_WORK(&reset_ctl->reset_work, reset_ctl->async_reset);
+       /* Only mode2 is handled through reset control now */
+       amdgpu_reset_add_handler(reset_ctl, &aldebaran_mode2_handler);
+
+       adev->reset_cntl = reset_ctl;
+
+       return 0;
+}
+
+int aldebaran_reset_fini(struct amdgpu_device *adev)
+{
+       kfree(adev->reset_cntl);
+       adev->reset_cntl = NULL;
+       return 0;
+}
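
Taken together, the new file separates "which reset" from "how to reset": aldebaran_reset_init() installs the handler, get_reset_handler() selects one by method, and the handler then runs prepare, perform, and restore in order. A hedged sketch of that intended call order; the handler fields come from this file, but the caller itself is hypothetical and error handling is trimmed:

/* Hypothetical caller, for illustration only. */
static int example_reset_via_control(struct amdgpu_device *adev,
                                     struct amdgpu_reset_context *ctx)
{
        struct amdgpu_reset_handler *h;
        int r;

        h = adev->reset_cntl->get_reset_handler(adev->reset_cntl, ctx);
        if (!h)
                return -ENOSYS;

        r = h->prepare_hwcontext(adev->reset_cntl, ctx);        /* suspend GFX/SDMA */
        if (r)
                return r;

        r = h->perform_reset(adev->reset_cntl, ctx);            /* mode2 across the hive */
        if (r)
                return r;

        return h->restore_hwcontext(adev->reset_cntl, ctx);     /* reload ucode, resume */
}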
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.h b/drivers/gpu/drm/amd/amdgpu/aldebaran.h
new file mode 100644
index 0000000..a07db54
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __ALDEBARAN_H__
+#define __ALDEBARAN_H__
+
+#include "amdgpu.h"
+
+int aldebaran_reset_init(struct amdgpu_device *adev);
+int aldebaran_reset_fini(struct amdgpu_device *adev);
+
+#endif
index a037c22..dc3a692 100644
 #include "amdgpu_gfxhub.h"
 #include "amdgpu_df.h"
 #include "amdgpu_smuio.h"
-#include "amdgpu_hdp.h"
 
 #define MAX_GPU_INSTANCE               16
 
@@ -271,6 +270,8 @@ struct amdgpu_bo_va_mapping;
 struct amdgpu_atif;
 struct kfd_vm_fault_info;
 struct amdgpu_hive_info;
+struct amdgpu_reset_context;
+struct amdgpu_reset_control;
 
 enum amdgpu_cp_irq {
        AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP = 0,
@@ -589,6 +590,7 @@ struct amdgpu_allowed_register_entry {
 };
 
 enum amd_reset_method {
+       AMD_RESET_METHOD_NONE = -1,
        AMD_RESET_METHOD_LEGACY = 0,
        AMD_RESET_METHOD_MODE0,
        AMD_RESET_METHOD_MODE1,
@@ -920,6 +922,7 @@ struct amdgpu_device {
        struct amdgpu_irq_src           pageflip_irq;
        struct amdgpu_irq_src           hpd_irq;
        struct amdgpu_irq_src           dmub_trace_irq;
+       struct amdgpu_irq_src           dmub_outbox_irq;
 
        /* rings */
        u64                             fence_context;
@@ -1030,13 +1033,9 @@ struct amdgpu_device {
 
        /* s3/s4 mask */
        bool                            in_suspend;
-       bool                            in_hibernate;
-
-       /*
-        * The combination flag in_poweroff_reboot_com used to identify the poweroff
-        * and reboot opt in the s0i3 system-wide suspend.
-        */
-       bool                            in_poweroff_reboot_com;
+       bool                            in_s3;
+       bool                            in_s4;
+       bool                            in_s0ix;
 
        atomic_t                        in_gpu_reset;
        enum pp_mp1_state               mp1_state;
@@ -1078,6 +1077,8 @@ struct amdgpu_device {
 
        bool                            in_pci_err_recovery;
        struct pci_saved_state          *pci_state;
+
+       struct amdgpu_reset_control     *reset_cntl;
 };
 
 static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
@@ -1129,13 +1130,10 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
 
 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
-                                 struct amdgpu_job *job,
-                                 bool *need_full_reset_arg);
+                                struct amdgpu_reset_context *reset_context);
 
-int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
-                         struct list_head *device_list_handle,
-                         bool *need_full_reset_arg,
-                         bool skip_hw_reset);
+int amdgpu_do_asic_reset(struct list_head *device_list_handle,
+                        struct amdgpu_reset_context *reset_context);
 
 int emu_soc_asic_init(struct amdgpu_device *adev);
 
@@ -1275,8 +1273,9 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
                                             const u32 *registers,
                                             const u32 array_size);
 
-bool amdgpu_device_supports_atpx(struct drm_device *dev);
 int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
+bool amdgpu_device_supports_atpx(struct drm_device *dev);
+bool amdgpu_device_supports_px(struct drm_device *dev);
 bool amdgpu_device_supports_boco(struct drm_device *dev);
 bool amdgpu_device_supports_baco(struct drm_device *dev);
 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
@@ -1390,6 +1389,13 @@ void amdgpu_pci_resume(struct pci_dev *pdev);
 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev);
 bool amdgpu_device_load_pci_state(struct pci_dev *pdev);
 
+bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev);
+
+int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
+                              enum amd_clockgating_state state);
+int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
+                              enum amd_powergating_state state);
+
 #include "amdgpu_object.h"
 
 static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
index 1c6be53..5f6696a 100644
@@ -246,6 +246,7 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
        bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
        bp.type = ttm_bo_type_kernel;
        bp.resv = NULL;
+       bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 
        if (cp_mqd_gfx9)
                bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9;
@@ -317,6 +318,7 @@ int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        struct amdgpu_bo *bo = NULL;
+       struct amdgpu_bo_user *ubo;
        struct amdgpu_bo_param bp;
        int r;
 
@@ -327,14 +329,16 @@ int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
        bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
        bp.type = ttm_bo_type_device;
        bp.resv = NULL;
+       bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 
-       r = amdgpu_bo_create(adev, &bp, &bo);
+       r = amdgpu_bo_create_user(adev, &bp, &ubo);
        if (r) {
                dev_err(adev->dev,
                        "failed to allocate gws BO for amdkfd (%d)\n", r);
                return r;
        }
 
+       bo = &ubo->bo;
        *mem_obj = bo;
        return 0;
 }
@@ -495,8 +499,6 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
                *dma_buf_kgd = (struct kgd_dev *)adev;
        if (bo_size)
                *bo_size = amdgpu_bo_size(bo);
-       if (metadata_size)
-               *metadata_size = bo->metadata_size;
        if (metadata_buffer)
                r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
                                           metadata_size, &metadata_flags);
index e05648a..494b2e1 100644
@@ -1232,157 +1232,6 @@ int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *
        return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
 }
 
-int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
-                                             u16 *leakage_id)
-{
-       union set_voltage args;
-       int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
-       u8 frev, crev;
-
-       if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
-               return -EINVAL;
-
-       switch (crev) {
-       case 3:
-       case 4:
-               args.v3.ucVoltageType = 0;
-               args.v3.ucVoltageMode = ATOM_GET_LEAKAGE_ID;
-               args.v3.usVoltageLevel = 0;
-
-               amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-
-               *leakage_id = le16_to_cpu(args.v3.usVoltageLevel);
-               break;
-       default:
-               DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-int amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(struct amdgpu_device *adev,
-                                                            u16 *vddc, u16 *vddci,
-                                                            u16 virtual_voltage_id,
-                                                            u16 vbios_voltage_id)
-{
-       int index = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
-       u8 frev, crev;
-       u16 data_offset, size;
-       int i, j;
-       ATOM_ASIC_PROFILING_INFO_V2_1 *profile;
-       u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;
-
-       *vddc = 0;
-       *vddci = 0;
-
-       if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
-                                   &frev, &crev, &data_offset))
-               return -EINVAL;
-
-       profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
-               (adev->mode_info.atom_context->bios + data_offset);
-
-       switch (frev) {
-       case 1:
-               return -EINVAL;
-       case 2:
-               switch (crev) {
-               case 1:
-                       if (size < sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))
-                               return -EINVAL;
-                       leakage_bin = (u16 *)
-                               (adev->mode_info.atom_context->bios + data_offset +
-                                le16_to_cpu(profile->usLeakageBinArrayOffset));
-                       vddc_id_buf = (u16 *)
-                               (adev->mode_info.atom_context->bios + data_offset +
-                                le16_to_cpu(profile->usElbVDDC_IdArrayOffset));
-                       vddc_buf = (u16 *)
-                               (adev->mode_info.atom_context->bios + data_offset +
-                                le16_to_cpu(profile->usElbVDDC_LevelArrayOffset));
-                       vddci_id_buf = (u16 *)
-                               (adev->mode_info.atom_context->bios + data_offset +
-                                le16_to_cpu(profile->usElbVDDCI_IdArrayOffset));
-                       vddci_buf = (u16 *)
-                               (adev->mode_info.atom_context->bios + data_offset +
-                                le16_to_cpu(profile->usElbVDDCI_LevelArrayOffset));
-
-                       if (profile->ucElbVDDC_Num > 0) {
-                               for (i = 0; i < profile->ucElbVDDC_Num; i++) {
-                                       if (vddc_id_buf[i] == virtual_voltage_id) {
-                                               for (j = 0; j < profile->ucLeakageBinNum; j++) {
-                                                       if (vbios_voltage_id <= leakage_bin[j]) {
-                                                               *vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
-                                                               break;
-                                                       }
-                                               }
-                                               break;
-                                       }
-                               }
-                       }
-                       if (profile->ucElbVDDCI_Num > 0) {
-                               for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
-                                       if (vddci_id_buf[i] == virtual_voltage_id) {
-                                               for (j = 0; j < profile->ucLeakageBinNum; j++) {
-                                                       if (vbios_voltage_id <= leakage_bin[j]) {
-                                                               *vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
-                                                               break;
-                                                       }
-                                               }
-                                               break;
-                                       }
-                               }
-                       }
-                       break;
-               default:
-                       DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
-                       return -EINVAL;
-               }
-               break;
-       default:
-               DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-union get_voltage_info {
-       struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 in;
-       struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 evv_out;
-};
-
-int amdgpu_atombios_get_voltage_evv(struct amdgpu_device *adev,
-                                   u16 virtual_voltage_id,
-                                   u16 *voltage)
-{
-       int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo);
-       u32 entry_id;
-       u32 count = adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count;
-       union get_voltage_info args;
-
-       for (entry_id = 0; entry_id < count; entry_id++) {
-               if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v ==
-                   virtual_voltage_id)
-                       break;
-       }
-
-       if (entry_id >= count)
-               return -EINVAL;
-
-       args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
-       args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
-       args.in.usVoltageLevel = cpu_to_le16(virtual_voltage_id);
-       args.in.ulSCLKFreq =
-               cpu_to_le32(adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);
-
-       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-
-       *voltage = le16_to_cpu(args.evv_out.usVoltageLevel);
-
-       return 0;
-}
-
 union voltage_object_info {
        struct _ATOM_VOLTAGE_OBJECT_INFO v1;
        struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2;
@@ -1913,7 +1762,7 @@ static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
        struct amdgpu_device *adev = drm_to_adev(ddev);
        struct atom_context *ctx = adev->mode_info.atom_context;
 
-       return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
+       return sysfs_emit(buf, "%s\n", ctx->vbios_version);
 }
 
 static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
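
The snprintf to sysfs_emit() conversions in these hunks are more than cosmetic: sysfs_emit() hard-codes the PAGE_SIZE bound and warns if buf is not the page-aligned buffer sysfs handed in, catching misuse that snprintf silently allows. A kernel-context sketch of a converted show() callback (illustrative attribute, not from the patch):

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        /* Bound and buffer-alignment checks happen inside sysfs_emit(). */
        return sysfs_emit(buf, "%s\n", "value");
}
static DEVICE_ATTR_RO(example);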
index 1321ec0..8cc0222 100644
@@ -168,18 +168,6 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
 void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
                                             u32 eng_clock, u32 mem_clock);
 
-int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
-                                             u16 *leakage_id);
-
-int amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(struct amdgpu_device *adev,
-                                                            u16 *vddc, u16 *vddci,
-                                                            u16 virtual_voltage_id,
-                                                            u16 vbios_voltage_id);
-
-int amdgpu_atombios_get_voltage_evv(struct amdgpu_device *adev,
-                                   u16 virtual_voltage_id,
-                                   u16 *voltage);
-
 bool
 amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev,
                                u8 voltage_type, u8 voltage_mode);
index d9b35df..313517f 100644
@@ -85,6 +85,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
        bp.flags = 0;
        bp.type = ttm_bo_type_kernel;
        bp.resv = NULL;
+       bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
        n = AMDGPU_BENCHMARK_ITERATIONS;
        r = amdgpu_bo_create(adev, &bp, &sobj);
        if (r) {
index 0f82c5d..b4ad1c0 100644
@@ -65,6 +65,7 @@
 #include "amdgpu_ras.h"
 #include "amdgpu_pmu.h"
 #include "amdgpu_fru_eeprom.h"
+#include "amdgpu_reset.h"
 
 #include <linux/suspend.h>
 #include <drm/task_barrier.h>
@@ -137,7 +138,7 @@ static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
        struct amdgpu_device *adev = drm_to_adev(ddev);
        uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
 
-       return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
+       return sysfs_emit(buf, "%llu\n", cnt);
 }
 
 static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
@@ -161,7 +162,7 @@ static ssize_t amdgpu_device_get_product_name(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
 
-       return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_name);
+       return sysfs_emit(buf, "%s\n", adev->product_name);
 }
 
 static DEVICE_ATTR(product_name, S_IRUGO,
@@ -183,7 +184,7 @@ static ssize_t amdgpu_device_get_product_number(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
 
-       return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_number);
+       return sysfs_emit(buf, "%s\n", adev->product_number);
 }
 
 static DEVICE_ATTR(product_number, S_IRUGO,
@@ -205,25 +206,25 @@ static ssize_t amdgpu_device_get_serial_number(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
 
-       return snprintf(buf, PAGE_SIZE, "%s\n", adev->serial);
+       return sysfs_emit(buf, "%s\n", adev->serial);
 }
 
 static DEVICE_ATTR(serial_number, S_IRUGO,
                amdgpu_device_get_serial_number, NULL);
 
 /**
- * amdgpu_device_supports_atpx - Is the device a dGPU with HG/PX power control
+ * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
  *
  * @dev: drm_device pointer
  *
- * Returns true if the device is a dGPU with HG/PX power control,
+ * Returns true if the device is a dGPU with ATPX power control,
  * otherwise returns false.
  */
-bool amdgpu_device_supports_atpx(struct drm_device *dev)
+bool amdgpu_device_supports_px(struct drm_device *dev)
 {
        struct amdgpu_device *adev = drm_to_adev(dev);
 
-       if (adev->flags & AMD_IS_PX)
+       if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
                return true;
        return false;
 }
@@ -233,14 +234,15 @@ bool amdgpu_device_supports_atpx(struct drm_device *dev)
  *
  * @dev: drm_device pointer
  *
- * Returns true if the device is a dGPU with HG/PX power control,
+ * Returns true if the device is a dGPU with ACPI power control,
  * otherwise returns false.
  */
 bool amdgpu_device_supports_boco(struct drm_device *dev)
 {
        struct amdgpu_device *adev = drm_to_adev(dev);
 
-       if (adev->has_pr3)
+       if (adev->has_pr3 ||
+           ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
                return true;
        return false;
 }
@@ -326,6 +328,35 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
 /*
  * register access helper functions.
  */
+
+/* Check if hw access should be skipped because of hotplug or device error */
+bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
+{
+       if (adev->in_pci_err_recovery)
+               return true;
+
+#ifdef CONFIG_LOCKDEP
+       /*
+        * This is a bit complicated to understand, so worth a comment. What we assert
+        * here is that the GPU reset is not running on another thread in parallel.
+        *
+        * For this we trylock the read side of the reset semaphore; if that
+        * succeeds we know that the reset is not running in parallel.
+        *
+        * If the trylock fails we assert that we are either already holding the read
+        * side of the lock or are the reset thread itself and hold the write side of
+        * the lock.
+        */
+       if (in_task()) {
+               if (down_read_trylock(&adev->reset_sem))
+                       up_read(&adev->reset_sem);
+               else
+                       lockdep_assert_held(&adev->reset_sem);
+       }
+#endif
+       return false;
+}
+
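
The lockdep block above is a reusable assertion pattern: in task context, either the read side of the reset semaphore can be taken momentarily (so no reset holds the write side), or the current task must already be a holder. A condensed sketch of just that pattern, assuming an rw_semaphore guards the hardware-access window:

#include <linux/lockdep.h>
#include <linux/preempt.h>
#include <linux/rwsem.h>

static void assert_no_concurrent_reset(struct rw_semaphore *reset_sem)
{
        if (!in_task())
                return;         /* don't trylock from IRQ/atomic context */

        if (down_read_trylock(reset_sem))
                up_read(reset_sem);             /* nobody holds the write side */
        else
                lockdep_assert_held(reset_sem); /* we must already be a holder */
}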
 /**
  * amdgpu_device_rreg - read a memory mapped IO or indirect register
  *
@@ -340,7 +371,7 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
 {
        uint32_t ret;
 
-       if (adev->in_pci_err_recovery)
+       if (amdgpu_device_skip_hw_access(adev))
                return 0;
 
        if ((reg * 4) < adev->rmmio_size) {
@@ -377,7 +408,7 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
  */
 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
 {
-       if (adev->in_pci_err_recovery)
+       if (amdgpu_device_skip_hw_access(adev))
                return 0;
 
        if (offset < adev->rmmio_size)
@@ -402,7 +433,7 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
  */
 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
 {
-       if (adev->in_pci_err_recovery)
+       if (amdgpu_device_skip_hw_access(adev))
                return;
 
        if (offset < adev->rmmio_size)
@@ -425,7 +456,7 @@ void amdgpu_device_wreg(struct amdgpu_device *adev,
                        uint32_t reg, uint32_t v,
                        uint32_t acc_flags)
 {
-       if (adev->in_pci_err_recovery)
+       if (amdgpu_device_skip_hw_access(adev))
                return;
 
        if ((reg * 4) < adev->rmmio_size) {
@@ -452,14 +483,14 @@ void amdgpu_device_wreg(struct amdgpu_device *adev,
 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
                             uint32_t reg, uint32_t v)
 {
-       if (adev->in_pci_err_recovery)
+       if (amdgpu_device_skip_hw_access(adev))
                return;
 
        if (amdgpu_sriov_fullaccess(adev) &&
            adev->gfx.rlc.funcs &&
            adev->gfx.rlc.funcs->is_rlcg_access_range) {
                if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
-                       return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
+                       return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v, 0);
        } else {
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
        }
@@ -476,7 +507,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
  */
 u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
 {
-       if (adev->in_pci_err_recovery)
+       if (amdgpu_device_skip_hw_access(adev))
                return 0;
 
        if (index < adev->doorbell.num_doorbells) {
@@ -499,7 +530,7 @@ u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
  */
 void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
 {
-       if (adev->in_pci_err_recovery)
+       if (amdgpu_device_skip_hw_access(adev))
                return;
 
        if (index < adev->doorbell.num_doorbells) {
@@ -520,7 +551,7 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
  */
 u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
 {
-       if (adev->in_pci_err_recovery)
+       if (amdgpu_device_skip_hw_access(adev))
                return 0;
 
        if (index < adev->doorbell.num_doorbells) {
@@ -543,7 +574,7 @@ u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
  */
 void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
 {
-       if (adev->in_pci_err_recovery)
+       if (amdgpu_device_skip_hw_access(adev))
                return;
 
        if (index < adev->doorbell.num_doorbells) {
@@ -1391,7 +1422,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
        struct drm_device *dev = pci_get_drvdata(pdev);
        int r;
 
-       if (amdgpu_device_supports_atpx(dev) && state == VGA_SWITCHEROO_OFF)
+       if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
                return;
 
        if (state == VGA_SWITCHEROO_ON) {
@@ -2049,6 +2080,11 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
                                amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
                                return r;
                        }
+
+                       /* get pf2vf msg info at its earliest time */
+                       if (amdgpu_sriov_vf(adev))
+                               amdgpu_virt_init_data_exchange(adev);
+
                }
        }
 
@@ -2331,8 +2367,8 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
  * Returns 0 on success, negative error code on failure.
  */
 
-static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
-                                               enum amd_clockgating_state state)
+int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
+                              enum amd_clockgating_state state)
 {
        int i, j, r;
 
@@ -2343,6 +2379,10 @@ static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
                i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
                if (!adev->ip_blocks[i].status.late_initialized)
                        continue;
+               /* skip CG for GFX on S0ix */
+               if (adev->in_s0ix &&
+                   adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
+                       continue;
                /* skip CG for VCE/UVD, it's handled specially */
                if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
                    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
@@ -2363,7 +2403,8 @@ static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
        return 0;
 }
 
-static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
+int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
+                              enum amd_powergating_state state)
 {
        int i, j, r;
 
@@ -2374,6 +2415,10 @@ static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_power
                i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
                if (!adev->ip_blocks[i].status.late_initialized)
                        continue;
+               /* skip PG for GFX on S0ix */
+               if (adev->in_s0ix &&
+                   adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
+                       continue;
                /* skip CG for VCE/UVD, it's handled specially */
                if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
                    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
@@ -2655,11 +2700,8 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
 {
        int i, r;
 
-       if (adev->in_poweroff_reboot_com ||
-           !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
-               amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
-               amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
-       }
+       amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+       amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
 
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_blocks[i].status.valid)
@@ -2699,6 +2741,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
 {
        int i, r;
 
+       if (adev->in_s0ix)
+               amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
+
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
@@ -2721,6 +2766,17 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
                        adev->ip_blocks[i].status.hw = false;
                        continue;
                }
+
+               /*
+                * Skip suspend of GFX and PSP for S0ix: GFX is in the gfxoff
+                * state, so on resume it will exit gfxoff just like at runtime.
+                * PSP is part of the always-on hardware, so there is no need to
+                * suspend it.
+                */
+               if (adev->in_s0ix &&
+                   (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
+                    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
+                       continue;
+
                /* XXX handle errors */
                r = adev->ip_blocks[i].version->funcs->suspend(adev);
                /* XXX handle errors */
@@ -3086,8 +3142,9 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
                if (adev->asic_reset_res)
                        goto fail;
 
-               if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
-                       adev->mmhub.funcs->reset_ras_error_count(adev);
+               if (adev->mmhub.ras_funcs &&
+                   adev->mmhub.ras_funcs->reset_ras_error_count)
+                       adev->mmhub.ras_funcs->reset_ras_error_count(adev);
        } else {
 
                task_barrier_full(&hive->tb);
@@ -3197,7 +3254,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        struct drm_device *ddev = adev_to_drm(adev);
        struct pci_dev *pdev = adev->pdev;
        int r, i;
-       bool atpx = false;
+       bool px = false;
        u32 max_MBps;
 
        adev->shutdown = false;
@@ -3359,16 +3416,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
                vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
 
-       if (amdgpu_device_supports_atpx(ddev))
-               atpx = true;
-       if (amdgpu_has_atpx() &&
-           (amdgpu_is_atpx_hybrid() ||
-            amdgpu_has_atpx_dgpu_power_cntl()) &&
-           !pci_is_thunderbolt_attached(adev->pdev))
+       if (amdgpu_device_supports_px(ddev)) {
+               px = true;
                vga_switcheroo_register_client(adev->pdev,
-                                              &amdgpu_switcheroo_ops, atpx);
-       if (atpx)
+                                              &amdgpu_switcheroo_ops, px);
                vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
+       }
 
        if (amdgpu_emu_mode == 1) {
                /* post the asic on emulation mode */
@@ -3376,6 +3429,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                goto fence_driver_init;
        }
 
+       amdgpu_reset_init(adev);
+
        /* detect if we are with an SRIOV vbios */
        amdgpu_device_detect_sriov_bios(adev);
 
@@ -3575,7 +3630,7 @@ release_ras_con:
 
 failed:
        amdgpu_vf_error_trans_all(adev);
-       if (atpx)
+       if (px)
                vga_switcheroo_fini_domain_pm_ops(adev->dev);
 
 failed_unmap:
@@ -3626,6 +3681,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
        release_firmware(adev->firmware.gpu_info_fw);
        adev->firmware.gpu_info_fw = NULL;
        adev->accel_working = false;
+
+       amdgpu_reset_fini(adev);
+
        /* free i2c buses */
        if (!amdgpu_device_has_dc_support(adev))
                amdgpu_i2c_fini(adev);
@@ -3635,13 +3693,10 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 
        kfree(adev->bios);
        adev->bios = NULL;
-       if (amdgpu_has_atpx() &&
-           (amdgpu_is_atpx_hybrid() ||
-            amdgpu_has_atpx_dgpu_power_cntl()) &&
-           !pci_is_thunderbolt_attached(adev->pdev))
+       if (amdgpu_device_supports_px(adev_to_drm(adev))) {
                vga_switcheroo_unregister_client(adev->pdev);
-       if (amdgpu_device_supports_atpx(adev_to_drm(adev)))
                vga_switcheroo_fini_domain_pm_ops(adev->dev);
+       }
        if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
                vga_client_register(adev->pdev, NULL, NULL, NULL);
        iounmap(adev->rmmio);
@@ -3674,14 +3729,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
  */
 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 {
-       struct amdgpu_device *adev;
-       struct drm_crtc *crtc;
-       struct drm_connector *connector;
-       struct drm_connector_list_iter iter;
+       struct amdgpu_device *adev = drm_to_adev(dev);
        int r;
 
-       adev = drm_to_adev(dev);
-
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
@@ -3693,61 +3743,19 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 
        cancel_delayed_work_sync(&adev->delayed_init_work);
 
-       if (!amdgpu_device_has_dc_support(adev)) {
-               /* turn off display hw */
-               drm_modeset_lock_all(dev);
-               drm_connector_list_iter_begin(dev, &iter);
-               drm_for_each_connector_iter(connector, &iter)
-                       drm_helper_connector_dpms(connector,
-                                                 DRM_MODE_DPMS_OFF);
-               drm_connector_list_iter_end(&iter);
-               drm_modeset_unlock_all(dev);
-                       /* unpin the front buffers and cursors */
-               list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-                       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-                       struct drm_framebuffer *fb = crtc->primary->fb;
-                       struct amdgpu_bo *robj;
-
-                       if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
-                               struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-                               r = amdgpu_bo_reserve(aobj, true);
-                               if (r == 0) {
-                                       amdgpu_bo_unpin(aobj);
-                                       amdgpu_bo_unreserve(aobj);
-                               }
-                       }
-
-                       if (fb == NULL || fb->obj[0] == NULL) {
-                               continue;
-                       }
-                       robj = gem_to_amdgpu_bo(fb->obj[0]);
-                       /* don't unpin kernel fb objects */
-                       if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
-                               r = amdgpu_bo_reserve(robj, true);
-                               if (r == 0) {
-                                       amdgpu_bo_unpin(robj);
-                                       amdgpu_bo_unreserve(robj);
-                               }
-                       }
-               }
-       }
-
        amdgpu_ras_suspend(adev);
 
        r = amdgpu_device_ip_suspend_phase1(adev);
 
-       amdgpu_amdkfd_suspend(adev, adev->in_runpm);
+       if (!adev->in_s0ix)
+               amdgpu_amdkfd_suspend(adev, adev->in_runpm);
 
        /* evict vram memory */
        amdgpu_bo_evict_vram(adev);
 
        amdgpu_fence_driver_suspend(adev);
 
-       if (adev->in_poweroff_reboot_com ||
-           !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
-               r = amdgpu_device_ip_suspend_phase2(adev);
-       else
-               amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
+       r = amdgpu_device_ip_suspend_phase2(adev);
        /* evict remaining vram memory
         * This second call to evict vram is to evict the gart page table
         * using the CPU.
@@ -3769,16 +3777,13 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
  */
 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
 {
-       struct drm_connector *connector;
-       struct drm_connector_list_iter iter;
        struct amdgpu_device *adev = drm_to_adev(dev);
-       struct drm_crtc *crtc;
        int r = 0;
 
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       if (amdgpu_acpi_is_s0ix_supported(adev))
+       if (adev->in_s0ix)
                amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
 
        /* post card */
@@ -3803,50 +3808,17 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
        queue_delayed_work(system_wq, &adev->delayed_init_work,
                           msecs_to_jiffies(AMDGPU_RESUME_MS));
 
-       if (!amdgpu_device_has_dc_support(adev)) {
-               /* pin cursors */
-               list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-                       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
-                       if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
-                               struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-                               r = amdgpu_bo_reserve(aobj, true);
-                               if (r == 0) {
-                                       r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
-                                       if (r != 0)
-                                               dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
-                                       amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
-                                       amdgpu_bo_unreserve(aobj);
-                               }
-                       }
-               }
+       if (!adev->in_s0ix) {
+               r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
+               if (r)
+                       return r;
        }
-       r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
-       if (r)
-               return r;
 
        /* Make sure IB tests flushed */
        flush_delayed_work(&adev->delayed_init_work);
 
-       /* blat the mode back in */
-       if (fbcon) {
-               if (!amdgpu_device_has_dc_support(adev)) {
-                       /* pre DCE11 */
-                       drm_helper_resume_force_mode(dev);
-
-                       /* turn on display hw */
-                       drm_modeset_lock_all(dev);
-
-                       drm_connector_list_iter_begin(dev, &iter);
-                       drm_for_each_connector_iter(connector, &iter)
-                               drm_helper_connector_dpms(connector,
-                                                         DRM_MODE_DPMS_ON);
-                       drm_connector_list_iter_end(&iter);
-
-                       drm_modeset_unlock_all(dev);
-               }
+       if (fbcon)
                amdgpu_fbdev_set_suspend(adev, 0);
-       }
 
        drm_kms_helper_poll_enable(dev);
 
@@ -4144,11 +4116,11 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
        amdgpu_amdkfd_post_reset(adev);
 
 error:
-       amdgpu_virt_release_full_gpu(adev, true);
        if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
                amdgpu_inc_vram_lost(adev);
                r = amdgpu_device_recover_vram(adev);
        }
+       amdgpu_virt_release_full_gpu(adev, true);
 
        return r;
 }
@@ -4225,6 +4197,8 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
                case CHIP_SIENNA_CICHLID:
                case CHIP_NAVY_FLOUNDER:
                case CHIP_DIMGREY_CAVEFISH:
+               case CHIP_VANGOGH:
+               case CHIP_ALDEBARAN:
                        break;
                default:
                        goto disabled;
@@ -4279,11 +4253,15 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
 }
 
 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
-                                 struct amdgpu_job *job,
-                                 bool *need_full_reset_arg)
+                                struct amdgpu_reset_context *reset_context)
 {
        int i, r = 0;
-       bool need_full_reset  = *need_full_reset_arg;
+       struct amdgpu_job *job = NULL;
+       bool need_full_reset =
+               test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+
+       if (reset_context->reset_req_dev == adev)
+               job = reset_context->job;
 
        /* no need to dump if device is not in good state during probe period */
        if (!adev->gmc.xgmi.pending_reset)
@@ -4308,6 +4286,13 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
        if(job)
                drm_sched_increase_karma(&job->base);
 
+       r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
+       /* If reset handler not implemented, continue; otherwise return */
+       if (r == -ENOSYS)
+               r = 0;
+       else
+               return r;
+
        /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
        if (!amdgpu_sriov_vf(adev)) {
 
@@ -4326,22 +4311,38 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 
                if (need_full_reset)
                        r = amdgpu_device_ip_suspend(adev);
-
-               *need_full_reset_arg = need_full_reset;
+               if (need_full_reset)
+                       set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+               else
+                       clear_bit(AMDGPU_NEED_FULL_RESET,
+                                 &reset_context->flags);
        }
 
        return r;
 }
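
The hunk above probes an optional per-ASIC reset handler and treats -ENOSYS as "not implemented, fall back to the legacy path" (amdgpu_do_asic_reset below follows the same convention). A minimal userspace sketch of that convention, with prepare_hwcontext() as a hypothetical stand-in for amdgpu_reset_prepare_hwcontext():

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in: returns -ENOSYS when no handler is registered. */
static int prepare_hwcontext(int has_handler)
{
        return has_handler ? 0 : -ENOSYS;
}

int main(void)
{
        int r = prepare_hwcontext(0);

        if (r == -ENOSYS)
                r = 0;          /* not implemented: continue on the legacy path */
        else
                return r;       /* handler ran: its result is final */

        printf("legacy reset path, r=%d\n", r);
        return r;
}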
 
-int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
-                         struct list_head *device_list_handle,
-                         bool *need_full_reset_arg,
-                         bool skip_hw_reset)
+int amdgpu_do_asic_reset(struct list_head *device_list_handle,
+                        struct amdgpu_reset_context *reset_context)
 {
        struct amdgpu_device *tmp_adev = NULL;
-       bool need_full_reset = *need_full_reset_arg, vram_lost = false;
+       bool need_full_reset, skip_hw_reset, vram_lost = false;
        int r = 0;
 
+       /* Try reset handler method first */
+       tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
+                                   reset_list);
+       r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
+       /* If reset handler not implemented, continue; otherwise return */
+       if (r == -ENOSYS)
+               r = 0;
+       else
+               return r;
+
+       /* Reset handler not implemented, use the default method */
+       need_full_reset =
+               test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+       skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
+
        /*
         * ASIC reset has to be done on all XGMI hive nodes ASAP
         * to allow proper links negotiation in FW (within 1 sec)
@@ -4378,9 +4379,9 @@ int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
 
        if (!r && amdgpu_ras_intr_triggered()) {
                list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
-                       if (tmp_adev->mmhub.funcs &&
-                           tmp_adev->mmhub.funcs->reset_ras_error_count)
-                               tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
+                       if (tmp_adev->mmhub.ras_funcs &&
+                           tmp_adev->mmhub.ras_funcs->reset_ras_error_count)
+                               tmp_adev->mmhub.ras_funcs->reset_ras_error_count(tmp_adev);
                }
 
                amdgpu_ras_intr_cleared();
@@ -4425,7 +4426,8 @@ int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
                                 */
                                amdgpu_register_gpu_instance(tmp_adev);
 
-                               if (!hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
+                               if (!reset_context->hive &&
+                                   tmp_adev->gmc.xgmi.num_physical_nodes > 1)
                                        amdgpu_xgmi_add_device(tmp_adev);
 
                                r = amdgpu_device_ip_late_init(tmp_adev);
@@ -4453,8 +4455,10 @@ int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
                                }
 
                                /* Update PSP FW topology after reset */
-                               if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
-                                       r = amdgpu_xgmi_update_topology(hive, tmp_adev);
+                               if (reset_context->hive &&
+                                   tmp_adev->gmc.xgmi.num_physical_nodes > 1)
+                                       r = amdgpu_xgmi_update_topology(
+                                               reset_context->hive, tmp_adev);
                        }
                }
 
@@ -4478,7 +4482,10 @@ out:
        }
 
 end:
-       *need_full_reset_arg = need_full_reset;
+       if (need_full_reset)
+               set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+       else
+               clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
        return r;
 }
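
need_full_reset and skip_hw_reset used to travel as bool pointer arguments; they are now bits in reset_context->flags, toggled with set_bit()/clear_bit() and read with test_bit(). A self-contained model of that flags word (bit numbers here are illustrative, not the kernel's constants):

#include <stdbool.h>
#include <stdio.h>

/* Modeled after AMDGPU_NEED_FULL_RESET / AMDGPU_SKIP_HW_RESET. */
enum { NEED_FULL_RESET, SKIP_HW_RESET };

struct reset_context { unsigned long flags; };

static void set_flag(struct reset_context *c, int bit)   { c->flags |=  1UL << bit; }
static void clear_flag(struct reset_context *c, int bit) { c->flags &= ~(1UL << bit); }
static bool test_flag(struct reset_context *c, int bit)  { return c->flags & (1UL << bit); }

int main(void)
{
        struct reset_context ctx = { 0 };

        set_flag(&ctx, NEED_FULL_RESET);
        if (test_flag(&ctx, NEED_FULL_RESET))
                printf("full reset requested\n");
        clear_flag(&ctx, NEED_FULL_RESET);
        return 0;
}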
 
@@ -4615,6 +4622,74 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
        return 0;
 }
 
+void amdgpu_device_recheck_guilty_jobs(
+       struct amdgpu_device *adev, struct list_head *device_list_handle,
+       struct amdgpu_reset_context *reset_context)
+{
+       int i, r = 0;
+
+       for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+               struct amdgpu_ring *ring = adev->rings[i];
+               int ret = 0;
+               struct drm_sched_job *s_job;
+
+               if (!ring || !ring->sched.thread)
+                       continue;
+
+               s_job = list_first_entry_or_null(&ring->sched.pending_list,
+                               struct drm_sched_job, list);
+               if (s_job == NULL)
+                       continue;
+
+               /* clear the job's guilty flag and rely on the following step to decide the real one */
+               drm_sched_reset_karma(s_job);
+               drm_sched_resubmit_jobs_ext(&ring->sched, 1);
+
+               ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
+               if (ret == 0) { /* timeout */
+                       DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
+                                               ring->sched.name, s_job->id);
+
+                       /* set guilty */
+                       drm_sched_increase_karma(s_job);
+retry:
+                       /* do hw reset */
+                       if (amdgpu_sriov_vf(adev)) {
+                               amdgpu_virt_fini_data_exchange(adev);
+                               r = amdgpu_device_reset_sriov(adev, false);
+                               if (r)
+                                       adev->asic_reset_res = r;
+                       } else {
+                               clear_bit(AMDGPU_SKIP_HW_RESET,
+                                         &reset_context->flags);
+                               r = amdgpu_do_asic_reset(device_list_handle,
+                                                        reset_context);
+                               if (r && r == -EAGAIN)
+                                       goto retry;
+                       }
+
+                       /*
+                        * increment the reset counter so that the jobs
+                        * resubmitted afterwards flush their VMIDs
+                        */
+                       atomic_inc(&adev->gpu_reset_counter);
+                       continue;
+               }
+
+               /* got the hw fence, signal finished fence */
+               atomic_dec(ring->sched.score);
+               dma_fence_get(&s_job->s_fence->finished);
+               dma_fence_signal(&s_job->s_fence->finished);
+               dma_fence_put(&s_job->s_fence->finished);
+
+               /* remove node from list and free the job */
+               spin_lock(&ring->sched.job_list_lock);
+               list_del_init(&s_job->list);
+               spin_unlock(&ring->sched.job_list_lock);
+               ring->sched.ops->free_job(s_job);
+       }
+}
+
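
In outline, amdgpu_device_recheck_guilty_jobs() resubmits the pending jobs one at a time with karma cleared and waits on each hardware fence: a timeout convicts that job and triggers another reset, while a signaled fence exonerates it. A toy model of the loop, with job_runs_ok() standing in for dma_fence_wait_timeout():

#include <stdbool.h>
#include <stdio.h>

static bool job_runs_ok(int job_id) { return job_id != 2; } /* job 2 hangs */

int main(void)
{
        for (int job = 0; job < 4; job++) {
                if (!job_runs_ok(job)) {
                        printf("real guilty job: %d (hw reset, then continue)\n", job);
                        continue;   /* reset the GPU and recheck the next job */
                }
                printf("job %d signaled; free it\n", job);
        }
        return 0;
}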
 /**
  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
  *
@@ -4630,13 +4705,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                              struct amdgpu_job *job)
 {
        struct list_head device_list, *device_list_handle =  NULL;
-       bool need_full_reset = false;
        bool job_signaled = false;
        struct amdgpu_hive_info *hive = NULL;
        struct amdgpu_device *tmp_adev = NULL;
        int i, r = 0;
        bool need_emergency_restart = false;
        bool audio_suspended = false;
+       int tmp_vram_lost_counter;
+       struct amdgpu_reset_context reset_context;
+
+       memset(&reset_context, 0, sizeof(reset_context));
 
        /*
         * Special case: RAS triggered and full reset isn't supported
@@ -4677,6 +4755,12 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                mutex_lock(&hive->hive_lock);
        }
 
+       reset_context.method = AMD_RESET_METHOD_NONE;
+       reset_context.reset_req_dev = adev;
+       reset_context.job = job;
+       reset_context.hive = hive;
+       clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+
        /*
         * lock the device before we try to operate the linked list
         * if didn't get the device lock, don't touch the linked list since
@@ -4777,9 +4861,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
 retry: /* Rest of adevs pre asic reset from XGMI hive. */
        list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
-               r = amdgpu_device_pre_asic_reset(tmp_adev,
-                                                (tmp_adev == adev) ? job : NULL,
-                                                &need_full_reset);
+               r = amdgpu_device_pre_asic_reset(tmp_adev, &reset_context);
                /*TODO Should we stop ?*/
                if (r) {
                        dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
@@ -4788,6 +4870,7 @@ retry:    /* Rest of adevs pre asic reset from XGMI hive. */
                }
        }
 
+       tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
        /* Actual ASIC resets if needed.*/
        /* TODO Implement XGMI hive reset logic for SRIOV */
        if (amdgpu_sriov_vf(adev)) {
@@ -4795,7 +4878,7 @@ retry:    /* Rest of adevs pre asic reset from XGMI hive. */
                if (r)
                        adev->asic_reset_res = r;
        } else {
-               r  = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset, false);
+               r = amdgpu_do_asic_reset(device_list_handle, &reset_context);
                if (r && r == -EAGAIN)
                        goto retry;
        }
@@ -4805,6 +4888,18 @@ skip_hw_reset:
        /* Post ASIC reset for all devs .*/
        list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
 
+                /*
+                 * Sometimes a later bad compute job can block a good gfx job
+                 * because the gfx and compute rings share internal GC hardware.
+                 * Add an additional guilty-job recheck step: each pending job
+                 * is resubmitted synchronously and waited on. If the wait
+                 * times out, that job is identified as the real guilty one.
+                 */
+               if (amdgpu_gpu_recovery == 2 &&
+                       !(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
+                       amdgpu_device_recheck_guilty_jobs(
+                               tmp_adev, device_list_handle, &reset_context);
+
                for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                        struct amdgpu_ring *ring = tmp_adev->rings[i];
 
@@ -5148,12 +5243,14 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct amdgpu_device *adev = drm_to_adev(dev);
        int r, i;
-       bool need_full_reset = true;
+       struct amdgpu_reset_context reset_context;
        u32 memsize;
        struct list_head device_list;
 
        DRM_INFO("PCI error: slot reset callback!!\n");
 
+       memset(&reset_context, 0, sizeof(reset_context));
+
        INIT_LIST_HEAD(&device_list);
        list_add_tail(&adev->reset_list, &device_list);
 
@@ -5176,13 +5273,18 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
                goto out;
        }
 
+       reset_context.method = AMD_RESET_METHOD_NONE;
+       reset_context.reset_req_dev = adev;
+       set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+       set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
+
        adev->in_pci_err_recovery = true;
-       r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset);
+       r = amdgpu_device_pre_asic_reset(adev, &reset_context);
        adev->in_pci_err_recovery = false;
        if (r)
                goto out;
 
-       r = amdgpu_do_asic_reset(NULL, &device_list, &need_full_reset, true);
+       r = amdgpu_do_asic_reset(&device_list, &reset_context);
 
 out:
        if (!r) {
index b05301e..9a2f811 100644
@@ -1354,3 +1354,92 @@ bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
        return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
                                                  stime, etime, mode);
 }
+
+int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
+{
+       struct drm_device *dev = adev_to_drm(adev);
+       struct drm_crtc *crtc;
+       struct drm_connector *connector;
+       struct drm_connector_list_iter iter;
+       int r = 0;
+
+       /* turn off display hw */
+       drm_modeset_lock_all(dev);
+       drm_connector_list_iter_begin(dev, &iter);
+       drm_for_each_connector_iter(connector, &iter)
+               drm_helper_connector_dpms(connector,
+                                         DRM_MODE_DPMS_OFF);
+       drm_connector_list_iter_end(&iter);
+       drm_modeset_unlock_all(dev);
+       /* unpin the front buffers and cursors */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+               struct drm_framebuffer *fb = crtc->primary->fb;
+               struct amdgpu_bo *robj;
+
+               if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
+                       struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+                       r = amdgpu_bo_reserve(aobj, true);
+                       if (r == 0) {
+                               amdgpu_bo_unpin(aobj);
+                               amdgpu_bo_unreserve(aobj);
+                       }
+               }
+
+               if (fb == NULL || fb->obj[0] == NULL) {
+                       continue;
+               }
+               robj = gem_to_amdgpu_bo(fb->obj[0]);
+               /* don't unpin kernel fb objects */
+               if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
+                       r = amdgpu_bo_reserve(robj, true);
+                       if (r == 0) {
+                               amdgpu_bo_unpin(robj);
+                               amdgpu_bo_unreserve(robj);
+                       }
+               }
+       }
+       return r;
+}
+
+int amdgpu_display_resume_helper(struct amdgpu_device *adev)
+{
+       struct drm_device *dev = adev_to_drm(adev);
+       struct drm_connector *connector;
+       struct drm_connector_list_iter iter;
+       struct drm_crtc *crtc;
+       int r;
+
+       /* pin cursors */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+               if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
+                       struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+                       r = amdgpu_bo_reserve(aobj, true);
+                       if (r == 0) {
+                               r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
+                               if (r != 0)
+                                       dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
+                               amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
+                               amdgpu_bo_unreserve(aobj);
+                       }
+               }
+       }
+
+       drm_helper_resume_force_mode(dev);
+
+       /* turn on display hw */
+       drm_modeset_lock_all(dev);
+
+       drm_connector_list_iter_begin(dev, &iter);
+       drm_for_each_connector_iter(connector, &iter)
+               drm_helper_connector_dpms(connector,
+                                         DRM_MODE_DPMS_ON);
+       drm_connector_list_iter_end(&iter);
+
+       drm_modeset_unlock_all(dev);
+
+       return 0;
+}
+
index dc7b7d1..7b6d83e 100644
@@ -47,4 +47,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
 const struct drm_format_info *
 amdgpu_lookup_format_info(u32 format, uint64_t modifier);
 
+int amdgpu_display_suspend_helper(struct amdgpu_device *adev);
+int amdgpu_display_resume_helper(struct amdgpu_device *adev);
+
 #endif
index 33991b4..d8f131e 100644
@@ -36,6 +36,7 @@
 #include <linux/vga_switcheroo.h>
 #include <drm/drm_probe_helper.h>
 #include <linux/mmu_notifier.h>
+#include <linux/suspend.h>
 
 #include "amdgpu.h"
 #include "amdgpu_irq.h"
@@ -46,6 +47,7 @@
 
 #include "amdgpu_ras.h"
 #include "amdgpu_xgmi.h"
+#include "amdgpu_reset.h"
 
 /*
  * KMS wrapper.
@@ -515,7 +517,7 @@ module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
  * DOC: gpu_recovery (int)
  * Set to enable GPU recovery mechanism (1 = enable, 0 = disable). The default is -1 (auto, disabled except SRIOV).
  */
-MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
+MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (2 = advanced tdr mode, 1 = enable, 0 = disable, -1 = auto)");
 module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
 
 /**
@@ -1161,6 +1163,7 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
        {0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
        {0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+       {0x1002, 0x73AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
        {0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 
        /* Van Gogh */
@@ -1333,9 +1336,7 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
         */
        if (!amdgpu_passthrough(adev))
                adev->mp1_state = PP_MP1_STATE_UNLOAD;
-       adev->in_poweroff_reboot_com = true;
        amdgpu_device_ip_suspend(adev);
-       adev->in_poweroff_reboot_com = false;
        adev->mp1_state = PP_MP1_STATE_NONE;
 }
 
@@ -1349,7 +1350,9 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
        struct list_head device_list;
        struct amdgpu_device *adev;
        int i, r;
-       bool need_full_reset = true;
+       struct amdgpu_reset_context reset_context;
+
+       memset(&reset_context, 0, sizeof(reset_context));
 
        mutex_lock(&mgpu_info.mutex);
        if (mgpu_info.pending_reset == true) {
@@ -1359,9 +1362,14 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
        mgpu_info.pending_reset = true;
        mutex_unlock(&mgpu_info.mutex);
 
+       /* Use a common context; we just need to make sure a full reset is done */
+       reset_context.method = AMD_RESET_METHOD_NONE;
+       set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+
        for (i = 0; i < mgpu_info.num_dgpu; i++) {
                adev = mgpu_info.gpu_ins[i].adev;
-               r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset);
+               reset_context.reset_req_dev = adev;
+               r = amdgpu_device_pre_asic_reset(adev, &reset_context);
                if (r) {
                        dev_err(adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
                                r, adev_to_drm(adev)->unique);
@@ -1388,7 +1396,10 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
        list_for_each_entry(adev, &device_list, reset_list)
                amdgpu_unregister_gpu_instance(adev);
 
-       r = amdgpu_do_asic_reset(NULL, &device_list, &need_full_reset, true);
+       /* Use a common context; we just need to make sure a full reset is done */
+       set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
+       r = amdgpu_do_asic_reset(&device_list, &reset_context);
+
        if (r) {
                DRM_ERROR("reinit gpus failure");
                return;
@@ -1402,18 +1413,50 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
        return;
 }
 
+static int amdgpu_pmops_prepare(struct device *dev)
+{
+       struct drm_device *drm_dev = dev_get_drvdata(dev);
+
+       /* Return a positive number here so
+        * DPM_FLAG_SMART_SUSPEND works properly
+        */
+       if (amdgpu_device_supports_boco(drm_dev))
+               return pm_runtime_suspended(dev) &&
+                       pm_suspend_via_firmware();
+
+       return 0;
+}
+
+static void amdgpu_pmops_complete(struct device *dev)
+{
+       /* nothing to do */
+}
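
amdgpu_pmops_prepare() relies on the PM core rule that a positive return from .prepare enables the "direct complete" optimization, leaving an already runtime-suspended device untouched across system suspend. A toy model of that decision (helper names here are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Toy PM core: a positive return from .prepare lets the core skip the
 * device's suspend/resume callbacks (the "direct complete" path). */
static int prepare(bool runtime_suspended, bool suspend_via_firmware)
{
        return runtime_suspended && suspend_via_firmware; /* 1 = may skip */
}

int main(void)
{
        if (prepare(true, true) > 0)
                printf("direct complete: leave the device runtime-suspended\n");
        else
                printf("run the full suspend/resume callbacks\n");
        return 0;
}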
+
 static int amdgpu_pmops_suspend(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = drm_to_adev(drm_dev);
+       int r;
 
-       return amdgpu_device_suspend(drm_dev, true);
+       if (amdgpu_acpi_is_s0ix_supported(adev))
+               adev->in_s0ix = true;
+       adev->in_s3 = true;
+       r = amdgpu_device_suspend(drm_dev, true);
+       adev->in_s3 = false;
+
+       return r;
 }
 
 static int amdgpu_pmops_resume(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = drm_to_adev(drm_dev);
+       int r;
 
-       return amdgpu_device_resume(drm_dev, true);
+       r = amdgpu_device_resume(drm_dev, true);
+       if (amdgpu_acpi_is_s0ix_supported(adev))
+               adev->in_s0ix = false;
+       return r;
 }
 
 static int amdgpu_pmops_freeze(struct device *dev)
@@ -1422,9 +1465,9 @@ static int amdgpu_pmops_freeze(struct device *dev)
        struct amdgpu_device *adev = drm_to_adev(drm_dev);
        int r;
 
-       adev->in_hibernate = true;
+       adev->in_s4 = true;
        r = amdgpu_device_suspend(drm_dev, true);
-       adev->in_hibernate = false;
+       adev->in_s4 = false;
        if (r)
                return r;
        return amdgpu_asic_reset(adev);
@@ -1440,13 +1483,8 @@ static int amdgpu_pmops_thaw(struct device *dev)
 static int amdgpu_pmops_poweroff(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
-       struct amdgpu_device *adev = drm_to_adev(drm_dev);
-       int r;
 
-       adev->in_poweroff_reboot_com = true;
-       r =  amdgpu_device_suspend(drm_dev, true);
-       adev->in_poweroff_reboot_com = false;
-       return r;
+       return amdgpu_device_suspend(drm_dev, true);
 }
 
 static int amdgpu_pmops_restore(struct device *dev)
@@ -1479,7 +1517,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
        }
 
        adev->in_runpm = true;
-       if (amdgpu_device_supports_atpx(drm_dev))
+       if (amdgpu_device_supports_px(drm_dev))
                drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
        ret = amdgpu_device_suspend(drm_dev, false);
@@ -1488,16 +1526,14 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
                return ret;
        }
 
-       if (amdgpu_device_supports_atpx(drm_dev)) {
+       if (amdgpu_device_supports_px(drm_dev)) {
                /* Only need to handle PCI state in the driver for ATPX
                 * PCI core handles it for _PR3.
                 */
-               if (!amdgpu_is_atpx_hybrid()) {
-                       amdgpu_device_cache_pci_state(pdev);
-                       pci_disable_device(pdev);
-                       pci_ignore_hotplug(pdev);
-                       pci_set_power_state(pdev, PCI_D3cold);
-               }
+               amdgpu_device_cache_pci_state(pdev);
+               pci_disable_device(pdev);
+               pci_ignore_hotplug(pdev);
+               pci_set_power_state(pdev, PCI_D3cold);
                drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
        } else if (amdgpu_device_supports_baco(drm_dev)) {
                amdgpu_device_baco_enter(drm_dev);
@@ -1516,19 +1552,17 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
        if (!adev->runpm)
                return -EINVAL;
 
-       if (amdgpu_device_supports_atpx(drm_dev)) {
+       if (amdgpu_device_supports_px(drm_dev)) {
                drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
                /* Only need to handle PCI state in the driver for ATPX
                 * PCI core handles it for _PR3.
                 */
-               if (!amdgpu_is_atpx_hybrid()) {
-                       pci_set_power_state(pdev, PCI_D0);
-                       amdgpu_device_load_pci_state(pdev);
-                       ret = pci_enable_device(pdev);
-                       if (ret)
-                               return ret;
-               }
+               pci_set_power_state(pdev, PCI_D0);
+               amdgpu_device_load_pci_state(pdev);
+               ret = pci_enable_device(pdev);
+               if (ret)
+                       return ret;
                pci_set_master(pdev);
        } else if (amdgpu_device_supports_boco(drm_dev)) {
                /* Only need to handle PCI state in the driver for ATPX
@@ -1539,7 +1573,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
                amdgpu_device_baco_exit(drm_dev);
        }
        ret = amdgpu_device_resume(drm_dev, false);
-       if (amdgpu_device_supports_atpx(drm_dev))
+       if (amdgpu_device_supports_px(drm_dev))
                drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
        adev->in_runpm = false;
        return 0;
@@ -1620,6 +1654,8 @@ out:
 }
 
 static const struct dev_pm_ops amdgpu_pm_ops = {
+       .prepare = amdgpu_pmops_prepare,
+       .complete = amdgpu_pmops_complete,
        .suspend = amdgpu_pmops_suspend,
        .resume = amdgpu_pmops_resume,
        .freeze = amdgpu_pmops_freeze,
index 1a4809d..47ea468 100644
@@ -439,7 +439,8 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
  * Helper function for amdgpu_fence_driver_init().
  */
 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
-                                 unsigned num_hw_submission)
+                                 unsigned num_hw_submission,
+                                 atomic_t *sched_score)
 {
        struct amdgpu_device *adev = ring->adev;
        long timeout;
@@ -467,30 +468,31 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
                return -ENOMEM;
 
        /* No need to setup the GPU scheduler for rings that don't need it */
-       if (!ring->no_scheduler) {
-               switch (ring->funcs->type) {
-               case AMDGPU_RING_TYPE_GFX:
-                       timeout = adev->gfx_timeout;
-                       break;
-               case AMDGPU_RING_TYPE_COMPUTE:
-                       timeout = adev->compute_timeout;
-                       break;
-               case AMDGPU_RING_TYPE_SDMA:
-                       timeout = adev->sdma_timeout;
-                       break;
-               default:
-                       timeout = adev->video_timeout;
-                       break;
-               }
+       if (ring->no_scheduler)
+               return 0;
 
-               r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
-                                  num_hw_submission, amdgpu_job_hang_limit,
-                                  timeout, NULL, ring->name);
-               if (r) {
-                       DRM_ERROR("Failed to create scheduler on ring %s.\n",
-                                 ring->name);
-                       return r;
-               }
+       switch (ring->funcs->type) {
+       case AMDGPU_RING_TYPE_GFX:
+               timeout = adev->gfx_timeout;
+               break;
+       case AMDGPU_RING_TYPE_COMPUTE:
+               timeout = adev->compute_timeout;
+               break;
+       case AMDGPU_RING_TYPE_SDMA:
+               timeout = adev->sdma_timeout;
+               break;
+       default:
+               timeout = adev->video_timeout;
+               break;
+       }
+
+       r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
+                          num_hw_submission, amdgpu_job_hang_limit,
+                          timeout, sched_score, ring->name);
+       if (r) {
+               DRM_ERROR("Failed to create scheduler on ring %s.\n",
+                         ring->name);
+               return r;
        }
 
        return 0;
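
The hunk above is a pure guard-clause refactor: rings flagged no_scheduler return early, so the timeout selection and drm_sched_init() call lose one nesting level with no behavior change. A compact sketch of the shape (timeout values are illustrative):

#include <stdio.h>

enum ring_type { RING_GFX, RING_COMPUTE, RING_SDMA, RING_VIDEO };

/* Guard clause first, then the switch sits one level shallower. */
static int init_ring(enum ring_type type, int no_scheduler)
{
        long timeout;

        if (no_scheduler)
                return 0;               /* nothing to set up, bail out early */

        switch (type) {
        case RING_GFX:     timeout = 10000; break;
        case RING_COMPUTE: timeout = 60000; break;
        case RING_SDMA:    timeout = 10000; break;
        default:           timeout = 10000; break;
        }
        printf("scheduler timeout: %ld ms\n", timeout);
        return 0;
}

int main(void)
{
        return init_ring(RING_GFX, 0);
}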
index 5807cad..c5a9a4f 100644
@@ -126,6 +126,8 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
                        AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
                bp.type = ttm_bo_type_kernel;
                bp.resv = NULL;
+               bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
                r = amdgpu_bo_create(adev, &bp, &adev->gart.bo);
                if (r) {
                        return r;
index fb7171e..311bcdc 100644
@@ -58,6 +58,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
                             struct drm_gem_object **obj)
 {
        struct amdgpu_bo *bo;
+       struct amdgpu_bo_user *ubo;
        struct amdgpu_bo_param bp;
        int r;
 
@@ -71,10 +72,13 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
        bp.preferred_domain = initial_domain;
        bp.flags = flags;
        bp.domain = initial_domain;
-       r = amdgpu_bo_create(adev, &bp, &bo);
+       bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+       r = amdgpu_bo_create_user(adev, &bp, &ubo);
        if (r)
                return r;
 
+       bo = &ubo->bo;
        *obj = &bo->tbo.base;
        (*obj)->funcs = &amdgpu_gem_object_funcs;
 
index 689addb..95d4f43 100644
@@ -310,9 +310,8 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
        ring->eop_gpu_addr = kiq->eop_gpu_addr;
        ring->no_scheduler = true;
        sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
-       r = amdgpu_ring_init(adev, ring, 1024,
-                            irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
-                            AMDGPU_RING_PRIO_DEFAULT);
+       r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
+                            AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
 
@@ -463,20 +462,25 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
 {
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *kiq_ring = &kiq->ring;
-       int i;
+       int i, r;
 
        if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
                return -EINVAL;
 
+       spin_lock(&adev->gfx.kiq.ring_lock);
        if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
-                                       adev->gfx.num_compute_rings))
+                                       adev->gfx.num_compute_rings)) {
+               spin_unlock(&adev->gfx.kiq.ring_lock);
                return -ENOMEM;
+       }
 
        for (i = 0; i < adev->gfx.num_compute_rings; i++)
                kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
                                           RESET_QUEUES, 0, 0);
+       r = amdgpu_ring_test_helper(kiq_ring);
+       spin_unlock(&adev->gfx.kiq.ring_lock);
 
-       return amdgpu_ring_test_helper(kiq_ring);
+       return r;
 }
 
 int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
@@ -519,12 +523,13 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
 
        DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
                                                        kiq_ring->queue);
-
+       spin_lock(&adev->gfx.kiq.ring_lock);
        r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
                                        adev->gfx.num_compute_rings +
                                        kiq->pmf->set_resources_size);
        if (r) {
                DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+               spin_unlock(&adev->gfx.kiq.ring_lock);
                return r;
        }
 
@@ -533,6 +538,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
                kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]);
 
        r = amdgpu_ring_test_helper(kiq_ring);
+       spin_unlock(&adev->gfx.kiq.ring_lock);
        if (r)
                DRM_ERROR("KCQ enable failed\n");
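
These KCQ hunks widen the kiq ring_lock scope so that ring allocation, the map/unmap packets, and the ring test form a single critical section that another thread cannot interleave with. A userspace model of the widened lock scope using pthreads:

#include <pthread.h>
#include <stdio.h>

/* The allocation, the queue packets, and the ring test form one critical
 * section; nothing may interleave between them. Build with -lpthread. */
static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;

static int ring_alloc(void) { return 0; }
static void ring_emit(int i) { printf("emit packet %d\n", i); }
static int ring_test(void) { return 0; }

int main(void)
{
        int r;

        pthread_mutex_lock(&ring_lock);
        r = ring_alloc();
        if (r) {
                pthread_mutex_unlock(&ring_lock);
                return r;
        }
        for (int i = 0; i < 3; i++)
                ring_emit(i);
        r = ring_test();                /* still inside the locked region */
        pthread_mutex_unlock(&ring_lock);
        return r;
}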
 
@@ -671,8 +677,9 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
         */
        if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
                kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
-               if (adev->gfx.funcs->query_ras_error_count)
-                       adev->gfx.funcs->query_ras_error_count(adev, err_data);
+               if (adev->gfx.ras_funcs &&
+                   adev->gfx.ras_funcs->query_ras_error_count)
+                       adev->gfx.ras_funcs->query_ras_error_count(adev, err_data);
                amdgpu_ras_reset_gpu(adev);
        }
        return AMDGPU_RAS_SUCCESS;
@@ -705,7 +712,7 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;
 
-       if (adev->in_pci_err_recovery)
+       if (amdgpu_device_skip_hw_access(adev))
                return 0;
 
        BUG_ON(!ring->funcs->emit_rreg);
@@ -772,7 +779,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 
        BUG_ON(!ring->funcs->emit_wreg);
 
-       if (adev->in_pci_err_recovery)
+       if (amdgpu_device_skip_hw_access(adev))
                return;
 
        spin_lock_irqsave(&kiq->ring_lock, flags);
@@ -836,14 +843,10 @@ int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
 
 void amdgpu_gfx_state_change_set(struct amdgpu_device *adev, enum gfx_change_state state)
 {
-       if (is_support_sw_smu(adev)) {
-               smu_gfx_state_change_set(&adev->smu, state);
-       } else {
-               mutex_lock(&adev->pm.mutex);
-               if (adev->powerplay.pp_funcs &&
-                   adev->powerplay.pp_funcs->gfx_state_change_set)
-                       ((adev)->powerplay.pp_funcs->gfx_state_change_set(
-                               (adev)->powerplay.pp_handle, state));
-               mutex_unlock(&adev->pm.mutex);
-       }
+       mutex_lock(&adev->pm.mutex);
+       if (adev->powerplay.pp_funcs &&
+           adev->powerplay.pp_funcs->gfx_state_change_set)
+               ((adev)->powerplay.pp_funcs->gfx_state_change_set(
+                       (adev)->powerplay.pp_handle, state));
+       mutex_unlock(&adev->pm.mutex);
 }
index 38af93f..d43fe2e 100644
@@ -205,6 +205,19 @@ struct amdgpu_cu_info {
        uint32_t bitmap[4][4];
 };
 
+struct amdgpu_gfx_ras_funcs {
+       int (*ras_late_init)(struct amdgpu_device *adev);
+       void (*ras_fini)(struct amdgpu_device *adev);
+       int (*ras_error_inject)(struct amdgpu_device *adev,
+                               void *inject_if);
+       int (*query_ras_error_count)(struct amdgpu_device *adev,
+                                    void *ras_error_status);
+       void (*reset_ras_error_count)(struct amdgpu_device *adev);
+       void (*query_ras_error_status)(struct amdgpu_device *adev);
+       void (*reset_ras_error_status)(struct amdgpu_device *adev);
+       void (*enable_watchdog_timer)(struct amdgpu_device *adev);
+};
+
 struct amdgpu_gfx_funcs {
        /* get the gpu clock counter */
        uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
@@ -220,14 +233,8 @@ struct amdgpu_gfx_funcs {
                                uint32_t *dst);
        void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe,
                                 u32 queue, u32 vmid);
-       int (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if);
-       int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status);
-       void (*reset_ras_error_count) (struct amdgpu_device *adev);
        void (*init_spm_golden)(struct amdgpu_device *adev);
-       void (*query_ras_error_status) (struct amdgpu_device *adev);
-       void (*reset_ras_error_status) (struct amdgpu_device *adev);
        void (*update_perfmon_mgcg)(struct amdgpu_device *adev, bool enable);
-       void (*enable_watchdog_timer)(struct amdgpu_device *adev);
 };
 
 struct sq_work {
@@ -330,7 +337,8 @@ struct amdgpu_gfx {
        DECLARE_BITMAP                  (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
 
        /*ras */
-       struct ras_common_if            *ras_if;
+       struct ras_common_if                    *ras_if;
+       const struct amdgpu_gfx_ras_funcs       *ras_funcs;
 };
 
 #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
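
The recurring shape of this series: RAS hooks move out of the main funcs tables into dedicated, optional ras_funcs tables, and every call site checks both the table pointer and the individual hook before calling. A minimal model of that optional-vtable pattern:

#include <stdio.h>

/* RAS hooks live in their own optional ops table; call sites check both
 * the table pointer and the individual hook before calling. */
struct ras_funcs { void (*reset_error_count)(void *dev); };
struct device { const struct ras_funcs *ras_funcs; };

static void do_reset(void *dev) { (void)dev; printf("error counters cleared\n"); }

int main(void)
{
        const struct ras_funcs ops = { .reset_error_count = do_reset };
        struct device dev = { .ras_funcs = &ops };

        if (dev.ras_funcs && dev.ras_funcs->reset_error_count)
                dev.ras_funcs->reset_error_count(&dev);
        return 0;
}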
index 6f79952..4d32233 100644
@@ -55,6 +55,8 @@ int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev)
                AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
        bp.type = ttm_bo_type_kernel;
        bp.resv = NULL;
+       bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
        r = amdgpu_bo_create(adev, &bp, &adev->gmc.pdb0_bo);
        if (r)
                return r;
@@ -389,26 +391,46 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
 {
        int r;
 
-       if (adev->umc.funcs && adev->umc.funcs->ras_late_init) {
-               r = adev->umc.funcs->ras_late_init(adev);
+       if (adev->umc.ras_funcs &&
+           adev->umc.ras_funcs->ras_late_init) {
+               r = adev->umc.ras_funcs->ras_late_init(adev);
                if (r)
                        return r;
        }
 
-       if (adev->mmhub.funcs && adev->mmhub.funcs->ras_late_init) {
-               r = adev->mmhub.funcs->ras_late_init(adev);
+       if (adev->mmhub.ras_funcs &&
+           adev->mmhub.ras_funcs->ras_late_init) {
+               r = adev->mmhub.ras_funcs->ras_late_init(adev);
                if (r)
                        return r;
        }
 
-       return amdgpu_xgmi_ras_late_init(adev);
+       if (!adev->gmc.xgmi.connected_to_cpu)
+               adev->gmc.xgmi.ras_funcs = &xgmi_ras_funcs;
+
+       if (adev->gmc.xgmi.ras_funcs &&
+           adev->gmc.xgmi.ras_funcs->ras_late_init) {
+               r = adev->gmc.xgmi.ras_funcs->ras_late_init(adev);
+               if (r)
+                       return r;
+       }
+
+       return 0;
 }
 
 void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
 {
-       amdgpu_umc_ras_fini(adev);
-       amdgpu_mmhub_ras_fini(adev);
-       amdgpu_xgmi_ras_fini(adev);
+       if (adev->umc.ras_funcs &&
+           adev->umc.ras_funcs->ras_fini)
+               adev->umc.ras_funcs->ras_fini(adev);
+
+       if (adev->mmhub.ras_funcs &&
+           adev->mmhub.ras_funcs->ras_fini)
+               adev->mmhub.ras_funcs->ras_fini(adev);
+
+       if (adev->gmc.xgmi.ras_funcs &&
+           adev->gmc.xgmi.ras_funcs->ras_fini)
+               adev->gmc.xgmi.ras_funcs->ras_fini(adev);
 }
 
        /*
@@ -514,6 +536,7 @@ void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
        switch (adev->asic_type) {
        case CHIP_VEGA10:
        case CHIP_VEGA20:
+       case CHIP_ARCTURUS:
        case CHIP_ALDEBARAN:
                /*
                 * noretry = 0 will cause kfd page fault tests fail
index 7e248a4..cbb7735 100644
@@ -135,6 +135,14 @@ struct amdgpu_gmc_funcs {
        unsigned int (*get_vbios_fb_size)(struct amdgpu_device *adev);
 };
 
+struct amdgpu_xgmi_ras_funcs {
+       int (*ras_late_init)(struct amdgpu_device *adev);
+       void (*ras_fini)(struct amdgpu_device *adev);
+       int (*query_ras_error_count)(struct amdgpu_device *adev,
+                                    void *ras_error_status);
+       void (*reset_ras_error_count)(struct amdgpu_device *adev);
+};
+
 struct amdgpu_xgmi {
        /* from psp */
        u64 node_id;
@@ -151,6 +159,7 @@ struct amdgpu_xgmi {
        struct ras_common_if *ras_if;
        bool connected_to_cpu;
        bool pending_reset;
+       const struct amdgpu_xgmi_ras_funcs *ras_funcs;
 };
 
 struct amdgpu_gmc {
index 8980329..540c010 100644
@@ -49,8 +49,7 @@ static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev,
        struct amdgpu_device *adev = drm_to_adev(ddev);
        struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
 
-       return snprintf(buf, PAGE_SIZE, "%llu\n",
-                       man->size * PAGE_SIZE);
+       return sysfs_emit(buf, "%llu\n", man->size * PAGE_SIZE);
 }
 
 /**
@@ -68,8 +67,7 @@ static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev,
        struct amdgpu_device *adev = drm_to_adev(ddev);
        struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
 
-       return snprintf(buf, PAGE_SIZE, "%llu\n",
-                       amdgpu_gtt_mgr_usage(man));
+       return sysfs_emit(buf, "%llu\n", amdgpu_gtt_mgr_usage(man));
 }
 
 static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO,
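
sysfs_emit() replaces open-coded snprintf(buf, PAGE_SIZE, ...) in show() callbacks; the helper owns the PAGE_SIZE bound (and sanity-checks the buffer), so call sites cannot get the length wrong. A rough userspace model of the idea:

#include <stdio.h>

#define PAGE_SIZE 4096

/* Userspace model: like snprintf(buf, PAGE_SIZE, ...), but the helper owns
 * the PAGE_SIZE bound so callers cannot pass a wrong length. */
static int sysfs_emit_model(char *buf, unsigned long long val)
{
        return snprintf(buf, PAGE_SIZE, "%llu\n", val);
}

int main(void)
{
        char page[PAGE_SIZE];
        int n = sysfs_emit_model(page, 1048576ULL);

        printf("%.*s", n, page);
        return 0;
}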
index af02610..90f5056 100644
@@ -199,13 +199,13 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
         * ack the interrupt if it is there
         */
        if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) {
-               if (adev->nbio.funcs &&
-                   adev->nbio.funcs->handle_ras_controller_intr_no_bifring)
-                       adev->nbio.funcs->handle_ras_controller_intr_no_bifring(adev);
+               if (adev->nbio.ras_funcs &&
+                   adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring)
+                       adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring(adev);
 
-               if (adev->nbio.funcs &&
-                   adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring)
-                       adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
+               if (adev->nbio.ras_funcs &&
+                   adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring)
+                       adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
        }
 
        return ret;
@@ -382,11 +382,6 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
 
                        kfree(src->enabled_types);
                        src->enabled_types = NULL;
-                       if (src->data) {
-                               kfree(src->data);
-                               kfree(src);
-                               adev->irq.client[i].sources[j] = NULL;
-                       }
                }
                kfree(adev->irq.client[i].sources);
                adev->irq.client[i].sources = NULL;
index ac527e5..cf61166 100644
@@ -62,7 +62,6 @@ struct amdgpu_irq_src {
        unsigned                                num_types;
        atomic_t                                *enabled_types;
        const struct amdgpu_irq_src_funcs       *funcs;
-       void *data;
 };
 
 struct amdgpu_irq_client {
index ada807d..39ee88d 100644
@@ -159,7 +159,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
                goto out;
        }
 
-       if (amdgpu_device_supports_atpx(dev) &&
+       if (amdgpu_device_supports_px(dev) &&
            (amdgpu_runtime_pm != 0)) { /* enable runpm by default for atpx */
                adev->runpm = true;
                dev_info(adev->dev, "Using ATPX for runtime pm\n");
@@ -200,9 +200,13 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
 
        if (adev->runpm) {
                /* only need to skip on ATPX */
-               if (amdgpu_device_supports_atpx(dev) &&
-                   !amdgpu_is_atpx_hybrid())
+               if (amdgpu_device_supports_px(dev))
                        dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+               /* we want direct complete for BOCO */
+               if (amdgpu_device_supports_boco(dev))
+                       dev_pm_set_driver_flags(dev->dev, DPM_FLAG_SMART_PREPARE |
+                                               DPM_FLAG_SMART_SUSPEND |
+                                               DPM_FLAG_MAY_SKIP_RESUME);
                pm_runtime_use_autosuspend(dev->dev);
                pm_runtime_set_autosuspend_delay(dev->dev, 5000);
                pm_runtime_allow(dev->dev);
@@ -785,9 +789,9 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                        dev_info->high_va_offset = AMDGPU_GMC_HOLE_END;
                        dev_info->high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
                }
-               dev_info->virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
+               dev_info->virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
                dev_info->pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
-               dev_info->gart_page_size = AMDGPU_GPU_PAGE_SIZE;
+               dev_info->gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
                dev_info->cu_active_number = adev->gfx.cu_info.number;
                dev_info->cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
                dev_info->ce_ram_size = adev->gfx.ce_ram_size;
index 1ae9bda..11aa299 100644
 #ifndef __AMDGPU_MMHUB_H__
 #define __AMDGPU_MMHUB_H__
 
-struct amdgpu_mmhub_funcs {
-       void (*ras_init)(struct amdgpu_device *adev);
+struct amdgpu_mmhub_ras_funcs {
        int (*ras_late_init)(struct amdgpu_device *adev);
+       void (*ras_fini)(struct amdgpu_device *adev);
        void (*query_ras_error_count)(struct amdgpu_device *adev,
-                                       void *ras_error_status);
+                                     void *ras_error_status);
+       void (*query_ras_error_status)(struct amdgpu_device *adev);
        void (*reset_ras_error_count)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_mmhub_funcs {
        u64 (*get_fb_location)(struct amdgpu_device *adev);
        void (*init)(struct amdgpu_device *adev);
        int (*gart_enable)(struct amdgpu_device *adev);
@@ -40,12 +44,12 @@ struct amdgpu_mmhub_funcs {
                                uint64_t page_table_base);
        void (*update_power_gating)(struct amdgpu_device *adev,
                                 bool enable);
-       void (*query_ras_error_status)(struct amdgpu_device *adev);
 };
 
 struct amdgpu_mmhub {
        struct ras_common_if *ras_if;
        const struct amdgpu_mmhub_funcs *funcs;
+       const struct amdgpu_mmhub_ras_funcs *ras_funcs;
 };
 
 int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev);
index 7c11bce..25ee535 100644
@@ -47,6 +47,17 @@ struct nbio_hdp_flush_reg {
        u32 ref_and_mask_sdma7;
 };
 
+struct amdgpu_nbio_ras_funcs {
+       void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev);
+       void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev);
+       int (*init_ras_controller_interrupt)(struct amdgpu_device *adev);
+       int (*init_ras_err_event_athub_interrupt)(struct amdgpu_device *adev);
+       void (*query_ras_error_count)(struct amdgpu_device *adev,
+                                     void *ras_error_status);
+       int (*ras_late_init)(struct amdgpu_device *adev);
+       void (*ras_fini)(struct amdgpu_device *adev);
+};
+
 struct amdgpu_nbio_funcs {
        const struct nbio_hdp_flush_reg *hdp_flush_reg;
        u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
@@ -79,13 +90,6 @@ struct amdgpu_nbio_funcs {
        void (*ih_control)(struct amdgpu_device *adev);
        void (*init_registers)(struct amdgpu_device *adev);
        void (*remap_hdp_registers)(struct amdgpu_device *adev);
-       void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev);
-       void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev);
-       int (*init_ras_controller_interrupt)(struct amdgpu_device *adev);
-       int (*init_ras_err_event_athub_interrupt)(struct amdgpu_device *adev);
-       void (*query_ras_error_count)(struct amdgpu_device *adev,
-                                       void *ras_error_status);
-       int (*ras_late_init)(struct amdgpu_device *adev);
        void (*enable_aspm)(struct amdgpu_device *adev,
                            bool enable);
        void (*program_aspm)(struct amdgpu_device *adev);
@@ -97,6 +101,7 @@ struct amdgpu_nbio {
        struct amdgpu_irq_src ras_err_event_athub_irq;
        struct ras_common_if *ras_if;
        const struct amdgpu_nbio_funcs *funcs;
+       const struct amdgpu_nbio_ras_funcs *ras_funcs;
 };
 
 int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev);
index ac1bb50..1345f7e 100644 (file)
@@ -77,6 +77,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
        struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+       struct amdgpu_bo_user *ubo;
 
        if (bo->tbo.pin_count > 0)
                amdgpu_bo_subtract_pin_size(bo);
@@ -94,7 +95,11 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
        }
        amdgpu_bo_unref(&bo->parent);
 
-       kfree(bo->metadata);
+       if (bo->tbo.type == ttm_bo_type_device) {
+               ubo = to_amdgpu_bo_user(bo);
+               kfree(ubo->metadata);
+       }
+
        kfree(bo);
 }
 
@@ -248,6 +253,7 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
        bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
        bp.type = ttm_bo_type_kernel;
        bp.resv = NULL;
+       bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 
        if (!*bo_ptr) {
                r = amdgpu_bo_create(adev, &bp, bo_ptr);
@@ -543,9 +549,10 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
        if (!amdgpu_bo_validate_size(adev, size, bp->domain))
                return -ENOMEM;
 
-       *bo_ptr = NULL;
+       BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));
 
-       bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
+       *bo_ptr = NULL;
+       bo = kzalloc(bp->bo_ptr_size, GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
@@ -635,6 +642,7 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
                AMDGPU_GEM_CREATE_SHADOW;
        bp.type = ttm_bo_type_kernel;
        bp.resv = bo->tbo.base.resv;
+       bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 
        r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
        if (!r) {
@@ -669,6 +677,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
        int r;
 
        bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
+
        r = amdgpu_bo_do_create(adev, bp, bo_ptr);
        if (r)
                return r;
@@ -690,6 +699,34 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
        return r;
 }
 
+/**
+ * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
+ * @adev: amdgpu device object
+ * @bp: parameters to be used for the buffer object
+ * @ubo_ptr: pointer to the buffer object pointer
+ *
+ * Create a BO to be used by a user application.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+int amdgpu_bo_create_user(struct amdgpu_device *adev,
+                         struct amdgpu_bo_param *bp,
+                         struct amdgpu_bo_user **ubo_ptr)
+{
+       struct amdgpu_bo *bo_ptr;
+       int r;
+
+       bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
+       bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
+       r = amdgpu_bo_do_create(adev, bp, &bo_ptr);
+       if (r)
+               return r;
+
+       *ubo_ptr = to_amdgpu_bo_user(bo_ptr);
+       return r;
+}
+
 /**
  * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
  * @bo: pointer to the buffer object
@@ -1024,13 +1061,10 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
 {
        struct ttm_resource_manager *man;
 
-       /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
-#ifndef CONFIG_HIBERNATION
-       if (adev->flags & AMD_IS_APU) {
-               /* Useless to evict on IGP chips */
+       if (adev->in_s3 && (adev->flags & AMD_IS_APU)) {
+               /* No need to evict vram on APUs for suspend to ram */
                return 0;
        }
-#endif
 
        man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
        return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
@@ -1095,25 +1129,6 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
        }
 }
 
-/**
- * amdgpu_bo_fbdev_mmap - mmap fbdev memory
- * @bo: &amdgpu_bo buffer object
- * @vma: vma as input from the fbdev mmap method
- *
- * Calls ttm_fbdev_mmap() to mmap fbdev memory if it is backed by a bo.
- *
- * Returns:
- * 0 for success or a negative error code on failure.
- */
-int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
-                            struct vm_area_struct *vma)
-{
-       if (vma->vm_pgoff != 0)
-               return -EACCES;
-
-       return ttm_bo_mmap_obj(vma, &bo->tbo);
-}
-
 /**
  * amdgpu_bo_set_tiling_flags - set tiling flags
  * @bo: &amdgpu_bo buffer object
@@ -1128,12 +1143,15 @@ int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
 int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+       struct amdgpu_bo_user *ubo;
 
+       BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
        if (adev->family <= AMDGPU_FAMILY_CZ &&
            AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
                return -EINVAL;
 
-       bo->tiling_flags = tiling_flags;
+       ubo = to_amdgpu_bo_user(bo);
+       ubo->tiling_flags = tiling_flags;
        return 0;
 }
 
@@ -1147,10 +1165,14 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
  */
 void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
 {
+       struct amdgpu_bo_user *ubo;
+
+       BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
        dma_resv_assert_held(bo->tbo.base.resv);
+       ubo = to_amdgpu_bo_user(bo);
 
        if (tiling_flags)
-               *tiling_flags = bo->tiling_flags;
+               *tiling_flags = ubo->tiling_flags;
 }
 
 /**
@@ -1169,13 +1191,16 @@ void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
 int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
                            uint32_t metadata_size, uint64_t flags)
 {
+       struct amdgpu_bo_user *ubo;
        void *buffer;
 
+       BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
+       ubo = to_amdgpu_bo_user(bo);
        if (!metadata_size) {
-               if (bo->metadata_size) {
-                       kfree(bo->metadata);
-                       bo->metadata = NULL;
-                       bo->metadata_size = 0;
+               if (ubo->metadata_size) {
+                       kfree(ubo->metadata);
+                       ubo->metadata = NULL;
+                       ubo->metadata_size = 0;
                }
                return 0;
        }
@@ -1187,10 +1212,10 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
        if (buffer == NULL)
                return -ENOMEM;
 
-       kfree(bo->metadata);
-       bo->metadata_flags = flags;
-       bo->metadata = buffer;
-       bo->metadata_size = metadata_size;
+       kfree(ubo->metadata);
+       ubo->metadata_flags = flags;
+       ubo->metadata = buffer;
+       ubo->metadata_size = metadata_size;
 
        return 0;
 }
@@ -1214,21 +1239,25 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
                           size_t buffer_size, uint32_t *metadata_size,
                           uint64_t *flags)
 {
+       struct amdgpu_bo_user *ubo;
+
        if (!buffer && !metadata_size)
                return -EINVAL;
 
+       BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
+       ubo = to_amdgpu_bo_user(bo);
        if (buffer) {
-               if (buffer_size < bo->metadata_size)
+               if (buffer_size < ubo->metadata_size)
                        return -EINVAL;
 
-               if (bo->metadata_size)
-                       memcpy(buffer, bo->metadata, bo->metadata_size);
+               if (ubo->metadata_size)
+                       memcpy(buffer, ubo->metadata, ubo->metadata_size);
        }
 
        if (metadata_size)
-               *metadata_size = bo->metadata_size;
+               *metadata_size = ubo->metadata_size;
        if (flags)
-               *flags = bo->metadata_flags;
+               *flags = ubo->metadata_flags;
 
        return 0;
 }
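
With tiling and metadata state moved into struct amdgpu_bo_user (see the header diff below), GEM-facing code is expected to allocate through the new helper instead of amdgpu_bo_create(). An illustrative, non-buildable fragment of a hypothetical caller; the field values are invented for the example:

    struct amdgpu_bo_param bp = {
            .size       = size,                     /* caller-supplied */
            .byte_align = PAGE_SIZE,
            .domain     = AMDGPU_GEM_DOMAIN_GTT,
            .type       = ttm_bo_type_device,       /* a user BO */
            .resv       = NULL,
            /* bo_ptr_size is filled in by amdgpu_bo_create_user() itself */
    };
    struct amdgpu_bo_user *ubo;
    int r = amdgpu_bo_create_user(adev, &bp, &ubo);

    if (r)
            return r;
    /* tiling/metadata accessors still take the embedded base object */
    r = amdgpu_bo_set_tiling_flags(&ubo->bo, tiling_flags);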
index 54ceb06..2d1fefb 100644 (file)
 #define AMDGPU_BO_INVALID_OFFSET       LONG_MAX
 #define AMDGPU_BO_MAX_PLACEMENTS       3
 
+#define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo)
+
 struct amdgpu_bo_param {
        unsigned long                   size;
        int                             byte_align;
+       u32                             bo_ptr_size;
        u32                             domain;
        u32                             preferred_domain;
        u64                             flags;
@@ -89,10 +92,6 @@ struct amdgpu_bo {
        struct ttm_buffer_object        tbo;
        struct ttm_bo_kmap_obj          kmap;
        u64                             flags;
-       u64                             tiling_flags;
-       u64                             metadata_flags;
-       void                            *metadata;
-       u32                             metadata_size;
        unsigned                        prime_shared_count;
        /* per VM structure for page tables and with virtual addresses */
        struct amdgpu_vm_bo_base        *vm_bo;
@@ -111,6 +110,15 @@ struct amdgpu_bo {
        struct kgd_mem                  *kfd_bo;
 };
 
+struct amdgpu_bo_user {
+       struct amdgpu_bo                bo;
+       u64                             tiling_flags;
+       u64                             metadata_flags;
+       void                            *metadata;
+       u32                             metadata_size;
+};
+
 static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
 {
        return container_of(tbo, struct amdgpu_bo, tbo);
@@ -254,6 +262,9 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
 int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
                               uint64_t offset, uint64_t size, uint32_t domain,
                               struct amdgpu_bo **bo_ptr, void **cpu_addr);
+int amdgpu_bo_create_user(struct amdgpu_device *adev,
+                         struct amdgpu_bo_param *bp,
+                         struct amdgpu_bo_user **ubo_ptr);
 void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
                           void **cpu_addr);
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
@@ -268,8 +279,6 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo);
 int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
 int amdgpu_bo_init(struct amdgpu_device *adev);
 void amdgpu_bo_fini(struct amdgpu_device *adev);
-int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
-                               struct vm_area_struct *vma);
 int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
 void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
 int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
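
The bo_ptr_size/to_amdgpu_bo_user() pair implements C-style subclassing: the creation path allocates the larger wrapper while callers keep passing the embedded base, and accessors recover the wrapper with container_of(). This only works because the base sits at offset 0 of the wrapper, which is what lets kzalloc(bp->bo_ptr_size, ...) hand back a pointer usable as either type. A standalone illustration with mock types, not the driver structs:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct bo { unsigned flags; };              /* stand-in for amdgpu_bo */
    struct bo_user {                            /* stand-in for amdgpu_bo_user */
            struct bo bo;                       /* base must be the first member */
            unsigned long long tiling_flags;
    };
    #define to_bo_user(abo) container_of((abo), struct bo_user, bo)

    int main(void)
    {
            /* allocate the larger wrapper but hand out the base pointer,
             * exactly what bo_ptr_size + kzalloc() do in the driver */
            struct bo_user *ubo = calloc(1, sizeof(*ubo));
            struct bo *base = &ubo->bo;

            to_bo_user(base)->tiling_flags = 0x42;  /* recover the wrapper */
            printf("tiling_flags = 0x%llx\n", ubo->tiling_flags);
            free(ubo);
            return 0;
    }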
index bae304b..9e769cf 100644 (file)
@@ -556,6 +556,24 @@ int psp_get_fw_attestation_records_addr(struct psp_context *psp,
        return ret;
 }
 
+static int psp_boot_config_set(struct amdgpu_device *adev)
+{
+       struct psp_context *psp = &adev->psp;
+       struct psp_gfx_cmd_resp *cmd = psp->cmd;
+
+       if (adev->asic_type != CHIP_SIENNA_CICHLID)
+               return 0;
+
+       memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
+
+       cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
+       cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
+       cmd->cmd.boot_cfg.boot_config = BOOT_CONFIG_GECC;
+       cmd->cmd.boot_cfg.boot_config_valid = BOOT_CONFIG_GECC;
+
+       return psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+}
+
 static int psp_rl_load(struct amdgpu_device *adev)
 {
        struct psp_context *psp = &adev->psp;
@@ -1912,6 +1930,11 @@ static int psp_hw_start(struct psp_context *psp)
                return ret;
        }
 
+       ret = psp_boot_config_set(adev);
+       if (ret)
+               DRM_WARN("PSP set boot config failed\n");
+
        ret = psp_tmr_init(psp);
        if (ret) {
                DRM_ERROR("PSP tmr init failed!\n");
@@ -2146,9 +2169,13 @@ static int psp_load_smu_fw(struct psp_context *psp)
        if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
                return 0;
 
-
-       if (amdgpu_in_reset(adev) && ras && ras->supported &&
-               adev->asic_type == CHIP_ARCTURUS) {
+       if ((amdgpu_in_reset(adev) &&
+            ras && ras->supported &&
+            (adev->asic_type == CHIP_ARCTURUS ||
+             adev->asic_type == CHIP_VEGA20)) ||
+            (adev->in_runpm &&
+             adev->asic_type >= CHIP_NAVI10 &&
+             adev->asic_type <= CHIP_NAVI12)) {
                ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
                if (ret) {
                        DRM_WARN("Failed to set MP1 state prepare for reload\n");
@@ -2201,6 +2228,22 @@ static bool fw_load_skip_check(struct psp_context *psp,
        return false;
 }
 
+int psp_load_fw_list(struct psp_context *psp,
+                    struct amdgpu_firmware_info **ucode_list, int ucode_count)
+{
+       int ret = 0, i;
+       struct amdgpu_firmware_info *ucode;
+
+       for (i = 0; i < ucode_count; ++i) {
+               ucode = ucode_list[i];
+               psp_print_fw_hdr(psp, ucode);
+               ret = psp_execute_np_fw_load(psp, ucode);
+               if (ret)
+                       return ret;
+       }
+       return ret;
+}
+
 static int psp_np_fw_load(struct psp_context *psp)
 {
        int i, ret;
@@ -2967,7 +3010,7 @@ static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
                return ret;
        }
 
-       return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver);
+       return sysfs_emit(buf, "%x\n", fw_ver);
 }
 
 static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
index 64f1433..46a5328 100644 (file)
@@ -420,4 +420,7 @@ int psp_init_ta_microcode(struct psp_context *psp,
                          const char *chip_name);
 int psp_get_fw_attestation_records_addr(struct psp_context *psp,
                                        uint64_t *output_ptr);
+
+int psp_load_fw_list(struct psp_context *psp,
+                    struct amdgpu_firmware_info **ucode_list, int ucode_count);
 #endif
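
psp_load_fw_list() walks an ordered array of firmware descriptors and loads each in turn, which lets reset code replay a specific subset of ucode. A hypothetical caller, assuming ucode_sdma and ucode_mec point at previously initialized amdgpu_firmware_info entries:

    struct amdgpu_firmware_info *list[] = { ucode_sdma, ucode_mec };
    int r = psp_load_fw_list(psp, list, ARRAY_SIZE(list));

    if (r)
            DRM_ERROR("reloading firmware list failed (%d)\n", r);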
index 0e16683..0541196 100644 (file)
@@ -99,6 +99,49 @@ static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
        return false;
 }
 
+static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
+{
+       struct ras_err_data err_data = {0, 0, 0, NULL};
+       struct eeprom_table_record err_rec;
+
+       if ((address >= adev->gmc.mc_vram_size) ||
+           (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
+               dev_warn(adev->dev,
+                        "RAS WARN: input address 0x%llx is invalid.\n",
+                        address);
+               return -EINVAL;
+       }
+
+       if (amdgpu_ras_check_bad_page(adev, address)) {
+               dev_warn(adev->dev,
+                        "RAS WARN: 0x%llx has been marked as bad page!\n",
+                        address);
+               return 0;
+       }
+
+       memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
+
+       err_rec.address = address;
+       err_rec.retired_page = address >> AMDGPU_GPU_PAGE_SHIFT;
+       err_rec.ts = (uint64_t)ktime_get_real_seconds();
+       err_rec.err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+
+       err_data.err_addr = &err_rec;
+       err_data.err_addr_cnt = 1;
+
+       if (amdgpu_bad_page_threshold != 0) {
+               amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
+                                        err_data.err_addr_cnt);
+               amdgpu_ras_save_bad_pages(adev);
+       }
+
+       dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
+       dev_warn(adev->dev, "Clear EEPROM:\n");
+       dev_warn(adev->dev, "    echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");
+
+       return 0;
+}
+
 static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
                                        size_t size, loff_t *pos)
 {
@@ -178,11 +221,25 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
                op = 1;
        else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
                op = 2;
+       else if (strstr(str, "retire_page"))
+               op = 3;
        else if (str[0] && str[1] && str[2] && str[3])
                /* ascii string, but commands are not matched. */
                return -EINVAL;
 
        if (op != -1) {
+               if (op == 3) {
+                       /* try the hex form first: "%llu" would consume just
+                        * the leading '0' of "0x..." and succeed with 0 */
+                       if (sscanf(str, "%*s 0x%llx", &address) != 1)
+                               if (sscanf(str, "%*s %llu", &address) != 1)
+                                       return -EINVAL;
+
+                       data->op = op;
+                       data->inject.address = address;
+
+                       return 0;
+               }
+
                if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
                        return -EINVAL;
 
@@ -310,6 +367,16 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
        if (ret)
                return -EINVAL;
 
+       if (data.op == 3) {
+               ret = amdgpu_reserve_page_direct(adev, data.inject.address);
+               if (!ret)
+                       return size;
+               else
+                       return ret;
+       }
+
        if (!amdgpu_ras_is_supported(adev, data.head.block))
                return -EINVAL;
 
@@ -431,15 +498,13 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
        };
 
        if (!amdgpu_ras_get_error_query_ready(obj->adev))
-               return snprintf(buf, PAGE_SIZE,
-                               "Query currently inaccessible\n");
+               return sysfs_emit(buf, "Query currently inaccessible\n");
 
        if (amdgpu_ras_query_error_status(obj->adev, &info))
                return -EINVAL;
 
-       return snprintf(buf, PAGE_SIZE, "%s: %lu\n%s: %lu\n",
-                       "ue", info.ue_count,
-                       "ce", info.ce_count);
+       return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
+                         "ce", info.ce_count);
 }
 
 /* obj begin */
@@ -449,11 +514,10 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
 
 static inline void put_obj(struct ras_manager *obj)
 {
-       if (obj && --obj->use == 0)
+       if (obj && (--obj->use == 0))
                list_del(&obj->node);
-       if (obj && obj->use < 0) {
-                DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", obj->head.name);
-       }
+       if (obj && (obj->use < 0))
+               DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", obj->head.name);
 }
 
 /* make one obj and return it. */
@@ -777,13 +841,15 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
 
        switch (info->head.block) {
        case AMDGPU_RAS_BLOCK__UMC:
-               if (adev->umc.funcs->query_ras_error_count)
-                       adev->umc.funcs->query_ras_error_count(adev, &err_data);
+               if (adev->umc.ras_funcs &&
+                   adev->umc.ras_funcs->query_ras_error_count)
+                       adev->umc.ras_funcs->query_ras_error_count(adev, &err_data);
                /* umc query_ras_error_address is also responsible for clearing
                 * error status
                 */
-               if (adev->umc.funcs->query_ras_error_address)
-                       adev->umc.funcs->query_ras_error_address(adev, &err_data);
+               if (adev->umc.ras_funcs &&
+                   adev->umc.ras_funcs->query_ras_error_address)
+                       adev->umc.ras_funcs->query_ras_error_address(adev, &err_data);
                break;
        case AMDGPU_RAS_BLOCK__SDMA:
                if (adev->sdma.funcs->query_ras_error_count) {
@@ -793,25 +859,32 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
                }
                break;
        case AMDGPU_RAS_BLOCK__GFX:
-               if (adev->gfx.funcs->query_ras_error_count)
-                       adev->gfx.funcs->query_ras_error_count(adev, &err_data);
+               if (adev->gfx.ras_funcs &&
+                   adev->gfx.ras_funcs->query_ras_error_count)
+                       adev->gfx.ras_funcs->query_ras_error_count(adev, &err_data);
 
-               if (adev->gfx.funcs->query_ras_error_status)
-                       adev->gfx.funcs->query_ras_error_status(adev);
+               if (adev->gfx.ras_funcs &&
+                   adev->gfx.ras_funcs->query_ras_error_status)
+                       adev->gfx.ras_funcs->query_ras_error_status(adev);
                break;
        case AMDGPU_RAS_BLOCK__MMHUB:
-               if (adev->mmhub.funcs->query_ras_error_count)
-                       adev->mmhub.funcs->query_ras_error_count(adev, &err_data);
+               if (adev->mmhub.ras_funcs &&
+                   adev->mmhub.ras_funcs->query_ras_error_count)
+                       adev->mmhub.ras_funcs->query_ras_error_count(adev, &err_data);
 
-               if (adev->mmhub.funcs->query_ras_error_status)
-                       adev->mmhub.funcs->query_ras_error_status(adev);
+               if (adev->mmhub.ras_funcs &&
+                   adev->mmhub.ras_funcs->query_ras_error_status)
+                       adev->mmhub.ras_funcs->query_ras_error_status(adev);
                break;
        case AMDGPU_RAS_BLOCK__PCIE_BIF:
-               if (adev->nbio.funcs->query_ras_error_count)
-                       adev->nbio.funcs->query_ras_error_count(adev, &err_data);
+               if (adev->nbio.ras_funcs &&
+                   adev->nbio.ras_funcs->query_ras_error_count)
+                       adev->nbio.ras_funcs->query_ras_error_count(adev, &err_data);
                break;
        case AMDGPU_RAS_BLOCK__XGMI_WAFL:
-               amdgpu_xgmi_query_ras_error_count(adev, &err_data);
+               if (adev->gmc.xgmi.ras_funcs &&
+                   adev->gmc.xgmi.ras_funcs->query_ras_error_count)
+                       adev->gmc.xgmi.ras_funcs->query_ras_error_count(adev, &err_data);
                break;
        default:
                break;
@@ -848,15 +921,18 @@ int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
 
        switch (block) {
        case AMDGPU_RAS_BLOCK__GFX:
-               if (adev->gfx.funcs->reset_ras_error_count)
-                       adev->gfx.funcs->reset_ras_error_count(adev);
+               if (adev->gfx.ras_funcs &&
+                   adev->gfx.ras_funcs->reset_ras_error_count)
+                       adev->gfx.ras_funcs->reset_ras_error_count(adev);
 
-               if (adev->gfx.funcs->reset_ras_error_status)
-                       adev->gfx.funcs->reset_ras_error_status(adev);
+               if (adev->gfx.ras_funcs &&
+                   adev->gfx.ras_funcs->reset_ras_error_status)
+                       adev->gfx.ras_funcs->reset_ras_error_status(adev);
                break;
        case AMDGPU_RAS_BLOCK__MMHUB:
-               if (adev->mmhub.funcs->reset_ras_error_count)
-                       adev->mmhub.funcs->reset_ras_error_count(adev);
+               if (adev->mmhub.ras_funcs &&
+                   adev->mmhub.ras_funcs->reset_ras_error_count)
+                       adev->mmhub.ras_funcs->reset_ras_error_count(adev);
                break;
        case AMDGPU_RAS_BLOCK__SDMA:
                if (adev->sdma.funcs->reset_ras_error_count)
@@ -921,12 +997,14 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
 
        switch (info->head.block) {
        case AMDGPU_RAS_BLOCK__GFX:
-               if (adev->gfx.funcs->ras_error_inject)
-                       ret = adev->gfx.funcs->ras_error_inject(adev, info);
+               if (adev->gfx.ras_funcs &&
+                   adev->gfx.ras_funcs->ras_error_inject)
+                       ret = adev->gfx.ras_funcs->ras_error_inject(adev, info);
                else
                        ret = -EINVAL;
                break;
        case AMDGPU_RAS_BLOCK__UMC:
+       case AMDGPU_RAS_BLOCK__SDMA:
        case AMDGPU_RAS_BLOCK__MMHUB:
        case AMDGPU_RAS_BLOCK__PCIE_BIF:
                ret = psp_ras_trigger_error(&adev->psp, &block_info);
@@ -1508,12 +1586,14 @@ static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
         */
        switch (info->head.block) {
        case AMDGPU_RAS_BLOCK__GFX:
-               if (adev->gfx.funcs->query_ras_error_status)
-                       adev->gfx.funcs->query_ras_error_status(adev);
+               if (adev->gfx.ras_funcs &&
+                   adev->gfx.ras_funcs->query_ras_error_status)
+                       adev->gfx.ras_funcs->query_ras_error_status(adev);
                break;
        case AMDGPU_RAS_BLOCK__MMHUB:
-               if (adev->mmhub.funcs->query_ras_error_status)
-                       adev->mmhub.funcs->query_ras_error_status(adev);
+               if (adev->mmhub.ras_funcs &&
+                   adev->mmhub.ras_funcs->query_ras_error_status)
+                       adev->mmhub.ras_funcs->query_ras_error_status(adev);
                break;
        default:
                break;
@@ -1933,15 +2013,13 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
        return 0;
 }
 
-static int amdgpu_ras_check_asic_type(struct amdgpu_device *adev)
+static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
 {
-       if (adev->asic_type != CHIP_VEGA10 &&
-               adev->asic_type != CHIP_VEGA20 &&
-               adev->asic_type != CHIP_ARCTURUS &&
-               adev->asic_type != CHIP_SIENNA_CICHLID)
-               return 1;
-       else
-               return 0;
+       return adev->asic_type == CHIP_VEGA10 ||
+               adev->asic_type == CHIP_VEGA20 ||
+               adev->asic_type == CHIP_ARCTURUS ||
+               adev->asic_type == CHIP_ALDEBARAN ||
+               adev->asic_type == CHIP_SIENNA_CICHLID;
 }
 
 /*
@@ -1960,22 +2038,32 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
        *supported = 0;
 
        if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
-               amdgpu_ras_check_asic_type(adev))
+           !amdgpu_ras_asic_supported(adev))
                return;
 
-       if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
-               dev_info(adev->dev, "MEM ECC is active.\n");
-               *hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC |
-                               1 << AMDGPU_RAS_BLOCK__DF);
-       } else
-               dev_info(adev->dev, "MEM ECC is not presented.\n");
+       if (!adev->gmc.xgmi.connected_to_cpu) {
+               if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
+                       dev_info(adev->dev, "MEM ECC is active.\n");
+                       *hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC |
+                                       1 << AMDGPU_RAS_BLOCK__DF);
+               } else {
+                       dev_info(adev->dev, "MEM ECC is not presented.\n");
+               }
 
-       if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
-               dev_info(adev->dev, "SRAM ECC is active.\n");
-               *hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
-                               1 << AMDGPU_RAS_BLOCK__DF);
-       } else
-               dev_info(adev->dev, "SRAM ECC is not presented.\n");
+               if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
+                       dev_info(adev->dev, "SRAM ECC is active.\n");
+                       *hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
+                                       1 << AMDGPU_RAS_BLOCK__DF);
+               } else {
+                       dev_info(adev->dev, "SRAM ECC is not presented.\n");
+               }
+       } else {
+               /* the driver manages RAS for only a few IP blocks
+                * when the GPU is connected to the CPU through XGMI */
+               *hw_supported |= (1 << AMDGPU_RAS_BLOCK__GFX |
+                               1 << AMDGPU_RAS_BLOCK__SDMA |
+                               1 << AMDGPU_RAS_BLOCK__MMHUB);
+       }
 
        /* hw_supported needs to be aligned with RAS block mask. */
        *hw_supported &= AMDGPU_RAS_BLOCK_MASK;
@@ -2024,14 +2112,31 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
        /* Might need get this flag from vbios. */
        con->flags = RAS_DEFAULT_FLAGS;
 
-       if (adev->nbio.funcs->init_ras_controller_interrupt) {
-               r = adev->nbio.funcs->init_ras_controller_interrupt(adev);
+       /* initialize nbio ras function ahead of any other
+        * ras functions so hardware fatal error interrupt
+        * can be enabled as early as possible */
+       switch (adev->asic_type) {
+       case CHIP_VEGA20:
+       case CHIP_ARCTURUS:
+       case CHIP_ALDEBARAN:
+               if (!adev->gmc.xgmi.connected_to_cpu)
+                       adev->nbio.ras_funcs = &nbio_v7_4_ras_funcs;
+               break;
+       default:
+               /* nbio ras is not available */
+               break;
+       }
+
+       if (adev->nbio.ras_funcs &&
+           adev->nbio.ras_funcs->init_ras_controller_interrupt) {
+               r = adev->nbio.ras_funcs->init_ras_controller_interrupt(adev);
                if (r)
                        goto release_con;
        }
 
-       if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) {
-               r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev);
+       if (adev->nbio.ras_funcs &&
+           adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt) {
+               r = adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt(adev);
                if (r)
                        goto release_con;
        }
@@ -2052,6 +2157,32 @@ release_con:
        return r;
 }
 
+static int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
+{
+       if (adev->gmc.xgmi.connected_to_cpu)
+               return 1;
+       return 0;
+}
+
+static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
+                                       struct ras_common_if *ras_block)
+{
+       struct ras_query_if info = {
+               .head = *ras_block,
+       };
+
+       if (!amdgpu_persistent_edc_harvesting_supported(adev))
+               return 0;
+
+       if (amdgpu_ras_query_error_status(adev, &info) != 0)
+               DRM_WARN("RAS init harvest failure");
+
+       if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
+               DRM_WARN("RAS init harvest reset failure");
+
+       return 0;
+}
+
 /* helper function to handle common stuff in ip late init phase */
 int amdgpu_ras_late_init(struct amdgpu_device *adev,
                         struct ras_common_if *ras_block,
@@ -2081,6 +2212,9 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
                        return r;
        }
 
+       /* check for errors on warm reset on ASICs with persistent EDC harvesting */
+       amdgpu_persistent_edc_harvesting(adev, ras_block);
+
        /* in resume phase, no need to create ras fs node */
        if (adev->in_suspend || amdgpu_in_reset(adev))
                return 0;
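
The retire_page parser above accepts the address in decimal or hex. A standalone sketch of the same two-stage sscanf() fallback; the hex form has to be tried first, since "%llu" would consume just the leading '0' of "0x..." and report success with address == 0:

    #include <stdio.h>

    static int parse_address(const char *str, unsigned long long *address)
    {
            if (sscanf(str, "%*s 0x%llx", address) == 1)
                    return 0;
            if (sscanf(str, "%*s %llu", address) == 1)
                    return 0;
            return -1;
    }

    int main(void)
    {
            unsigned long long a;

            if (!parse_address("retire_page 0x1000", &a))
                    printf("hex    : 0x%llx\n", a);  /* 0x1000 */
            if (!parse_address("retire_page 4096", &a))
                    printf("decimal: 0x%llx\n", a);  /* 0x1000 */
            return 0;
    }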
index a05dbbb..f40c871 100644 (file)
@@ -31,6 +31,7 @@
 #define EEPROM_I2C_TARGET_ADDR_ARCTURUS                0xA8
 #define EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342   0xA0
 #define EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID   0xA0
+#define EEPROM_I2C_TARGET_ADDR_ALDEBARAN        0xA0
 
 /*
 * The two macros below represent the actual size in bytes that
@@ -64,7 +65,8 @@ static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
 {
        if ((adev->asic_type == CHIP_VEGA20) ||
            (adev->asic_type == CHIP_ARCTURUS) ||
-           (adev->asic_type == CHIP_SIENNA_CICHLID))
+           (adev->asic_type == CHIP_SIENNA_CICHLID) ||
+           (adev->asic_type == CHIP_ALDEBARAN))
                return true;
 
        return false;
@@ -106,6 +108,10 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
                *i2c_addr = EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID;
                break;
 
+       case CHIP_ALDEBARAN:
+               *i2c_addr = EEPROM_I2C_TARGET_ADDR_ALDEBARAN;
+               break;
+
        default:
                return false;
        }
index b49a61d..40f2adf 100644 (file)
@@ -64,7 +64,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
        BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
 
        node = res->mm_node;
-       while (start > node->size << PAGE_SHIFT)
+       while (start >= node->size << PAGE_SHIFT)
                start -= node++->size << PAGE_SHIFT;
 
        cur->start = (node->start << PAGE_SHIFT) + start;
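
The one-character change above (> to >=) fixes a boundary walk: when start lands exactly on a node boundary, the strict comparison stops one node early and cur->start is then computed against the wrong node. A standalone reproduction with stand-in types:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    struct node { unsigned long start, size; };   /* size in pages */

    static const struct node *seek(const struct node *node,
                                   unsigned long start, int fixed)
    {
            /* the buggy variant uses '>', the fixed one '>=' */
            while (fixed ? start >= node->size << PAGE_SHIFT
                         : start >  node->size << PAGE_SHIFT)
                    start -= node++->size << PAGE_SHIFT;
            return node;
    }

    int main(void)
    {
            struct node nodes[2] = { { 0, 1 }, { 8, 1 } }; /* two 1-page nodes */
            unsigned long start = 1ul << PAGE_SHIFT;       /* exactly node 1 */

            printf("buggy : lands in node %ld\n",
                   (long)(seek(nodes, start, 0) - nodes));  /* 0: wrong */
            printf("fixed : lands in node %ld\n",
                   (long)(seek(nodes, start, 1) - nodes));  /* 1: right */
            return 0;
    }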
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
new file mode 100644 (file)
index 0000000..02afd41
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu_reset.h"
+#include "aldebaran.h"
+
+int amdgpu_reset_add_handler(struct amdgpu_reset_control *reset_ctl,
+                            struct amdgpu_reset_handler *handler)
+{
+       /* TODO: Check if handler exists? */
+       list_add_tail(&handler->handler_list, &reset_ctl->reset_handlers);
+       return 0;
+}
+
+int amdgpu_reset_init(struct amdgpu_device *adev)
+{
+       int ret = 0;
+
+       switch (adev->asic_type) {
+       case CHIP_ALDEBARAN:
+               ret = aldebaran_reset_init(adev);
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
+int amdgpu_reset_fini(struct amdgpu_device *adev)
+{
+       int ret = 0;
+
+       switch (adev->asic_type) {
+       case CHIP_ALDEBARAN:
+               ret = aldebaran_reset_fini(adev);
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
+int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev,
+                                  struct amdgpu_reset_context *reset_context)
+{
+       struct amdgpu_reset_handler *reset_handler = NULL;
+
+       if (adev->reset_cntl && adev->reset_cntl->get_reset_handler)
+               reset_handler = adev->reset_cntl->get_reset_handler(
+                       adev->reset_cntl, reset_context);
+       if (!reset_handler)
+               return -ENOSYS;
+
+       return reset_handler->prepare_hwcontext(adev->reset_cntl,
+                                               reset_context);
+}
+
+int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
+                              struct amdgpu_reset_context *reset_context)
+{
+       int ret;
+       struct amdgpu_reset_handler *reset_handler = NULL;
+
+       if (adev->reset_cntl && adev->reset_cntl->get_reset_handler)
+               reset_handler = adev->reset_cntl->get_reset_handler(
+                       adev->reset_cntl, reset_context);
+       if (!reset_handler)
+               return -ENOSYS;
+
+       ret = reset_handler->perform_reset(adev->reset_cntl, reset_context);
+       if (ret)
+               return ret;
+
+       return reset_handler->restore_hwcontext(adev->reset_cntl,
+                                               reset_context);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
new file mode 100644 (file)
index 0000000..e00d38d
--- /dev/null
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_RESET_H__
+#define __AMDGPU_RESET_H__
+
+#include "amdgpu.h"
+
+enum AMDGPU_RESET_FLAGS {
+       AMDGPU_NEED_FULL_RESET = 0,
+       AMDGPU_SKIP_HW_RESET = 1,
+};
+
+struct amdgpu_reset_context {
+       enum amd_reset_method method;
+       struct amdgpu_device *reset_req_dev;
+       struct amdgpu_job *job;
+       struct amdgpu_hive_info *hive;
+       unsigned long flags;
+};
+
+struct amdgpu_reset_handler {
+       enum amd_reset_method reset_method;
+       struct list_head handler_list;
+       int (*prepare_env)(struct amdgpu_reset_control *reset_ctl,
+                          struct amdgpu_reset_context *context);
+       int (*prepare_hwcontext)(struct amdgpu_reset_control *reset_ctl,
+                                struct amdgpu_reset_context *context);
+       int (*perform_reset)(struct amdgpu_reset_control *reset_ctl,
+                            struct amdgpu_reset_context *context);
+       int (*restore_hwcontext)(struct amdgpu_reset_control *reset_ctl,
+                                struct amdgpu_reset_context *context);
+       int (*restore_env)(struct amdgpu_reset_control *reset_ctl,
+                          struct amdgpu_reset_context *context);
+
+       int (*do_reset)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_reset_control {
+       void *handle;
+       struct work_struct reset_work;
+       struct mutex reset_lock;
+       struct list_head reset_handlers;
+       atomic_t in_reset;
+       enum amd_reset_method active_reset;
+       struct amdgpu_reset_handler *(*get_reset_handler)(
+               struct amdgpu_reset_control *reset_ctl,
+               struct amdgpu_reset_context *context);
+       void (*async_reset)(struct work_struct *work);
+};
+
+int amdgpu_reset_init(struct amdgpu_device *adev);
+int amdgpu_reset_fini(struct amdgpu_device *adev);
+
+int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev,
+                                  struct amdgpu_reset_context *reset_context);
+
+int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
+                              struct amdgpu_reset_context *reset_context);
+
+int amdgpu_reset_add_handler(struct amdgpu_reset_control *reset_ctl,
+                            struct amdgpu_reset_handler *handler);
+
+#endif
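
The framework routes resets through handlers registered on reset_cntl->reset_handlers and selected by get_reset_handler(). An illustrative registration, loosely modeled on what an ASIC-specific init such as aldebaran_reset_init() would do; the my_* names are invented:

    /* hypothetical handler: only the hooks the core dereferences in
     * amdgpu_reset_prepare_hwcontext()/amdgpu_reset_perform_reset() */
    static struct amdgpu_reset_handler my_mode2_handler = {
            .reset_method      = AMD_RESET_METHOD_MODE2,
            .prepare_hwcontext = my_prepare_hwcontext,
            .perform_reset     = my_perform_reset,
            .restore_hwcontext = my_restore_hwcontext,
    };

    static int my_reset_init(struct amdgpu_device *adev)
    {
            struct amdgpu_reset_control *ctl = adev->reset_cntl;

            INIT_LIST_HEAD(&ctl->reset_handlers);
            return amdgpu_reset_add_handler(ctl, &my_mode2_handler);
    }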
index b644c78..688624e 100644 (file)
@@ -164,7 +164,8 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
  */
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
-                    unsigned int irq_type, unsigned int hw_prio)
+                    unsigned int irq_type, unsigned int hw_prio,
+                    atomic_t *sched_score)
 {
        int r;
        int sched_hw_submission = amdgpu_sched_hw_submission;
@@ -189,7 +190,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                ring->adev = adev;
                ring->idx = adev->num_rings++;
                adev->rings[ring->idx] = ring;
-               r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission);
+               r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission,
+                                                 sched_score);
                if (r)
                        return r;
        }
index 56acec1..ca16228 100644 (file)
@@ -111,7 +111,8 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
 void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
 
 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
-                                 unsigned num_hw_submission);
+                                 unsigned num_hw_submission,
+                                 atomic_t *sched_score);
 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
                                   struct amdgpu_irq_src *irq_src,
                                   unsigned irq_type);
@@ -282,7 +283,8 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                     unsigned int ring_size, struct amdgpu_irq_src *irq_src,
-                    unsigned int irq_type, unsigned int prio);
+                    unsigned int irq_type, unsigned int prio,
+                    atomic_t *sched_score);
 void amdgpu_ring_fini(struct amdgpu_ring *ring);
 void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
                                                uint32_t reg0, uint32_t val0,
index aeaaae7..4fc2ce8 100644 (file)
@@ -127,7 +127,8 @@ struct amdgpu_rlc_funcs {
        void (*reset)(struct amdgpu_device *adev);
        void (*start)(struct amdgpu_device *adev);
        void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid);
-       void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v);
+       void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag);
+       u32 (*rlcg_rreg)(struct amdgpu_device *adev, u32 offset, u32 flag);
        bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg);
 };
 
index 9cf856c..5369c8d 100644 (file)
@@ -95,9 +95,7 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u
        struct drm_device *dev = adev_to_drm(adev);
        uint32_t phy_id;
        uint32_t op;
-       int i;
        char str[64];
-       char i2c_output[256];
        int ret;
 
        if (*pos || size > sizeof(str) - 1)
@@ -139,11 +137,9 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u
                ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
                if (!ret) {
                        if (securedisplay_cmd->status == TA_SECUREDISPLAY_STATUS__SUCCESS) {
-                               memset(i2c_output,  0, sizeof(i2c_output));
-                               for (i = 0; i < TA_SECUREDISPLAY_I2C_BUFFER_SIZE; i++)
-                                       sprintf(i2c_output, "%s 0x%X", i2c_output,
-                                               securedisplay_cmd->securedisplay_out_message.send_roi_crc.i2c_buf[i]);
-                               dev_info(adev->dev, "SECUREDISPLAY: I2C buffer out put is :%s\n", i2c_output);
+                               dev_info(adev->dev, "SECUREDISPLAY: I2C buffer out put is: %*ph\n",
+                                        TA_SECUREDISPLAY_I2C_BUFFER_SIZE,
+                                        securedisplay_cmd->securedisplay_out_message.send_roi_crc.i2c_buf);
                        } else {
                                psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
                        }
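
The rewrite above relies on the kernel's %*ph printk extension, which hex-dumps a small buffer (up to 64 bytes) and replaces the removed sprintf-into-itself loop, which grew quadratically and could overrun i2c_output. Usage shape:

    u8 buf[4] = { 0xde, 0xad, 0xbe, 0xef };

    dev_info(dev, "crc: %*ph\n", (int)sizeof(buf), buf);
    /* prints: crc: de ad be ef */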
index 7b230bc..909d830 100644 (file)
@@ -62,6 +62,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
        bp.flags = 0;
        bp.type = ttm_bo_type_kernel;
        bp.resv = NULL;
+       bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 
        r = amdgpu_bo_create(adev, &bp, &vram_obj);
        if (r) {
index 1c61314..3bef043 100644 (file)
@@ -823,15 +823,14 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
-       int r;
-
        int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
        enum dma_data_direction direction = write ?
                DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+       int r;
 
        /* Allocate an SG array and squash pages into it */
        r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
-                                     ttm->num_pages << PAGE_SHIFT,
+                                     (u64)ttm->num_pages << PAGE_SHIFT,
                                      GFP_KERNEL);
        if (r)
                goto release_sg;
@@ -861,13 +860,12 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
-
        int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
        enum dma_data_direction direction = write ?
                DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
 
        /* double check that we don't free the table twice */
-       if (!ttm->sg->sgl)
+       if (!ttm->sg || !ttm->sg->sgl)
                return;
 
        /* unmap the pages mapped to the device */
@@ -1087,13 +1085,13 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
        int r;
 
-       if (!gtt->bound)
-               return;
-
        /* if the pages have userptr pinning then clear that first */
        if (gtt->userptr)
                amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
 
+       if (!gtt->bound)
+               return;
+
        if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
                return;
 
@@ -1503,7 +1501,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
                                memcpy(buf, &value, bytes);
                        }
                } else {
-                       bytes = cursor.size & 0x3ull;
+                       bytes = cursor.size & ~0x3ULL;
                        amdgpu_device_vram_access(adev, cursor.start,
                                                  (uint32_t *)buf, bytes,
                                                  write);
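
Two fixes above are easy to misread. The (u64) cast keeps num_pages << PAGE_SHIFT from overflowing 32 bits when an sg table is built for a BO of 4 GiB or more, and the access-memory mask was simply inverted: size & 0x3 keeps only the 0-3 trailing bytes, while the intent was to round the chunk down to whole dwords. A standalone check of the mask:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long size = 0x1007;   /* 4103 bytes in this chunk */

            printf("size &  0x3 = %llu bytes\n", size & 0x3ull);   /* 3: bug */
            printf("size & ~0x3 = %llu bytes\n", size & ~0x3ull);  /* 4100   */
            return 0;
    }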
index a2975c8..ea6f99b 100644 (file)
@@ -60,8 +60,9 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
        }
 
        /* ras init of specific umc version */
-       if (adev->umc.funcs && adev->umc.funcs->err_cnt_init)
-               adev->umc.funcs->err_cnt_init(adev);
+       if (adev->umc.ras_funcs &&
+           adev->umc.ras_funcs->err_cnt_init)
+               adev->umc.ras_funcs->err_cnt_init(adev);
 
        return 0;
 
@@ -95,12 +96,12 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
        struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
 
        kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
-       if (adev->umc.funcs &&
-           adev->umc.funcs->query_ras_error_count)
-           adev->umc.funcs->query_ras_error_count(adev, ras_error_status);
+       if (adev->umc.ras_funcs &&
+           adev->umc.ras_funcs->query_ras_error_count)
+           adev->umc.ras_funcs->query_ras_error_count(adev, ras_error_status);
 
-       if (adev->umc.funcs &&
-           adev->umc.funcs->query_ras_error_address &&
+       if (adev->umc.ras_funcs &&
+           adev->umc.ras_funcs->query_ras_error_address &&
            adev->umc.max_ras_err_cnt_per_query) {
                err_data->err_addr =
                        kcalloc(adev->umc.max_ras_err_cnt_per_query,
@@ -116,7 +117,7 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
                /* umc query_ras_error_address is also responsible for clearing
                 * error status
                 */
-               adev->umc.funcs->query_ras_error_address(adev, ras_error_status);
+               adev->umc.ras_funcs->query_ras_error_address(adev, ras_error_status);
        }
 
        /* only uncorrectable error needs gpu reset */
index 1838144..bbcccf5 100644 (file)
 #define LOOP_UMC_CH_INST(ch_inst) for ((ch_inst) = 0; (ch_inst) < adev->umc.channel_inst_num; (ch_inst)++)
 #define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst))
 
-struct amdgpu_umc_funcs {
+struct amdgpu_umc_ras_funcs {
        void (*err_cnt_init)(struct amdgpu_device *adev);
        int (*ras_late_init)(struct amdgpu_device *adev);
+       void (*ras_fini)(struct amdgpu_device *adev);
        void (*query_ras_error_count)(struct amdgpu_device *adev,
-                                       void *ras_error_status);
+                                     void *ras_error_status);
        void (*query_ras_error_address)(struct amdgpu_device *adev,
                                        void *ras_error_status);
+};
+
+struct amdgpu_umc_funcs {
        void (*init_registers)(struct amdgpu_device *adev);
 };
 
@@ -59,6 +63,7 @@ struct amdgpu_umc {
        struct ras_common_if *ras_if;
 
        const struct amdgpu_umc_funcs *funcs;
+       const struct amdgpu_umc_ras_funcs *ras_funcs;
 };
 
 int amdgpu_umc_ras_late_init(struct amdgpu_device *adev);
index e2ed468..c6dbc08 100644 (file)
@@ -259,7 +259,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
                if ((adev->asic_type == CHIP_POLARIS10 ||
                     adev->asic_type == CHIP_POLARIS11) &&
                    (adev->uvd.fw_version < FW_1_66_16))
-                       DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
+                       DRM_ERROR("POLARIS10/11 UVD firmware version %u.%u is too old.\n",
                                  version_major, version_minor);
        } else {
                unsigned int enc_major, enc_minor, dec_minor;
index 1843bf8..bc76cab 100644 (file)
@@ -212,6 +212,7 @@ struct amdgpu_vcn_inst {
        void                    *saved_bo;
        struct amdgpu_ring      ring_dec;
        struct amdgpu_ring      ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
+       atomic_t                sched_score;
        struct amdgpu_irq_src   irq;
        struct amdgpu_vcn_reg   external;
        struct amdgpu_bo        *dpg_sram_bo;
index d9ffff8..0c9c525 100644 (file)
@@ -466,6 +466,8 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
                        ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
                adev->virt.gim_feature =
                        ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;
+               adev->virt.reg_access =
+                       ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->reg_access_flags.all;
 
                break;
        default:
@@ -617,6 +619,14 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
                                if (adev->virt.ras_init_done)
                                        amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
                        }
+       } else if (adev->bios != NULL) {
+               adev->virt.fw_reserve.p_pf2vf =
+                       (struct amd_sriov_msg_pf2vf_info_header *)
+                       (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+
+               amdgpu_virt_read_pf2vf_data(adev);
+
+               return;
        }
 
        if (adev->virt.vf2pf_update_interval_ms != 0) {
index 8dd624c..383d4bd 100644 (file)
@@ -104,6 +104,17 @@ enum AMDGIM_FEATURE_FLAG {
        AMDGIM_FEATURE_GIM_MM_BW_MGR = 0x8,
        /* PP ONE VF MODE in GIM */
        AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
+       /* Indirect Reg Access enabled */
+       AMDGIM_FEATURE_INDIRECT_REG_ACCESS = (1 << 5),
+};
+
+enum AMDGIM_REG_ACCESS_FLAG {
+       /* Use PSP to program IH_RB_CNTL */
+       AMDGIM_FEATURE_IH_REG_PSP_EN     = (1 << 0),
+       /* Use RLC to program MMHUB regs */
+       AMDGIM_FEATURE_MMHUB_REG_RLC_EN  = (1 << 1),
+       /* Use RLC to program GC regs */
+       AMDGIM_FEATURE_GC_REG_RLC_EN     = (1 << 2),
 };
 
 struct amdgim_pf2vf_info_v1 {
@@ -217,6 +228,7 @@ struct amdgpu_virt {
        bool tdr_debug;
        struct amdgpu_virt_ras_err_handler_data *virt_eh_data;
        bool ras_init_done;
+       uint32_t reg_access;
 
        /* vf2pf message */
        struct delayed_work vf2pf_work;
@@ -238,6 +250,22 @@ struct amdgpu_virt {
 #define amdgpu_sriov_fullaccess(adev) \
 (amdgpu_sriov_vf((adev)) && !amdgpu_sriov_runtime((adev)))
 
+#define amdgpu_sriov_reg_indirect_en(adev) \
+(amdgpu_sriov_vf((adev)) && \
+       ((adev)->virt.gim_feature & (AMDGIM_FEATURE_INDIRECT_REG_ACCESS)))
+
+#define amdgpu_sriov_reg_indirect_ih(adev) \
+(amdgpu_sriov_vf((adev)) && \
+       ((adev)->virt.reg_access & (AMDGIM_FEATURE_IH_REG_PSP_EN)))
+
+#define amdgpu_sriov_reg_indirect_mmhub(adev) \
+(amdgpu_sriov_vf((adev)) && \
+       ((adev)->virt.reg_access & (AMDGIM_FEATURE_MMHUB_REG_RLC_EN)))
+
+#define amdgpu_sriov_reg_indirect_gc(adev) \
+(amdgpu_sriov_vf((adev)) && \
+       ((adev)->virt.reg_access & (AMDGIM_FEATURE_GC_REG_RLC_EN)))
+
 #define amdgpu_passthrough(adev) \
 ((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
 
index f314e1e..0ffdf84 100644 (file)
@@ -869,6 +869,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
        bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
                AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+       bp->bo_ptr_size = sizeof(struct amdgpu_bo);
        if (vm->use_cpu_for_update)
                bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
        else if (!vm->root.base.bo || vm->root.base.bo->shadow)
@@ -2197,8 +2198,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
        uint64_t eaddr;
 
        /* validate the parameters */
-       if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
-           size == 0 || size & AMDGPU_GPU_PAGE_MASK)
+       if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
+           size == 0 || size & ~PAGE_MASK)
                return -EINVAL;
 
        /* make sure object fit at this offset */
@@ -2263,8 +2264,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
        int r;
 
        /* validate the parameters */
-       if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
-           size == 0 || size & AMDGPU_GPU_PAGE_MASK)
+       if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
+           size == 0 || size & ~PAGE_MASK)
                return -EINVAL;
 
        /* make sure object fit at this offset */
@@ -2409,7 +2410,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
                        after->start = eaddr + 1;
                        after->last = tmp->last;
                        after->offset = tmp->offset;
-                       after->offset += after->start - tmp->start;
+                       after->offset += (after->start - tmp->start) << PAGE_SHIFT;
                        after->flags = tmp->flags;
                        after->bo_va = tmp->bo_va;
                        list_add(&after->list, &tmp->bo_va->invalids);
@@ -3300,7 +3301,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
        struct amdgpu_bo *root;
        uint64_t value, flags;
        struct amdgpu_vm *vm;
-       long r;
+       int r;
 
        spin_lock(&adev->vm_manager.pasid_lock);
        vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
@@ -3349,6 +3350,12 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
                value = 0;
        }
 
+       r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
+       if (r) {
+               pr_debug("failed %d to reserve fence slot\n", r);
+               goto error_unlock;
+       }
+
        r = amdgpu_vm_bo_update_mapping(adev, adev, vm, true, false, NULL, addr,
                                        addr, flags, value, NULL, NULL,
                                        NULL);
@@ -3360,7 +3367,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
 error_unlock:
        amdgpu_bo_unreserve(root);
        if (r < 0)
-               DRM_ERROR("Can't handle page fault (%ld)\n", r);
+               DRM_ERROR("Can't handle page fault (%d)\n", r);
 
 error_unref:
        amdgpu_bo_unref(&root);
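On the mask change: AMDGPU_GPU_PAGE_MASK is conventionally (AMDGPU_GPU_PAGE_SIZE - 1), a low-bits mask for the fixed 4K GPU page, while the kernel's PAGE_MASK is ~(PAGE_SIZE - 1), so misalignment against the CPU page is tested with ~PAGE_MASK; the switch makes the validation honor kernels with PAGE_SIZE > 4K (the clear-mappings hunk likewise converts a page-count delta into a byte offset with << PAGE_SHIFT). A standalone illustration of the two tests, assuming those usual definitions:

    #include <stdio.h>
    #include <stdint.h>

    #define GPU_PAGE_SIZE 4096UL
    #define GPU_PAGE_MASK (GPU_PAGE_SIZE - 1)       /* low-bits mask */
    #define CPU_PAGE_SIZE 65536UL                   /* e.g. a 64K-page arm64 kernel */
    #define CPU_PAGE_MASK (~(CPU_PAGE_SIZE - 1))    /* high-bits mask, like PAGE_MASK */

    int main(void)
    {
            uint64_t saddr = 0x1000;                /* 4K-aligned, not 64K-aligned */

            printf("gpu-page misaligned: %d\n", !!(saddr & GPU_PAGE_MASK));  /* 0 */
            printf("cpu-page misaligned: %d\n", !!(saddr & ~CPU_PAGE_MASK)); /* 1 */
            return 0;
    }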
index b2fc475..592a2dd 100644
@@ -52,7 +52,7 @@ static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
 
-       return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.real_vram_size);
+       return sysfs_emit(buf, "%llu\n", adev->gmc.real_vram_size);
 }
 
 /**
@@ -69,7 +69,7 @@ static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
 
-       return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.visible_vram_size);
+       return sysfs_emit(buf, "%llu\n", adev->gmc.visible_vram_size);
 }
 
 /**
@@ -87,8 +87,7 @@ static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
        struct amdgpu_device *adev = drm_to_adev(ddev);
        struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
 
-       return snprintf(buf, PAGE_SIZE, "%llu\n",
-                       amdgpu_vram_mgr_usage(man));
+       return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_usage(man));
 }
 
 /**
@@ -106,8 +105,7 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
        struct amdgpu_device *adev = drm_to_adev(ddev);
        struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
 
-       return snprintf(buf, PAGE_SIZE, "%llu\n",
-                       amdgpu_vram_mgr_vis_usage(man));
+       return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_vis_usage(man));
 }
 
 static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
@@ -119,27 +117,27 @@ static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
 
        switch (adev->gmc.vram_vendor) {
        case SAMSUNG:
-               return snprintf(buf, PAGE_SIZE, "samsung\n");
+               return sysfs_emit(buf, "samsung\n");
        case INFINEON:
-               return snprintf(buf, PAGE_SIZE, "infineon\n");
+               return sysfs_emit(buf, "infineon\n");
        case ELPIDA:
-               return snprintf(buf, PAGE_SIZE, "elpida\n");
+               return sysfs_emit(buf, "elpida\n");
        case ETRON:
-               return snprintf(buf, PAGE_SIZE, "etron\n");
+               return sysfs_emit(buf, "etron\n");
        case NANYA:
-               return snprintf(buf, PAGE_SIZE, "nanya\n");
+               return sysfs_emit(buf, "nanya\n");
        case HYNIX:
-               return snprintf(buf, PAGE_SIZE, "hynix\n");
+               return sysfs_emit(buf, "hynix\n");
        case MOSEL:
-               return snprintf(buf, PAGE_SIZE, "mosel\n");
+               return sysfs_emit(buf, "mosel\n");
        case WINBOND:
-               return snprintf(buf, PAGE_SIZE, "winbond\n");
+               return sysfs_emit(buf, "winbond\n");
        case ESMT:
-               return snprintf(buf, PAGE_SIZE, "esmt\n");
+               return sysfs_emit(buf, "esmt\n");
        case MICRON:
-               return snprintf(buf, PAGE_SIZE, "micron\n");
+               return sysfs_emit(buf, "micron\n");
        default:
-               return snprintf(buf, PAGE_SIZE, "unknown\n");
+               return sysfs_emit(buf, "unknown\n");
        }
 }
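sysfs_emit() is the preferred way to format sysfs attribute output: unlike the open-coded snprintf(buf, PAGE_SIZE, ...) it checks that buf really is the page-sized, page-aligned buffer sysfs passes to ->show() and cannot be called with a wrong bound. The converted callbacks all reduce to the same shape; a hedged, illustrative attribute (example_* names are not from the patch):

    static ssize_t example_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
            /* returns the number of characters emitted into buf */
            return sysfs_emit(buf, "%llu\n", example_read_counter(dev));
    }
    static DEVICE_ATTR_RO(example);  /* binds dev_attr_example to example_show() */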
 
index 33f748e..8567d5d 100644
@@ -24,7 +24,6 @@
 #include <linux/list.h>
 #include "amdgpu.h"
 #include "amdgpu_xgmi.h"
-#include "amdgpu_smu.h"
 #include "amdgpu_ras.h"
 #include "soc15.h"
 #include "df/df_3_6_offset.h"
@@ -217,7 +216,7 @@ static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
 
-       return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.xgmi.node_id);
+       return sysfs_emit(buf, "%llu\n", adev->gmc.xgmi.node_id);
 
 }
 
@@ -246,7 +245,7 @@ static ssize_t amdgpu_xgmi_show_error(struct device *dev,
 
        adev->df.funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);
 
-       return snprintf(buf, PAGE_SIZE, "%u\n", error_count);
+       return sysfs_emit(buf, "%u\n", error_count);
 }
 
 
@@ -629,7 +628,7 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
        return psp_xgmi_terminate(&adev->psp);
 }
 
-int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
+static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
 {
        int r;
        struct ras_ih_if ih_info = {
@@ -643,7 +642,7 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
            adev->gmc.xgmi.num_physical_nodes == 0)
                return 0;
 
-       amdgpu_xgmi_reset_ras_error_count(adev);
+       adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev);
 
        if (!adev->gmc.xgmi.ras_if) {
                adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
@@ -665,7 +664,7 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
        return r;
 }
 
-void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
+static void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
 {
        if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) &&
                        adev->gmc.xgmi.ras_if) {
@@ -692,7 +691,7 @@ static void pcs_clear_status(struct amdgpu_device *adev, uint32_t pcs_status_reg
        WREG32_PCIE(pcs_status_reg, 0);
 }
 
-void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
+static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
 {
        uint32_t i;
 
@@ -752,8 +751,8 @@ static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
        return 0;
 }
 
-int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
-                                     void *ras_error_status)
+static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
+                                            void *ras_error_status)
 {
        struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
        int i;
@@ -802,10 +801,17 @@ int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
                break;
        }
 
-       amdgpu_xgmi_reset_ras_error_count(adev);
+       adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev);
 
        err_data->ue_count += ue_cnt;
        err_data->ce_count += ce_cnt;
 
        return 0;
 }
+
+const struct amdgpu_xgmi_ras_funcs xgmi_ras_funcs = {
+       .ras_late_init = amdgpu_xgmi_ras_late_init,
+       .ras_fini = amdgpu_xgmi_ras_fini,
+       .query_ras_error_count = amdgpu_xgmi_query_ras_error_count,
+       .reset_ras_error_count = amdgpu_xgmi_reset_ras_error_count,
+};
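This file is the template for the rest of the series: the RAS entry points become static, a const ops table (xgmi_ras_funcs) is the only export, and callers NULL-check the optional hooks before dispatching. The guarded call shape, as it appears in the gfx and gmc hunks below:

    /* Sketch of the dispatch pattern; err_data is the usual ras_err_data. */
    if (adev->gmc.xgmi.ras_funcs &&
        adev->gmc.xgmi.ras_funcs->query_ras_error_count)
            adev->gmc.xgmi.ras_funcs->query_ras_error_count(adev, err_data);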
index 148560d..12969c0 100644
@@ -50,6 +50,7 @@ struct amdgpu_pcs_ras_field {
        uint32_t pcs_err_shift;
 };
 
+extern const struct amdgpu_xgmi_ras_funcs xgmi_ras_funcs;
 struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev);
 void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive);
 int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
@@ -58,14 +59,8 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
 int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
 int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
                struct amdgpu_device *peer_adev);
-int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev);
-void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev);
 uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
                                           uint64_t addr);
-int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
-                                     void *ras_error_status);
-void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev);
-
 static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
                struct amdgpu_device *bo_adev)
 {
index 5355827..1a8f6d4 100644
@@ -90,11 +90,22 @@ union amd_sriov_msg_feature_flags {
                uint32_t  host_flr_vramlost  : 1;
                uint32_t  mm_bw_management   : 1;
                uint32_t  pp_one_vf_mode     : 1;
-               uint32_t  reserved           : 27;
+               uint32_t  reg_indirect_acc   : 1;
+               uint32_t  reserved           : 26;
        } flags;
        uint32_t      all;
 };
 
+union amd_sriov_reg_access_flags {
+       struct {
+               uint32_t vf_reg_access_ih    : 1;
+               uint32_t vf_reg_access_mmhub : 1;
+               uint32_t vf_reg_access_gc    : 1;
+               uint32_t reserved            : 29;
+       } flags;
+       uint32_t all;
+};
+
 union amd_sriov_msg_os_info {
        struct {
                uint32_t  windows            : 1;
@@ -149,8 +160,10 @@ struct amd_sriov_msg_pf2vf_info {
        /* identification in ROCm SMI */
        uint64_t uuid;
        uint32_t fcn_idx;
+       /* flags which indicate the register access method the VF should use */
+       union amd_sriov_reg_access_flags reg_access_flags;
        /* reserved */
-       uint32_t reserved[256-26];
+       uint32_t reserved[256-27];
 };
 
 struct amd_sriov_msg_vf2pf_info_header {
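Note the bookkeeping in the pf2vf struct: adding one 32-bit field shrinks reserved[] from 256-26 to 256-27 dwords, which only works because the exchange block is padded to a fixed 256 dwords (1 KB) so old and new PF/VF pairs stay ABI-compatible. A compile-time pin in that spirit (hedged; the real header may enforce this with its own assert):

    /* Assumption made explicit: the PF-to-VF block stays exactly 1 KB. */
    static_assert(sizeof(struct amd_sriov_msg_pf2vf_info) == 256 * sizeof(uint32_t),
                  "amd_sriov_msg_pf2vf_info must remain 256 dwords");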
index 43b9781..c4bb8ee 100644
@@ -984,10 +984,9 @@ static int cik_sdma_sw_init(void *handle)
                sprintf(ring->name, "sdma%d", i);
                r = amdgpu_ring_init(adev, ring, 1024,
                                     &adev->sdma.trap_irq,
-                                    (i == 0) ?
-                                    AMDGPU_SDMA_IRQ_INSTANCE0 :
+                                    (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
                                     AMDGPU_SDMA_IRQ_INSTANCE1,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
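Every amdgpu_ring_init() call in this merge gains a trailing NULL: the function has grown one more parameter, which this series' callers treat as an optional scheduler-score pointer that can be shared between rings (take the exact type from the tree, not from this note); NULL keeps the old one-score-per-ring behavior. The updated call shape:

    /* Sketch: last argument assumed to be the optional shared sched_score. */
    r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
                         AMDGPU_SDMA_IRQ_INSTANCE0,
                         AMDGPU_RING_PRIO_DEFAULT, NULL /* sched_score */);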
index ea825b4..d1570a4 100644
@@ -2896,6 +2896,11 @@ static int dce_v10_0_hw_fini(void *handle)
 static int dce_v10_0_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int r;
+
+       r = amdgpu_display_suspend_helper(adev);
+       if (r)
+               return r;
 
        adev->mode_info.bl_level =
                amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
@@ -2920,8 +2925,10 @@ static int dce_v10_0_resume(void *handle)
                amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
                                                    bl_level);
        }
+       if (ret)
+               return ret;
 
-       return ret;
+       return amdgpu_display_resume_helper(adev);
 }
 
 static bool dce_v10_0_is_idle(void *handle)
index a360a6d..18a7b3b 100644
@@ -3026,6 +3026,11 @@ static int dce_v11_0_hw_fini(void *handle)
 static int dce_v11_0_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int r;
+
+       r = amdgpu_display_suspend_helper(adev);
+       if (r)
+               return r;
 
        adev->mode_info.bl_level =
                amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
@@ -3050,8 +3055,10 @@ static int dce_v11_0_resume(void *handle)
                amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
                                                    bl_level);
        }
+       if (ret)
+               return ret;
 
-       return ret;
+       return amdgpu_display_resume_helper(adev);
 }
 
 static bool dce_v11_0_is_idle(void *handle)
index ef124ac..dbcb09c 100644
@@ -2769,7 +2769,11 @@ static int dce_v6_0_hw_fini(void *handle)
 static int dce_v6_0_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int r;
 
+       r = amdgpu_display_suspend_helper(adev);
+       if (r)
+               return r;
        adev->mode_info.bl_level =
                amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
 
@@ -2793,8 +2797,10 @@ static int dce_v6_0_resume(void *handle)
                amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
                                                    bl_level);
        }
+       if (ret)
+               return ret;
 
-       return ret;
+       return amdgpu_display_resume_helper(adev);
 }
 
 static bool dce_v6_0_is_idle(void *handle)
index c986501..b200b9e 100644
@@ -2795,6 +2795,11 @@ static int dce_v8_0_hw_fini(void *handle)
 static int dce_v8_0_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int r;
+
+       r = amdgpu_display_suspend_helper(adev);
+       if (r)
+               return r;
 
        adev->mode_info.bl_level =
                amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
@@ -2819,8 +2824,10 @@ static int dce_v8_0_resume(void *handle)
                amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
                                                    bl_level);
        }
+       if (ret)
+               return ret;
 
-       return ret;
+       return amdgpu_display_resume_helper(adev);
 }
 
 static bool dce_v8_0_is_idle(void *handle)
index 9810af7..5c11144 100644
@@ -39,6 +39,7 @@
 #include "dce_v11_0.h"
 #include "dce_virtual.h"
 #include "ivsrcid/ivsrcid_vislands30.h"
+#include "amdgpu_display.h"
 
 #define DCE_VIRTUAL_VBLANK_PERIOD 16666666
 
@@ -491,12 +492,24 @@ static int dce_virtual_hw_fini(void *handle)
 
 static int dce_virtual_suspend(void *handle)
 {
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int r;
+
+       r = amdgpu_display_suspend_helper(adev);
+       if (r)
+               return r;
        return dce_virtual_hw_fini(handle);
 }
 
 static int dce_virtual_resume(void *handle)
 {
-       return dce_virtual_hw_init(handle);
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int r;
+
+       r = dce_virtual_hw_init(handle);
+       if (r)
+               return r;
+       return amdgpu_display_resume_helper(adev);
 }
 
 static bool dce_virtual_is_idle(void *handle)
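All four dce_v*_0 blocks and dce_virtual converge on one ordering rule: suspend runs amdgpu_display_suspend_helper() before any other teardown, and resume performs its hardware init first, bailing out on error, with amdgpu_display_resume_helper() strictly last. Condensed, with illustrative example_* names:

    static int example_suspend(void *handle)
    {
            struct amdgpu_device *adev = handle;
            int r;

            r = amdgpu_display_suspend_helper(adev); /* quiesce displays first */
            if (r)
                    return r;
            return example_hw_fini(handle);
    }

    static int example_resume(void *handle)
    {
            struct amdgpu_device *adev = handle;
            int r = example_hw_init(handle);

            if (r)
                    return r;
            return amdgpu_display_resume_helper(adev); /* restore modes last */
    }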
index 44109a6..0d8459d 100644
@@ -205,7 +205,7 @@ static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev,
                        count++;
        }
 
-       return snprintf(buf, PAGE_SIZE, "%i\n", count);
+       return sysfs_emit(buf, "%i\n", count);
 }
 
 /* device attr for available perfmon counters */
index 45d1172..196d9d2 100644
@@ -29,7 +29,6 @@
 #include "amdgpu.h"
 #include "amdgpu_gfx.h"
 #include "amdgpu_psp.h"
-#include "amdgpu_smu.h"
 #include "nv.h"
 #include "nvd.h"
 
 #define mmGC_THROTTLE_CTRL_Sienna_Cichlid              0x2030
 #define mmGC_THROTTLE_CTRL_Sienna_Cichlid_BASE_IDX     0
 
+#define GFX_RLCG_GC_WRITE_OLD  (0x8 << 28)
+#define GFX_RLCG_GC_WRITE      (0x0 << 28)
+#define GFX_RLCG_GC_READ       (0x1 << 28)
+#define GFX_RLCG_MMHUB_WRITE   (0x2 << 28)
+
 MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
 MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
 MODULE_FIRMWARE("amdgpu/navi10_me.bin");
@@ -1419,38 +1423,127 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
 };
 
-static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
+static bool gfx_v10_is_rlcg_rw(struct amdgpu_device *adev, u32 offset, uint32_t *flag, bool write)
+{
+       /* always programmed by rlcg, only for gc */
+       if (offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI) ||
+           offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO) ||
+           offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH) ||
+           offset == SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL) ||
+           offset == SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX) ||
+           offset == SOC15_REG_OFFSET(GC, 0, mmCP_ME_CNTL)) {
+               if (!amdgpu_sriov_reg_indirect_gc(adev))
+                       *flag = GFX_RLCG_GC_WRITE_OLD;
+               else
+                       *flag = write ? GFX_RLCG_GC_WRITE : GFX_RLCG_GC_READ;
+
+               return true;
+       }
+
+       /* currently support gc read/write, mmhub write */
+       if (offset >= SOC15_REG_OFFSET(GC, 0, mmSDMA0_DEC_START) &&
+           offset <= SOC15_REG_OFFSET(GC, 0, mmRLC_GTS_OFFSET_MSB)) {
+               if (amdgpu_sriov_reg_indirect_gc(adev))
+                       *flag = write ? GFX_RLCG_GC_WRITE : GFX_RLCG_GC_READ;
+               else
+                       return false;
+       } else {
+               if (amdgpu_sriov_reg_indirect_mmhub(adev))
+                       *flag = GFX_RLCG_MMHUB_WRITE;
+               else
+                       return false;
+       }
+
+       return true;
+}
+
+static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32_t flag)
 {
        static void *scratch_reg0;
        static void *scratch_reg1;
+       static void *scratch_reg2;
+       static void *scratch_reg3;
        static void *spare_int;
+       static uint32_t grbm_cntl;
+       static uint32_t grbm_idx;
        uint32_t i = 0;
        uint32_t retries = 50000;
+       u32 ret = 0;
+
+       scratch_reg0 = adev->rmmio +
+                      (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0) * 4;
+       scratch_reg1 = adev->rmmio +
+                      (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1) * 4;
+       scratch_reg2 = adev->rmmio +
+                      (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG2) * 4;
+       scratch_reg3 = adev->rmmio +
+                      (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3) * 4;
+       spare_int = adev->rmmio +
+                   (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT) * 4;
+
+       grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
+       grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
+
+       if (offset == grbm_cntl || offset == grbm_idx) {
+       if (offset == grbm_cntl)
+                       writel(v, scratch_reg2);
+               else if (offset == grbm_idx)
+                       writel(v, scratch_reg3);
+
+               writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+       } else {
+               writel(v, scratch_reg0);
+               writel(offset | flag, scratch_reg1);
+               writel(1, spare_int);
+               for (i = 0; i < retries; i++) {
+                       u32 tmp;
+
+                       tmp = readl(scratch_reg1);
+                       if (!(tmp & flag))
+                               break;
 
-       scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
-       scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
-       spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;
+                       udelay(10);
+               }
 
-       if (amdgpu_sriov_runtime(adev)) {
-               pr_err("shouldn't call rlcg write register during runtime\n");
-               return;
+               if (i >= retries)
+                       pr_err("timeout: rlcg program reg:0x%05x failed!\n", offset);
        }
 
-       writel(v, scratch_reg0);
-       writel(offset | 0x80000000, scratch_reg1);
-       writel(1, spare_int);
-       for (i = 0; i < retries; i++) {
-               u32 tmp;
+       ret = readl(scratch_reg0);
 
-               tmp = readl(scratch_reg1);
-               if (!(tmp & 0x80000000))
-                       break;
+       return ret;
+}
+
+static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 value, u32 flag)
+{
+       uint32_t rlcg_flag;
 
-               udelay(10);
+       if (amdgpu_sriov_fullaccess(adev) &&
+           gfx_v10_is_rlcg_rw(adev, offset, &rlcg_flag, 1)) {
+               gfx_v10_rlcg_rw(adev, offset, value, rlcg_flag);
+
+               return;
        }
+       if (flag & AMDGPU_REGS_NO_KIQ)
+               WREG32_NO_KIQ(offset, value);
+       else
+               WREG32(offset, value);
+}
 
-       if (i >= retries)
-               pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
+static u32 gfx_v10_rlcg_rreg(struct amdgpu_device *adev, u32 offset, u32 flag)
+{
+       uint32_t rlcg_flag;
+
+       if (amdgpu_sriov_fullaccess(adev) &&
+           gfx_v10_is_rlcg_rw(adev, offset, &rlcg_flag, 0))
+               return gfx_v10_rlcg_rw(adev, offset, 0, rlcg_flag);
+
+       if (flag & AMDGPU_REGS_NO_KIQ)
+               return RREG32_NO_KIQ(offset);
+       else
+               return RREG32(offset);
 }
 
 static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
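gfx_v10_rlcg_rw() above is a small VF-to-RLC mailbox: payload goes into SCRATCH_REG0, the target offset plus a request-type flag into SCRATCH_REG1, RLC_SPARE_INT is kicked as a doorbell, and the VF polls SCRATCH_REG1 until the RLC clears the flag; reads then collect the result from SCRATCH_REG0 (GRBM_GFX_CNTL/INDEX are special-cased via SCRATCH_REG2/3 plus a direct write). The core of the handshake:

    /* Condensed from gfx_v10_rlcg_rw(); the real loop is bounded by a
     * 50000-iteration retry count rather than polling forever.
     */
    writel(v, scratch_reg0);               /* payload */
    writel(offset | flag, scratch_reg1);   /* target register + request type */
    writel(1, spare_int);                  /* doorbell: ask the RLC to act */
    while (readl(scratch_reg1) & flag)     /* RLC clears flag on completion */
            udelay(10);
    ret = readl(scratch_reg0);             /* result, meaningful for reads */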
@@ -4459,9 +4552,8 @@ static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
        sprintf(ring->name, "gfx_%d.%d.%d", ring->me, ring->pipe, ring->queue);
 
        irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
-       r = amdgpu_ring_init(adev, ring, 1024,
-                            &adev->gfx.eop_irq, irq_type,
-                            AMDGPU_RING_PRIO_DEFAULT);
+       r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
+                            AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                return r;
        return 0;
@@ -4495,8 +4587,8 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
        hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
                        AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
        /* type-2 packets are deprecated on MEC, use type-3 instead */
-       r = amdgpu_ring_init(adev, ring, 1024,
-                            &adev->gfx.eop_irq, irq_type, hw_prio);
+       r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
+                            hw_prio, NULL);
        if (r)
                return r;
 
@@ -7172,16 +7264,10 @@ static int gfx_v10_0_hw_init(void *handle)
                 * loaded firstly, so in direct type, it has to load smc ucode
                 * here before rlc.
                 */
-               if (adev->smu.ppt_funcs != NULL && !(adev->flags & AMD_IS_APU)) {
-                       r = smu_load_microcode(&adev->smu);
+               if (!(adev->flags & AMD_IS_APU)) {
+                       r = amdgpu_pm_load_smu_firmware(adev, NULL);
                        if (r)
                                return r;
-
-                       r = smu_check_fw_status(&adev->smu);
-                       if (r) {
-                               pr_err("SMC firmware status is not correct\n");
-                               return r;
-                       }
                }
                gfx_v10_0_disable_gpa_mode(adev);
        }
@@ -7892,6 +7978,7 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs_sriov = {
        .start = gfx_v10_0_rlc_start,
        .update_spm_vmid = gfx_v10_0_update_spm_vmid,
        .rlcg_wreg = gfx_v10_rlcg_wreg,
+       .rlcg_rreg = gfx_v10_rlcg_rreg,
        .is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range,
 };
 
index ca74638..3a8d52a 100644
@@ -3114,7 +3114,7 @@ static int gfx_v6_0_sw_init(void *handle)
                r = amdgpu_ring_init(adev, ring, 1024,
                                     &adev->gfx.eop_irq,
                                     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
@@ -3137,7 +3137,7 @@ static int gfx_v6_0_sw_init(void *handle)
                irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
                r = amdgpu_ring_init(adev, ring, 1024,
                                     &adev->gfx.eop_irq, irq_type,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
index a368724..c35fdd2 100644
@@ -1877,7 +1877,7 @@ static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
        mutex_unlock(&adev->srbm_mutex);
 
        /* Initialize all compute VMIDs to have no GDS, GWS, or OA
-          acccess. These should be enabled by FW for target VMIDs. */
+          access. These should be enabled by FW for target VMIDs. */
        for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
                WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
                WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
@@ -2058,7 +2058,7 @@ static void gfx_v7_0_constants_init(struct amdgpu_device *adev)
  * @adev: amdgpu_device pointer
  *
  * Set up the number and offset of the CP scratch registers.
- * NOTE: use of CP scratch registers is a legacy inferface and
+ * NOTE: use of CP scratch registers is a legacy interface and
  * is not used by default on newer asics (r6xx+).  On newer asics,
  * memory buffers are used for fences rather than scratch regs.
  */
@@ -2172,7 +2172,7 @@ static void gfx_v7_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
  * @seq: sequence number
  * @flags: fence related flags
  *
- * Emits a fence sequnce number on the gfx ring and flushes
+ * Emits a fence sequence number on the gfx ring and flushes
  * GPU caches.
  */
 static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
@@ -2215,7 +2215,7 @@ static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
  * @seq: sequence number
  * @flags: fence related flags
  *
- * Emits a fence sequnce number on the compute ring and flushes
+ * Emits a fence sequence number on the compute ring and flushes
  * GPU caches.
  */
 static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
@@ -2245,14 +2245,14 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
  * gfx_v7_0_ring_emit_ib - emit an IB (Indirect Buffer) on the ring
  *
  * @ring: amdgpu_ring structure holding ring information
- * @job: job to retrive vmid from
+ * @job: job to retrieve vmid from
  * @ib: amdgpu indirect buffer object
  * @flags: options (AMDGPU_HAVE_CTX_SWITCH)
  *
  * Emits an DE (drawing engine) or CE (constant engine) IB
  * on the gfx ring.  IBs are usually generated by userspace
  * acceleration drivers and submitted to the kernel for
- * sheduling on the ring.  This function schedules the IB
+ * scheduling on the ring.  This function schedules the IB
  * on the gfx ring for execution by the GPU.
  */
 static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
@@ -2402,7 +2402,7 @@ err1:
 
 /*
  * CP.
- * On CIK, gfx and compute now have independant command processors.
+ * On CIK, gfx and compute now have independent command processors.
  *
  * GFX
  * Gfx consists of a single ring and can process both gfx jobs and
@@ -2630,7 +2630,7 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
        ring->wptr = 0;
        WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
 
-       /* set the wb address wether it's enabled or not */
+       /* set the wb address whether it's enabled or not */
        rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
        WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
        WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);
@@ -2985,7 +2985,7 @@ static void gfx_v7_0_mqd_init(struct amdgpu_device *adev,
        mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
        mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
 
-       /* set the wb address wether it's enabled or not */
+       /* set the wb address whether it's enabled or not */
        wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
        mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
        mqd->cp_hqd_pq_rptr_report_addr_hi =
@@ -3198,7 +3198,7 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
 /**
  * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
  *
- * @ring: the ring to emmit the commands to
+ * @ring: the ring to emit the commands to
  *
  * Sync the command pipeline with the PFP. E.g. wait for everything
  * to be completed.
@@ -3220,7 +3220,7 @@ static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
        amdgpu_ring_write(ring, 4); /* poll interval */
 
        if (usepfp) {
-               /* synce CE with ME to prevent CE fetch CEIB before context switch done */
+               /* sync CE with ME to prevent CE fetch CEIB before context switch done */
                amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
                amdgpu_ring_write(ring, 0);
                amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
@@ -4438,7 +4438,7 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
        /* type-2 packets are deprecated on MEC, use type-3 instead */
        r = amdgpu_ring_init(adev, ring, 1024,
                             &adev->gfx.eop_irq, irq_type,
-                            AMDGPU_RING_PRIO_DEFAULT);
+                            AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                return r;
 
@@ -4512,7 +4512,7 @@ static int gfx_v7_0_sw_init(void *handle)
                r = amdgpu_ring_init(adev, ring, 1024,
                                     &adev->gfx.eop_irq,
                                     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
index 4d45844..c26e060 100644
@@ -1927,8 +1927,8 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
        hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
                        AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_RING_PRIO_DEFAULT;
        /* type-2 packets are deprecated on MEC, use type-3 instead */
-       r = amdgpu_ring_init(adev, ring, 1024,
-                            &adev->gfx.eop_irq, irq_type, hw_prio);
+       r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
+                            hw_prio, NULL);
        if (r)
                return r;
 
@@ -2033,7 +2033,7 @@ static int gfx_v8_0_sw_init(void *handle)
 
                r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
                                     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
index 1393cca..06811a1 100644
@@ -734,7 +734,7 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
        mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
 };
 
-static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
+static void gfx_v9_0_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
 {
        static void *scratch_reg0;
        static void *scratch_reg1;
@@ -787,6 +787,20 @@ static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
 
 }
 
+static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
+{
+       if (amdgpu_sriov_fullaccess(adev)) {
+               gfx_v9_0_rlcg_rw(adev, offset, v, flag);
+
+               return;
+       }
+
+       if (flag & AMDGPU_REGS_NO_KIQ)
+               WREG32_NO_KIQ(offset, v);
+       else
+               WREG32(offset, v);
+}
+
 #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
 #define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
 #define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
@@ -2089,45 +2103,22 @@ static void gfx_v9_0_select_me_pipe_q(struct amdgpu_device *adev,
 }
 
 static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
-       .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
-       .select_se_sh = &gfx_v9_0_select_se_sh,
-       .read_wave_data = &gfx_v9_0_read_wave_data,
-       .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
-       .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
-       .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
+        .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
+        .select_se_sh = &gfx_v9_0_select_se_sh,
+        .read_wave_data = &gfx_v9_0_read_wave_data,
+        .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
+        .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
+        .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
+};
+
+static const struct amdgpu_gfx_ras_funcs gfx_v9_0_ras_funcs = {
+       .ras_late_init = amdgpu_gfx_ras_late_init,
+       .ras_fini = amdgpu_gfx_ras_fini,
        .ras_error_inject = &gfx_v9_0_ras_error_inject,
        .query_ras_error_count = &gfx_v9_0_query_ras_error_count,
        .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
 };
 
-static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
-       .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
-       .select_se_sh = &gfx_v9_0_select_se_sh,
-       .read_wave_data = &gfx_v9_0_read_wave_data,
-       .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
-       .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
-       .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
-       .ras_error_inject = &gfx_v9_4_ras_error_inject,
-       .query_ras_error_count = &gfx_v9_4_query_ras_error_count,
-       .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
-       .query_ras_error_status = &gfx_v9_4_query_ras_error_status,
-};
-
-static const struct amdgpu_gfx_funcs gfx_v9_4_2_gfx_funcs = {
-       .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
-       .select_se_sh = &gfx_v9_0_select_se_sh,
-       .read_wave_data = &gfx_v9_0_read_wave_data,
-       .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
-       .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
-       .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
-       .ras_error_inject = &gfx_v9_4_2_ras_error_inject,
-       .query_ras_error_count = &gfx_v9_4_2_query_ras_error_count,
-       .reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count,
-       .query_ras_error_status = &gfx_v9_4_2_query_ras_error_status,
-       .reset_ras_error_status = &gfx_v9_4_2_reset_ras_error_status,
-       .enable_watchdog_timer = &gfx_v9_4_2_enable_watchdog_timer,
-};
-
 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
 {
        u32 gb_addr_config;
@@ -2154,6 +2145,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
                DRM_INFO("fix gfx.config for vega12\n");
                break;
        case CHIP_VEGA20:
+               adev->gfx.ras_funcs = &gfx_v9_0_ras_funcs;
                adev->gfx.config.max_hw_contexts = 8;
                adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
                adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2179,7 +2171,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
                        gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_ARCTURUS:
-               adev->gfx.funcs = &gfx_v9_4_gfx_funcs;
+               adev->gfx.ras_funcs = &gfx_v9_4_ras_funcs;
                adev->gfx.config.max_hw_contexts = 8;
                adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
                adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2200,7 +2192,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
                gb_addr_config |= 0x22010042;
                break;
        case CHIP_ALDEBARAN:
-               adev->gfx.funcs = &gfx_v9_4_2_gfx_funcs;
+               adev->gfx.ras_funcs = &gfx_v9_4_2_ras_funcs;
                adev->gfx.config.max_hw_contexts = 8;
                adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
                adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2286,8 +2278,8 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
        hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
                        AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
        /* type-2 packets are deprecated on MEC, use type-3 instead */
-       return amdgpu_ring_init(adev, ring, 1024,
-                               &adev->gfx.eop_irq, irq_type, hw_prio);
+       return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
+                               hw_prio, NULL);
 }
 
 static int gfx_v9_0_sw_init(void *handle)
@@ -2376,10 +2368,9 @@ static int gfx_v9_0_sw_init(void *handle)
                        sprintf(ring->name, "gfx_%d", i);
                ring->use_doorbell = true;
                ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
-               r = amdgpu_ring_init(adev, ring, 1024,
-                                    &adev->gfx.eop_irq,
+               r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
                                     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
@@ -2434,7 +2425,9 @@ static int gfx_v9_0_sw_fini(void *handle)
        int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       amdgpu_gfx_ras_fini(adev);
+       if (adev->gfx.ras_funcs &&
+           adev->gfx.ras_funcs->ras_fini)
+               adev->gfx.ras_funcs->ras_fini(adev);
 
        for (i = 0; i < adev->gfx.num_gfx_rings; i++)
                amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
@@ -4025,8 +4018,14 @@ static int gfx_v9_0_hw_fini(void *handle)
        }
 
        gfx_v9_0_cp_enable(adev, false);
-       adev->gfx.rlc.funcs->stop(adev);
 
+       /* Skip suspend with A+A reset */
+       if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) {
+               dev_dbg(adev->dev, "Device in reset. Skipping RLC halt\n");
+               return 0;
+       }
+
+       adev->gfx.rlc.funcs->stop(adev);
        return 0;
 }
 
@@ -4747,12 +4746,16 @@ static int gfx_v9_0_ecc_late_init(void *handle)
        if (r)
                return r;
 
-       r = amdgpu_gfx_ras_late_init(adev);
-       if (r)
-               return r;
+       if (adev->gfx.ras_funcs &&
+           adev->gfx.ras_funcs->ras_late_init) {
+               r = adev->gfx.ras_funcs->ras_late_init(adev);
+               if (r)
+                       return r;
+       }
 
-       if (adev->gfx.funcs->enable_watchdog_timer)
-               adev->gfx.funcs->enable_watchdog_timer(adev);
+       if (adev->gfx.ras_funcs &&
+           adev->gfx.ras_funcs->enable_watchdog_timer)
+               adev->gfx.ras_funcs->enable_watchdog_timer(adev);
 
        return 0;
 }
index bc699d6..830080f 100644
@@ -863,8 +863,8 @@ static int gfx_v9_4_ras_error_count(struct amdgpu_device *adev,
        return 0;
 }
 
-int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
-                                  void *ras_error_status)
+static int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
+                                         void *ras_error_status)
 {
        struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
        uint32_t sec_count = 0, ded_count = 0;
@@ -906,7 +906,7 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
        return 0;
 }
 
-void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
+static void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
 {
        int i, j, k;
 
@@ -971,7 +971,8 @@ void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
        WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255);
 }
 
-int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
+static int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
+                                    void *inject_if)
 {
        struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
        int ret;
@@ -996,7 +997,7 @@ int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
 static const struct soc15_reg_entry gfx_v9_4_rdrsp_status_regs =
        { SOC15_REG_ENTRY(GC, 0, mmGCEA_ERR_STATUS), 0, 1, 32 };
 
-void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
+static void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
 {
        uint32_t i, j;
        uint32_t reg_value;
@@ -1021,3 +1022,12 @@ void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
        gfx_v9_4_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
 }
+
+const struct amdgpu_gfx_ras_funcs gfx_v9_4_ras_funcs = {
+       .ras_late_init = amdgpu_gfx_ras_late_init,
+       .ras_fini = amdgpu_gfx_ras_fini,
+       .ras_error_inject = &gfx_v9_4_ras_error_inject,
+       .query_ras_error_count = &gfx_v9_4_query_ras_error_count,
+       .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
+       .query_ras_error_status = &gfx_v9_4_query_ras_error_status,
+};
index 875f184..bdd16b5 100644
 #ifndef __GFX_V9_4_H__
 #define __GFX_V9_4_H__
 
-void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev);
-
-int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
-                                  void *ras_error_status);
-
-int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
-                                    void *inject_if);
-
-void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev);
-
-void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev);
+extern const struct amdgpu_gfx_ras_funcs gfx_v9_4_ras_funcs;
 
 #endif /* __GFX_V9_4_H__ */
index 2e94998..9ca76a3 100644
@@ -1283,4 +1283,15 @@ static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev)
        }
        gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
        mutex_unlock(&adev->grbm_idx_mutex);
-}
\ No newline at end of file
+}
+
+const struct amdgpu_gfx_ras_funcs gfx_v9_4_2_ras_funcs = {
+       .ras_late_init = amdgpu_gfx_ras_late_init,
+       .ras_fini = amdgpu_gfx_ras_fini,
+       .ras_error_inject = &gfx_v9_4_2_ras_error_inject,
+       .query_ras_error_count = &gfx_v9_4_2_query_ras_error_count,
+       .reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count,
+       .query_ras_error_status = &gfx_v9_4_2_query_ras_error_status,
+       .reset_ras_error_status = &gfx_v9_4_2_reset_ras_error_status,
+       .enable_watchdog_timer = &gfx_v9_4_2_enable_watchdog_timer,
+};
index c143d17..81c5833 100644
@@ -30,11 +30,6 @@ void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev,
                                      uint32_t die_id);
 void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev);
 
-void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev);
-int gfx_v9_4_2_ras_error_inject(struct amdgpu_device *adev, void *inject_if);
-void gfx_v9_4_2_query_ras_error_status(struct amdgpu_device *adev);
-int gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev,
-                                  void *ras_error_status);
-void gfx_v9_4_2_reset_ras_error_status(struct amdgpu_device *adev);
-void gfx_v9_4_2_enable_watchdog_timer(struct amdgpu_device *adev);
+extern const struct amdgpu_gfx_ras_funcs gfx_v9_4_2_ras_funcs;
+
 #endif /* __GFX_V9_4_2_H__ */
index 33e54ee..2bfd620 100644
@@ -655,7 +655,7 @@ static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
                adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
                adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA;
                adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0];
-               adev->umc.funcs = &umc_v8_7_funcs;
+               adev->umc.ras_funcs = &umc_v8_7_ras_funcs;
                break;
        default:
                break;
index 468acc0..c82d82d 100644
@@ -653,7 +653,8 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
        adev->gmc.vm_fault.num_types = 1;
        adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
 
-       if (!amdgpu_sriov_vf(adev)) {
+       if (!amdgpu_sriov_vf(adev) &&
+           !adev->gmc.xgmi.connected_to_cpu) {
                adev->gmc.ecc_irq.num_types = 1;
                adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
        }
@@ -1155,7 +1156,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
                adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
                adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
                adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
-               adev->umc.funcs = &umc_v6_1_funcs;
+               adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
                break;
        case CHIP_ARCTURUS:
                adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
@@ -1163,7 +1164,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
                adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
                adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
                adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
-               adev->umc.funcs = &umc_v6_1_funcs;
+               adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
                break;
        default:
                break;
@@ -1185,6 +1186,24 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
        }
 }
 
+static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
+{
+       switch (adev->asic_type) {
+       case CHIP_VEGA20:
+               adev->mmhub.ras_funcs = &mmhub_v1_0_ras_funcs;
+               break;
+       case CHIP_ARCTURUS:
+               adev->mmhub.ras_funcs = &mmhub_v9_4_ras_funcs;
+               break;
+       case CHIP_ALDEBARAN:
+               adev->mmhub.ras_funcs = &mmhub_v1_7_ras_funcs;
+               break;
+       default:
+               /* mmhub ras is not available */
+               break;
+       }
+}
+
 static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
 {
        adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
@@ -1194,12 +1213,6 @@ static int gmc_v9_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       gmc_v9_0_set_gmc_funcs(adev);
-       gmc_v9_0_set_irq_funcs(adev);
-       gmc_v9_0_set_umc_funcs(adev);
-       gmc_v9_0_set_mmhub_funcs(adev);
-       gmc_v9_0_set_gfxhub_funcs(adev);
-
        if (adev->asic_type == CHIP_VEGA20 ||
            adev->asic_type == CHIP_ARCTURUS)
                adev->gmc.xgmi.supported = true;
@@ -1208,7 +1221,14 @@ static int gmc_v9_0_early_init(void *handle)
                adev->gmc.xgmi.supported = true;
                adev->gmc.xgmi.connected_to_cpu =
                        adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
-        }
+       }
+
+       gmc_v9_0_set_gmc_funcs(adev);
+       gmc_v9_0_set_irq_funcs(adev);
+       gmc_v9_0_set_umc_funcs(adev);
+       gmc_v9_0_set_mmhub_funcs(adev);
+       gmc_v9_0_set_mmhub_ras_funcs(adev);
+       gmc_v9_0_set_gfxhub_funcs(adev);
 
        adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
        adev->gmc.shared_aperture_end =
@@ -1240,8 +1260,9 @@ static int gmc_v9_0_late_init(void *handle)
                }
        }
 
-       if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
-               adev->mmhub.funcs->reset_ras_error_count(adev);
+       if (adev->mmhub.ras_funcs &&
+           adev->mmhub.ras_funcs->reset_ras_error_count)
+               adev->mmhub.ras_funcs->reset_ras_error_count(adev);
 
        r = amdgpu_gmc_ras_late_init(adev);
        if (r)
@@ -1506,7 +1527,8 @@ static int gmc_v9_0_sw_init(void *handle)
        if (r)
                return r;
 
-       if (!amdgpu_sriov_vf(adev)) {
+       if (!amdgpu_sriov_vf(adev) &&
+           !adev->gmc.xgmi.connected_to_cpu) {
                /* interrupt sent to DF. */
                r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
                                      &adev->gmc.ecc_irq);
index 7332a32..9360204 100644
@@ -487,7 +487,7 @@ int jpeg_v1_0_sw_init(void *handle)
        ring = &adev->jpeg.inst->ring_dec;
        sprintf(ring->name, "jpeg_dec");
        r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
-                            0, AMDGPU_RING_PRIO_DEFAULT);
+                            0, AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                return r;
 
index 3b22953..de5abce 100644
@@ -108,7 +108,7 @@ static int jpeg_v2_0_sw_init(void *handle)
        ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
        sprintf(ring->name, "jpeg_dec");
        r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
-                            0, AMDGPU_RING_PRIO_DEFAULT);
+                            0, AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                return r;
 
index 072774a..8353199 100644
@@ -115,7 +115,7 @@ static int jpeg_v2_5_sw_init(void *handle)
                ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i;
                sprintf(ring->name, "jpeg_dec_%d", i);
                r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq,
-                                    0, AMDGPU_RING_PRIO_DEFAULT);
+                                    0, AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
 
index e8fbb2a..de5dfcf 100644
@@ -94,7 +94,7 @@ static int jpeg_v3_0_sw_init(void *handle)
        ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
        sprintf(ring->name, "jpeg_dec");
        r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
-                            AMDGPU_RING_PRIO_DEFAULT);
+                            AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                return r;
 
index 7f30629..a7ec4ac 100644
@@ -848,7 +848,8 @@ static int mes_v10_1_ring_init(struct amdgpu_device *adev)
        ring->no_scheduler = true;
        sprintf(ring->name, "mes_%d.%d.%d", ring->me, ring->pipe, ring->queue);
 
-       return amdgpu_ring_init(adev, ring, 1024, NULL, 0, AMDGPU_RING_PRIO_DEFAULT);
+       return amdgpu_ring_init(adev, ring, 1024, NULL, 0,
+                               AMDGPU_RING_PRIO_DEFAULT, NULL);
 }
 
 static int mes_v10_1_mqd_sw_init(struct amdgpu_device *adev)
index d7b39c0..aa9be56 100644
@@ -776,10 +776,14 @@ static void mmhub_v1_0_reset_ras_error_count(struct amdgpu_device *adev)
        }
 }
 
-const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
+const struct amdgpu_mmhub_ras_funcs mmhub_v1_0_ras_funcs = {
        .ras_late_init = amdgpu_mmhub_ras_late_init,
+       .ras_fini = amdgpu_mmhub_ras_fini,
        .query_ras_error_count = mmhub_v1_0_query_ras_error_count,
        .reset_ras_error_count = mmhub_v1_0_reset_ras_error_count,
+};
+
+const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
        .get_fb_location = mmhub_v1_0_get_fb_location,
        .init = mmhub_v1_0_init,
        .gart_enable = mmhub_v1_0_gart_enable,
index d77f5b6..4661b09 100644
@@ -24,5 +24,6 @@
 #define __MMHUB_V1_0_H__
 
 extern const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs;
+extern const struct amdgpu_mmhub_ras_funcs mmhub_v1_0_ras_funcs;
 
 #endif
index ae7d8a1..7977a78 100644
@@ -1313,10 +1313,15 @@ static void mmhub_v1_7_query_ras_error_status(struct amdgpu_device *adev)
        }
 }
 
-const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs = {
+const struct amdgpu_mmhub_ras_funcs mmhub_v1_7_ras_funcs = {
        .ras_late_init = amdgpu_mmhub_ras_late_init,
+       .ras_fini = amdgpu_mmhub_ras_fini,
        .query_ras_error_count = mmhub_v1_7_query_ras_error_count,
        .reset_ras_error_count = mmhub_v1_7_reset_ras_error_count,
+       .query_ras_error_status = mmhub_v1_7_query_ras_error_status,
+};
+
+const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs = {
        .get_fb_location = mmhub_v1_7_get_fb_location,
        .init = mmhub_v1_7_init,
        .gart_enable = mmhub_v1_7_gart_enable,
@@ -1325,5 +1330,4 @@ const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs = {
        .set_clockgating = mmhub_v1_7_set_clockgating,
        .get_clockgating = mmhub_v1_7_get_clockgating,
        .setup_vm_pt_regs = mmhub_v1_7_setup_vm_pt_regs,
-       .query_ras_error_status = mmhub_v1_7_query_ras_error_status,
 };
index bf2fbeb..a7f9dfc 100644
@@ -24,5 +24,6 @@
 #define __MMHUB_V1_7_H__
 
 extern const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs;
+extern const struct amdgpu_mmhub_ras_funcs mmhub_v1_7_ras_funcs;
 
 #endif
index f107385..da7edd1 100644
@@ -689,7 +689,6 @@ static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
 }
 
 const struct amdgpu_mmhub_funcs mmhub_v2_0_funcs = {
-       .ras_late_init = amdgpu_mmhub_ras_late_init,
        .init = mmhub_v2_0_init,
        .gart_enable = mmhub_v2_0_gart_enable,
        .set_fault_enable_default = mmhub_v2_0_set_fault_enable_default,
index ab9be5a..1141c37 100644
@@ -616,7 +616,6 @@ static void mmhub_v2_3_get_clockgating(struct amdgpu_device *adev, u32 *flags)
 }
 
 const struct amdgpu_mmhub_funcs mmhub_v2_3_funcs = {
-       .ras_late_init = amdgpu_mmhub_ras_late_init,
        .init = mmhub_v2_3_init,
        .gart_enable = mmhub_v2_3_gart_enable,
        .set_fault_enable_default = mmhub_v2_3_set_fault_enable_default,
index 4a31737..0cffa82 100644
@@ -1652,10 +1652,15 @@ static void mmhub_v9_4_query_ras_error_status(struct amdgpu_device *adev)
        }
 }
 
-const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
+const struct amdgpu_mmhub_ras_funcs mmhub_v9_4_ras_funcs = {
        .ras_late_init = amdgpu_mmhub_ras_late_init,
+       .ras_fini = amdgpu_mmhub_ras_fini,
        .query_ras_error_count = mmhub_v9_4_query_ras_error_count,
        .reset_ras_error_count = mmhub_v9_4_reset_ras_error_count,
+       .query_ras_error_status = mmhub_v9_4_query_ras_error_status,
+};
+
+const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
        .get_fb_location = mmhub_v9_4_get_fb_location,
        .init = mmhub_v9_4_init,
        .gart_enable = mmhub_v9_4_gart_enable,
@@ -1664,5 +1669,4 @@ const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
        .set_clockgating = mmhub_v9_4_set_clockgating,
        .get_clockgating = mmhub_v9_4_get_clockgating,
        .setup_vm_pt_regs = mmhub_v9_4_setup_vm_pt_regs,
-       .query_ras_error_status = mmhub_v9_4_query_ras_error_status,
 };
index 92404a8..90436ef 100644
@@ -24,5 +24,6 @@
 #define __MMHUB_V9_4_H__
 
 extern const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs;
+extern const struct amdgpu_mmhub_ras_funcs mmhub_v9_4_ras_funcs;
 
 #endif
index c477f89..af44aad 100644
@@ -557,6 +557,16 @@ static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev,
                       DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
 }
 
+const struct amdgpu_nbio_ras_funcs nbio_v7_4_ras_funcs = {
+       .handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
+       .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
+       .init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt,
+       .init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt,
+       .query_ras_error_count = nbio_v7_4_query_ras_error_count,
+       .ras_late_init = amdgpu_nbio_ras_late_init,
+       .ras_fini = amdgpu_nbio_ras_fini,
+};
+
 const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
        .get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
        .get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
@@ -577,10 +587,4 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
        .ih_control = nbio_v7_4_ih_control,
        .init_registers = nbio_v7_4_init_registers,
        .remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
-       .handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
-       .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
-       .init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt,
-       .init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt,
-       .query_ras_error_count = nbio_v7_4_query_ras_error_count,
-       .ras_late_init = amdgpu_nbio_ras_late_init,
 };
index b1ac828..b821658 100644 (file)
@@ -28,5 +28,6 @@
 
 extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg;
 extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs;
+extern const struct amdgpu_nbio_ras_funcs nbio_v7_4_ras_funcs;
 
 #endif
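
The hunks above split the RAS entry points out of amdgpu_nbio_funcs into a
dedicated amdgpu_nbio_ras_funcs table, so RAS support is expressed by the
presence of the table rather than by per-ASIC checks. A minimal sketch of the
registration and the guarded dispatch (the dispatch side matches the soc15.c
hunks further below; the exact registration site is assumed):

    /* Sketch only: where the table is attached is an assumption */
    adev->nbio.ras_funcs = &nbio_v7_4_ras_funcs;

    /* Callers test the table and the member, never the asic type */
    if (adev->nbio.ras_funcs &&
        adev->nbio.ras_funcs->ras_late_init)
            r = adev->nbio.ras_funcs->ras_late_init(adev);
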
index 5846eac..46d4bba 100644 (file)
@@ -34,7 +34,6 @@
 #include "amdgpu_vce.h"
 #include "amdgpu_ucode.h"
 #include "amdgpu_psp.h"
-#include "amdgpu_smu.h"
 #include "atom.h"
 #include "amd_pcie.h"
 
@@ -516,21 +515,9 @@ static int nv_asic_mode2_reset(struct amdgpu_device *adev)
        return ret;
 }
 
-static bool nv_asic_supports_baco(struct amdgpu_device *adev)
-{
-       struct smu_context *smu = &adev->smu;
-
-       if (smu_baco_is_support(smu))
-               return true;
-       else
-               return false;
-}
-
 static enum amd_reset_method
 nv_asic_reset_method(struct amdgpu_device *adev)
 {
-       struct smu_context *smu = &adev->smu;
-
        if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
            amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
            amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
@@ -549,7 +536,7 @@ nv_asic_reset_method(struct amdgpu_device *adev)
        case CHIP_DIMGREY_CAVEFISH:
                return AMD_RESET_METHOD_MODE1;
        default:
-               if (smu_baco_is_support(smu))
+               if (amdgpu_dpm_is_baco_supported(adev))
                        return AMD_RESET_METHOD_BACO;
                else
                        return AMD_RESET_METHOD_MODE1;
@@ -559,11 +546,6 @@ nv_asic_reset_method(struct amdgpu_device *adev)
 static int nv_asic_reset(struct amdgpu_device *adev)
 {
        int ret = 0;
-       struct smu_context *smu = &adev->smu;
-
-       /* skip reset on vangogh for now */
-       if (adev->asic_type == CHIP_VANGOGH)
-               return 0;
 
        switch (nv_asic_reset_method(adev)) {
        case AMD_RESET_METHOD_PCI:
@@ -572,13 +554,7 @@ static int nv_asic_reset(struct amdgpu_device *adev)
                break;
        case AMD_RESET_METHOD_BACO:
                dev_info(adev->dev, "BACO reset\n");
-
-               ret = smu_baco_enter(smu);
-               if (ret)
-                       return ret;
-               ret = smu_baco_exit(smu);
-               if (ret)
-                       return ret;
+               ret = amdgpu_dpm_baco_reset(adev);
                break;
        case AMD_RESET_METHOD_MODE2:
                dev_info(adev->dev, "MODE2 reset\n");
@@ -986,7 +962,7 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
        .need_full_reset = &nv_need_full_reset,
        .need_reset_on_init = &nv_need_reset_on_init,
        .get_pcie_replay_count = &nv_get_pcie_replay_count,
-       .supports_baco = &nv_asic_supports_baco,
+       .supports_baco = &amdgpu_dpm_is_baco_supported,
        .pre_asic_init = &nv_pre_asic_init,
        .update_umd_stable_pstate = &nv_update_umd_stable_pstate,
        .query_video_codecs = &nv_query_video_codecs,
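
With the amdgpu_smu plumbing gone, nv.c queries and executes BACO entirely
through the amdgpu_dpm wrappers, which hide whether a swSMU or a powerplay
backend sits underneath. A minimal sketch of the resulting call sequence,
using only the two helpers named in the hunk above (the surrounding function
is illustrative):

    /* Sketch: prefer BACO when the DPM layer reports support */
    static int example_baco_reset(struct amdgpu_device *adev)
    {
            if (!amdgpu_dpm_is_baco_supported(adev))
                    return -EOPNOTSUPP;   /* real code falls back to MODE1 */
            dev_info(adev->dev, "BACO reset\n");
            return amdgpu_dpm_baco_reset(adev); /* enter + exit in one call */
    }
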
index a41b054..dd4d65f 100644 (file)
@@ -102,6 +102,21 @@ enum psp_gfx_cmd_id
     /* IDs upto 0x1F are reserved for older programs (Raven, Vega 10/12/20) */
     GFX_CMD_ID_LOAD_TOC           = 0x00000020,   /* Load TOC and obtain TMR size */
     GFX_CMD_ID_AUTOLOAD_RLC       = 0x00000021,   /* Indicates all graphics fw loaded, start RLC autoload */
+    GFX_CMD_ID_BOOT_CFG           = 0x00000022,   /* Boot Config */
+};
+
+/* PSP boot config sub-commands */
+enum psp_gfx_boot_config_cmd
+{
+    BOOTCFG_CMD_SET         = 1, /* Set boot configuration settings */
+    BOOTCFG_CMD_GET         = 2, /* Get boot configuration settings */
+    BOOTCFG_CMD_INVALIDATE  = 3  /* Reset current boot configuration settings to VBIOS defaults */
+};
+
+/* PSP boot config bitmask values */
+enum psp_gfx_boot_config
+{
+    BOOT_CONFIG_GECC = 0x1,
 };
 
 /* Command to load Trusted Application binary into PSP OS. */
@@ -273,6 +288,15 @@ struct psp_gfx_cmd_load_toc
     uint32_t        toc_size;               /* FW buffer size in bytes */
 };
 
+/* Dynamic boot configuration */
+struct psp_gfx_cmd_boot_cfg
+{
+    uint32_t                        timestamp;            /* calendar time as number of seconds */
+    enum psp_gfx_boot_config_cmd    sub_cmd;              /* sub-command indicating how to process command data */
+    uint32_t                        boot_config;          /* dynamic boot configuration bitmask */
+    uint32_t                        boot_config_valid;    /* dynamic boot configuration valid bits bitmask */
+};
+
 /* All GFX ring buffer commands. */
 union psp_gfx_commands
 {
@@ -285,6 +309,7 @@ union psp_gfx_commands
     struct psp_gfx_cmd_reg_prog       cmd_setup_reg_prog;
     struct psp_gfx_cmd_setup_tmr        cmd_setup_vmr;
     struct psp_gfx_cmd_load_toc         cmd_load_toc;
+    struct psp_gfx_cmd_boot_cfg         boot_cfg;
 };
 
 struct psp_gfx_uresp_reserved
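
GFX_CMD_ID_BOOT_CFG and the psp_gfx_cmd_boot_cfg payload let the driver get,
set, or invalidate PSP boot options such as GECC at runtime. A minimal sketch
of filling the command buffer, assuming the existing psp_gfx_cmd_resp wrapper
whose cmd member is the psp_gfx_commands union:

    /* Sketch: enable GECC; only the GECC bit is marked valid */
    static void example_fill_boot_cfg(struct psp_gfx_cmd_resp *cmd)
    {
            cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
            cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
            cmd->cmd.boot_cfg.boot_config = BOOT_CONFIG_GECC;
            cmd->cmd.boot_cfg.boot_config_valid = BOOT_CONFIG_GECC;
    }
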
index c325d6f..589410c 100644 (file)
@@ -598,7 +598,7 @@ static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg)
 }
 
 /*
- * save and restore proces
+ * save and restore process
  */
 static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
 {
@@ -661,9 +661,9 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
 
        if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) {
                /*
-                * Long traing will encroach certain mount of bottom VRAM,
-                * saving the content of this bottom VRAM to system memory
-                * before training, and restoring it after training to avoid
+                * Long training will encroach a certain amount on the bottom of VRAM;
+                * save the content from the bottom of VRAM to system memory
+                * before training, and restore it after training to avoid
                 * VRAM corruption.
                 */
                sz = GDDR6_MEM_TRAINING_ENCROACHED_SIZE;
index eb5dc6c..9f0dda0 100644 (file)
@@ -876,12 +876,10 @@ static int sdma_v2_4_sw_init(void *handle)
                ring->ring_obj = NULL;
                ring->use_doorbell = false;
                sprintf(ring->name, "sdma%d", i);
-               r = amdgpu_ring_init(adev, ring, 1024,
-                                    &adev->sdma.trap_irq,
-                                    (i == 0) ?
-                                    AMDGPU_SDMA_IRQ_INSTANCE0 :
+               r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
+                                    (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
                                     AMDGPU_SDMA_IRQ_INSTANCE1,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
index ad308d8..135727b 100644 (file)
@@ -1160,12 +1160,10 @@ static int sdma_v3_0_sw_init(void *handle)
                }
 
                sprintf(ring->name, "sdma%d", i);
-               r = amdgpu_ring_init(adev, ring, 1024,
-                                    &adev->sdma.trap_irq,
-                                    (i == 0) ?
-                                    AMDGPU_SDMA_IRQ_INSTANCE0 :
+               r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
+                                    (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
                                     AMDGPU_SDMA_IRQ_INSTANCE1,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
index 98b7db5..5715be6 100644 (file)
@@ -1968,7 +1968,7 @@ static int sdma_v4_0_sw_init(void *handle)
                sprintf(ring->name, "sdma%d", i);
                r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
                                     AMDGPU_SDMA_IRQ_INSTANCE0 + i,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
 
@@ -1987,7 +1987,7 @@ static int sdma_v4_0_sw_init(void *handle)
                        r = amdgpu_ring_init(adev, ring, 1024,
                                             &adev->sdma.trap_irq,
                                             AMDGPU_SDMA_IRQ_INSTANCE0 + i,
-                                            AMDGPU_RING_PRIO_DEFAULT);
+                                            AMDGPU_RING_PRIO_DEFAULT, NULL);
                        if (r)
                                return r;
                }
index d345e32..920fc6d 100644 (file)
@@ -1273,12 +1273,10 @@ static int sdma_v5_0_sw_init(void *handle)
                        : (adev->doorbell_index.sdma_engine[1] << 1); // get DWORD offset
 
                sprintf(ring->name, "sdma%d", i);
-               r = amdgpu_ring_init(adev, ring, 1024,
-                                    &adev->sdma.trap_irq,
-                                    (i == 0) ?
-                                    AMDGPU_SDMA_IRQ_INSTANCE0 :
+               r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
+                                    (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
                                     AMDGPU_SDMA_IRQ_INSTANCE1,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
index b39e7db..93f826a 100644 (file)
@@ -1283,10 +1283,9 @@ static int sdma_v5_2_sw_init(void *handle)
                        (adev->doorbell_index.sdma_engine[i] << 1); //get DWORD offset
 
                sprintf(ring->name, "sdma%d", i);
-               r = amdgpu_ring_init(adev, ring, 1024,
-                                    &adev->sdma.trap_irq,
+               r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
                                     AMDGPU_SDMA_IRQ_INSTANCE0 + i,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
index 488497a..cb703e3 100644 (file)
@@ -507,10 +507,9 @@ static int si_dma_sw_init(void *handle)
                sprintf(ring->name, "sdma%d", i);
                r = amdgpu_ring_init(adev, ring, 1024,
                                     &adev->sdma.trap_irq,
-                                    (i == 0) ?
-                                    AMDGPU_SDMA_IRQ_INSTANCE0 :
+                                    (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
                                     AMDGPU_SDMA_IRQ_INSTANCE1,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
index 3808402..5c5eb3a 100644 (file)
@@ -76,7 +76,6 @@
 #include "smuio_v13_0.h"
 #include "dce_virtual.h"
 #include "mxgpu_ai.h"
-#include "amdgpu_smu.h"
 #include "amdgpu_ras.h"
 #include "amdgpu_xgmi.h"
 #include <uapi/linux/kfd_ioctl.h>
@@ -1495,8 +1494,8 @@ static int soc15_common_early_init(void *handle)
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_SDMA_MGCG |
                        AMD_CG_SUPPORT_SDMA_LS |
-                       AMD_CG_SUPPORT_IH_CG;
-                       /*AMD_CG_SUPPORT_VCN_MGCG |AMD_CG_SUPPORT_JPEG_MGCG;*/
+                       AMD_CG_SUPPORT_IH_CG |
+                       AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG;
                adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG;
                adev->external_rev_id = adev->rev_id + 0x3c;
                break;
@@ -1524,8 +1523,9 @@ static int soc15_common_late_init(void *handle)
        if (adev->hdp.funcs->reset_ras_error_count)
                adev->hdp.funcs->reset_ras_error_count(adev);
 
-       if (adev->nbio.funcs->ras_late_init)
-               r = adev->nbio.funcs->ras_late_init(adev);
+       if (adev->nbio.ras_funcs &&
+           adev->nbio.ras_funcs->ras_late_init)
+               r = adev->nbio.ras_funcs->ras_late_init(adev);
 
        return r;
 }
@@ -1546,7 +1546,9 @@ static int soc15_common_sw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       amdgpu_nbio_ras_fini(adev);
+       if (adev->nbio.ras_funcs &&
+           adev->nbio.ras_funcs->ras_fini)
+               adev->nbio.ras_funcs->ras_fini(adev);
        adev->df.funcs->sw_fini(adev);
        return 0;
 }
@@ -1610,9 +1612,11 @@ static int soc15_common_hw_fini(void *handle)
 
        if (adev->nbio.ras_if &&
            amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
-               if (adev->nbio.funcs->init_ras_controller_interrupt)
+               if (adev->nbio.ras_funcs &&
+                   adev->nbio.ras_funcs->init_ras_controller_interrupt)
                        amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
-               if (adev->nbio.funcs->init_ras_err_event_athub_interrupt)
+               if (adev->nbio.ras_funcs &&
+                   adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt)
                        amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
        }
 
index 8cdf5d1..14bd794 100644 (file)
 })
 
 #define WREG32_RLC(reg, value) \
-       do {                                                    \
-               if (amdgpu_sriov_fullaccess(adev)) {    \
-                       uint32_t i = 0; \
-                       uint32_t retries = 50000;       \
-                       uint32_t r0 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0;   \
-                       uint32_t r1 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1;   \
-                       uint32_t spare_int = adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT;  \
-                       WREG32(r0, value);      \
-                       WREG32(r1, (reg | 0x80000000)); \
-                       WREG32(spare_int, 0x1); \
-                       for (i = 0; i < retries; i++) { \
-                               u32 tmp = RREG32(r1);   \
-                               if (!(tmp & 0x80000000))        \
-                                       break;  \
-                               udelay(10);     \
-                       }       \
-                       if (i >= retries)       \
-                               pr_err("timeout: rlcg program reg:0x%05x failed !\n", reg);     \
-               } else {        \
-                       WREG32(reg, value); \
-               }       \
+       do { \
+               if (adev->gfx.rlc.funcs->rlcg_wreg) \
+                       adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, value, 0); \
+               else \
+                       WREG32(reg, value);     \
        } while (0)
 
 #define WREG32_RLC_EX(prefix, reg, value) \
        } while (0)
 
 #define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \
-       do {                                                    \
-               uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\
-               if (amdgpu_sriov_fullaccess(adev)) {    \
-                       uint32_t r2 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2;   \
-                       uint32_t r3 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3;   \
-                       uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;   \
-                       uint32_t grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;   \
-                       if (target_reg == grbm_cntl) \
-                               WREG32(r2, value);      \
-                       else if (target_reg == grbm_idx) \
-                               WREG32(r3, value);      \
-                       WREG32(target_reg, value);      \
-               } else {        \
-                       WREG32(target_reg, value); \
-               }       \
+       WREG32_RLC((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value)
+
+#define RREG32_RLC(reg) \
+       (adev->gfx.rlc.funcs->rlcg_rreg ? \
+               adev->gfx.rlc.funcs->rlcg_rreg(adev, reg, 0) : RREG32(reg))
+
+#define WREG32_RLC_NO_KIQ(reg, value) \
+       do { \
+               if (adev->gfx.rlc.funcs->rlcg_wreg) \
+                       adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, value, AMDGPU_REGS_NO_KIQ); \
+               else \
+                       WREG32_NO_KIQ(reg, value);      \
        } while (0)
 
+#define RREG32_RLC_NO_KIQ(reg) \
+       (adev->gfx.rlc.funcs->rlcg_rreg ? \
+               adev->gfx.rlc.funcs->rlcg_rreg(adev, reg, AMDGPU_REGS_NO_KIQ) : RREG32_NO_KIQ(reg))
+
 #define WREG32_SOC15_RLC_SHADOW_EX(prefix, ip, inst, reg, value) \
        do {                                                    \
                uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\
                }       \
        } while (0)
 
+#define RREG32_SOC15_RLC(ip, inst, reg) \
+       RREG32_RLC(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
+
 #define WREG32_SOC15_RLC(ip, inst, reg, value) \
        do {                                                    \
-                       uint32_t target_reg = adev->reg_offset[GC_HWIP][0][reg##_BASE_IDX] + reg;\
-                       WREG32_RLC(target_reg, value); \
+               uint32_t target_reg = adev->reg_offset[ip##_HWIP][0][reg##_BASE_IDX] + reg;\
+               WREG32_RLC(target_reg, value); \
        } while (0)
 
 #define WREG32_SOC15_RLC_EX(prefix, ip, inst, reg, value) \
        } while (0)
 
 #define WREG32_FIELD15_RLC(ip, idx, reg, field, val)   \
-    WREG32_RLC((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \
-    (RREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg) \
-    & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+       WREG32_RLC((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \
+       (RREG32_RLC(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg) \
+       & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
 
 #define WREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset, value) \
-    WREG32_RLC(((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset), value)
+       WREG32_RLC(((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset), value)
+
+#define RREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset) \
+       RREG32_RLC(((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset))
 
 #endif
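
The rewritten RLC accessors replace the open-coded SR-IOV scratch-register
handshake with optional rlcg_wreg/rlcg_rreg callbacks in adev->gfx.rlc.funcs:
when a backend provides them, WREG32_RLC()/RREG32_RLC() route through the
RLCG path, otherwise they fall back to plain MMIO. A minimal sketch of how a
backend hooks in (callback body elided; the gfx_v9_0 names are assumptions):

    /* Sketch: signature matches the macro call sites above */
    static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset,
                                   u32 value, u32 flag)
    {
            /* SCRATCH_REG0/1 + RLC_SPARE_INT handshake goes here */
    }

    static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
            /* ...existing members... */
            .rlcg_wreg = gfx_v9_0_rlcg_wreg, /* WREG32_RLC() routes here */
    };
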
index 96d7769..20b4498 100644 (file)
@@ -22,6 +22,7 @@
  */
 #include "umc_v6_1.h"
 #include "amdgpu_ras.h"
+#include "amdgpu_umc.h"
 #include "amdgpu.h"
 
 #include "rsmu/rsmu_0_0_2_offset.h"
@@ -464,9 +465,10 @@ static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev)
                umc_v6_1_enable_umc_index_mode(adev);
 }
 
-const struct amdgpu_umc_funcs umc_v6_1_funcs = {
+const struct amdgpu_umc_ras_funcs umc_v6_1_ras_funcs = {
        .err_cnt_init = umc_v6_1_err_cnt_init,
        .ras_late_init = amdgpu_umc_ras_late_init,
+       .ras_fini = amdgpu_umc_ras_fini,
        .query_ras_error_count = umc_v6_1_query_ras_error_count,
        .query_ras_error_address = umc_v6_1_query_ras_error_address,
 };
index 0ce1d32..5dc36c7 100644 (file)
@@ -45,7 +45,7 @@
 /* umc ce count initial value */
 #define UMC_V6_1_CE_CNT_INIT   (UMC_V6_1_CE_CNT_MAX - UMC_V6_1_CE_INT_THRESHOLD)
 
-extern const struct amdgpu_umc_funcs umc_v6_1_funcs;
+extern const struct amdgpu_umc_ras_funcs umc_v6_1_ras_funcs;
 extern const uint32_t
        umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM];
 
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
new file mode 100644 (file)
index 0000000..3a8f787
--- /dev/null
@@ -0,0 +1,281 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "umc_v6_7.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_umc.h"
+#include "amdgpu.h"
+
+#include "umc/umc_6_7_0_offset.h"
+#include "umc/umc_6_7_0_sh_mask.h"
+
+static inline uint32_t get_umc_v6_7_reg_offset(struct amdgpu_device *adev,
+                                             uint32_t umc_inst,
+                                             uint32_t ch_inst)
+{
+       return adev->umc.channel_offs * ch_inst + UMC_V6_7_INST_DIST * umc_inst;
+}
+
+static void umc_v6_7_query_correctable_error_count(struct amdgpu_device *adev,
+                                                  uint32_t umc_reg_offset,
+                                                  unsigned long *error_count)
+{
+       uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+       uint32_t ecc_err_cnt, ecc_err_cnt_addr;
+       uint64_t mc_umc_status;
+       uint32_t mc_umc_status_addr;
+
+       /* UMC 6_1_1 registers */
+       ecc_err_cnt_sel_addr =
+               SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_EccErrCntSel);
+       ecc_err_cnt_addr =
+               SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_EccErrCnt);
+       mc_umc_status_addr =
+               SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
+
+       /* select the lower chip and check the error count */
+       ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);
+       ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
+                                       EccErrCntCsSel, 0);
+       WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+
+       ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
+       *error_count +=
+               (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
+                UMC_V6_7_CE_CNT_INIT);
+
+       /* select the higher chip and check the err counter */
+       ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
+                                       EccErrCntCsSel, 1);
+       WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+
+       ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
+       *error_count +=
+               (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
+                UMC_V6_7_CE_CNT_INIT);
+
+       /* check for SRAM correctable error,
+        * MCUMC_STATUS is a 64 bit register */
+       mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+       if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+           REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
+               *error_count += 1;
+}
+
+static void umc_v6_7_query_uncorrectable_error_count(struct amdgpu_device *adev,
+                                                     uint32_t umc_reg_offset,
+                                                     unsigned long *error_count)
+{
+       uint64_t mc_umc_status;
+       uint32_t mc_umc_status_addr;
+
+       mc_umc_status_addr =
+               SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
+
+       /* check the MCUMC_STATUS */
+       mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+       if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+           (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
+           REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+           REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
+           REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
+           REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
+               *error_count += 1;
+}
+
+static void umc_v6_7_reset_error_count_per_channel(struct amdgpu_device *adev,
+                                                  uint32_t umc_reg_offset)
+{
+       uint32_t ecc_err_cnt_addr;
+       uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+
+       ecc_err_cnt_sel_addr =
+               SOC15_REG_OFFSET(UMC, 0,
+                               regUMCCH0_0_EccErrCntSel);
+       ecc_err_cnt_addr =
+               SOC15_REG_OFFSET(UMC, 0,
+                               regUMCCH0_0_EccErrCnt);
+
+       /* select the lower chip */
+       ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
+                                      umc_reg_offset) * 4);
+       ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
+                                       UMCCH0_0_EccErrCntSel,
+                                       EccErrCntCsSel, 0);
+       WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
+                       ecc_err_cnt_sel);
+
+       /* clear lower chip error count */
+       WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+                       UMC_V6_7_CE_CNT_INIT);
+
+       /* select the higher chip */
+       ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
+                                       umc_reg_offset) * 4);
+       ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
+                                       UMCCH0_0_EccErrCntSel,
+                                       EccErrCntCsSel, 1);
+       WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
+                       ecc_err_cnt_sel);
+
+       /* clear higher chip error count */
+       WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+                       UMC_V6_7_CE_CNT_INIT);
+}
+
+static void umc_v6_7_reset_error_count(struct amdgpu_device *adev)
+{
+       uint32_t umc_inst        = 0;
+       uint32_t ch_inst         = 0;
+       uint32_t umc_reg_offset  = 0;
+
+       LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+               umc_reg_offset = get_umc_v6_7_reg_offset(adev,
+                                                        umc_inst,
+                                                        ch_inst);
+
+               umc_v6_7_reset_error_count_per_channel(adev,
+                                                      umc_reg_offset);
+       }
+}
+
+static void umc_v6_7_query_ras_error_count(struct amdgpu_device *adev,
+                                          void *ras_error_status)
+{
+       struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+       uint32_t umc_inst        = 0;
+       uint32_t ch_inst         = 0;
+       uint32_t umc_reg_offset  = 0;
+
+       /* TODO: the driver needs to toggle DF Cstate to ensure safe
+        * access to UMC registers; the protection will be added later. */
+       LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+               umc_reg_offset = get_umc_v6_7_reg_offset(adev,
+                                                        umc_inst,
+                                                        ch_inst);
+               umc_v6_7_query_correctable_error_count(adev,
+                                                      umc_reg_offset,
+                                                      &(err_data->ce_count));
+               umc_v6_7_query_uncorrectable_error_count(adev,
+                                                         umc_reg_offset,
+                                                         &(err_data->ue_count));
+       }
+
+       umc_v6_7_reset_error_count(adev);
+}
+
+static void umc_v6_7_query_error_address(struct amdgpu_device *adev,
+                                        struct ras_err_data *err_data,
+                                        uint32_t umc_reg_offset,
+                                        uint32_t ch_inst,
+                                        uint32_t umc_inst)
+{
+       uint32_t mc_umc_status_addr;
+       uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0;
+       struct eeprom_table_record *err_rec;
+       uint32_t channel_index;
+
+       mc_umc_status_addr =
+               SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
+       mc_umc_addrt0 =
+               SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
+
+       mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+
+       if (mc_umc_status == 0)
+               return;
+
+       if (!err_data->err_addr) {
+               /* clear umc status */
+               WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+               return;
+       }
+
+       err_rec = &err_data->err_addr[err_data->err_addr_cnt];
+
+       channel_index =
+               adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
+
+       /* calculate error address if ue/ce error is detected */
+       if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+           (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+           REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
+
+               err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
+               err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);
+
+               /* translate umc channel address to soc pa, 3 parts are included */
+               retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
+                               ADDR_OF_256B_BLOCK(channel_index) |
+                               OFFSET_IN_256B_BLOCK(err_addr);
+
+               /* we only save ue error information currently, ce is skipped */
+               if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC)
+                               == 1) {
+                       err_rec->address = err_addr;
+                       /* page frame address is saved */
+                       err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+                       err_rec->ts = (uint64_t)ktime_get_real_seconds();
+                       err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+                       err_rec->cu = 0;
+                       err_rec->mem_channel = channel_index;
+                       err_rec->mcumc_id = umc_inst;
+
+                       err_data->err_addr_cnt++;
+               }
+       }
+
+       /* clear umc status */
+       WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
+}
+
+static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
+                                            void *ras_error_status)
+{
+       struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+       uint32_t umc_inst        = 0;
+       uint32_t ch_inst         = 0;
+       uint32_t umc_reg_offset  = 0;
+
+       /* TODO: the driver needs to toggle DF Cstate to ensure safe
+        * access to UMC registers; the protection will be added
+        * once the firmware interface is ready. */
+       LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+               umc_reg_offset = get_umc_v6_7_reg_offset(adev,
+                                                        umc_inst,
+                                                        ch_inst);
+               umc_v6_7_query_error_address(adev,
+                                            err_data,
+                                            umc_reg_offset,
+                                            ch_inst,
+                                            umc_inst);
+       }
+}
+
+const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs = {
+       .ras_late_init = amdgpu_umc_ras_late_init,
+       .ras_fini = amdgpu_umc_ras_fini,
+       .query_ras_error_count = umc_v6_7_query_ras_error_count,
+       .query_ras_error_address = umc_v6_7_query_ras_error_address,
+};
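
Like the umc_v6_1 and umc_v8_7 conversions elsewhere in this series, the new
table is attached to the device rather than exported through the old
amdgpu_umc_funcs; a one-line sketch of the expected hook-up (the registration
site in the GMC code is assumed):

    adev->umc.ras_funcs = &umc_v6_7_ras_funcs;
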
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h
new file mode 100644 (file)
index 0000000..4eb85f2
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __UMC_V6_7_H__
+#define __UMC_V6_7_H__
+
+/* EccErrCnt max value */
+#define UMC_V6_7_CE_CNT_MAX            0xffff
+/* umc ce interrupt threshold */
+#define UMC_V6_7_CE_INT_THRESHOLD      0xffff
+/* umc ce count initial value */
+#define UMC_V6_7_CE_CNT_INIT   (UMC_V6_7_CE_CNT_MAX - UMC_V6_7_CE_INT_THRESHOLD)
+
+#define UMC_V6_7_INST_DIST     0x40000
+
+extern const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs;
+
+#endif
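
get_umc_v6_7_reg_offset() composes a channel's register base from two
strides: adev->umc.channel_offs per channel instance and UMC_V6_7_INST_DIST
(0x40000) per UMC instance. A worked sketch with an assumed channel stride:

    /* Sketch: the 0x800 channel stride is assumed for illustration */
    uint32_t channel_offs = 0x800;
    uint32_t off = channel_offs * 2 /* ch_inst */
                 + UMC_V6_7_INST_DIST * 1 /* umc_inst */;
    /* off = 0x1000 + 0x40000 = 0x41000 */
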
index a064c09..89d20ad 100644 (file)
@@ -22,6 +22,7 @@
  */
 #include "umc_v8_7.h"
 #include "amdgpu_ras.h"
+#include "amdgpu_umc.h"
 #include "amdgpu.h"
 
 #include "rsmu/rsmu_0_0_2_offset.h"
@@ -323,9 +324,10 @@ static void umc_v8_7_err_cnt_init(struct amdgpu_device *adev)
        }
 }
 
-const struct amdgpu_umc_funcs umc_v8_7_funcs = {
+const struct amdgpu_umc_ras_funcs umc_v8_7_ras_funcs = {
        .err_cnt_init = umc_v8_7_err_cnt_init,
        .ras_late_init = amdgpu_umc_ras_late_init,
+       .ras_fini = amdgpu_umc_ras_fini,
        .query_ras_error_count = umc_v8_7_query_ras_error_count,
        .query_ras_error_address = umc_v8_7_query_ras_error_address,
 };
index d4d0468..37e6dc7 100644 (file)
@@ -44,7 +44,7 @@
 /* umc ce count initial value */
 #define UMC_V8_7_CE_CNT_INIT   (UMC_V8_7_CE_CNT_MAX - UMC_V8_7_CE_INT_THRESHOLD)
 
-extern const struct amdgpu_umc_funcs umc_v8_7_funcs;
+extern const struct amdgpu_umc_ras_funcs umc_v8_7_ras_funcs;
 extern const uint32_t
        umc_v8_7_channel_idx_tbl[UMC_V8_7_UMC_INSTANCE_NUM][UMC_V8_7_CHANNEL_INSTANCE_NUM];
 
index 10ecae2..284447d 100644 (file)
@@ -562,7 +562,7 @@ static int uvd_v3_1_sw_init(void *handle)
        ring = &adev->uvd.inst->ring;
        sprintf(ring->name, "uvd");
        r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
-                        AMDGPU_RING_PRIO_DEFAULT);
+                        AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                return r;
 
index a70d2a0..a301518 100644 (file)
@@ -119,7 +119,7 @@ static int uvd_v4_2_sw_init(void *handle)
        ring = &adev->uvd.inst->ring;
        sprintf(ring->name, "uvd");
        r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
-                            AMDGPU_RING_PRIO_DEFAULT);
+                            AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                return r;
 
index f3b0a92..a4d5bd2 100644 (file)
@@ -117,7 +117,7 @@ static int uvd_v5_0_sw_init(void *handle)
        ring = &adev->uvd.inst->ring;
        sprintf(ring->name, "uvd");
        r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
-                            AMDGPU_RING_PRIO_DEFAULT);
+                            AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                return r;
 
index 7608598..2bab9c7 100644 (file)
@@ -420,7 +420,7 @@ static int uvd_v6_0_sw_init(void *handle)
        ring = &adev->uvd.inst->ring;
        sprintf(ring->name, "uvd");
        r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
-                            AMDGPU_RING_PRIO_DEFAULT);
+                            AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                return r;
 
@@ -434,7 +434,7 @@ static int uvd_v6_0_sw_init(void *handle)
                        sprintf(ring->name, "uvd_enc%d", i);
                        r = amdgpu_ring_init(adev, ring, 512,
                                             &adev->uvd.inst->irq, 0,
-                                            AMDGPU_RING_PRIO_DEFAULT);
+                                            AMDGPU_RING_PRIO_DEFAULT, NULL);
                        if (r)
                                return r;
                }
index 7cd67cb..0cd98fc 100644 (file)
@@ -454,7 +454,7 @@ static int uvd_v7_0_sw_init(void *handle)
                        sprintf(ring->name, "uvd_%d", ring->me);
                        r = amdgpu_ring_init(adev, ring, 512,
                                             &adev->uvd.inst[j].irq, 0,
-                                            AMDGPU_RING_PRIO_DEFAULT);
+                                            AMDGPU_RING_PRIO_DEFAULT, NULL);
                        if (r)
                                return r;
                }
@@ -475,7 +475,7 @@ static int uvd_v7_0_sw_init(void *handle)
                        }
                        r = amdgpu_ring_init(adev, ring, 512,
                                             &adev->uvd.inst[j].irq, 0,
-                                            AMDGPU_RING_PRIO_DEFAULT);
+                                            AMDGPU_RING_PRIO_DEFAULT, NULL);
                        if (r)
                                return r;
                }
index 0e2945b..c7d28c1 100644 (file)
@@ -433,9 +433,8 @@ static int vce_v2_0_sw_init(void *handle)
        for (i = 0; i < adev->vce.num_rings; i++) {
                ring = &adev->vce.ring[i];
                sprintf(ring->name, "vce%d", i);
-               r = amdgpu_ring_init(adev, ring, 512,
-                                    &adev->vce.irq, 0,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+               r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
index 6d9108f..3b82fb2 100644 (file)
@@ -443,7 +443,7 @@ static int vce_v3_0_sw_init(void *handle)
                ring = &adev->vce.ring[i];
                sprintf(ring->name, "vce%d", i);
                r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
index 37fa163..8e238de 100644 (file)
@@ -477,7 +477,7 @@ static int vce_v4_0_sw_init(void *handle)
                                ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1;
                }
                r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
index 6117931..51a773a 100644 (file)
@@ -129,7 +129,7 @@ static int vcn_v1_0_sw_init(void *handle)
        ring = &adev->vcn.inst->ring_dec;
        sprintf(ring->name, "vcn_dec");
        r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
-                            AMDGPU_RING_PRIO_DEFAULT);
+                            AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                return r;
 
@@ -148,7 +148,7 @@ static int vcn_v1_0_sw_init(void *handle)
                ring = &adev->vcn.inst->ring_enc[i];
                sprintf(ring->name, "vcn_enc%d", i);
                r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
index d63198c..116b964 100644 (file)
@@ -136,7 +136,7 @@ static int vcn_v2_0_sw_init(void *handle)
 
        sprintf(ring->name, "vcn_dec");
        r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
-                            AMDGPU_RING_PRIO_DEFAULT);
+                            AMDGPU_RING_PRIO_DEFAULT, NULL);
        if (r)
                return r;
 
@@ -167,7 +167,7 @@ static int vcn_v2_0_sw_init(void *handle)
                        ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
                sprintf(ring->name, "vcn_enc%d", i);
                r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
        }
index 87ec883..948813d 100644 (file)
@@ -189,7 +189,7 @@ static int vcn_v2_5_sw_init(void *handle)
                                (amdgpu_sriov_vf(adev) ? 2*j : 8*j);
                sprintf(ring->name, "vcn_dec_%d", j);
                r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
-                                    0, AMDGPU_RING_PRIO_DEFAULT);
+                                    0, AMDGPU_RING_PRIO_DEFAULT, NULL);
                if (r)
                        return r;
 
@@ -203,7 +203,7 @@ static int vcn_v2_5_sw_init(void *handle)
                        sprintf(ring->name, "vcn_enc_%d.%d", j, i);
                        r = amdgpu_ring_init(adev, ring, 512,
                                             &adev->vcn.inst[j].irq, 0,
-                                            AMDGPU_RING_PRIO_DEFAULT);
+                                            AMDGPU_RING_PRIO_DEFAULT, NULL);
                        if (r)
                                return r;
                }
index b61d1ba..3f15bf3 100644 (file)
@@ -50,6 +50,9 @@
 #define VCN_INSTANCES_SIENNA_CICHLID                           2
 #define DEC_SW_RING_ENABLED                                    FALSE
 
+#define RDECODE_MSG_CREATE                                     0x00000000
+#define RDECODE_MESSAGE_CREATE                                 0x00000001
+
 static int amdgpu_ih_clientid_vcns[] = {
        SOC15_IH_CLIENTID_VCN,
        SOC15_IH_CLIENTID_VCN1
@@ -171,6 +174,7 @@ static int vcn_v3_0_sw_init(void *handle)
 
        for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
                volatile struct amdgpu_fw_shared *fw_shared;
+
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
 
@@ -198,6 +202,8 @@ static int vcn_v3_0_sw_init(void *handle)
                if (r)
                        return r;
 
+               atomic_set(&adev->vcn.inst[i].sched_score, 0);
+
                ring = &adev->vcn.inst[i].ring_dec;
                ring->use_doorbell = true;
                if (amdgpu_sriov_vf(adev)) {
@@ -205,11 +211,10 @@ static int vcn_v3_0_sw_init(void *handle)
                } else {
                        ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i;
                }
-               if (adev->asic_type == CHIP_SIENNA_CICHLID && i != 0)
-                       ring->no_scheduler = true;
                sprintf(ring->name, "vcn_dec_%d", i);
                r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
-                                    AMDGPU_RING_PRIO_DEFAULT);
+                                    AMDGPU_RING_PRIO_DEFAULT,
+                                    &adev->vcn.inst[i].sched_score);
                if (r)
                        return r;
 
@@ -227,11 +232,10 @@ static int vcn_v3_0_sw_init(void *handle)
                        } else {
                                ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i;
                        }
-                       if (adev->asic_type == CHIP_SIENNA_CICHLID && i != 1)
-                               ring->no_scheduler = true;
                        sprintf(ring->name, "vcn_enc_%d.%d", i, j);
                        r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
-                                            AMDGPU_RING_PRIO_DEFAULT);
+                                            AMDGPU_RING_PRIO_DEFAULT,
+                                            &adev->vcn.inst[i].sched_score);
                        if (r)
                                return r;
                }
@@ -1844,6 +1848,132 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
 };
 
+static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p)
+{
+       struct drm_gpu_scheduler **scheds;
+
+       /* The create msg must be in the first IB submitted */
+       if (atomic_read(&p->entity->fence_seq))
+               return -EINVAL;
+
+       scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
+               [AMDGPU_RING_PRIO_DEFAULT].sched;
+       drm_sched_entity_modify_sched(p->entity, scheds, 1);
+       return 0;
+}
+
+static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
+{
+       struct ttm_operation_ctx ctx = { false, false };
+       struct amdgpu_bo_va_mapping *map;
+       uint32_t *msg, num_buffers;
+       struct amdgpu_bo *bo;
+       uint64_t start, end;
+       unsigned int i;
+       void *ptr;
+       int r;
+
+       addr &= AMDGPU_GMC_HOLE_MASK;
+       r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
+       if (r) {
+               DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
+               return r;
+       }
+
+       start = map->start * AMDGPU_GPU_PAGE_SIZE;
+       end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
+       if (addr & 0x7) {
+               DRM_ERROR("VCN messages must be 8 byte aligned!\n");
+               return -EINVAL;
+       }
+
+       bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+       amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+       if (r) {
+               DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r);
+               return r;
+       }
+
+       r = amdgpu_bo_kmap(bo, &ptr);
+       if (r) {
+               DRM_ERROR("Failed mapping the VCN message (%d)!\n", r);
+               return r;
+       }
+
+       msg = ptr + addr - start;
+
+       /* Check length */
+       if (msg[1] > end - addr) {
+               r = -EINVAL;
+               goto out;
+       }
+
+       if (msg[3] != RDECODE_MSG_CREATE)
+               goto out;
+
+       num_buffers = msg[2];
+       for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) {
+               uint32_t offset, size, *create;
+
+               if (msg[0] != RDECODE_MESSAGE_CREATE)
+                       continue;
+
+               offset = msg[1];
+               size = msg[2];
+
+               if (offset + size > end) {
+                       r = -EINVAL;
+                       goto out;
+               }
+
+               create = ptr + addr + offset - start;
+
+               /* H264, HEVC and VP9 can run on any instance */
+               if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
+                       continue;
+
+               r = vcn_v3_0_limit_sched(p);
+               if (r)
+                       goto out;
+       }
+
+out:
+       amdgpu_bo_kunmap(bo);
+       return r;
+}
+
+static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
+                                          uint32_t ib_idx)
+{
+       struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
+       struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
+       uint32_t msg_lo = 0, msg_hi = 0;
+       unsigned i;
+       int r;
+
+       /* The first instance can decode anything */
+       if (!ring->me)
+               return 0;
+
+       for (i = 0; i < ib->length_dw; i += 2) {
+               uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
+               uint32_t val = amdgpu_get_ib_value(p, ib_idx, i + 1);
+
+               if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
+                       msg_lo = val;
+               } else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) {
+                       msg_hi = val;
+               } else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) &&
+                          val == 0) {
+                       r = vcn_v3_0_dec_msg(p, ((u64)msg_hi) << 32 | msg_lo);
+                       if (r)
+                               return r;
+               }
+       }
+       return 0;
+}
+
 static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_VCN_DEC,
        .align_mask = 0xf,
@@ -1851,6 +1981,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
        .get_rptr = vcn_v3_0_dec_ring_get_rptr,
        .get_wptr = vcn_v3_0_dec_ring_get_wptr,
        .set_wptr = vcn_v3_0_dec_ring_set_wptr,
+       .patch_cs_in_place = vcn_v3_0_ring_patch_cs_in_place,
        .emit_frame_size =
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
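
The new patch_cs_in_place hook walks the IB as register/value pairs: writes
to internal.data0 and internal.data1 latch the low and high halves of the
decode message address, and a zero write to internal.cmd marks a complete
message, at which point vcn_v3_0_dec_msg() inspects it and, for codecs other
than H264/HEVC/VP9, pins the entity to instance 0 via vcn_v3_0_limit_sched().
The address assembly reduces to:

    /* From the hunk above: GPU VA assembled as data1:data0 */
    uint64_t msg_addr = ((u64)msg_hi << 32) | msg_lo;
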
index 6c3cb35..8a122b4 100644 (file)
@@ -264,10 +264,10 @@ static void vega20_ih_reroute_ih(struct amdgpu_device *adev)
 {
        uint32_t tmp;
 
-       /* vega20 ih reroute will go through psp
-        * this function is only used for arcturus
+       /* vega20 ih reroute will go through psp; this
+        * function is used for newer asics starting from arcturus
         */
-       if (adev->asic_type == CHIP_ARCTURUS) {
+       if (adev->asic_type >= CHIP_ARCTURUS) {
                /* Reroute to IH ring 1 for VMC */
                WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12);
                tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
index 6802c61..43de260 100644 (file)
@@ -870,52 +870,47 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
 {
        struct kfd_ioctl_get_process_apertures_args *args = data;
        struct kfd_process_device_apertures *pAperture;
-       struct kfd_process_device *pdd;
+       int i;
 
        dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
 
        args->num_of_nodes = 0;
 
        mutex_lock(&p->mutex);
+       /* Run over all pdd of the process */
+       for (i = 0; i < p->n_pdds; i++) {
+               struct kfd_process_device *pdd = p->pdds[i];
+
+               pAperture =
+                       &args->process_apertures[args->num_of_nodes];
+               pAperture->gpu_id = pdd->dev->id;
+               pAperture->lds_base = pdd->lds_base;
+               pAperture->lds_limit = pdd->lds_limit;
+               pAperture->gpuvm_base = pdd->gpuvm_base;
+               pAperture->gpuvm_limit = pdd->gpuvm_limit;
+               pAperture->scratch_base = pdd->scratch_base;
+               pAperture->scratch_limit = pdd->scratch_limit;
 
-       /*if the process-device list isn't empty*/
-       if (kfd_has_process_device_data(p)) {
-               /* Run over all pdd of the process */
-               pdd = kfd_get_first_process_device_data(p);
-               do {
-                       pAperture =
-                               &args->process_apertures[args->num_of_nodes];
-                       pAperture->gpu_id = pdd->dev->id;
-                       pAperture->lds_base = pdd->lds_base;
-                       pAperture->lds_limit = pdd->lds_limit;
-                       pAperture->gpuvm_base = pdd->gpuvm_base;
-                       pAperture->gpuvm_limit = pdd->gpuvm_limit;
-                       pAperture->scratch_base = pdd->scratch_base;
-                       pAperture->scratch_limit = pdd->scratch_limit;
-
-                       dev_dbg(kfd_device,
-                               "node id %u\n", args->num_of_nodes);
-                       dev_dbg(kfd_device,
-                               "gpu id %u\n", pdd->dev->id);
-                       dev_dbg(kfd_device,
-                               "lds_base %llX\n", pdd->lds_base);
-                       dev_dbg(kfd_device,
-                               "lds_limit %llX\n", pdd->lds_limit);
-                       dev_dbg(kfd_device,
-                               "gpuvm_base %llX\n", pdd->gpuvm_base);
-                       dev_dbg(kfd_device,
-                               "gpuvm_limit %llX\n", pdd->gpuvm_limit);
-                       dev_dbg(kfd_device,
-                               "scratch_base %llX\n", pdd->scratch_base);
-                       dev_dbg(kfd_device,
-                               "scratch_limit %llX\n", pdd->scratch_limit);
-
-                       args->num_of_nodes++;
-
-                       pdd = kfd_get_next_process_device_data(p, pdd);
-               } while (pdd && (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
-       }
+               dev_dbg(kfd_device,
+                       "node id %u\n", args->num_of_nodes);
+               dev_dbg(kfd_device,
+                       "gpu id %u\n", pdd->dev->id);
+               dev_dbg(kfd_device,
+                       "lds_base %llX\n", pdd->lds_base);
+               dev_dbg(kfd_device,
+                       "lds_limit %llX\n", pdd->lds_limit);
+               dev_dbg(kfd_device,
+                       "gpuvm_base %llX\n", pdd->gpuvm_base);
+               dev_dbg(kfd_device,
+                       "gpuvm_limit %llX\n", pdd->gpuvm_limit);
+               dev_dbg(kfd_device,
+                       "scratch_base %llX\n", pdd->scratch_base);
+               dev_dbg(kfd_device,
+                       "scratch_limit %llX\n", pdd->scratch_limit);
 
+               if (++args->num_of_nodes >= NUM_OF_SUPPORTED_GPUS)
+                       break;
+       }
        mutex_unlock(&p->mutex);
 
        return 0;
@@ -926,9 +921,8 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
 {
        struct kfd_ioctl_get_process_apertures_new_args *args = data;
        struct kfd_process_device_apertures *pa;
-       struct kfd_process_device *pdd;
-       uint32_t nodes = 0;
        int ret;
+       int i;
 
        dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
 
@@ -937,17 +931,7 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
                 * sufficient memory
                 */
                mutex_lock(&p->mutex);
-
-               if (!kfd_has_process_device_data(p))
-                       goto out_unlock;
-
-               /* Run over all pdd of the process */
-               pdd = kfd_get_first_process_device_data(p);
-               do {
-                       args->num_of_nodes++;
-                       pdd = kfd_get_next_process_device_data(p, pdd);
-               } while (pdd);
-
+               args->num_of_nodes = p->n_pdds;
                goto out_unlock;
        }
 
@@ -962,22 +946,23 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
 
        mutex_lock(&p->mutex);
 
-       if (!kfd_has_process_device_data(p)) {
+       if (!p->n_pdds) {
                args->num_of_nodes = 0;
                kfree(pa);
                goto out_unlock;
        }
 
        /* Run over all pdd of the process */
-       pdd = kfd_get_first_process_device_data(p);
-       do {
-               pa[nodes].gpu_id = pdd->dev->id;
-               pa[nodes].lds_base = pdd->lds_base;
-               pa[nodes].lds_limit = pdd->lds_limit;
-               pa[nodes].gpuvm_base = pdd->gpuvm_base;
-               pa[nodes].gpuvm_limit = pdd->gpuvm_limit;
-               pa[nodes].scratch_base = pdd->scratch_base;
-               pa[nodes].scratch_limit = pdd->scratch_limit;
+       for (i = 0; i < min(p->n_pdds, args->num_of_nodes); i++) {
+               struct kfd_process_device *pdd = p->pdds[i];
+
+               pa[i].gpu_id = pdd->dev->id;
+               pa[i].lds_base = pdd->lds_base;
+               pa[i].lds_limit = pdd->lds_limit;
+               pa[i].gpuvm_base = pdd->gpuvm_base;
+               pa[i].gpuvm_limit = pdd->gpuvm_limit;
+               pa[i].scratch_base = pdd->scratch_base;
+               pa[i].scratch_limit = pdd->scratch_limit;
 
                dev_dbg(kfd_device,
                        "gpu id %u\n", pdd->dev->id);
@@ -993,17 +978,14 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
                        "scratch_base %llX\n", pdd->scratch_base);
                dev_dbg(kfd_device,
                        "scratch_limit %llX\n", pdd->scratch_limit);
-               nodes++;
-
-               pdd = kfd_get_next_process_device_data(p, pdd);
-       } while (pdd && (nodes < args->num_of_nodes));
+       }
        mutex_unlock(&p->mutex);
 
-       args->num_of_nodes = nodes;
+       args->num_of_nodes = i;
        ret = copy_to_user(
                        (void __user *)args->kfd_process_device_apertures_ptr,
                        pa,
-                       (nodes * sizeof(struct kfd_process_device_apertures)));
+                       (i * sizeof(struct kfd_process_device_apertures)));
        kfree(pa);
        return ret ? -EFAULT : 0;
 
index b258a3d..159add0 100644
@@ -155,7 +155,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
 
        /* Wait till CP writes sync code: */
        status = amdkfd_fence_wait_timeout(
-                       (unsigned int *) rm_state,
+                       rm_state,
                        QUEUESTATE__ACTIVE, 1500);
 
        kfd_gtt_sa_free(dbgdev->dev, mem_obj);
index 511712c..673d5e3 100644
@@ -33,6 +33,11 @@ static int kfd_debugfs_open(struct inode *inode, struct file *file)
 
        return single_open(file, show, NULL);
 }
+static int kfd_debugfs_hang_hws_read(struct seq_file *m, void *data)
+{
+       seq_printf(m, "echo gpu_id > hang_hws\n");
+       return 0;
+}
 
 static ssize_t kfd_debugfs_hang_hws_write(struct file *file,
        const char __user *user_buf, size_t size, loff_t *ppos)
@@ -94,7 +99,7 @@ void kfd_debugfs_init(void)
        debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
                            kfd_debugfs_rls_by_device, &kfd_debugfs_fops);
        debugfs_create_file("hang_hws", S_IFREG | 0200, debugfs_root,
-                           NULL, &kfd_debugfs_hang_hws_fops);
+                           kfd_debugfs_hang_hws_read, &kfd_debugfs_hang_hws_fops);
 }
 
 void kfd_debugfs_fini(void)
index f860cd7..357b9bf 100644
@@ -1322,7 +1322,7 @@ void kfd_dec_compute_active(struct kfd_dev *kfd)
 
 void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
 {
-       if (kfd)
+       if (kfd && kfd->init_complete)
                kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask);
 }
 
index 965f9f2..d3eaa15 100644
@@ -1180,7 +1180,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
        if (retval)
                goto fail_allocate_vidmem;
 
-       dqm->fence_addr = dqm->fence_mem->cpu_ptr;
+       dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr;
        dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
 
        init_interrupts(dqm);
@@ -1353,8 +1353,8 @@ out:
        return retval;
 }
 
-int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
-                               unsigned int fence_value,
+int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
+                               uint64_t fence_value,
                                unsigned int timeout_ms)
 {
        unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
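
Widening the fence to 64 bits changes the polled value's type but not the shape of the wait: spin on the fence location until it holds the expected value or the deadline passes. A hedged userspace stand-in (the busy-wait and millisecond clock are simplifications; the kernel version polls against jiffies and returns -ETIME on expiry):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Returns 0 on success, -1 on timeout. The volatile read models the CP
 * writing the 64-bit sync code into shared memory behind our back. */
static int fence_wait_timeout(volatile uint64_t *fence_addr,
			      uint64_t fence_value, unsigned int timeout_ms)
{
	uint64_t end = now_ms() + timeout_ms;

	while (*fence_addr != fence_value) {
		if (now_ms() >= end)
			return -1;	/* stands in for -ETIME */
	}
	return 0;
}

int main(void)
{
	uint64_t fence = 42;	/* already signalled */

	printf("wait: %d\n", fence_wait_timeout(&fence, 42, 1500));
	return 0;
}
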
index dd0d883..71e2fde 100644
@@ -187,7 +187,7 @@ struct device_queue_manager {
        uint16_t                vmid_pasid[VMID_NUM];
        uint64_t                pipelines_addr;
        uint64_t                fence_gpu_addr;
-       unsigned int            *fence_addr;
+       uint64_t                *fence_addr;
        struct kfd_mem_obj      *fence_mem;
        bool                    active_runlist;
        int                     sched_policy;
index 9318936..5a1f243 100644
@@ -135,11 +135,11 @@ int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd)
  */
 void kfd_iommu_unbind_process(struct kfd_process *p)
 {
-       struct kfd_process_device *pdd;
+       int i;
 
-       list_for_each_entry(pdd, &p->per_device_data, per_device_list)
-               if (pdd->bound == PDD_BOUND)
-                       amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
+       for (i = 0; i < p->n_pdds; i++)
+               if (p->pdds[i]->bound == PDD_BOUND)
+                       amd_iommu_unbind_pasid(p->pdds[i]->dev->pdev, p->pasid);
 }
 
 /* Callback for process shutdown invoked by the IOMMU driver */
index d903f69..e840dd5 100644
@@ -348,7 +348,7 @@ fail_create_runlist_ib:
 }
 
 int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
-                       uint32_t fence_value)
+                       uint64_t fence_value)
 {
        uint32_t *buffer, size;
        int retval = 0;
index dfaf771..e3ba0cd 100644
@@ -283,7 +283,7 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
 }
 
 static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
-                       uint64_t fence_address, uint32_t fence_value)
+                       uint64_t fence_address, uint64_t fence_value)
 {
        struct pm4_mes_query_status *packet;
 
index a852e0d..08442e7 100644
@@ -263,7 +263,7 @@ static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
 }
 
 static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
-                       uint64_t fence_address, uint32_t fence_value)
+                       uint64_t fence_address, uint64_t fence_value)
 {
        struct pm4_mes_query_status *packet;
 
index d8c8b5f..0b6595f 100644
@@ -45,6 +45,7 @@
 #include <linux/swap.h>
 
 #include "amd_shared.h"
+#include "amdgpu.h"
 
 #define KFD_MAX_RING_ENTRY_SIZE        8
 
@@ -649,12 +650,6 @@ enum kfd_pdd_bound {
 
 /* Data that is per-process-per device. */
 struct kfd_process_device {
-       /*
-        * List of all per-device data for a process.
-        * Starts from kfd_process.per_device_data.
-        */
-       struct list_head per_device_list;
-
        /* The device that owns this data. */
        struct kfd_dev *dev;
 
@@ -771,10 +766,11 @@ struct kfd_process {
        u32 pasid;
 
        /*
-        * List of kfd_process_device structures,
+        * Array of kfd_process_device pointers,
         * one for each device the process is using.
         */
-       struct list_head per_device_data;
+       struct kfd_process_device *pdds[MAX_GPU_INSTANCE];
+       uint32_t n_pdds;
 
        struct process_queue_manager pqm;
 
@@ -872,14 +868,6 @@ void *kfd_process_device_translate_handle(struct kfd_process_device *p,
 void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
                                        int handle);
 
-/* Process device data iterator */
-struct kfd_process_device *kfd_get_first_process_device_data(
-                                                       struct kfd_process *p);
-struct kfd_process_device *kfd_get_next_process_device_data(
-                                               struct kfd_process *p,
-                                               struct kfd_process_device *pdd);
-bool kfd_has_process_device_data(struct kfd_process *p);
-
 /* PASIDs */
 int kfd_pasid_init(void);
 void kfd_pasid_exit(void);
@@ -1012,8 +1000,8 @@ int pqm_get_wave_state(struct process_queue_manager *pqm,
                       u32 *ctl_stack_used_size,
                       u32 *save_area_used_size);
 
-int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
-                             unsigned int fence_value,
+int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
+                             uint64_t fence_value,
                              unsigned int timeout_ms);
 
 /* Packet Manager */
@@ -1049,7 +1037,7 @@ struct packet_manager_funcs {
                        uint32_t filter_param, bool reset,
                        unsigned int sdma_engine);
        int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
-                       uint64_t fence_address, uint32_t fence_value);
+                       uint64_t fence_address, uint64_t fence_value);
        int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
 
        /* Packet sizes */
@@ -1071,7 +1059,7 @@ int pm_send_set_resources(struct packet_manager *pm,
                                struct scheduling_resources *res);
 int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
 int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
-                               uint32_t fence_value);
+                               uint64_t fence_value);
 
 int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
                        enum kfd_unmap_queues_filter mode,
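
With the per-device list replaced by pdds[]/n_pdds in struct kfd_process, the first/next/has iterator helpers above become dead API and adding a device reduces to a capacity-checked append, mirroring the WARN_ON_ONCE added to kfd_create_process_device_data in the next file's diff. A sketch under assumed names:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_GPU_INSTANCE 64	/* assumed to mirror the kernel constant */

struct pdd { int dev_id; };

struct process {
	struct pdd *pdds[MAX_GPU_INSTANCE];
	uint32_t n_pdds;
};

static struct pdd *add_pdd(struct process *p, int dev_id)
{
	struct pdd *pdd;

	if (p->n_pdds >= MAX_GPU_INSTANCE)	/* the WARN_ON_ONCE case */
		return NULL;
	pdd = calloc(1, sizeof(*pdd));
	if (!pdd)
		return NULL;
	pdd->dev_id = dev_id;
	p->pdds[p->n_pdds++] = pdd;
	return pdd;
}

int main(void)
{
	struct process p = { { 0 }, 0 };

	printf("added: %p\n", (void *)add_pdd(&p, 7));
	return 0;
}
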
index f523799..d4241d2 100644
@@ -505,7 +505,7 @@ static int kfd_sysfs_create_file(struct kfd_process *p, struct attribute *attr,
 static int kfd_procfs_add_sysfs_stats(struct kfd_process *p)
 {
        int ret = 0;
-       struct kfd_process_device *pdd;
+       int i;
        char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];
 
        if (!p)
@@ -520,7 +520,8 @@ static int kfd_procfs_add_sysfs_stats(struct kfd_process *p)
         * - proc/<pid>/stats_<gpuid>/evicted_ms
         * - proc/<pid>/stats_<gpuid>/cu_occupancy
         */
-       list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+       for (i = 0; i < p->n_pdds; i++) {
+               struct kfd_process_device *pdd = p->pdds[i];
                struct kobject *kobj_stats;
 
                snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
@@ -571,7 +572,7 @@ err:
 static int kfd_procfs_add_sysfs_files(struct kfd_process *p)
 {
        int ret = 0;
-       struct kfd_process_device *pdd;
+       int i;
 
        if (!p)
                return -EINVAL;
@@ -584,7 +585,9 @@ static int kfd_procfs_add_sysfs_files(struct kfd_process *p)
         * - proc/<pid>/vram_<gpuid>
         * - proc/<pid>/sdma_<gpuid>
         */
-       list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+       for (i = 0; i < p->n_pdds; i++) {
+               struct kfd_process_device *pdd = p->pdds[i];
+
                snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
                         pdd->dev->id);
                ret = kfd_sysfs_create_file(p, &pdd->attr_vram, pdd->vram_filename);
@@ -881,21 +884,23 @@ void kfd_unref_process(struct kfd_process *p)
        kref_put(&p->ref, kfd_process_ref_release);
 }
 
 static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
 {
        struct kfd_process *p = pdd->process;
        void *mem;
        int id;
+       int i;
 
        /*
         * Remove all handles from idr and release appropriate
         * local memory object
         */
        idr_for_each_entry(&pdd->alloc_idr, mem, id) {
-               struct kfd_process_device *peer_pdd;
 
-               list_for_each_entry(peer_pdd, &p->per_device_data,
-                                   per_device_list) {
+               for (i = 0; i < p->n_pdds; i++) {
+                       struct kfd_process_device *peer_pdd = p->pdds[i];
+
                        if (!peer_pdd->vm)
                                continue;
                        amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
@@ -909,18 +914,19 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
 
 static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
 {
-       struct kfd_process_device *pdd;
+       int i;
 
-       list_for_each_entry(pdd, &p->per_device_data, per_device_list)
-               kfd_process_device_free_bos(pdd);
+       for (i = 0; i < p->n_pdds; i++)
+               kfd_process_device_free_bos(p->pdds[i]);
 }
 
 static void kfd_process_destroy_pdds(struct kfd_process *p)
 {
-       struct kfd_process_device *pdd, *temp;
+       int i;
+
+       for (i = 0; i < p->n_pdds; i++) {
+               struct kfd_process_device *pdd = p->pdds[i];
 
-       list_for_each_entry_safe(pdd, temp, &p->per_device_data,
-                                per_device_list) {
                pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
                                pdd->dev->id, p->pasid);
 
@@ -933,8 +939,6 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
                        amdgpu_amdkfd_gpuvm_destroy_process_vm(
                                pdd->dev->kgd, pdd->vm);
 
-               list_del(&pdd->per_device_list);
-
                if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
                        free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
                                get_order(KFD_CWSR_TBA_TMA_SIZE));
@@ -955,7 +959,9 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
                }
 
                kfree(pdd);
+               p->pdds[i] = NULL;
        }
+       p->n_pdds = 0;
 }
 
 /* No process locking is needed in this function, because the process
@@ -967,7 +973,7 @@ static void kfd_process_wq_release(struct work_struct *work)
 {
        struct kfd_process *p = container_of(work, struct kfd_process,
                                             release_work);
-       struct kfd_process_device *pdd;
+       int i;
 
        /* Remove the procfs files */
        if (p->kobj) {
@@ -976,7 +982,9 @@ static void kfd_process_wq_release(struct work_struct *work)
                kobject_put(p->kobj_queues);
                p->kobj_queues = NULL;
 
-               list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+               for (i = 0; i < p->n_pdds; i++) {
+                       struct kfd_process_device *pdd = p->pdds[i];
+
                        sysfs_remove_file(p->kobj, &pdd->attr_vram);
                        sysfs_remove_file(p->kobj, &pdd->attr_sdma);
                        sysfs_remove_file(p->kobj, &pdd->attr_evict);
@@ -1036,7 +1044,7 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
                                        struct mm_struct *mm)
 {
        struct kfd_process *p;
-       struct kfd_process_device *pdd = NULL;
+       int i;
 
        /*
         * The kfd_process structure can not be free because the
@@ -1060,8 +1068,8 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
         * pdd is in debug mode, we should first force unregistration,
         * then we will be able to destroy the queues
         */
-       list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
-               struct kfd_dev *dev = pdd->dev;
+       for (i = 0; i < p->n_pdds; i++) {
+               struct kfd_dev *dev = p->pdds[i]->dev;
 
                mutex_lock(kfd_get_dbgmgr_mutex());
                if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
@@ -1098,11 +1106,11 @@ static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
 static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
 {
        unsigned long  offset;
-       struct kfd_process_device *pdd;
+       int i;
 
-       list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
-               struct kfd_dev *dev = pdd->dev;
-               struct qcm_process_device *qpd = &pdd->qpd;
+       for (i = 0; i < p->n_pdds; i++) {
+               struct kfd_dev *dev = p->pdds[i]->dev;
+               struct qcm_process_device *qpd = &p->pdds[i]->qpd;
 
                if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
                        continue;
@@ -1199,7 +1207,7 @@ static struct kfd_process *create_process(const struct task_struct *thread)
        mutex_init(&process->mutex);
        process->mm = thread->mm;
        process->lead_thread = thread->group_leader;
-       INIT_LIST_HEAD(&process->per_device_data);
+       process->n_pdds = 0;
        INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
        INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
        process->last_restore_timestamp = get_jiffies_64();
@@ -1290,11 +1298,11 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd,
 struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
                                                        struct kfd_process *p)
 {
-       struct kfd_process_device *pdd = NULL;
+       int i;
 
-       list_for_each_entry(pdd, &p->per_device_data, per_device_list)
-               if (pdd->dev == dev)
-                       return pdd;
+       for (i = 0; i < p->n_pdds; i++)
+               if (p->pdds[i]->dev == dev)
+                       return p->pdds[i];
 
        return NULL;
 }
@@ -1304,6 +1312,8 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
 {
        struct kfd_process_device *pdd = NULL;
 
+       if (WARN_ON_ONCE(p->n_pdds >= MAX_GPU_INSTANCE))
+               return NULL;
        pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
        if (!pdd)
                return NULL;
@@ -1332,7 +1342,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
        pdd->vram_usage = 0;
        pdd->sdma_past_activity_counter = 0;
        atomic64_set(&pdd->evict_duration_counter, 0);
-       list_add(&pdd->per_device_list, &p->per_device_data);
+       p->pdds[p->n_pdds++] = pdd;
 
        /* Init idr used for memory handle translation */
        idr_init(&pdd->alloc_idr);
@@ -1464,28 +1474,6 @@ out:
        return ERR_PTR(err);
 }
 
-struct kfd_process_device *kfd_get_first_process_device_data(
-                                               struct kfd_process *p)
-{
-       return list_first_entry(&p->per_device_data,
-                               struct kfd_process_device,
-                               per_device_list);
-}
-
-struct kfd_process_device *kfd_get_next_process_device_data(
-                                               struct kfd_process *p,
-                                               struct kfd_process_device *pdd)
-{
-       if (list_is_last(&pdd->per_device_list, &p->per_device_data))
-               return NULL;
-       return list_next_entry(pdd, per_device_list);
-}
-
-bool kfd_has_process_device_data(struct kfd_process *p)
-{
-       return !(list_empty(&p->per_device_data));
-}
-
 /* Create specific handle mapped to mem from process local memory idr
  * Assumes that the process lock is held.
  */
@@ -1561,11 +1549,13 @@ struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
  */
 int kfd_process_evict_queues(struct kfd_process *p)
 {
-       struct kfd_process_device *pdd;
        int r = 0;
+       int i;
        unsigned int n_evicted = 0;
 
-       list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+       for (i = 0; i < p->n_pdds; i++) {
+               struct kfd_process_device *pdd = p->pdds[i];
+
                r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
                                                            &pdd->qpd);
                if (r) {
@@ -1581,7 +1571,9 @@ fail:
        /* To keep state consistent, roll back partial eviction by
         * restoring queues
         */
-       list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+       for (i = 0; i < p->n_pdds; i++) {
+               struct kfd_process_device *pdd = p->pdds[i];
+
                if (n_evicted == 0)
                        break;
                if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
@@ -1597,10 +1589,12 @@ fail:
 /* kfd_process_restore_queues - Restore all user queues of a process */
 int kfd_process_restore_queues(struct kfd_process *p)
 {
-       struct kfd_process_device *pdd;
        int r, ret = 0;
+       int i;
+
+       for (i = 0; i < p->n_pdds; i++) {
+               struct kfd_process_device *pdd = p->pdds[i];
 
-       list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
                                                              &pdd->qpd);
                if (r) {
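
The eviction hunk above keeps a running n_evicted so that a mid-loop failure can be rolled back by restoring only the queues already evicted, keeping state consistent. The pattern in miniature, with a stubbed failure on one device (all names and the device count are hypothetical):

#include <stdio.h>

#define NDEV 4

static int evict(int i)    { return i == 2 ? -1 : 0; } /* device 2 fails */
static void restore(int i) { printf("restored %d\n", i); }

static int evict_all(void)
{
	unsigned int n_evicted = 0;
	int i, r;

	for (i = 0; i < NDEV; i++) {
		r = evict(i);
		if (r)
			goto fail;
		n_evicted++;
	}
	return 0;
fail:
	/* roll back the partial eviction in the same order */
	for (i = 0; i < NDEV && n_evicted; i++, n_evicted--)
		restore(i);
	return r;
}

int main(void) { return evict_all() ? 1 : 0; }
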
index eb1635a..95a6c36 100644
@@ -126,10 +126,10 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
 
 void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
 {
-       struct kfd_process_device *pdd;
+       int i;
 
-       list_for_each_entry(pdd, &p->per_device_data, per_device_list)
-               kfd_process_dequeue_from_device(pdd);
+       for (i = 0; i < p->n_pdds; i++)
+               kfd_process_dequeue_from_device(p->pdds[i]);
 }
 
 int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
index 17d1736..2465224 100644
@@ -81,7 +81,7 @@ static ssize_t kfd_smi_ev_read(struct file *filep, char __user *user,
        struct kfd_smi_client *client = filep->private_data;
        unsigned char *buf;
 
-       buf = kmalloc(MAX_KFIFO_SIZE * sizeof(*buf), GFP_KERNEL);
+       buf = kmalloc_array(MAX_KFIFO_SIZE, sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
 
index 00edf78..a0c8c41 100644
@@ -121,7 +121,7 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
  * DOC: overview
  *
  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
- * **dm**) sits between DRM and DC. It acts as a liason, converting DRM
+ * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
  * requests into DC requests, and DC responses into DRM responses.
  *
  * The root control structure is &struct amdgpu_display_manager.
@@ -130,6 +130,7 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
 /* basic init/fini API */
 static int amdgpu_dm_init(struct amdgpu_device *adev);
 static void amdgpu_dm_fini(struct amdgpu_device *adev);
+static bool is_freesync_video_mode(const struct drm_display_mode *mode,
+                                  struct amdgpu_dm_connector *aconnector);
 
 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
 {
@@ -371,14 +372,14 @@ static void dm_pflip_high_irq(void *interrupt_params)
        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
-               DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
+               DC_LOG_PFLIP("CRTC is null, returning.\n");
                return;
        }
 
        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 
        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
-               DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
+               DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
                                                 amdgpu_crtc->pflip_status,
                                                 AMDGPU_FLIP_SUBMITTED,
                                                 amdgpu_crtc->crtc_id,
@@ -449,9 +450,9 @@ static void dm_pflip_high_irq(void *interrupt_params)
        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 
-       DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
-                        amdgpu_crtc->crtc_id, amdgpu_crtc,
-                        vrr_active, (int) !e);
+       DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
+                    amdgpu_crtc->crtc_id, amdgpu_crtc,
+                    vrr_active, (int) !e);
 }
 
 static void dm_vupdate_high_irq(void *interrupt_params)
@@ -459,6 +460,9 @@ static void dm_vupdate_high_irq(void *interrupt_params)
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_crtc *acrtc;
+       struct drm_device *drm_dev;
+       struct drm_vblank_crtc *vblank;
+       ktime_t frame_duration_ns, previous_timestamp;
        unsigned long flags;
        int vrr_active;
 
@@ -466,8 +470,19 @@ static void dm_vupdate_high_irq(void *interrupt_params)
 
        if (acrtc) {
                vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
+               drm_dev = acrtc->base.dev;
+               vblank = &drm_dev->vblank[acrtc->base.index];
+               previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
+               frame_duration_ns = vblank->time - previous_timestamp;
 
-               DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
+               if (frame_duration_ns > 0) {
+                       trace_amdgpu_refresh_rate_track(acrtc->base.index,
+                                               frame_duration_ns,
+                                               ktime_divns(NSEC_PER_SEC, frame_duration_ns));
+                       atomic64_set(&irq_params->previous_timestamp, vblank->time);
+               }
+
+               DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
                              acrtc->crtc_id,
                              vrr_active);
 
@@ -520,7 +535,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
 
        vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
 
-       DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
+       DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);
 
        /**
@@ -923,6 +938,32 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
 }
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
+#define DMUB_TRACE_MAX_READ 64
+static void dm_dmub_trace_high_irq(void *interrupt_params)
+{
+       struct common_irq_params *irq_params = interrupt_params;
+       struct amdgpu_device *adev = irq_params->adev;
+       struct amdgpu_display_manager *dm = &adev->dm;
+       struct dmcub_trace_buf_entry entry = { 0 };
+       uint32_t count = 0;
+
+       do {
+               if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
+                       trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
+                                                       entry.param0, entry.param1);
+
+                       DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
+                                entry.trace_code, entry.tick_count, entry.param0, entry.param1);
+               } else {
+                       break;
+               }
+
+               count++;
+
+       } while (count < DMUB_TRACE_MAX_READ);
+
+       ASSERT(count <= DMUB_TRACE_MAX_READ);
+}
+
 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
 {
        uint64_t pt_base;
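
dm_dmub_trace_high_irq above drains the firmware's outbox ring but caps the work done in a single interrupt at DMUB_TRACE_MAX_READ entries, so a noisy firmware buffer cannot monopolize the CPU. A generic stand-in for the bounded-drain pattern, with a stubbed queue and a strict cap (entry layout and names are hypothetical):

#include <stdbool.h>
#include <stdio.h>

#define MAX_READ 64

struct entry { unsigned int code; };

static bool pop(struct entry *e)	/* pretend firmware queue */
{
	static unsigned int n;

	if (n >= 5)
		return false;
	e->code = n++;
	return true;
}

static void drain(void)
{
	struct entry e;
	unsigned int count = 0;

	/* stop at the cap even if entries remain; the next IRQ resumes */
	while (count < MAX_READ && pop(&e)) {
		printf("trace code %u\n", e.code);
		count++;
	}
}

int main(void) { drain(); return 0; }
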
@@ -987,13 +1028,12 @@ static void event_mall_stutter(struct work_struct *work)
 
        if (vblank_work->enable)
                dm->active_vblank_irq_count++;
-       else
+       else if (dm->active_vblank_irq_count)
                dm->active_vblank_irq_count--;
 
        dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
 
-       DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
-
+       DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
 
        mutex_unlock(&dm->dc_lock);
 }
@@ -1809,8 +1849,8 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
                if (acrtc && state->stream_status[i].plane_count != 0) {
                        irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
                        rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
-                       DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
-                                 acrtc->crtc_id, enable ? "en" : "dis", rc);
+                       DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
+                                     acrtc->crtc_id, enable ? "en" : "dis", rc);
                        if (rc)
                                DRM_WARN("Failed to %s pflip interrupts\n",
                                         enable ? "enable" : "disable");
@@ -3104,6 +3144,28 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
 
        }
 
+       if (dc->ctx->dmub_srv) {
+               i = DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT;
+               r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->dmub_trace_irq);
+
+               if (r) {
+                       DRM_ERROR("Failed to add dmub trace irq id!\n");
+                       return r;
+               }
+
+               int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+               int_params.irq_source =
+                       dc_interrupt_to_irq_source(dc, i, 0);
+
+               c_irq_params = &adev->dm.dmub_trace_params[0];
+
+               c_irq_params->adev = adev;
+               c_irq_params->irq_src = int_params.irq_source;
+
+               amdgpu_dm_irq_register_interrupt(adev, &int_params,
+                               dm_dmub_trace_high_irq, c_irq_params);
+       }
+
        /* HPD */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
                        &adev->hpd_irq);
@@ -4892,8 +4954,8 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
        stream->src = src;
        stream->dst = dst;
 
-       DRM_DEBUG_DRIVER("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
-                       dst.x, dst.y, dst.width, dst.height);
+       DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
+                     dst.x, dst.y, dst.width, dst.height);
 
 }
 
@@ -5106,15 +5168,27 @@ static void fill_stream_properties_from_drm_display_mode(
                timing_out->hdmi_vic = hv_frame.vic;
        }
 
-       timing_out->h_addressable = mode_in->hdisplay;
-       timing_out->h_total = mode_in->htotal;
-       timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
-       timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
-       timing_out->v_total = mode_in->vtotal;
-       timing_out->v_addressable = mode_in->vdisplay;
-       timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
-       timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
-       timing_out->pix_clk_100hz = mode_in->clock * 10;
+       if (is_freesync_video_mode(mode_in, aconnector)) {
+               timing_out->h_addressable = mode_in->hdisplay;
+               timing_out->h_total = mode_in->htotal;
+               timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
+               timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
+               timing_out->v_total = mode_in->vtotal;
+               timing_out->v_addressable = mode_in->vdisplay;
+               timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
+               timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
+               timing_out->pix_clk_100hz = mode_in->clock * 10;
+       } else {
+               timing_out->h_addressable = mode_in->crtc_hdisplay;
+               timing_out->h_total = mode_in->crtc_htotal;
+               timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
+               timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
+               timing_out->v_total = mode_in->crtc_vtotal;
+               timing_out->v_addressable = mode_in->crtc_vdisplay;
+               timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
+               timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
+               timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
+       }
 
        timing_out->aspect_ratio = get_aspect_ratio(mode_in);
 
@@ -5234,9 +5308,14 @@ create_fake_sink(struct amdgpu_dm_connector *aconnector)
 static void set_multisync_trigger_params(
                struct dc_stream_state *stream)
 {
+       struct dc_stream_state *master = NULL;
+
        if (stream->triggered_crtc_reset.enabled) {
-               stream->triggered_crtc_reset.event = CRTC_EVENT_VSYNC_RISING;
-               stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_LINE;
+               master = stream->triggered_crtc_reset.event_source;
+               stream->triggered_crtc_reset.event =
+                       master->timing.flags.VSYNC_POSITIVE_POLARITY ?
+                       CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
+               stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
        }
 }
 
@@ -5266,6 +5345,7 @@ static void set_master_stream(struct dc_stream_state *stream_set[],
 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
 {
        int i = 0;
+       struct dc_stream_state *stream;
 
        if (context->stream_count < 2)
                return;
@@ -5277,9 +5357,18 @@ static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
                 * crtc_sync_master.multi_sync_enabled flag
                 * For now it's set to false
                 */
-               set_multisync_trigger_params(context->streams[i]);
        }
+
        set_master_stream(context->streams, context->stream_count);
+
+       for (i = 0; i < context->stream_count ; i++) {
+               stream = context->streams[i];
+
+               if (!stream)
+                       continue;
+
+               set_multisync_trigger_params(stream);
+       }
 }
 
 static struct drm_display_mode *
@@ -5335,7 +5424,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
        return m_pref;
 }
 
-static bool is_freesync_video_mode(struct drm_display_mode *mode,
+static bool is_freesync_video_mode(const struct drm_display_mode *mode,
                                   struct amdgpu_dm_connector *aconnector)
 {
        struct drm_display_mode *high_mode;
@@ -5458,7 +5547,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 
        if (recalculate_timing)
                drm_mode_set_crtcinfo(&saved_mode, 0);
-       else
+       else if (!dm_state)
                drm_mode_set_crtcinfo(&mode, 0);
 
        /*
@@ -5636,8 +5725,8 @@ static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
 
        rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
 
-       DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
-                        acrtc->crtc_id, enable ? "en" : "dis", rc);
+       DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
+                     acrtc->crtc_id, enable ? "en" : "dis", rc);
        return rc;
 }
 
@@ -6075,6 +6164,15 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 
        } while (stream == NULL && requested_bpc >= 6);
 
+       if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
+               DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
+
+               aconnector->force_yuv420_output = true;
+               stream = create_validate_stream_for_sink(aconnector, drm_mode,
+                                               dm_state, old_stream);
+               aconnector->force_yuv420_output = false;
+       }
+
        return stream;
 }
 
@@ -6577,7 +6675,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
        int r;
 
        if (!new_state->fb) {
-               DRM_DEBUG_DRIVER("No FB bound\n");
+               DRM_DEBUG_KMS("No FB bound\n");
                return 0;
        }
 
@@ -7295,7 +7393,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
 
        if (!(amdgpu_freesync_vid_mode && edid))
                return;
-       
+
        if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
                amdgpu_dm_connector->num_modes +=
                        add_fs_modes(amdgpu_dm_connector);
@@ -7810,11 +7908,11 @@ static void handle_cursor_update(struct drm_plane *plane,
        if (!plane->state->fb && !old_plane_state->fb)
                return;
 
-       DRM_DEBUG_DRIVER("%s: crtc_id=%d with size %d to %d\n",
-                        __func__,
-                        amdgpu_crtc->crtc_id,
-                        plane->state->crtc_w,
-                        plane->state->crtc_h);
+       DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
+                     __func__,
+                     amdgpu_crtc->crtc_id,
+                     plane->state->crtc_w,
+                     plane->state->crtc_h);
 
        ret = get_cursor_position(plane, crtc, &position);
        if (ret)
@@ -7872,8 +7970,8 @@ static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
        /* Mark this event as consumed */
        acrtc->base.state->event = NULL;
 
-       DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
-                                                acrtc->crtc_id);
+       DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
+                    acrtc->crtc_id);
 }
 
 static void update_freesync_state_on_stream(
@@ -8179,7 +8277,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                        &bundle->flip_addrs[planes_count].address,
                        afb->tmz_surface, false);
 
-               DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
+               DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
                                 new_plane_state->plane->index,
                                 bundle->plane_infos[planes_count].dcc.enable);
 
@@ -8213,7 +8311,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                                dc_plane,
                                bundle->flip_addrs[planes_count].flip_timestamp_in_us);
 
-               DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
+               DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
                                 __func__,
                                 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
                                 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
@@ -8535,7 +8633,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
 
-               DRM_DEBUG_DRIVER(
+               DRM_DEBUG_ATOMIC(
                        "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
                        "planes_changed:%d, mode_changed:%d,active_changed:%d,"
                        "connectors_changed:%d\n",
@@ -8569,7 +8667,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 
                if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
 
-                       DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
+                       DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
 
                        if (!dm_new_crtc_state->stream) {
                                /*
@@ -8602,7 +8700,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                        crtc->hwmode = new_crtc_state->mode;
                        mode_set_reset_required = true;
                } else if (modereset_required(new_crtc_state)) {
-                       DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
+                       DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
                        /* i.e. reset mode */
                        if (dm_old_crtc_state->stream)
                                remove_stream(adev, acrtc, dm_old_crtc_state->stream);
@@ -8619,6 +8717,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                dm_enable_per_frame_crtc_master_sync(dc_state);
                mutex_lock(&dm->dc_lock);
                WARN_ON(!dc_commit_state(dm->dc, dc_state));
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+               /* Allow idle optimization when vblank count is 0 for display off */
+               if (dm->active_vblank_irq_count == 0)
+                       dc_allow_idle_optimizations(dm->dc, true);
+#endif
                mutex_unlock(&dm->dc_lock);
        }
 
@@ -9207,7 +9310,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
        if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                goto skip_modeset;
 
-       DRM_DEBUG_DRIVER(
+       DRM_DEBUG_ATOMIC(
                "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
                "planes_changed:%d, mode_changed:%d,active_changed:%d,"
                "connectors_changed:%d\n",
@@ -9291,8 +9394,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
 
                        dc_stream_retain(new_stream);
 
-                       DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
-                                               crtc->base.id);
+                       DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
+                                        crtc->base.id);
 
                        if (dc_add_stream_to_ctx(
                                        dm->dc,
@@ -9637,8 +9740,8 @@ static int dm_update_plane_state(struct dc *dc,
                if (!dc_new_plane_state)
                        return -ENOMEM;
 
-               DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
-                               plane->base.id, new_plane_crtc->base.id);
+               DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
+                                plane->base.id, new_plane_crtc->base.id);
 
                ret = fill_dc_plane_attributes(
                        drm_to_adev(new_plane_crtc->dev),
@@ -9701,7 +9804,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
 
        new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
        new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
-       if (!new_cursor_state || !new_primary_state || !new_cursor_state->fb) {
+       if (!new_cursor_state || !new_primary_state ||
+           !new_cursor_state->fb || !new_primary_state->fb) {
                return 0;
        }
 
index 8f98d44..0189431 100644
@@ -66,6 +66,7 @@ struct dc_plane_state;
 struct common_irq_params {
        struct amdgpu_device *adev;
        enum dc_irq_source irq_src;
+       atomic64_t previous_timestamp;
 };
 
 /**
@@ -339,6 +340,15 @@ struct amdgpu_display_manager {
        struct common_irq_params
        vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1];
 
+       /**
+        * @dmub_trace_params:
+        *
+        * DMUB trace event IRQ parameters, passed to registered handlers when
+        * triggered.
+        */
+       struct common_irq_params
+       dmub_trace_params[1];
+
        spinlock_t irq_handler_list_table_lock;
 
        struct backlight_device *backlight_dev;
@@ -385,6 +395,11 @@ struct amdgpu_display_manager {
 #endif
 
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+       /**
+        * @crc_rd_wrk:
+        *
+        * Work to be executed in a separate thread to communicate with PSP.
+        */
        struct crc_rd_work *crc_rd_wrk;
 #endif
 
index c6d6baa..5cd788b 100644
@@ -307,7 +307,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
                        goto cleanup;
                }
 
-               aux = &aconn->dm_dp_aux.aux;
+               aux = (aconn->port) ? &aconn->port->aux : &aconn->dm_dp_aux.aux;
 
                if (!aux) {
                        DRM_DEBUG_DRIVER("No dp aux for amd connector\n");
index 927de76..9a13f47 100644
@@ -34,6 +34,7 @@
 #include "resource.h"
 #include "dsc.h"
 #include "dc_link_dp.h"
+#include "link_hwss.h"
 #include "dc/dc_dmub_srv.h"
 
 struct dmub_debugfs_trace_header {
@@ -149,7 +150,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
  *
  * --- to get dp configuration
  *
- * cat link_settings
+ * cat /sys/kernel/debug/dri/0/DP-x/link_settings
  *
  * It will list current, verified, reported, preferred dp configuration.
  * current -- for current video mode
@@ -162,7 +163,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
  * echo <lane_count>  <link_rate> > link_settings
  *
  * for example, to force 4 lanes at 2.7 GHz,
- * echo 4 0xa > link_settings
+ * echo 4 0xa > /sys/kernel/debug/dri/0/DP-x/link_settings
  *
  * spread_spectrum could not be changed dynamically.
  *
@@ -170,7 +171,7 @@ static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
  * done. please check link settings after force operation to see if HW get
  * programming.
  *
- * cat link_settings
+ * cat /sys/kernel/debug/dri/0/DP-x/link_settings
  *
  * check current and preferred settings.
  *
@@ -246,7 +247,6 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
 {
        struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
        struct dc_link *link = connector->dc_link;
-       struct dc *dc = (struct dc *)link->dc;
        struct dc_link_settings prefer_link_settings;
        char *wr_buf = NULL;
        const uint32_t wr_buf_size = 40;
@@ -254,7 +254,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
        int max_param_num = 2;
        uint8_t param_nums = 0;
        long param[2];
-       bool valid_input = false;
+       bool valid_input = true;
 
        if (size == 0)
                return -EINVAL;
@@ -281,9 +281,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
        case LANE_COUNT_ONE:
        case LANE_COUNT_TWO:
        case LANE_COUNT_FOUR:
-               valid_input = true;
                break;
        default:
+               valid_input = false;
                break;
        }
 
@@ -293,9 +293,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
        case LINK_RATE_RBR2:
        case LINK_RATE_HIGH2:
        case LINK_RATE_HIGH3:
-               valid_input = true;
                break;
        default:
+               valid_input = false;
                break;
        }
 
@@ -309,10 +309,11 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
         * spread spectrum will not be changed
         */
        prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
+       prefer_link_settings.use_link_rate_set = false;
        prefer_link_settings.lane_count = param[0];
        prefer_link_settings.link_rate = param[1];
 
-       dc_link_set_preferred_link_settings(dc, &prefer_link_settings, link);
+       dp_retrain_link_dp_test(link, &prefer_link_settings, false);
 
        kfree(wr_buf);
        return size;
@@ -399,6 +400,70 @@ static ssize_t dp_phy_settings_read(struct file *f, char __user *buf,
        return result;
 }
 
+static int dp_lttpr_status_show(struct seq_file *m, void *d)
+{
+       char *data;
+       struct amdgpu_dm_connector *connector = file_inode(m->file)->i_private;
+       struct dc_link *link = connector->dc_link;
+       uint32_t read_size = 1;
+       uint8_t repeater_count = 0;
+
+       data = kzalloc(read_size, GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       dm_helpers_dp_read_dpcd(link->ctx, link, 0xF0002, data, read_size);
+
+       switch ((uint8_t)*data) {
+       case 0x80:
+               repeater_count = 1;
+               break;
+       case 0x40:
+               repeater_count = 2;
+               break;
+       case 0x20:
+               repeater_count = 3;
+               break;
+       case 0x10:
+               repeater_count = 4;
+               break;
+       case 0x8:
+               repeater_count = 5;
+               break;
+       case 0x4:
+               repeater_count = 6;
+               break;
+       case 0x2:
+               repeater_count = 7;
+               break;
+       case 0x1:
+               repeater_count = 8;
+               break;
+       case 0x0:
+               repeater_count = 0;
+               break;
+       default:
+               repeater_count = (uint8_t)*data;
+               break;
+       }
+
+       seq_printf(m, "phy repeater count: %d\n", repeater_count);
+
+       dm_helpers_dp_read_dpcd(link->ctx, link, 0xF0003, data, read_size);
+
+       if ((uint8_t)*data == 0x55)
+               seq_printf(m, "phy repeater mode: transparent\n");
+       else if ((uint8_t)*data == 0xAA)
+               seq_printf(m, "phy repeater mode: non-transparent\n");
+       else if ((uint8_t)*data == 0x00)
+               seq_printf(m, "phy repeater mode: non lttpr\n");
+       else
+               seq_printf(m, "phy repeater mode: read error\n");
+
+       kfree(data);
+       return 0;
+}
+
 static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
                                 size_t size, loff_t *pos)
 {
@@ -2300,6 +2365,7 @@ DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support);
 DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
 DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
 DEFINE_SHOW_ATTRIBUTE(output_bpc);
+DEFINE_SHOW_ATTRIBUTE(dp_lttpr_status);
 #ifdef CONFIG_DRM_AMD_DC_HDCP
 DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
 #endif
@@ -2420,6 +2486,7 @@ static const struct {
 } dp_debugfs_entries[] = {
                {"link_settings", &dp_link_settings_debugfs_fops},
                {"phy_settings", &dp_phy_settings_debugfs_fop},
+               {"lttpr_status", &dp_lttpr_status_fops},
                {"test_pattern", &dp_phy_test_pattern_fops},
 #ifdef CONFIG_DRM_AMD_DC_HDCP
                {"hdcp_sink_capability", &hdcp_sink_capability_fops},
@@ -2900,6 +2967,10 @@ static int mst_topo_show(struct seq_file *m, void *unused)
 
                aconnector = to_amdgpu_dm_connector(connector);
 
+               /* Ensure we're only dumping the topology of a root mst node */
+               if (!aconnector->mst_mgr.mst_state)
+                       continue;
+
                seq_printf(m, "\nMST topology for connector %d\n", aconnector->connector_id);
                drm_dp_mst_dump_topology(m, &aconnector->mst_mgr);
        }
@@ -2909,7 +2980,73 @@ static int mst_topo_show(struct seq_file *m, void *unused)
 }
 
 /*
- * Sets the force_timing_sync debug optino from the given string.
+ * Triggers HPD processing for MST topologies.
+ * All connected connectors will be rediscovered and restarted as needed if a value of 1 is written.
+ * All topologies will be disconnected if a value of 0 is written.
+ * Usage to enable topologies: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_trigger_hpd_mst
+ * Usage to disable topologies: echo 0 > /sys/kernel/debug/dri/0/amdgpu_dm_trigger_hpd_mst
+ */
+static int trigger_hpd_mst_set(void *data, u64 val)
+{
+       struct amdgpu_device *adev = data;
+       struct drm_device *dev = adev_to_drm(adev);
+       struct drm_connector_list_iter iter;
+       struct amdgpu_dm_connector *aconnector;
+       struct drm_connector *connector;
+       struct dc_link *link = NULL;
+
+       if (val == 1) {
+               drm_connector_list_iter_begin(dev, &iter);
+               drm_for_each_connector_iter(connector, &iter) {
+                       aconnector = to_amdgpu_dm_connector(connector);
+                       if (aconnector->dc_link->type == dc_connection_mst_branch &&
+                           aconnector->mst_mgr.aux) {
+                               dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+                               drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
+                       }
+               }
+               drm_connector_list_iter_end(&iter);
+       } else if (val == 0) {
+               drm_connector_list_iter_begin(dev, &iter);
+               drm_for_each_connector_iter(connector, &iter) {
+                       aconnector = to_amdgpu_dm_connector(connector);
+                       if (!aconnector->dc_link)
+                               continue;
+
+                       if (!aconnector->port || !aconnector->mst_port)
+                               continue;
+
+                       link = aconnector->dc_link;
+                       dp_receiver_power_ctrl(link, false);
+                       drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_port->mst_mgr, false);
+                       link->mst_stream_alloc_table.stream_count = 0;
+                       memset(link->mst_stream_alloc_table.stream_allocations, 0,
+                                       sizeof(link->mst_stream_alloc_table.stream_allocations));
+               }
+               }
+               drm_connector_list_iter_end(&iter);
+       } else {
+               return -EINVAL;
+       drm_kms_helper_hotplug_event(dev);
+
+       return 0;
+}
+
+/*
+ * The interface doesn't need a get function, so it always returns zero.
+ * Usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_trigger_hpd_mst
+ */
+static int trigger_hpd_mst_get(void *data, u64 *val)
+{
+       *val = 0;
+       return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(trigger_hpd_mst_ops, trigger_hpd_mst_get,
+                        trigger_hpd_mst_set, "%llu\n");
+
+
+/*
+ * Sets the force_timing_sync debug option from the given string.
  * All connected displays will be force synchronized immediately.
  * Usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_force_timing_sync
  */
@@ -2972,6 +3109,64 @@ DEFINE_SHOW_ATTRIBUTE(mst_topo);
 DEFINE_DEBUGFS_ATTRIBUTE(visual_confirm_fops, visual_confirm_get,
                         visual_confirm_set, "%llu\n");
 
+/*
+ * Dumps the DCC_EN bit for each pipe.
+ * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dcc_en
+ */
+static ssize_t dcc_en_bits_read(
+       struct file *f,
+       char __user *buf,
+       size_t size,
+       loff_t *pos)
+{
+       struct amdgpu_device *adev = file_inode(f)->i_private;
+       struct dc *dc = adev->dm.dc;
+       char *rd_buf = NULL;
+       const uint32_t rd_buf_size = 32;
+       uint32_t result = 0;
+       int offset = 0;
+       int num_pipes = dc->res_pool->pipe_count;
+       int *dcc_en_bits;
+       int i, r;
+
+       dcc_en_bits = kcalloc(num_pipes, sizeof(int), GFP_KERNEL);
+       if (!dcc_en_bits)
+               return -ENOMEM;
+
+       if (!dc->hwss.get_dcc_en_bits) {
+               kfree(dcc_en_bits);
+               return 0;
+       }
+
+       dc->hwss.get_dcc_en_bits(dc, dcc_en_bits);
+
+       rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+       if (!rd_buf) {
+               kfree(dcc_en_bits);
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < num_pipes; i++)
+               offset += snprintf(rd_buf + offset, rd_buf_size - offset,
+                                  "%d  ", dcc_en_bits[i]);
+       rd_buf[strlen(rd_buf)] = '\n';
+
+       kfree(dcc_en_bits);
+
+       while (size) {
+               if (*pos >= rd_buf_size)
+                       break;
+               r = put_user(*(rd_buf + result), buf);
+               if (r) {
+                       kfree(rd_buf);
+                       return r; /* r = -EFAULT */
+               }
+               buf += 1;
+               size -= 1;
+               *pos += 1;
+               result += 1;
+       }
+
+       kfree(rd_buf);
+       return result;
+}
+
 void dtn_debugfs_init(struct amdgpu_device *adev)
 {
        static const struct file_operations dtn_log_fops = {
@@ -2980,6 +3175,11 @@ void dtn_debugfs_init(struct amdgpu_device *adev)
                .write = dtn_log_write,
                .llseek = default_llseek
        };
+       static const struct file_operations dcc_en_bits_fops = {
+               .owner = THIS_MODULE,
+               .read = dcc_en_bits_read,
+               .llseek = default_llseek
+       };
 
        struct drm_minor *minor = adev_to_drm(adev)->primary;
        struct dentry *root = minor->debugfs_root;
@@ -3007,4 +3207,10 @@ void dtn_debugfs_init(struct amdgpu_device *adev)
 
        debugfs_create_file_unsafe("amdgpu_dm_dmcub_trace_event_en", 0644, root,
                                   adev, &dmcub_trace_event_state_fops);
+
+       debugfs_create_file_unsafe("amdgpu_dm_trigger_hpd_mst", 0644, root,
+                                  adev, &trigger_hpd_mst_ops);
+
+       debugfs_create_file_unsafe("amdgpu_dm_dcc_en", 0644, root, adev,
+                                  &dcc_en_bits_fops);
 }
index 0cdbfcd..60f9185 100644
@@ -191,7 +191,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
                                psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm, hdcp_work->srm_size,
                                            &hdcp_work->srm_version);
 
-                       display->adjust.disable = 0;
+                       display->adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE;
                        if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) {
                                hdcp_w->link.adjust.hdcp1.disable = 0;
                                hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
@@ -203,7 +203,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
                        schedule_delayed_work(&hdcp_w->property_validate_dwork,
                                              msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
                } else {
-                       display->adjust.disable = 1;
+                       display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
                        hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
                        cancel_delayed_work(&hdcp_w->property_validate_dwork);
                }
@@ -456,7 +456,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
        link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
        link->dp.assr_enabled = config->assr_enabled;
        link->dp.mst_enabled = config->mst_enabled;
-       display->adjust.disable = 1;
+       display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
        link->adjust.auth_delay = 3;
        link->adjust.hdcp1.disable = 0;
 
index 09bdffb..103e299 100644
@@ -700,6 +700,14 @@ void dm_helpers_free_gpu_mem(
 
 bool dm_helpers_dmub_outbox0_interrupt_control(struct dc_context *ctx, bool enable)
 {
-       // TODO
-       return true;
+       enum dc_irq_source irq_source;
+       bool ret;
+
+       irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX0;
+
+       ret = dc_interrupt_set(ctx->dc, irq_source, enable);
+
+       DRM_DEBUG_DRIVER("Dmub trace irq %sabling: r=%d\n",
+                        enable ? "en" : "dis", ret);
+       return ret;
 }
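/* Editor's note: a hedged sketch of a call site (none appears in this hunk);
 * the helper simply returns the bool result of dc_interrupt_set():
 */
        if (!dm_helpers_dmub_outbox0_interrupt_control(dc->ctx, true))
                DRM_WARN("enabling DMUB outbox0 interrupt failed\n");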
index d3c687d..b3ed7e7 100644
@@ -73,6 +73,7 @@
  * @handler_arg: Argument passed to the handler when triggered
  * @dm: DM which this handler belongs to
  * @irq_source: DC interrupt source that this handler is registered for
+ * @work: work struct used to defer handler execution to a workqueue
  */
 struct amdgpu_dm_irq_handler_data {
        struct list_head list;
@@ -184,6 +185,55 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
        return hnd_list;
 }
 
+/**
+ * unregister_all_irq_handlers() - Cleans up handlers from the DM IRQ table
+ * @adev: The base driver device containing the DM device
+ *
+ * Go through low and high context IRQ tables and deallocate handlers.
+ */
+static void unregister_all_irq_handlers(struct amdgpu_device *adev)
+{
+       struct list_head *hnd_list_low;
+       struct list_head *hnd_list_high;
+       struct list_head *entry, *tmp;
+       struct amdgpu_dm_irq_handler_data *handler;
+       unsigned long irq_table_flags;
+       int i;
+
+       DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+       for (i = 0; i < DAL_IRQ_SOURCES_NUMBER; i++) {
+               hnd_list_low = &adev->dm.irq_handler_list_low_tab[i];
+               hnd_list_high = &adev->dm.irq_handler_list_high_tab[i];
+
+               list_for_each_safe(entry, tmp, hnd_list_low) {
+
+                       handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
+                                            list);
+
+                       if (handler == NULL || handler->handler == NULL)
+                               continue;
+
+                       list_del(&handler->list);
+                       kfree(handler);
+               }
+
+               list_for_each_safe(entry, tmp, hnd_list_high) {
+
+                       handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
+                                            list);
+
+                       if (handler == NULL || handler->handler == NULL)
+                               continue;
+
+                       list_del(&handler->list);
+                       kfree(handler);
+               }
+       }
+
+       DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+}
+
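/* Editor's note: the open-coded list_for_each_safe() + list_entry() loops
 * above can be written with list_for_each_entry_safe(); the handler == NULL
 * test is redundant, since list_entry() never yields NULL. A sketch:
 */
        struct amdgpu_dm_irq_handler_data *handler, *tmp;

        list_for_each_entry_safe(handler, tmp, hnd_list_low, list) {
                if (!handler->handler)
                        continue;
                list_del(&handler->list);
                kfree(handler);
        }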
 static bool
 validate_irq_registration_params(struct dc_interrupt_params *int_params,
                                 void (*ih)(void *))
@@ -414,6 +464,8 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
                        }
                }
        }
+       /* Deallocate handlers from the table. */
+       unregister_all_irq_handlers(adev);
 }
 
 int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
@@ -731,6 +783,18 @@ static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev,
                __func__);
 }
 
+static int amdgpu_dm_set_dmub_trace_irq_state(struct amdgpu_device *adev,
+                                              struct amdgpu_irq_src *source,
+                                              unsigned int type,
+                                              enum amdgpu_interrupt_state state)
+{
+       enum dc_irq_source irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX0;
+       bool st = (state == AMDGPU_IRQ_STATE_ENABLE);
+
+       dc_interrupt_set(adev->dm.dc, irq_source, st);
+       return 0;
+}
+
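/* Editor's note: the .set hook is not called directly; the amdgpu IRQ core
 * invokes it when a source's enable refcount changes. A hedged caller sketch
 * (the real call sites live outside this hunk):
 */
        amdgpu_irq_get(adev, &adev->dmub_trace_irq, 0); /* enables via .set */
        /* ... */
        amdgpu_irq_put(adev, &adev->dmub_trace_irq, 0); /* disables again */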
 static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
        .set = amdgpu_dm_set_crtc_irq_state,
        .process = amdgpu_dm_irq_handler,
@@ -746,6 +810,11 @@ static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
        .process = amdgpu_dm_irq_handler,
 };
 
+static const struct amdgpu_irq_src_funcs dm_dmub_trace_irq_funcs = {
+       .set = amdgpu_dm_set_dmub_trace_irq_state,
+       .process = amdgpu_dm_irq_handler,
+};
+
 static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
        .set = amdgpu_dm_set_pflip_irq_state,
        .process = amdgpu_dm_irq_handler,
@@ -768,6 +837,9 @@ void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
        adev->vupdate_irq.num_types = adev->mode_info.num_crtc;
        adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs;
 
+       adev->dmub_trace_irq.num_types = 1;
+       adev->dmub_trace_irq.funcs = &dm_dmub_trace_irq_funcs;
+
        adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
        adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;
 
index 607ec09..eba2701 100644
 #include "amdgpu_dm_irq.h"
 #include "amdgpu_pm.h"
 #include "dm_pp_smu.h"
-#include "amdgpu_smu.h"
-
 
 bool dm_pp_apply_display_requirements(
                const struct dc_context *ctx,
                const struct dm_pp_display_configuration *pp_display_cfg)
 {
        struct amdgpu_device *adev = ctx->driver_context;
-       struct smu_context *smu = &adev->smu;
        int i;
 
        if (adev->pm.dpm_enabled) {
@@ -106,9 +103,6 @@ bool dm_pp_apply_display_requirements(
                        adev->powerplay.pp_funcs->display_configuration_change(
                                adev->powerplay.pp_handle,
                                &adev->pm.pm_display_cfg);
-               else if (adev->smu.ppt_funcs)
-                       smu_display_configuration_change(smu,
-                                                        &adev->pm.pm_display_cfg);
 
                amdgpu_pm_compute_clocks(adev);
        }
@@ -148,36 +142,6 @@ static void get_default_clock_levels(
        }
 }
 
-static enum smu_clk_type dc_to_smu_clock_type(
-               enum dm_pp_clock_type dm_pp_clk_type)
-{
-       enum smu_clk_type smu_clk_type = SMU_CLK_COUNT;
-
-       switch (dm_pp_clk_type) {
-       case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
-               smu_clk_type = SMU_DISPCLK;
-               break;
-       case DM_PP_CLOCK_TYPE_ENGINE_CLK:
-               smu_clk_type = SMU_GFXCLK;
-               break;
-       case DM_PP_CLOCK_TYPE_MEMORY_CLK:
-               smu_clk_type = SMU_MCLK;
-               break;
-       case DM_PP_CLOCK_TYPE_DCEFCLK:
-               smu_clk_type = SMU_DCEFCLK;
-               break;
-       case DM_PP_CLOCK_TYPE_SOCCLK:
-               smu_clk_type = SMU_SOCCLK;
-               break;
-       default:
-               DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
-                         dm_pp_clk_type);
-               break;
-       }
-
-       return smu_clk_type;
-}
-
 static enum amd_pp_clock_type dc_to_pp_clock_type(
                enum dm_pp_clock_type dm_pp_clk_type)
 {
@@ -417,14 +381,8 @@ bool dm_pp_get_clock_levels_by_type_with_latency(
                                                &pp_clks);
                if (ret)
                        return false;
-       } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_latency) {
-               if (smu_get_clock_by_type_with_latency(&adev->smu,
-                                                      dc_to_smu_clock_type(clk_type),
-                                                      &pp_clks))
-                       return false;
        }
 
-
        pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
 
        return true;
@@ -502,10 +460,6 @@ bool dm_pp_apply_clock_for_voltage_request(
                ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
                        adev->powerplay.pp_handle,
                        &pp_clock_request);
-       else if (adev->smu.ppt_funcs &&
-                adev->smu.ppt_funcs->display_clock_voltage_request)
-               ret = smu_display_clock_voltage_request(&adev->smu,
-                                                       &pp_clock_request);
        if (ret)
                return false;
        return true;
@@ -655,8 +609,11 @@ static enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
+       void *pp_handle = adev->powerplay.pp_handle;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-       smu_set_watermarks_for_clock_ranges(&adev->smu, ranges);
+       if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
+               pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, ranges);
 
        return PP_SMU_RESULT_OK;
 }
@@ -665,13 +622,14 @@ static enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       struct smu_context *smu = &adev->smu;
+       void *pp_handle = adev->powerplay.pp_handle;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-       if (!smu->ppt_funcs)
+       if (!pp_funcs || !pp_funcs->set_active_display_count)
                return PP_SMU_RESULT_UNSUPPORTED;
 
-       /* 0: successful or smu.ppt_funcs->set_display_count = NULL;  1: fail */
-       if (smu_set_display_count(smu, count))
+       /* 0: success; non-zero: fail */
+       if (pp_funcs->set_active_display_count(pp_handle, count))
                return PP_SMU_RESULT_FAIL;
 
        return PP_SMU_RESULT_OK;
@@ -682,13 +640,14 @@ pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       struct smu_context *smu = &adev->smu;
+       void *pp_handle = adev->powerplay.pp_handle;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-       if (!smu->ppt_funcs)
+       if (!pp_funcs || !pp_funcs->set_min_deep_sleep_dcefclk)
                return PP_SMU_RESULT_UNSUPPORTED;
 
-       /* 0: successful or smu.ppt_funcs->set_deep_sleep_dcefclk = NULL;1: fail */
-       if (smu_set_deep_sleep_dcefclk(smu, mhz))
+       /* 0: success; non-zero: fail */
+       if (pp_funcs->set_min_deep_sleep_dcefclk(pp_handle, mhz))
                return PP_SMU_RESULT_FAIL;
 
        return PP_SMU_RESULT_OK;
@@ -699,10 +658,11 @@ static enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       struct smu_context *smu = &adev->smu;
+       void *pp_handle = adev->powerplay.pp_handle;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        struct pp_display_clock_request clock_req;
 
-       if (!smu->ppt_funcs)
+       if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
                return PP_SMU_RESULT_UNSUPPORTED;
 
        clock_req.clock_type = amd_pp_dcef_clock;
@@ -711,7 +671,7 @@ static enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
-       /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
-        * 1: fail
-        */
-       if (smu_display_clock_voltage_request(smu, &clock_req))
+       /* 0: success; non-zero: fail */
+       if (pp_funcs->display_clock_voltage_request(pp_handle, &clock_req))
                return PP_SMU_RESULT_FAIL;
 
        return PP_SMU_RESULT_OK;
@@ -722,10 +682,11 @@ pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       struct smu_context *smu = &adev->smu;
+       void *pp_handle = adev->powerplay.pp_handle;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        struct pp_display_clock_request clock_req;
 
-       if (!smu->ppt_funcs)
+       if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
                return PP_SMU_RESULT_UNSUPPORTED;
 
        clock_req.clock_type = amd_pp_mem_clock;
@@ -734,7 +695,7 @@ pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
-       /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
-        * 1: fail
-        */
-       if (smu_display_clock_voltage_request(smu, &clock_req))
+       /* 0: success; non-zero: fail */
+       if (pp_funcs->display_clock_voltage_request(pp_handle, &clock_req))
                return PP_SMU_RESULT_FAIL;
 
        return PP_SMU_RESULT_OK;
@@ -745,10 +706,14 @@ static enum pp_smu_status pp_nv_set_pstate_handshake_support(
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       struct smu_context *smu = &adev->smu;
+       void *pp_handle = adev->powerplay.pp_handle;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-       if (smu_display_disable_memory_clock_switch(smu, !pstate_handshake_supported))
-               return PP_SMU_RESULT_FAIL;
+       if (pp_funcs && pp_funcs->display_disable_memory_clock_switch) {
+               if (pp_funcs->display_disable_memory_clock_switch(pp_handle,
+                                                                 !pstate_handshake_supported))
+                       return PP_SMU_RESULT_FAIL;
+       }
 
        return PP_SMU_RESULT_OK;
 }
@@ -758,10 +723,11 @@ static enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       struct smu_context *smu = &adev->smu;
+       void *pp_handle = adev->powerplay.pp_handle;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        struct pp_display_clock_request clock_req;
 
-       if (!smu->ppt_funcs)
+       if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
                return PP_SMU_RESULT_UNSUPPORTED;
 
        switch (clock_id) {
@@ -782,7 +748,7 @@ static enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
-       /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL
-        * 1: fail
-        */
-       if (smu_display_clock_voltage_request(smu, &clock_req))
+       /* 0: success; non-zero: fail */
+       if (pp_funcs->display_clock_voltage_request(pp_handle, &clock_req))
                return PP_SMU_RESULT_FAIL;
 
        return PP_SMU_RESULT_OK;
@@ -793,15 +759,13 @@ static enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       struct smu_context *smu = &adev->smu;
-
-       if (!smu->ppt_funcs)
-               return PP_SMU_RESULT_UNSUPPORTED;
+       void *pp_handle = adev->powerplay.pp_handle;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-       if (!smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
+       if (!pp_funcs || !pp_funcs->get_max_sustainable_clocks_by_dc)
                return PP_SMU_RESULT_UNSUPPORTED;
 
-       if (!smu_get_max_sustainable_clocks_by_dc(smu, max_clocks))
+       if (!pp_funcs->get_max_sustainable_clocks_by_dc(pp_handle, max_clocks))
                return PP_SMU_RESULT_OK;
 
        return PP_SMU_RESULT_FAIL;
@@ -812,16 +776,15 @@ static enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       struct smu_context *smu = &adev->smu;
-
-       if (!smu->ppt_funcs)
-               return PP_SMU_RESULT_UNSUPPORTED;
+       void *pp_handle = adev->powerplay.pp_handle;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-       if (!smu->ppt_funcs->get_uclk_dpm_states)
+       if (!pp_funcs || !pp_funcs->get_uclk_dpm_states)
                return PP_SMU_RESULT_UNSUPPORTED;
 
-       if (!smu_get_uclk_dpm_states(smu,
-                       clock_values_in_khz, num_states))
+       if (!pp_funcs->get_uclk_dpm_states(pp_handle,
+                                          clock_values_in_khz,
+                                          num_states))
                return PP_SMU_RESULT_OK;
 
        return PP_SMU_RESULT_FAIL;
@@ -832,15 +795,13 @@ static enum pp_smu_status pp_rn_get_dpm_clock_table(
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
-       struct smu_context *smu = &adev->smu;
-
-       if (!smu->ppt_funcs)
-               return PP_SMU_RESULT_UNSUPPORTED;
+       void *pp_handle = adev->powerplay.pp_handle;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-       if (!smu->ppt_funcs->get_dpm_clock_table)
+       if (!pp_funcs || !pp_funcs->get_dpm_clock_table)
                return PP_SMU_RESULT_UNSUPPORTED;
 
-       if (!smu_get_dpm_clock_table(smu, clock_table))
+       if (!pp_funcs->get_dpm_clock_table(pp_handle, clock_table))
                return PP_SMU_RESULT_OK;
 
        return PP_SMU_RESULT_FAIL;
@@ -851,8 +812,11 @@ static enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
 {
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
+       void *pp_handle = adev->powerplay.pp_handle;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
-       smu_set_watermarks_for_clock_ranges(&adev->smu, ranges);
+       if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
+               pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, ranges);
 
        return PP_SMU_RESULT_OK;
 }
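/* Editor's note: every conversion in this file follows the same shape: drop
 * the direct smu_*() call, fetch the amd_pm_funcs table, and call through it
 * only when the hook is populated. Condensed into one hedged sketch (the
 * function itself is illustrative, not part of the patch):
 */
static enum pp_smu_status pp_call_example(struct pp_smu *pp, int count)
{
        const struct dc_context *ctx = pp->dm;
        struct amdgpu_device *adev = ctx->driver_context;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        /* Report "unsupported" when the underlying powerplay hook is absent */
        if (!pp_funcs || !pp_funcs->set_active_display_count)
                return PP_SMU_RESULT_UNSUPPORTED;

        if (pp_funcs->set_active_display_count(adev->powerplay.pp_handle, count))
                return PP_SMU_RESULT_FAIL;

        return PP_SMU_RESULT_OK;
}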
index 8696047..46a33f6 100644
@@ -597,6 +597,46 @@ TRACE_EVENT(amdgpu_dm_dce_clocks_state,
            )
 );
 
+TRACE_EVENT(amdgpu_dmub_trace_high_irq,
+       TP_PROTO(uint32_t trace_code, uint32_t tick_count, uint32_t param0,
+                uint32_t param1),
+       TP_ARGS(trace_code, tick_count, param0, param1),
+       TP_STRUCT__entry(
+               __field(uint32_t, trace_code)
+               __field(uint32_t, tick_count)
+               __field(uint32_t, param0)
+               __field(uint32_t, param1)
+               ),
+       TP_fast_assign(
+               __entry->trace_code = trace_code;
+               __entry->tick_count = tick_count;
+               __entry->param0 = param0;
+               __entry->param1 = param1;
+       ),
+       TP_printk("trace_code=%u tick_count=%u param0=%u param1=%u",
+                 __entry->trace_code, __entry->tick_count,
+                 __entry->param0, __entry->param1)
+);
+
+TRACE_EVENT(amdgpu_refresh_rate_track,
+       TP_PROTO(int crtc_index, ktime_t refresh_rate_ns, uint32_t refresh_rate_hz),
+       TP_ARGS(crtc_index, refresh_rate_ns, refresh_rate_hz),
+       TP_STRUCT__entry(
+               __field(int, crtc_index)
+               __field(ktime_t, refresh_rate_ns)
+               __field(uint32_t, refresh_rate_hz)
+               ),
+       TP_fast_assign(
+               __entry->crtc_index = crtc_index;
+               __entry->refresh_rate_ns = refresh_rate_ns;
+               __entry->refresh_rate_hz = refresh_rate_hz;
+       ),
+       TP_printk("crtc_index=%d refresh_rate=%dHz (%lld)",
+                 __entry->crtc_index,
+                 __entry->refresh_rate_hz,
+                 __entry->refresh_rate_ns)
+);
+
 #endif /* _AMDGPU_DM_TRACE_H_ */
 
 #undef TRACE_INCLUDE_PATH
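/* Editor's note: each TRACE_EVENT() above generates a trace_<name>() emitter.
 * A hedged sketch of firing the new events (the call sites live outside this
 * hunk; variable names are illustrative):
 */
        trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
                                         entry.param0, entry.param1);

        trace_amdgpu_refresh_rate_track(acrtc->crtc_id, refresh_ns, refresh_hz);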
index bbde6e6..f338472 100644
@@ -54,8 +54,9 @@ AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LI
 
 include $(AMD_DC)
 
-DISPLAY_CORE = dc.o dc_stat.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
-dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o
+DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
+dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o \
+dc_link_enc_cfg.o
 
 ifdef CONFIG_DRM_AMD_DC_DCN
 DISPLAY_CORE += dc_vm_helper.o
index b208f06..d79f4fe 100644
@@ -916,6 +916,192 @@ static enum bp_result bios_parser_get_soc_bb_info(
        return result;
 }
 
+static enum bp_result get_disp_caps_v4_1(
+       struct bios_parser *bp,
+       uint8_t *dce_caps)
+{
+       enum bp_result result = BP_RESULT_OK;
+       struct atom_display_controller_info_v4_1 *disp_cntl_tbl = NULL;
+
+       if (!dce_caps)
+               return BP_RESULT_BADINPUT;
+
+       if (!DATA_TABLES(dce_info))
+               return BP_RESULT_BADBIOSTABLE;
+
+       disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_1,
+                                                       DATA_TABLES(dce_info));
+
+       if (!disp_cntl_tbl)
+               return BP_RESULT_BADBIOSTABLE;
+
+       *dce_caps = disp_cntl_tbl->display_caps;
+
+       return result;
+}
+
+static enum bp_result get_disp_caps_v4_2(
+       struct bios_parser *bp,
+       uint8_t *dce_caps)
+{
+       enum bp_result result = BP_RESULT_OK;
+       struct atom_display_controller_info_v4_2 *disp_cntl_tbl = NULL;
+
+       if (!dce_caps)
+               return BP_RESULT_BADINPUT;
+
+       if (!DATA_TABLES(dce_info))
+               return BP_RESULT_BADBIOSTABLE;
+
+       disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_2,
+                                                       DATA_TABLES(dce_info));
+
+       if (!disp_cntl_tbl)
+               return BP_RESULT_BADBIOSTABLE;
+
+       *dce_caps = disp_cntl_tbl->display_caps;
+
+       return result;
+}
+
+static enum bp_result get_disp_caps_v4_3(
+       struct bios_parser *bp,
+       uint8_t *dce_caps)
+{
+       enum bp_result result = BP_RESULT_OK;
+       struct atom_display_controller_info_v4_3 *disp_cntl_tbl = NULL;
+
+       if (!dce_caps)
+               return BP_RESULT_BADINPUT;
+
+       if (!DATA_TABLES(dce_info))
+               return BP_RESULT_BADBIOSTABLE;
+
+       disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_3,
+                                                       DATA_TABLES(dce_info));
+
+       if (!disp_cntl_tbl)
+               return BP_RESULT_BADBIOSTABLE;
+
+       *dce_caps = disp_cntl_tbl->display_caps;
+
+       return result;
+}
+
+static enum bp_result get_disp_caps_v4_4(
+       struct bios_parser *bp,
+       uint8_t *dce_caps)
+{
+       enum bp_result result = BP_RESULT_OK;
+       struct atom_display_controller_info_v4_4 *disp_cntl_tbl = NULL;
+
+       if (!dce_caps)
+               return BP_RESULT_BADINPUT;
+
+       if (!DATA_TABLES(dce_info))
+               return BP_RESULT_BADBIOSTABLE;
+
+       disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_4,
+                                                       DATA_TABLES(dce_info));
+
+       if (!disp_cntl_tbl)
+               return BP_RESULT_BADBIOSTABLE;
+
+       *dce_caps = disp_cntl_tbl->display_caps;
+
+       return result;
+}
+
+static enum bp_result bios_parser_get_lttpr_interop(
+       struct dc_bios *dcb,
+       uint8_t *dce_caps)
+{
+       struct bios_parser *bp = BP_FROM_DCB(dcb);
+       enum bp_result result = BP_RESULT_UNSUPPORTED;
+       struct atom_common_table_header *header;
+       struct atom_data_revision tbl_revision;
+
+       if (!DATA_TABLES(dce_info))
+               return BP_RESULT_UNSUPPORTED;
+
+       header = GET_IMAGE(struct atom_common_table_header,
+                                               DATA_TABLES(dce_info));
+       get_atom_data_table_revision(header, &tbl_revision);
+       switch (tbl_revision.major) {
+       case 4:
+               switch (tbl_revision.minor) {
+               case 1:
+                       result = get_disp_caps_v4_1(bp, dce_caps);
+                       *dce_caps = !!(*dce_caps & DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE);
+                       break;
+               case 2:
+                       result = get_disp_caps_v4_2(bp, dce_caps);
+                       *dce_caps = !!(*dce_caps & DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE);
+                       break;
+               case 3:
+                       result = get_disp_caps_v4_3(bp, dce_caps);
+                       *dce_caps = !!(*dce_caps & DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE);
+                       break;
+               case 4:
+                       result = get_disp_caps_v4_4(bp, dce_caps);
+                       *dce_caps = !!(*dce_caps & DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE);
+                       break;
+               default:
+                       break;
+               }
+               break;
+       default:
+               break;
+       }
+
+       return result;
+}
+
+static enum bp_result bios_parser_get_lttpr_caps(
+       struct dc_bios *dcb,
+       uint8_t *dce_caps)
+{
+       struct bios_parser *bp = BP_FROM_DCB(dcb);
+       enum bp_result result = BP_RESULT_UNSUPPORTED;
+       struct atom_common_table_header *header;
+       struct atom_data_revision tbl_revision;
+
+       if (!DATA_TABLES(dce_info))
+               return BP_RESULT_UNSUPPORTED;
+
+       header = GET_IMAGE(struct atom_common_table_header,
+                                               DATA_TABLES(dce_info));
+       get_atom_data_table_revision(header, &tbl_revision);
+       switch (tbl_revision.major) {
+       case 4:
+               switch (tbl_revision.minor) {
+               case 1:
+                       result = get_disp_caps_v4_1(bp, dce_caps);
+                       *dce_caps = !!(*dce_caps & DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE);
+                       break;
+               case 2:
+                       result = get_disp_caps_v4_2(bp, dce_caps);
+                       *dce_caps = !!(*dce_caps & DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE);
+                       break;
+               case 3:
+                       result = get_disp_caps_v4_3(bp, dce_caps);
+                       *dce_caps = !!(*dce_caps & DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE);
+                       break;
+               case 4:
+                       result = get_disp_caps_v4_4(bp, dce_caps);
+                       *dce_caps = !!(*dce_caps & DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE);
+                       break;
+               default:
+                       break;
+               }
+               break;
+       default:
+               break;
+       }
+
+       return result;
+}
+
 static enum bp_result get_embedded_panel_info_v2_1(
                struct bios_parser *bp,
                struct embedded_panel_info *info)
@@ -2531,6 +2717,10 @@ static const struct dc_vbios_funcs vbios_funcs = {
        .get_soc_bb_info = bios_parser_get_soc_bb_info,
 
        .get_disp_connector_caps_info = bios_parser_get_disp_connector_caps_info,
+
+       .get_lttpr_caps = bios_parser_get_lttpr_caps,
+
+       .get_lttpr_interop = bios_parser_get_lttpr_interop,
 };
 
 static bool bios_parser2_construct(
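/* Editor's note: a hedged sketch of how a consumer (likely DP link-training
 * setup, not shown in this hunk) might query the two new callbacks:
 */
        uint8_t lttpr_enable = 0;
        bool vbios_lttpr_aware = false;

        if (dcb->funcs->get_lttpr_caps &&
            dcb->funcs->get_lttpr_caps(dcb, &lttpr_enable) == BP_RESULT_OK)
                vbios_lttpr_aware = !!lttpr_enable;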
index e633f8a..1244fcb 100644
@@ -98,16 +98,16 @@ static void calculate_bandwidth(
        int32_t num_cursor_lines;
 
        int32_t i, j, k;
-       struct bw_fixed yclk[3];
-       struct bw_fixed sclk[8];
+       struct bw_fixed *yclk;
+       struct bw_fixed *sclk;
        bool d0_underlay_enable;
        bool d1_underlay_enable;
        bool fbc_enabled;
        bool lpt_enabled;
        enum bw_defines sclk_message;
        enum bw_defines yclk_message;
-       enum bw_defines tiling_mode[maximum_number_of_surfaces];
-       enum bw_defines surface_type[maximum_number_of_surfaces];
+       enum bw_defines *tiling_mode;
+       enum bw_defines *surface_type;
        enum bw_defines voltage;
        enum bw_defines pipe_check;
        enum bw_defines hsr_check;
@@ -122,6 +122,22 @@ static void calculate_bandwidth(
        int32_t number_of_displays_enabled_with_margin = 0;
        int32_t number_of_aligned_displays_with_no_margin = 0;
 
+       yclk = kcalloc(3, sizeof(*yclk), GFP_KERNEL);
+       if (!yclk)
+               return;
+
+       sclk = kcalloc(8, sizeof(*sclk), GFP_KERNEL);
+       if (!sclk)
+               goto free_yclk;
+
+       tiling_mode = kcalloc(maximum_number_of_surfaces, sizeof(*tiling_mode), GFP_KERNEL);
+       if (!tiling_mode)
+               goto free_sclk;
+
+       surface_type = kcalloc(maximum_number_of_surfaces, sizeof(*surface_type), GFP_KERNEL);
+       if (!surface_type)
+               goto free_tiling_mode;
+
        yclk[low] = vbios->low_yclk;
        yclk[mid] = vbios->mid_yclk;
        yclk[high] = vbios->high_yclk;
@@ -2013,6 +2029,14 @@ static void calculate_bandwidth(
                        }
                }
        }
+
+       kfree(surface_type);
+free_tiling_mode:
+       kfree(tiling_mode);
+free_sclk:
+       kfree(sclk);
+free_yclk:
+       kfree(yclk);
 }
 
 /*******************************************************************************
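/* Editor's note: the motivation for the conversion above is stack size.
 * Kernel stacks are small (8-16 KiB depending on arch and config), and
 * arrays sized by maximum_number_of_surfaces can push this frame past the
 * -Wframe-larger-than= limit. The kcalloc()-plus-unwind pattern, reduced to
 * a hedged sketch with hypothetical names; note the labels run in reverse
 * allocation order so each failure path falls through and frees everything
 * allocated before it:
 */
        struct bw_fixed *a, *b;

        a = kcalloc(3, sizeof(*a), GFP_KERNEL);
        if (!a)
                return;

        b = kcalloc(8, sizeof(*b), GFP_KERNEL);
        if (!b)
                goto free_a;

        /* ... bandwidth math using a and b ... */

        kfree(b);
free_a:
        kfree(a);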
@@ -2022,707 +2046,719 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
        struct bw_calcs_vbios *bw_vbios,
        struct hw_asic_id asic_id)
 {
-       struct bw_calcs_dceip dceip = { 0 };
-       struct bw_calcs_vbios vbios = { 0 };
+       struct bw_calcs_dceip *dceip;
+       struct bw_calcs_vbios *vbios;
 
        enum bw_calcs_version version = bw_calcs_version_from_asic_id(asic_id);
 
-       dceip.version = version;
+       dceip = kzalloc(sizeof(*dceip), GFP_KERNEL);
+       if (!dceip)
+               return;
+
+       vbios = kzalloc(sizeof(*vbios), GFP_KERNEL);
+       if (!vbios) {
+               kfree(dceip);
+               return;
+       }
+
+       dceip->version = version;
 
        switch (version) {
        case BW_CALCS_VERSION_CARRIZO:
-               vbios.memory_type = bw_def_gddr5;
-               vbios.dram_channel_width_in_bits = 64;
-               vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
-               vbios.number_of_dram_banks = 8;
-               vbios.high_yclk = bw_int_to_fixed(1600);
-               vbios.mid_yclk = bw_int_to_fixed(1600);
-               vbios.low_yclk = bw_frc_to_fixed(66666, 100);
-               vbios.low_sclk = bw_int_to_fixed(200);
-               vbios.mid1_sclk = bw_int_to_fixed(300);
-               vbios.mid2_sclk = bw_int_to_fixed(300);
-               vbios.mid3_sclk = bw_int_to_fixed(300);
-               vbios.mid4_sclk = bw_int_to_fixed(300);
-               vbios.mid5_sclk = bw_int_to_fixed(300);
-               vbios.mid6_sclk = bw_int_to_fixed(300);
-               vbios.high_sclk = bw_frc_to_fixed(62609, 100);
-               vbios.low_voltage_max_dispclk = bw_int_to_fixed(352);
-               vbios.mid_voltage_max_dispclk = bw_int_to_fixed(467);
-               vbios.high_voltage_max_dispclk = bw_int_to_fixed(643);
-               vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
-               vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
-               vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
-               vbios.data_return_bus_width = bw_int_to_fixed(32);
-               vbios.trc = bw_int_to_fixed(50);
-               vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
-               vbios.stutter_self_refresh_exit_latency = bw_frc_to_fixed(153, 10);
-               vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
-               vbios.nbp_state_change_latency = bw_frc_to_fixed(19649, 1000);
-               vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
-               vbios.scatter_gather_enable = true;
-               vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
-               vbios.cursor_width = 32;
-               vbios.average_compression_rate = 4;
-               vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
-               vbios.blackout_duration = bw_int_to_fixed(0); /* us */
-               vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
-
-               dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
-               dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
-               dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
-               dceip.large_cursor = false;
-               dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
-               dceip.dmif_pipe_en_fbc_chunk_tracker = false;
-               dceip.cursor_max_outstanding_group_num = 1;
-               dceip.lines_interleaved_into_lb = 2;
-               dceip.chunk_width = 256;
-               dceip.number_of_graphics_pipes = 3;
-               dceip.number_of_underlay_pipes = 1;
-               dceip.low_power_tiling_mode = 0;
-               dceip.display_write_back_supported = false;
-               dceip.argb_compression_support = false;
-               dceip.underlay_vscaler_efficiency6_bit_per_component =
+               vbios->memory_type = bw_def_gddr5;
+               vbios->dram_channel_width_in_bits = 64;
+               vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits;
+               vbios->number_of_dram_banks = 8;
+               vbios->high_yclk = bw_int_to_fixed(1600);
+               vbios->mid_yclk = bw_int_to_fixed(1600);
+               vbios->low_yclk = bw_frc_to_fixed(66666, 100);
+               vbios->low_sclk = bw_int_to_fixed(200);
+               vbios->mid1_sclk = bw_int_to_fixed(300);
+               vbios->mid2_sclk = bw_int_to_fixed(300);
+               vbios->mid3_sclk = bw_int_to_fixed(300);
+               vbios->mid4_sclk = bw_int_to_fixed(300);
+               vbios->mid5_sclk = bw_int_to_fixed(300);
+               vbios->mid6_sclk = bw_int_to_fixed(300);
+               vbios->high_sclk = bw_frc_to_fixed(62609, 100);
+               vbios->low_voltage_max_dispclk = bw_int_to_fixed(352);
+               vbios->mid_voltage_max_dispclk = bw_int_to_fixed(467);
+               vbios->high_voltage_max_dispclk = bw_int_to_fixed(643);
+               vbios->low_voltage_max_phyclk = bw_int_to_fixed(540);
+               vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810);
+               vbios->high_voltage_max_phyclk = bw_int_to_fixed(810);
+               vbios->data_return_bus_width = bw_int_to_fixed(32);
+               vbios->trc = bw_int_to_fixed(50);
+               vbios->dmifmc_urgent_latency = bw_int_to_fixed(4);
+               vbios->stutter_self_refresh_exit_latency = bw_frc_to_fixed(153, 10);
+               vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+               vbios->nbp_state_change_latency = bw_frc_to_fixed(19649, 1000);
+               vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+               vbios->scatter_gather_enable = true;
+               vbios->down_spread_percentage = bw_frc_to_fixed(5, 10);
+               vbios->cursor_width = 32;
+               vbios->average_compression_rate = 4;
+               vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+               vbios->blackout_duration = bw_int_to_fixed(0); /* us */
+               vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+               dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+               dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+               dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100;
+               dceip->large_cursor = false;
+               dceip->dmif_request_buffer_size = bw_int_to_fixed(768);
+               dceip->dmif_pipe_en_fbc_chunk_tracker = false;
+               dceip->cursor_max_outstanding_group_num = 1;
+               dceip->lines_interleaved_into_lb = 2;
+               dceip->chunk_width = 256;
+               dceip->number_of_graphics_pipes = 3;
+               dceip->number_of_underlay_pipes = 1;
+               dceip->low_power_tiling_mode = 0;
+               dceip->display_write_back_supported = false;
+               dceip->argb_compression_support = false;
+               dceip->underlay_vscaler_efficiency6_bit_per_component =
                        bw_frc_to_fixed(35556, 10000);
-               dceip.underlay_vscaler_efficiency8_bit_per_component =
+               dceip->underlay_vscaler_efficiency8_bit_per_component =
                        bw_frc_to_fixed(34286, 10000);
-               dceip.underlay_vscaler_efficiency10_bit_per_component =
+               dceip->underlay_vscaler_efficiency10_bit_per_component =
                        bw_frc_to_fixed(32, 10);
-               dceip.underlay_vscaler_efficiency12_bit_per_component =
+               dceip->underlay_vscaler_efficiency12_bit_per_component =
                        bw_int_to_fixed(3);
-               dceip.graphics_vscaler_efficiency6_bit_per_component =
+               dceip->graphics_vscaler_efficiency6_bit_per_component =
                        bw_frc_to_fixed(35, 10);
-               dceip.graphics_vscaler_efficiency8_bit_per_component =
+               dceip->graphics_vscaler_efficiency8_bit_per_component =
                        bw_frc_to_fixed(34286, 10000);
-               dceip.graphics_vscaler_efficiency10_bit_per_component =
+               dceip->graphics_vscaler_efficiency10_bit_per_component =
                        bw_frc_to_fixed(32, 10);
-               dceip.graphics_vscaler_efficiency12_bit_per_component =
+               dceip->graphics_vscaler_efficiency12_bit_per_component =
                        bw_int_to_fixed(3);
-               dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
-               dceip.max_dmif_buffer_allocated = 2;
-               dceip.graphics_dmif_size = 12288;
-               dceip.underlay_luma_dmif_size = 19456;
-               dceip.underlay_chroma_dmif_size = 23552;
-               dceip.pre_downscaler_enabled = true;
-               dceip.underlay_downscale_prefetch_enabled = true;
-               dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
-               dceip.lb_size_per_component444 = bw_int_to_fixed(82176);
-               dceip.graphics_lb_nodownscaling_multi_line_prefetching = false;
-               dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+               dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3);
+               dceip->max_dmif_buffer_allocated = 2;
+               dceip->graphics_dmif_size = 12288;
+               dceip->underlay_luma_dmif_size = 19456;
+               dceip->underlay_chroma_dmif_size = 23552;
+               dceip->pre_downscaler_enabled = true;
+               dceip->underlay_downscale_prefetch_enabled = true;
+               dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+               dceip->lb_size_per_component444 = bw_int_to_fixed(82176);
+               dceip->graphics_lb_nodownscaling_multi_line_prefetching = false;
+               dceip->stutter_and_dram_clock_state_change_gated_before_cursor =
                        bw_int_to_fixed(0);
-               dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+               dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed(
                        82176);
-               dceip.underlay420_chroma_lb_size_per_component =
+               dceip->underlay420_chroma_lb_size_per_component =
                        bw_int_to_fixed(164352);
-               dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+               dceip->underlay422_lb_size_per_component = bw_int_to_fixed(
                        82176);
-               dceip.cursor_chunk_width = bw_int_to_fixed(64);
-               dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
-               dceip.underlay_maximum_width_efficient_for_tiling =
+               dceip->cursor_chunk_width = bw_int_to_fixed(64);
+               dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+               dceip->underlay_maximum_width_efficient_for_tiling =
                        bw_int_to_fixed(1920);
-               dceip.underlay_maximum_height_efficient_for_tiling =
+               dceip->underlay_maximum_height_efficient_for_tiling =
                        bw_int_to_fixed(1080);
-               dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+               dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
                        bw_frc_to_fixed(3, 10);
-               dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+               dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
                        bw_int_to_fixed(25);
-               dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+               dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed(
                        2);
-               dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+               dceip->maximum_total_outstanding_pte_requests_allowed_by_saw =
                        bw_int_to_fixed(128);
-               dceip.limit_excessive_outstanding_dmif_requests = true;
-               dceip.linear_mode_line_request_alternation_slice =
+               dceip->limit_excessive_outstanding_dmif_requests = true;
+               dceip->linear_mode_line_request_alternation_slice =
                        bw_int_to_fixed(64);
-               dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+               dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode =
                        32;
-               dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
-               dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
-               dceip.request_efficiency = bw_frc_to_fixed(8, 10);
-               dceip.dispclk_per_request = bw_int_to_fixed(2);
-               dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
-               dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
-               dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
-               dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); /* todo: this is a bug*/
+               dceip->display_write_back420_luma_mcifwr_buffer_size = 12288;
+               dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192;
+               dceip->request_efficiency = bw_frc_to_fixed(8, 10);
+               dceip->dispclk_per_request = bw_int_to_fixed(2);
+               dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+               dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+               dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2;
+               dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); /* todo: this is a bug*/
                break;
        case BW_CALCS_VERSION_POLARIS10:
                /* TODO: Treat VEGAM the same as P10 for now
                 * Need to tune the para for VEGAM if needed */
        case BW_CALCS_VERSION_VEGAM:
-               vbios.memory_type = bw_def_gddr5;
-               vbios.dram_channel_width_in_bits = 32;
-               vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
-               vbios.number_of_dram_banks = 8;
-               vbios.high_yclk = bw_int_to_fixed(6000);
-               vbios.mid_yclk = bw_int_to_fixed(3200);
-               vbios.low_yclk = bw_int_to_fixed(1000);
-               vbios.low_sclk = bw_int_to_fixed(300);
-               vbios.mid1_sclk = bw_int_to_fixed(400);
-               vbios.mid2_sclk = bw_int_to_fixed(500);
-               vbios.mid3_sclk = bw_int_to_fixed(600);
-               vbios.mid4_sclk = bw_int_to_fixed(700);
-               vbios.mid5_sclk = bw_int_to_fixed(800);
-               vbios.mid6_sclk = bw_int_to_fixed(974);
-               vbios.high_sclk = bw_int_to_fixed(1154);
-               vbios.low_voltage_max_dispclk = bw_int_to_fixed(459);
-               vbios.mid_voltage_max_dispclk = bw_int_to_fixed(654);
-               vbios.high_voltage_max_dispclk = bw_int_to_fixed(1108);
-               vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
-               vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
-               vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
-               vbios.data_return_bus_width = bw_int_to_fixed(32);
-               vbios.trc = bw_int_to_fixed(48);
-               vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
-               vbios.stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
-               vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
-               vbios.nbp_state_change_latency = bw_int_to_fixed(45);
-               vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
-               vbios.scatter_gather_enable = true;
-               vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
-               vbios.cursor_width = 32;
-               vbios.average_compression_rate = 4;
-               vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
-               vbios.blackout_duration = bw_int_to_fixed(0); /* us */
-               vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
-
-               dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
-               dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
-               dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
-               dceip.large_cursor = false;
-               dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
-               dceip.dmif_pipe_en_fbc_chunk_tracker = false;
-               dceip.cursor_max_outstanding_group_num = 1;
-               dceip.lines_interleaved_into_lb = 2;
-               dceip.chunk_width = 256;
-               dceip.number_of_graphics_pipes = 6;
-               dceip.number_of_underlay_pipes = 0;
-               dceip.low_power_tiling_mode = 0;
-               dceip.display_write_back_supported = false;
-               dceip.argb_compression_support = true;
-               dceip.underlay_vscaler_efficiency6_bit_per_component =
+               vbios->memory_type = bw_def_gddr5;
+               vbios->dram_channel_width_in_bits = 32;
+               vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits;
+               vbios->number_of_dram_banks = 8;
+               vbios->high_yclk = bw_int_to_fixed(6000);
+               vbios->mid_yclk = bw_int_to_fixed(3200);
+               vbios->low_yclk = bw_int_to_fixed(1000);
+               vbios->low_sclk = bw_int_to_fixed(300);
+               vbios->mid1_sclk = bw_int_to_fixed(400);
+               vbios->mid2_sclk = bw_int_to_fixed(500);
+               vbios->mid3_sclk = bw_int_to_fixed(600);
+               vbios->mid4_sclk = bw_int_to_fixed(700);
+               vbios->mid5_sclk = bw_int_to_fixed(800);
+               vbios->mid6_sclk = bw_int_to_fixed(974);
+               vbios->high_sclk = bw_int_to_fixed(1154);
+               vbios->low_voltage_max_dispclk = bw_int_to_fixed(459);
+               vbios->mid_voltage_max_dispclk = bw_int_to_fixed(654);
+               vbios->high_voltage_max_dispclk = bw_int_to_fixed(1108);
+               vbios->low_voltage_max_phyclk = bw_int_to_fixed(540);
+               vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810);
+               vbios->high_voltage_max_phyclk = bw_int_to_fixed(810);
+               vbios->data_return_bus_width = bw_int_to_fixed(32);
+               vbios->trc = bw_int_to_fixed(48);
+               vbios->dmifmc_urgent_latency = bw_int_to_fixed(3);
+               vbios->stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
+               vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+               vbios->nbp_state_change_latency = bw_int_to_fixed(45);
+               vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+               vbios->scatter_gather_enable = true;
+               vbios->down_spread_percentage = bw_frc_to_fixed(5, 10);
+               vbios->cursor_width = 32;
+               vbios->average_compression_rate = 4;
+               vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+               vbios->blackout_duration = bw_int_to_fixed(0); /* us */
+               vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+               dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+               dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+               dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100;
+               dceip->large_cursor = false;
+               dceip->dmif_request_buffer_size = bw_int_to_fixed(768);
+               dceip->dmif_pipe_en_fbc_chunk_tracker = false;
+               dceip->cursor_max_outstanding_group_num = 1;
+               dceip->lines_interleaved_into_lb = 2;
+               dceip->chunk_width = 256;
+               dceip->number_of_graphics_pipes = 6;
+               dceip->number_of_underlay_pipes = 0;
+               dceip->low_power_tiling_mode = 0;
+               dceip->display_write_back_supported = false;
+               dceip->argb_compression_support = true;
+               dceip->underlay_vscaler_efficiency6_bit_per_component =
                        bw_frc_to_fixed(35556, 10000);
-               dceip.underlay_vscaler_efficiency8_bit_per_component =
+               dceip->underlay_vscaler_efficiency8_bit_per_component =
                        bw_frc_to_fixed(34286, 10000);
-               dceip.underlay_vscaler_efficiency10_bit_per_component =
+               dceip->underlay_vscaler_efficiency10_bit_per_component =
                        bw_frc_to_fixed(32, 10);
-               dceip.underlay_vscaler_efficiency12_bit_per_component =
+               dceip->underlay_vscaler_efficiency12_bit_per_component =
                        bw_int_to_fixed(3);
-               dceip.graphics_vscaler_efficiency6_bit_per_component =
+               dceip->graphics_vscaler_efficiency6_bit_per_component =
                        bw_frc_to_fixed(35, 10);
-               dceip.graphics_vscaler_efficiency8_bit_per_component =
+               dceip->graphics_vscaler_efficiency8_bit_per_component =
                        bw_frc_to_fixed(34286, 10000);
-               dceip.graphics_vscaler_efficiency10_bit_per_component =
+               dceip->graphics_vscaler_efficiency10_bit_per_component =
                        bw_frc_to_fixed(32, 10);
-               dceip.graphics_vscaler_efficiency12_bit_per_component =
+               dceip->graphics_vscaler_efficiency12_bit_per_component =
                        bw_int_to_fixed(3);
-               dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
-               dceip.max_dmif_buffer_allocated = 4;
-               dceip.graphics_dmif_size = 12288;
-               dceip.underlay_luma_dmif_size = 19456;
-               dceip.underlay_chroma_dmif_size = 23552;
-               dceip.pre_downscaler_enabled = true;
-               dceip.underlay_downscale_prefetch_enabled = true;
-               dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
-               dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
-               dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
-               dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+               dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3);
+               dceip->max_dmif_buffer_allocated = 4;
+               dceip->graphics_dmif_size = 12288;
+               dceip->underlay_luma_dmif_size = 19456;
+               dceip->underlay_chroma_dmif_size = 23552;
+               dceip->pre_downscaler_enabled = true;
+               dceip->underlay_downscale_prefetch_enabled = true;
+               dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+               dceip->lb_size_per_component444 = bw_int_to_fixed(245952);
+               dceip->graphics_lb_nodownscaling_multi_line_prefetching = true;
+               dceip->stutter_and_dram_clock_state_change_gated_before_cursor =
                        bw_int_to_fixed(1);
-               dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+               dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed(
                        82176);
-               dceip.underlay420_chroma_lb_size_per_component =
+               dceip->underlay420_chroma_lb_size_per_component =
                        bw_int_to_fixed(164352);
-               dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+               dceip->underlay422_lb_size_per_component = bw_int_to_fixed(
                        82176);
-               dceip.cursor_chunk_width = bw_int_to_fixed(64);
-               dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
-               dceip.underlay_maximum_width_efficient_for_tiling =
+               dceip->cursor_chunk_width = bw_int_to_fixed(64);
+               dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+               dceip->underlay_maximum_width_efficient_for_tiling =
                        bw_int_to_fixed(1920);
-               dceip.underlay_maximum_height_efficient_for_tiling =
+               dceip->underlay_maximum_height_efficient_for_tiling =
                        bw_int_to_fixed(1080);
-               dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+               dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
                        bw_frc_to_fixed(3, 10);
-               dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+               dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
                        bw_int_to_fixed(25);
-               dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+               dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed(
                        2);
-               dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+               dceip->maximum_total_outstanding_pte_requests_allowed_by_saw =
                        bw_int_to_fixed(128);
-               dceip.limit_excessive_outstanding_dmif_requests = true;
-               dceip.linear_mode_line_request_alternation_slice =
+               dceip->limit_excessive_outstanding_dmif_requests = true;
+               dceip->linear_mode_line_request_alternation_slice =
                        bw_int_to_fixed(64);
-               dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+               dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode =
                        32;
-               dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
-               dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
-               dceip.request_efficiency = bw_frc_to_fixed(8, 10);
-               dceip.dispclk_per_request = bw_int_to_fixed(2);
-               dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
-               dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
-               dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
-               dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+               dceip->display_write_back420_luma_mcifwr_buffer_size = 12288;
+               dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192;
+               dceip->request_efficiency = bw_frc_to_fixed(8, 10);
+               dceip->dispclk_per_request = bw_int_to_fixed(2);
+               dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+               dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+               dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2;
+               dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
                break;
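
The `dceip.` to `dceip->` (and `vbios.` to `vbios->`) churn repeated in every case of this switch follows from one change outside the hunk: struct bw_calcs_dceip and struct bw_calcs_vbios are large parameter blocks, and keeping them as locals is what made this function's stack frame oversized, so they become heap temporaries and every member access turns into a pointer dereference. A minimal sketch of what the function prologue presumably looks like after the change (names assumed from the usage here and from the kfree() calls at the end of the function, neither visible in full in this hunk):

	struct bw_calcs_dceip *dceip;
	struct bw_calcs_vbios *vbios;

	dceip = kzalloc(sizeof(*dceip), GFP_KERNEL);
	if (!dceip)
		return;

	vbios = kzalloc(sizeof(*vbios), GFP_KERNEL);
	if (!vbios) {
		kfree(dceip);
		return;
	}
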
        case BW_CALCS_VERSION_POLARIS11:
-               vbios.memory_type = bw_def_gddr5;
-               vbios.dram_channel_width_in_bits = 32;
-               vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
-               vbios.number_of_dram_banks = 8;
-               vbios.high_yclk = bw_int_to_fixed(6000);
-               vbios.mid_yclk = bw_int_to_fixed(3200);
-               vbios.low_yclk = bw_int_to_fixed(1000);
-               vbios.low_sclk = bw_int_to_fixed(300);
-               vbios.mid1_sclk = bw_int_to_fixed(400);
-               vbios.mid2_sclk = bw_int_to_fixed(500);
-               vbios.mid3_sclk = bw_int_to_fixed(600);
-               vbios.mid4_sclk = bw_int_to_fixed(700);
-               vbios.mid5_sclk = bw_int_to_fixed(800);
-               vbios.mid6_sclk = bw_int_to_fixed(974);
-               vbios.high_sclk = bw_int_to_fixed(1154);
-               vbios.low_voltage_max_dispclk = bw_int_to_fixed(459);
-               vbios.mid_voltage_max_dispclk = bw_int_to_fixed(654);
-               vbios.high_voltage_max_dispclk = bw_int_to_fixed(1108);
-               vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
-               vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
-               vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
-               vbios.data_return_bus_width = bw_int_to_fixed(32);
-               vbios.trc = bw_int_to_fixed(48);
-               if (vbios.number_of_dram_channels == 2) // 64-bit
-                       vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
+               vbios->memory_type = bw_def_gddr5;
+               vbios->dram_channel_width_in_bits = 32;
+               vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits;
+               vbios->number_of_dram_banks = 8;
+               vbios->high_yclk = bw_int_to_fixed(6000);
+               vbios->mid_yclk = bw_int_to_fixed(3200);
+               vbios->low_yclk = bw_int_to_fixed(1000);
+               vbios->low_sclk = bw_int_to_fixed(300);
+               vbios->mid1_sclk = bw_int_to_fixed(400);
+               vbios->mid2_sclk = bw_int_to_fixed(500);
+               vbios->mid3_sclk = bw_int_to_fixed(600);
+               vbios->mid4_sclk = bw_int_to_fixed(700);
+               vbios->mid5_sclk = bw_int_to_fixed(800);
+               vbios->mid6_sclk = bw_int_to_fixed(974);
+               vbios->high_sclk = bw_int_to_fixed(1154);
+               vbios->low_voltage_max_dispclk = bw_int_to_fixed(459);
+               vbios->mid_voltage_max_dispclk = bw_int_to_fixed(654);
+               vbios->high_voltage_max_dispclk = bw_int_to_fixed(1108);
+               vbios->low_voltage_max_phyclk = bw_int_to_fixed(540);
+               vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810);
+               vbios->high_voltage_max_phyclk = bw_int_to_fixed(810);
+               vbios->data_return_bus_width = bw_int_to_fixed(32);
+               vbios->trc = bw_int_to_fixed(48);
+               if (vbios->number_of_dram_channels == 2) // 64-bit
+                       vbios->dmifmc_urgent_latency = bw_int_to_fixed(4);
                else
-                       vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
-               vbios.stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
-               vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
-               vbios.nbp_state_change_latency = bw_int_to_fixed(45);
-               vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
-               vbios.scatter_gather_enable = true;
-               vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
-               vbios.cursor_width = 32;
-               vbios.average_compression_rate = 4;
-               vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
-               vbios.blackout_duration = bw_int_to_fixed(0); /* us */
-               vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
-
-               dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
-               dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
-               dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
-               dceip.large_cursor = false;
-               dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
-               dceip.dmif_pipe_en_fbc_chunk_tracker = false;
-               dceip.cursor_max_outstanding_group_num = 1;
-               dceip.lines_interleaved_into_lb = 2;
-               dceip.chunk_width = 256;
-               dceip.number_of_graphics_pipes = 5;
-               dceip.number_of_underlay_pipes = 0;
-               dceip.low_power_tiling_mode = 0;
-               dceip.display_write_back_supported = false;
-               dceip.argb_compression_support = true;
-               dceip.underlay_vscaler_efficiency6_bit_per_component =
+                       vbios->dmifmc_urgent_latency = bw_int_to_fixed(3);
+               vbios->stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
+               vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+               vbios->nbp_state_change_latency = bw_int_to_fixed(45);
+               vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+               vbios->scatter_gather_enable = true;
+               vbios->down_spread_percentage = bw_frc_to_fixed(5, 10);
+               vbios->cursor_width = 32;
+               vbios->average_compression_rate = 4;
+               vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+               vbios->blackout_duration = bw_int_to_fixed(0); /* us */
+               vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+               dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+               dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+               dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100;
+               dceip->large_cursor = false;
+               dceip->dmif_request_buffer_size = bw_int_to_fixed(768);
+               dceip->dmif_pipe_en_fbc_chunk_tracker = false;
+               dceip->cursor_max_outstanding_group_num = 1;
+               dceip->lines_interleaved_into_lb = 2;
+               dceip->chunk_width = 256;
+               dceip->number_of_graphics_pipes = 5;
+               dceip->number_of_underlay_pipes = 0;
+               dceip->low_power_tiling_mode = 0;
+               dceip->display_write_back_supported = false;
+               dceip->argb_compression_support = true;
+               dceip->underlay_vscaler_efficiency6_bit_per_component =
                        bw_frc_to_fixed(35556, 10000);
-               dceip.underlay_vscaler_efficiency8_bit_per_component =
+               dceip->underlay_vscaler_efficiency8_bit_per_component =
                        bw_frc_to_fixed(34286, 10000);
-               dceip.underlay_vscaler_efficiency10_bit_per_component =
+               dceip->underlay_vscaler_efficiency10_bit_per_component =
                        bw_frc_to_fixed(32, 10);
-               dceip.underlay_vscaler_efficiency12_bit_per_component =
+               dceip->underlay_vscaler_efficiency12_bit_per_component =
                        bw_int_to_fixed(3);
-               dceip.graphics_vscaler_efficiency6_bit_per_component =
+               dceip->graphics_vscaler_efficiency6_bit_per_component =
                        bw_frc_to_fixed(35, 10);
-               dceip.graphics_vscaler_efficiency8_bit_per_component =
+               dceip->graphics_vscaler_efficiency8_bit_per_component =
                        bw_frc_to_fixed(34286, 10000);
-               dceip.graphics_vscaler_efficiency10_bit_per_component =
+               dceip->graphics_vscaler_efficiency10_bit_per_component =
                        bw_frc_to_fixed(32, 10);
-               dceip.graphics_vscaler_efficiency12_bit_per_component =
+               dceip->graphics_vscaler_efficiency12_bit_per_component =
                        bw_int_to_fixed(3);
-               dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
-               dceip.max_dmif_buffer_allocated = 4;
-               dceip.graphics_dmif_size = 12288;
-               dceip.underlay_luma_dmif_size = 19456;
-               dceip.underlay_chroma_dmif_size = 23552;
-               dceip.pre_downscaler_enabled = true;
-               dceip.underlay_downscale_prefetch_enabled = true;
-               dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
-               dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
-               dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
-               dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+               dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3);
+               dceip->max_dmif_buffer_allocated = 4;
+               dceip->graphics_dmif_size = 12288;
+               dceip->underlay_luma_dmif_size = 19456;
+               dceip->underlay_chroma_dmif_size = 23552;
+               dceip->pre_downscaler_enabled = true;
+               dceip->underlay_downscale_prefetch_enabled = true;
+               dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+               dceip->lb_size_per_component444 = bw_int_to_fixed(245952);
+               dceip->graphics_lb_nodownscaling_multi_line_prefetching = true;
+               dceip->stutter_and_dram_clock_state_change_gated_before_cursor =
                        bw_int_to_fixed(1);
-               dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+               dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed(
                        82176);
-               dceip.underlay420_chroma_lb_size_per_component =
+               dceip->underlay420_chroma_lb_size_per_component =
                        bw_int_to_fixed(164352);
-               dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+               dceip->underlay422_lb_size_per_component = bw_int_to_fixed(
                        82176);
-               dceip.cursor_chunk_width = bw_int_to_fixed(64);
-               dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
-               dceip.underlay_maximum_width_efficient_for_tiling =
+               dceip->cursor_chunk_width = bw_int_to_fixed(64);
+               dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+               dceip->underlay_maximum_width_efficient_for_tiling =
                        bw_int_to_fixed(1920);
-               dceip.underlay_maximum_height_efficient_for_tiling =
+               dceip->underlay_maximum_height_efficient_for_tiling =
                        bw_int_to_fixed(1080);
-               dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+               dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
                        bw_frc_to_fixed(3, 10);
-               dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+               dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
                        bw_int_to_fixed(25);
-               dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+               dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed(
                        2);
-               dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+               dceip->maximum_total_outstanding_pte_requests_allowed_by_saw =
                        bw_int_to_fixed(128);
-               dceip.limit_excessive_outstanding_dmif_requests = true;
-               dceip.linear_mode_line_request_alternation_slice =
+               dceip->limit_excessive_outstanding_dmif_requests = true;
+               dceip->linear_mode_line_request_alternation_slice =
                        bw_int_to_fixed(64);
-               dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+               dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode =
                        32;
-               dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
-               dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
-               dceip.request_efficiency = bw_frc_to_fixed(8, 10);
-               dceip.dispclk_per_request = bw_int_to_fixed(2);
-               dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
-               dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
-               dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
-               dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+               dceip->display_write_back420_luma_mcifwr_buffer_size = 12288;
+               dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192;
+               dceip->request_efficiency = bw_frc_to_fixed(8, 10);
+               dceip->dispclk_per_request = bw_int_to_fixed(2);
+               dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+               dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+               dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2;
+               dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
                break;
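
Note the one computed field in each of these per-ASIC tables: the channel count is derived from the board's VRAM width rather than hard-coded. For a 64-bit Polaris11 board, asic_id.vram_width = 64 and 64 / 32 = 2 channels, so the `// 64-bit` branch above assigns the higher DMIF urgent latency of 4; a 256-bit board resolves to 8 channels and gets latency 3. The narrower memory interface is evidently assumed to need more latency headroom.
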
        case BW_CALCS_VERSION_POLARIS12:
-               vbios.memory_type = bw_def_gddr5;
-               vbios.dram_channel_width_in_bits = 32;
-               vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
-               vbios.number_of_dram_banks = 8;
-               vbios.high_yclk = bw_int_to_fixed(6000);
-               vbios.mid_yclk = bw_int_to_fixed(3200);
-               vbios.low_yclk = bw_int_to_fixed(1000);
-               vbios.low_sclk = bw_int_to_fixed(678);
-               vbios.mid1_sclk = bw_int_to_fixed(864);
-               vbios.mid2_sclk = bw_int_to_fixed(900);
-               vbios.mid3_sclk = bw_int_to_fixed(920);
-               vbios.mid4_sclk = bw_int_to_fixed(940);
-               vbios.mid5_sclk = bw_int_to_fixed(960);
-               vbios.mid6_sclk = bw_int_to_fixed(980);
-               vbios.high_sclk = bw_int_to_fixed(1049);
-               vbios.low_voltage_max_dispclk = bw_int_to_fixed(459);
-               vbios.mid_voltage_max_dispclk = bw_int_to_fixed(654);
-               vbios.high_voltage_max_dispclk = bw_int_to_fixed(1108);
-               vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
-               vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
-               vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
-               vbios.data_return_bus_width = bw_int_to_fixed(32);
-               vbios.trc = bw_int_to_fixed(48);
-               if (vbios.number_of_dram_channels == 2) // 64-bit
-                       vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
+               vbios->memory_type = bw_def_gddr5;
+               vbios->dram_channel_width_in_bits = 32;
+               vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits;
+               vbios->number_of_dram_banks = 8;
+               vbios->high_yclk = bw_int_to_fixed(6000);
+               vbios->mid_yclk = bw_int_to_fixed(3200);
+               vbios->low_yclk = bw_int_to_fixed(1000);
+               vbios->low_sclk = bw_int_to_fixed(678);
+               vbios->mid1_sclk = bw_int_to_fixed(864);
+               vbios->mid2_sclk = bw_int_to_fixed(900);
+               vbios->mid3_sclk = bw_int_to_fixed(920);
+               vbios->mid4_sclk = bw_int_to_fixed(940);
+               vbios->mid5_sclk = bw_int_to_fixed(960);
+               vbios->mid6_sclk = bw_int_to_fixed(980);
+               vbios->high_sclk = bw_int_to_fixed(1049);
+               vbios->low_voltage_max_dispclk = bw_int_to_fixed(459);
+               vbios->mid_voltage_max_dispclk = bw_int_to_fixed(654);
+               vbios->high_voltage_max_dispclk = bw_int_to_fixed(1108);
+               vbios->low_voltage_max_phyclk = bw_int_to_fixed(540);
+               vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810);
+               vbios->high_voltage_max_phyclk = bw_int_to_fixed(810);
+               vbios->data_return_bus_width = bw_int_to_fixed(32);
+               vbios->trc = bw_int_to_fixed(48);
+               if (vbios->number_of_dram_channels == 2) // 64-bit
+                       vbios->dmifmc_urgent_latency = bw_int_to_fixed(4);
                else
-                       vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
-               vbios.stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
-               vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
-               vbios.nbp_state_change_latency = bw_int_to_fixed(250);
-               vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
-               vbios.scatter_gather_enable = false;
-               vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
-               vbios.cursor_width = 32;
-               vbios.average_compression_rate = 4;
-               vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
-               vbios.blackout_duration = bw_int_to_fixed(0); /* us */
-               vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
-
-               dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
-               dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
-               dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
-               dceip.large_cursor = false;
-               dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
-               dceip.dmif_pipe_en_fbc_chunk_tracker = false;
-               dceip.cursor_max_outstanding_group_num = 1;
-               dceip.lines_interleaved_into_lb = 2;
-               dceip.chunk_width = 256;
-               dceip.number_of_graphics_pipes = 5;
-               dceip.number_of_underlay_pipes = 0;
-               dceip.low_power_tiling_mode = 0;
-               dceip.display_write_back_supported = true;
-               dceip.argb_compression_support = true;
-               dceip.underlay_vscaler_efficiency6_bit_per_component =
+                       vbios->dmifmc_urgent_latency = bw_int_to_fixed(3);
+               vbios->stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
+               vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+               vbios->nbp_state_change_latency = bw_int_to_fixed(250);
+               vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+               vbios->scatter_gather_enable = false;
+               vbios->down_spread_percentage = bw_frc_to_fixed(5, 10);
+               vbios->cursor_width = 32;
+               vbios->average_compression_rate = 4;
+               vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+               vbios->blackout_duration = bw_int_to_fixed(0); /* us */
+               vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+               dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+               dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+               dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100;
+               dceip->large_cursor = false;
+               dceip->dmif_request_buffer_size = bw_int_to_fixed(768);
+               dceip->dmif_pipe_en_fbc_chunk_tracker = false;
+               dceip->cursor_max_outstanding_group_num = 1;
+               dceip->lines_interleaved_into_lb = 2;
+               dceip->chunk_width = 256;
+               dceip->number_of_graphics_pipes = 5;
+               dceip->number_of_underlay_pipes = 0;
+               dceip->low_power_tiling_mode = 0;
+               dceip->display_write_back_supported = true;
+               dceip->argb_compression_support = true;
+               dceip->underlay_vscaler_efficiency6_bit_per_component =
                        bw_frc_to_fixed(35556, 10000);
-               dceip.underlay_vscaler_efficiency8_bit_per_component =
+               dceip->underlay_vscaler_efficiency8_bit_per_component =
                        bw_frc_to_fixed(34286, 10000);
-               dceip.underlay_vscaler_efficiency10_bit_per_component =
+               dceip->underlay_vscaler_efficiency10_bit_per_component =
                        bw_frc_to_fixed(32, 10);
-               dceip.underlay_vscaler_efficiency12_bit_per_component =
+               dceip->underlay_vscaler_efficiency12_bit_per_component =
                        bw_int_to_fixed(3);
-               dceip.graphics_vscaler_efficiency6_bit_per_component =
+               dceip->graphics_vscaler_efficiency6_bit_per_component =
                        bw_frc_to_fixed(35, 10);
-               dceip.graphics_vscaler_efficiency8_bit_per_component =
+               dceip->graphics_vscaler_efficiency8_bit_per_component =
                        bw_frc_to_fixed(34286, 10000);
-               dceip.graphics_vscaler_efficiency10_bit_per_component =
+               dceip->graphics_vscaler_efficiency10_bit_per_component =
                        bw_frc_to_fixed(32, 10);
-               dceip.graphics_vscaler_efficiency12_bit_per_component =
+               dceip->graphics_vscaler_efficiency12_bit_per_component =
                        bw_int_to_fixed(3);
-               dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
-               dceip.max_dmif_buffer_allocated = 4;
-               dceip.graphics_dmif_size = 12288;
-               dceip.underlay_luma_dmif_size = 19456;
-               dceip.underlay_chroma_dmif_size = 23552;
-               dceip.pre_downscaler_enabled = true;
-               dceip.underlay_downscale_prefetch_enabled = true;
-               dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
-               dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
-               dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
-               dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+               dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3);
+               dceip->max_dmif_buffer_allocated = 4;
+               dceip->graphics_dmif_size = 12288;
+               dceip->underlay_luma_dmif_size = 19456;
+               dceip->underlay_chroma_dmif_size = 23552;
+               dceip->pre_downscaler_enabled = true;
+               dceip->underlay_downscale_prefetch_enabled = true;
+               dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+               dceip->lb_size_per_component444 = bw_int_to_fixed(245952);
+               dceip->graphics_lb_nodownscaling_multi_line_prefetching = true;
+               dceip->stutter_and_dram_clock_state_change_gated_before_cursor =
                        bw_int_to_fixed(1);
-               dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+               dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed(
                        82176);
-               dceip.underlay420_chroma_lb_size_per_component =
+               dceip->underlay420_chroma_lb_size_per_component =
                        bw_int_to_fixed(164352);
-               dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+               dceip->underlay422_lb_size_per_component = bw_int_to_fixed(
                        82176);
-               dceip.cursor_chunk_width = bw_int_to_fixed(64);
-               dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
-               dceip.underlay_maximum_width_efficient_for_tiling =
+               dceip->cursor_chunk_width = bw_int_to_fixed(64);
+               dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+               dceip->underlay_maximum_width_efficient_for_tiling =
                        bw_int_to_fixed(1920);
-               dceip.underlay_maximum_height_efficient_for_tiling =
+               dceip->underlay_maximum_height_efficient_for_tiling =
                        bw_int_to_fixed(1080);
-               dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+               dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
                        bw_frc_to_fixed(3, 10);
-               dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+               dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
                        bw_int_to_fixed(25);
-               dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+               dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed(
                        2);
-               dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+               dceip->maximum_total_outstanding_pte_requests_allowed_by_saw =
                        bw_int_to_fixed(128);
-               dceip.limit_excessive_outstanding_dmif_requests = true;
-               dceip.linear_mode_line_request_alternation_slice =
+               dceip->limit_excessive_outstanding_dmif_requests = true;
+               dceip->linear_mode_line_request_alternation_slice =
                        bw_int_to_fixed(64);
-               dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+               dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode =
                        32;
-               dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
-               dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
-               dceip.request_efficiency = bw_frc_to_fixed(8, 10);
-               dceip.dispclk_per_request = bw_int_to_fixed(2);
-               dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
-               dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
-               dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
-               dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+               dceip->display_write_back420_luma_mcifwr_buffer_size = 12288;
+               dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192;
+               dceip->request_efficiency = bw_frc_to_fixed(8, 10);
+               dceip->dispclk_per_request = bw_int_to_fixed(2);
+               dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+               dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+               dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2;
+               dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
                break;
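
All of these tables go through two fixed-point constructors. Reading their names (their definitions are not part of this diff), bw_int_to_fixed(n) converts the integer n and bw_frc_to_fixed(n, d) converts the fraction n/d, so values that are not representable as integers are written as numerator/denominator pairs:

	/* Assumed semantics of the helpers, inferred from their names: */
	dceip->request_efficiency     = bw_frc_to_fixed(8, 10);    /* 0.8  */
	dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100); /* 1.05 */
	vbios->down_spread_percentage = bw_frc_to_fixed(5, 10);    /* 0.5% */
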
        case BW_CALCS_VERSION_STONEY:
-               vbios.memory_type = bw_def_gddr5;
-               vbios.dram_channel_width_in_bits = 64;
-               vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
-               vbios.number_of_dram_banks = 8;
-               vbios.high_yclk = bw_int_to_fixed(1866);
-               vbios.mid_yclk = bw_int_to_fixed(1866);
-               vbios.low_yclk = bw_int_to_fixed(1333);
-               vbios.low_sclk = bw_int_to_fixed(200);
-               vbios.mid1_sclk = bw_int_to_fixed(600);
-               vbios.mid2_sclk = bw_int_to_fixed(600);
-               vbios.mid3_sclk = bw_int_to_fixed(600);
-               vbios.mid4_sclk = bw_int_to_fixed(600);
-               vbios.mid5_sclk = bw_int_to_fixed(600);
-               vbios.mid6_sclk = bw_int_to_fixed(600);
-               vbios.high_sclk = bw_int_to_fixed(800);
-               vbios.low_voltage_max_dispclk = bw_int_to_fixed(352);
-               vbios.mid_voltage_max_dispclk = bw_int_to_fixed(467);
-               vbios.high_voltage_max_dispclk = bw_int_to_fixed(643);
-               vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
-               vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
-               vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
-               vbios.data_return_bus_width = bw_int_to_fixed(32);
-               vbios.trc = bw_int_to_fixed(50);
-               vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
-               vbios.stutter_self_refresh_exit_latency = bw_frc_to_fixed(158, 10);
-               vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
-               vbios.nbp_state_change_latency = bw_frc_to_fixed(2008, 100);
-               vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
-               vbios.scatter_gather_enable = true;
-               vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
-               vbios.cursor_width = 32;
-               vbios.average_compression_rate = 4;
-               vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
-               vbios.blackout_duration = bw_int_to_fixed(0); /* us */
-               vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
-
-               dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
-               dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
-               dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
-               dceip.large_cursor = false;
-               dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
-               dceip.dmif_pipe_en_fbc_chunk_tracker = false;
-               dceip.cursor_max_outstanding_group_num = 1;
-               dceip.lines_interleaved_into_lb = 2;
-               dceip.chunk_width = 256;
-               dceip.number_of_graphics_pipes = 2;
-               dceip.number_of_underlay_pipes = 1;
-               dceip.low_power_tiling_mode = 0;
-               dceip.display_write_back_supported = false;
-               dceip.argb_compression_support = true;
-               dceip.underlay_vscaler_efficiency6_bit_per_component =
+               vbios->memory_type = bw_def_gddr5;
+               vbios->dram_channel_width_in_bits = 64;
+               vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits;
+               vbios->number_of_dram_banks = 8;
+               vbios->high_yclk = bw_int_to_fixed(1866);
+               vbios->mid_yclk = bw_int_to_fixed(1866);
+               vbios->low_yclk = bw_int_to_fixed(1333);
+               vbios->low_sclk = bw_int_to_fixed(200);
+               vbios->mid1_sclk = bw_int_to_fixed(600);
+               vbios->mid2_sclk = bw_int_to_fixed(600);
+               vbios->mid3_sclk = bw_int_to_fixed(600);
+               vbios->mid4_sclk = bw_int_to_fixed(600);
+               vbios->mid5_sclk = bw_int_to_fixed(600);
+               vbios->mid6_sclk = bw_int_to_fixed(600);
+               vbios->high_sclk = bw_int_to_fixed(800);
+               vbios->low_voltage_max_dispclk = bw_int_to_fixed(352);
+               vbios->mid_voltage_max_dispclk = bw_int_to_fixed(467);
+               vbios->high_voltage_max_dispclk = bw_int_to_fixed(643);
+               vbios->low_voltage_max_phyclk = bw_int_to_fixed(540);
+               vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810);
+               vbios->high_voltage_max_phyclk = bw_int_to_fixed(810);
+               vbios->data_return_bus_width = bw_int_to_fixed(32);
+               vbios->trc = bw_int_to_fixed(50);
+               vbios->dmifmc_urgent_latency = bw_int_to_fixed(4);
+               vbios->stutter_self_refresh_exit_latency = bw_frc_to_fixed(158, 10);
+               vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+               vbios->nbp_state_change_latency = bw_frc_to_fixed(2008, 100);
+               vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+               vbios->scatter_gather_enable = true;
+               vbios->down_spread_percentage = bw_frc_to_fixed(5, 10);
+               vbios->cursor_width = 32;
+               vbios->average_compression_rate = 4;
+               vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+               vbios->blackout_duration = bw_int_to_fixed(0); /* us */
+               vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+               dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+               dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+               dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100;
+               dceip->large_cursor = false;
+               dceip->dmif_request_buffer_size = bw_int_to_fixed(768);
+               dceip->dmif_pipe_en_fbc_chunk_tracker = false;
+               dceip->cursor_max_outstanding_group_num = 1;
+               dceip->lines_interleaved_into_lb = 2;
+               dceip->chunk_width = 256;
+               dceip->number_of_graphics_pipes = 2;
+               dceip->number_of_underlay_pipes = 1;
+               dceip->low_power_tiling_mode = 0;
+               dceip->display_write_back_supported = false;
+               dceip->argb_compression_support = true;
+               dceip->underlay_vscaler_efficiency6_bit_per_component =
                        bw_frc_to_fixed(35556, 10000);
-               dceip.underlay_vscaler_efficiency8_bit_per_component =
+               dceip->underlay_vscaler_efficiency8_bit_per_component =
                        bw_frc_to_fixed(34286, 10000);
-               dceip.underlay_vscaler_efficiency10_bit_per_component =
+               dceip->underlay_vscaler_efficiency10_bit_per_component =
                        bw_frc_to_fixed(32, 10);
-               dceip.underlay_vscaler_efficiency12_bit_per_component =
+               dceip->underlay_vscaler_efficiency12_bit_per_component =
                        bw_int_to_fixed(3);
-               dceip.graphics_vscaler_efficiency6_bit_per_component =
+               dceip->graphics_vscaler_efficiency6_bit_per_component =
                        bw_frc_to_fixed(35, 10);
-               dceip.graphics_vscaler_efficiency8_bit_per_component =
+               dceip->graphics_vscaler_efficiency8_bit_per_component =
                        bw_frc_to_fixed(34286, 10000);
-               dceip.graphics_vscaler_efficiency10_bit_per_component =
+               dceip->graphics_vscaler_efficiency10_bit_per_component =
                        bw_frc_to_fixed(32, 10);
-               dceip.graphics_vscaler_efficiency12_bit_per_component =
+               dceip->graphics_vscaler_efficiency12_bit_per_component =
                        bw_int_to_fixed(3);
-               dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
-               dceip.max_dmif_buffer_allocated = 2;
-               dceip.graphics_dmif_size = 12288;
-               dceip.underlay_luma_dmif_size = 19456;
-               dceip.underlay_chroma_dmif_size = 23552;
-               dceip.pre_downscaler_enabled = true;
-               dceip.underlay_downscale_prefetch_enabled = true;
-               dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
-               dceip.lb_size_per_component444 = bw_int_to_fixed(82176);
-               dceip.graphics_lb_nodownscaling_multi_line_prefetching = false;
-               dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+               dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3);
+               dceip->max_dmif_buffer_allocated = 2;
+               dceip->graphics_dmif_size = 12288;
+               dceip->underlay_luma_dmif_size = 19456;
+               dceip->underlay_chroma_dmif_size = 23552;
+               dceip->pre_downscaler_enabled = true;
+               dceip->underlay_downscale_prefetch_enabled = true;
+               dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+               dceip->lb_size_per_component444 = bw_int_to_fixed(82176);
+               dceip->graphics_lb_nodownscaling_multi_line_prefetching = false;
+               dceip->stutter_and_dram_clock_state_change_gated_before_cursor =
                        bw_int_to_fixed(0);
-               dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+               dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed(
                        82176);
-               dceip.underlay420_chroma_lb_size_per_component =
+               dceip->underlay420_chroma_lb_size_per_component =
                        bw_int_to_fixed(164352);
-               dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+               dceip->underlay422_lb_size_per_component = bw_int_to_fixed(
                        82176);
-               dceip.cursor_chunk_width = bw_int_to_fixed(64);
-               dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
-               dceip.underlay_maximum_width_efficient_for_tiling =
+               dceip->cursor_chunk_width = bw_int_to_fixed(64);
+               dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+               dceip->underlay_maximum_width_efficient_for_tiling =
                        bw_int_to_fixed(1920);
-               dceip.underlay_maximum_height_efficient_for_tiling =
+               dceip->underlay_maximum_height_efficient_for_tiling =
                        bw_int_to_fixed(1080);
-               dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+               dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
                        bw_frc_to_fixed(3, 10);
-               dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+               dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
                        bw_int_to_fixed(25);
-               dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+               dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed(
                        2);
-               dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+               dceip->maximum_total_outstanding_pte_requests_allowed_by_saw =
                        bw_int_to_fixed(128);
-               dceip.limit_excessive_outstanding_dmif_requests = true;
-               dceip.linear_mode_line_request_alternation_slice =
+               dceip->limit_excessive_outstanding_dmif_requests = true;
+               dceip->linear_mode_line_request_alternation_slice =
                        bw_int_to_fixed(64);
-               dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+               dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode =
                        32;
-               dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
-               dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
-               dceip.request_efficiency = bw_frc_to_fixed(8, 10);
-               dceip.dispclk_per_request = bw_int_to_fixed(2);
-               dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
-               dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
-               dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
-               dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+               dceip->display_write_back420_luma_mcifwr_buffer_size = 12288;
+               dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192;
+               dceip->request_efficiency = bw_frc_to_fixed(8, 10);
+               dceip->dispclk_per_request = bw_int_to_fixed(2);
+               dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+               dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+               dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2;
+               dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
                break;
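
The Stoney case is the one APU in this list and the only one whose latencies need the fractional constructor: bw_frc_to_fixed(158, 10) is 15.8 and bw_frc_to_fixed(2008, 100) is 20.08, presumably microseconds to match the /* us */ annotation on blackout_duration. It is also the only case with a 64-bit dram_channel_width_in_bits, so a 64-bit Stoney board resolves to 64 / 64 = 1 DRAM channel.
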
        case BW_CALCS_VERSION_VEGA10:
-               vbios.memory_type = bw_def_hbm;
-               vbios.dram_channel_width_in_bits = 128;
-               vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
-               vbios.number_of_dram_banks = 16;
-               vbios.high_yclk = bw_int_to_fixed(2400);
-               vbios.mid_yclk = bw_int_to_fixed(1700);
-               vbios.low_yclk = bw_int_to_fixed(1000);
-               vbios.low_sclk = bw_int_to_fixed(300);
-               vbios.mid1_sclk = bw_int_to_fixed(350);
-               vbios.mid2_sclk = bw_int_to_fixed(400);
-               vbios.mid3_sclk = bw_int_to_fixed(500);
-               vbios.mid4_sclk = bw_int_to_fixed(600);
-               vbios.mid5_sclk = bw_int_to_fixed(700);
-               vbios.mid6_sclk = bw_int_to_fixed(760);
-               vbios.high_sclk = bw_int_to_fixed(776);
-               vbios.low_voltage_max_dispclk = bw_int_to_fixed(460);
-               vbios.mid_voltage_max_dispclk = bw_int_to_fixed(670);
-               vbios.high_voltage_max_dispclk = bw_int_to_fixed(1133);
-               vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
-               vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
-               vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
-               vbios.data_return_bus_width = bw_int_to_fixed(32);
-               vbios.trc = bw_int_to_fixed(48);
-               vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
-               vbios.stutter_self_refresh_exit_latency = bw_frc_to_fixed(75, 10);
-               vbios.stutter_self_refresh_entry_latency = bw_frc_to_fixed(19, 10);
-               vbios.nbp_state_change_latency = bw_int_to_fixed(39);
-               vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
-               vbios.scatter_gather_enable = false;
-               vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
-               vbios.cursor_width = 32;
-               vbios.average_compression_rate = 4;
-               vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 8;
-               vbios.blackout_duration = bw_int_to_fixed(0); /* us */
-               vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
-
-               dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
-               dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
-               dceip.percent_of_ideal_port_bw_received_after_urgent_latency = 100;
-               dceip.large_cursor = false;
-               dceip.dmif_request_buffer_size = bw_int_to_fixed(2304);
-               dceip.dmif_pipe_en_fbc_chunk_tracker = true;
-               dceip.cursor_max_outstanding_group_num = 1;
-               dceip.lines_interleaved_into_lb = 2;
-               dceip.chunk_width = 256;
-               dceip.number_of_graphics_pipes = 6;
-               dceip.number_of_underlay_pipes = 0;
-               dceip.low_power_tiling_mode = 0;
-               dceip.display_write_back_supported = true;
-               dceip.argb_compression_support = true;
-               dceip.underlay_vscaler_efficiency6_bit_per_component =
+               vbios->memory_type = bw_def_hbm;
+               vbios->dram_channel_width_in_bits = 128;
+               vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits;
+               vbios->number_of_dram_banks = 16;
+               vbios->high_yclk = bw_int_to_fixed(2400);
+               vbios->mid_yclk = bw_int_to_fixed(1700);
+               vbios->low_yclk = bw_int_to_fixed(1000);
+               vbios->low_sclk = bw_int_to_fixed(300);
+               vbios->mid1_sclk = bw_int_to_fixed(350);
+               vbios->mid2_sclk = bw_int_to_fixed(400);
+               vbios->mid3_sclk = bw_int_to_fixed(500);
+               vbios->mid4_sclk = bw_int_to_fixed(600);
+               vbios->mid5_sclk = bw_int_to_fixed(700);
+               vbios->mid6_sclk = bw_int_to_fixed(760);
+               vbios->high_sclk = bw_int_to_fixed(776);
+               vbios->low_voltage_max_dispclk = bw_int_to_fixed(460);
+               vbios->mid_voltage_max_dispclk = bw_int_to_fixed(670);
+               vbios->high_voltage_max_dispclk = bw_int_to_fixed(1133);
+               vbios->low_voltage_max_phyclk = bw_int_to_fixed(540);
+               vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810);
+               vbios->high_voltage_max_phyclk = bw_int_to_fixed(810);
+               vbios->data_return_bus_width = bw_int_to_fixed(32);
+               vbios->trc = bw_int_to_fixed(48);
+               vbios->dmifmc_urgent_latency = bw_int_to_fixed(3);
+               vbios->stutter_self_refresh_exit_latency = bw_frc_to_fixed(75, 10);
+               vbios->stutter_self_refresh_entry_latency = bw_frc_to_fixed(19, 10);
+               vbios->nbp_state_change_latency = bw_int_to_fixed(39);
+               vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+               vbios->scatter_gather_enable = false;
+               vbios->down_spread_percentage = bw_frc_to_fixed(5, 10);
+               vbios->cursor_width = 32;
+               vbios->average_compression_rate = 4;
+               vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 8;
+               vbios->blackout_duration = bw_int_to_fixed(0); /* us */
+               vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+               dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100;
+               dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100;
+               dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100;
+               dceip->large_cursor = false;
+               dceip->dmif_request_buffer_size = bw_int_to_fixed(2304);
+               dceip->dmif_pipe_en_fbc_chunk_tracker = true;
+               dceip->cursor_max_outstanding_group_num = 1;
+               dceip->lines_interleaved_into_lb = 2;
+               dceip->chunk_width = 256;
+               dceip->number_of_graphics_pipes = 6;
+               dceip->number_of_underlay_pipes = 0;
+               dceip->low_power_tiling_mode = 0;
+               dceip->display_write_back_supported = true;
+               dceip->argb_compression_support = true;
+               dceip->underlay_vscaler_efficiency6_bit_per_component =
                        bw_frc_to_fixed(35556, 10000);
-               dceip.underlay_vscaler_efficiency8_bit_per_component =
+               dceip->underlay_vscaler_efficiency8_bit_per_component =
                        bw_frc_to_fixed(34286, 10000);
-               dceip.underlay_vscaler_efficiency10_bit_per_component =
+               dceip->underlay_vscaler_efficiency10_bit_per_component =
                        bw_frc_to_fixed(32, 10);
-               dceip.underlay_vscaler_efficiency12_bit_per_component =
+               dceip->underlay_vscaler_efficiency12_bit_per_component =
                        bw_int_to_fixed(3);
-               dceip.graphics_vscaler_efficiency6_bit_per_component =
+               dceip->graphics_vscaler_efficiency6_bit_per_component =
                        bw_frc_to_fixed(35, 10);
-               dceip.graphics_vscaler_efficiency8_bit_per_component =
+               dceip->graphics_vscaler_efficiency8_bit_per_component =
                        bw_frc_to_fixed(34286, 10000);
-               dceip.graphics_vscaler_efficiency10_bit_per_component =
+               dceip->graphics_vscaler_efficiency10_bit_per_component =
                        bw_frc_to_fixed(32, 10);
-               dceip.graphics_vscaler_efficiency12_bit_per_component =
+               dceip->graphics_vscaler_efficiency12_bit_per_component =
                        bw_int_to_fixed(3);
-               dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
-               dceip.max_dmif_buffer_allocated = 4;
-               dceip.graphics_dmif_size = 24576;
-               dceip.underlay_luma_dmif_size = 19456;
-               dceip.underlay_chroma_dmif_size = 23552;
-               dceip.pre_downscaler_enabled = true;
-               dceip.underlay_downscale_prefetch_enabled = false;
-               dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
-               dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
-               dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
-               dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+               dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3);
+               dceip->max_dmif_buffer_allocated = 4;
+               dceip->graphics_dmif_size = 24576;
+               dceip->underlay_luma_dmif_size = 19456;
+               dceip->underlay_chroma_dmif_size = 23552;
+               dceip->pre_downscaler_enabled = true;
+               dceip->underlay_downscale_prefetch_enabled = false;
+               dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+               dceip->lb_size_per_component444 = bw_int_to_fixed(245952);
+               dceip->graphics_lb_nodownscaling_multi_line_prefetching = true;
+               dceip->stutter_and_dram_clock_state_change_gated_before_cursor =
                        bw_int_to_fixed(1);
-               dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+               dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed(
                        82176);
-               dceip.underlay420_chroma_lb_size_per_component =
+               dceip->underlay420_chroma_lb_size_per_component =
                        bw_int_to_fixed(164352);
-               dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+               dceip->underlay422_lb_size_per_component = bw_int_to_fixed(
                        82176);
-               dceip.cursor_chunk_width = bw_int_to_fixed(64);
-               dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
-               dceip.underlay_maximum_width_efficient_for_tiling =
+               dceip->cursor_chunk_width = bw_int_to_fixed(64);
+               dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+               dceip->underlay_maximum_width_efficient_for_tiling =
                        bw_int_to_fixed(1920);
-               dceip.underlay_maximum_height_efficient_for_tiling =
+               dceip->underlay_maximum_height_efficient_for_tiling =
                        bw_int_to_fixed(1080);
-               dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+               dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
                        bw_frc_to_fixed(3, 10);
-               dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+               dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
                        bw_int_to_fixed(25);
-               dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+               dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed(
                        2);
-               dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+               dceip->maximum_total_outstanding_pte_requests_allowed_by_saw =
                        bw_int_to_fixed(128);
-               dceip.limit_excessive_outstanding_dmif_requests = true;
-               dceip.linear_mode_line_request_alternation_slice =
+               dceip->limit_excessive_outstanding_dmif_requests = true;
+               dceip->linear_mode_line_request_alternation_slice =
                        bw_int_to_fixed(64);
-               dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+               dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode =
                        32;
-               dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
-               dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
-               dceip.request_efficiency = bw_frc_to_fixed(8, 10);
-               dceip.dispclk_per_request = bw_int_to_fixed(2);
-               dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
-               dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
-               dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
-               dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+               dceip->display_write_back420_luma_mcifwr_buffer_size = 12288;
+               dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192;
+               dceip->request_efficiency = bw_frc_to_fixed(8, 10);
+               dceip->dispclk_per_request = bw_int_to_fixed(2);
+               dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+               dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+               dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2;
+               dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
                break;
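
Vega10 is parameterized quite differently because of HBM: bw_def_hbm, 128-bit channels (a typical 2048-bit HBM2 configuration would resolve to 2048 / 128 = 16 channels), 16 banks, and only 8 GMC request slots reserved per channel versus 256 on the GDDR5 parts above. None of that changes in this patch; the diff here is still only the dot-to-arrow conversion.
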
        default:
                break;
        }
-       *bw_dceip = dceip;
-       *bw_vbios = vbios;
+       *bw_dceip = *dceip;
+       *bw_vbios = *vbios;
 
+       kfree(dceip);
+       kfree(vbios);
 }
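
The epilogue keeps the function's external contract unchanged: callers still hand in plain bw_dceip / bw_vbios structures and never see the heap temporaries, because the results are copied out by value before the temporaries are freed. The two struct assignments are whole-struct copies, equivalent to:

	memcpy(bw_dceip, dceip, sizeof(*bw_dceip));
	memcpy(bw_vbios, vbios, sizeof(*bw_vbios));

What this hunk cannot show is the allocation-failure path; presumably a failed kzalloc at the top of the function returns before reaching these copies, since dereferencing dceip or vbios here would otherwise be a NULL dereference.
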
 
 /*
index f7c728d..7d6c68c 100644
@@ -125,87 +125,136 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
 {
        struct hw_asic_id asic_id = ctx->asic_id;
 
-       struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
-
-       if (clk_mgr == NULL) {
-               BREAK_TO_DEBUGGER();
-               return NULL;
-       }
-
        switch (asic_id.chip_family) {
 #if defined(CONFIG_DRM_AMD_DC_SI)
-       case FAMILY_SI:
+       case FAMILY_SI: {
+               struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+               if (clk_mgr == NULL) {
+                       BREAK_TO_DEBUGGER();
+                       return NULL;
+               }
                dce60_clk_mgr_construct(ctx, clk_mgr);
-               break;
+               dce_clk_mgr_construct(ctx, clk_mgr);
+               return &clk_mgr->base;
+       }
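
The second hunk restructures dc_clk_mgr_create() the same way throughout: the single clk_mgr allocation that used to sit above the switch (removed at the top of this hunk) is pushed down into every family case, and each case returns &clk_mgr->base directly instead of breaking out to a common tail. The payoff is that a case can now allocate a family-specific container type, as the FAMILY_VGH case at the end of this hunk does with struct clk_mgr_vgh. Every case follows the same shape; as a sketch, with FAMILY_XX and dceXX_clk_mgr_construct() standing in for the real names:

	case FAMILY_XX: {
		struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
			BREAK_TO_DEBUGGER();
			return NULL;
		}
		dceXX_clk_mgr_construct(ctx, clk_mgr);
		return &clk_mgr->base;
	}
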
 #endif
        case FAMILY_CI:
-       case FAMILY_KV:
+       case FAMILY_KV: {
+               struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+               if (clk_mgr == NULL) {
+                       BREAK_TO_DEBUGGER();
+                       return NULL;
+               }
                dce_clk_mgr_construct(ctx, clk_mgr);
-               break;
-       case FAMILY_CZ:
+               return &clk_mgr->base;
+       }
+       case FAMILY_CZ: {
+               struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+               if (clk_mgr == NULL) {
+                       BREAK_TO_DEBUGGER();
+                       return NULL;
+               }
                dce110_clk_mgr_construct(ctx, clk_mgr);
-               break;
-       case FAMILY_VI:
+               return &clk_mgr->base;
+       }
+       case FAMILY_VI: {
+               struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+               if (clk_mgr == NULL) {
+                       BREAK_TO_DEBUGGER();
+                       return NULL;
+               }
                if (ASIC_REV_IS_TONGA_P(asic_id.hw_internal_rev) ||
                                ASIC_REV_IS_FIJI_P(asic_id.hw_internal_rev)) {
                        dce_clk_mgr_construct(ctx, clk_mgr);
-                       break;
+                       return &clk_mgr->base;
                }
                if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev) ||
                                ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) ||
                                ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) {
                        dce112_clk_mgr_construct(ctx, clk_mgr);
-                       break;
+                       return &clk_mgr->base;
                }
                if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev)) {
                        dce112_clk_mgr_construct(ctx, clk_mgr);
-                       break;
+                       return &clk_mgr->base;
+               }
+               return &clk_mgr->base;
+       }
+       case FAMILY_AI: {
+               struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+               if (clk_mgr == NULL) {
+                       BREAK_TO_DEBUGGER();
+                       return NULL;
                }
-               break;
-       case FAMILY_AI:
                if (ASICREV_IS_VEGA20_P(asic_id.hw_internal_rev))
                        dce121_clk_mgr_construct(ctx, clk_mgr);
                else
                        dce120_clk_mgr_construct(ctx, clk_mgr);
-               break;
-
+               return &clk_mgr->base;
+       }
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-       case FAMILY_RV:
+       case FAMILY_RV: {
+               struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+               if (clk_mgr == NULL) {
+                       BREAK_TO_DEBUGGER();
+                       return NULL;
+               }
+
                if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) {
                        rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
-                       break;
+                       return &clk_mgr->base;
                }
 
                if (ASICREV_IS_GREEN_SARDINE(asic_id.hw_internal_rev)) {
                        rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
-                       break;
+                       return &clk_mgr->base;
                }
                if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) {
                        rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
-                       break;
+                       return &clk_mgr->base;
                }
                if (ASICREV_IS_RAVEN(asic_id.hw_internal_rev) ||
                                ASICREV_IS_PICASSO(asic_id.hw_internal_rev)) {
                        rv1_clk_mgr_construct(ctx, clk_mgr, pp_smu);
-                       break;
+                       return &clk_mgr->base;
                }
-               break;
+               return &clk_mgr->base;
+       }
+       case FAMILY_NV: {
+               struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
 
-       case FAMILY_NV:
+               if (clk_mgr == NULL) {
+                       BREAK_TO_DEBUGGER();
+                       return NULL;
+               }
                if (ASICREV_IS_SIENNA_CICHLID_P(asic_id.hw_internal_rev)) {
                        dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
-                       break;
+                       return &clk_mgr->base;
                }
                if (ASICREV_IS_DIMGREY_CAVEFISH_P(asic_id.hw_internal_rev)) {
                        dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
-                       break;
+                       return &clk_mgr->base;
                }
                dcn20_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
-               break;
-
+               return &clk_mgr->base;
+       }
        case FAMILY_VGH:
-               if (ASICREV_IS_VANGOGH(asic_id.hw_internal_rev))
+               if (ASICREV_IS_VANGOGH(asic_id.hw_internal_rev)) {
+                       struct clk_mgr_vgh *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+                       if (clk_mgr == NULL) {
+                               BREAK_TO_DEBUGGER();
+                               return NULL;
+                       }
                        vg_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+                       return &clk_mgr->base.base;
+               }
                break;
 #endif
        default:
@@ -213,7 +262,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
                break;
        }
 
-       return &clk_mgr->base;
+       return NULL;
 }
 
 void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
@@ -226,6 +275,9 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
                if (ASICREV_IS_SIENNA_CICHLID_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
                        dcn3_clk_mgr_destroy(clk_mgr);
                }
+               if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
+                       dcn3_clk_mgr_destroy(clk_mgr);
+               }
                break;
 
        case FAMILY_VGH:
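
The rework of dc_clk_mgr_create() above moves the kzalloc() into each family case because the families no longer share one allocation size: Vangogh's manager is a wrapper type (struct clk_mgr_vgh, introduced later in this series), so a single sizeof(*clk_mgr) at the top of the function would under-allocate it. A minimal standalone sketch of the per-case allocation pattern, with illustrative struct and family names rather than the driver's:

#include <stdlib.h>

/* Base object embedded at the start of every family-specific manager. */
struct clk_mgr_base { int family; };

struct clk_mgr_plain { struct clk_mgr_base base; };

/* A family with private state needs a larger allocation than the rest. */
struct clk_mgr_extra { struct clk_mgr_plain base; int extra_state[64]; };

enum family { FAM_PLAIN, FAM_EXTRA };

static struct clk_mgr_base *clk_mgr_create(enum family f)
{
	switch (f) {
	case FAM_PLAIN: {
		struct clk_mgr_plain *m = calloc(1, sizeof(*m)); /* sized per case */

		if (!m)
			return NULL;
		m->base.family = f;
		return &m->base;
	}
	case FAM_EXTRA: {
		struct clk_mgr_extra *m = calloc(1, sizeof(*m)); /* larger wrapper */

		if (!m)
			return NULL;
		m->base.base.family = f;
		return &m->base.base;	/* two levels deep, as with clk_mgr_vgh */
	}
	default:
		return NULL;	/* unknown family: nothing was allocated */
	}
}

int main(void)
{
	return clk_mgr_create(FAM_EXTRA) ? 0 : 1;
}

The old code allocated once up front and fell through to a shared return &clk_mgr->base; keeping that tail return would have handed back an object no construct function initialized, hence the change to return NULL there.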
index 01b1853..887a542 100644 (file)
@@ -797,7 +797,18 @@ static struct wm_table lpddr4_wm_table_rn = {
                },
        }
 };
+static unsigned int find_socclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
+{
+       int i;
+
+       for (i = 0; i < PP_SMU_NUM_SOCCLK_DPM_LEVELS; i++) {
+               if (clock_table->SocClocks[i].Vol == voltage)
+                       return clock_table->SocClocks[i].Freq;
+       }
 
+       ASSERT(0);
+       return 0;
+}
 static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
 {
        int i;
@@ -841,6 +852,8 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params
                bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[j].Freq;
                bw_params->clk_table.entries[i].voltage = clock_table->FClocks[j].Vol;
                bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->FClocks[j].Vol);
+               bw_params->clk_table.entries[i].socclk_mhz = find_socclk_for_voltage(clock_table,
+                                                                       bw_params->clk_table.entries[i].voltage);
        }
 
        bw_params->vram_type = bios_info->memory_type;
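
find_socclk_for_voltage() mirrors the existing find_dcfclk_for_voltage() below it: a linear scan of the SMU's DPM table keyed on voltage, so each bandwidth-table entry picks up the socclk published at the voltage already chosen from the fclk table. A compilable model of the lookup, with an invented four-level table:

#include <stdio.h>

#define NUM_LEVELS 4

struct clk_level { unsigned int Freq; unsigned int Vol; };

/* Return the frequency published for a voltage level, 0 if absent. */
static unsigned int freq_for_voltage(const struct clk_level *t,
				     unsigned int voltage)
{
	int i;

	for (i = 0; i < NUM_LEVELS; i++)
		if (t[i].Vol == voltage)
			return t[i].Freq;
	return 0;	/* the driver ASSERTs here: a miss means a bad table */
}

int main(void)
{
	const struct clk_level socclk[NUM_LEVELS] = {
		{ 600, 1 }, { 720, 2 }, { 960, 3 }, { 1200, 4 },
	};

	printf("socclk at voltage 3: %u MHz\n", freq_for_voltage(socclk, 3));
	return 0;
}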
index 81ea5d3..577e7f9 100644 (file)
@@ -432,6 +432,12 @@ static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
                        clk_mgr->base.ctx->dc, clk_mgr_base->bw_params);
 }
 
+static bool dcn3_is_smu_present(struct clk_mgr *clk_mgr_base)
+{
+       struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+       return clk_mgr->smu_present;
+}
+
 static bool dcn3_are_clock_states_equal(struct dc_clocks *a,
                                        struct dc_clocks *b)
 {
@@ -494,6 +500,7 @@ static struct clk_mgr_funcs dcn3_funcs = {
                .are_clock_states_equal = dcn3_are_clock_states_equal,
                .enable_pme_wa = dcn3_enable_pme_wa,
                .notify_link_rate_change = dcn30_notify_link_rate_change,
+               .is_smu_present = dcn3_is_smu_present,
 };
 
 static void dcn3_init_clocks_fpga(struct clk_mgr *clk_mgr)
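
is_smu_present is an optional hook: managers that predate it leave the function pointer NULL, so callers must test the pointer before trusting the result. The dc_allow_idle_optimizations() hunk later in this series does exactly that. A trimmed, compilable sketch of the guard:

#include <stdbool.h>
#include <stdio.h>

struct clk_mgr;

struct clk_mgr_funcs {
	bool (*is_smu_present)(struct clk_mgr *clk_mgr);	/* optional */
};

struct clk_mgr {
	const struct clk_mgr_funcs *funcs;
};

/* NULL hook: no SMU dependency to check; hook returning false: the SMU
 * is absent, so skip SMU-backed features such as idle optimizations. */
static bool smu_backed_feature_allowed(struct clk_mgr *mgr)
{
	if (mgr->funcs->is_smu_present && !mgr->funcs->is_smu_present(mgr))
		return false;
	return true;
}

int main(void)
{
	const struct clk_mgr_funcs no_hook = { .is_smu_present = NULL };
	struct clk_mgr legacy = { .funcs = &no_hook };

	printf("allowed: %d\n", smu_backed_feature_allowed(&legacy));
	return 0;
}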
index 68942bb..07774fa 100644 (file)
@@ -113,10 +113,13 @@ int dcn301_smu_send_msg_with_param(
 
 int dcn301_smu_get_smu_version(struct clk_mgr_internal *clk_mgr)
 {
-       return dcn301_smu_send_msg_with_param(
-                       clk_mgr,
-                       VBIOSSMC_MSG_GetSmuVersion,
-                       0);
+       int smu_version = dcn301_smu_send_msg_with_param(clk_mgr,
+                                                        VBIOSSMC_MSG_GetSmuVersion,
+                                                        0);
+
+       DC_LOG_DEBUG("%s %x\n", __func__, smu_version);
+
+       return smu_version;
 }
 
 
@@ -124,6 +127,8 @@ int dcn301_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dispc
 {
        int actual_dispclk_set_mhz = -1;
 
+       DC_LOG_DEBUG("%s(%d)\n", __func__, requested_dispclk_khz);
+
        /*  Unit of SMU msg parameter is Mhz */
        actual_dispclk_set_mhz = dcn301_smu_send_msg_with_param(
                        clk_mgr,
@@ -137,6 +142,8 @@ int dcn301_smu_set_dprefclk(struct clk_mgr_internal *clk_mgr)
 {
        int actual_dprefclk_set_mhz = -1;
 
+       DC_LOG_DEBUG("%s %d\n", __func__, clk_mgr->base.dprefclk_khz / 1000);
+
        actual_dprefclk_set_mhz = dcn301_smu_send_msg_with_param(
                        clk_mgr,
                        VBIOSSMC_MSG_SetDprefclkFreq,
@@ -151,6 +158,8 @@ int dcn301_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int request
 {
        int actual_dcfclk_set_mhz = -1;
 
+       DC_LOG_DEBUG("%s(%d)\n", __func__, requested_dcfclk_khz);
+
        actual_dcfclk_set_mhz = dcn301_smu_send_msg_with_param(
                        clk_mgr,
                        VBIOSSMC_MSG_SetHardMinDcfclkByFreq,
@@ -163,6 +172,8 @@ int dcn301_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int r
 {
        int actual_min_ds_dcfclk_mhz = -1;
 
+       DC_LOG_DEBUG("%s(%d)\n", __func__, requested_min_ds_dcfclk_khz);
+
        actual_min_ds_dcfclk_mhz = dcn301_smu_send_msg_with_param(
                        clk_mgr,
                        VBIOSSMC_MSG_SetMinDeepSleepDcfclk,
@@ -175,6 +186,8 @@ int dcn301_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_kh
 {
        int actual_dppclk_set_mhz = -1;
 
+       DC_LOG_DEBUG("%s(%d)\n", __func__, requested_dpp_khz);
+
        actual_dppclk_set_mhz = dcn301_smu_send_msg_with_param(
                        clk_mgr,
                        VBIOSSMC_MSG_SetDppclkFreq,
@@ -187,6 +200,8 @@ void dcn301_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr,
 {
        //TODO: Work with smu team to define optimization options.
 
+       DC_LOG_DEBUG("%s(%x)\n", __func__, idle_info);
+
        dcn301_smu_send_msg_with_param(
                clk_mgr,
                VBIOSSMC_MSG_SetDisplayIdleOptimizations,
@@ -202,6 +217,8 @@ void dcn301_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool
                idle_info.idle_info.phy_ref_clk_off = 1;
        }
 
+       DC_LOG_DEBUG("%s(%d)\n", __func__, enable);
+
        dcn301_smu_send_msg_with_param(
                        clk_mgr,
                        VBIOSSMC_MSG_SetDisplayIdleOptimizations,
@@ -218,12 +235,16 @@ void dcn301_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)
 
 void dcn301_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high)
 {
+       DC_LOG_DEBUG("%s(%x)\n", __func__, addr_high);
+
        dcn301_smu_send_msg_with_param(clk_mgr,
                        VBIOSSMC_MSG_SetVbiosDramAddrHigh, addr_high);
 }
 
 void dcn301_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low)
 {
+       DC_LOG_DEBUG("%s(%x)\n", __func__, addr_low);
+
        dcn301_smu_send_msg_with_param(clk_mgr,
                        VBIOSSMC_MSG_SetVbiosDramAddrLow, addr_low);
 }
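
Every dcn301_smu_* helper in this file funnels through dcn301_smu_send_msg_with_param(), so one DC_LOG_DEBUG per wrapper yields a complete trace of SMU traffic. The sketch below models the param/message/response mailbox shape that such helpers typically sit on; the register offsets and the stubbed MMIO are invented for illustration, not the driver's:

#include <stdint.h>
#include <stdio.h>

/* Stub MMIO standing in for the driver's register accessors. */
static uint32_t regs[0x20];

static void mmio_write(uint32_t reg, uint32_t val) { regs[reg] = val; }
static uint32_t mmio_read(uint32_t reg)            { return regs[reg]; }

enum { MBOX_RESP = 0x10, MBOX_PARAM = 0x14, MBOX_MSG = 0x18 };

static uint32_t smu_send_msg_with_param(uint32_t msg_id, uint32_t param)
{
	mmio_write(MBOX_RESP, 0);	/* 1. arm the response register   */
	mmio_write(MBOX_PARAM, param);	/* 2. stage the argument          */
	mmio_write(MBOX_MSG, msg_id);	/* 3. writing the ID kicks the FW */
	regs[MBOX_RESP] = 1;		/* (stub: pretend the FW answered) */
	while (!mmio_read(MBOX_RESP))	/* 4. poll for the acknowledge    */
		;
	return mmio_read(MBOX_PARAM);	/* result often comes back here   */
}

int main(void)
{
	printf("smu returned %u\n", smu_send_msg_with_param(2, 48000));
	return 0;
}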
index aadb801..c636b58 100644 (file)
@@ -32,9 +32,8 @@
 // For dcn20_update_clocks_update_dpp_dto
 #include "dcn20/dcn20_clk_mgr.h"
 
-
-
 #include "vg_clk_mgr.h"
+#include "dcn301_smu.h"
 #include "reg_helper.h"
 #include "core_types.h"
 #include "dm_helpers.h"
 
 /* Macros */
 
+#define TO_CLK_MGR_VGH(clk_mgr)\
+       container_of(clk_mgr, struct clk_mgr_vgh, base)
+
 #define REG(reg_name) \
        (CLK_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
 
 /* TODO: evaluate how to lower or disable all dcn clocks in screen off case */
-int vg_get_active_display_cnt_wa(
+static int vg_get_active_display_cnt_wa(
                struct dc *dc,
                struct dc_state *context)
 {
@@ -134,13 +136,13 @@ void vg_update_clocks(struct clk_mgr *clk_mgr_base,
                }
        }
 
-       if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
+       if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz) && !dc->debug.disable_min_fclk) {
                clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
                dcn301_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
        }
 
        if (should_set_clock(safe_to_lower,
-                       new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
+                       new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz) && !dc->debug.disable_min_fclk) {
                clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
                dcn301_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
        }
@@ -377,7 +379,7 @@ void vg_get_clk_states(struct clk_mgr *clk_mgr_base, struct clk_states *s)
        s->dprefclk_khz = sb.dprefclk * 1000;
 }
 
-void vg_enable_pme_wa(struct clk_mgr *clk_mgr_base)
+static void vg_enable_pme_wa(struct clk_mgr *clk_mgr_base)
 {
        struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
 
@@ -449,15 +451,16 @@ static void vg_build_watermark_ranges(struct clk_bw_params *bw_params, struct wa
 }
 
 
-void vg_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
+static void vg_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
 {
        struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
-       struct watermarks *table = clk_mgr_base->smu_wm_set.wm_set;
+       struct clk_mgr_vgh *clk_mgr_vgh = TO_CLK_MGR_VGH(clk_mgr);
+       struct watermarks *table = clk_mgr_vgh->smu_wm_set.wm_set;
 
        if (!clk_mgr->smu_ver)
                return;
 
-       if (!table || clk_mgr_base->smu_wm_set.mc_address.quad_part == 0)
+       if (!table || clk_mgr_vgh->smu_wm_set.mc_address.quad_part == 0)
                return;
 
        memset(table, 0, sizeof(*table));
@@ -465,9 +468,9 @@ void vg_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
        vg_build_watermark_ranges(clk_mgr_base->bw_params, table);
 
        dcn301_smu_set_dram_addr_high(clk_mgr,
-                       clk_mgr_base->smu_wm_set.mc_address.high_part);
+                       clk_mgr_vgh->smu_wm_set.mc_address.high_part);
        dcn301_smu_set_dram_addr_low(clk_mgr,
-                       clk_mgr_base->smu_wm_set.mc_address.low_part);
+                       clk_mgr_vgh->smu_wm_set.mc_address.low_part);
        dcn301_smu_transfer_wm_table_dram_2_smu(clk_mgr);
 }
 
@@ -625,7 +628,7 @@ static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_ta
        return 0;
 }
 
-void vg_clk_mgr_helper_populate_bw_params(
+static void vg_clk_mgr_helper_populate_bw_params(
                struct clk_mgr_internal *clk_mgr,
                struct integrated_info *bios_info,
                const struct vg_dpm_clocks *clock_table)
@@ -703,7 +706,7 @@ static struct vg_dpm_clocks dummy_clocks = {
 
 static struct watermarks dummy_wms = { 0 };
 
-void vg_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
+static void vg_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
                struct smu_dpm_clks *smu_dpm_clks)
 {
        struct vg_dpm_clocks *table = smu_dpm_clks->dpm_clks;
@@ -725,39 +728,39 @@ void vg_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
 
 void vg_clk_mgr_construct(
                struct dc_context *ctx,
-               struct clk_mgr_internal *clk_mgr,
+               struct clk_mgr_vgh *clk_mgr,
                struct pp_smu_funcs *pp_smu,
                struct dccg *dccg)
 {
        struct smu_dpm_clks smu_dpm_clks = { 0 };
 
-       clk_mgr->base.ctx = ctx;
-       clk_mgr->base.funcs = &vg_funcs;
+       clk_mgr->base.base.ctx = ctx;
+       clk_mgr->base.base.funcs = &vg_funcs;
 
-       clk_mgr->pp_smu = pp_smu;
+       clk_mgr->base.pp_smu = pp_smu;
 
-       clk_mgr->dccg = dccg;
-       clk_mgr->dfs_bypass_disp_clk = 0;
+       clk_mgr->base.dccg = dccg;
+       clk_mgr->base.dfs_bypass_disp_clk = 0;
 
-       clk_mgr->dprefclk_ss_percentage = 0;
-       clk_mgr->dprefclk_ss_divider = 1000;
-       clk_mgr->ss_on_dprefclk = false;
-       clk_mgr->dfs_ref_freq_khz = 48000;
+       clk_mgr->base.dprefclk_ss_percentage = 0;
+       clk_mgr->base.dprefclk_ss_divider = 1000;
+       clk_mgr->base.ss_on_dprefclk = false;
+       clk_mgr->base.dfs_ref_freq_khz = 48000;
 
-       clk_mgr->base.smu_wm_set.wm_set = (struct watermarks *)dm_helpers_allocate_gpu_mem(
-                               clk_mgr->base.ctx,
+       clk_mgr->smu_wm_set.wm_set = (struct watermarks *)dm_helpers_allocate_gpu_mem(
+                               clk_mgr->base.base.ctx,
                                DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
                                sizeof(struct watermarks),
-                               &clk_mgr->base.smu_wm_set.mc_address.quad_part);
+                               &clk_mgr->smu_wm_set.mc_address.quad_part);
 
-       if (clk_mgr->base.smu_wm_set.wm_set == 0) {
-               clk_mgr->base.smu_wm_set.wm_set = &dummy_wms;
-               clk_mgr->base.smu_wm_set.mc_address.quad_part = 0;
+       if (clk_mgr->smu_wm_set.wm_set == 0) {
+               clk_mgr->smu_wm_set.wm_set = &dummy_wms;
+               clk_mgr->smu_wm_set.mc_address.quad_part = 0;
        }
-       ASSERT(clk_mgr->base.smu_wm_set.wm_set);
+       ASSERT(clk_mgr->smu_wm_set.wm_set);
 
        smu_dpm_clks.dpm_clks = (struct vg_dpm_clocks *)dm_helpers_allocate_gpu_mem(
-                               clk_mgr->base.ctx,
+                               clk_mgr->base.base.ctx,
                                DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
                                sizeof(struct vg_dpm_clocks),
                                &smu_dpm_clks.mc_address.quad_part);
@@ -771,21 +774,21 @@ void vg_clk_mgr_construct(
 
        if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
                vg_funcs.update_clocks = dcn2_update_clocks_fpga;
-               clk_mgr->base.dentist_vco_freq_khz = 3600000;
+               clk_mgr->base.base.dentist_vco_freq_khz = 3600000;
        } else {
                struct clk_log_info log_info = {0};
 
-               clk_mgr->smu_ver = dcn301_smu_get_smu_version(clk_mgr);
+               clk_mgr->base.smu_ver = dcn301_smu_get_smu_version(&clk_mgr->base);
 
-               if (clk_mgr->smu_ver)
-                       clk_mgr->smu_present = true;
+               if (clk_mgr->base.smu_ver)
+                       clk_mgr->base.smu_present = true;
 
                /* TODO: Check we get what we expect during bringup */
-               clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr);
+               clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);
 
                /* in case we don't get a value from the register, use default */
-               if (clk_mgr->base.dentist_vco_freq_khz == 0)
-                       clk_mgr->base.dentist_vco_freq_khz = 3600000;
+               if (clk_mgr->base.base.dentist_vco_freq_khz == 0)
+                       clk_mgr->base.base.dentist_vco_freq_khz = 3600000;
 
                if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
                        vg_bw_params.wm_table = lpddr5_wm_table;
@@ -793,36 +796,38 @@ void vg_clk_mgr_construct(
                        vg_bw_params.wm_table = ddr4_wm_table;
                }
                /* Saved clocks configured at boot for debug purposes */
-               vg_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
+               vg_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
        }
 
-       clk_mgr->base.dprefclk_khz = 600000;
-       dce_clock_read_ss_info(clk_mgr);
+       clk_mgr->base.base.dprefclk_khz = 600000;
+       dce_clock_read_ss_info(&clk_mgr->base);
 
-       clk_mgr->base.bw_params = &vg_bw_params;
+       clk_mgr->base.base.bw_params = &vg_bw_params;
 
-       vg_get_dpm_table_from_smu(clk_mgr, &smu_dpm_clks);
+       vg_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
        if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
                vg_clk_mgr_helper_populate_bw_params(
-                               clk_mgr,
+                               &clk_mgr->base,
                                ctx->dc_bios->integrated_info,
                                smu_dpm_clks.dpm_clks);
        }
 
        if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0)
-               dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+               dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
                                smu_dpm_clks.dpm_clks);
 /*
-       if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->smu_ver) {
+       if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->base.smu_ver) {
                 enable powerfeatures when displaycount goes to 0
                dcn301_smu_enable_phy_refclk_pwrdwn(clk_mgr, !debug->disable_48mhz_pwrdwn);
        }
 */
 }
 
-void vg_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
+void vg_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int)
 {
-       if (clk_mgr->base.smu_wm_set.wm_set && clk_mgr->base.smu_wm_set.mc_address.quad_part != 0)
-               dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
-                               clk_mgr->base.smu_wm_set.wm_set);
+       struct clk_mgr_vgh *clk_mgr = TO_CLK_MGR_VGH(clk_mgr_int);
+
+       if (clk_mgr->smu_wm_set.wm_set && clk_mgr->smu_wm_set.mc_address.quad_part != 0)
+               dm_helpers_free_gpu_mem(clk_mgr_int->base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+                               clk_mgr->smu_wm_set.wm_set);
 }
index b5115b3..7255477 100644 (file)
 
 #ifndef __VG_CLK_MGR_H__
 #define __VG_CLK_MGR_H__
+#include "clk_mgr_internal.h"
 
-int vg_get_active_display_cnt_wa(
-               struct dc *dc,
-               struct dc_state *context);
+struct watermarks;
 
-void vg_enable_pme_wa(struct clk_mgr *clk_mgr_base);
+struct smu_watermark_set {
+       struct watermarks *wm_set;
+       union large_integer mc_address;
+};
+
+struct clk_mgr_vgh {
+       struct clk_mgr_internal base;
+       struct smu_watermark_set smu_wm_set;
+};
 
 void vg_clk_mgr_construct(struct dc_context *ctx,
-               struct clk_mgr_internal *clk_mgr,
+               struct clk_mgr_vgh *clk_mgr,
                struct pp_smu_funcs *pp_smu,
                struct dccg *dccg);
 
 void vg_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr);
 
-#include "dcn301_smu.h"
-void vg_notify_wm_ranges(struct clk_mgr *clk_mgr_base);
-
-void vg_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
-               struct smu_dpm_clks *smu_dpm_clks);
-
-void vg_clk_mgr_helper_populate_bw_params(
-               struct clk_mgr_internal *clk_mgr,
-               struct integrated_info *bios_info,
-               const struct vg_dpm_clocks *clock_table);
-
 #endif //__VG_CLK_MGR_H__
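
With this header in place, the nesting behind the clk_mgr->base.base chains in the .c file above resolves as:

struct clk_mgr_vgh
  .base         struct clk_mgr_internal   (shared hardware-internal state)
    .base       struct clk_mgr            (the handle dc core holds)
  .smu_wm_set   struct smu_watermark_set  (Vangogh-only watermark buffer)

so clk_mgr->base.base.ctx reaches the dc-level context while clk_mgr->smu_wm_set stays private to Vangogh, and dropping the vg_* prototypes here is safe because those helpers became static above.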
index 8e6c815..8f0a138 100644 (file)
 #include "timing_generator.h"
 #include "abm.h"
 #include "virtual/virtual_link_encoder.h"
+#include "hubp.h"
 
 #include "link_hwss.h"
 #include "link_encoder.h"
+#include "link_enc_cfg.h"
 
 #include "dc_link_ddc.h"
 #include "dm_helpers.h"
@@ -304,7 +306,10 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
        int i = 0;
        bool ret = false;
 
-       stream->adjust = *adjust;
+       stream->adjust.v_total_max = adjust->v_total_max;
+       stream->adjust.v_total_mid = adjust->v_total_mid;
+       stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
+       stream->adjust.v_total_min = adjust->v_total_min;
 
        for (i = 0; i < MAX_PIPES; i++) {
                struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -312,10 +317,7 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
                if (pipe->stream == stream && pipe->stream_res.tg) {
                        dc->hwss.set_drr(&pipe,
                                        1,
-                                       adjust->v_total_min,
-                                       adjust->v_total_max,
-                                       adjust->v_total_mid,
-                                       adjust->v_total_mid_frame_num);
+                                       *adjust);
 
                        ret = true;
                }
@@ -870,6 +872,9 @@ static bool dc_construct(struct dc *dc,
        if (!create_links(dc, init_params->num_virtual_links))
                goto fail;
 
+       /* Initialise DIG link encoder resource tracking variables. */
+       link_enc_cfg_init(dc, dc->current_state);
+
        return true;
 
 fail:
@@ -2091,6 +2096,10 @@ static enum surface_update_type check_update_surfaces_for_stream(
        if (stream_status == NULL || stream_status->plane_count != surface_count)
                overall_type = UPDATE_TYPE_FULL;
 
+       if (stream_update && stream_update->pending_test_pattern) {
+               overall_type = UPDATE_TYPE_FULL;
+       }
+
        /* some stream updates require passive update */
        if (stream_update) {
                union stream_update_flags *su_flags = &stream_update->stream->update_flags;
@@ -2390,6 +2399,8 @@ static void copy_stream_update_to_stream(struct dc *dc,
        if (update->dither_option)
                stream->dither_option = *update->dither_option;
 
+       if (update->pending_test_pattern)
+               stream->test_pattern = *update->pending_test_pattern;
        /* update current stream with writeback info */
        if (update->wb_update) {
                int i;
@@ -2485,6 +2496,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
                                }
                        }
 
+
                        /* Full fe update*/
                        if (update_type == UPDATE_TYPE_FAST)
                                continue;
@@ -2492,6 +2504,15 @@ static void commit_planes_do_stream_update(struct dc *dc,
                        if (stream_update->dsc_config)
                                dp_update_dsc_config(pipe_ctx);
 
+                       if (stream_update->pending_test_pattern) {
+                               dc_link_dp_set_test_pattern(stream->link,
+                                       stream->test_pattern.type,
+                                       stream->test_pattern.color_space,
+                                       stream->test_pattern.p_link_settings,
+                                       stream->test_pattern.p_custom_pattern,
+                                       stream->test_pattern.cust_pattern_size);
+                       }
+
                        if (stream_update->dpms_off) {
                                if (*stream_update->dpms_off) {
                                        core_link_disable_stream(pipe_ctx);
@@ -2578,6 +2599,17 @@ static void commit_planes_for_stream(struct dc *dc,
                }
        }
 
+#ifdef CONFIG_DRM_AMD_DC_DCN
+       if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
+               struct pipe_ctx *mpcc_pipe;
+               struct pipe_ctx *odm_pipe;
+
+               for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
+                       for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+                               odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
+       }
+#endif
+
        if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
                if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
                        if (should_use_dmub_lock(stream->link)) {
@@ -2784,6 +2816,9 @@ static void commit_planes_for_stream(struct dc *dc,
        for (j = 0; j < dc->res_pool->pipe_count; j++) {
                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
 
+               if (!pipe_ctx->plane_state)
+                       continue;
+
                if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
                                !pipe_ctx->stream || pipe_ctx->stream != stream ||
                                !pipe_ctx->plane_state->update_flags.bits.addr_update)
@@ -3225,6 +3260,10 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow)
        if (dc->debug.disable_idle_power_optimizations)
                return;
 
+       if (dc->clk_mgr->funcs->is_smu_present)
+               if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
+                       return;
+
        if (allow == dc->idle_optimizations_allowed)
                return;
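
The pending_test_pattern plumbing added to this file has three parts: copy_stream_update_to_stream() latches the request into the stream, check_update_surfaces_for_stream() promotes the update to UPDATE_TYPE_FULL, and commit_planes_do_stream_update() programs it through dc_link_dp_set_test_pattern(). A compilable model of just the promotion rule:

#include <stdio.h>

enum update_type { UPDATE_TYPE_FAST, UPDATE_TYPE_FULL };

struct test_pattern { int type; };

/* Trimmed model of dc_stream_update: only the field this patch adds. */
struct stream_update {
	const struct test_pattern *pending_test_pattern;
};

/* A pending test pattern always forces the full (locked, front-end
 * reprogramming) path; it can never ride the fast path. */
static enum update_type classify(const struct stream_update *u)
{
	if (u && u->pending_test_pattern)
		return UPDATE_TYPE_FULL;
	return UPDATE_TYPE_FAST;
}

int main(void)
{
	struct test_pattern tp = { .type = 1 };
	struct stream_update u = { .pending_test_pattern = &tp };

	printf("%s\n", classify(&u) == UPDATE_TYPE_FULL ? "FULL" : "FAST");
	return 0;
}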
 
index f9a33dc..29bc287 100644 (file)
@@ -92,11 +92,14 @@ static void dc_link_destruct(struct dc_link *link)
                link->panel_cntl->funcs->destroy(&link->panel_cntl);
 
        if (link->link_enc) {
-               /* Update link encoder tracking variables. These are used for the dynamic
-                * assignment of link encoders to streams.
+               /* Update link encoder resource tracking variables. These are used for
+                * the dynamic assignment of link encoders to streams. Virtual links
+                * are not assigned encoder resources on creation.
                 */
-               link->dc->res_pool->link_encoders[link->link_enc->preferred_engine] = NULL;
-               link->dc->res_pool->dig_link_enc_count--;
+               if (link->link_id.id != CONNECTOR_ID_VIRTUAL) {
+                       link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA] = NULL;
+                       link->dc->res_pool->dig_link_enc_count--;
+               }
                link->link_enc->funcs->destroy(&link->link_enc);
        }
 
@@ -1407,6 +1410,8 @@ static bool dc_link_construct(struct dc_link *link,
        link->link_id =
                bios->funcs->get_connector_id(bios, init_params->connector_index);
 
+       link->ep_type = DISPLAY_ENDPOINT_PHY;
+
        DC_LOG_DC("BIOS object table - link_id: %d", link->link_id.id);
 
        if (bios->funcs->get_disp_connector_caps_info) {
@@ -1506,10 +1511,12 @@ static bool dc_link_construct(struct dc_link *link,
                (link->link_id.id == CONNECTOR_ID_EDP ||
                        link->link_id.id == CONNECTOR_ID_LVDS)) {
                panel_cntl_init_data.ctx = dc_ctx;
-               panel_cntl_init_data.inst = link->link_index;
+               panel_cntl_init_data.inst =
+                       panel_cntl_init_data.ctx->dc_edp_id_count;
                link->panel_cntl =
                        link->dc->res_pool->funcs->panel_cntl_create(
                                                                &panel_cntl_init_data);
+               panel_cntl_init_data.ctx->dc_edp_id_count++;
 
                if (link->panel_cntl == NULL) {
                        DC_ERROR("Failed to create link panel_cntl!\n");
@@ -1541,7 +1548,8 @@ static bool dc_link_construct(struct dc_link *link,
        /* Update link encoder tracking variables. These are used for the dynamic
         * assignment of link encoders to streams.
         */
-       link->dc->res_pool->link_encoders[link->link_enc->preferred_engine] = link->link_enc;
+       link->eng_id = link->link_enc->preferred_engine;
+       link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA] = link->link_enc;
        link->dc->res_pool->dig_link_enc_count++;
 
        link->link_enc_hw_inst = link->link_enc->transmitter;
@@ -2883,8 +2891,8 @@ static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
 static struct fixed31_32 get_pbn_from_bw_in_kbps(uint64_t kbps)
 {
        struct fixed31_32 peak_kbps;
-       uint32_t numerator;
-       uint32_t denominator;
+       uint32_t numerator = 0;
+       uint32_t denominator = 1;
 
        /*
         * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
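
The construct/destruct changes above index res_pool->link_encoders[] by link->eng_id - ENGINE_ID_DIGA rather than by the raw engine value, which matters because the DIG engine IDs are one sub-range of a larger engine enum and the table holds only DIG entries. A standalone illustration with invented enum values:

#include <stdio.h>

/* Illustrative only: in the driver the DIG IDs sit inside a larger enum,
 * so the first DIG is not value 0. */
enum engine_id {
	ENGINE_ID_UNKNOWN = -1,
	ENGINE_ID_OTHER = 0,	/* hypothetical non-DIG entry before DIGA */
	ENGINE_ID_DIGA,
	ENGINE_ID_DIGB,
	ENGINE_ID_DIGC,
};

#define MAX_DIG_LINK_ENC 3

static const char *link_encoders[MAX_DIG_LINK_ENC];

/* Rebase the enum before indexing: using the raw ID would waste slot 0
 * and run one past the end of the DIG-only table. */
static void track(enum engine_id eng_id, const char *enc)
{
	link_encoders[eng_id - ENGINE_ID_DIGA] = enc;
}

int main(void)
{
	track(ENGINE_ID_DIGB, "DIGB encoder");
	printf("slot 1: %s\n", link_encoders[1]);
	return 0;
}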
index 47e6c33..7d2e433 100644 (file)
@@ -284,7 +284,7 @@ static uint8_t dc_dp_initialize_scrambling_data_symbols(
 
 static inline bool is_repeater(struct dc_link *link, uint32_t offset)
 {
-       return (link->lttpr_non_transparent_mode && offset != 0);
+       return (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (offset != 0);
 }
 
 static void dpcd_set_lt_pattern_and_lane_settings(
@@ -1072,7 +1072,7 @@ static enum link_training_result perform_clock_recovery_sequence(
                /* 3. wait receiver to lock-on*/
                wait_time_microsec = lt_settings->cr_pattern_time;
 
-               if (link->lttpr_non_transparent_mode)
+               if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
                        wait_time_microsec = TRAINING_AUX_RD_INTERVAL;
 
                wait_for_training_aux_rd_interval(
@@ -1098,11 +1098,13 @@ static enum link_training_result perform_clock_recovery_sequence(
                if (is_max_vs_reached(lt_settings))
                        break;
 
-               /* 7. same voltage*/
-               /* Note: VS same for all lanes,
-               * so comparing first lane is sufficient*/
-               if (lt_settings->lane_settings[0].VOLTAGE_SWING ==
+               /* 7. same lane settings */
+               /* Note: settings are the same for all lanes,
+                * so comparing first lane is sufficient */
+               if ((lt_settings->lane_settings[0].VOLTAGE_SWING ==
                        req_settings.lane_settings[0].VOLTAGE_SWING)
+                       && (lt_settings->lane_settings[0].PRE_EMPHASIS ==
+                               req_settings.lane_settings[0].PRE_EMPHASIS))
                        retries_cr++;
                else
                        retries_cr = 0;
@@ -1324,7 +1326,17 @@ static uint8_t convert_to_count(uint8_t lttpr_repeater_count)
        return 0; // invalid value
 }
 
-static void configure_lttpr_mode(struct dc_link *link)
+static void configure_lttpr_mode_transparent(struct dc_link *link)
+{
+       uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;
+
+       core_link_write_dpcd(link,
+                       DP_PHY_REPEATER_MODE,
+                       (uint8_t *)&repeater_mode,
+                       sizeof(repeater_mode));
+}
+
+static void configure_lttpr_mode_non_transparent(struct dc_link *link)
 {
        /* aux timeout is already set to extended */
        /* RESET/SET lttpr mode to enable non transparent mode */
@@ -1344,7 +1356,7 @@ static void configure_lttpr_mode(struct dc_link *link)
                link->dpcd_caps.lttpr_caps.mode = repeater_mode;
        }
 
-       if (link->lttpr_non_transparent_mode) {
+       if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
 
                DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__);
 
@@ -1560,8 +1572,10 @@ enum link_training_result dc_link_dp_perform_link_training(
                        &lt_settings);
 
        /* Configure lttpr mode */
-       if (link->lttpr_non_transparent_mode)
-               configure_lttpr_mode(link);
+       if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+               configure_lttpr_mode_non_transparent(link);
+       else if (link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
+               configure_lttpr_mode_transparent(link);
 
        if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
                start_clock_recovery_pattern_early(link, &lt_settings, DPRX);
@@ -1576,7 +1590,7 @@ enum link_training_result dc_link_dp_perform_link_training(
 
        dp_set_fec_ready(link, fec_enable);
 
-       if (link->lttpr_non_transparent_mode) {
+       if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
 
                /* 2. perform link training (set link training done
                 *  to false is done as well)
@@ -1633,6 +1647,42 @@ enum link_training_result dc_link_dp_perform_link_training(
        return status;
 }
 
+static enum dp_panel_mode try_enable_assr(struct dc_stream_state *stream)
+{
+       struct dc_link *link = stream->link;
+       enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+       struct cp_psp *cp_psp = &stream->ctx->cp_psp;
+#endif
+
+       /* ASSR must be supported on the panel */
+       if (panel_mode == DP_PANEL_MODE_DEFAULT)
+               return panel_mode;
+
+       /* eDP or internal DP only */
+       if (link->connector_signal != SIGNAL_TYPE_EDP &&
+               !(link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+                link->is_internal_display))
+               return DP_PANEL_MODE_DEFAULT;
+
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+       if (cp_psp && cp_psp->funcs.enable_assr) {
+               if (!cp_psp->funcs.enable_assr(cp_psp->handle, link)) {
+                       /* since eDP implies ASSR on, change panel
+                        * mode to disable ASSR
+                        */
+                       panel_mode = DP_PANEL_MODE_DEFAULT;
+               }
+       } else
+               panel_mode = DP_PANEL_MODE_DEFAULT;
+
+#else
+       /* turn off ASSR if the implementation is not compiled in */
+       panel_mode = DP_PANEL_MODE_DEFAULT;
+#endif
+       return panel_mode;
+}
+
 bool perform_link_training_with_retries(
        const struct dc_link_settings *link_setting,
        bool skip_video_pattern,
@@ -1644,7 +1694,7 @@ bool perform_link_training_with_retries(
        uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY;
        struct dc_stream_state *stream = pipe_ctx->stream;
        struct dc_link *link = stream->link;
-       enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
+       enum dp_panel_mode panel_mode;
 
        /* We need to do this before the link training to ensure the idle pattern in SST
         * mode will be sent right after the link training
@@ -1669,32 +1719,25 @@ bool perform_link_training_with_retries(
                        msleep(delay_dp_power_up_in_ms);
                }
 
-#ifdef CONFIG_DRM_AMD_DC_HDCP
-               if (panel_mode == DP_PANEL_MODE_EDP) {
-                       struct cp_psp *cp_psp = &stream->ctx->cp_psp;
-
-                       if (cp_psp && cp_psp->funcs.enable_assr) {
-                               if (!cp_psp->funcs.enable_assr(cp_psp->handle, link)) {
-                                       /* since eDP implies ASSR on, change panel
-                                        * mode to disable ASSR
-                                        */
-                                       panel_mode = DP_PANEL_MODE_DEFAULT;
-                               }
-                       } else
-                               panel_mode = DP_PANEL_MODE_DEFAULT;
-               }
-#endif
-
+               panel_mode = try_enable_assr(stream);
                dp_set_panel_mode(link, panel_mode);
+               DC_LOG_DETECTION_DP_CAPS("Link: %d ASSR enabled: %d\n",
+                        link->link_index,
+                        panel_mode != DP_PANEL_MODE_DEFAULT);
 
                if (link->aux_access_disabled) {
                        dc_link_dp_perform_link_training_skip_aux(link, link_setting);
                        return true;
-               } else if (dc_link_dp_perform_link_training(
-                               link,
-                               link_setting,
-                               skip_video_pattern) == LINK_TRAINING_SUCCESS)
-                       return true;
+               } else {
+                       enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0;
+
+                       status = dc_link_dp_perform_link_training(
+                                       link,
+                                       link_setting,
+                                       skip_video_pattern);
+                       if (status == LINK_TRAINING_SUCCESS)
+                               return true;
+               }
 
                /* latest link training still fail, skip delay and keep PHY on
                 */
@@ -1873,7 +1916,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
         * account for lttpr repeaters cap
         * notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3).
         */
-       if (link->lttpr_non_transparent_mode) {
+       if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
                if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
                        max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
 
@@ -2031,7 +2074,7 @@ bool dp_verify_link_cap(
        max_link_cap = get_max_link_cap(link);
 
        /* Grant extended timeout request */
-       if (link->lttpr_non_transparent_mode && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) {
+       if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (link->dpcd_caps.lttpr_caps.max_ext_timeout > 0)) {
                uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80;
 
                core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant));
@@ -2782,10 +2825,27 @@ static void dp_test_send_link_test_pattern(struct dc_link *link)
        enum dp_test_pattern test_pattern;
        enum dp_test_pattern_color_space test_pattern_color_space =
                        DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED;
+       enum dc_color_depth requestColorDepth = COLOR_DEPTH_UNDEFINED;
+       struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
+       struct pipe_ctx *pipe_ctx = NULL;
+       int i;
 
        memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern));
        memset(&dpcd_test_params, 0, sizeof(dpcd_test_params));
 
+       for (i = 0; i < MAX_PIPES; i++) {
+               if (pipes[i].stream == NULL)
+                       continue;
+
+               if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) {
+                       pipe_ctx = &pipes[i];
+                       break;
+               }
+       }
+
+       if (pipe_ctx == NULL)
+               return;
+
        /* get link test pattern and pattern parameters */
        core_link_read_dpcd(
                        link,
@@ -2823,6 +2883,33 @@ static void dp_test_send_link_test_pattern(struct dc_link *link)
                                DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
                                DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
 
+       switch (dpcd_test_params.bits.BPC) {
+       case 0: // 6 bits
+               requestColorDepth = COLOR_DEPTH_666;
+               break;
+       case 1: // 8 bits
+               requestColorDepth = COLOR_DEPTH_888;
+               break;
+       case 2: // 10 bits
+               requestColorDepth = COLOR_DEPTH_101010;
+               break;
+       case 3: // 12 bits
+               requestColorDepth = COLOR_DEPTH_121212;
+               break;
+       default:
+               break;
+       }
+
+       if (requestColorDepth != COLOR_DEPTH_UNDEFINED
+                       && pipe_ctx->stream->timing.display_color_depth != requestColorDepth) {
+               DC_LOG_DEBUG("%s: original bpc %d, changing to %d\n",
+                               __func__,
+                               pipe_ctx->stream->timing.display_color_depth,
+                               requestColorDepth);
+               pipe_ctx->stream->timing.display_color_depth = requestColorDepth;
+               dp_update_dsc_config(pipe_ctx);
+       }
+
        dc_link_dp_set_test_pattern(
                        link,
                        test_pattern,
@@ -3369,6 +3456,9 @@ static bool retrieve_link_cap(struct dc_link *link)
        struct dp_sink_hw_fw_revision dp_hw_fw_revision;
        bool is_lttpr_present = false;
        const uint32_t post_oui_delay = 30; // 30ms
+       bool vbios_lttpr_enable = false;
+       bool vbios_lttpr_interop = false;
+       struct dc_bios *bios = link->dc->ctx->dc_bios;
 
        memset(dpcd_data, '\0', sizeof(dpcd_data));
        memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
@@ -3416,13 +3506,45 @@ static bool retrieve_link_cap(struct dc_link *link)
                return false;
        }
 
-       if (link->dc->caps.extended_aux_timeout_support &&
-                       link->dc->config.allow_lttpr_non_transparent_mode) {
+       /* Query BIOS to determine if LTTPR functionality is forced on by system */
+       if (bios->funcs->get_lttpr_caps) {
+               enum bp_result bp_query_result;
+               uint8_t is_vbios_lttpr_enable = 0;
+
+               bp_query_result = bios->funcs->get_lttpr_caps(bios, &is_vbios_lttpr_enable);
+               vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+       }
+
+       if (bios->funcs->get_lttpr_interop) {
+               enum bp_result bp_query_result;
+               uint8_t is_vbios_interop_enabled = 0;
+
+               bp_query_result = bios->funcs->get_lttpr_interop(bios, &is_vbios_interop_enabled);
+               vbios_lttpr_interop = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
+       }
+
+       /*
+        * Logic to determine LTTPR mode
+        */
+       link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+       if (vbios_lttpr_enable && vbios_lttpr_interop)
+               link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
+       else if (!vbios_lttpr_enable && vbios_lttpr_interop) {
+               if (link->dc->config.allow_lttpr_non_transparent_mode)
+                       link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
+               else
+                       link->lttpr_mode = LTTPR_MODE_TRANSPARENT;
+       } else if (!vbios_lttpr_enable && !vbios_lttpr_interop) {
+               if (!link->dc->config.allow_lttpr_non_transparent_mode
+                       || !link->dc->caps.extended_aux_timeout_support)
+                       link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+               else
+                       link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
+       }
+
+       if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT || link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
                /* By reading LTTPR capability, RX assumes that we will enable
-                * LTTPR non transparent if LTTPR is present.
-                * Therefore, only query LTTPR capability when both LTTPR
-                * extended aux timeout and
-                * non transparent mode is supported by hardware
+                * LTTPR extended aux timeout if LTTPR is present.
                 */
                status = core_link_read_dpcd(
                                link,
@@ -3462,9 +3584,6 @@ static bool retrieve_link_cap(struct dc_link *link)
                        CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
        }
 
-       /* decide lttpr non transparent mode */
-       link->lttpr_non_transparent_mode = is_lttpr_present;
-
        if (!is_lttpr_present)
                dc_link_aux_try_to_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
 
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
new file mode 100644 (file)
index 0000000..1361b87
--- /dev/null
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "link_enc_cfg.h"
+#include "resource.h"
+#include "dc_link_dp.h"
+
+/* Check whether stream is supported by DIG link encoders. */
+static bool is_dig_link_enc_stream(struct dc_stream_state *stream)
+{
+       bool is_dig_stream = false;
+       struct link_encoder *link_enc = NULL;
+       int i;
+
+       /* Loop over created link encoder objects. */
+       for (i = 0; i < stream->ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
+               link_enc = stream->ctx->dc->res_pool->link_encoders[i];
+
+               if (link_enc &&
+                               ((uint32_t)stream->signal & link_enc->output_signals)) {
+                       if (dc_is_dp_signal(stream->signal)) {
+                               /* DIGs do not support DP2.0 streams with 128b/132b encoding. */
+                               struct dc_link_settings link_settings = {0};
+
+                               decide_link_settings(stream, &link_settings);
+                               if ((link_settings.link_rate >= LINK_RATE_LOW) &&
+                                               link_settings.link_rate <= LINK_RATE_HIGH3) {
+                                       is_dig_stream = true;
+                                       break;
+                               }
+                       } else {
+                               is_dig_stream = true;
+                               break;
+                       }
+               }
+       }
+
+       return is_dig_stream;
+}
+
+/* Update DIG link encoder resource tracking variables in dc_state. */
+static void update_link_enc_assignment(
+               struct dc_state *state,
+               struct dc_stream_state *stream,
+               enum engine_id eng_id,
+               bool add_enc)
+{
+       int eng_idx;
+       int stream_idx;
+       int i;
+
+       if (eng_id != ENGINE_ID_UNKNOWN) {
+               eng_idx = eng_id - ENGINE_ID_DIGA;
+               stream_idx = -1;
+
+               /* Index of stream in dc_state used to update correct entry in
+                * link_enc_assignments table.
+                */
+               for (i = 0; i < state->stream_count; i++) {
+                       if (stream == state->streams[i]) {
+                               stream_idx = i;
+                               break;
+                       }
+               }
+
+               /* Update link encoder assignments table, link encoder availability
+                * pool and link encoder assigned to stream in state.
+                * Add/remove encoder resource to/from stream.
+                */
+               if (stream_idx != -1) {
+                       if (add_enc) {
+                               state->res_ctx.link_enc_assignments[stream_idx] = (struct link_enc_assignment){
+                                       .valid = true,
+                                       .ep_id = (struct display_endpoint_id) {
+                                               .link_id = stream->link->link_id,
+                                               .ep_type = stream->link->ep_type},
+                                       .eng_id = eng_id};
+                               state->res_ctx.link_enc_avail[eng_idx] = ENGINE_ID_UNKNOWN;
+                               stream->link_enc = stream->ctx->dc->res_pool->link_encoders[eng_idx];
+                       } else {
+                               state->res_ctx.link_enc_assignments[stream_idx].valid = false;
+                               state->res_ctx.link_enc_avail[eng_idx] = eng_id;
+                               stream->link_enc = NULL;
+                       }
+               } else {
+                       dm_output_to_console("%s: Stream not found in dc_state.\n", __func__);
+               }
+       }
+}
+
+/* Return first available DIG link encoder. */
+static enum engine_id find_first_avail_link_enc(
+               struct dc_context *ctx,
+               struct dc_state *state)
+{
+       enum engine_id eng_id = ENGINE_ID_UNKNOWN;
+       int i;
+
+       for (i = 0; i < ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
+               eng_id = state->res_ctx.link_enc_avail[i];
+               if (eng_id != ENGINE_ID_UNKNOWN)
+                       break;
+       }
+
+       return eng_id;
+}
+
+/* Return stream using DIG link encoder resource. NULL if unused. */
+static struct dc_stream_state *get_stream_using_link_enc(
+               struct dc_state *state,
+               enum engine_id eng_id)
+{
+       struct dc_stream_state *stream = NULL;
+       int stream_idx = -1;
+       int i;
+
+       for (i = 0; i < state->stream_count; i++) {
+               struct link_enc_assignment assignment = state->res_ctx.link_enc_assignments[i];
+
+               if (assignment.valid && (assignment.eng_id == eng_id)) {
+                       stream_idx = i;
+                       break;
+               }
+       }
+
+       if (stream_idx != -1)
+               stream = state->streams[stream_idx];
+       else
+               dm_output_to_console("%s: No stream using DIG(%d).\n", __func__, eng_id);
+
+       return stream;
+}
+
+void link_enc_cfg_init(
+               struct dc *dc,
+               struct dc_state *state)
+{
+       int i;
+
+       for (i = 0; i < dc->res_pool->res_cap->num_dig_link_enc; i++) {
+               if (dc->res_pool->link_encoders[i])
+                       state->res_ctx.link_enc_avail[i] = (enum engine_id) i;
+               else
+                       state->res_ctx.link_enc_avail[i] = ENGINE_ID_UNKNOWN;
+       }
+}
+
+void link_enc_cfg_link_encs_assign(
+               struct dc *dc,
+               struct dc_state *state,
+               struct dc_stream_state *streams[],
+               uint8_t stream_count)
+{
+       enum engine_id eng_id = ENGINE_ID_UNKNOWN;
+       int i;
+
+       /* Release DIG link encoder resources before running assignment algorithm. */
+       for (i = 0; i < stream_count; i++)
+               dc->res_pool->funcs->link_enc_unassign(state, streams[i]);
+
+       /* (a) Assign DIG link encoders to physical (unmappable) endpoints first. */
+       for (i = 0; i < stream_count; i++) {
+               struct dc_stream_state *stream = streams[i];
+
+               /* Skip stream if not supported by DIG link encoder. */
+               if (!is_dig_link_enc_stream(stream))
+                       continue;
+
+               /* Physical endpoints have a fixed mapping to DIG link encoders. */
+               if (!stream->link->is_dig_mapping_flexible) {
+                       eng_id = stream->link->eng_id;
+                       update_link_enc_assignment(state, stream, eng_id, true);
+               }
+       }
+
+       /* (b) Then assign encoders to mappable endpoints. */
+       eng_id = ENGINE_ID_UNKNOWN;
+
+       for (i = 0; i < stream_count; i++) {
+               struct dc_stream_state *stream = streams[i];
+
+               /* Skip stream if not supported by DIG link encoder. */
+               if (!is_dig_link_enc_stream(stream))
+                       continue;
+
+               /* Mappable endpoints have a flexible mapping to DIG link encoders. */
+               if (stream->link->is_dig_mapping_flexible) {
+                       eng_id = find_first_avail_link_enc(stream->ctx, state);
+                       update_link_enc_assignment(state, stream, eng_id, true);
+               }
+       }
+}
+
+void link_enc_cfg_link_enc_unassign(
+               struct dc_state *state,
+               struct dc_stream_state *stream)
+{
+       enum engine_id eng_id = ENGINE_ID_UNKNOWN;
+
+       /* Only DIG link encoders. */
+       if (!is_dig_link_enc_stream(stream))
+               return;
+
+       if (stream->link_enc)
+               eng_id = stream->link_enc->preferred_engine;
+
+       update_link_enc_assignment(state, stream, eng_id, false);
+}
+
+bool link_enc_cfg_is_transmitter_mappable(
+               struct dc_state *state,
+               struct link_encoder *link_enc)
+{
+       bool is_mappable = false;
+       enum engine_id eng_id = link_enc->preferred_engine;
+       struct dc_stream_state *stream = get_stream_using_link_enc(state, eng_id);
+
+       if (stream)
+               is_mappable = stream->link->is_dig_mapping_flexible;
+
+       return is_mappable;
+}
+
+struct dc_link *link_enc_cfg_get_link_using_link_enc(
+               struct dc_state *state,
+               enum engine_id eng_id)
+{
+       struct dc_link *link = NULL;
+       int stream_idx = -1;
+       int i;
+
+       for (i = 0; i < state->stream_count; i++) {
+               struct link_enc_assignment assignment = state->res_ctx.link_enc_assignments[i];
+
+               if (assignment.valid && (assignment.eng_id == eng_id)) {
+                       stream_idx = i;
+                       break;
+               }
+       }
+
+       if (stream_idx != -1)
+               link = state->streams[stream_idx]->link;
+       else
+               dm_output_to_console("%s: No link using DIG(%d).\n", __func__, eng_id);
+
+       return link;
+}
+
+struct link_encoder *link_enc_cfg_get_link_enc_used_by_link(
+               struct dc_state *state,
+               struct dc_link *link)
+{
+       struct link_encoder *link_enc = NULL;
+       struct display_endpoint_id ep_id;
+       int stream_idx = -1;
+       int i;
+
+       ep_id = (struct display_endpoint_id) {
+               .link_id = link->link_id,
+               .ep_type = link->ep_type};
+
+       for (i = 0; i < state->stream_count; i++) {
+               struct link_enc_assignment assignment = state->res_ctx.link_enc_assignments[i];
+
+               if (assignment.valid &&
+                               assignment.ep_id.link_id.id == ep_id.link_id.id &&
+                               assignment.ep_id.link_id.enum_id == ep_id.link_id.enum_id &&
+                               assignment.ep_id.link_id.type == ep_id.link_id.type &&
+                               assignment.ep_id.ep_type == ep_id.ep_type) {
+                       stream_idx = i;
+                       break;
+               }
+       }
+
+       if (stream_idx != -1)
+               link_enc = state->streams[stream_idx]->link_enc;
+       else
+               dm_output_to_console("%s: No link encoder used by link(%d).\n", __func__, link->link_index);
+
+       return link_enc;
+}
index 124ce21..48ad1a8 100644 (file)
@@ -14,6 +14,7 @@
 #include "dpcd_defs.h"
 #include "dsc.h"
 #include "resource.h"
+#include "link_enc_cfg.h"
 #include "clk_mgr.h"
 
 static uint8_t convert_to_count(uint8_t lttpr_repeater_count)
@@ -95,7 +96,7 @@ void dp_enable_link_phy(
        enum clock_source_id clock_source,
        const struct dc_link_settings *link_settings)
 {
-       struct link_encoder *link_enc = link->link_enc;
+       struct link_encoder *link_enc;
        struct dc  *dc = link->ctx->dc;
        struct dmcu *dmcu = dc->res_pool->dmcu;
 
@@ -105,6 +106,13 @@ void dp_enable_link_phy(
                        link->dc->res_pool->dp_clock_source;
        unsigned int i;
 
+       /* A link must have an encoder assigned when being enabled or disabled. */
+       if (link->is_dig_mapping_flexible && dc->res_pool->funcs->link_encs_assign)
+               link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link);
+       else
+               link_enc = link->link_enc;
+       ASSERT(link_enc);
+
        if (link->connector_signal == SIGNAL_TYPE_EDP) {
                link->dc->hwss.edp_power_control(link, true);
                link->dc->hwss.edp_wait_for_hpd_ready(link, true);
@@ -227,6 +235,14 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
 {
        struct dc  *dc = link->ctx->dc;
        struct dmcu *dmcu = dc->res_pool->dmcu;
+       struct link_encoder *link_enc;
+
+       /* A link must have an encoder assigned when being enabled or disabled. */
+       if (link->is_dig_mapping_flexible && dc->res_pool->funcs->link_encs_assign)
+               link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link);
+       else
+               link_enc = link->link_enc;
+       ASSERT(link_enc);
 
        if (!link->wa_flags.dp_keep_receiver_powered)
                dp_receiver_power_ctrl(link, false);
@@ -234,13 +250,13 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
        if (signal == SIGNAL_TYPE_EDP) {
                if (link->dc->hwss.edp_backlight_control)
                        link->dc->hwss.edp_backlight_control(link, false);
-               link->link_enc->funcs->disable_output(link->link_enc, signal);
+               link_enc->funcs->disable_output(link_enc, signal);
                link->dc->hwss.edp_power_control(link, false);
        } else {
                if (dmcu != NULL && dmcu->funcs->lock_phy)
                        dmcu->funcs->lock_phy(dmcu);
 
-               link->link_enc->funcs->disable_output(link->link_enc, signal);
+               link_enc->funcs->disable_output(link_enc, signal);
 
                if (dmcu != NULL && dmcu->funcs->unlock_phy)
                        dmcu->funcs->unlock_phy(dmcu);
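
dp_enable_link_phy() and dp_disable_link_phy() now resolve the encoder identically; the repeated lookup could be factored into one helper, roughly as below (a sketch using only names from this patch):

static struct link_encoder *get_link_enc_for_phy(struct dc_link *link)
{
        struct dc *dc = link->ctx->dc;

        /* Flexible (mappable) endpoints carry their assignment in the
         * current state; fixed endpoints keep the static dc_link field.
         */
        if (link->is_dig_mapping_flexible &&
                        dc->res_pool->funcs->link_encs_assign)
                return link_enc_cfg_get_link_enc_used_by_link(
                                link->dc->current_state, link);
        return link->link_enc;
}
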
@@ -302,7 +318,7 @@ void dp_set_hw_lane_settings(
 {
        struct link_encoder *encoder = link->link_enc;
 
-       if (link->lttpr_non_transparent_mode && !is_immediate_downstream(link, offset))
+       if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && !is_immediate_downstream(link, offset))
                return;
 
        /* call Encoder to set lane settings */
index 3c91d16..ac7a758 100644 (file)
@@ -1930,6 +1930,9 @@ enum dc_status dc_remove_stream_from_ctx(
                                dc->res_pool,
                        del_pipe->stream_res.stream_enc,
                        false);
+       /* Release link encoder from stream in new dc_state. */
+       if (dc->res_pool->funcs->link_enc_unassign)
+               dc->res_pool->funcs->link_enc_unassign(new_ctx, del_pipe->stream);
 
        if (del_pipe->stream_res.audio)
                update_audio_usage(
@@ -2842,6 +2845,10 @@ bool pipe_need_reprogram(
        if (pipe_ctx_old->stream_res.dsc != pipe_ctx->stream_res.dsc)
                return true;
 
+       /* DIG link encoder resource assignment for stream changed. */
+       if (pipe_ctx_old->stream->link_enc != pipe_ctx->stream->link_enc)
+               return true;
+
        return false;
 }
 
index d163007..8108b82 100644 (file)
@@ -45,7 +45,7 @@
 /* forward declaration */
 struct aux_payload;
 
-#define DC_VER "3.2.127"
+#define DC_VER "3.2.130"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
@@ -460,6 +460,7 @@ struct dc_debug_options {
        enum pipe_split_policy pipe_split_policy;
        bool force_single_disp_pipe_split;
        bool voltage_align_fclk;
+       bool disable_min_fclk;
 
        bool disable_dfs_bypass;
        bool disable_dpp_power_gate;
index 86ab8f1..67abda4 100644 (file)
@@ -150,6 +150,12 @@ struct dc_vbios_funcs {
                        struct dc_bios *dcb,
                        struct graphics_object_id object_id,
                        struct bp_disp_connector_caps_info *info);
+       enum bp_result (*get_lttpr_caps)(
+                       struct dc_bios *dcb,
+                       uint8_t *dce_caps);
+       enum bp_result (*get_lttpr_interop)(
+                       struct dc_bios *dcb,
+                       uint8_t *dce_caps);
 };
 
 struct bios_registers {
index c50ef5a..b0013e6 100644 (file)
@@ -35,6 +35,13 @@ enum dc_link_fec_state {
        dc_link_fec_ready,
        dc_link_fec_enabled
 };
+
+enum lttpr_mode {
+       LTTPR_MODE_NON_LTTPR,
+       LTTPR_MODE_TRANSPARENT,
+       LTTPR_MODE_NON_TRANSPARENT,
+};
+
 struct dc_link_status {
        bool link_active;
        struct dpcd_caps *dpcd_caps;
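
The tri-state enum replaces the old lttpr_non_transparent_mode flag, so callers now compare against a mode rather than testing a bool (see the dp_set_hw_lane_settings() change above). A hedged helper sketch for the common "any repeater present" question:

static inline bool example_lttpr_present(const struct dc_link *link)
{
        /* Illustrative only: transparent and non-transparent modes both
         * imply at least one LTTPR was detected on the link.
         */
        return link->lttpr_mode != LTTPR_MODE_NON_LTTPR;
}
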
@@ -100,7 +107,7 @@ struct dc_link {
        bool link_state_valid;
        bool aux_access_disabled;
        bool sync_lt_in_progress;
-       bool lttpr_non_transparent_mode;
+       enum lttpr_mode lttpr_mode;
        bool is_internal_display;
 
        /* TODO: Rename. Flag an endpoint as having a programmable mapping to a
@@ -125,6 +132,11 @@ struct dc_link {
        uint8_t hpd_src;
 
        uint8_t link_enc_hw_inst;
+       /* DIG link encoder ID. Used as index in link encoder resource pool.
+        * For links with fixed mapping to DIG, this is not changed after dc_link
+        * object creation.
+        */
+       enum engine_id eng_id;
 
        bool test_pattern_enabled;
        union compliance_test_state compliance_test_state;
@@ -144,6 +156,11 @@ struct dc_link {
        struct panel_cntl *panel_cntl;
        struct link_encoder *link_enc;
        struct graphics_object_id link_id;
+       /* Endpoint type distinguishes display endpoints which do not have entries
+        * in the BIOS connector table from those that do. Helps when tracking link
+        * encoder to display endpoint assignments.
+        */
+       enum display_endpoint_type ep_type;
        union ddi_channel_mapping ddi_channel_mapping;
        struct connector_device_tag_info device_tag;
        struct dpcd_caps dpcd_caps;
index e747370..b0297f0 100644 (file)
@@ -130,12 +130,24 @@ union stream_update_flags {
        uint32_t raw;
 };
 
+struct test_pattern {
+       enum dp_test_pattern type;
+       enum dp_test_pattern_color_space color_space;
+       struct link_training_settings const *p_link_settings;
+       unsigned char const *p_custom_pattern;
+       unsigned int cust_pattern_size;
+};
+
 struct dc_stream_state {
        // sink is deprecated, new code should not reference
        // this pointer
        struct dc_sink *sink;
 
        struct dc_link *link;
+       /* For dynamic link encoder assignment, update the link encoder assigned to
+        * a stream via the volatile dc_state rather than the static dc_link.
+        */
+       struct link_encoder *link_enc;
        struct dc_panel_patch sink_patches;
        union display_content_support content_support;
        struct dc_crtc_timing timing;
@@ -227,6 +239,8 @@ struct dc_stream_state {
 
        uint32_t stream_id;
        bool is_dsc_enabled;
+
+       struct test_pattern test_pattern;
        union stream_update_flags update_flags;
 
        bool has_non_synchronizable_pclk;
@@ -264,6 +278,8 @@ struct dc_stream_update {
        struct dc_dsc_config *dsc_config;
        struct dc_transfer_func *func_shaper;
        struct dc_3dlut *lut3d_func;
+
+       struct test_pattern *pending_test_pattern;
 };
 
 bool dc_is_stream_unchanged(
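
With the pending_test_pattern pointer added above, a test pattern can be requested through the regular stream-update path instead of a bespoke call. A sketch follows; the enum values and the commit entry point are assumptions, not taken from this hunk:

static void example_request_test_pattern(struct dc *dc,
                                         struct dc_stream_state *stream)
{
        static struct test_pattern tp = {
                .type = DP_TEST_PATTERN_VIDEO_MODE,                   /* assumed */
                .color_space = DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED, /* assumed */
        };
        struct dc_stream_update update = {
                .pending_test_pattern = &tp,
        };

        /* The update would then flow through a commit call such as
         * dc_commit_updates_for_stream() (assumed entry point).
         */
        (void)dc;
        (void)stream;
        (void)update;
}
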
index 80757a0..432754e 100644 (file)
@@ -113,6 +113,7 @@ struct dc_context {
        struct gpio_service *gpio_service;
        uint32_t dc_sink_id_count;
        uint32_t dc_stream_id_count;
+       uint32_t dc_edp_id_count;
        uint64_t fbc_gpu_addr;
        struct dc_dmub_srv *dmub_srv;
 
@@ -687,7 +688,8 @@ enum dc_psr_state {
        PSR_STATE5,
        PSR_STATE5a,
        PSR_STATE5b,
-       PSR_STATE5c
+       PSR_STATE5c,
+       PSR_STATE_INVALID = 0xFF
 };
 
 struct psr_config {
@@ -934,4 +936,19 @@ enum dc_psr_version {
        DC_PSR_VERSION_UNSUPPORTED              = 0xFFFFFFFF,
 };
 
+/* Possible values of display_endpoint_id.endpoint */
+enum display_endpoint_type {
+       DISPLAY_ENDPOINT_PHY = 0, /* Physical connector. */
+       DISPLAY_ENDPOINT_UNKNOWN = -1
+};
+
+/* Extends graphics_object_id with an additional member 'ep_type' for
+ * distinguishing between physical endpoints (with entries in BIOS connector table) and
+ * logical endpoints.
+ */
+struct display_endpoint_id {
+       struct graphics_object_id link_id;
+       enum display_endpoint_type ep_type;
+};
+
 #endif /* DC_TYPES_H_ */
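
The four-field comparison used in link_enc_cfg_get_link_enc_used_by_link() earlier in this series suggests an equality helper for the new struct; an illustrative sketch:

static inline bool display_endpoint_id_eq(const struct display_endpoint_id *a,
                                          const struct display_endpoint_id *b)
{
        /* Not part of the patch: mirrors the field-by-field check in
         * link_enc_cfg_get_link_enc_used_by_link().
         */
        return a->link_id.id == b->link_id.id &&
               a->link_id.enum_id == b->link_id.enum_id &&
               a->link_id.type == b->link_id.type &&
               a->ep_type == b->ep_type;
}
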
index 4e87e70..874b132 100644 (file)
@@ -283,7 +283,7 @@ struct abm *dce_abm_create(
        const struct dce_abm_shift *abm_shift,
        const struct dce_abm_mask *abm_mask)
 {
-       struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
+       struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_ATOMIC);
 
        if (abm_dce == NULL) {
                BREAK_TO_DEBUGGER();
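
Here and in the DMCU, DCCG and DCN20 constructors below, GFP_KERNEL becomes GFP_ATOMIC, presumably because these create paths can now be reached from a context that is not allowed to sleep. A reminder sketch of the trade-off (general kernel behaviour, not from this patch):

#include <linux/slab.h>

/* GFP_KERNEL may sleep to reclaim memory, so it cannot be used under a
 * spinlock or in interrupt context; GFP_ATOMIC never sleeps but is more
 * likely to fail under memory pressure.
 */
static void *example_alloc(size_t size, bool can_sleep)
{
        return kzalloc(size, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
}
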
index 4f86450..8cd8413 100644 (file)
@@ -1133,7 +1133,7 @@ struct dmcu *dcn10_dmcu_create(
        const struct dce_dmcu_shift *dmcu_shift,
        const struct dce_dmcu_mask *dmcu_mask)
 {
-       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
+       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
 
        if (dmcu_dce == NULL) {
                BREAK_TO_DEBUGGER();
@@ -1154,7 +1154,7 @@ struct dmcu *dcn20_dmcu_create(
        const struct dce_dmcu_shift *dmcu_shift,
        const struct dce_dmcu_mask *dmcu_mask)
 {
-       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
+       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
 
        if (dmcu_dce == NULL) {
                BREAK_TO_DEBUGGER();
@@ -1175,7 +1175,7 @@ struct dmcu *dcn21_dmcu_create(
        const struct dce_dmcu_shift *dmcu_shift,
        const struct dce_dmcu_mask *dmcu_mask)
 {
-       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
+       struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
 
        if (dmcu_dce == NULL) {
                BREAK_TO_DEBUGGER();
index 15ed09b..28ff059 100644 (file)
@@ -80,19 +80,26 @@ static enum dc_psr_state convert_psr_state(uint32_t raw_state)
 static void dmub_psr_get_state(struct dmub_psr *dmub, enum dc_psr_state *state)
 {
        struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
-       uint32_t raw_state;
+       uint32_t raw_state = 0;
+       uint32_t retry_count = 0;
        enum dmub_status status;
 
-       // Send gpint command and wait for ack
-       status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
-
-       if (status == DMUB_STATUS_OK) {
-               // GPINT was executed, get response
-               dmub_srv_get_gpint_response(srv, &raw_state);
-               *state = convert_psr_state(raw_state);
-       } else
-               // Return invalid state when GPINT times out
-               *state = 0xFF;
+       do {
+               // Send gpint command and wait for ack
+               status = dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
+
+               if (status == DMUB_STATUS_OK) {
+                       // GPINT was executed, get response
+                       dmub_srv_get_gpint_response(srv, &raw_state);
+                       *state = convert_psr_state(raw_state);
+               } else
+                       // Return invalid state when GPINT times out
+                       *state = PSR_STATE_INVALID;
+
+               // Assert if max retry hit
+               if (retry_count >= 1000)
+                       ASSERT(0);
+       } while (++retry_count <= 1000 && *state == PSR_STATE_INVALID);
 }
 
 /*
index 804092f..873c6f2 100644 (file)
@@ -1846,8 +1846,7 @@ void dce110_set_safe_displaymarks(
  ******************************************************************************/
 
 static void set_drr(struct pipe_ctx **pipe_ctx,
-               int num_pipes, unsigned int vmin, unsigned int vmax,
-               unsigned int vmid, unsigned int vmid_frame_number)
+               int num_pipes, struct dc_crtc_timing_adjust adjust)
 {
        int i = 0;
        struct drr_params params = {0};
@@ -1856,8 +1855,8 @@ static void set_drr(struct pipe_ctx **pipe_ctx,
        // Note DRR trigger events are generated regardless of whether num frames met.
        unsigned int num_frames = 2;
 
-       params.vertical_total_max = vmax;
-       params.vertical_total_min = vmin;
+       params.vertical_total_max = adjust.v_total_max;
+       params.vertical_total_min = adjust.v_total_min;
 
        /* TODO: If multiple pipes are to be supported, you need
         * some GSL stuff. Static screen triggers may be programmed differently
@@ -1867,7 +1866,7 @@ static void set_drr(struct pipe_ctx **pipe_ctx,
                pipe_ctx[i]->stream_res.tg->funcs->set_drr(
                        pipe_ctx[i]->stream_res.tg, &params);
 
-               if (vmax != 0 && vmin != 0)
+               if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
                        pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
                                        pipe_ctx[i]->stream_res.tg,
                                        event_triggers, num_frames);
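
set_drr() now takes the whole dc_crtc_timing_adjust instead of four loose vmin/vmax/vmid parameters, which keeps the DRR bounds together and lets new fields ride along without another signature change. A caller sketch (field names as used above; the hwss call path is assumed):

static void example_set_drr(struct dc *dc, struct pipe_ctx **pipes,
                            int num_pipes,
                            unsigned int vmin, unsigned int vmax)
{
        struct dc_crtc_timing_adjust adjust = {
                .v_total_min = vmin,
                .v_total_max = vmax,
                /* v_total_mid / v_total_mid_frame_num left zero: no
                 * mid-point switching (assumed default).
                 */
        };

        dc->hwss.set_drr(pipes, num_pipes, adjust);
}
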
index 612450f..725d92e 100644 (file)
@@ -526,7 +526,7 @@ static struct output_pixel_processor *dce80_opp_create(
        return &opp->base;
 }
 
-struct dce_aux *dce80_aux_engine_create(
+static struct dce_aux *dce80_aux_engine_create(
        struct dc_context *ctx,
        uint32_t inst)
 {
@@ -564,7 +564,7 @@ static const struct dce_i2c_mask i2c_masks = {
                I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
 };
 
-struct dce_i2c_hw *dce80_i2c_hw_create(
+static struct dce_i2c_hw *dce80_i2c_hw_create(
        struct dc_context *ctx,
        uint32_t inst)
 {
@@ -580,7 +580,7 @@ struct dce_i2c_hw *dce80_i2c_hw_create(
        return dce_i2c_hw;
 }
 
-struct dce_i2c_sw *dce80_i2c_sw_create(
+static struct dce_i2c_sw *dce80_i2c_sw_create(
        struct dc_context *ctx)
 {
        struct dce_i2c_sw *dce_i2c_sw =
@@ -714,7 +714,7 @@ static const struct encoder_feature_support link_enc_feature = {
                .flags.bits.IS_TPS3_CAPABLE = true
 };
 
-struct link_encoder *dce80_link_encoder_create(
+static struct link_encoder *dce80_link_encoder_create(
        const struct encoder_init_data *enc_init_data)
 {
        struct dce110_link_encoder *enc110 =
@@ -753,7 +753,7 @@ static struct panel_cntl *dce80_panel_cntl_create(const struct panel_cntl_init_d
        return &panel_cntl->base;
 }
 
-struct clock_source *dce80_clock_source_create(
+static struct clock_source *dce80_clock_source_create(
        struct dc_context *ctx,
        struct dc_bios *bios,
        enum clock_source_id id,
@@ -777,7 +777,7 @@ struct clock_source *dce80_clock_source_create(
        return NULL;
 }
 
-void dce80_clock_source_destroy(struct clock_source **clk_src)
+static void dce80_clock_source_destroy(struct clock_source **clk_src)
 {
        kfree(TO_DCE110_CLK_SRC(*clk_src));
        *clk_src = NULL;
@@ -867,7 +867,7 @@ static void dce80_resource_destruct(struct dce110_resource_pool *pool)
        }
 }
 
-bool dce80_validate_bandwidth(
+static bool dce80_validate_bandwidth(
        struct dc *dc,
        struct dc_state *context,
        bool fast_validate)
@@ -912,7 +912,7 @@ static bool dce80_validate_surface_sets(
        return true;
 }
 
-enum dc_status dce80_validate_global(
+static enum dc_status dce80_validate_global(
                struct dc *dc,
                struct dc_state *context)
 {
index 9eb33ea..7c939c0 100644 (file)
@@ -1893,7 +1893,7 @@ uint64_t reduceSizeAndFraction(
        num = *numerator;
        denom = *denominator;
        for (i = 0; i < count; i++) {
-               uint32_t num_reminder, denom_reminder;
+               uint32_t num_remainder, denom_remainder;
                uint64_t num_result, denom_result;
                if (checkUint32Bounary &&
                        num <= max_int32 && denom <= max_int32) {
@@ -1901,13 +1901,13 @@ uint64_t reduceSizeAndFraction(
                        break;
                }
                do {
-                       num_result = div_u64_rem(num, prime_numbers[i], &num_reminder);
-                       denom_result = div_u64_rem(denom, prime_numbers[i], &denom_reminder);
-                       if (num_reminder == 0 && denom_reminder == 0) {
+                       num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
+                       denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
+                       if (num_remainder == 0 && denom_remainder == 0) {
                                num = num_result;
                                denom = denom_result;
                        }
-               } while (num_reminder == 0 && denom_reminder == 0);
+               } while (num_remainder == 0 && denom_remainder == 0);
        }
        *numerator = num;
        *denominator = denom;
@@ -3271,8 +3271,7 @@ void dcn10_optimize_bandwidth(
 }
 
 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
-               int num_pipes, unsigned int vmin, unsigned int vmax,
-               unsigned int vmid, unsigned int vmid_frame_number)
+               int num_pipes, struct dc_crtc_timing_adjust adjust)
 {
        int i = 0;
        struct drr_params params = {0};
@@ -3281,11 +3280,10 @@ void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
        // Note DRR trigger events are generated regardless of whether num frames met.
        unsigned int num_frames = 2;
 
-       params.vertical_total_max = vmax;
-       params.vertical_total_min = vmin;
-       params.vertical_total_mid = vmid;
-       params.vertical_total_mid_frame_num = vmid_frame_number;
-
+       params.vertical_total_max = adjust.v_total_max;
+       params.vertical_total_min = adjust.v_total_min;
+       params.vertical_total_mid = adjust.v_total_mid;
+       params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
        /* TODO: If multiple pipes are to be supported, you need
         * some GSL stuff. Static screen triggers may be programmed differently
         * as well.
@@ -3293,7 +3291,7 @@ void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
        for (i = 0; i < num_pipes; i++) {
                pipe_ctx[i]->stream_res.tg->funcs->set_drr(
                        pipe_ctx[i]->stream_res.tg, &params);
-               if (vmax != 0 && vmin != 0)
+               if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
                        pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
                                        pipe_ctx[i]->stream_res.tg,
                                        event_triggers, num_frames);
@@ -3981,3 +3979,19 @@ void dcn10_get_clock(struct dc *dc,
                                dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
 
 }
+
+void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
+{
+       struct resource_pool *pool = dc->res_pool;
+       int i;
+
+       for (i = 0; i < pool->pipe_count; i++) {
+               struct hubp *hubp = pool->hubps[i];
+               struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
+
+               hubp->funcs->hubp_read_state(hubp);
+
+               if (!s->blank_en)
+                       dcc_en_bits[i] = s->dcc_en ? 1 : 0;
+       }
+}
index e0800cd..37bec42 100644 (file)
@@ -1,5 +1,5 @@
 /*
-* Copyright 2016 Advanced Micro Devices, Inc.
+* Copyright 2016-2020 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -145,8 +145,7 @@ bool dcn10_dummy_display_power_gating(
                struct dc_bios *dcb,
                enum pipe_gating_control power_gating);
 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
-               int num_pipes, unsigned int vmin, unsigned int vmax,
-               unsigned int vmid, unsigned int vmid_frame_number);
+               int num_pipes, struct dc_crtc_timing_adjust adjust);
 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
                int num_pipes,
                struct crtc_position *position);
@@ -210,4 +209,6 @@ void dcn10_wait_for_pending_cleared(struct dc *dc,
 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx);
 void dcn10_verify_allow_pstate_change_high(struct dc *dc);
 
+void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits);
+
 #endif /* __DC_HWSS_DCN10_H__ */
index 254300b..d532c78 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright 2016 Advanced Micro Devices, Inc.
+ * Copyright 2016-2020 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -79,6 +79,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
        .set_backlight_level = dce110_set_backlight_level,
        .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
        .set_pipe = dce110_set_pipe,
+       .get_dcc_en_bits = dcn10_get_dcc_en_bits,
 };
 
 static const struct hwseq_private_funcs dcn10_private_funcs = {
index 6138f48..677663c 100644 (file)
@@ -131,6 +131,22 @@ void optc1_setup_vertical_interrupt2(
                        OTG_VERTICAL_INTERRUPT2_LINE_START, start_line);
 }
 
+/**
+ * optc1_set_vupdate_keepout() - program a window that blocks the pipe's
+ * update lock from changing. The start offset begins at vstartup; the end
+ * offset begins at the end of vupdate. Each runs for the given clock count.
+ */
+void optc1_set_vupdate_keepout(struct timing_generator *optc,
+                              struct vupdate_keepout_params *params)
+{
+       struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+       REG_SET_3(OTG_VUPDATE_KEEPOUT, 0,
+                 MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, params->start_offset,
+                 MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, params->end_offset,
+                 OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, params->enable);
+}
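
A caller describes the keepout window with a small parameter block; an illustrative sketch (the offset values are placeholders, not recommendations):

static void example_enable_keepout(struct timing_generator *optc)
{
        struct vupdate_keepout_params params = {
                .start_offset = 16, /* clocks after vstartup, placeholder */
                .end_offset = 16,   /* clocks after end of vupdate, placeholder */
                .enable = 1,
        };

        optc1_set_vupdate_keepout(optc, &params);
}
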
+
 /**
  * program_timing_generator   used by mode timing set
  * Program CRTC Timing Registers - OTG_H_*, OTG_V_*, Pixel repetition.
index 2529723..cabfe83 100644 (file)
@@ -194,6 +194,9 @@ struct dcn_optc_registers {
        SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_UPDATE_PENDING, mask_sh),\
        SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_BLANK_DATA_DOUBLE_BUFFER_EN, mask_sh),\
        SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mask_sh),\
+       SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \
+       SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \
+       SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \
        SF(OTG0_OTG_H_TOTAL, OTG_H_TOTAL, mask_sh),\
        SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_START, mask_sh),\
        SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_END, mask_sh),\
index d079f4e..f962b90 100644 (file)
@@ -82,7 +82,7 @@ const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
        .meta_chunk_size_kbytes = 2,
        .writeback_chunk_size_kbytes = 2,
        .line_buffer_size_bits = 589824,
-       .max_line_buffer_lines = 32,
+       .max_line_buffer_lines = 12,
        .IsLineBufferBppFixed = 0,
        .LineBufferFixedBpp = -1,
        .writeback_luma_buffer_size_kbytes = 12,
@@ -619,7 +619,6 @@ static const struct dc_debug_options debug_defaults_drv = {
                .recovery_enabled = false, /*enable this by default after testing.*/
                .max_downscale_src_width = 3840,
                .underflow_assert_delay_us = 0xFFFFFFFF,
-               .use_max_lb = true
 };
 
 static const struct dc_debug_options debug_defaults_diags = {
@@ -631,7 +630,6 @@ static const struct dc_debug_options debug_defaults_diags = {
                .disable_pplib_clock_request = true,
                .disable_pplib_wm_range = true,
                .underflow_assert_delay_us = 0xFFFFFFFF,
-               .use_max_lb = true
 };
 
 static void dcn10_dpp_destroy(struct dpp **dpp)
index 62cc265..8774406 100644 (file)
@@ -112,7 +112,7 @@ struct dccg *dccg2_create(
        const struct dccg_shift *dccg_shift,
        const struct dccg_mask *dccg_mask)
 {
-       struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
+       struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_ATOMIC);
        struct dccg *base;
 
        if (dccg_dcn == NULL) {
index 7218ed9..b5bb613 100644 (file)
@@ -95,6 +95,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
        .optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft,
 #endif
        .set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
+       .get_dcc_en_bits = dcn10_get_dcc_en_bits,
 };
 
 static const struct hwseq_private_funcs dcn20_private_funcs = {
index fa01349..2f9bfae 100644 (file)
@@ -341,8 +341,7 @@ void enc2_hw_init(struct link_encoder *enc)
        } else {
                AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
 
-               AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c4d);
-
+               AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);
        }
 
        //AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32;
index ea7eaf7..3139d90 100644 (file)
@@ -134,22 +134,6 @@ void optc2_set_gsl_window(struct timing_generator *optc,
                OTG_GSL_WINDOW_END_Y, params->gsl_window_end_y);
 }
 
-/**
- * Vupdate keepout can be set to a window to block the update lock for that pipe from changing.
- * Start offset begins with vstartup and goes for x number of clocks,
- * end offset starts from end of vupdate to x number of clocks.
- */
-void optc2_set_vupdate_keepout(struct timing_generator *optc,
-                  const struct vupdate_keepout_params *params)
-{
-       struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
-       REG_SET_3(OTG_VUPDATE_KEEPOUT, 0,
-               MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, params->start_offset,
-               MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, params->end_offset,
-               OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, params->enable);
-}
-
 void optc2_set_gsl_source_select(
                struct timing_generator *optc,
                int group_idx,
index e0a0a8a..3dee2ec 100644 (file)
@@ -56,9 +56,6 @@
        SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \
        SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\
        SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\
-       SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \
-       SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \
-       SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \
        SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \
        SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \
        SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \
index 2307b35..f65a690 100644 (file)
@@ -112,7 +112,7 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = {
        .is_line_buffer_bpp_fixed = 0,
        .line_buffer_fixed_bpp = 0,
        .dcc_supported = true,
-       .max_line_buffer_lines = 32,
+       .max_line_buffer_lines = 12,
        .writeback_luma_buffer_size_kbytes = 12,
        .writeback_chroma_buffer_size_kbytes = 8,
        .writeback_chroma_line_buffer_width_pixels = 4,
@@ -180,7 +180,7 @@ static struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {
        .is_line_buffer_bpp_fixed = 0,
        .line_buffer_fixed_bpp = 0,
        .dcc_supported = true,
-       .max_line_buffer_lines = 32,
+       .max_line_buffer_lines = 12,
        .writeback_luma_buffer_size_kbytes = 12,
        .writeback_chroma_buffer_size_kbytes = 8,
        .writeback_chroma_line_buffer_width_pixels = 4,
@@ -1075,7 +1075,6 @@ static const struct dc_debug_options debug_defaults_drv = {
                .scl_reset_length10 = true,
                .sanity_checks = false,
                .underflow_assert_delay_us = 0xFFFFFFFF,
-               .use_max_lb = true
 };
 
 static const struct dc_debug_options debug_defaults_diags = {
@@ -1092,7 +1091,6 @@ static const struct dc_debug_options debug_defaults_diags = {
                .scl_reset_length10 = true,
                .underflow_assert_delay_us = 0xFFFFFFFF,
                .enable_tri_buf = true,
-               .use_max_lb = true
 };
 
 void dcn20_dpp_destroy(struct dpp **dpp)
@@ -1106,7 +1104,7 @@ struct dpp *dcn20_dpp_create(
        uint32_t inst)
 {
        struct dcn20_dpp *dpp =
-               kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL);
+               kzalloc(sizeof(struct dcn20_dpp), GFP_ATOMIC);
 
        if (!dpp)
                return NULL;
@@ -1124,7 +1122,7 @@ struct input_pixel_processor *dcn20_ipp_create(
        struct dc_context *ctx, uint32_t inst)
 {
        struct dcn10_ipp *ipp =
-               kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
+               kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC);
 
        if (!ipp) {
                BREAK_TO_DEBUGGER();
@@ -1141,7 +1139,7 @@ struct output_pixel_processor *dcn20_opp_create(
        struct dc_context *ctx, uint32_t inst)
 {
        struct dcn20_opp *opp =
-               kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
+               kzalloc(sizeof(struct dcn20_opp), GFP_ATOMIC);
 
        if (!opp) {
                BREAK_TO_DEBUGGER();
@@ -1158,7 +1156,7 @@ struct dce_aux *dcn20_aux_engine_create(
        uint32_t inst)
 {
        struct aux_engine_dce110 *aux_engine =
-               kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+               kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC);
 
        if (!aux_engine)
                return NULL;
@@ -1196,7 +1194,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
        uint32_t inst)
 {
        struct dce_i2c_hw *dce_i2c_hw =
-               kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
+               kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC);
 
        if (!dce_i2c_hw)
                return NULL;
@@ -1209,7 +1207,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
 struct mpc *dcn20_mpc_create(struct dc_context *ctx)
 {
        struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
-                                         GFP_KERNEL);
+                                         GFP_ATOMIC);
 
        if (!mpc20)
                return NULL;
@@ -1227,7 +1225,7 @@ struct hubbub *dcn20_hubbub_create(struct dc_context *ctx)
 {
        int i;
        struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
-                                         GFP_KERNEL);
+                                         GFP_ATOMIC);
 
        if (!hubbub)
                return NULL;
@@ -1255,7 +1253,7 @@ struct timing_generator *dcn20_timing_generator_create(
                uint32_t instance)
 {
        struct optc *tgn10 =
-               kzalloc(sizeof(struct optc), GFP_KERNEL);
+               kzalloc(sizeof(struct optc), GFP_ATOMIC);
 
        if (!tgn10)
                return NULL;
@@ -1334,7 +1332,7 @@ static struct clock_source *dcn20_clock_source_create(
        bool dp_clk_src)
 {
        struct dce110_clk_src *clk_src =
-               kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+               kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC);
 
        if (!clk_src)
                return NULL;
@@ -1440,7 +1438,7 @@ struct display_stream_compressor *dcn20_dsc_create(
        struct dc_context *ctx, uint32_t inst)
 {
        struct dcn20_dsc *dsc =
-               kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
+               kzalloc(sizeof(struct dcn20_dsc), GFP_ATOMIC);
 
        if (!dsc) {
                BREAK_TO_DEBUGGER();
@@ -1574,7 +1572,7 @@ struct hubp *dcn20_hubp_create(
        uint32_t inst)
 {
        struct dcn20_hubp *hubp2 =
-               kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
+               kzalloc(sizeof(struct dcn20_hubp), GFP_ATOMIC);
 
        if (!hubp2)
                return NULL;
@@ -2218,7 +2216,7 @@ int dcn20_populate_dml_pipes_from_context(
                        pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0;
 
                /* todo: default max for now, until there is logic reflecting this in dc*/
-               pipes[pipe_cnt].dout.output_bpc = 12;
+               pipes[pipe_cnt].dout.dsc_input_bpc = 12;
                /*fill up the audio sample rate (unit in kHz)*/
                get_audio_check(&res_ctx->pipe_ctx[i].stream->audio_info, &aud_check);
                pipes[pipe_cnt].dout.max_audio_sample_rate = aud_check.max_audiosample_rate / 1000;
@@ -3396,7 +3394,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
 
 static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
 {
-       struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
+       struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_ATOMIC);
 
        if (!pp_smu)
                return pp_smu;
@@ -4042,7 +4040,7 @@ struct resource_pool *dcn20_create_resource_pool(
                struct dc *dc)
 {
        struct dcn20_resource_pool *pool =
-               kzalloc(sizeof(struct dcn20_resource_pool), GFP_KERNEL);
+               kzalloc(sizeof(struct dcn20_resource_pool), GFP_ATOMIC);
 
        if (!pool)
                return NULL;
index 074e271..4f20a85 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright 2016 Advanced Micro Devices, Inc.
+ * Copyright 2016-2020 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -99,6 +99,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
 #endif
        .is_abm_supported = dcn21_is_abm_supported,
        .set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
+       .get_dcc_en_bits = dcn10_get_dcc_en_bits,
 };
 
 static const struct hwseq_private_funcs dcn21_private_funcs = {
index e62f931..8e3f1d0 100644 (file)
@@ -55,7 +55,6 @@
 #include "dce/dce_audio.h"
 #include "dce/dce_hwseq.h"
 #include "virtual/virtual_stream_encoder.h"
-#include "dce110/dce110_resource.h"
 #include "dml/display_mode_vba.h"
 #include "dcn20/dcn20_dccg.h"
 #include "dcn21/dcn21_dccg.h"
@@ -115,7 +114,7 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = {
        .is_line_buffer_bpp_fixed = 0,
        .line_buffer_fixed_bpp = 0,
        .dcc_supported = true,
-       .max_line_buffer_lines = 32,
+       .max_line_buffer_lines = 12,
        .writeback_luma_buffer_size_kbytes = 12,
        .writeback_chroma_buffer_size_kbytes = 8,
        .writeback_chroma_line_buffer_width_pixels = 4,
index 705fbfc..8a32772 100644 (file)
        HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_EN, mask_sh),\
        HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_BLK, mask_sh),\
        HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_BLK_C, mask_sh),\
+       HUBP_SF(HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK, mask_sh),\
        HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\
        HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\
        HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\
index c4c14e9..bf7fa98 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright 2020 Advanced Micro Devices, Inc.
+ * Copyright 2016-2020 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -98,6 +98,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
        .hardware_release = dcn30_hardware_release,
        .set_pipe = dcn21_set_pipe,
        .set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+       .get_dcc_en_bits = dcn10_get_dcc_en_bits,
 };
 
 static const struct hwseq_private_funcs dcn30_private_funcs = {
index 263c298..4a5fa23 100644 (file)
@@ -120,7 +120,7 @@ struct _vcs_dpi_ip_params_st dcn3_0_ip = {
        .dcc_supported = true,
        .writeback_interface_buffer_size_kbytes = 90,
        .writeback_line_buffer_buffer_size = 0,
-       .max_line_buffer_lines = 32,
+       .max_line_buffer_lines = 12,
        .writeback_luma_buffer_size_kbytes = 12,  // writeback_line_buffer_buffer_size = 656640
        .writeback_chroma_buffer_size_kbytes = 8,
        .writeback_chroma_line_buffer_width_pixels = 4,
index bdad721..0d90523 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright 2020 Advanced Micro Devices, Inc.
+ * Copyright 2016-2020 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -98,6 +98,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
        .set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
        .set_pipe = dcn21_set_pipe,
        .set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+       .get_dcc_en_bits = dcn10_get_dcc_en_bits,
 };
 
 static const struct hwseq_private_funcs dcn301_private_funcs = {
index 622a5bf..5b54b7f 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright 2020 Advanced Micro Devices, Inc.
+ * Copyright 2019-2021 Advanced Micro Devices, Inc.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -116,7 +116,7 @@ struct _vcs_dpi_ip_params_st dcn3_01_ip = {
        .dcc_supported = true,
        .writeback_interface_buffer_size_kbytes = 90,
        .writeback_line_buffer_buffer_size = 656640,
-       .max_line_buffer_lines = 32,
+       .max_line_buffer_lines = 12,
        .writeback_luma_buffer_size_kbytes = 12,  // writeback_line_buffer_buffer_size = 656640
        .writeback_chroma_buffer_size_kbytes = 8,
        .writeback_chroma_line_buffer_width_pixels = 4,
index 0723e29..fc2dea2 100644 (file)
@@ -101,7 +101,7 @@ struct _vcs_dpi_ip_params_st dcn3_02_ip = {
                .dcc_supported = true,
                .writeback_interface_buffer_size_kbytes = 90,
                .writeback_line_buffer_buffer_size = 0,
-               .max_line_buffer_lines = 32,
+               .max_line_buffer_lines = 12,
                .writeback_luma_buffer_size_kbytes = 12,  // writeback_line_buffer_buffer_size = 656640
                .writeback_chroma_buffer_size_kbytes = 8,
                .writeback_chroma_line_buffer_width_pixels = 4,
@@ -164,7 +164,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc = {
 
                .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
                .num_states = 1,
-               .sr_exit_time_us = 12,
+               .sr_exit_time_us = 15.5,
                .sr_enter_plus_exit_time_us = 20,
                .urgent_latency_us = 4.0,
                .urgent_latency_pixel_data_only_us = 4.0,
index 72423dc..799bae2 100644 (file)
@@ -293,13 +293,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
        if (surf_linear) {
                log2_swath_height_l = 0;
                log2_swath_height_c = 0;
-       } else if (!surf_vert) {
-               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
-               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
        } else {
-               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
-               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
+               unsigned int swath_height_l;
+               unsigned int swath_height_c;
+
+               if (!surf_vert) {
+                       swath_height_l = rq_param->misc.rq_l.blk256_height;
+                       swath_height_c = rq_param->misc.rq_c.blk256_height;
+               } else {
+                       swath_height_l = rq_param->misc.rq_l.blk256_width;
+                       swath_height_c = rq_param->misc.rq_c.blk256_width;
+               }
+
+               if (swath_height_l > 0)
+                       log2_swath_height_l = dml_log2(swath_height_l);
+
+               if (req128_l && log2_swath_height_l > 0)
+                       log2_swath_height_l -= 1;
+
+               if (swath_height_c > 0)
+                       log2_swath_height_c = dml_log2(swath_height_c);
+
+               if (req128_c && log2_swath_height_c > 0)
+                       log2_swath_height_c -= 1;
        }
+
        rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
        rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
 
index 9c78446..6a6d597 100644 (file)
@@ -293,13 +293,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
        if (surf_linear) {
                log2_swath_height_l = 0;
                log2_swath_height_c = 0;
-       } else if (!surf_vert) {
-               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
-               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
        } else {
-               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
-               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
+               unsigned int swath_height_l;
+               unsigned int swath_height_c;
+
+               if (!surf_vert) {
+                       swath_height_l = rq_param->misc.rq_l.blk256_height;
+                       swath_height_c = rq_param->misc.rq_c.blk256_height;
+               } else {
+                       swath_height_l = rq_param->misc.rq_l.blk256_width;
+                       swath_height_c = rq_param->misc.rq_c.blk256_width;
+               }
+
+               if (swath_height_l > 0)
+                       log2_swath_height_l = dml_log2(swath_height_l);
+
+               if (req128_l && log2_swath_height_l > 0)
+                       log2_swath_height_l -= 1;
+
+               if (swath_height_c > 0)
+                       log2_swath_height_c = dml_log2(swath_height_c);
+
+               if (req128_c && log2_swath_height_c > 0)
+                       log2_swath_height_c -= 1;
        }
+
        rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
        rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
 
index edd41d3..dc1c81a 100644 (file)
@@ -277,13 +277,31 @@ static void handle_det_buf_split(
        if (surf_linear) {
                log2_swath_height_l = 0;
                log2_swath_height_c = 0;
-       } else if (!surf_vert) {
-               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
-               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
        } else {
-               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
-               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
+               unsigned int swath_height_l;
+               unsigned int swath_height_c;
+
+               if (!surf_vert) {
+                       swath_height_l = rq_param->misc.rq_l.blk256_height;
+                       swath_height_c = rq_param->misc.rq_c.blk256_height;
+               } else {
+                       swath_height_l = rq_param->misc.rq_l.blk256_width;
+                       swath_height_c = rq_param->misc.rq_c.blk256_width;
+               }
+
+               if (swath_height_l > 0)
+                       log2_swath_height_l = dml_log2(swath_height_l);
+
+               if (req128_l && log2_swath_height_l > 0)
+                       log2_swath_height_l -= 1;
+
+               if (swath_height_c > 0)
+                       log2_swath_height_c = dml_log2(swath_height_c);
+
+               if (req128_c && log2_swath_height_c > 0)
+                       log2_swath_height_c -= 1;
        }
+
        rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
        rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
 
index 0f14f20..04601a7 100644 (file)
@@ -237,13 +237,31 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
        if (surf_linear) {
                log2_swath_height_l = 0;
                log2_swath_height_c = 0;
-       } else if (!surf_vert) {
-               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
-               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
        } else {
-               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
-               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
+               unsigned int swath_height_l;
+               unsigned int swath_height_c;
+
+               if (!surf_vert) {
+                       swath_height_l = rq_param->misc.rq_l.blk256_height;
+                       swath_height_c = rq_param->misc.rq_c.blk256_height;
+               } else {
+                       swath_height_l = rq_param->misc.rq_l.blk256_width;
+                       swath_height_c = rq_param->misc.rq_c.blk256_width;
+               }
+
+               if (swath_height_l > 0)
+                       log2_swath_height_l = dml_log2(swath_height_l);
+
+               if (req128_l && log2_swath_height_l > 0)
+                       log2_swath_height_l -= 1;
+
+               if (swath_height_c > 0)
+                       log2_swath_height_c = dml_log2(swath_height_c);
+
+               if (req128_c && log2_swath_height_c > 0)
+                       log2_swath_height_c -= 1;
        }
+
        rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
        rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
 
index 098d643..1f7b6dd 100644 (file)
@@ -226,7 +226,7 @@ void dml_log_pipe_params(
                dml_print("DML PARAMS: PIPE [%d] DISPLAY OUTPUT PARAMS:\n", i);
                dml_print("DML PARAMS:     output_type                = %d\n", dout->output_type);
                dml_print("DML PARAMS:     output_format              = %d\n", dout->output_format);
-               dml_print("DML PARAMS:     output_bpc                 = %d\n", dout->output_bpc);
+               dml_print("DML PARAMS:     dsc_input_bpc              = %d\n", dout->dsc_input_bpc);
                dml_print("DML PARAMS:     output_bpp                 = %3.4f\n", dout->output_bpp);
                dml_print("DML PARAMS:     dp_lanes                   = %d\n", dout->dp_lanes);
                dml_print("DML PARAMS:     dsc_enable                 = %d\n", dout->dsc_enable);
index 0c51281..2ece369 100644 (file)
@@ -164,7 +164,7 @@ struct _vcs_dpi_ip_params_st {
        double writeback_max_vscl_ratio;
        double writeback_min_hscl_ratio;
        double writeback_min_vscl_ratio;
-       double maximum_dsc_bits_per_component;
+       unsigned int maximum_dsc_bits_per_component;
        unsigned int writeback_max_hscl_taps;
        unsigned int writeback_max_vscl_taps;
        unsigned int writeback_line_buffer_luma_buffer_size;
@@ -292,10 +292,10 @@ struct writeback_st {
 struct _vcs_dpi_display_output_params_st {
        int dp_lanes;
        double output_bpp;
+       unsigned int dsc_input_bpc;
        int dsc_enable;
        int wb_enable;
        int num_active_wb;
-       int output_bpc;
        int output_type;
        int is_virtual;
        int output_format;
index 94036a9..2a96745 100644 (file)
@@ -471,7 +471,13 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
                mode_lib->vba.DSCEnable[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;
                mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] =
                                dout->dsc_slices;
-               mode_lib->vba.DSCInputBitPerComponent[mode_lib->vba.NumberOfActivePlanes] = dout->output_bpc;
+               if (!dout->dsc_input_bpc) {
+                       mode_lib->vba.DSCInputBitPerComponent[mode_lib->vba.NumberOfActivePlanes] =
+                               ip->maximum_dsc_bits_per_component;
+               } else {
+                       mode_lib->vba.DSCInputBitPerComponent[mode_lib->vba.NumberOfActivePlanes] =
+                               dout->dsc_input_bpc;
+               }
                mode_lib->vba.WritebackEnable[mode_lib->vba.NumberOfActivePlanes] = dout->wb_enable;
                mode_lib->vba.ActiveWritebacksPerPlane[mode_lib->vba.NumberOfActivePlanes] =
                                dout->num_active_wb;
index 4c3e9cc..414da64 100644 (file)
@@ -344,13 +344,31 @@ static void handle_det_buf_split(
        if (surf_linear) {
                log2_swath_height_l = 0;
                log2_swath_height_c = 0;
-       } else if (!surf_vert) {
-               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
-               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
        } else {
-               log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
-               log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
+               unsigned int swath_height_l;
+               unsigned int swath_height_c;
+
+               if (!surf_vert) {
+                       swath_height_l = rq_param->misc.rq_l.blk256_height;
+                       swath_height_c = rq_param->misc.rq_c.blk256_height;
+               } else {
+                       swath_height_l = rq_param->misc.rq_l.blk256_width;
+                       swath_height_c = rq_param->misc.rq_c.blk256_width;
+               }
+
+               if (swath_height_l > 0)
+                       log2_swath_height_l = dml_log2(swath_height_l);
+
+               if (req128_l && log2_swath_height_l > 0)
+                       log2_swath_height_l -= 1;
+
+               if (swath_height_c > 0)
+                       log2_swath_height_c = dml_log2(swath_height_c);
+
+               if (req128_c && log2_swath_height_c > 0)
+                       log2_swath_height_c -= 1;
        }
+
        rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
        rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
 
index eb1a19b..81b92f2 100644 (file)
@@ -118,6 +118,27 @@ struct resource_funcs {
                display_e2e_pipe_params_st *pipes,
                bool fast_validate);
 
+       /*
+        * Algorithm for assigning available link encoders to links.
+        *
+        * Update link_enc_assignments table and link_enc_avail list accordingly in
+        * struct resource_context.
+        */
+       void (*link_encs_assign)(
+                       struct dc *dc,
+                       struct dc_state *state,
+                       struct dc_stream_state *streams[],
+                       uint8_t stream_count);
+       /*
+        * Unassign a link encoder from a stream.
+        *
+        * Update link_enc_assignments table and link_enc_avail list accordingly in
+        * struct resource_context.
+        */
+       void (*link_enc_unassign)(
+                       struct dc_state *state,
+                       struct dc_stream_state *stream);
+
        enum dc_status (*validate_global)(
                struct dc *dc,
                struct dc_state *context);
@@ -358,6 +379,12 @@ struct resource_context {
        uint8_t clock_source_ref_count[MAX_CLOCK_SOURCES];
        uint8_t dp_clock_source_ref_count;
        bool is_dsc_acquired[MAX_PIPES];
+       /* A table/array of encoder-to-link assignments. One entry per stream.
+        * Indexed by stream index in dc_state.
+        */
+       struct link_enc_assignment link_enc_assignments[MAX_PIPES];
+       /* List of available link encoders. Uses engine ID as encoder identifier. */
+       enum engine_id link_enc_avail[MAX_DIG_LINK_ENCODERS];
 #if defined(CONFIG_DRM_AMD_DC_DCN)
        bool is_mpc_3dlut_acquired[MAX_PIPES];
 #endif
index 3a29f37..5dc8d02 100644 (file)
@@ -262,14 +262,9 @@ struct clk_mgr_funcs {
 
        /* Get current memclk states from PMFW, update relevant structures */
        void (*get_memclk_states_from_smu)(struct clk_mgr *clk_mgr);
-};
-
-struct dpm_clocks;
-struct wartermarks;
 
-struct smu_watermark_set {
-       struct watermarks *wm_set;
-       union large_integer mc_address;
+       /* Get SMU present */
+       bool (*is_smu_present)(struct clk_mgr *clk_mgr);
 };
 
 struct clk_mgr {
@@ -283,7 +278,6 @@ struct clk_mgr {
        struct clk_state_registers_and_bypass boot_snapshot;
        struct clk_bw_params *bw_params;
        struct pp_smu_wm_range_sets ranges;
-       struct smu_watermark_set smu_wm_set;
 };
 
 /* forward declarations */
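
The new is_smu_present hook added above lets clock-manager users probe for a live SMU before issuing requests; a guard sketch (the fallback policy for a missing hook is an assumption):

static bool example_smu_ready(struct clk_mgr *clk_mgr)
{
        /* Treat an unimplemented hook as "present" so older clk_mgr
         * implementations keep working (assumed policy).
         */
        if (clk_mgr->funcs->is_smu_present)
                return clk_mgr->funcs->is_smu_present(clk_mgr);
        return true;
}
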
index 346dcd8..80e1a32 100644 (file)
@@ -29,6 +29,7 @@
 #include "mem_input.h"
 
 #define OPP_ID_INVALID 0xf
+#define MAX_TTU 0xffffff
 
 
 enum cursor_pitch {
index 7f5acd8..80bc995 100644 (file)
@@ -187,4 +187,17 @@ struct link_encoder_funcs {
                struct link_encoder *enc);
 };
 
+/*
+ * Used to track assignments of links (display endpoints) to link encoders.
+ *
+ * Entry in link_enc_assignments table in struct resource_context.
+ * An entry is marked valid only once an encoder is assigned to a link, and is invalidated on unassignment.
+ * Engine ID is used as the identifier since PHY ID is not relevant for USB4 DPIA endpoints.
+ */
+struct link_enc_assignment {
+       bool valid;
+       struct display_endpoint_id ep_id;
+       enum engine_id eng_id;
+};
+
 #endif /* LINK_ENCODER_H_ */
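
An assignment entry pairs a display endpoint with an engine; a sketch of filling a table slot when the stream at a given index claims an encoder (the real work is done by update_link_enc_assignment() in link_enc_cfg.c; this is illustrative only):

static void example_fill_assignment(struct resource_context *res_ctx,
                                    int stream_idx,
                                    struct display_endpoint_id ep_id,
                                    enum engine_id eng_id)
{
        res_ctx->link_enc_assignments[stream_idx] =
                (struct link_enc_assignment){
                        .valid = true,
                        .ep_id = ep_id,
                        .eng_id = eng_id,
                };
}
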
index 2fedfca..1d5853c 100644 (file)
@@ -118,8 +118,7 @@ struct hw_sequencer_funcs {
                        struct pipe_ctx *pipe_ctx,
                        enum vline_select vline);
        void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
-                       unsigned int vmin, unsigned int vmax,
-                       unsigned int vmid, unsigned int vmid_frame_number);
+                       struct dc_crtc_timing_adjust adjust);
        void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx,
                        int num_pipes,
                        const struct dc_static_screen_params *events);
@@ -218,6 +217,8 @@ struct hw_sequencer_funcs {
 
        void (*set_pipe)(struct pipe_ctx *pipe_ctx);
 
+       void (*get_dcc_en_bits)(struct dc *dc, int *dcc_en_bits);
+
        /* Idle Optimization Related */
        bool (*apply_idle_power_optimizations)(struct dc *dc, bool enable);
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
new file mode 100644 (file)
index 0000000..7d36e55
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_INC_LINK_ENC_CFG_H_
+#define DC_INC_LINK_ENC_CFG_H_
+
+/* This module implements functionality for dynamically assigning DIG link
+ * encoder resources to display endpoints (links).
+ */
+
+#include "core_types.h"
+
+/*
+ * Initialise link encoder resource tracking.
+ */
+void link_enc_cfg_init(
+               struct dc *dc,
+               struct dc_state *state);
+
+/*
+ * Algorithm for assigning available DIG link encoders to streams.
+ *
+ * Update link_enc_assignments table and link_enc_avail list accordingly in
+ * struct resource_context.
+ *
+ * Loop over all streams twice:
+ * a) First assign encoders to unmappable endpoints.
+ * b) Then assign encoders to mappable endpoints.
+ */
+void link_enc_cfg_link_encs_assign(
+               struct dc *dc,
+               struct dc_state *state,
+               struct dc_stream_state *streams[],
+               uint8_t stream_count);
+
+/*
+ * Unassign a link encoder from a stream.
+ *
+ * Update link_enc_assignments table and link_enc_avail list accordingly in
+ * struct resource_context.
+ */
+void link_enc_cfg_link_enc_unassign(
+               struct dc_state *state,
+               struct dc_stream_state *stream);
+
+/*
+ * Check whether the transmitter driven by a link encoder is a mappable
+ * endpoint.
+ */
+bool link_enc_cfg_is_transmitter_mappable(
+               struct dc_state *state,
+               struct link_encoder *link_enc);
+
+/* Return link using DIG link encoder resource. NULL if unused. */
+struct dc_link *link_enc_cfg_get_link_using_link_enc(
+               struct dc_state *state,
+               enum engine_id eng_id);
+
+/* Return DIG link encoder used by link. NULL if unused. */
+struct link_encoder *link_enc_cfg_get_link_enc_used_by_link(
+               struct dc_state *state,
+               struct dc_link *link);
+
+#endif /* DC_INC_LINK_ENC_CFG_H_ */
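
A usage sketch for the interface above, assuming dc_state exposes its streams
array and stream count as elsewhere in DC (the wrapper is hypothetical):

    /* Re-run DIG link encoder assignment after the set of streams changes. */
    static void reassign_link_encoders(struct dc *dc, struct dc_state *state)
    {
            link_enc_cfg_link_encs_assign(dc, state, state->streams,
                            state->stream_count);
    }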
index 6ee9dd8..1a5be27 100644 (file)
@@ -187,6 +187,10 @@ static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
        .ack = NULL
 };
 
+static const struct irq_source_info_funcs dmub_trace_irq_info_funcs = {
+       .set = NULL,
+       .ack = NULL
+};
 
 static const struct irq_source_info_funcs vline0_irq_info_funcs = {
        .set = NULL,
@@ -205,6 +209,9 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = {
        BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
                        mm ## block ## id ## _ ## reg_name
 
+#define SRI_DMUB(reg_name)\
+       BASE(mm ## reg_name ## _BASE_IDX) + \
+                       mm ## reg_name
 
 #define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
        .enable_reg = SRI(reg1, block, reg_num),\
@@ -220,7 +227,19 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = {
        .ack_value = \
                block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
 
-
+#define IRQ_REG_ENTRY_DMUB(reg1, mask1, reg2, mask2)\
+       .enable_reg = SRI_DMUB(reg1),\
+       .enable_mask = \
+               reg1 ## __ ## mask1 ## _MASK,\
+       .enable_value = {\
+               reg1 ## __ ## mask1 ## _MASK,\
+               ~reg1 ## __ ## mask1 ## _MASK \
+       },\
+       .ack_reg = SRI_DMUB(reg2),\
+       .ack_mask = \
+               reg2 ## __ ## mask2 ## _MASK,\
+       .ack_value = \
+               reg2 ## __ ## mask2 ## _MASK \
 
 #define hpd_int_entry(reg_num)\
        [DC_IRQ_SOURCE_HPD1 + reg_num] = {\
@@ -282,6 +301,13 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = {
                .funcs = &vline0_irq_info_funcs\
        }
 
+#define dmub_trace_int_entry()\
+       [DC_IRQ_SOURCE_DMCUB_OUTBOX0] = {\
+               IRQ_REG_ENTRY_DMUB(DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX0_READY_INT_EN,\
+                       DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX0_READY_INT_ACK),\
+               .funcs = &dmub_trace_irq_info_funcs\
+       }
+
 #define dummy_irq_entry() \
        {\
                .funcs = &dummy_irq_info_funcs\
@@ -400,6 +426,7 @@ irq_source_info_dcn21[DAL_IRQ_SOURCES_NUMBER] = {
        vline0_int_entry(3),
        vline0_int_entry(4),
        vline0_int_entry(5),
+       dmub_trace_int_entry(),
 };
 
 static const struct irq_service_funcs irq_service_funcs_dcn21 = {
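
For readability, an approximate expansion of dmub_trace_int_entry() using the
DCN2.1 macros above (illustrative, not verbatim preprocessor output):

    [DC_IRQ_SOURCE_DMCUB_OUTBOX0] = {
            .enable_reg = SRI_DMUB(DMCUB_INTERRUPT_ENABLE),
            .enable_mask = DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX0_READY_INT_EN_MASK,
            .enable_value = {
                    DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX0_READY_INT_EN_MASK,
                    ~DMCUB_INTERRUPT_ENABLE__DMCUB_OUTBOX0_READY_INT_EN_MASK
            },
            .ack_reg = SRI_DMUB(DMCUB_INTERRUPT_ACK),
            .ack_mask = DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX0_READY_INT_ACK_MASK,
            .ack_value = DMCUB_INTERRUPT_ACK__DMCUB_OUTBOX0_READY_INT_ACK_MASK,
            .funcs = &dmub_trace_irq_info_funcs
    },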
index 4ec6f6a..914ce2c 100644 (file)
@@ -215,6 +215,9 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = {
        BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
                        mm ## block ## id ## _ ## reg_name
 
+#define SRI_DMUB(reg_name)\
+       BASE(mm ## reg_name ## _BASE_IDX) + \
+                       mm ## reg_name
 
 #define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
        .enable_reg = SRI(reg1, block, reg_num),\
@@ -230,7 +233,19 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = {
        .ack_value = \
                block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
 
-
+#define IRQ_REG_ENTRY_DMUB(reg1, mask1, reg2, mask2)\
+       .enable_reg = SRI_DMUB(reg1),\
+       .enable_mask = \
+               reg1 ## __ ## mask1 ## _MASK,\
+       .enable_value = {\
+               reg1 ## __ ## mask1 ## _MASK,\
+               ~reg1 ## __ ## mask1 ## _MASK \
+       },\
+       .ack_reg = SRI_DMUB(reg2),\
+       .ack_mask = \
+               reg2 ## __ ## mask2 ## _MASK,\
+       .ack_value = \
+               reg2 ## __ ## mask2 ## _MASK \
 
 #define hpd_int_entry(reg_num)\
        [DC_IRQ_SOURCE_HPD1 + reg_num] = {\
@@ -284,6 +299,13 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = {
                .funcs = &vline0_irq_info_funcs\
        }
 
+#define dmub_trace_int_entry()\
+       [DC_IRQ_SOURCE_DMCUB_OUTBOX0] = {\
+               IRQ_REG_ENTRY_DMUB(DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX0_READY_INT_EN,\
+                       DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX0_READY_INT_ACK),\
+               .funcs = &dmub_trace_irq_info_funcs\
+       }
+
 #define dummy_irq_entry() \
        {\
                .funcs = &dummy_irq_info_funcs\
@@ -398,6 +420,7 @@ irq_source_info_dcn30[DAL_IRQ_SOURCES_NUMBER] = {
        vline0_int_entry(3),
        vline0_int_entry(4),
        vline0_int_entry(5),
+       dmub_trace_int_entry(),
 };
 
 static const struct irq_service_funcs irq_service_funcs_dcn30 = {
index 2313a56..40fd34f 100644 (file)
@@ -50,6 +50,8 @@ static enum dc_irq_source to_dal_irq_source_dcn302(struct irq_service *irq_servi
                return DC_IRQ_SOURCE_VBLANK5;
        case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
                return DC_IRQ_SOURCE_VBLANK6;
+       case DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT:
+               return DC_IRQ_SOURCE_DMCUB_OUTBOX0;
        case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
                return DC_IRQ_SOURCE_DC1_VLINE0;
        case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
@@ -166,6 +168,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
                .ack = NULL
 };
 
+static const struct irq_source_info_funcs dmub_trace_irq_info_funcs = {
+       .set = NULL,
+       .ack = NULL
+};
+
 static const struct irq_source_info_funcs vline0_irq_info_funcs = {
        .set = NULL,
        .ack = NULL
@@ -181,6 +188,9 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = {
                BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
                mm ## block ## id ## _ ## reg_name
 
+#define SRI_DMUB(reg_name)\
+               BASE(mm ## reg_name ## _BASE_IDX) + \
+                       mm ## reg_name
 
 #define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
                .enable_reg = SRI(reg1, block, reg_num),\
@@ -193,7 +203,26 @@ static const struct irq_source_info_funcs vline0_irq_info_funcs = {
                .ack_mask = block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
                .ack_value = block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
 
+#define dmub_trace_int_entry()\
+       [DC_IRQ_SOURCE_DMCUB_OUTBOX0] = {\
+               IRQ_REG_ENTRY_DMUB(DMCUB_INTERRUPT_ENABLE, DMCUB_OUTBOX0_READY_INT_EN,\
+                       DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX0_READY_INT_ACK),\
+               .funcs = &dmub_trace_irq_info_funcs\
+       }
 
+#define IRQ_REG_ENTRY_DMUB(reg1, mask1, reg2, mask2)\
+       .enable_reg = SRI_DMUB(reg1),\
+       .enable_mask = \
+               reg1 ## __ ## mask1 ## _MASK,\
+       .enable_value = {\
+               reg1 ## __ ## mask1 ## _MASK,\
+               ~reg1 ## __ ## mask1 ## _MASK \
+       },\
+       .ack_reg = SRI_DMUB(reg2),\
+       .ack_mask = \
+               reg2 ## __ ## mask2 ## _MASK,\
+       .ack_value = \
+               reg2 ## __ ## mask2 ## _MASK \
 
 #define hpd_int_entry(reg_num)\
                [DC_IRQ_SOURCE_HPD1 + reg_num] = {\
@@ -348,6 +377,7 @@ static const struct irq_source_info irq_source_info_dcn302[DAL_IRQ_SOURCES_NUMBE
                vline0_int_entry(2),
                vline0_int_entry(3),
                vline0_int_entry(4),
+               dmub_trace_int_entry(),
 };
 
 static const struct irq_service_funcs irq_service_funcs_dcn302 = {
index f07b348..4400383 100644 (file)
 
 /* Firmware versioning. */
 #ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0xc29b1734b
+#define DMUB_FW_VERSION_GIT_HASH 0x7f2db1846
 #define DMUB_FW_VERSION_MAJOR 0
 #define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 56
+#define DMUB_FW_VERSION_REVISION 59
 #define DMUB_FW_VERSION_TEST 0
 #define DMUB_FW_VERSION_VBIOS 0
 #define DMUB_FW_VERSION_HOTFIX 0
@@ -202,12 +202,7 @@ struct dmub_feature_caps {
         * Max PSR version supported by FW.
         */
        uint8_t psr;
-#ifndef TRIM_FAMS
-       uint8_t fw_assisted_mclk_switch;
-       uint8_t reserved[6];
-#else
        uint8_t reserved[7];
-#endif
 };
 
 #if defined(__cplusplus)
@@ -532,10 +527,6 @@ enum dmub_cmd_type {
         * Command type used for OUTBOX1 notification enable
         */
        DMUB_CMD__OUTBOX1_ENABLE = 71,
-#ifndef TRIM_FAMS
-       DMUB_CMD__FW_ASSISTED_MCLK_SWITCH = 76,
-#endif
-
        /**
         * Command type used for all VBIOS interface commands.
         */
@@ -1115,13 +1106,6 @@ enum dmub_cmd_psr_type {
        DMUB_CMD__PSR_FORCE_STATIC              = 5,
 };
 
-#ifndef TRIM_FAMS
-enum dmub_cmd_fams_type {
-       DMUB_CMD__FAMS_SETUP_FW_CTRL    = 0,
-       DMUB_CMD__FAMS_DRR_UPDATE               = 1,
-};
-#endif
-
 /**
  * PSR versions.
  */
@@ -1245,6 +1229,19 @@ struct dmub_cmd_psr_copy_settings_data {
         * Length of each horizontal line in us.
         */
        uint32_t line_time_in_us;
+       /**
+        * FEC enable status in driver
+        */
+       uint8_t fec_enable_status;
+       /**
+        * FEC re-enable delay on PSR exit.
+        * Unit is 100 us; valid range is 0-255 (0xFF).
+        */
+       uint8_t fec_enable_delay_in100us;
+       /**
+        * Explicit padding to 4 byte boundary.
+        */
+       uint8_t pad3[2];
 };
 
 /**
@@ -1791,24 +1788,6 @@ struct dmub_rb_cmd_drr_update {
                struct dmub_optc_state dmub_optc_state_req;
 };
 
-#ifndef TRIM_FAMS
-struct dmub_cmd_fw_assisted_mclk_switch_pipe_data {
-       uint32_t pix_clk_100hz;
-       uint32_t min_refresh_in_uhz;
-       uint32_t max_ramp_step;
-};
-
-struct dmub_cmd_fw_assisted_mclk_switch_config {
-       uint32_t fams_enabled;
-       struct dmub_cmd_fw_assisted_mclk_switch_pipe_data pipe_data[DMUB_MAX_STREAMS];
-};
-
-struct dmub_rb_cmd_fw_assisted_mclk_switch {
-       struct dmub_cmd_header header;
-       struct dmub_cmd_fw_assisted_mclk_switch_config config_data;
-};
-#endif
-
 /**
  * Data passed from driver to FW in a DMUB_CMD__VBIOS_LVTMA_CONTROL command.
  */
@@ -1951,9 +1930,6 @@ union dmub_rb_cmd {
         */
        struct dmub_rb_cmd_query_feature_caps query_feature_caps;
        struct dmub_rb_cmd_drr_update drr_update;
-#ifndef TRIM_FAMS
-       struct dmub_rb_cmd_fw_assisted_mclk_switch fw_assisted_mclk_switch;
-#endif
        /**
         * Definition of a DMUB_CMD__VBIOS_LVTMA_CONTROL command.
         */
index 8ba0a9e..1cbb125 100644 (file)
@@ -415,6 +415,12 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
        if (!dmub->sw_init)
                return DMUB_STATUS_INVALID;
 
+       if (!inst_fb || !stack_fb || !data_fb || !bios_fb || !mail_fb ||
+               !tracebuff_fb || !fw_state_fb || !scratch_mem_fb) {
+               ASSERT(0);
+               return DMUB_STATUS_INVALID;
+       }
+
        dmub->fb_base = params->fb_base;
        dmub->fb_offset = params->fb_offset;
        dmub->psp_version = params->psp_version;
@@ -422,101 +428,89 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
        if (dmub->hw_funcs.reset)
                dmub->hw_funcs.reset(dmub);
 
-       if (inst_fb && data_fb) {
-               cw0.offset.quad_part = inst_fb->gpu_addr;
-               cw0.region.base = DMUB_CW0_BASE;
-               cw0.region.top = cw0.region.base + inst_fb->size - 1;
-
-               cw1.offset.quad_part = stack_fb->gpu_addr;
-               cw1.region.base = DMUB_CW1_BASE;
-               cw1.region.top = cw1.region.base + stack_fb->size - 1;
-
-               if (params->load_inst_const && dmub->hw_funcs.backdoor_load) {
-                   /**
-                    * Read back all the instruction memory so we don't hang the
-                    * DMCUB when backdoor loading if the write from x86 hasn't been
-                    * flushed yet. This only occurs in backdoor loading.
-                    */
-                   dmub_flush_buffer_mem(inst_fb);
-                   dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1);
-               }
-
-       }
-
-       if (inst_fb && data_fb && bios_fb && mail_fb && tracebuff_fb &&
-           fw_state_fb && scratch_mem_fb) {
-               cw2.offset.quad_part = data_fb->gpu_addr;
-               cw2.region.base = DMUB_CW0_BASE + inst_fb->size;
-               cw2.region.top = cw2.region.base + data_fb->size;
-
-               cw3.offset.quad_part = bios_fb->gpu_addr;
-               cw3.region.base = DMUB_CW3_BASE;
-               cw3.region.top = cw3.region.base + bios_fb->size;
+       cw0.offset.quad_part = inst_fb->gpu_addr;
+       cw0.region.base = DMUB_CW0_BASE;
+       cw0.region.top = cw0.region.base + inst_fb->size - 1;
 
-               cw4.offset.quad_part = mail_fb->gpu_addr;
-               cw4.region.base = DMUB_CW4_BASE;
-               cw4.region.top = cw4.region.base + mail_fb->size;
+       cw1.offset.quad_part = stack_fb->gpu_addr;
+       cw1.region.base = DMUB_CW1_BASE;
+       cw1.region.top = cw1.region.base + stack_fb->size - 1;
 
+       if (params->load_inst_const && dmub->hw_funcs.backdoor_load) {
                /**
-                * Doubled the mailbox region to accomodate inbox and outbox.
-                * Note: Currently, currently total mailbox size is 16KB. It is split
-                * equally into 8KB between inbox and outbox. If this config is
-                * changed, then uncached base address configuration of outbox1
-                * has to be updated in funcs->setup_out_mailbox.
+                * Read back all the instruction memory so we don't hang the
+                * DMCUB when backdoor loading if the write from x86 hasn't been
+                * flushed yet. This only occurs in backdoor loading.
                 */
-               inbox1.base = cw4.region.base;
-               inbox1.top = cw4.region.base + DMUB_RB_SIZE;
-               outbox1.base = inbox1.top;
-               outbox1.top = cw4.region.top;
+               dmub_flush_buffer_mem(inst_fb);
+               dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1);
+       }
 
-               cw5.offset.quad_part = tracebuff_fb->gpu_addr;
-               cw5.region.base = DMUB_CW5_BASE;
-               cw5.region.top = cw5.region.base + tracebuff_fb->size;
+       cw2.offset.quad_part = data_fb->gpu_addr;
+       cw2.region.base = DMUB_CW0_BASE + inst_fb->size;
+       cw2.region.top = cw2.region.base + data_fb->size;
 
-               outbox0.base = DMUB_REGION5_BASE + TRACE_BUFFER_ENTRY_OFFSET;
-               outbox0.top = outbox0.base + tracebuff_fb->size - TRACE_BUFFER_ENTRY_OFFSET;
+       cw3.offset.quad_part = bios_fb->gpu_addr;
+       cw3.region.base = DMUB_CW3_BASE;
+       cw3.region.top = cw3.region.base + bios_fb->size;
 
+       cw4.offset.quad_part = mail_fb->gpu_addr;
+       cw4.region.base = DMUB_CW4_BASE;
+       cw4.region.top = cw4.region.base + mail_fb->size;
 
-               cw6.offset.quad_part = fw_state_fb->gpu_addr;
-               cw6.region.base = DMUB_CW6_BASE;
-               cw6.region.top = cw6.region.base + fw_state_fb->size;
+       /**
+        * The mailbox region is doubled to accommodate an inbox and an outbox.
+        * Note: the total mailbox size is currently 16KB, split equally into
+        * 8KB for the inbox and 8KB for the outbox. If this configuration
+        * changes, the uncached base address configuration of outbox1 must be
+        * updated in funcs->setup_out_mailbox.
+        */
+       inbox1.base = cw4.region.base;
+       inbox1.top = cw4.region.base + DMUB_RB_SIZE;
+       outbox1.base = inbox1.top;
+       outbox1.top = cw4.region.top;
 
-               dmub->fw_state = fw_state_fb->cpu_addr;
+       cw5.offset.quad_part = tracebuff_fb->gpu_addr;
+       cw5.region.base = DMUB_CW5_BASE;
+       cw5.region.top = cw5.region.base + tracebuff_fb->size;
 
-               dmub->scratch_mem_fb = *scratch_mem_fb;
+       outbox0.base = DMUB_REGION5_BASE + TRACE_BUFFER_ENTRY_OFFSET;
+       outbox0.top = outbox0.base + tracebuff_fb->size - TRACE_BUFFER_ENTRY_OFFSET;
 
-               if (dmub->hw_funcs.setup_windows)
-                       dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4,
-                                                    &cw5, &cw6);
+       cw6.offset.quad_part = fw_state_fb->gpu_addr;
+       cw6.region.base = DMUB_CW6_BASE;
+       cw6.region.top = cw6.region.base + fw_state_fb->size;
 
-               if (dmub->hw_funcs.setup_outbox0)
-                       dmub->hw_funcs.setup_outbox0(dmub, &outbox0);
+       dmub->fw_state = fw_state_fb->cpu_addr;
 
-               if (dmub->hw_funcs.setup_mailbox)
-                       dmub->hw_funcs.setup_mailbox(dmub, &inbox1);
-               if (dmub->hw_funcs.setup_out_mailbox)
-                       dmub->hw_funcs.setup_out_mailbox(dmub, &outbox1);
-       }
+       dmub->scratch_mem_fb = *scratch_mem_fb;
 
-       if (mail_fb) {
-               dmub_memset(&rb_params, 0, sizeof(rb_params));
-               rb_params.ctx = dmub;
-               rb_params.base_address = mail_fb->cpu_addr;
-               rb_params.capacity = DMUB_RB_SIZE;
+       if (dmub->hw_funcs.setup_windows)
+               dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6);
 
-               dmub_rb_init(&dmub->inbox1_rb, &rb_params);
+       if (dmub->hw_funcs.setup_outbox0)
+               dmub->hw_funcs.setup_outbox0(dmub, &outbox0);
 
-               // Initialize outbox1 ring buffer
-               rb_params.ctx = dmub;
-               rb_params.base_address = (void *) ((uint8_t *) (mail_fb->cpu_addr) + DMUB_RB_SIZE);
-               rb_params.capacity = DMUB_RB_SIZE;
-               dmub_rb_init(&dmub->outbox1_rb, &rb_params);
+       if (dmub->hw_funcs.setup_mailbox)
+               dmub->hw_funcs.setup_mailbox(dmub, &inbox1);
+       if (dmub->hw_funcs.setup_out_mailbox)
+               dmub->hw_funcs.setup_out_mailbox(dmub, &outbox1);
 
-       }
+       dmub_memset(&rb_params, 0, sizeof(rb_params));
+       rb_params.ctx = dmub;
+       rb_params.base_address = mail_fb->cpu_addr;
+       rb_params.capacity = DMUB_RB_SIZE;
+       dmub_rb_init(&dmub->inbox1_rb, &rb_params);
+
+       // Initialize outbox1 ring buffer
+       rb_params.ctx = dmub;
+       rb_params.base_address = (void *) ((uint8_t *) (mail_fb->cpu_addr) + DMUB_RB_SIZE);
+       rb_params.capacity = DMUB_RB_SIZE;
+       dmub_rb_init(&dmub->outbox1_rb, &rb_params);
 
        dmub_memset(&outbox0_rb_params, 0, sizeof(outbox0_rb_params));
        outbox0_rb_params.ctx = dmub;
-       outbox0_rb_params.base_address = (void *)((uint64_t)(tracebuff_fb->cpu_addr) + TRACE_BUFFER_ENTRY_OFFSET);
+       outbox0_rb_params.base_address = (void *)((uintptr_t)(tracebuff_fb->cpu_addr) + TRACE_BUFFER_ENTRY_OFFSET);
        outbox0_rb_params.capacity = tracebuff_fb->size - dmub_align(TRACE_BUFFER_ENTRY_OFFSET, 64);
        dmub_rb_init(&dmub->outbox0_rb, &outbox0_rb_params);
 
@@ -653,6 +647,8 @@ dmub_srv_send_gpint_command(struct dmub_srv *dmub,
        dmub->hw_funcs.set_gpint(dmub, reg);
 
        for (i = 0; i < timeout_us; ++i) {
+               udelay(1);
+
                if (dmub->hw_funcs.is_gpint_acked(dmub, reg))
                        return DMUB_STATUS_OK;
        }
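
With the added udelay(1), each polling iteration now takes roughly one
microsecond, so timeout_us bounds the wait in (approximately) real time
rather than in raw register-read iterations; e.g. a caller passing
timeout_us = 30000 waits up to ~30 ms for the GPINT ack before the loop
falls through.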
index 21bbee1..571fcf2 100644 (file)
@@ -36,6 +36,9 @@
 #define DC_LOG_DC(...) DRM_DEBUG_KMS(__VA_ARGS__)
 #define DC_LOG_DTN(...) DRM_DEBUG_KMS(__VA_ARGS__)
 #define DC_LOG_SURFACE(...) pr_debug("[SURFACE]:"__VA_ARGS__)
+#define DC_LOG_CURSOR(...) pr_debug("[CURSOR]:"__VA_ARGS__)
+#define DC_LOG_PFLIP(...) pr_debug("[PFLIP]:"__VA_ARGS__)
+#define DC_LOG_VBLANK(...) pr_debug("[VBLANK]:"__VA_ARGS__)
 #define DC_LOG_HW_HOTPLUG(...) DRM_DEBUG_KMS(__VA_ARGS__)
 #define DC_LOG_HW_LINK_TRAINING(...) pr_debug("[HW_LINK_TRAINING]:"__VA_ARGS__)
 #define DC_LOG_HW_SET_MODE(...) DRM_DEBUG_KMS(__VA_ARGS__)
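
A usage sketch for the new log classes (they route to pr_debug(), so output
is gated by dynamic debug; the messages and arguments here are hypothetical):

    DC_LOG_CURSOR("cursor moved to (%d, %d)\n", x, y);
    DC_LOG_PFLIP("pageflip submitted on crtc %d\n", crtc_id);
    DC_LOG_VBLANK("vblank irq on otg %d\n", otg_inst);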
index 5c67e12..ef742d9 100644 (file)
@@ -942,7 +942,7 @@ static void hermite_spline_eetf(struct fixed31_32 input_x,
 static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
                uint32_t hw_points_num,
                const struct hw_x_point *coordinate_x,
-               const struct freesync_hdr_tf_params *fs_params,
+               const struct hdr_tm_params *fs_params,
                struct calculate_buffer *cal_buffer)
 {
        uint32_t i;
@@ -2027,7 +2027,7 @@ rgb_user_alloc_fail:
 static bool calculate_curve(enum dc_transfer_func_predefined trans,
                                struct dc_transfer_func_distributed_points *points,
                                struct pwl_float_data_ex *rgb_regamma,
-                               const struct freesync_hdr_tf_params *fs_params,
+                               const struct hdr_tm_params *fs_params,
                                uint32_t sdr_ref_white_level,
                                struct calculate_buffer *cal_buffer)
 {
@@ -2106,7 +2106,7 @@ static bool calculate_curve(enum dc_transfer_func_predefined trans,
 
 bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
                const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
-               const struct freesync_hdr_tf_params *fs_params,
+               const struct hdr_tm_params *fs_params,
                struct calculate_buffer *cal_buffer)
 {
        struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
index 7563457..2893abf 100644 (file)
@@ -76,7 +76,7 @@ struct regamma_lut {
        };
 };
 
-struct freesync_hdr_tf_params {
+struct hdr_tm_params {
        unsigned int sdr_white_level;
        unsigned int min_content; // luminance in 1/10000 nits
        unsigned int max_content; // luminance in nits
@@ -108,7 +108,7 @@ void precompute_de_pq(void);
 
 bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
                const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
-               const struct freesync_hdr_tf_params *fs_params,
+               const struct hdr_tm_params *fs_params,
                struct calculate_buffer *cal_buffer);
 
 bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
index e5f9d77..3f4f44b 100644 (file)
@@ -118,7 +118,7 @@ static unsigned int calc_duration_in_us_from_v_total(
        return duration_in_us;
 }
 
-static unsigned int calc_v_total_from_refresh(
+unsigned int mod_freesync_calc_v_total_from_refresh(
                const struct dc_stream_state *stream,
                unsigned int refresh_in_uhz)
 {
@@ -280,10 +280,10 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
 
                /* Restore FreeSync */
                in_out_vrr->adjust.v_total_min =
-                       calc_v_total_from_refresh(stream,
+                       mod_freesync_calc_v_total_from_refresh(stream,
                                in_out_vrr->max_refresh_in_uhz);
                in_out_vrr->adjust.v_total_max =
-                       calc_v_total_from_refresh(stream,
+                       mod_freesync_calc_v_total_from_refresh(stream,
                                in_out_vrr->min_refresh_in_uhz);
        /* BTR set to "active" so engage */
        } else {
@@ -442,16 +442,16 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync,
        if (update) {
                if (in_out_vrr->fixed.fixed_active) {
                        in_out_vrr->adjust.v_total_min =
-                               calc_v_total_from_refresh(
+                               mod_freesync_calc_v_total_from_refresh(
                                stream, in_out_vrr->max_refresh_in_uhz);
                        in_out_vrr->adjust.v_total_max =
                                        in_out_vrr->adjust.v_total_min;
                } else {
                        in_out_vrr->adjust.v_total_min =
-                               calc_v_total_from_refresh(stream,
+                               mod_freesync_calc_v_total_from_refresh(stream,
                                        in_out_vrr->max_refresh_in_uhz);
                        in_out_vrr->adjust.v_total_max =
-                               calc_v_total_from_refresh(stream,
+                               mod_freesync_calc_v_total_from_refresh(stream,
                                        in_out_vrr->min_refresh_in_uhz);
                }
        }
@@ -543,8 +543,8 @@ static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr,
                infopacket->sb[6] |= 0x02;
 
        /* PB6 = [Bit 2 = FreeSync Active] */
-       if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
-                       vrr->state == VRR_STATE_ACTIVE_FIXED)
+       if (vrr->state != VRR_STATE_DISABLED &&
+                       vrr->state != VRR_STATE_UNSUPPORTED)
                infopacket->sb[6] |= 0x04;
 
        // For v1 & 2 infoframes program nominal if non-fs mode, otherwise full range
@@ -1082,10 +1082,10 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
                        refresh_range >= MIN_REFRESH_RANGE) {
 
                in_out_vrr->adjust.v_total_min =
-                       calc_v_total_from_refresh(stream,
+                       mod_freesync_calc_v_total_from_refresh(stream,
                                in_out_vrr->max_refresh_in_uhz);
                in_out_vrr->adjust.v_total_max =
-                       calc_v_total_from_refresh(stream,
+                       mod_freesync_calc_v_total_from_refresh(stream,
                                in_out_vrr->min_refresh_in_uhz);
        } else if (in_out_vrr->state == VRR_STATE_ACTIVE_FIXED) {
                in_out_vrr->fixed.target_refresh_in_uhz =
@@ -1099,7 +1099,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
                } else {
                        in_out_vrr->fixed.fixed_active = true;
                        in_out_vrr->adjust.v_total_min =
-                               calc_v_total_from_refresh(stream,
+                               mod_freesync_calc_v_total_from_refresh(stream,
                                        in_out_vrr->fixed.target_refresh_in_uhz);
                        in_out_vrr->adjust.v_total_max =
                                in_out_vrr->adjust.v_total_min;
@@ -1206,10 +1206,10 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
                /* Restore FreeSync */
                if (in_out_vrr->btr.frame_counter == 0) {
                        in_out_vrr->adjust.v_total_min =
-                               calc_v_total_from_refresh(stream,
+                               mod_freesync_calc_v_total_from_refresh(stream,
                                in_out_vrr->max_refresh_in_uhz);
                        in_out_vrr->adjust.v_total_max =
-                               calc_v_total_from_refresh(stream,
+                               mod_freesync_calc_v_total_from_refresh(stream,
                                in_out_vrr->min_refresh_in_uhz);
                }
        }
@@ -1267,6 +1267,21 @@ unsigned long long mod_freesync_calc_nominal_field_rate(
        return nominal_field_rate_in_uhz;
 }
 
+unsigned long long mod_freesync_calc_field_rate_from_timing(
+               unsigned int vtotal, unsigned int htotal, unsigned int pix_clk)
+{
+       unsigned long long field_rate_in_uhz = 0;
+       unsigned int total = htotal * vtotal;
+
+       /* Calculate the field rate in uHz from the given timing parameters */
+       field_rate_in_uhz = pix_clk;
+       field_rate_in_uhz *= 1000000ULL;
+
+       field_rate_in_uhz = div_u64(field_rate_in_uhz, total);
+
+       return field_rate_in_uhz;
+}
+
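
A worked example for the calculation above, assuming pix_clk is supplied in
Hz (the unit is not shown in this hunk): for 1920x1080@60 with htotal = 2200,
vtotal = 1125 and pix_clk = 148,500,000 Hz,

    total             = 2200 * 1125 = 2,475,000
    field_rate_in_uhz = 148,500,000 * 1,000,000 / 2,475,000
                      = 60,000,000 uHz  (i.e. 60 Hz)

Note that div_u64() truncates, so the result rounds down rather than to the
nearest integer.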
 bool mod_freesync_is_valid_range(uint32_t min_refresh_cap_in_uhz,
                uint32_t max_refresh_cap_in_uhz,
                uint32_t nominal_field_rate_in_uhz) 
index 20e554e..68a6481 100644 (file)
@@ -53,7 +53,7 @@ static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp)
         */
        for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
                if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
-                               !hdcp->displays[i].adjust.disable) {
+                               hdcp->displays[i].adjust.disable != MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION) {
                        is_auth_needed = 1;
                        break;
                }
@@ -74,7 +74,7 @@ static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp)
         */
        for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
                if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
-                               !hdcp->displays[i].adjust.disable) {
+                               hdcp->displays[i].adjust.disable != MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION) {
                        is_auth_needed = 1;
                        break;
                }
@@ -314,6 +314,9 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
                goto out;
        }
 
+       /* save current encryption states to restore after next authentication */
+       mod_hdcp_save_current_encryption_states(hdcp);
+
        /* reset existing authentication status */
        status = reset_authentication(hdcp, output);
        if (status != MOD_HDCP_STATUS_SUCCESS)
@@ -360,6 +363,9 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
                goto out;
        }
 
+       /* save current encryption states to restore after next authentication */
+       mod_hdcp_save_current_encryption_states(hdcp);
+
        /* stop current authentication */
        status = reset_authentication(hdcp, output);
        if (status != MOD_HDCP_STATUS_SUCCESS)
index 5c22cf7..3ce91db 100644 (file)
@@ -331,6 +331,8 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(
                struct mod_hdcp *hdcp, struct mod_hdcp_display *display);
 enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
                struct mod_hdcp *hdcp, uint8_t index);
+bool mod_hdcp_is_link_encryption_enabled(struct mod_hdcp *hdcp);
+void mod_hdcp_save_current_encryption_states(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp);
@@ -339,8 +341,6 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(
        struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp);
-enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp,
-                                                              enum mod_hdcp_encryption_status *encryption_status);
 enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp);
 enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp);
index 73ca49f..eeac143 100644 (file)
@@ -256,10 +256,12 @@ static enum mod_hdcp_status authenticated(struct mod_hdcp *hdcp,
                goto out;
        }
 
-       if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_link_maintenance,
+       mod_hdcp_execute_and_set(mod_hdcp_hdcp1_link_maintenance,
                        &input->link_maintenance, &status,
-                       hdcp, "link_maintenance"))
-               goto out;
+                       hdcp, "link_maintenance");
+
+       if (status != MOD_HDCP_STATUS_SUCCESS)
+               mod_hdcp_save_current_encryption_states(hdcp);
 out:
        return status;
 }
@@ -425,19 +427,24 @@ static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp,
                event_ctx->unexpected_event = 1;
                goto out;
        }
-
-       if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
-                       &input->bstatus_read, &status,
-                       hdcp, "bstatus_read"))
-               goto out;
-       if (!mod_hdcp_execute_and_set(check_link_integrity_dp,
-                       &input->link_integrity_check, &status,
-                       hdcp, "link_integrity_check"))
-               goto out;
-       if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
-                       &input->reauth_request_check, &status,
-                       hdcp, "reauth_request_check"))
+       if (!mod_hdcp_is_link_encryption_enabled(hdcp))
                goto out;
+
+       if (status == MOD_HDCP_STATUS_SUCCESS)
+               mod_hdcp_execute_and_set(mod_hdcp_read_bstatus,
+                               &input->bstatus_read, &status,
+                               hdcp, "bstatus_read");
+       if (status == MOD_HDCP_STATUS_SUCCESS)
+               mod_hdcp_execute_and_set(check_link_integrity_dp,
+                               &input->link_integrity_check, &status,
+                               hdcp, "link_integrity_check");
+       if (status == MOD_HDCP_STATUS_SUCCESS)
+               mod_hdcp_execute_and_set(check_no_reauthentication_request_dp,
+                               &input->reauth_request_check, &status,
+                               hdcp, "reauth_request_check");
+
+       if (status != MOD_HDCP_STATUS_SUCCESS)
+               mod_hdcp_save_current_encryption_states(hdcp);
 out:
        return status;
 }
index 24ab95b..3dda8c1 100644 (file)
@@ -93,7 +93,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
                }
                break;
        case H1_A45_AUTHENTICATED:
-               if (input->link_maintenance != PASS) {
+               if (input->link_maintenance == FAIL) {
                        /* 1A-07: consider invalid ri' a failure */
                        /* 1A-07a: consider read ri' not returned a failure */
                        fail_and_restart_in_ms(0, &status, output);
@@ -243,8 +243,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
                }
                break;
        case D1_A4_AUTHENTICATED:
-               if (input->link_integrity_check != PASS ||
-                               input->reauth_request_check != PASS) {
+               if (input->link_integrity_check == FAIL ||
+                               input->reauth_request_check == FAIL) {
                        /* 1A-07: restart hdcp on a link integrity failure */
                        fail_and_restart_in_ms(0, &status, output);
                        break;
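
The switch from "!= PASS" to "== FAIL" changes how not-yet-run checks are
treated; assuming the usual tri-state input values (UNKNOWN/PASS/FAIL), the
behavior in the authenticated states becomes:

    UNKNOWN -> stay authenticated (check not run this cycle)
    PASS    -> stay authenticated
    FAIL    -> fail_and_restart_in_ms(0, &status, output)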
index a0895a7..f164f6a 100644 (file)
@@ -564,11 +564,13 @@ static enum mod_hdcp_status authenticated(struct mod_hdcp *hdcp,
                event_ctx->unexpected_event = 1;
                goto out;
        }
-
-       if (!process_rxstatus(hdcp, event_ctx, input, &status))
-               goto out;
-       if (event_ctx->rx_id_list_ready)
+       if (!mod_hdcp_is_link_encryption_enabled(hdcp))
                goto out;
+
+       process_rxstatus(hdcp, event_ctx, input, &status);
+
+       if (status != MOD_HDCP_STATUS_SUCCESS)
+               mod_hdcp_save_current_encryption_states(hdcp);
 out:
        return status;
 }
index e738c7a..b0306ed 100644 (file)
@@ -245,8 +245,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
                HDCP_FULL_DDC_TRACE(hdcp);
                break;
        case H2_A5_AUTHENTICATED:
-               if (input->rxstatus_read != PASS ||
-                               input->reauth_request_check != PASS) {
+               if (input->rxstatus_read == FAIL ||
+                               input->reauth_request_check == FAIL) {
                        fail_and_restart_in_ms(0, &status, output);
                        break;
                } else if (event_ctx->rx_id_list_ready && conn->is_repeater) {
@@ -562,11 +562,11 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
                HDCP_FULL_DDC_TRACE(hdcp);
                break;
        case D2_A5_AUTHENTICATED:
-               if (input->rxstatus_read != PASS ||
-                               input->reauth_request_check != PASS) {
+               if (input->rxstatus_read == FAIL ||
+                               input->reauth_request_check == FAIL) {
                        fail_and_restart_in_ms(0, &status, output);
                        break;
-               } else if (input->link_integrity_check_dp != PASS) {
+               } else if (input->link_integrity_check_dp == FAIL) {
                        if (hdcp->connection.hdcp2_retry_count >= 1)
                                adjust->hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
                        fail_and_restart_in_ms(0, &status, output);
index 904ce9b..9d7ca31 100644 (file)
@@ -914,3 +914,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
        return status;
 }
 
+bool mod_hdcp_is_link_encryption_enabled(struct mod_hdcp *hdcp)
+{
+       /* unsupported */
+       return true;
+}
+
+void mod_hdcp_save_current_encryption_states(struct mod_hdcp *hdcp)
+{
+       /* unsupported */
+}
index b64cd5b..75a158a 100644 (file)
@@ -171,10 +171,15 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
 unsigned long long mod_freesync_calc_nominal_field_rate(
                        const struct dc_stream_state *stream);
 
+unsigned long long mod_freesync_calc_field_rate_from_timing(
+               unsigned int vtotal, unsigned int htotal, unsigned int pix_clk);
+
 bool mod_freesync_is_valid_range(uint32_t min_refresh_cap_in_uhz,
                uint32_t max_refresh_cap_in_uhz,
                uint32_t nominal_field_rate_in_uhz);
 
-
+unsigned int mod_freesync_calc_v_total_from_refresh(
+               const struct dc_stream_state *stream,
+               unsigned int refresh_in_uhz);
 
 #endif
index d223ed3..acbeada 100644 (file)
@@ -120,6 +120,12 @@ enum mod_hdcp_display_state {
        MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED
 };
 
+enum mod_hdcp_display_disable_option {
+       MOD_HDCP_DISPLAY_NOT_DISABLE = 0,
+       MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION,
+       MOD_HDCP_DISPLAY_DISABLE_ENCRYPTION,
+};
+
 struct mod_hdcp_ddc {
        void *handle;
        struct {
@@ -149,8 +155,8 @@ struct mod_hdcp_psp {
 };
 
 struct mod_hdcp_display_adjustment {
-       uint8_t disable                 : 1;
-       uint8_t reserved                : 7;
+       uint8_t disable                 : 2;
+       uint8_t reserved                : 6;
 };
 
 struct mod_hdcp_link_adjustment_hdcp1 {
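
An illustrative use of the widened adjustment field: a caller can now request
that only encryption be disabled while authentication still proceeds (a
sketch, not a confirmed call site):

    display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_ENCRYPTION;

With the two-bit encoding, is_cp_desired_hdcp1/2() above treat every value
except MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION as still requiring
authentication.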
@@ -255,8 +261,6 @@ struct mod_hdcp_config {
        uint8_t index;
 };
 
-struct mod_hdcp;
-
 /* dm allocates memory of mod_hdcp per dc_link on dm init based on memory size*/
 size_t mod_hdcp_get_memory_size(void);
 
index 0102487..f21554a 100644 (file)
 #define mmCP_CE_IB2_BASE_HI_BASE_IDX                                                                   1
 #define mmCP_CE_IB2_BUFSZ                                                                              0x20cb
 #define mmCP_CE_IB2_BUFSZ_BASE_IDX                                                                     1
+#define mmCP_IB1_BASE_LO                                                                               0x20cc
+#define mmCP_IB1_BASE_LO_BASE_IDX                                                                      1
+#define mmCP_IB1_BASE_HI                                                                               0x20cd
+#define mmCP_IB1_BASE_HI_BASE_IDX                                                                      1
+#define mmCP_IB1_BUFSZ                                                                                 0x20ce
+#define mmCP_IB1_BUFSZ_BASE_IDX                                                                        1
 #define mmCP_IB2_BASE_LO                                                                               0x20cf
 #define mmCP_IB2_BASE_LO_BASE_IDX                                                                      1
 #define mmCP_IB2_BASE_HI                                                                               0x20d0
index 4d2a143..a827b0f 100644 (file)
 //CP_CE_IB2_BUFSZ
 #define CP_CE_IB2_BUFSZ__IB2_BUFSZ__SHIFT                                                                     0x0
 #define CP_CE_IB2_BUFSZ__IB2_BUFSZ_MASK                                                                       0x000FFFFFL
+//CP_IB1_BASE_LO
+#define CP_IB1_BASE_LO__IB1_BASE_LO__SHIFT                                                                    0x2
+#define CP_IB1_BASE_LO__IB1_BASE_LO_MASK                                                                      0xFFFFFFFCL
+//CP_IB1_BASE_HI
+#define CP_IB1_BASE_HI__IB1_BASE_HI__SHIFT                                                                    0x0
+#define CP_IB1_BASE_HI__IB1_BASE_HI_MASK                                                                      0x0000FFFFL
+//CP_IB1_BUFSZ
+#define CP_IB1_BUFSZ__IB1_BUFSZ__SHIFT                                                                        0x0
+#define CP_IB1_BUFSZ__IB1_BUFSZ_MASK                                                                          0x000FFFFFL
 //CP_IB2_BASE_LO
 #define CP_IB2_BASE_LO__IB2_BASE_LO__SHIFT                                                                    0x2
 #define CP_IB2_BASE_LO__IB2_BASE_LO_MASK                                                                      0xFFFFFFFCL
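
An illustrative field extraction using the new IB1 defines (plain C; drivers
typically wrap this in REG_GET-style helpers):

    uint32_t ib1_base_lo = (reg_val & CP_IB1_BASE_LO__IB1_BASE_LO_MASK)
                            >> CP_IB1_BASE_LO__IB1_BASE_LO__SHIFT;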
index c1d7b1d..47eb845 100644 (file)
@@ -1987,9 +1987,9 @@ typedef struct _PIXEL_CLOCK_PARAMETERS_V6
 #define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK           0x0c
 #define PIXEL_CLOCK_V6_MISC_HDMI_24BPP              0x00
 #define PIXEL_CLOCK_V6_MISC_HDMI_36BPP              0x04
-#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6           0x08    //for V6, the correct defintion for 36bpp should be 2 for 36bpp(2:1)
+#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6           0x08    //for V6, the correct definition for 36bpp should be 2 for 36bpp(2:1)
 #define PIXEL_CLOCK_V6_MISC_HDMI_30BPP              0x08
-#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6           0x04    //for V6, the correct defintion for 30bpp should be 1 for 36bpp(5:4)
+#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6           0x04    //for V6, the correct definition for 30bpp should be 1 for 36bpp(5:4)
 #define PIXEL_CLOCK_V6_MISC_HDMI_48BPP              0x0c
 #define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC             0x10
 #define PIXEL_CLOCK_V6_MISC_GEN_DPREFCLK            0x40
index 58364a8..c77ed38 100644 (file)
@@ -981,6 +981,40 @@ struct atom_display_controller_info_v4_2
   uint8_t  reserved3[8];
 };
 
+struct atom_display_controller_info_v4_3
+{
+  struct  atom_common_table_header  table_header;
+  uint32_t display_caps;
+  uint32_t bootup_dispclk_10khz;
+  uint16_t dce_refclk_10khz;
+  uint16_t i2c_engine_refclk_10khz;
+  uint16_t dvi_ss_percentage;       // in unit of 0.001%
+  uint16_t dvi_ss_rate_10hz;
+  uint16_t hdmi_ss_percentage;      // in unit of 0.001%
+  uint16_t hdmi_ss_rate_10hz;
+  uint16_t dp_ss_percentage;        // in unit of 0.001%
+  uint16_t dp_ss_rate_10hz;
+  uint8_t  dvi_ss_mode;             // enum of atom_spread_spectrum_mode
+  uint8_t  hdmi_ss_mode;            // enum of atom_spread_spectrum_mode
+  uint8_t  dp_ss_mode;              // enum of atom_spread_spectrum_mode
+  uint8_t  ss_reserved;
+  uint8_t  dfp_hardcode_mode_num;   // DFP hardcode mode number defined in StandardVESA_TimingTable when EDID is not available
+  uint8_t  dfp_hardcode_refreshrate;// DFP hardcode mode refreshrate defined in StandardVESA_TimingTable when EDID is not available
+  uint8_t  vga_hardcode_mode_num;   // VGA hardcode mode number defined in StandardVESA_TimingTable when EDID is not available
+  uint8_t  vga_hardcode_refreshrate;// VGA hardcode mode refresh rate defined in StandardVESA_TimingTable when EDID is not available
+  uint16_t dpphy_refclk_10khz;
+  uint16_t reserved2;
+  uint8_t  dcnip_min_ver;
+  uint8_t  dcnip_max_ver;
+  uint8_t  max_disp_pipe_num;
+  uint8_t  max_vbios_active_disp_pipe_num;
+  uint8_t  max_ppll_num;
+  uint8_t  max_disp_phy_num;
+  uint8_t  max_aux_pairs;
+  uint8_t  remotedisplayconfig;
+  uint8_t  reserved3[8];
+};
+
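
A worked reading of the 0.001% spread-spectrum fields above: a stored
dp_ss_percentage of 4750 encodes 4750 * 0.001% = 4.75% spread (illustrative
value, not taken from the table).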
 struct atom_display_controller_info_v4_4 {
        struct atom_common_table_header table_header;
        uint32_t display_caps;
@@ -1043,7 +1077,9 @@ enum dce_info_caps_def
   DCE_INFO_CAPS_DISABLE_DFP_DP_HBR2      =0x04,
   // only for VBIOS
   DCE_INFO_CAPS_ENABLE_INTERLAC_TIMING   =0x08,
-
+  // only for VBIOS
+  DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE    =0x20,
+  DCE_INFO_CAPS_VBIOS_LTTPR_TRANSPARENT_ENABLE = 0x40,
 };
 
 /* 
index e2bffca..754170a 100644 (file)
 
 #define DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT       0x68
 #define DCN_1_0__CTXID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT       6
+#define DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT        0x68 // DMCUB_IHC_outbox1_ready_int IHC_DMCUB_outbox1_ready_int_ack DMCUB_OUTBOX_LOW_PRIORITY_READY_INTERRUPT DISP_INTERRUPT_STATUS_CONTINUE24 Level/Pulse
+#define DCN_1_0__CTXID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT        8
 
 #endif // __IRQSRCS_DCN_1_0_H__
index dd69581..3534686 100644 (file)
@@ -242,6 +242,9 @@ struct pp_display_clock_request;
 struct pp_clock_levels_with_voltage;
 struct pp_clock_levels_with_latency;
 struct amd_pp_clocks;
+struct pp_smu_wm_range_sets;
+struct pp_smu_nv_clock_table;
+struct dpm_clocks;
 
 struct amd_pm_funcs {
 /* export for dpm on ci and si */
@@ -336,6 +339,17 @@ struct amd_pm_funcs {
        int (*set_df_cstate)(void *handle, enum pp_df_cstate state);
        int (*set_xgmi_pstate)(void *handle, uint32_t pstate);
        ssize_t (*get_gpu_metrics)(void *handle, void **table);
+       int (*set_watermarks_for_clock_ranges)(void *handle,
+                                              struct pp_smu_wm_range_sets *ranges);
+       int (*display_disable_memory_clock_switch)(void *handle,
+                                                  bool disable_memory_clock_switch);
+       int (*get_max_sustainable_clocks_by_dc)(void *handle,
+                                               struct pp_smu_nv_clock_table *max_clocks);
+       int (*get_uclk_dpm_states)(void *handle,
+                                  unsigned int *clock_values_in_khz,
+                                  unsigned int *num_states);
+       int (*get_dpm_clock_table)(void *handle,
+                                  struct dpm_clocks *clock_table);
 };
 
 struct metrics_table_header {
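
A hypothetical dispatch sketch for one of the callbacks added above,
following the pp_funcs/pp_handle pattern used elsewhere in amdgpu:

    const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

    if (pp_funcs && pp_funcs->get_dpm_clock_table)
            ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
                            clock_table);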
index 0a6bb33..03581d5 100644 (file)
@@ -927,7 +927,6 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
 {
        int ret = 0;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-       bool swsmu = is_support_sw_smu(adev);
 
        switch (block_type) {
        case AMD_IP_BLOCK_TYPE_UVD:
@@ -968,15 +967,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
        case AMD_IP_BLOCK_TYPE_GFX:
        case AMD_IP_BLOCK_TYPE_VCN:
        case AMD_IP_BLOCK_TYPE_SDMA:
-               if (pp_funcs && pp_funcs->set_powergating_by_smu) {
-                       ret = (pp_funcs->set_powergating_by_smu(
-                               (adev)->powerplay.pp_handle, block_type, gate));
-               }
-               break;
        case AMD_IP_BLOCK_TYPE_JPEG:
-               if (swsmu)
-                       ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
-               break;
        case AMD_IP_BLOCK_TYPE_GMC:
        case AMD_IP_BLOCK_TYPE_ACP:
                if (pp_funcs && pp_funcs->set_powergating_by_smu) {
@@ -1606,7 +1597,10 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio
                        pr_err("smu firmware loading failed\n");
                        return r;
                }
-               *smu_version = adev->pm.fw_version;
+
+               if (smu_version)
+                       *smu_version = adev->pm.fw_version;
        }
+
        return 0;
 }
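
The added NULL check makes the out-parameter optional; a hypothetical caller
that only wants the firmware loaded can now pass NULL:

    r = amdgpu_pm_load_smu_firmware(adev, NULL); /* load FW, ignore version */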
index 2627870..204e345 100644 (file)
@@ -27,7 +27,6 @@
 #include "amdgpu_drv.h"
 #include "amdgpu_pm.h"
 #include "amdgpu_dpm.h"
-#include "amdgpu_smu.h"
 #include "atom.h"
 #include <linux/pci.h>
 #include <linux/hwmon.h>
@@ -129,6 +128,8 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
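
The guard added here (and repeated across the show/store handlers below)
rejects sysfs access while the device is in system suspend but not runtime
suspended, since touching power-management state then is unsafe; every
affected handler gains the same two lines:

    if (adev->in_suspend && !adev->in_runpm)
            return -EPERM;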
@@ -145,9 +146,9 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",
-                       (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
-                       (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
+       return sysfs_emit(buf, "%s\n",
+                         (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
+                         (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
 }
 
 static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
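
On the snprintf() -> sysfs_emit() conversions in these handlers: sysfs_emit()
knows sysfs show buffers are PAGE_SIZE and page-aligned, so the size bound no
longer has to be passed by hand and misuse is caught at runtime. A minimal
sketch of the pattern (the attribute itself is hypothetical):

    static ssize_t example_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
            return sysfs_emit(buf, "%d\n", 42); /* was: snprintf(buf, PAGE_SIZE, ...) */
    }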
@@ -162,6 +163,8 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (strncmp("battery", buf, strlen("battery")) == 0)
                state = POWER_STATE_TYPE_BATTERY;
@@ -268,6 +271,8 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -283,17 +288,17 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",
-                       (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
-                       (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
-                       (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
-                       (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
-                       (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
-                       (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
-                       (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
-                       (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
-                       (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
-                       "unknown");
+       return sysfs_emit(buf, "%s\n",
+                         (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
+                         (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
+                         (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
+                         (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
+                         (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
+                         (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
+                         (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
+                         (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
+                         (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
+                         "unknown");
 }
 
 static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
@@ -310,6 +315,8 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (strncmp("low", buf, strlen("low")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_LOW;
@@ -408,6 +415,8 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -448,6 +457,8 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -472,7 +483,7 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
        if (i == data.nums)
                i = -EINVAL;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", i);
+       return sysfs_emit(buf, "%d\n", i);
 }
 
 static ssize_t amdgpu_get_pp_force_state(struct device *dev,
@@ -484,11 +495,13 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (adev->pp_force_state_enabled)
                return amdgpu_get_pp_cur_state(dev, attr, buf);
        else
-               return snprintf(buf, PAGE_SIZE, "\n");
+               return sysfs_emit(buf, "\n");
 }
 
 static ssize_t amdgpu_set_pp_force_state(struct device *dev,
@@ -504,6 +517,8 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (strlen(buf) == 1)
                adev->pp_force_state_enabled = false;
@@ -564,6 +579,8 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -602,6 +619,8 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -764,6 +783,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (count > 127)
                return -EINVAL;
@@ -865,6 +886,8 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -916,6 +939,8 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = kstrtou64(buf, 0, &featuremask);
        if (ret)
@@ -927,14 +952,7 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
                return ret;
        }
 
-       if (is_support_sw_smu(adev)) {
-               ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
-               if (ret) {
-                       pm_runtime_mark_last_busy(ddev->dev);
-                       pm_runtime_put_autosuspend(ddev->dev);
-                       return -EINVAL;
-               }
-       } else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
+       if (adev->powerplay.pp_funcs->set_ppfeature_status) {
                ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
                if (ret) {
                        pm_runtime_mark_last_busy(ddev->dev);
@@ -959,6 +977,8 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -1018,6 +1038,8 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -1083,6 +1105,8 @@ static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = amdgpu_read_mask(buf, count, &mask);
        if (ret)
@@ -1239,6 +1263,8 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -1254,7 +1280,7 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", value);
+       return sysfs_emit(buf, "%d\n", value);
 }
 
 static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
@@ -1269,6 +1295,8 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = kstrtol(buf, 0, &value);
 
@@ -1312,6 +1340,8 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -1327,7 +1357,7 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", value);
+       return sysfs_emit(buf, "%d\n", value);
 }
 
 static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
@@ -1342,6 +1372,8 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = kstrtol(buf, 0, &value);
 
@@ -1405,6 +1437,8 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -1443,6 +1477,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        tmp[0] = *(buf);
        tmp[1] = '\0';
@@ -1506,6 +1542,8 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(ddev->dev);
        if (r < 0) {
@@ -1523,7 +1561,7 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", value);
+       return sysfs_emit(buf, "%d\n", value);
 }
 
 /**
@@ -1544,6 +1582,8 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(ddev->dev);
        if (r < 0) {
@@ -1561,7 +1601,7 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", value);
+       return sysfs_emit(buf, "%d\n", value);
 }
 
 /**
@@ -1587,6 +1627,8 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (adev->flags & AMD_IS_APU)
                return -ENODATA;
@@ -1605,8 +1647,8 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
 
-       return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
-                       count0, count1, pcie_get_mps(adev->pdev));
+       return sysfs_emit(buf, "%llu %llu %i\n",
+                         count0, count1, pcie_get_mps(adev->pdev));
 }
 
 /**
@@ -1628,9 +1670,11 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (adev->unique_id)
-               return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
+               return sysfs_emit(buf, "%016llx\n", adev->unique_id);
 
        return 0;
 }
@@ -1657,10 +1701,10 @@ static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
 
-       return snprintf(buf, PAGE_SIZE, "%s: thermal throttling logging %s, with interval %d seconds\n",
-                       adev_to_drm(adev)->unique,
-                       atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
-                       adev->throttling_logging_rs.interval / HZ + 1);
+       return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
+                         adev_to_drm(adev)->unique,
+                         atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
+                         adev->throttling_logging_rs.interval / HZ + 1);
 }
 
 static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
@@ -1726,6 +1770,8 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -1954,6 +2000,8 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (channel >= PP_TEMP_MAX)
                return -EINVAL;
@@ -1991,7 +2039,7 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+       return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
@@ -2007,7 +2055,7 @@ static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
        else
                temp = adev->pm.dpm.thermal.max_temp;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+       return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
@@ -2023,7 +2071,7 @@ static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
        else
                temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+       return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
@@ -2039,7 +2087,7 @@ static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
        else
                temp = adev->pm.dpm.thermal.max_mem_crit_temp;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+       return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
@@ -2051,7 +2099,7 @@ static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
        if (channel >= PP_TEMP_MAX)
                return -EINVAL;
 
-       return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label);
+       return sysfs_emit(buf, "%s\n", temp_label[channel].label);
 }
 
 static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
@@ -2077,7 +2125,7 @@ static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
                break;
        }
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+       return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
@@ -2090,6 +2138,8 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (ret < 0) {
@@ -2122,6 +2172,8 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        err = kstrtoint(buf, 10, &value);
        if (err)
@@ -2172,6 +2224,8 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (err < 0) {
@@ -2220,6 +2274,8 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (err < 0) {
@@ -2253,6 +2309,8 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (err < 0) {
@@ -2285,6 +2343,8 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
@@ -2301,7 +2361,7 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
+       return sysfs_emit(buf, "%d\n", min_rpm);
 }
 
 static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
@@ -2315,6 +2375,8 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
@@ -2331,7 +2393,7 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
+       return sysfs_emit(buf, "%d\n", max_rpm);
 }
 
 static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
@@ -2344,6 +2406,8 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (err < 0) {
@@ -2376,6 +2440,8 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (err < 0) {
@@ -2422,6 +2488,8 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (ret < 0) {
@@ -2455,6 +2523,8 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        err = kstrtoint(buf, 10, &value);
        if (err)
@@ -2496,6 +2566,8 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
@@ -2513,14 +2585,14 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
+       return sysfs_emit(buf, "%d\n", vddgfx);
 }
 
 static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
                                              struct device_attribute *attr,
                                              char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "vddgfx\n");
+       return sysfs_emit(buf, "vddgfx\n");
 }
 
 static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
@@ -2533,6 +2605,8 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        /* only APUs have vddnb */
        if  (!(adev->flags & AMD_IS_APU))
@@ -2554,14 +2628,14 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
+       return sysfs_emit(buf, "%d\n", vddnb);
 }
 
 static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
                                              struct device_attribute *attr,
                                              char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "vddnb\n");
+       return sysfs_emit(buf, "vddnb\n");
 }
 
 static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
@@ -2575,6 +2649,8 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
@@ -2595,7 +2671,7 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
        /* convert to microwatts */
        uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
 
-       return snprintf(buf, PAGE_SIZE, "%u\n", uw);
+       return sysfs_emit(buf, "%u\n", uw);
 }
 
 static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
@@ -2619,6 +2695,8 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
@@ -2656,6 +2734,8 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
@@ -2693,6 +2773,8 @@ static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
@@ -2722,7 +2804,7 @@ static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
 {
        int limit_type = to_sensor_dev_attr(attr)->index;
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",
+       return sysfs_emit(buf, "%s\n",
                limit_type == SMU_FAST_PPT_LIMIT ? "fastPPT" : "slowPPT");
 }
 
@@ -2739,6 +2821,8 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (amdgpu_sriov_vf(adev))
                return -EINVAL;
@@ -2780,6 +2864,8 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
@@ -2797,14 +2883,14 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000);
+       return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);
 }
 
 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "sclk\n");
+       return sysfs_emit(buf, "sclk\n");
 }
 
 static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
@@ -2817,6 +2903,8 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
@@ -2834,14 +2922,14 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000);
+       return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);
 }
 
 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "mclk\n");
+       return sysfs_emit(buf, "mclk\n");
 }
 
 /**
@@ -3390,6 +3478,8 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(dev->dev);
        if (r < 0) {
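
A hedged composite sketch (not itself part of the patch) of the show-handler shape the hunks above converge on: reject access during GPU reset or non-runtime suspend, hold a runtime-PM reference across the hardware query, and report the value with sysfs_emit(). The function name amdgpu_get_example_value and the queried value are placeholders.

static ssize_t amdgpu_get_example_value(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int value = 0;
	int r;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	/* new guard added throughout this file: no access while suspended,
	 * unless the suspend is runtime PM's doing */
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return r;
	}

	/* ... query the hardware into `value` here (placeholder) ... */

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	/* sysfs_emit() replaces snprintf(buf, PAGE_SIZE, ...): it knows the
	 * buffer is a full page and warns if buf is not page-aligned */
	return sysfs_emit(buf, "%d\n", value);
}
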
index 433dd1e..6102660 100644 (file)
 #define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrHigh 0x40
 #define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrLow  0x41
 
-#define PPSMC_Message_Count                      0x42
+#define PPSMC_MSG_GfxDriverResetRecovery       0x42
+#define PPSMC_Message_Count                    0x43
 
 //PPSMC Reset Types
 #define PPSMC_RESET_TYPE_WARM_RESET              0x00
index 25d5f03..8bb224f 100644 (file)
@@ -195,6 +195,11 @@ struct smu_user_dpm_profile {
        uint32_t clk_dependency;
 };
 
+enum smu_event_type {
+       SMU_EVENT_RESET_COMPLETE = 0,
+};
+
 #define SMU_TABLE_INIT(tables, table_id, s, a, d)      \
        do {                                            \
                tables[table_id].size = s;              \
@@ -338,7 +343,6 @@ struct smu_power_context {
        struct smu_power_gate power_gate;
 };
 
-
 #define SMU_FEATURE_MAX        (64)
 struct smu_feature
 {
@@ -806,6 +810,13 @@ struct pptable_funcs {
         */
        int (*check_fw_status)(struct smu_context *smu);
 
+       /**
+        * @set_mp1_state: put the SMU into the correct state for an
+        *                 upcoming resume from runtime PM or GPU reset.
+        */
+       int (*set_mp1_state)(struct smu_context *smu,
+                            enum pp_mp1_state mp1_state);
+
        /**
         * @setup_pptable: Initialize the power play table and populate it with
         *                 default values.
@@ -1160,6 +1171,12 @@ struct pptable_funcs {
         * @set_light_sbr:  Set light sbr mode for the SMU.
         */
        int (*set_light_sbr)(struct smu_context *smu, bool enable);
+
+       /**
+        * @wait_for_event:  Wait for events from SMU.
+        */
+       int (*wait_for_event)(struct smu_context *smu,
+                             enum smu_event_type event, uint64_t event_arg);
 };
 
 typedef enum {
@@ -1235,64 +1252,13 @@ enum smu_cmn2asic_mapping_type {
        [profile] = {1, (workload)}
 
 #if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
-int smu_load_microcode(struct smu_context *smu);
-
-int smu_check_fw_status(struct smu_context *smu);
-
-int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
-
-int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
-
 int smu_get_power_limit(struct smu_context *smu,
                        uint32_t *limit,
                        enum smu_ppt_limit_level limit_level);
 
-int smu_set_power_limit(void *handle, uint32_t limit);
-int smu_print_ppclk_levels(void *handle, enum pp_clock_type type, char *buf);
-
-int smu_od_edit_dpm_table(void *handle,
-                         enum PP_OD_DPM_TABLE_COMMAND type,
-                         long *input, uint32_t size);
-
-int smu_read_sensor(void *handle, int sensor, void *data, int *size);
-int smu_get_power_profile_mode(void *handle, char *buf);
-int smu_set_power_profile_mode(void *handle, long *param, uint32_t param_size);
-u32 smu_get_fan_control_mode(void *handle);
-int smu_set_fan_control_mode(struct smu_context *smu, int value);
-void smu_pp_set_fan_control_mode(void *handle, u32 value);
-int smu_get_fan_speed_percent(void *handle, u32 *speed);
-int smu_set_fan_speed_percent(void *handle, u32 speed);
-int smu_get_fan_speed_rpm(void *handle, uint32_t *speed);
-
-int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk);
-
-int smu_get_clock_by_type_with_latency(struct smu_context *smu,
-                                      enum smu_clk_type clk_type,
-                                      struct pp_clock_levels_with_latency *clocks);
-
-int smu_display_clock_voltage_request(struct smu_context *smu,
-                                     struct pp_display_clock_request *clock_req);
-int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch);
-
-int smu_set_xgmi_pstate(void *handle,
-                       uint32_t pstate);
-
-int smu_set_azalia_d3_pme(struct smu_context *smu);
-
-bool smu_baco_is_support(struct smu_context *smu);
-int smu_get_baco_capability(void *handle, bool *cap);
-
-int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state);
-
-int smu_baco_enter(struct smu_context *smu);
-int smu_baco_exit(struct smu_context *smu);
-int smu_baco_set_state(void *handle, int state);
-
-
 bool smu_mode1_reset_is_support(struct smu_context *smu);
 bool smu_mode2_reset_is_support(struct smu_context *smu);
 int smu_mode1_reset(struct smu_context *smu);
-int smu_mode2_reset(void *handle);
 
 extern const struct amd_ip_funcs smu_ip_funcs;
 
@@ -1302,68 +1268,24 @@ extern const struct amdgpu_ip_block_version smu_v13_0_ip_block;
 
 bool is_support_sw_smu(struct amdgpu_device *adev);
 bool is_support_cclk_dpm(struct amdgpu_device *adev);
-int smu_reset(struct smu_context *smu);
-int smu_sys_get_pp_table(void *handle, char **table);
-int smu_sys_set_pp_table(void *handle, const char *buf, size_t size);
-int smu_get_power_num_states(void *handle, struct pp_states_info *state_info);
-enum amd_pm_state_type smu_get_current_power_state(void *handle);
 int smu_write_watermarks_table(struct smu_context *smu);
-int smu_set_watermarks_for_clock_ranges(
-               struct smu_context *smu,
-               struct pp_smu_wm_range_sets *clock_ranges);
-
-/* smu to display interface */
-extern int smu_display_configuration_change(struct smu_context *smu, const
-                                           struct amd_pp_display_configuration
-                                           *display_config);
-extern int smu_dpm_set_power_gate(void *handle, uint32_t block_type, bool gate);
-extern int smu_handle_task(struct smu_context *smu,
-                          enum amd_dpm_forced_level level,
-                          enum amd_pp_task task_id,
-                          bool lock_needed);
-extern int smu_handle_dpm_task(void *handle,
-                              enum amd_pp_task task_id,
-                              enum amd_pm_state_type *user_state);
-int smu_switch_power_profile(void *handle,
-                            enum PP_SMC_POWER_PROFILE type,
-                            bool en);
+
 int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
                           uint32_t *min, uint32_t *max);
-u32 smu_get_mclk(void *handle, bool low);
-u32 smu_get_sclk(void *handle, bool low);
+
 int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
                            uint32_t min, uint32_t max);
-enum amd_dpm_forced_level smu_get_performance_level(void *handle);
-int smu_force_performance_level(void *handle, enum amd_dpm_forced_level level);
-int smu_set_display_count(struct smu_context *smu, uint32_t count);
-int smu_set_ac_dc(struct smu_context *smu);
-int smu_sys_get_pp_feature_mask(void *handle, char *buf);
-int smu_sys_set_pp_feature_mask(void *handle, uint64_t new_mask);
-int smu_force_ppclk_levels(void *handle, enum pp_clock_type type, uint32_t mask);
-int smu_set_mp1_state(void *handle,
-                     enum pp_mp1_state mp1_state);
-int smu_set_df_cstate(void *handle,
-                     enum pp_df_cstate state);
-int smu_allow_xgmi_power_down(struct smu_context *smu, bool en);
 
-int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
-                                        struct pp_smu_nv_clock_table *max_clocks);
-
-int smu_get_uclk_dpm_states(struct smu_context *smu,
-                           unsigned int *clock_values_in_khz,
-                           unsigned int *num_states);
+int smu_set_ac_dc(struct smu_context *smu);
 
-int smu_get_dpm_clock_table(struct smu_context *smu,
-                           struct dpm_clocks *clock_table);
+int smu_allow_xgmi_power_down(struct smu_context *smu, bool en);
 
 int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value);
 
-ssize_t smu_sys_get_gpu_metrics(void *handle, void **table);
-
-int smu_enable_mgpu_fan_boost(void *handle);
-int smu_gfx_state_change_set(struct smu_context *smu, uint32_t state);
-
 int smu_set_light_sbr(struct smu_context *smu, bool enable);
 
+int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
+                      uint64_t event_arg);
+
 #endif
 #endif
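
A hedged usage sketch for the smu_wait_for_event() declaration above; the wrapper name example_reset_and_wait, the call site, and the event_arg of 0 are assumptions, not code from this patch.

static int example_reset_and_wait(struct amdgpu_device *adev)
{
	int ret;

	ret = smu_mode1_reset(&adev->smu);
	if (ret)
		return ret;

	/* block until the SMU signals that reset recovery finished */
	return smu_wait_for_event(adev, SMU_EVENT_RESET_COMPLETE, 0);
}
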
index df2ead2..d23533b 100644 (file)
@@ -435,8 +435,12 @@ typedef struct {
   uint8_t  GpioI2cSda; // Serial Data
   uint16_t spare5;
 
+  uint16_t XgmiMaxCurrent; // in Amps
+  int8_t   XgmiOffset;     // in Amps
+  uint8_t  Padding_TelemetryXgmi;
+
   //reserved
-  uint32_t reserved[16];
+  uint32_t reserved[15];
 
 } PPTable_t;
 
@@ -481,7 +485,10 @@ typedef struct {
   uint16_t TemperatureAllHBM[4]  ;
   uint32_t GfxBusyAcc            ;
   uint32_t DramBusyAcc           ;
-  uint32_t Spare[4];
+  uint32_t EnergyAcc64bitLow     ; //15.259uJ resolution
+  uint32_t EnergyAcc64bitHigh    ;
+  uint32_t TimeStampLow          ; //10ns resolution
+  uint32_t TimeStampHigh         ;
 
   // Padding - ignore
   uint32_t     MmHubPadding[8]; // SMU internal use
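
A hedged sketch of consuming the split accumulators added above; the helper name is an assumption, and the halves are passed as plain u32s since the hunk does not show the struct's typedef name. 15.259 uJ is 2^-16 J, so whole joules fall out of a 16-bit shift.

static uint64_t example_energy_joules(uint32_t energy_lo, uint32_t energy_hi)
{
	uint64_t acc = ((uint64_t)energy_hi << 32) | energy_lo;

	/* one tick = 15.259 uJ = 2^-16 J (per the field comment above) */
	return acc >> 16;
}
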
index 5bfb60f..89a16dc 100644 (file)
        __SMU_DUMMY_MAP(DisableDeterminism),            \
        __SMU_DUMMY_MAP(SetUclkDpmMode),                \
        __SMU_DUMMY_MAP(LightSBR),                      \
+       __SMU_DUMMY_MAP(GfxDriverResetRecovery),
 
 #undef __SMU_DUMMY_MAP
 #define __SMU_DUMMY_MAP(type)  SMU_MSG_##type
index ad4db2e..d5182bb 100644 (file)
@@ -61,8 +61,8 @@
 #define LINK_WIDTH_MAX                 6
 #define LINK_SPEED_MAX                 3
 
-static __maybe_unused uint16_t link_width[] = {0, 1, 2, 4, 8, 12, 16};
-static __maybe_unused uint16_t link_speed[] = {25, 50, 80, 160};
+static const __maybe_unused uint16_t link_width[] = {0, 1, 2, 4, 8, 12, 16};
+static const __maybe_unused uint16_t link_speed[] = {25, 50, 80, 160};
 
 static const
 struct smu_temperature_range __maybe_unused smu11_thermal_policy[] =
index 80208e1..8145e1c 100644 (file)
@@ -26,7 +26,7 @@
 #include "amdgpu_smu.h"
 
 #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
-#define SMU13_DRIVER_IF_VERSION_ALDE 0x5
+#define SMU13_DRIVER_IF_VERSION_ALDE 0x6
 
 /* MP Apertures */
 #define MP0_Public                     0x03800000
@@ -268,5 +268,8 @@ int smu_v13_0_get_current_pcie_link_speed(struct smu_context *smu);
 int smu_v13_0_gfx_ulv_control(struct smu_context *smu,
                              bool enablement);
 
+int smu_v13_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
+                            uint64_t event_arg);
+
 #endif
 #endif
index f5d59fa..f5fe540 100644 (file)
@@ -1297,19 +1297,18 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_GPU_LOAD:
-               if (has_gfx_busy) {
+               if (!has_gfx_busy)
+                       ret = -EOPNOTSUPP;
+               else {
                        ret = smum_send_msg_to_smc(hwmgr,
                                                   PPSMC_MSG_GetGfxBusy,
                                                   &activity_percent);
                        if (!ret)
-                               activity_percent = activity_percent > 100 ? 100 : activity_percent;
+                               *((uint32_t *)value) = min(activity_percent, (u32)100);
                        else
-                               return -EIO;
-                       *((uint32_t *)value) = activity_percent;
-                       return 0;
-               } else {
-                       return -EOPNOTSUPP;
+                               ret = -EIO;
                }
+               break;
        default:
                ret = -EOPNOTSUPP;
                break;
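
The rework above replaces the open-coded ternary clamp with min() and routes every outcome through the common break. A hedged micro-example of the clamp, with a hypothetical helper name; min() lives in <linux/minmax.h> and type-checks its arguments, hence the (u32) cast on the literal.

static u32 example_clamp_busy(u32 activity_percent)
{
	/* cap the SMC-reported load at 100% */
	return min(activity_percent, (u32)100);
}
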
index 7edafef..0541bfc 100644 (file)
@@ -1224,7 +1224,8 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
                    (hwmgr->chip_id == CHIP_POLARIS10) ||
                    (hwmgr->chip_id == CHIP_POLARIS11) ||
                    (hwmgr->chip_id == CHIP_POLARIS12) ||
-                   (hwmgr->chip_id == CHIP_TONGA))
+                   (hwmgr->chip_id == CHIP_TONGA) ||
+                   (hwmgr->chip_id == CHIP_TOPAZ))
                        PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
 
 
@@ -3330,7 +3331,8 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 
        disable_mclk_switching_for_display = ((1 < hwmgr->display_config->num_display) &&
                                                !hwmgr->display_config->multi_monitor_in_sync) ||
-                                               smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time);
+                                               (hwmgr->display_config->num_display &&
+                                               smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
 
        disable_mclk_switching = disable_mclk_switching_for_frame_lock ||
                                         disable_mclk_switching_for_display;
index b6d7b7b..1a097e6 100644 (file)
@@ -52,8 +52,8 @@
 
 #define LINK_WIDTH_MAX                         6
 #define LINK_SPEED_MAX                         3
-static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
-static int link_speed[] = {25, 50, 80, 160};
+static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
+static const int link_speed[] = {25, 50, 80, 160};
 
 static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
                enum pp_clock_type type, uint32_t mask);
index 213c9c6..d3177a5 100644 (file)
@@ -57,8 +57,8 @@
 
 #define LINK_WIDTH_MAX                         6
 #define LINK_SPEED_MAX                         3
-static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
-static int link_speed[] = {25, 50, 80, 160};
+static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
+static const int link_speed[] = {25, 50, 80, 160};
 
 static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
 {
index e722adc..e0eb7ca 100644 (file)
@@ -51,8 +51,19 @@ static const struct amd_pm_funcs swsmu_pm_funcs;
 static int smu_force_smuclk_levels(struct smu_context *smu,
                                   enum smu_clk_type clk_type,
                                   uint32_t mask);
-
-int smu_sys_get_pp_feature_mask(void *handle, char *buf)
+static int smu_handle_task(struct smu_context *smu,
+                          enum amd_dpm_forced_level level,
+                          enum amd_pp_task task_id,
+                          bool lock_needed);
+static int smu_reset(struct smu_context *smu);
+static int smu_set_fan_speed_percent(void *handle, u32 speed);
+static int smu_set_fan_control_mode(struct smu_context *smu, int value);
+static int smu_set_power_limit(void *handle, uint32_t limit);
+static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
+static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
+
+static int smu_sys_get_pp_feature_mask(void *handle,
+                                      char *buf)
 {
        struct smu_context *smu = handle;
        int size = 0;
@@ -69,7 +80,8 @@ int smu_sys_get_pp_feature_mask(void *handle, char *buf)
        return size;
 }
 
-int smu_sys_set_pp_feature_mask(void *handle, uint64_t new_mask)
+static int smu_sys_set_pp_feature_mask(void *handle,
+                                      uint64_t new_mask)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -142,7 +154,7 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
        return ret;
 }
 
-u32 smu_get_mclk(void *handle, bool low)
+static u32 smu_get_mclk(void *handle, bool low)
 {
        struct smu_context *smu = handle;
        uint32_t clk_freq;
@@ -156,7 +168,7 @@ u32 smu_get_mclk(void *handle, bool low)
        return clk_freq * 100;
 }
 
-u32 smu_get_sclk(void *handle, bool low)
+static u32 smu_get_sclk(void *handle, bool low)
 {
        struct smu_context *smu = handle;
        uint32_t clk_freq;
@@ -256,8 +268,9 @@ static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
  *    Under this case, the smu->mutex lock protection is already enforced on
  *    the parent API smu_force_performance_level of the call path.
  */
-int smu_dpm_set_power_gate(void *handle, uint32_t block_type,
-                          bool gate)
+static int smu_dpm_set_power_gate(void *handle,
+                                 uint32_t block_type,
+                                 bool gate)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -406,8 +419,8 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
        smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
 }
 
-int smu_get_power_num_states(void *handle,
-                            struct pp_states_info *state_info)
+static int smu_get_power_num_states(void *handle,
+                                   struct pp_states_info *state_info)
 {
        if (!state_info)
                return -EINVAL;
@@ -442,7 +455,8 @@ bool is_support_cclk_dpm(struct amdgpu_device *adev)
 }
 
 
-int smu_sys_get_pp_table(void *handle, char **table)
+static int smu_sys_get_pp_table(void *handle,
+                               char **table)
 {
        struct smu_context *smu = handle;
        struct smu_table_context *smu_table = &smu->smu_table;
@@ -468,7 +482,9 @@ int smu_sys_get_pp_table(void *handle, char **table)
        return powerplay_table_size;
 }
 
-int smu_sys_set_pp_table(void *handle, const char *buf, size_t size)
+static int smu_sys_set_pp_table(void *handle,
+                               const char *buf,
+                               size_t size)
 {
        struct smu_context *smu = handle;
        struct smu_table_context *smu_table = &smu->smu_table;
@@ -632,6 +648,7 @@ err0_out:
        return ret;
 }
 
+
 static int smu_late_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1337,7 +1354,7 @@ static int smu_disable_dpms(struct smu_context *smu)
        bool use_baco = !smu->is_apu &&
                ((amdgpu_in_reset(adev) &&
                  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
-                ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));
+                ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
 
        /*
         * For custom pptable uploading, skip the DPM features
@@ -1430,7 +1447,7 @@ static int smu_hw_fini(void *handle)
        return smu_smc_hw_cleanup(smu);
 }
 
-int smu_reset(struct smu_context *smu)
+static int smu_reset(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
        int ret;
@@ -1474,7 +1491,8 @@ static int smu_suspend(void *handle)
 
        smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
 
-       if (smu->is_apu)
+       /* skip CGPG when in S0ix */
+       if (smu->is_apu && !adev->in_s0ix)
                smu_set_gfx_cgpg(&adev->smu, false);
 
        return 0;
@@ -1518,9 +1536,10 @@ static int smu_resume(void *handle)
        return 0;
 }
 
-int smu_display_configuration_change(struct smu_context *smu,
-                                    const struct amd_pp_display_configuration *display_config)
+static int smu_display_configuration_change(void *handle,
+                                           const struct amd_pp_display_configuration *display_config)
 {
+       struct smu_context *smu = handle;
        int index = 0;
        int num_of_active_display = 0;
 
@@ -1676,10 +1695,10 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
        return ret;
 }
 
-int smu_handle_task(struct smu_context *smu,
-                   enum amd_dpm_forced_level level,
-                   enum amd_pp_task task_id,
-                   bool lock_needed)
+static int smu_handle_task(struct smu_context *smu,
+                          enum amd_dpm_forced_level level,
+                          enum amd_pp_task task_id,
+                          bool lock_needed)
 {
        int ret = 0;
 
@@ -1711,9 +1730,9 @@ out:
        return ret;
 }
 
-int smu_handle_dpm_task(void *handle,
-                       enum amd_pp_task task_id,
-                       enum amd_pm_state_type *user_state)
+static int smu_handle_dpm_task(void *handle,
+                              enum amd_pp_task task_id,
+                              enum amd_pm_state_type *user_state)
 {
        struct smu_context *smu = handle;
        struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
@@ -1722,10 +1741,9 @@ int smu_handle_dpm_task(void *handle,
 
 }
 
-
-int smu_switch_power_profile(void *handle,
-                            enum PP_SMC_POWER_PROFILE type,
-                            bool en)
+static int smu_switch_power_profile(void *handle,
+                                   enum PP_SMC_POWER_PROFILE type,
+                                   bool en)
 {
        struct smu_context *smu = handle;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
@@ -1761,7 +1779,7 @@ int smu_switch_power_profile(void *handle,
        return 0;
 }
 
-enum amd_dpm_forced_level smu_get_performance_level(void *handle)
+static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
 {
        struct smu_context *smu = handle;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
@@ -1780,7 +1798,8 @@ enum amd_dpm_forced_level smu_get_performance_level(void *handle)
        return level;
 }
 
-int smu_force_performance_level(void *handle, enum amd_dpm_forced_level level)
+static int smu_force_performance_level(void *handle,
+                                      enum amd_dpm_forced_level level)
 {
        struct smu_context *smu = handle;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
@@ -1815,8 +1834,9 @@ int smu_force_performance_level(void *handle, enum amd_dpm_forced_level level)
        return ret;
 }
 
-int smu_set_display_count(struct smu_context *smu, uint32_t count)
+static int smu_set_display_count(void *handle, uint32_t count)
 {
+       struct smu_context *smu = handle;
        int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -1859,7 +1879,9 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
        return ret;
 }
 
-int smu_force_ppclk_levels(void *handle, enum pp_clock_type type, uint32_t mask)
+static int smu_force_ppclk_levels(void *handle,
+                                 enum pp_clock_type type,
+                                 uint32_t mask)
 {
        struct smu_context *smu = handle;
        enum smu_clk_type clk_type;
@@ -1903,48 +1925,28 @@ int smu_force_ppclk_levels(void *handle, enum pp_clock_type type, uint32_t mask)
  * However, the mp1 state setting should still be granted
  * even if dpm_enabled is cleared.
  */
-int smu_set_mp1_state(void *handle,
-                     enum pp_mp1_state mp1_state)
+static int smu_set_mp1_state(void *handle,
+                            enum pp_mp1_state mp1_state)
 {
        struct smu_context *smu = handle;
-       uint16_t msg;
-       int ret;
+       int ret = 0;
 
        if (!smu->pm_enabled)
                return -EOPNOTSUPP;
 
        mutex_lock(&smu->mutex);
 
-       switch (mp1_state) {
-       case PP_MP1_STATE_SHUTDOWN:
-               msg = SMU_MSG_PrepareMp1ForShutdown;
-               break;
-       case PP_MP1_STATE_UNLOAD:
-               msg = SMU_MSG_PrepareMp1ForUnload;
-               break;
-       case PP_MP1_STATE_RESET:
-               msg = SMU_MSG_PrepareMp1ForReset;
-               break;
-       case PP_MP1_STATE_NONE:
-       default:
-               mutex_unlock(&smu->mutex);
-               return 0;
-       }
-
-       ret = smu_send_smc_msg(smu, msg, NULL);
-       /* some asics may not support those messages */
-       if (ret == -EINVAL)
-               ret = 0;
-       if (ret)
-               dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");
+       if (smu->ppt_funcs &&
+           smu->ppt_funcs->set_mp1_state)
+               ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
 
        mutex_unlock(&smu->mutex);
 
        return ret;
 }
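
With the common switch removed above, each ASIC now supplies a set_mp1_state hook behind ppt_funcs. A hedged sketch of such a hook, reusing the message mapping the deleted common code carried; the function name is hypothetical and per-ASIC details are assumptions.

static int example_asic_set_mp1_state(struct smu_context *smu,
				      enum pp_mp1_state mp1_state)
{
	uint16_t msg;
	int ret;

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		return 0;
	}

	ret = smu_send_smc_msg(smu, msg, NULL);
	/* some ASICs may not implement these messages */
	if (ret == -EINVAL)
		ret = 0;
	if (ret)
		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

	return ret;
}
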
 
-int smu_set_df_cstate(void *handle,
-                     enum pp_df_cstate state)
+static int smu_set_df_cstate(void *handle,
+                            enum pp_df_cstate state)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -2003,9 +2005,10 @@ int smu_write_watermarks_table(struct smu_context *smu)
        return ret;
 }
 
-int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
-               struct pp_smu_wm_range_sets *clock_ranges)
+static int smu_set_watermarks_for_clock_ranges(void *handle,
+                                              struct pp_smu_wm_range_sets *clock_ranges)
 {
+       struct smu_context *smu = handle;
        int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2092,41 +2095,39 @@ const struct amdgpu_ip_block_version smu_v13_0_ip_block =
        .funcs = &smu_ip_funcs,
 };
 
-int smu_load_microcode(struct smu_context *smu)
+static int smu_load_microcode(void *handle)
 {
+       struct smu_context *smu = handle;
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
-       if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+       if (!smu->pm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
+       /* This should be used for non-PSP loading */
+       if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
+               return 0;
 
-       if (smu->ppt_funcs->load_microcode)
+       if (smu->ppt_funcs->load_microcode) {
                ret = smu->ppt_funcs->load_microcode(smu);
+               if (ret) {
+                       dev_err(adev->dev, "Load microcode failed\n");
+                       return ret;
+               }
+       }
 
-       mutex_unlock(&smu->mutex);
-
-       return ret;
-}
-
-int smu_check_fw_status(struct smu_context *smu)
-{
-       int ret = 0;
-
-       if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
-               return -EOPNOTSUPP;
-
-       mutex_lock(&smu->mutex);
-
-       if (smu->ppt_funcs->check_fw_status)
+       if (smu->ppt_funcs->check_fw_status) {
                ret = smu->ppt_funcs->check_fw_status(smu);
-
-       mutex_unlock(&smu->mutex);
+               if (ret) {
+                       dev_err(adev->dev, "SMC is not ready\n");
+                       return ret;
+               }
+       }
 
        return ret;
 }
 
-int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
+static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
 {
        int ret = 0;
 
@@ -2140,7 +2141,7 @@ int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
        return ret;
 }
 
-int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
+static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
 {
        struct smu_context *smu = handle;
        u32 percent;
@@ -2199,7 +2200,7 @@ int smu_get_power_limit(struct smu_context *smu,
        return ret;
 }
 
-int smu_set_power_limit(void *handle, uint32_t limit)
+static int smu_set_power_limit(void *handle, uint32_t limit)
 {
        struct smu_context *smu = handle;
        uint32_t limit_type = limit >> 24;
@@ -2255,7 +2256,9 @@ static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type cl
        return ret;
 }
 
-int smu_print_ppclk_levels(void *handle, enum pp_clock_type type, char *buf)
+static int smu_print_ppclk_levels(void *handle,
+                                 enum pp_clock_type type,
+                                 char *buf)
 {
        struct smu_context *smu = handle;
        enum smu_clk_type clk_type;
@@ -2296,9 +2299,9 @@ int smu_print_ppclk_levels(void *handle, enum pp_clock_type type, char *buf)
        return smu_print_smuclk_levels(smu, clk_type, buf);
 }
 
-int smu_od_edit_dpm_table(void *handle,
-                         enum PP_OD_DPM_TABLE_COMMAND type,
-                         long *input, uint32_t size)
+static int smu_od_edit_dpm_table(void *handle,
+                                enum PP_OD_DPM_TABLE_COMMAND type,
+                                long *input, uint32_t size)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -2317,7 +2320,10 @@ int smu_od_edit_dpm_table(void *handle,
        return ret;
 }
 
-int smu_read_sensor(void *handle, int sensor, void *data, int *size_arg)
+static int smu_read_sensor(void *handle,
+                          int sensor,
+                          void *data,
+                          int *size_arg)
 {
        struct smu_context *smu = handle;
        struct smu_umd_pstate_table *pstate_table =
@@ -2384,7 +2390,7 @@ unlock:
        return ret;
 }
 
-int smu_get_power_profile_mode(void *handle, char *buf)
+static int smu_get_power_profile_mode(void *handle, char *buf)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -2402,7 +2408,9 @@ int smu_get_power_profile_mode(void *handle, char *buf)
        return ret;
 }
 
-int smu_set_power_profile_mode(void *handle, long *param, uint32_t param_size)
+static int smu_set_power_profile_mode(void *handle,
+                                     long *param,
+                                     uint32_t param_size)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -2420,7 +2428,7 @@ int smu_set_power_profile_mode(void *handle, long *param, uint32_t param_size)
 }
 
 
-u32 smu_get_fan_control_mode(void *handle)
+static u32 smu_get_fan_control_mode(void *handle)
 {
        struct smu_context *smu = handle;
        u32 ret = 0;
@@ -2438,7 +2446,7 @@ u32 smu_get_fan_control_mode(void *handle)
        return ret;
 }
 
-int smu_set_fan_control_mode(struct smu_context *smu, int value)
+static int smu_set_fan_control_mode(struct smu_context *smu, int value)
 {
        int ret = 0;
 
@@ -2463,14 +2471,15 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
        return ret;
 }
 
-void smu_pp_set_fan_control_mode(void *handle, u32 value) {
+static void smu_pp_set_fan_control_mode(void *handle, u32 value)
+{
        struct smu_context *smu = handle;
 
        smu_set_fan_control_mode(smu, value);
 }
 
 
-int smu_get_fan_speed_percent(void *handle, u32 *speed)
+static int smu_get_fan_speed_percent(void *handle, u32 *speed)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -2494,7 +2503,7 @@ int smu_get_fan_speed_percent(void *handle, u32 *speed)
        return ret;
 }
 
-int smu_set_fan_speed_percent(void *handle, u32 speed)
+static int smu_set_fan_speed_percent(void *handle, u32 speed)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -2517,7 +2526,7 @@ int smu_set_fan_speed_percent(void *handle, u32 speed)
        return ret;
 }
 
-int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
+static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -2538,8 +2547,9 @@ int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
        return ret;
 }
 
-int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
+static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
 {
+       struct smu_context *smu = handle;
        int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2554,10 +2564,12 @@ int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
        return ret;
 }
 
-int smu_get_clock_by_type_with_latency(struct smu_context *smu,
-                                      enum smu_clk_type clk_type,
-                                      struct pp_clock_levels_with_latency *clocks)
+static int smu_get_clock_by_type_with_latency(void *handle,
+                                             enum amd_pp_clock_type type,
+                                             struct pp_clock_levels_with_latency *clocks)
 {
+       struct smu_context *smu = handle;
+       enum smu_clk_type clk_type;
        int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2565,17 +2577,38 @@ int smu_get_clock_by_type_with_latency(struct smu_context *smu,
 
        mutex_lock(&smu->mutex);
 
-       if (smu->ppt_funcs->get_clock_by_type_with_latency)
+       if (smu->ppt_funcs->get_clock_by_type_with_latency) {
+               switch (type) {
+               case amd_pp_sys_clock:
+                       clk_type = SMU_GFXCLK;
+                       break;
+               case amd_pp_mem_clock:
+                       clk_type = SMU_MCLK;
+                       break;
+               case amd_pp_dcef_clock:
+                       clk_type = SMU_DCEFCLK;
+                       break;
+               case amd_pp_disp_clock:
+                       clk_type = SMU_DISPCLK;
+                       break;
+               default:
+                       dev_err(smu->adev->dev, "Invalid clock type!\n");
+                       mutex_unlock(&smu->mutex);
+                       return -EINVAL;
+               }
+
                ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
+       }
 
        mutex_unlock(&smu->mutex);
 
        return ret;
 }
 
-int smu_display_clock_voltage_request(struct smu_context *smu,
-                                     struct pp_display_clock_request *clock_req)
+static int smu_display_clock_voltage_request(void *handle,
+                                            struct pp_display_clock_request *clock_req)
 {
+       struct smu_context *smu = handle;
        int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2592,8 +2625,10 @@ int smu_display_clock_voltage_request(struct smu_context *smu,
 }
 
 
-int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
+static int smu_display_disable_memory_clock_switch(void *handle,
+                                                  bool disable_memory_clock_switch)
 {
+       struct smu_context *smu = handle;
        int ret = -EINVAL;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2609,8 +2644,8 @@ int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disabl
        return ret;
 }
 
-int smu_set_xgmi_pstate(void *handle,
-                       uint32_t pstate)
+static int smu_set_xgmi_pstate(void *handle,
+                              uint32_t pstate)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -2631,49 +2666,7 @@ int smu_set_xgmi_pstate(void *handle,
        return ret;
 }
 
-int smu_set_azalia_d3_pme(struct smu_context *smu)
-{
-       int ret = 0;
-
-       if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
-               return -EOPNOTSUPP;
-
-       mutex_lock(&smu->mutex);
-
-       if (smu->ppt_funcs->set_azalia_d3_pme)
-               ret = smu->ppt_funcs->set_azalia_d3_pme(smu);
-
-       mutex_unlock(&smu->mutex);
-
-       return ret;
-}
-
-/*
- * On system suspending or resetting, the dpm_enabled
- * flag will be cleared. So that those SMU services which
- * are not supported will be gated.
- *
- * However, the baco/mode1 reset should still be granted
- * as they are still supported and necessary.
- */
-bool smu_baco_is_support(struct smu_context *smu)
-{
-       bool ret = false;
-
-       if (!smu->pm_enabled)
-               return false;
-
-       mutex_lock(&smu->mutex);
-
-       if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
-               ret = smu->ppt_funcs->baco_is_support(smu);
-
-       mutex_unlock(&smu->mutex);
-
-       return ret;
-}
-
-int smu_get_baco_capability(void *handle, bool *cap)
+static int smu_get_baco_capability(void *handle, bool *cap)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -2693,60 +2686,7 @@ int smu_get_baco_capability(void *handle, bool *cap)
        return ret;
 }
 
-
-int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
-{
-       if (smu->ppt_funcs->baco_get_state)
-               return -EINVAL;
-
-       mutex_lock(&smu->mutex);
-       *state = smu->ppt_funcs->baco_get_state(smu);
-       mutex_unlock(&smu->mutex);
-
-       return 0;
-}
-
-int smu_baco_enter(struct smu_context *smu)
-{
-       int ret = 0;
-
-       if (!smu->pm_enabled)
-               return -EOPNOTSUPP;
-
-       mutex_lock(&smu->mutex);
-
-       if (smu->ppt_funcs->baco_enter)
-               ret = smu->ppt_funcs->baco_enter(smu);
-
-       mutex_unlock(&smu->mutex);
-
-       if (ret)
-               dev_err(smu->adev->dev, "Failed to enter BACO state!\n");
-
-       return ret;
-}
-
-int smu_baco_exit(struct smu_context *smu)
-{
-       int ret = 0;
-
-       if (!smu->pm_enabled)
-               return -EOPNOTSUPP;
-
-       mutex_lock(&smu->mutex);
-
-       if (smu->ppt_funcs->baco_exit)
-               ret = smu->ppt_funcs->baco_exit(smu);
-
-       mutex_unlock(&smu->mutex);
-
-       if (ret)
-               dev_err(smu->adev->dev, "Failed to exit BACO state!\n");
-
-       return ret;
-}
-
-int smu_baco_set_state(void *handle, int state)
+static int smu_baco_set_state(void *handle, int state)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -2831,7 +2771,7 @@ int smu_mode1_reset(struct smu_context *smu)
        return ret;
 }
 
-int smu_mode2_reset(void *handle)
+static int smu_mode2_reset(void *handle)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -2852,9 +2792,10 @@ int smu_mode2_reset(void *handle)
        return ret;
 }
 
-int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
-                                        struct pp_smu_nv_clock_table *max_clocks)
+static int smu_get_max_sustainable_clocks_by_dc(void *handle,
+                                               struct pp_smu_nv_clock_table *max_clocks)
 {
+       struct smu_context *smu = handle;
        int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2870,10 +2811,11 @@ int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
        return ret;
 }
 
-int smu_get_uclk_dpm_states(struct smu_context *smu,
-                           unsigned int *clock_values_in_khz,
-                           unsigned int *num_states)
+static int smu_get_uclk_dpm_states(void *handle,
+                                  unsigned int *clock_values_in_khz,
+                                  unsigned int *num_states)
 {
+       struct smu_context *smu = handle;
        int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2889,7 +2831,7 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
        return ret;
 }
 
-enum amd_pm_state_type smu_get_current_power_state(void *handle)
+static enum amd_pm_state_type smu_get_current_power_state(void *handle)
 {
        struct smu_context *smu = handle;
        enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
@@ -2907,9 +2849,10 @@ enum amd_pm_state_type smu_get_current_power_state(void *handle)
        return pm_state;
 }
 
-int smu_get_dpm_clock_table(struct smu_context *smu,
-                           struct dpm_clocks *clock_table)
+static int smu_get_dpm_clock_table(void *handle,
+                                  struct dpm_clocks *clock_table)
 {
+       struct smu_context *smu = handle;
        int ret = 0;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2925,7 +2868,7 @@ int smu_get_dpm_clock_table(struct smu_context *smu,
        return ret;
 }
 
-ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
+static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
 {
        struct smu_context *smu = handle;
        ssize_t size;
@@ -2945,7 +2888,7 @@ ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
        return size;
 }
 
-int smu_enable_mgpu_fan_boost(void *handle)
+static int smu_enable_mgpu_fan_boost(void *handle)
 {
        struct smu_context *smu = handle;
        int ret = 0;
@@ -2963,8 +2906,10 @@ int smu_enable_mgpu_fan_boost(void *handle)
        return ret;
 }
 
-int smu_gfx_state_change_set(struct smu_context *smu, uint32_t state)
+static int smu_gfx_state_change_set(void *handle,
+                                   uint32_t state)
 {
+       struct smu_context *smu = handle;
        int ret = 0;
 
        mutex_lock(&smu->mutex);
@@ -3026,4 +2971,31 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
        .get_power_profile_mode  = smu_get_power_profile_mode,
        .force_clock_level       = smu_force_ppclk_levels,
        .print_clock_levels      = smu_print_ppclk_levels,
+       .get_uclk_dpm_states     = smu_get_uclk_dpm_states,
+       .get_dpm_clock_table     = smu_get_dpm_clock_table,
+       .display_configuration_change        = smu_display_configuration_change,
+       .get_clock_by_type_with_latency      = smu_get_clock_by_type_with_latency,
+       .display_clock_voltage_request       = smu_display_clock_voltage_request,
+       .set_active_display_count            = smu_set_display_count,
+       .set_min_deep_sleep_dcefclk          = smu_set_deep_sleep_dcefclk,
+       .set_watermarks_for_clock_ranges     = smu_set_watermarks_for_clock_ranges,
+       .display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
+       .get_max_sustainable_clocks_by_dc    = smu_get_max_sustainable_clocks_by_dc,
+       .load_firmware           = smu_load_microcode,
+       .gfx_state_change_set    = smu_gfx_state_change_set,
 };
+
+int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
+                      uint64_t event_arg)
+{
+       int ret = -EINVAL;
+       struct smu_context *smu = &adev->smu;
+
+       if (smu->ppt_funcs->wait_for_event) {
+               mutex_lock(&smu->mutex);
+               ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);
+               mutex_unlock(&smu->mutex);
+       }
+
+       return ret;
+}
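
For illustration, a minimal sketch of how a caller might consume the new smu_wait_for_event() export; the wrapper below is an assumption for this example, not the actual amdgpu call site:

	/* Hypothetical caller, for illustration only. */
	static int example_wait_reset_done(struct amdgpu_device *adev)
	{
		int ret;

		/* Block until the SMU reports that the reset has completed. */
		ret = smu_wait_for_event(adev, SMU_EVENT_RESET_COMPLETE, 0);
		if (ret)
			dev_err(adev->dev, "SMU reset completion wait failed: %d\n", ret);

		return ret;
	}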
index bbc0309..77693bf 100644
@@ -2365,6 +2365,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
        .get_fan_parameters = arcturus_get_fan_parameters,
        .interrupt_work = smu_v11_0_interrupt_work,
        .set_light_sbr = smu_v11_0_set_light_sbr,
+       .set_mp1_state = smu_cmn_set_mp1_state,
 };
 
 void arcturus_set_ppt_funcs(struct smu_context *smu)
index 3d0de2c..f827096 100644
@@ -431,6 +431,30 @@ static int navi10_store_powerplay_table(struct smu_context *smu)
        return 0;
 }
 
+static int navi10_set_mp1_state(struct smu_context *smu,
+                               enum pp_mp1_state mp1_state)
+{
+       struct amdgpu_device *adev = smu->adev;
+       uint32_t mp1_fw_flags;
+       int ret = 0;
+
+       ret = smu_cmn_set_mp1_state(smu, mp1_state);
+       if (ret)
+               return ret;
+
+       if (mp1_state == PP_MP1_STATE_UNLOAD) {
+               mp1_fw_flags = RREG32_PCIE(MP1_Public |
+                                          (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+
+               mp1_fw_flags &= ~MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK;
+
+               WREG32_PCIE(MP1_Public |
+                           (smnMP1_FIRMWARE_FLAGS & 0xffffffff), mp1_fw_flags);
+       }
+
+       return 0;
+}
+
 static int navi10_setup_pptable(struct smu_context *smu)
 {
        int ret = 0;
@@ -3031,6 +3055,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
        .get_fan_parameters = navi10_get_fan_parameters,
        .post_init = navi10_post_smu_init,
        .interrupt_work = smu_v11_0_interrupt_work,
+       .set_mp1_state = navi10_set_mp1_state,
 };
 
 void navi10_set_ppt_funcs(struct smu_context *smu)
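
A worked example of the read-modify-write in navi10_set_mp1_state() above, with the register value and the mask's bit position assumed purely for illustration:

	/* Illustrative only; the flag's bit position is an assumption. */
	uint32_t flags = 0x00000003;	/* example raw MP1_FIRMWARE_FLAGS value */

	flags &= ~MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK;
	/*
	 * With an assumed mask of BIT(0), flags is now 0x00000002:
	 * MP1 interrupts are disabled while every other flag is preserved.
	 */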
index 3621884..72d9c1b 100644
@@ -3110,6 +3110,23 @@ static int sienna_cichlid_system_features_control(struct smu_context *smu,
        return smu_v11_0_system_features_control(smu, en);
 }
 
+static int sienna_cichlid_set_mp1_state(struct smu_context *smu,
+                                       enum pp_mp1_state mp1_state)
+{
+       int ret;
+
+       switch (mp1_state) {
+       case PP_MP1_STATE_UNLOAD:
+               ret = smu_cmn_set_mp1_state(smu, mp1_state);
+               break;
+       default:
+               /* Ignore others */
+               ret = 0;
+       }
+
+       return ret;
+}
+
 static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
        .get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask,
        .set_default_dpm_table = sienna_cichlid_set_default_dpm_table,
@@ -3195,6 +3212,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
        .get_fan_parameters = sienna_cichlid_get_fan_parameters,
        .interrupt_work = smu_v11_0_interrupt_work,
        .gpo_control = sienna_cichlid_gpo_control,
+       .set_mp1_state = sienna_cichlid_set_mp1_state,
 };
 
 void sienna_cichlid_set_ppt_funcs(struct smu_context *smu)
index 0d137af..6274cae 100644
@@ -561,6 +561,7 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
                smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability;
                break;
        case 3:
+       case 4:
        default:
                v_3_3 = (struct atom_firmware_info_v3_3 *)header;
                smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
index c5b3873..7bcd358 100644
@@ -384,10 +384,15 @@ static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
 
 static bool vangogh_is_dpm_running(struct smu_context *smu)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
        uint32_t feature_mask[2];
        uint64_t feature_enabled;
 
+       /* We need to re-init after suspend, so return false */
+       if (adev->in_suspend)
+               return false;
+
        ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
 
        if (ret)
@@ -1889,6 +1894,7 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
        .get_ppt_limit = vangogh_get_ppt_limit,
        .get_power_limit = vangogh_get_power_limit,
        .set_power_limit = vangogh_set_power_limit,
+       .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
 };
 
 void vangogh_set_ppt_funcs(struct smu_context *smu)
index 9813a86..bca02a9 100644
@@ -126,7 +126,8 @@ static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT
        MSG_MAP(SetExecuteDMATest,                   PPSMC_MSG_SetExecuteDMATest,               0),
        MSG_MAP(EnableDeterminism,                   PPSMC_MSG_EnableDeterminism,               0),
        MSG_MAP(DisableDeterminism,                  PPSMC_MSG_DisableDeterminism,              0),
-       MSG_MAP(SetUclkDpmMode,                          PPSMC_MSG_SetUclkDpmMode,              0),
+       MSG_MAP(SetUclkDpmMode,                      PPSMC_MSG_SetUclkDpmMode,                  0),
+       MSG_MAP(GfxDriverResetRecovery,              PPSMC_MSG_GfxDriverResetRecovery,          0),
 };
 
 static const struct cmn2asic_mapping aldebaran_clk_map[SMU_CLK_COUNT] = {
@@ -1265,6 +1266,233 @@ static bool aldebaran_is_dpm_running(struct smu_context *smu)
        return !!(feature_enabled & SMC_DPM_FEATURE);
 }
 
+static void aldebaran_fill_i2c_req(SwI2cRequest_t *req, bool write,
+                                 uint8_t address, uint32_t numbytes,
+                                 uint8_t *data)
+{
+       int i;
+
+       req->I2CcontrollerPort = 0;
+       req->I2CSpeed = 2;
+       req->SlaveAddress = address;
+       req->NumCmds = numbytes;
+
+       for (i = 0; i < numbytes; i++) {
+               SwI2cCmd_t *cmd =  &req->SwI2cCmds[i];
+
+               /* First 2 bytes are always a write, carrying the 2-byte EEPROM address */
+               if (i < 2)
+                       cmd->CmdConfig = CMDCONFIG_READWRITE_MASK;
+               else
+                       cmd->CmdConfig = write ? CMDCONFIG_READWRITE_MASK : 0;
+
+               /* Add RESTART for a read once the address bytes have been sent */
+               cmd->CmdConfig |= (i == 2 && !write) ? CMDCONFIG_RESTART_MASK : 0;
+
+               /* Add STOP in the end */
+               cmd->CmdConfig |= (i == (numbytes - 1)) ? CMDCONFIG_STOP_MASK : 0;
+
+               /* Fill with data regardless if read or write to simplify code */
+               cmd->ReadWriteData = data[i];
+       }
+}
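
To make the request layout concrete, here is what a hypothetical 4-byte EEPROM read produces with the routine above (flag names follow the code; the exact mask values are not shown in this patch):

	/*
	 * aldebaran_fill_i2c_req(&req, false, addr, 4, data) yields:
	 *   SwI2cCmds[0]: write (READWRITE set)  - high byte of EEPROM offset
	 *   SwI2cCmds[1]: write (READWRITE set)  - low byte of EEPROM offset
	 *   SwI2cCmds[2]: read + RESTART         - repeated START before reading
	 *   SwI2cCmds[3]: read + STOP            - final byte ends the transfer
	 */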
+
+static int aldebaran_i2c_read_data(struct i2c_adapter *control,
+                                              uint8_t address,
+                                              uint8_t *data,
+                                              uint32_t numbytes)
+{
+       uint32_t i, ret = 0;
+       SwI2cRequest_t req;
+       struct amdgpu_device *adev = to_amdgpu_device(control);
+       struct smu_table_context *smu_table = &adev->smu.smu_table;
+       struct smu_table *table = &smu_table->driver_table;
+
+       if (numbytes > MAX_SW_I2C_COMMANDS) {
+               dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
+                       numbytes, MAX_SW_I2C_COMMANDS);
+               return -EINVAL;
+       }
+
+       memset(&req, 0, sizeof(req));
+       aldebaran_fill_i2c_req(&req, false, address, numbytes, data);
+
+       mutex_lock(&adev->smu.mutex);
+       /* Now read data starting with that address */
+       ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req,
+                                       true);
+       mutex_unlock(&adev->smu.mutex);
+
+       if (!ret) {
+               SwI2cRequest_t *res = (SwI2cRequest_t *)table->cpu_addr;
+
+               /* Assume SMU fills res->SwI2cCmds[i].ReadWriteData with the read bytes */
+               for (i = 0; i < numbytes; i++)
+                       data[i] = res->SwI2cCmds[i].ReadWriteData;
+
+               dev_dbg(adev->dev, "aldebaran_i2c_read_data, address = %x, bytes = %d, data :",
+                                 (uint16_t)address, numbytes);
+
+               print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+                              8, 1, data, numbytes, false);
+       } else
+               dev_err(adev->dev, "aldebaran_i2c_read_data - error occurred :%x", ret);
+
+       return ret;
+}
+
+static int aldebaran_i2c_write_data(struct i2c_adapter *control,
+                                               uint8_t address,
+                                               uint8_t *data,
+                                               uint32_t numbytes)
+{
+       uint32_t ret;
+       SwI2cRequest_t req;
+       struct amdgpu_device *adev = to_amdgpu_device(control);
+
+       if (numbytes > MAX_SW_I2C_COMMANDS) {
+               dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
+                       numbytes, MAX_SW_I2C_COMMANDS);
+               return -EINVAL;
+       }
+
+       memset(&req, 0, sizeof(req));
+       aldebaran_fill_i2c_req(&req, true, address, numbytes, data);
+
+       mutex_lock(&adev->smu.mutex);
+       ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true);
+       mutex_unlock(&adev->smu.mutex);
+
+       if (!ret) {
+               dev_dbg(adev->dev, "aldebaran_i2c_write(), address = %x, bytes = %d , data: ",
+                                        (uint16_t)address, numbytes);
+
+               print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+                              8, 1, data, numbytes, false);
+               /*
+                * According to the EEPROM spec, the EEPROM needs at most 10 ms to
+                * flush its internal RX buffer after a STOP is issued at the end
+                * of a write transaction. During this time the EEPROM will not
+                * respond to any more commands - so wait a bit more.
+                */
+               msleep(10);
+
+       } else
+               dev_err(adev->dev, "aldebaran_i2c_write- error occurred :%x", ret);
+
+       return ret;
+}
+
+static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
+                             struct i2c_msg *msgs, int num)
+{
+       uint32_t  i, j, ret, data_size, data_chunk_size, next_eeprom_addr = 0;
+       uint8_t *data_ptr, data_chunk[MAX_SW_I2C_COMMANDS] = { 0 };
+
+       for (i = 0; i < num; i++) {
+               /*
+                * The SMU interface allows at most MAX_SW_I2C_COMMANDS bytes of data
+                * at once, so the data needs to be split into chunks and each chunk
+                * sent separately.
+                */
+               data_size = msgs[i].len - 2;
+               data_chunk_size = MAX_SW_I2C_COMMANDS - 2;
+               next_eeprom_addr = (msgs[i].buf[0] << 8 & 0xff00) | (msgs[i].buf[1] & 0xff);
+               data_ptr = msgs[i].buf + 2;
+
+               for (j = 0; j < data_size / data_chunk_size; j++) {
+                       /* Insert the EEPROM destination address, bits 0-15 */
+                       data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+                       data_chunk[1] = (next_eeprom_addr & 0xff);
+
+                       if (msgs[i].flags & I2C_M_RD) {
+                               ret = aldebaran_i2c_read_data(i2c_adap,
+                                                            (uint8_t)msgs[i].addr,
+                                                            data_chunk, MAX_SW_I2C_COMMANDS);
+
+                               memcpy(data_ptr, data_chunk + 2, data_chunk_size);
+                       } else {
+
+                               memcpy(data_chunk + 2, data_ptr, data_chunk_size);
+
+                               ret = aldebaran_i2c_write_data(i2c_adap,
+                                                             (uint8_t)msgs[i].addr,
+                                                             data_chunk, MAX_SW_I2C_COMMANDS);
+                       }
+
+                       if (ret) {
+                               num = -EIO;
+                               goto fail;
+                       }
+
+                       next_eeprom_addr += data_chunk_size;
+                       data_ptr += data_chunk_size;
+               }
+
+               if (data_size % data_chunk_size) {
+                       data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+                       data_chunk[1] = (next_eeprom_addr & 0xff);
+
+                       if (msgs[i].flags & I2C_M_RD) {
+                               ret = aldebaran_i2c_read_data(i2c_adap,
+                                                            (uint8_t)msgs[i].addr,
+                                                            data_chunk, (data_size % data_chunk_size) + 2);
+
+                               memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size);
+                       } else {
+                               memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size);
+
+                               ret = aldebaran_i2c_write_data(i2c_adap,
+                                                             (uint8_t)msgs[i].addr,
+                                                             data_chunk, (data_size % data_chunk_size) + 2);
+                       }
+
+                       if (ret) {
+                               num = -EIO;
+                               goto fail;
+                       }
+               }
+       }
+
+fail:
+       return num;
+}
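
A worked example of the chunking arithmetic above, assuming MAX_SW_I2C_COMMANDS is 24 (the constant's value is an assumption here, not shown in this patch):

	/*
	 * msgs[i].len     = 72  ->  data_size = 70 payload bytes
	 * data_chunk_size = 24 - 2 = 22 bytes per request
	 * full chunks     = 70 / 22 = 3 requests carrying 66 bytes
	 * tail transfer   = 70 % 22 = 4 bytes, sent as 4 + 2 command slots
	 *
	 * Every request re-sends the 2-byte EEPROM offset, which advances
	 * by 22 bytes after each full chunk.
	 */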
+
+static u32 aldebaran_i2c_func(struct i2c_adapter *adap)
+{
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm aldebaran_i2c_algo = {
+       .master_xfer = aldebaran_i2c_xfer,
+       .functionality = aldebaran_i2c_func,
+};
+
+static int aldebaran_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
+{
+       struct amdgpu_device *adev = to_amdgpu_device(control);
+       int res;
+
+       control->owner = THIS_MODULE;
+       control->class = I2C_CLASS_SPD;
+       control->dev.parent = &adev->pdev->dev;
+       control->algo = &aldebaran_i2c_algo;
+       snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
+
+       res = i2c_add_adapter(control);
+       if (res)
+               DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+
+       return res;
+}
+
+static void aldebaran_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
+{
+       i2c_del_adapter(control);
+}
+
 static void aldebaran_get_unique_id(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
@@ -1432,6 +1660,57 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
        return sizeof(struct gpu_metrics_v1_1);
 }
 
+static int aldebaran_mode2_reset(struct smu_context *smu)
+{
+       u32 smu_version;
+       int ret = 0, index;
+       struct amdgpu_device *adev = smu->adev;
+       int timeout = 10;
+
+       smu_cmn_get_smc_version(smu, NULL, &smu_version);
+
+       index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+                                               SMU_MSG_GfxDeviceDriverReset);
+
+       mutex_lock(&smu->message_lock);
+       if (smu_version >= 0x00441400) {
+               ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, SMU_RESET_MODE_2);
+               /* This is similar to FLR, wait till max FLR timeout */
+               msleep(100);
+               dev_dbg(smu->adev->dev, "restore config space...\n");
+               /* Restore the config space saved during init */
+               amdgpu_device_load_pci_state(adev->pdev);
+
+               dev_dbg(smu->adev->dev, "wait for reset ack\n");
+               while (ret == -ETIME && timeout) {
+                       ret = smu_cmn_wait_for_response(smu);
+                       /* Wait a bit more time for getting ACK */
+                       if (ret == -ETIME) {
+                               --timeout;
+                               usleep_range(500, 1000);
+                               continue;
+                       }
+
+                       if (ret != 1) {
+                               dev_err(adev->dev, "failed to send mode2 message \tparam: 0x%08x response %#x\n",
+                                               SMU_RESET_MODE_2, ret);
+                               goto out;
+                       }
+               }
+
+       } else {
+               dev_err(adev->dev, "smu fw 0x%x does not support MSG_GfxDeviceDriverReset MSG\n",
+                               smu_version);
+       }
+
+       if (ret == 1)
+               ret = 0;
+out:
+       mutex_unlock(&smu->message_lock);
+
+       return ret;
+}
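
For orientation, a simplified sketch of how this hook is reached through the ppt_funcs table; the wrapper is a rough reconstruction for illustration, not verbatim driver code:

	/* Simplified dispatch sketch, not the verbatim swsmu wrapper. */
	static int example_mode2_reset(struct smu_context *smu)
	{
		int ret = -EOPNOTSUPP;

		mutex_lock(&smu->mutex);
		if (smu->ppt_funcs->mode2_reset)	/* aldebaran_mode2_reset here */
			ret = smu->ppt_funcs->mode2_reset(smu);
		mutex_unlock(&smu->mutex);

		return ret;
	}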
+
 static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu)
 {
 #if 0
@@ -1460,6 +1739,19 @@ static bool aldebaran_is_mode2_reset_supported(struct smu_context *smu)
        return true;
 }
 
+static int aldebaran_set_mp1_state(struct smu_context *smu,
+                                  enum pp_mp1_state mp1_state)
+{
+       switch (mp1_state) {
+       case PP_MP1_STATE_UNLOAD:
+               return smu_cmn_set_mp1_state(smu, mp1_state);
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static const struct pptable_funcs aldebaran_ppt_funcs = {
        /* init dpm */
        .get_allowed_feature_mask = aldebaran_get_allowed_feature_mask,
@@ -1517,7 +1809,11 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
        .mode1_reset_is_support = aldebaran_is_mode1_reset_supported,
        .mode2_reset_is_support = aldebaran_is_mode2_reset_supported,
        .mode1_reset = smu_v13_0_mode1_reset,
-       .mode2_reset = smu_v13_0_mode2_reset,
+       .set_mp1_state = aldebaran_set_mp1_state,
+       .mode2_reset = aldebaran_mode2_reset,
+       .wait_for_event = smu_v13_0_wait_for_event,
+       .i2c_init = aldebaran_i2c_control_init,
+       .i2c_fini = aldebaran_i2c_control_fini,
 };
 
 void aldebaran_set_ppt_funcs(struct smu_context *smu)
index bd3a9c8..30c9ac6 100644
@@ -72,8 +72,8 @@ MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin");
 #define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000
 #define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE
 
-static int link_width[] = {0, 1, 2, 4, 8, 12, 16};
-static int link_speed[] = {25, 50, 80, 160};
+static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
+static const int link_speed[] = {25, 50, 80, 160};
 
 int smu_v13_0_init_microcode(struct smu_context *smu)
 {
@@ -1374,19 +1374,43 @@ int smu_v13_0_mode1_reset(struct smu_context *smu)
        return ret;
 }
 
-int smu_v13_0_mode2_reset(struct smu_context *smu)
+static int smu_v13_0_wait_for_reset_complete(struct smu_context *smu,
+                                            uint64_t event_arg)
 {
-       u32 smu_version;
        int ret = 0;
-       struct amdgpu_device *adev = smu->adev;
-       smu_cmn_get_smc_version(smu, NULL, &smu_version);
-       if (smu_version >= 0x00440700)
-               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL);
-       else
-               dev_err(adev->dev, "smu fw 0x%x does not support MSG_GfxDeviceDriverReset MSG\n", smu_version);
-       /*TODO: mode2 reset wait time should be shorter, will modify it later*/
+
+       dev_dbg(smu->adev->dev, "waiting for smu reset complete\n");
+       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GfxDriverResetRecovery, NULL);
+
+       return ret;
+}
+
+int smu_v13_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
+                            uint64_t event_arg)
+{
+       int ret = -EINVAL;
+
+       switch (event) {
+       case SMU_EVENT_RESET_COMPLETE:
+               ret = smu_v13_0_wait_for_reset_complete(smu, event_arg);
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
+int smu_v13_0_mode2_reset(struct smu_context *smu)
+{
+       int ret;
+
+       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+                       SMU_RESET_MODE_2, NULL);
+       /* TODO: the mode2 reset wait time should be shorter; add an ASIC-specific function if required */
        if (!ret)
                msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
+
        return ret;
 }
 
@@ -1686,10 +1710,14 @@ int smu_v13_0_get_dpm_level_count(struct smu_context *smu,
                                  enum smu_clk_type clk_type,
                                  uint32_t *value)
 {
-       return smu_v13_0_get_dpm_freq_by_index(smu,
-                                              clk_type,
-                                              0xff,
-                                              value);
+       int ret;
+
+       ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
+       /* FW returns the 0-based max level; increment by one to get the count */
+       if (!ret && value)
+               ++(*value);
+
+       return ret;
 }
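
A sketch of the resulting contract for callers, with illustrative values (the calling context and an in-scope smu pointer are assumed):

	uint32_t count;

	/*
	 * For a four-level uclk table the firmware's 0xff query returns the
	 * highest index, 3; the helper increments it, so count == 4 here.
	 */
	if (!smu_v13_0_get_dpm_level_count(smu, SMU_UCLK, &count))
		dev_dbg(smu->adev->dev, "uclk has %u DPM levels\n", count);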
 
 int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
index 4b45953..dc7d2e7 100644
@@ -76,10 +76,10 @@ static void smu_cmn_read_arg(struct smu_context *smu,
        *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
 }
 
-static int smu_cmn_wait_for_response(struct smu_context *smu)
+int smu_cmn_wait_for_response(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
-       uint32_t cur_value, i, timeout = adev->usec_timeout * 10;
+       uint32_t cur_value, i, timeout = adev->usec_timeout * 20;
 
        for (i = 0; i < timeout; i++) {
                cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
@@ -780,3 +780,31 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
        header->structure_size = structure_size;
 
 }
+
+int smu_cmn_set_mp1_state(struct smu_context *smu,
+                         enum pp_mp1_state mp1_state)
+{
+       enum smu_message_type msg;
+       int ret;
+
+       switch (mp1_state) {
+       case PP_MP1_STATE_SHUTDOWN:
+               msg = SMU_MSG_PrepareMp1ForShutdown;
+               break;
+       case PP_MP1_STATE_UNLOAD:
+               msg = SMU_MSG_PrepareMp1ForUnload;
+               break;
+       case PP_MP1_STATE_RESET:
+               msg = SMU_MSG_PrepareMp1ForReset;
+               break;
+       case PP_MP1_STATE_NONE:
+       default:
+               return 0;
+       }
+
+       ret = smu_cmn_send_smc_msg(smu, msg, NULL);
+       if (ret)
+               dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");
+
+       return ret;
+}
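
A hedged sketch of the layer above: the swsmu core forwards the MP1 state request to the per-ASIC hook roughly as follows (an illustrative reconstruction, not the exact source):

	/* Illustrative wrapper; the real smu_set_mp1_state() may differ. */
	static int example_set_mp1_state(struct smu_context *smu,
					 enum pp_mp1_state mp1_state)
	{
		int ret = 0;

		mutex_lock(&smu->mutex);
		if (smu->ppt_funcs && smu->ppt_funcs->set_mp1_state)
			ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
		mutex_unlock(&smu->mutex);

		return ret;
	}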
index c692501..da6ff6f 100644
@@ -37,6 +37,8 @@ int smu_cmn_send_smc_msg(struct smu_context *smu,
                         enum smu_message_type msg,
                         uint32_t *read_arg);
 
+int smu_cmn_wait_for_response(struct smu_context *smu);
+
 int smu_cmn_to_asic_specific_index(struct smu_context *smu,
                                   enum smu_cmn2asic_mapping_type type,
                                   uint32_t index);
@@ -99,5 +101,8 @@ int smu_cmn_get_metrics_table(struct smu_context *smu,
 
 void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
 
+int smu_cmn_set_mp1_state(struct smu_context *smu,
+                         enum pp_mp1_state mp1_state);
+
 #endif
 #endif
index 3bc383d..49a1d7f 100644
@@ -13,9 +13,6 @@
 #define has_bit(nr, mask)      (BIT(nr) & (mask))
 #define has_bits(bits, mask)   (((bits) & (mask)) == (bits))
 
-#define dp_for_each_set_bit(bit, mask) \
-       for_each_set_bit((bit), ((unsigned long *)&(mask)), sizeof(mask) * 8)
-
 #define dp_wait_cond(__cond, __tries, __min_range, __max_range)        \
 ({                                                     \
        int num_tries = __tries;                        \
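
The removed macro cast a u32 mask to unsigned long *, so for_each_set_bit() could read past the 32-bit field on 64-bit kernels. The komeda callers below switch to copying the mask into a real unsigned long first; the safe pattern, with a hypothetical do_something() consumer:

	u32 mask = pipe->avail_comps;	/* 32-bit bitfield in the struct */
	unsigned long bits = mask;	/* widen into a full machine word */
	int id;

	for_each_set_bit(id, &bits, 32)	/* scan only the low 32 bits */
		do_something(id);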
index ca891ae..cc7664c 100644
@@ -62,7 +62,7 @@ core_id_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct komeda_dev *mdev = dev_to_mdev(dev);
 
-       return snprintf(buf, PAGE_SIZE, "0x%08x\n", mdev->chip.core_id);
+       return sysfs_emit(buf, "0x%08x\n", mdev->chip.core_id);
 }
 static DEVICE_ATTR_RO(core_id);
 
@@ -85,7 +85,7 @@ config_id_show(struct device *dev, struct device_attribute *attr, char *buf)
                if (pipe->layers[i]->layer_type == KOMEDA_FMT_RICH_LAYER)
                        config_id.n_richs++;
        }
-       return snprintf(buf, PAGE_SIZE, "0x%08x\n", config_id.value);
+       return sysfs_emit(buf, "0x%08x\n", config_id.value);
 }
 static DEVICE_ATTR_RO(config_id);
 
@@ -94,7 +94,7 @@ aclk_hz_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct komeda_dev *mdev = dev_to_mdev(dev);
 
-       return snprintf(buf, PAGE_SIZE, "%lu\n", clk_get_rate(mdev->aclk));
+       return sysfs_emit(buf, "%lu\n", clk_get_rate(mdev->aclk));
 }
 static DEVICE_ATTR_RO(aclk_hz);
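
sysfs_emit() is the preferred form for sysfs show() callbacks: it encodes the PAGE_SIZE bound itself and warns on misuse, so the conversion is mechanical. Shape of the pattern, on a hypothetical attribute:

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		/* No PAGE_SIZE bookkeeping needed; sysfs_emit() enforces it. */
		return sysfs_emit(buf, "%u\n", 42);
	}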
 
index 719a797..06c5953 100644
@@ -46,8 +46,9 @@ void komeda_pipeline_destroy(struct komeda_dev *mdev,
 {
        struct komeda_component *c;
        int i;
+       unsigned long avail_comps = pipe->avail_comps;
 
-       dp_for_each_set_bit(i, pipe->avail_comps) {
+       for_each_set_bit(i, &avail_comps, 32) {
                c = komeda_pipeline_get_component(pipe, i);
                komeda_component_destroy(mdev, c);
        }
@@ -247,6 +248,7 @@ static void komeda_pipeline_dump(struct komeda_pipeline *pipe)
 {
        struct komeda_component *c;
        int id;
+       unsigned long avail_comps = pipe->avail_comps;
 
        DRM_INFO("Pipeline-%d: n_layers: %d, n_scalers: %d, output: %s.\n",
                 pipe->id, pipe->n_layers, pipe->n_scalers,
@@ -258,7 +260,7 @@ static void komeda_pipeline_dump(struct komeda_pipeline *pipe)
                 pipe->of_output_links[1] ?
                 pipe->of_output_links[1]->full_name : "none");
 
-       dp_for_each_set_bit(id, pipe->avail_comps) {
+       for_each_set_bit(id, &avail_comps, 32) {
                c = komeda_pipeline_get_component(pipe, id);
 
                komeda_component_dump(c);
@@ -270,8 +272,9 @@ static void komeda_component_verify_inputs(struct komeda_component *c)
        struct komeda_pipeline *pipe = c->pipeline;
        struct komeda_component *input;
        int id;
+       unsigned long supported_inputs = c->supported_inputs;
 
-       dp_for_each_set_bit(id, c->supported_inputs) {
+       for_each_set_bit(id, &supported_inputs, 32) {
                input = komeda_pipeline_get_component(pipe, id);
                if (!input) {
                        c->supported_inputs &= ~(BIT(id));
@@ -302,8 +305,9 @@ static void komeda_pipeline_assemble(struct komeda_pipeline *pipe)
        struct komeda_component *c;
        struct komeda_layer *layer;
        int i, id;
+       unsigned long avail_comps = pipe->avail_comps;
 
-       dp_for_each_set_bit(id, pipe->avail_comps) {
+       for_each_set_bit(id, &avail_comps, 32) {
                c = komeda_pipeline_get_component(pipe, id);
                komeda_component_verify_inputs(c);
        }
@@ -355,13 +359,15 @@ void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
 {
        struct komeda_component *c;
        u32 id;
+       unsigned long avail_comps;
 
        seq_printf(sf, "\n======== Pipeline-%d ==========\n", pipe->id);
 
        if (pipe->funcs && pipe->funcs->dump_register)
                pipe->funcs->dump_register(pipe, sf);
 
-       dp_for_each_set_bit(id, pipe->avail_comps) {
+       avail_comps = pipe->avail_comps;
+       for_each_set_bit(id, &avail_comps, 32) {
                c = komeda_pipeline_get_component(pipe, id);
 
                seq_printf(sf, "\n------%s------\n", c->name);
index 5c08511..e672b9c 100644
@@ -1231,14 +1231,15 @@ komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
        struct komeda_pipeline_state *old = priv_to_pipe_st(pipe->obj.state);
        struct komeda_component_state *c_st;
        struct komeda_component *c;
-       u32 disabling_comps, id;
+       u32 id;
+       unsigned long disabling_comps;
 
        WARN_ON(!old);
 
        disabling_comps = (~new->active_comps) & old->active_comps;
 
        /* unbound all disabling component */
-       dp_for_each_set_bit(id, disabling_comps) {
+       for_each_set_bit(id, &disabling_comps, 32) {
                c = komeda_pipeline_get_component(pipe, id);
                c_st = komeda_component_get_state_and_set_user(c,
                                drm_st, NULL, new->crtc);
@@ -1286,7 +1287,8 @@ bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
        struct komeda_pipeline_state *old;
        struct komeda_component *c;
        struct komeda_component_state *c_st;
-       u32 id, disabling_comps = 0;
+       u32 id;
+       unsigned long disabling_comps;
 
        old = komeda_pipeline_get_old_state(pipe, old_state);
 
@@ -1296,10 +1298,10 @@ bool komeda_pipeline_disable(struct komeda_pipeline *pipe,
                disabling_comps = old->active_comps &
                                  pipe->standalone_disabled_comps;
 
-       DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%x.\n",
+       DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%lx.\n",
                         pipe->id, old->active_comps, disabling_comps);
 
-       dp_for_each_set_bit(id, disabling_comps) {
+       for_each_set_bit(id, &disabling_comps, 32) {
                c = komeda_pipeline_get_component(pipe, id);
                c_st = priv_to_comp_st(c->obj.state);
 
@@ -1330,16 +1332,17 @@ void komeda_pipeline_update(struct komeda_pipeline *pipe,
        struct komeda_pipeline_state *new = priv_to_pipe_st(pipe->obj.state);
        struct komeda_pipeline_state *old;
        struct komeda_component *c;
-       u32 id, changed_comps = 0;
+       u32 id;
+       unsigned long changed_comps;
 
        old = komeda_pipeline_get_old_state(pipe, old_state);
 
        changed_comps = new->active_comps | old->active_comps;
 
-       DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%x.\n",
+       DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%lx.\n",
                         pipe->id, new->active_comps, changed_comps);
 
-       dp_for_each_set_bit(id, changed_comps) {
+       for_each_set_bit(id, &changed_comps, 32) {
                c = komeda_pipeline_get_component(pipe, id);
 
                if (new->active_comps & BIT(c->id))
index 7de170e..400193e 100644
@@ -55,7 +55,7 @@ config DRM_DISPLAY_CONNECTOR
        depends on OF
        help
          Driver for display connectors with support for DDC and hot-plug
-         detection. Most display controller handle display connectors
+         detection. Most display controllers handle display connectors
          internally and don't need this driver, but the DRM subsystem is
          moving towards separating connector handling from display controllers
          on ARM-based platforms. Saying Y here when this driver is not needed
@@ -213,6 +213,7 @@ config DRM_TOSHIBA_TC358762
        tristate "TC358762 DSI/DPI bridge"
        depends on OF
        select DRM_MIPI_DSI
+       select DRM_KMS_HELPER
        select DRM_PANEL_BRIDGE
        help
          Toshiba TC358762 DSI/DPI bridge driver.
index 55d8aa2..443f1b4 100644
@@ -5,6 +5,7 @@
 
 #include <linux/device.h>
 #include <linux/delay.h>
+#include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
 #include <linux/gpio.h>
 #include <linux/of_gpio.h>
index 5b4547e..dd9ed00 100644
@@ -1183,7 +1183,7 @@ EXPORT_SYMBOL(drm_atomic_add_encoder_bridges);
  * This function walks the current configuration and adds all connectors
  * currently using @crtc to the atomic configuration @state. Note that this
  * function must acquire the connection mutex. This can potentially cause
- * unneeded seralization if the update is just for the planes on one CRTC. Hence
+ * unneeded serialization if the update is just for the planes on one CRTC. Hence
  * drivers and helpers should only call this when really needed (e.g. when a
  * full modeset needs to happen due to some change).
  *
@@ -1248,7 +1248,7 @@ EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
  *
  * Since acquiring a plane state will always also acquire the w/w mutex of the
  * current CRTC for that plane (if there is any) adding all the plane states for
- * a CRTC will not reduce parallism of atomic updates.
+ * a CRTC will not reduce parallelism of atomic updates.
  *
  * Returns:
  * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
index 01f47e5..1590144 100644
@@ -4109,10 +4109,9 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
                return 0;
 
        up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
-       if (!up_req) {
-               DRM_ERROR("Not enough memory to process MST up req\n");
+       if (!up_req)
                return -ENOMEM;
-       }
+
        INIT_LIST_HEAD(&up_req->next);
 
        drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
index fad2249..1265de2 100644
@@ -170,7 +170,6 @@ void drm_sysfs_connector_remove(struct drm_connector *connector);
 void drm_sysfs_lease_event(struct drm_device *dev);
 
 /* drm_gem.c */
-struct drm_gem_object;
 int drm_gem_init(struct drm_device *dev);
 int drm_gem_handle_create_tail(struct drm_file *file_priv,
                               struct drm_gem_object *obj,
index 1ac67d4..33a93fa 100644
@@ -1864,6 +1864,9 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
 {
        struct drm_display_mode *mode;
 
+       if (cmd->xres == 0 || cmd->yres == 0)
+               return NULL;
+
        if (cmd->cvt)
                mode = drm_cvt_mode(dev,
                                    cmd->xres, cmd->yres,
index 6231a82..fdd2ec8 100644
@@ -350,12 +350,16 @@ EXPORT_SYMBOL(drm_syncobj_replace_fence);
  *
  * Assign an already-signaled stub fence to the sync object.
  */
-static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
+static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
 {
-       struct dma_fence *fence = dma_fence_get_stub();
+       struct dma_fence *fence = dma_fence_allocate_private_stub();
+
+       if (IS_ERR(fence))
+               return PTR_ERR(fence);
 
        drm_syncobj_replace_fence(syncobj, fence);
        dma_fence_put(fence);
+       return 0;
 }
 
 /* 5s default for wait submission */
@@ -478,6 +482,7 @@ EXPORT_SYMBOL(drm_syncobj_free);
 int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
                       struct dma_fence *fence)
 {
+       int ret;
        struct drm_syncobj *syncobj;
 
        syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
@@ -488,8 +493,13 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
        INIT_LIST_HEAD(&syncobj->cb_list);
        spin_lock_init(&syncobj->lock);
 
-       if (flags & DRM_SYNCOBJ_CREATE_SIGNALED)
-               drm_syncobj_assign_null_handle(syncobj);
+       if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
+               ret = drm_syncobj_assign_null_handle(syncobj);
+               if (ret < 0) {
+                       drm_syncobj_put(syncobj);
+                       return ret;
+               }
+       }
 
        if (fence)
                drm_syncobj_replace_fence(syncobj, fence);
@@ -1334,8 +1344,11 @@ drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
        if (ret < 0)
                return ret;
 
-       for (i = 0; i < args->count_handles; i++)
-               drm_syncobj_assign_null_handle(syncobjs[i]);
+       for (i = 0; i < args->count_handles; i++) {
+               ret = drm_syncobj_assign_null_handle(syncobjs[i]);
+               if (ret < 0)
+                       break;
+       }
 
        drm_syncobj_array_free(syncobjs, args->count_handles);
 
index 2bd9896..3417e1a 100644
@@ -1478,6 +1478,7 @@ static void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
        u64 diff_ns;
        u32 cur_vblank, diff = 1;
        int count = DRM_TIMESTAMP_MAXRETRIES;
+       u32 max_vblank_count = drm_max_vblank_count(dev, pipe);
 
        if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
                return;
@@ -1504,7 +1505,7 @@ static void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
        drm_dbg_vbl(dev,
                    "missed %d vblanks in %lld ns, frame duration=%d ns, hw_diff=%d\n",
                    diff, diff_ns, framedur_ns, cur_vblank - vblank->last);
-       store_vblank(dev, pipe, diff, t_vblank, cur_vblank);
+       vblank->last = (cur_vblank - diff) & max_vblank_count;
 }
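
A worked example of the new wrap-around handling, with illustrative values: for a 24-bit hardware counter, a rewind across zero now lands on the right frame number:

	u32 max_vblank_count = 0xffffff;	/* 24-bit hw counter, example */
	u32 cur_vblank = 5, diff = 10;

	/* (5 - 10) underflows to 0xfffffffb; masking wraps it to 0xfffffb. */
	u32 last = (cur_vblank - diff) & max_vblank_count;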
 
 /**
index 6d38c5c..db69f19 100644
@@ -689,7 +689,8 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
                struct page **pages = pvec + pinned;
 
                ret = pin_user_pages_fast(ptr, num_pages,
-                                         !userptr->ro ? FOLL_WRITE : 0, pages);
+                                         FOLL_WRITE | FOLL_FORCE | FOLL_LONGTERM,
+                                         pages);
                if (ret < 0) {
                        unpin_user_pages(pvec, pinned);
                        kvfree(pvec);
index c277d2f..b9a4b76 100644
@@ -13,7 +13,6 @@
 #include <linux/irq.h>
 #include <linux/mfd/syscon.h>
 #include <linux/of_device.h>
-#include <linux/of_gpio.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/regmap.h>
index 56ef882..f07641d 100644
@@ -36,7 +36,7 @@
 #include <linux/pm_runtime.h>
 
 static struct mutex power_mutex;       /* Serialize power ops */
-static spinlock_t power_ctrl_lock;     /* Serialize power claim */
+static DEFINE_SPINLOCK(power_ctrl_lock);       /* Serialize power claim */
 
 /**
  *     gma_power_init          -       initialise power manager
@@ -55,7 +55,6 @@ void gma_power_init(struct drm_device *dev)
        dev_priv->display_power = true; /* We start active */
        dev_priv->display_count = 0;    /* Currently no users */
        dev_priv->suspended = false;    /* And not suspended */
-       spin_lock_init(&power_ctrl_lock);
        mutex_init(&power_mutex);
 
        if (dev_priv->ops->init_pm)
index e21fb14..833d0c1 100644
@@ -84,13 +84,31 @@ static void intel_dsm_platform_mux_info(acpi_handle dhandle)
                return;
        }
 
+       if (!pkg->package.count) {
+               DRM_DEBUG_DRIVER("no connection in _DSM\n");
+               return;
+       }
+
        connector_count = &pkg->package.elements[0];
        DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
                  (unsigned long long)connector_count->integer.value);
        for (i = 1; i < pkg->package.count; i++) {
                union acpi_object *obj = &pkg->package.elements[i];
-               union acpi_object *connector_id = &obj->package.elements[0];
-               union acpi_object *info = &obj->package.elements[1];
+               union acpi_object *connector_id;
+               union acpi_object *info;
+
+               if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < 2) {
+                       DRM_DEBUG_DRIVER("Invalid object for MUX #%d\n", i);
+                       continue;
+               }
+
+               connector_id = &obj->package.elements[0];
+               info = &obj->package.elements[1];
+               if (info->type != ACPI_TYPE_BUFFER || info->buffer.length < 4) {
+                       DRM_DEBUG_DRIVER("Invalid info for MUX obj #%d\n", i);
+                       continue;
+               }
+
                DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
                          (unsigned long long)connector_id->integer.value);
                DRM_DEBUG_DRIVER("  port id: %s\n",
index d1a9841..e6a88c8 100644
@@ -215,7 +215,7 @@ static int imx_drm_bind(struct device *dev)
 
        ret = drmm_mode_config_init(drm);
        if (ret)
-               return ret;
+               goto err_kms;
 
        ret = drm_vblank_init(drm, MAX_CRTC);
        if (ret)
index dbfe39e..ffdc492 100644
@@ -197,6 +197,11 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder)
        int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
        int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
 
+       if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) {
+               dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux);
+               return;
+       }
+
        drm_panel_prepare(imx_ldb_ch->panel);
 
        if (dual) {
@@ -255,6 +260,11 @@ imx_ldb_encoder_atomic_mode_set(struct drm_encoder *encoder,
        int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
        u32 bus_format = imx_ldb_ch->bus_format;
 
+       if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) {
+               dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux);
+               return;
+       }
+
        if (mode->clock > 170000) {
                dev_warn(ldb->dev,
                         "%s: mode exceeds 170 MHz pixel clock\n", __func__);
@@ -583,7 +593,7 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
                struct imx_ldb_channel *channel = &imx_ldb->channel[i];
 
                if (!channel->ldb)
-                       break;
+                       continue;
 
                ret = imx_ldb_register(drm, channel);
                if (ret)
index dabb4a1..10f693e 100644
@@ -20,6 +20,7 @@ config DRM_MSM
        select SND_SOC_HDMI_CODEC if SND_SOC
        select SYNC_FILE
        select PM_OPP
+       select NVMEM
        help
          DRM/KMS driver for MSM/snapdragon.
 
@@ -76,14 +77,6 @@ config DRM_MSM_DSI
          Choose this option if you have a need for MIPI DSI connector
          support.
 
-config DRM_MSM_DSI_PLL
-       bool "Enable DSI PLL driver in MSM DRM"
-       depends on DRM_MSM_DSI && COMMON_CLK
-       default y
-       help
-         Choose this option to enable DSI PLL driver which provides DSI
-         source clocks under common clock framework.
-
 config DRM_MSM_DSI_28NM_PHY
        bool "Enable DSI 28nm PHY driver in MSM DRM"
        depends on DRM_MSM_DSI
index 3cc9061..610d630 100644
@@ -136,13 +136,4 @@ msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o
 msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/phy/dsi_phy_10nm.o
 msm-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/phy/dsi_phy_7nm.o
 
-ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
-msm-y += dsi/pll/dsi_pll.o
-msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
-msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o
-msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/pll/dsi_pll_14nm.o
-msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/pll/dsi_pll_10nm.o
-msm-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/pll/dsi_pll_7nm.o
-endif
-
 obj-$(CONFIG_DRM_MSM)  += msm.o
index 7e553d3..ce13d49 100644
@@ -1386,8 +1386,8 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
 
 static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
 {
-       *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
-               REG_A5XX_RBBM_PERFCTR_CP_0_HI);
+       *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
+               REG_A5XX_RBBM_ALWAYSON_COUNTER_HI);
 
        return 0;
 }
index 5ccc9da..c35b06b 100644
@@ -304,7 +304,7 @@ int a5xx_power_init(struct msm_gpu *gpu)
        /* Set up the limits management */
        if (adreno_is_a530(adreno_gpu))
                a530_lm_setup(gpu);
-       else
+       else if (adreno_is_a540(adreno_gpu))
                a540_lm_setup(gpu);
 
        /* Set up SP/TP power collapse */
index 71c917f..3d55e15 100644
@@ -246,7 +246,7 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
 }
 
 struct a6xx_gmu_oob_bits {
-       int set, ack, set_new, ack_new;
+       int set, ack, set_new, ack_new, clear, clear_new;
        const char *name;
 };
 
@@ -260,6 +260,8 @@ static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
                .ack = 24,
                .set_new = 30,
                .ack_new = 31,
+               .clear = 24,
+               .clear_new = 31,
        },
 
        [GMU_OOB_PERFCOUNTER_SET] = {
@@ -268,18 +270,22 @@ static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
                .ack = 25,
                .set_new = 28,
                .ack_new = 30,
+               .clear = 25,
+               .clear_new = 29,
        },
 
        [GMU_OOB_BOOT_SLUMBER] = {
                .name = "BOOT_SLUMBER",
                .set = 22,
                .ack = 30,
+               .clear = 30,
        },
 
        [GMU_OOB_DCVS_SET] = {
                .name = "GPU_DCVS",
                .set = 23,
                .ack = 31,
+               .clear = 31,
        },
 };
 
@@ -335,11 +341,11 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
                return;
 
        if (gmu->legacy)
-               bit = a6xx_gmu_oob_bits[state].ack;
+               bit = a6xx_gmu_oob_bits[state].clear;
        else
-               bit = a6xx_gmu_oob_bits[state].ack_new;
+               bit = a6xx_gmu_oob_bits[state].clear_new;
 
-       gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, bit);
+       gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
 }
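
The distinction matters because HOST2GMU_INTR_SET takes a bitmask, not a bit index. A worked example with the legacy GPU_SET entry from the table above:

	/*
	 * clear = 24 for GMU_OOB_GPU_SET in legacy mode:
	 *   before: gmu_write(..., bit)      wrote 24 = 0x00000018 (wrong bits)
	 *   after:  gmu_write(..., 1 << bit) writes BIT(24) = 0x01000000,
	 *           the actual clear request for this OOB state.
	 */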
 
 /* Enable CPU control of SPTP power collapse */
index ba8e9d3..d553f62 100644
@@ -522,28 +522,73 @@ static int a6xx_cp_init(struct msm_gpu *gpu)
        return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
 }
 
-static void a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
+/*
+ * Check that the microcode version is new enough to include several key
+ * security fixes. Return true if the ucode is safe.
+ */
+static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
                struct drm_gem_object *obj)
 {
+       struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+       struct msm_gpu *gpu = &adreno_gpu->base;
        u32 *buf = msm_gem_get_vaddr(obj);
+       bool ret = false;
 
        if (IS_ERR(buf))
-               return;
+               return false;
 
        /*
-        * If the lowest nibble is 0xa that is an indication that this microcode
-        * has been patched. The actual version is in dword [3] but we only care
-        * about the patchlevel which is the lowest nibble of dword [3]
-        *
-        * Otherwise check that the firmware is greater than or equal to 1.90
-        * which was the first version that had this fix built in
+        * Targets up to a640 (a618, a630 and a640) need to check for a
+        * microcode version that is patched to support the whereami opcode or
+        * one that is new enough to include it by default.
         */
-       if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
-               a6xx_gpu->has_whereami = true;
-       else if ((buf[0] & 0xfff) > 0x190)
-               a6xx_gpu->has_whereami = true;
+       if (adreno_is_a618(adreno_gpu) || adreno_is_a630(adreno_gpu) ||
+               adreno_is_a640(adreno_gpu)) {
+               /*
+                * If the lowest nibble is 0xa that is an indication that this
+                * microcode has been patched. The actual version is in dword
+                * [3] but we only care about the patchlevel which is the lowest
+                * nibble of dword [3]
+                *
+                * Otherwise check that the firmware is greater than or equal
+                * to 1.90 which was the first version that had this fix built
+                * in
+                */
+               if ((((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) ||
+                       (buf[0] & 0xfff) >= 0x190) {
+                       a6xx_gpu->has_whereami = true;
+                       ret = true;
+                       goto out;
+               }
 
+               DRM_DEV_ERROR(&gpu->pdev->dev,
+                       "a630 SQE ucode is too old. Have version %x need at least %x\n",
+                       buf[0] & 0xfff, 0x190);
+       } else {
+               /*
+                * a650 tier targets don't need whereami but still need to be
+                * equal to or newer than 0.95 for other security fixes
+                */
+               if (adreno_is_a650(adreno_gpu)) {
+                       if ((buf[0] & 0xfff) >= 0x095) {
+                               ret = true;
+                               goto out;
+                       }
+
+                       DRM_DEV_ERROR(&gpu->pdev->dev,
+                               "a650 SQE ucode is too old. Have version %x need at least %x\n",
+                               buf[0] & 0xfff, 0x095);
+               }
+
+               /*
+                * When a660 is added, those targets should return true here
+                * since they have all the critical security fixes built in
+                * from the start.
+                */
+       }
+out:
        msm_gem_put_vaddr(obj);
+       return ret;
 }
 
 static int a6xx_ucode_init(struct msm_gpu *gpu)
@@ -566,7 +611,13 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
                }
 
                msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
-               a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo);
+               if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) {
+                       msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+                       drm_gem_object_put(a6xx_gpu->sqe_bo);
+
+                       a6xx_gpu->sqe_bo = NULL;
+                       return -EPERM;
+               }
        }
 
        gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
@@ -1177,8 +1228,8 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
        /* Force the GPU power on so we can read this register */
        a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
 
-       *value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
-               REG_A6XX_RBBM_PERFCTR_CP_0_HI);
+       *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+               REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
 
        a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
        mutex_unlock(&perfcounter_oob);
@@ -1350,35 +1401,26 @@ static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu,
                u32 revn)
 {
        struct opp_table *opp_table;
-       struct nvmem_cell *cell;
        u32 supp_hw = UINT_MAX;
-       void *buf;
+       u16 speedbin;
+       int ret;
 
-       cell = nvmem_cell_get(dev, "speed_bin");
+       ret = nvmem_cell_read_u16(dev, "speed_bin", &speedbin);
        /*
         * -ENOENT means that the platform doesn't support speedbin which is
         * fine
         */
-       if (PTR_ERR(cell) == -ENOENT)
+       if (ret == -ENOENT) {
                return 0;
-       else if (IS_ERR(cell)) {
-               DRM_DEV_ERROR(dev,
-                               "failed to read speed-bin. Some OPPs may not be supported by hardware");
-               goto done;
-       }
-
-       buf = nvmem_cell_read(cell, NULL);
-       if (IS_ERR(buf)) {
-               nvmem_cell_put(cell);
+       } else if (ret) {
                DRM_DEV_ERROR(dev,
-                               "failed to read speed-bin. Some OPPs may not be supported by hardware");
+                             "failed to read speed-bin (%d). Some OPPs may not be supported by hardware",
+                             ret);
                goto done;
        }
+       speedbin = le16_to_cpu(speedbin);
 
-       supp_hw = fuse_to_supp_hw(dev, revn, *((u32 *) buf));
-
-       kfree(buf);
-       nvmem_cell_put(cell);
+       supp_hw = fuse_to_supp_hw(dev, revn, speedbin);
 
 done:
        opp_table = dev_pm_opp_set_supported_hw(dev, &supp_hw, 1);
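nvmem_cell_read_u16() collapses the old get/read/put sequence into one call, and the only error worth swallowing is -ENOENT (no speed-bin cell on this platform); anything else logs and falls through with supp_hw left at UINT_MAX. A standalone mock of that decision tree (read_speed_bin() is a stand-in, not a kernel API):

    #include <stdint.h>
    #include <stdio.h>
    #include <errno.h>

    /* Stand-in for nvmem_cell_read_u16(): returns 0 and fills *bin on
     * success, -ENOENT when the platform has no speed-bin cell, or another
     * negative errno on failure. */
    static int read_speed_bin(uint16_t *bin)
    {
            *bin = 2;
            return 0;
    }

    int main(void)
    {
            uint16_t speedbin;
            int ret = read_speed_bin(&speedbin);

            if (ret == -ENOENT)
                    return 0;       /* no fuse: every OPP stays enabled */
            if (ret) {
                    fprintf(stderr, "failed to read speed-bin (%d)\n", ret);
                    return 1;       /* driver continues with supp_hw = UINT_MAX */
            }

            printf("speed bin %u\n", speedbin);
            return 0;
    }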
index 0f184c3..6a35a30 100644
@@ -273,6 +273,9 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
        case MSM_PARAM_FAULTS:
                *value = gpu->global_faults;
                return 0;
+       case MSM_PARAM_SUSPENDS:
+               *value = gpu->suspend_count;
+               return 0;
        default:
                DBG("%s: invalid param: %u", gpu->name, param);
                return -EINVAL;
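Userspace queries the new counter like any other param, through DRM_MSM_GET_PARAM. A hedged sketch (the render-node path and the presence of MSM_PARAM_SUSPENDS in the installed uapi headers are assumptions; build against libdrm):

    #include <stdio.h>
    #include <fcntl.h>
    #include <xf86drm.h>
    #include <msm_drm.h>    /* needs headers that define MSM_PARAM_SUSPENDS */

    int main(void)
    {
            struct drm_msm_param req = {
                    .pipe  = MSM_PIPE_3D0,
                    .param = MSM_PARAM_SUSPENDS,
            };
            int fd = open("/dev/dri/renderD128", O_RDWR); /* assumed node */

            if (fd < 0 || drmCommandWriteRead(fd, DRM_MSM_GET_PARAM,
                                              &req, sizeof(req)))
                    return 1;

            printf("GPU suspend count: %llu\n",
                   (unsigned long long)req.value);
            return 0;
    }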
index 84ea09d..cdec3fb 100644
@@ -58,8 +58,8 @@ int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms,
        if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.irq_idx_lookup)
                return -EINVAL;
 
-       return dpu_kms->hw_intr->ops.irq_idx_lookup(intr_type,
-                       instance_idx);
+       return dpu_kms->hw_intr->ops.irq_idx_lookup(dpu_kms->hw_intr,
+                       intr_type, instance_idx);
 }
 
 /**
index b6b3bba..7cba5bb 100644
@@ -380,7 +380,6 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
        } else {
                DPU_DEBUG("crtc=%d disable\n", crtc->base.id);
                memset(old, 0, sizeof(*old));
-               memset(new, 0, sizeof(*new));
                update_bus = true;
                update_clk = true;
        }
index 9607a76..7c29976 100644
@@ -66,6 +66,83 @@ static void dpu_crtc_destroy(struct drm_crtc *crtc)
        kfree(dpu_crtc);
 }
 
+static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_encoder *encoder;
+
+       drm_for_each_encoder(encoder, dev)
+               if (encoder->crtc == crtc)
+                       return encoder;
+
+       return NULL;
+}
+
+static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc)
+{
+       struct drm_encoder *encoder;
+
+       encoder = get_encoder_from_crtc(crtc);
+       if (!encoder) {
+               DRM_ERROR("no encoder found for crtc %d\n", crtc->index);
+               return 0;
+       }
+
+       return dpu_encoder_get_frame_count(encoder);
+}
+
+static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc,
+                                          bool in_vblank_irq,
+                                          int *vpos, int *hpos,
+                                          ktime_t *stime, ktime_t *etime,
+                                          const struct drm_display_mode *mode)
+{
+       unsigned int pipe = crtc->index;
+       struct drm_encoder *encoder;
+       int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
+
+       encoder = get_encoder_from_crtc(crtc);
+       if (!encoder) {
+               DRM_ERROR("no encoder found for crtc %d\n", pipe);
+               return false;
+       }
+
+       vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
+       vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
+
+       /*
+        * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
+        * the end of VFP. Translate the porch values relative to the line
+        * counter positions.
+        */
+
+       vactive_start = vsw + vbp + 1;
+       vactive_end = vactive_start + mode->crtc_vdisplay;
+
+       /* last scan line before VSYNC */
+       vfp_end = mode->crtc_vtotal;
+
+       if (stime)
+               *stime = ktime_get();
+
+       line = dpu_encoder_get_linecount(encoder);
+
+       if (line < vactive_start)
+               line -= vactive_start;
+       else if (line > vactive_end)
+               line = line - vfp_end - vactive_start;
+       else
+               line -= vactive_start;
+
+       *vpos = line;
+       *hpos = 0;
+
+       if (etime)
+               *etime = ktime_get();
+
+       return true;
+}
+
 static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
                struct dpu_plane_state *pstate, struct dpu_format *format)
 {
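The translation in dpu_crtc_get_scanout_position() turns the hardware line counter (1 at the start of VSYNC, crtc_vtotal at the end of the front porch) into a position relative to the first active line: negative while in blanking, 0 through vdisplay-1 inside active video. A standalone recomputation with illustrative 1080p-style timings (the numbers are examples, not from this patch):

    #include <stdio.h>

    #define VDISPLAY    1080
    #define VSYNC_START 1084
    #define VSYNC_END   1089
    #define VTOTAL      1125

    static int line_to_vpos(int line)
    {
            int vsw = VSYNC_END - VSYNC_START;          /* sync width: 5 */
            int vbp = VTOTAL - VSYNC_END;               /* back porch: 36 */
            int vactive_start = vsw + vbp + 1;          /* 42 */
            int vactive_end = vactive_start + VDISPLAY; /* 1122 */
            int vfp_end = VTOTAL;

            if (line > vactive_end)                     /* front porch */
                    return line - vfp_end - vactive_start;
            return line - vactive_start;    /* VSYNC/back porch or active */
    }

    int main(void)
    {
            printf("line 1    -> vpos %d\n", line_to_vpos(1));    /* -41 */
            printf("line 42   -> vpos %d\n", line_to_vpos(42));   /*   0 */
            printf("line 1125 -> vpos %d\n", line_to_vpos(1125)); /* -42 */
            return 0;
    }

Note that the first and third branches of the original compute the same value, so collapsing them as above does not change the result.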
@@ -130,7 +207,9 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
        uint32_t stage_idx, lm_idx;
        int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
        bool bg_alpha_enable = false;
+       DECLARE_BITMAP(fetch_active, SSPP_MAX);
 
+       memset(fetch_active, 0, sizeof(fetch_active));
        drm_atomic_crtc_for_each_plane(plane, crtc) {
                state = plane->state;
                if (!state)
@@ -140,7 +219,7 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
                fb = state->fb;
 
                dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
-
+               set_bit(dpu_plane_pipe(plane), fetch_active);
                DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
                                crtc->base.id,
                                pstate->stage,
@@ -180,6 +259,9 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
                }
        }
 
+       if (ctl->ops.set_active_pipes)
+               ctl->ops.set_active_pipes(ctl, fetch_active);
+
         _dpu_crtc_program_lm_output_roi(crtc);
 }
 
@@ -839,6 +921,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
                DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
                                crtc->base.id, crtc_state->enable,
                                crtc_state->active);
+               memset(&cstate->new_perf, 0, sizeof(cstate->new_perf));
                goto end;
        }
 
@@ -1247,6 +1330,8 @@ static const struct drm_crtc_funcs dpu_crtc_funcs = {
        .early_unregister = dpu_crtc_early_unregister,
        .enable_vblank  = msm_crtc_enable_vblank,
        .disable_vblank = msm_crtc_disable_vblank,
+       .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+       .get_vblank_counter = dpu_crtc_get_vblank_counter,
 };
 
 static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
@@ -1255,6 +1340,7 @@ static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
        .atomic_check = dpu_crtc_atomic_check,
        .atomic_begin = dpu_crtc_atomic_begin,
        .atomic_flush = dpu_crtc_atomic_flush,
+       .get_scanout_position = dpu_crtc_get_scanout_position,
 };
 
 /* initialize crtc */
index 288e95e..8d94205 100644
@@ -426,6 +426,36 @@ int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
        return 0;
 }
 
+int dpu_encoder_get_frame_count(struct drm_encoder *drm_enc)
+{
+       struct dpu_encoder_virt *dpu_enc;
+       struct dpu_encoder_phys *phys;
+       int framecount = 0;
+
+       dpu_enc = to_dpu_encoder_virt(drm_enc);
+       phys = dpu_enc ? dpu_enc->cur_master : NULL;
+
+       if (phys && phys->ops.get_frame_count)
+               framecount = phys->ops.get_frame_count(phys);
+
+       return framecount;
+}
+
+int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
+{
+       struct dpu_encoder_virt *dpu_enc;
+       struct dpu_encoder_phys *phys;
+       int linecount = 0;
+
+       dpu_enc = to_dpu_encoder_virt(drm_enc);
+       phys = dpu_enc ? dpu_enc->cur_master : NULL;
+
+       if (phys && phys->ops.get_line_count)
+               linecount = phys->ops.get_line_count(phys);
+
+       return linecount;
+}
+
 void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc,
                                  struct dpu_encoder_hw_resources *hw_res)
 {
index b491346..99a5d73 100644
@@ -156,5 +156,16 @@ void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc);
  */
 void dpu_encoder_set_idle_timeout(struct drm_encoder *drm_enc,
                                                        u32 idle_timeout);
+/**
+ * dpu_encoder_get_linecount - get interface line count for the encoder.
+ * @drm_enc:    Pointer to previously created drm encoder structure
+ */
+int dpu_encoder_get_linecount(struct drm_encoder *drm_enc);
+
+/**
+ * dpu_encoder_get_frame_count - get interface frame count for the encoder.
+ * @drm_enc:    Pointer to previously created drm encoder structure
+ */
+int dpu_encoder_get_frame_count(struct drm_encoder *drm_enc);
 
 #endif /* __DPU_ENCODER_H__ */
index f8f2515..ecbc4be 100644
@@ -143,6 +143,7 @@ struct dpu_encoder_phys_ops {
        void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc);
        void (*restore)(struct dpu_encoder_phys *phys);
        int (*get_line_count)(struct dpu_encoder_phys *phys);
+       int (*get_frame_count)(struct dpu_encoder_phys *phys);
 };
 
 /**
index 9a69fad..0e06b7e 100644
@@ -658,6 +658,31 @@ static int dpu_encoder_phys_vid_get_line_count(
        return phys_enc->hw_intf->ops.get_line_count(phys_enc->hw_intf);
 }
 
+static int dpu_encoder_phys_vid_get_frame_count(
+               struct dpu_encoder_phys *phys_enc)
+{
+       struct intf_status s = {0};
+       u32 fetch_start = 0;
+       struct drm_display_mode mode = phys_enc->cached_mode;
+
+       if (!dpu_encoder_phys_vid_is_master(phys_enc))
+               return -EINVAL;
+
+       if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.get_status)
+               return -EINVAL;
+
+       phys_enc->hw_intf->ops.get_status(phys_enc->hw_intf, &s);
+
+       if (s.is_prog_fetch_en && s.is_en) {
+               fetch_start = mode.vtotal - (mode.vsync_start - mode.vdisplay);
+               if ((s.line_count > fetch_start) &&
+                       (s.line_count <= mode.vtotal))
+                       return s.frame_count + 1;
+       }
+
+       return s.frame_count;
+}
+
 static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
 {
        ops->is_master = dpu_encoder_phys_vid_is_master;
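With programmable fetch enabled, fetching for the next frame begins during the current frame's front porch; fetch_start above is the first line of that window, so a counter sampled inside it reads one frame behind and gets the +1. A worked check with the same illustrative timings:

    #include <stdio.h>
    #include <stdbool.h>

    #define VDISPLAY    1080
    #define VSYNC_START 1084
    #define VTOTAL      1125

    static unsigned int frame_count(unsigned int hw_frames, int line,
                                    bool prog_fetch_en)
    {
            int fetch_start = VTOTAL - (VSYNC_START - VDISPLAY); /* 1121 */

            if (prog_fetch_en && line > fetch_start && line <= VTOTAL)
                    return hw_frames + 1;
            return hw_frames;
    }

    int main(void)
    {
            printf("mid-frame:       %u\n", frame_count(100, 500, true));  /* 100 */
            printf("in fetch window: %u\n", frame_count(100, 1123, true)); /* 101 */
            return 0;
    }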
@@ -676,6 +701,7 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
        ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
        ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
        ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
+       ops->get_frame_count = dpu_encoder_phys_vid_get_frame_count;
 }
 
 struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
index 189f353..b569030 100644
@@ -22,7 +22,7 @@
        (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED4))
 
 #define VIG_SM8250_MASK \
-       (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3LITE))
+       (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3LITE))
 
 #define DMA_SDM845_MASK \
        (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
@@ -43,6 +43,9 @@
 #define PINGPONG_SDM845_SPLIT_MASK \
        (PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
 
+#define CTL_SC7280_MASK \
+       (BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_FETCH_ACTIVE))
+
 #define MERGE_3D_SM8150_MASK (0)
 
 #define DSPP_SC7180_MASK BIT(DPU_DSPP_PCC)
 
 #define INTF_SC7180_MASK BIT(DPU_INTF_INPUT_CTRL) | BIT(DPU_INTF_TE)
 
+#define INTF_SC7280_MASK INTF_SC7180_MASK | BIT(DPU_DATA_HCTL_EN)
+
+#define INTR_SC7180_MASK \
+       (BIT(DPU_IRQ_TYPE_PING_PONG_RD_PTR) |\
+       BIT(DPU_IRQ_TYPE_PING_PONG_WR_PTR) |\
+       BIT(DPU_IRQ_TYPE_PING_PONG_AUTO_REF) |\
+       BIT(DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK) |\
+       BIT(DPU_IRQ_TYPE_PING_PONG_TE_CHECK))
+
 #define DEFAULT_PIXEL_RAM_SIZE         (50 * 1024)
 #define DEFAULT_DPU_LINE_WIDTH         2048
 #define DEFAULT_DPU_OUTPUT_LINE_WIDTH  2560
@@ -199,6 +211,18 @@ static const struct dpu_caps sm8250_dpu_caps = {
        .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
 };
 
+static const struct dpu_caps sc7280_dpu_caps = {
+       .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+       .max_mixer_blendstages = 0x7,
+       .qseed_type = DPU_SSPP_SCALER_QSEED4,
+       .smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
+       .ubwc_version = DPU_HW_UBWC_VER_30,
+       .has_dim_layer = true,
+       .has_idle_pc = true,
+       .max_linewidth = 2400,
+       .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
 static const struct dpu_mdp_cfg sdm845_mdp[] = {
        {
        .name = "top_0", .id = MDP_TOP,
@@ -268,6 +292,22 @@ static const struct dpu_mdp_cfg sm8250_mdp[] = {
        },
 };
 
+static const struct dpu_mdp_cfg sc7280_mdp[] = {
+       {
+       .name = "top_0", .id = MDP_TOP,
+       .base = 0x0, .len = 0x2014,
+       .highest_bank_bit = 0x1,
+       .clk_ctrls[DPU_CLK_CTRL_VIG0] = {
+               .reg_off = 0x2AC, .bit_off = 0},
+       .clk_ctrls[DPU_CLK_CTRL_DMA0] = {
+               .reg_off = 0x2AC, .bit_off = 8},
+       .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+               .reg_off = 0x2B4, .bit_off = 8},
+       .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+               .reg_off = 0x2C4, .bit_off = 8},
+       },
+};
+
 /*************************************************************
  * CTL sub blocks config
  *************************************************************/
@@ -350,6 +390,29 @@ static const struct dpu_ctl_cfg sm8150_ctl[] = {
        },
 };
 
+static const struct dpu_ctl_cfg sc7280_ctl[] = {
+       {
+       .name = "ctl_0", .id = CTL_0,
+       .base = 0x15000, .len = 0x1E8,
+       .features = CTL_SC7280_MASK
+       },
+       {
+       .name = "ctl_1", .id = CTL_1,
+       .base = 0x16000, .len = 0x1E8,
+       .features = CTL_SC7280_MASK
+       },
+       {
+       .name = "ctl_2", .id = CTL_2,
+       .base = 0x17000, .len = 0x1E8,
+       .features = CTL_SC7280_MASK
+       },
+       {
+       .name = "ctl_3", .id = CTL_3,
+       .base = 0x18000, .len = 0x1E8,
+       .features = CTL_SC7280_MASK
+       },
+};
+
 /*************************************************************
  * SSPP sub blocks config
  *************************************************************/
@@ -475,6 +538,17 @@ static const struct dpu_sspp_cfg sm8250_sspp[] = {
                sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
 };
 
+static const struct dpu_sspp_cfg sc7280_sspp[] = {
+       SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
+               sc7180_vig_sblk_0, 0,  SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
+       SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000,  DMA_SDM845_MASK,
+               sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+       SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000,  DMA_CURSOR_SDM845_MASK,
+               sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+       SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000,  DMA_CURSOR_SDM845_MASK,
+               sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+};
+
 /*************************************************************
  * MIXER sub blocks config
  *************************************************************/
@@ -550,6 +624,15 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
                &sdm845_lm_sblk, PINGPONG_5, LM_4, 0),
 };
 
+static const struct dpu_lm_cfg sc7280_lm[] = {
+       LM_BLK("lm_0", LM_0, 0x44000, MIXER_SC7180_MASK,
+               &sc7180_lm_sblk, PINGPONG_0, 0, 0),
+       LM_BLK("lm_2", LM_2, 0x46000, MIXER_SC7180_MASK,
+               &sc7180_lm_sblk, PINGPONG_2, LM_3, 0),
+       LM_BLK("lm_3", LM_3, 0x47000, MIXER_SC7180_MASK,
+               &sc7180_lm_sblk, PINGPONG_3, LM_2, 0),
+};
+
 /*************************************************************
  * DSPP sub blocks config
  *************************************************************/
@@ -602,42 +685,47 @@ static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
                .len = 0x20, .version = 0x10000},
 };
 
-#define PP_BLK_TE(_name, _id, _base, _merge_3d) \
+static const struct dpu_pingpong_sub_blks sc7280_pp_sblk = {
+       .dither = {.id = DPU_PINGPONG_DITHER, .base = 0xe0,
+       .len = 0x20, .version = 0x20000},
+};
+
+#define PP_BLK_TE(_name, _id, _base, _merge_3d, _sblk) \
        {\
        .name = _name, .id = _id, \
        .base = _base, .len = 0xd4, \
        .features = PINGPONG_SDM845_SPLIT_MASK, \
        .merge_3d = _merge_3d, \
-       .sblk = &sdm845_pp_sblk_te \
+       .sblk = &_sblk \
        }
-#define PP_BLK(_name, _id, _base, _merge_3d) \
+#define PP_BLK(_name, _id, _base, _merge_3d, _sblk) \
        {\
        .name = _name, .id = _id, \
        .base = _base, .len = 0xd4, \
        .features = PINGPONG_SDM845_MASK, \
        .merge_3d = _merge_3d, \
-       .sblk = &sdm845_pp_sblk \
+       .sblk = &_sblk \
        }
 
 static const struct dpu_pingpong_cfg sdm845_pp[] = {
-       PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0),
-       PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0),
-       PP_BLK("pingpong_2", PINGPONG_2, 0x71000, 0),
-       PP_BLK("pingpong_3", PINGPONG_3, 0x71800, 0),
+       PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk_te),
+       PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk_te),
+       PP_BLK("pingpong_2", PINGPONG_2, 0x71000, 0, sdm845_pp_sblk),
+       PP_BLK("pingpong_3", PINGPONG_3, 0x71800, 0, sdm845_pp_sblk),
 };
 
 static struct dpu_pingpong_cfg sc7180_pp[] = {
-       PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0),
-       PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0),
+       PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk_te),
+       PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk_te),
 };
 
 static const struct dpu_pingpong_cfg sm8150_pp[] = {
-       PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0),
-       PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0),
-       PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1),
-       PP_BLK("pingpong_3", PINGPONG_3, 0x71800, MERGE_3D_1),
-       PP_BLK("pingpong_4", PINGPONG_4, 0x72000, MERGE_3D_2),
-       PP_BLK("pingpong_5", PINGPONG_5, 0x72800, MERGE_3D_2),
+       PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk_te),
+       PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk_te),
+       PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1, sdm845_pp_sblk),
+       PP_BLK("pingpong_3", PINGPONG_3, 0x71800, MERGE_3D_1, sdm845_pp_sblk),
+       PP_BLK("pingpong_4", PINGPONG_4, 0x72000, MERGE_3D_2, sdm845_pp_sblk),
+       PP_BLK("pingpong_5", PINGPONG_5, 0x72800, MERGE_3D_2, sdm845_pp_sblk),
 };
 
 /*************************************************************
@@ -657,6 +745,12 @@ static const struct dpu_merge_3d_cfg sm8150_merge_3d[] = {
        MERGE_3D_BLK("merge_3d_2", MERGE_3D_2, 0x83200),
 };
 
+static const struct dpu_pingpong_cfg sc7280_pp[] = {
+       PP_BLK("pingpong_0", PINGPONG_0, 0x59000, 0, sc7280_pp_sblk),
+       PP_BLK("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk),
+       PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk),
+       PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk),
+};
 /*************************************************************
  * INTF sub blocks config
  *************************************************************/
@@ -689,6 +783,12 @@ static const struct dpu_intf_cfg sm8150_intf[] = {
        INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1, 24, INTF_SC7180_MASK),
 };
 
+static const struct dpu_intf_cfg sc7280_intf[] = {
+       INTF_BLK("intf_0", INTF_0, 0x34000, INTF_DP, 0, 24, INTF_SC7280_MASK),
+       INTF_BLK("intf_1", INTF_1, 0x35000, INTF_DSI, 0, 24, INTF_SC7280_MASK),
+       INTF_BLK("intf_5", INTF_5, 0x39000, INTF_EDP, 0, 24, INTF_SC7280_MASK),
+};
+
 /*************************************************************
  * VBIF sub blocks config
  *************************************************************/
@@ -817,6 +917,8 @@ static const struct dpu_perf_cfg sdm845_perf_data = {
                {.rd_enable = 1, .wr_enable = 1},
                {.rd_enable = 1, .wr_enable = 0}
        },
+       .clk_inefficiency_factor = 105,
+       .bw_inefficiency_factor = 120,
 };
 
 static const struct dpu_perf_cfg sc7180_perf_data = {
@@ -852,6 +954,7 @@ static const struct dpu_perf_cfg sm8150_perf_data = {
        .min_core_ib = 2400000,
        .min_llcc_ib = 800000,
        .min_dram_ib = 800000,
+       .min_prefill_lines = 24,
        .danger_lut_tbl = {0xf, 0xffff, 0x0},
        .qos_lut_tbl = {
                {.nentry = ARRAY_SIZE(sm8150_qos_linear),
@@ -869,6 +972,8 @@ static const struct dpu_perf_cfg sm8150_perf_data = {
                {.rd_enable = 1, .wr_enable = 1},
                {.rd_enable = 1, .wr_enable = 0}
        },
+       .clk_inefficiency_factor = 105,
+       .bw_inefficiency_factor = 120,
 };
 
 static const struct dpu_perf_cfg sm8250_perf_data = {
@@ -877,6 +982,7 @@ static const struct dpu_perf_cfg sm8250_perf_data = {
        .min_core_ib = 4800000,
        .min_llcc_ib = 0,
        .min_dram_ib = 800000,
+       .min_prefill_lines = 35,
        .danger_lut_tbl = {0xf, 0xffff, 0x0},
        .qos_lut_tbl = {
                {.nentry = ARRAY_SIZE(sc7180_qos_linear),
@@ -894,6 +1000,35 @@ static const struct dpu_perf_cfg sm8250_perf_data = {
                {.rd_enable = 1, .wr_enable = 1},
                {.rd_enable = 1, .wr_enable = 0}
        },
+       .clk_inefficiency_factor = 105,
+       .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_perf_cfg sc7280_perf_data = {
+       .max_bw_low = 4700000,
+       .max_bw_high = 8800000,
+       .min_core_ib = 2500000,
+       .min_llcc_ib = 0,
+       .min_dram_ib = 1600000,
+       .min_prefill_lines = 24,
+       .danger_lut_tbl = {0xffff, 0xffff, 0x0},
+       .qos_lut_tbl = {
+               {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+               .entries = sc7180_qos_macrotile
+               },
+               {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+               .entries = sc7180_qos_macrotile
+               },
+               {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+               .entries = sc7180_qos_nrt
+               },
+       },
+       .cdp_cfg = {
+               {.rd_enable = 1, .wr_enable = 1},
+               {.rd_enable = 1, .wr_enable = 0}
+       },
+       .clk_inefficiency_factor = 105,
+       .bw_inefficiency_factor = 120,
 };
 
 /*************************************************************
@@ -957,6 +1092,7 @@ static void sc7180_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
                .dma_cfg = sdm845_regdma,
                .perf = sc7180_perf_data,
                .mdss_irqs = 0x3f,
+               .obsolete_irq = INTR_SC7180_MASK,
        };
 }
 
@@ -1026,6 +1162,30 @@ static void sm8250_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
        };
 }
 
+static void sc7280_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
+{
+       *dpu_cfg = (struct dpu_mdss_cfg){
+               .caps = &sc7280_dpu_caps,
+               .mdp_count = ARRAY_SIZE(sc7280_mdp),
+               .mdp = sc7280_mdp,
+               .ctl_count = ARRAY_SIZE(sc7280_ctl),
+               .ctl = sc7280_ctl,
+               .sspp_count = ARRAY_SIZE(sc7280_sspp),
+               .sspp = sc7280_sspp,
+               .mixer_count = ARRAY_SIZE(sc7280_lm),
+               .mixer = sc7280_lm,
+               .pingpong_count = ARRAY_SIZE(sc7280_pp),
+               .pingpong = sc7280_pp,
+               .intf_count = ARRAY_SIZE(sc7280_intf),
+               .intf = sc7280_intf,
+               .vbif_count = ARRAY_SIZE(sdm845_vbif),
+               .vbif = sdm845_vbif,
+               .perf = sc7280_perf_data,
+               .mdss_irqs = 0x1c07,
+               .obsolete_irq = INTR_SC7180_MASK,
+       };
+}
+
 static const struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
        { .hw_rev = DPU_HW_VER_400, .cfg_init = sdm845_cfg_init},
        { .hw_rev = DPU_HW_VER_401, .cfg_init = sdm845_cfg_init},
@@ -1033,6 +1193,7 @@ static const struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
        { .hw_rev = DPU_HW_VER_501, .cfg_init = sm8150_cfg_init},
        { .hw_rev = DPU_HW_VER_600, .cfg_init = sm8250_cfg_init},
        { .hw_rev = DPU_HW_VER_620, .cfg_init = sc7180_cfg_init},
+       { .hw_rev = DPU_HW_VER_720, .cfg_init = sc7280_cfg_init},
 };
 
 void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg)
index ea4647d..4dfd8a2 100644
@@ -41,7 +41,7 @@
 #define DPU_HW_VER_501 DPU_HW_VER(5, 0, 1) /* sm8150 v2.0 */
 #define DPU_HW_VER_600 DPU_HW_VER(6, 0, 0) /* sm8250 */
 #define DPU_HW_VER_620 DPU_HW_VER(6, 2, 0) /* sc7180 v1.0 */
-
+#define DPU_HW_VER_720 DPU_HW_VER(7, 2, 0) /* sc7280 */
 
 #define IS_MSM8996_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_170)
 #define IS_MSM8998_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_300)
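DPU_HW_VER packs major/minor/step into a single u32 so the hw_rev table can match on it; the packing sketched here (major in the top nibble, minor in the next twelve bits) is recalled from the header rather than shown in this hunk, so treat it as an assumption:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed layout of dpu_hw_catalog.h's DPU_HW_VER(); verify against
     * the header before relying on it. */
    #define DPU_HW_VER(MAJOR, MINOR, STEP) \
            ((uint32_t)(((MAJOR) & 0xF) << 28) | \
             (((MINOR) & 0xFFF) << 16) | ((STEP) & 0xFFFF))

    int main(void)
    {
            printf("DPU_HW_VER_720 = 0x%08x\n", DPU_HW_VER(7, 2, 0));
            /* prints 0x70020000 */
            return 0;
    }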
@@ -49,7 +49,7 @@
 #define IS_SDM670_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_410)
 #define IS_SDM855_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_500)
 #define IS_SC7180_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_620)
-
+#define IS_SC7280_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_720)
 
 #define DPU_HW_BLK_NAME_LEN    16
 
@@ -185,6 +185,7 @@ enum {
 enum {
        DPU_CTL_SPLIT_DISPLAY = 0x1,
        DPU_CTL_ACTIVE_CFG,
+       DPU_CTL_FETCH_ACTIVE,
        DPU_CTL_MAX
 };
 
@@ -193,11 +194,14 @@ enum {
  * @DPU_INTF_INPUT_CTRL         Supports the setting of pp block from which
  *                              pixel data arrives to this INTF
  * @DPU_INTF_TE                 INTF block has TE configuration support
+ * @DPU_DATA_HCTL_EN            Allows data to be transferred at a different
+ *                              rate than the video timing
  * @DPU_INTF_MAX
  */
 enum {
        DPU_INTF_INPUT_CTRL = 0x1,
        DPU_INTF_TE,
+       DPU_DATA_HCTL_EN,
        DPU_INTF_MAX
 };
 
@@ -719,6 +723,7 @@ struct dpu_perf_cfg {
  * @cursor_formats     Supported formats for cursor pipe
  * @vig_formats        Supported formats for vig pipe
  * @mdss_irqs:         Bitmap with the irqs supported by the target
+ * @obsolete_irq:       IRQ types that are obsolete for a particular target
  */
 struct dpu_mdss_cfg {
        u32 hwversion;
@@ -765,6 +770,7 @@ struct dpu_mdss_cfg {
        const struct dpu_format_extended *vig_formats;
 
        unsigned long mdss_irqs;
+       unsigned long obsolete_irq;
 };
 
 struct dpu_mdss_hw_cfg_handler {
index 8981cfa..2d4645e 100644
@@ -27,6 +27,7 @@
 #define   CTL_MERGE_3D_FLUSH            0x100
 #define   CTL_INTF_FLUSH                0x110
 #define   CTL_INTF_MASTER               0x134
+#define   CTL_FETCH_PIPE_ACTIVE         0x0FC
 
 #define CTL_MIXER_BORDER_OUT            BIT(24)
 #define CTL_FLUSH_MASK_CTL              BIT(17)
 #define DPU_REG_RESET_TIMEOUT_US        2000
 #define  MERGE_3D_IDX   23
 #define  INTF_IDX       31
+#define CTL_INVALID_BIT                 0xffff
+
+static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
+       CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
+       1, 2, 3, CTL_INVALID_BIT, CTL_INVALID_BIT};
 
 static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
                const struct dpu_mdss_cfg *m,
@@ -344,6 +350,8 @@ static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
                DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
                DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
        }
+
+       DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
 }
 
 static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
@@ -496,7 +504,9 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
 
        DPU_REG_WRITE(c, CTL_TOP, mode_sel);
        DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
-       DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, BIT(cfg->merge_3d - MERGE_3D_0));
+       if (cfg->merge_3d)
+               DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
+                             BIT(cfg->merge_3d - MERGE_3D_0));
 }
 
 static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
@@ -529,6 +539,23 @@ static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
        DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
 }
 
+static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
+       unsigned long *fetch_active)
+{
+       int i;
+       u32 val = 0;
+
+       if (fetch_active) {
+               for (i = 0; i < SSPP_MAX; i++) {
+                       if (test_bit(i, fetch_active) &&
+                               fetch_tbl[i] != CTL_INVALID_BIT)
+                               val |= BIT(fetch_tbl[i]);
+               }
+       }
+
+       DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val);
+}
+
 static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
                unsigned long cap)
 {
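fetch_tbl maps an SSPP index to its bit in CTL_FETCH_PIPE_ACTIVE: VIG pipes land on bits 16-19, DMA pipes on bits 0-3, and pipes with no fetch bit carry CTL_INVALID_BIT. The pipe ordering assumed below (NONE, VIG0-3, RGB0-3, DMA0-3, CURSOR0-1) follows the usual enum dpu_sspp layout and is not spelled out in this hunk. A standalone recomputation of the register value:

    #include <stdint.h>
    #include <stdio.h>

    #define SSPP_MAX        15
    #define CTL_INVALID_BIT 0xffff

    static const uint32_t fetch_tbl[SSPP_MAX] = {
            CTL_INVALID_BIT,                    /* NONE */
            16, 17, 18, 19,                     /* VIG0-3 */
            CTL_INVALID_BIT, CTL_INVALID_BIT,
            CTL_INVALID_BIT, CTL_INVALID_BIT,   /* RGB0-3 */
            0, 1, 2, 3,                         /* DMA0-3 */
            CTL_INVALID_BIT, CTL_INVALID_BIT    /* CURSOR0-1 */
    };

    static uint32_t fetch_active_to_reg(uint32_t active)
    {
            uint32_t val = 0;
            int i;

            for (i = 0; i < SSPP_MAX; i++)
                    if ((active & (1u << i)) && fetch_tbl[i] != CTL_INVALID_BIT)
                            val |= 1u << fetch_tbl[i];
            return val;
    }

    int main(void)
    {
            /* VIG0 (index 1) + DMA0 (index 9) -> bits 16 and 0 */
            printf("reg = 0x%05x\n", fetch_active_to_reg((1u << 1) | (1u << 9)));
            return 0;
    }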
@@ -558,6 +585,8 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
        ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
        ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
        ops->get_bitmask_dspp = dpu_hw_ctl_get_bitmask_dspp;
+       if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
+               ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
 };
 
 static struct dpu_hw_blk_ops dpu_hw_ops;
index e93a42a..806c171 100644
@@ -167,6 +167,9 @@ struct dpu_hw_ctl_ops {
         */
        void (*setup_blendstage)(struct dpu_hw_ctl *ctx,
                enum dpu_lm lm, struct dpu_hw_stage_cfg *cfg);
+
+       void (*set_active_pipes)(struct dpu_hw_ctl *ctx,
+               unsigned long *fetch_active);
 };
 
 /**
index 5c521de..48c96b8 100644
@@ -25,6 +25,9 @@
 #define MDP_AD4_INTR_EN_OFF            0x41c
 #define MDP_AD4_INTR_CLEAR_OFF         0x424
 #define MDP_AD4_INTR_STATUS_OFF                0x420
+#define MDP_INTF_0_OFF_REV_7xxx             0x34000
+#define MDP_INTF_1_OFF_REV_7xxx             0x35000
+#define MDP_INTF_5_OFF_REV_7xxx             0x39000
 
 /**
  * WB interrupt status bit definitions
 #define DPU_INTR_INTF_1_UNDERRUN BIT(26)
 #define DPU_INTR_INTF_2_UNDERRUN BIT(28)
 #define DPU_INTR_INTF_3_UNDERRUN BIT(30)
+#define DPU_INTR_INTF_5_UNDERRUN BIT(22)
 #define DPU_INTR_INTF_0_VSYNC BIT(25)
 #define DPU_INTR_INTF_1_VSYNC BIT(27)
 #define DPU_INTR_INTF_2_VSYNC BIT(29)
 #define DPU_INTR_INTF_3_VSYNC BIT(31)
+#define DPU_INTR_INTF_5_VSYNC BIT(23)
 
 /**
  * Pingpong Secondary interrupt status bit definitions
@@ -242,7 +247,22 @@ static const struct dpu_intr_reg dpu_intr_set[] = {
                MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
                MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
                MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
-       }
+       },
+       {
+               MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_CLEAR,
+               MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_EN,
+               MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_STATUS
+       },
+       {
+               MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_CLEAR,
+               MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_EN,
+               MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_STATUS
+       },
+       {
+               MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_CLEAR,
+               MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_EN,
+               MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_STATUS
+       },
 };
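The three new entries cover the per-INTF interrupt registers of the 7xxx layout, and the map below is padded so each register set owns a 64-entry range (slots 32-63 reserved). The lookup implied by the tables -- index dpu_irq_map[] directly by irq_idx and use the entry's trailing register index to pick the clear/enable/status offsets -- is modeled here with hypothetical offsets and field names:

    #include <stdint.h>
    #include <stdio.h>

    struct irq_reg  { uint32_t clr_off, en_off, status_off; };
    struct irq_type { uint32_t mask; int reg_idx; };

    /* Hypothetical register offsets, two sets only for the sketch. */
    static const struct irq_reg regs[] = {
            { 0x01018, 0x01010, 0x01014 },  /* "INTR"  */
            { 0x01028, 0x01020, 0x01024 },  /* "INTR2" */
    };

    /* Sparse model of dpu_irq_map: 64 slots per register set, gaps are
     * implicitly reserved (zeroed). */
    static const struct irq_type map[] = {
            [0]  = { 1u << 24, 0 },         /* an INTR bit */
            [64] = { 1u << 0,  1 },         /* first bit of the next range */
    };

    int main(void)
    {
            int irq_idx = 64;
            const struct irq_type *t = &map[irq_idx];

            printf("irq %d -> status reg 0x%05x, mask 0x%08x\n",
                   irq_idx, regs[t->reg_idx].status_off, t->mask);
            return 0;
    }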
 
 /*
@@ -308,24 +328,59 @@ static const struct dpu_irq_type dpu_irq_map[] = {
        { DPU_IRQ_TYPE_INTF_VSYNC, INTF_2, DPU_INTR_INTF_2_VSYNC, 0},
        { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, DPU_INTR_INTF_3_UNDERRUN, 0},
        { DPU_IRQ_TYPE_INTF_VSYNC, INTF_3, DPU_INTR_INTF_3_VSYNC, 0},
-
-       /* BEGIN MAP_RANGE: 32-64, INTR2 */
-       /* irq_idx: 32-35 */
+       /* irq_idx:32-33 */
+       { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_5, DPU_INTR_INTF_5_UNDERRUN, 0},
+       { DPU_IRQ_TYPE_INTF_VSYNC, INTF_5, DPU_INTR_INTF_5_VSYNC, 0},
+       /* irq_idx:34-63 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 0},
+       /* BEGIN MAP_RANGE: 64-95, INTR2 */
+       /* irq_idx: 64-67 */
        { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0,
                DPU_INTR_PING_PONG_S0_AUTOREFRESH_DONE, 1},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
-       /* irq_idx: 36-39 */
+       /* irq_idx: 68-71 */
        { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0,
                DPU_INTR_PING_PONG_S0_WR_PTR, 1},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
-       /* irq_idx: 40 */
+       /* irq_idx: 72 */
        { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
                DPU_INTR_PING_PONG_S0_RD_PTR, 1},
-       /* irq_idx: 41-45 */
+       /* irq_idx: 73-77 */
        { DPU_IRQ_TYPE_CTL_START, CTL_0,
                DPU_INTR_CTL_0_START, 1},
        { DPU_IRQ_TYPE_CTL_START, CTL_1,
@@ -336,10 +391,10 @@ static const struct dpu_irq_type dpu_irq_map[] = {
                DPU_INTR_CTL_3_START, 1},
        { DPU_IRQ_TYPE_CTL_START, CTL_4,
                DPU_INTR_CTL_4_START, 1},
-       /* irq_idx: 46-47 */
+       /* irq_idx: 78-79 */
        { DPU_IRQ_TYPE_CWB_OVERFLOW, CWB_2, DPU_INTR_CWB_2_OVERFLOW, 1},
        { DPU_IRQ_TYPE_CWB_OVERFLOW, CWB_3, DPU_INTR_CWB_3_OVERFLOW, 1},
-       /* irq_idx: 48-51 */
+       /* irq_idx: 80-83 */
        { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_0,
                DPU_INTR_PING_PONG_0_TEAR_DETECTED, 1},
        { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_1,
@@ -348,13 +403,13 @@ static const struct dpu_irq_type dpu_irq_map[] = {
                DPU_INTR_PING_PONG_2_TEAR_DETECTED, 1},
        { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_3,
                DPU_INTR_PING_PONG_3_TEAR_DETECTED, 1},
-       /* irq_idx: 52-55 */
+       /* irq_idx: 84-87 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
        { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_S0,
                DPU_INTR_PING_PONG_S0_TEAR_DETECTED, 1},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
-       /* irq_idx: 56-59 */
+       /* irq_idx: 88-91 */
        { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_0,
                DPU_INTR_PING_PONG_0_TE_DETECTED, 1},
        { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_1,
@@ -363,65 +418,129 @@ static const struct dpu_irq_type dpu_irq_map[] = {
                DPU_INTR_PING_PONG_2_TE_DETECTED, 1},
        { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_3,
                DPU_INTR_PING_PONG_3_TE_DETECTED, 1},
-       /* irq_idx: 60-63 */
+       /* irq_idx: 92-95 */
        { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_S0,
                DPU_INTR_PING_PONG_S0_TE_DETECTED, 1},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
-
-       /* BEGIN MAP_RANGE: 64-95 HIST */
-       /* irq_idx: 64-67 */
+       /* irq_idx: 96-127 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+       /* BEGIN MAP_RANGE: 128-159 HIST */
+       /* irq_idx: 128-131 */
        { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, DPU_INTR_HIST_VIG_0_DONE, 2},
        { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
                DPU_INTR_HIST_VIG_0_RSTSEQ_DONE, 2},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
-       /* irq_idx: 68-71 */
+       /* irq_idx: 132-135 */
        { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, DPU_INTR_HIST_VIG_1_DONE, 2},
        { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
                DPU_INTR_HIST_VIG_1_RSTSEQ_DONE, 2},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
-       /* irq_idx: 72-75 */
+       /* irq_idx: 136-139 */
        { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, DPU_INTR_HIST_VIG_2_DONE, 2},
        { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
                DPU_INTR_HIST_VIG_2_RSTSEQ_DONE, 2},
        { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, DPU_INTR_HIST_VIG_3_DONE, 2},
        { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
                DPU_INTR_HIST_VIG_3_RSTSEQ_DONE, 2},
-       /* irq_idx: 76-79 */
+       /* irq_idx: 140-143 */
        { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, DPU_INTR_HIST_DSPP_0_DONE, 2},
        { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
                DPU_INTR_HIST_DSPP_0_RSTSEQ_DONE, 2},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
-       /* irq_idx: 80-83 */
+       /* irq_idx: 144-147 */
        { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, DPU_INTR_HIST_DSPP_1_DONE, 2},
        { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
                DPU_INTR_HIST_DSPP_1_RSTSEQ_DONE, 2},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
-       /* irq_idx: 84-87 */
+       /* irq_idx: 148-151 */
        { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, DPU_INTR_HIST_DSPP_2_DONE, 2},
        { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
                DPU_INTR_HIST_DSPP_2_RSTSEQ_DONE, 2},
        { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, DPU_INTR_HIST_DSPP_3_DONE, 2},
        { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
                DPU_INTR_HIST_DSPP_3_RSTSEQ_DONE, 2},
-       /* irq_idx: 88-91 */
+       /* irq_idx: 152-155 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
-       /* irq_idx: 92-95 */
+       /* irq_idx: 156-159 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
-
-       /* BEGIN MAP_RANGE: 96-127 INTF_0_INTR */
-       /* irq_idx: 96-99 */
+       /* irq_idx: 160-191 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+       /* BEGIN MAP_RANGE: 192-255 INTF_0_INTR */
+       /* irq_idx: 192-195 */
        { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_0,
                DPU_INTR_VIDEO_INTO_STATIC, 3},
        { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_0,
@@ -430,7 +549,7 @@ static const struct dpu_irq_type dpu_irq_map[] = {
                DPU_INTR_DSICMD_0_INTO_STATIC, 3},
        { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_0,
                DPU_INTR_DSICMD_0_OUTOF_STATIC, 3},
-       /* irq_idx: 100-103 */
+       /* irq_idx: 196-199 */
        { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_0,
                DPU_INTR_DSICMD_1_INTO_STATIC, 3},
        { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_0,
@@ -439,39 +558,71 @@ static const struct dpu_irq_type dpu_irq_map[] = {
                DPU_INTR_DSICMD_2_INTO_STATIC, 3},
        { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_0,
                DPU_INTR_DSICMD_2_OUTOF_STATIC, 3},
-       /* irq_idx: 104-107 */
+       /* irq_idx: 200-203 */
        { DPU_IRQ_TYPE_PROG_LINE, INTF_0, DPU_INTR_PROG_LINE, 3},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
-       /* irq_idx: 108-111 */
+       /* irq_idx: 204-207 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       /* irq_idx: 208-211 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
-       /* irq_idx: 112-115 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       /* irq_idx: 212-215 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
-       /* irq_idx: 116-119 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       /* irq_idx: 216-219 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
-       /* irq_idx: 120-123 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       /* irq_idx: 220-223 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
-       /* irq_idx: 124-127 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       /* irq_idx: 224-255 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
-
-       /* BEGIN MAP_RANGE: 128-159 INTF_1_INTR */
-       /* irq_idx: 128-131 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+       /* BEGIN MAP_RANGE: 256-319 INTF_1_INTR */
+       /* irq_idx: 256-259 */
        { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_1,
                DPU_INTR_VIDEO_INTO_STATIC, 4},
        { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_1,
@@ -480,7 +631,7 @@ static const struct dpu_irq_type dpu_irq_map[] = {
                DPU_INTR_DSICMD_0_INTO_STATIC, 4},
        { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_1,
                DPU_INTR_DSICMD_0_OUTOF_STATIC, 4},
-       /* irq_idx: 132-135 */
+       /* irq_idx: 260-263 */
        { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_1,
                DPU_INTR_DSICMD_1_INTO_STATIC, 4},
        { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_1,
@@ -489,39 +640,71 @@ static const struct dpu_irq_type dpu_irq_map[] = {
                DPU_INTR_DSICMD_2_INTO_STATIC, 4},
        { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_1,
                DPU_INTR_DSICMD_2_OUTOF_STATIC, 4},
-       /* irq_idx: 136-139 */
+       /* irq_idx: 264-267 */
        { DPU_IRQ_TYPE_PROG_LINE, INTF_1, DPU_INTR_PROG_LINE, 4},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
-       /* irq_idx: 140-143 */
+       /* irq_idx: 268-271 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
-       /* irq_idx: 144-147 */
+       /* irq_idx: 272-275 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
-       /* irq_idx: 148-151 */
+       /* irq_idx: 276-279 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
-       /* irq_idx: 152-155 */
+       /* irq_idx: 280-283 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
-       /* irq_idx: 156-159 */
+       /* irq_idx: 284-287 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
-
-       /* BEGIN MAP_RANGE: 160-191 INTF_2_INTR */
-       /* irq_idx: 160-163 */
+       /* irq_idx: 288-319 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+       /* BEGIN MAP_RANGE: 320-383 INTF_2_INTR */
+       /* irq_idx: 320-323 */
        { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_2,
                DPU_INTR_VIDEO_INTO_STATIC, 5},
        { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_2,
@@ -530,7 +713,7 @@ static const struct dpu_irq_type dpu_irq_map[] = {
                DPU_INTR_DSICMD_0_INTO_STATIC, 5},
        { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_2,
                DPU_INTR_DSICMD_0_OUTOF_STATIC, 5},
-       /* irq_idx: 164-167 */
+       /* irq_idx: 324-327 */
        { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_2,
                DPU_INTR_DSICMD_1_INTO_STATIC, 5},
        { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_2,
@@ -539,39 +722,71 @@ static const struct dpu_irq_type dpu_irq_map[] = {
                DPU_INTR_DSICMD_2_INTO_STATIC, 5},
        { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_2,
                DPU_INTR_DSICMD_2_OUTOF_STATIC, 5},
-       /* irq_idx: 168-171 */
+       /* irq_idx: 328-331 */
        { DPU_IRQ_TYPE_PROG_LINE, INTF_2, DPU_INTR_PROG_LINE, 5},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
-       /* irq_idx: 172-175 */
+       /* irq_idx: 332-335 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
-       /* irq_idx: 176-179 */
+       /* irq_idx: 336-339 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
-       /* irq_idx: 180-183 */
+       /* irq_idx: 340-343 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
-       /* irq_idx: 184-187 */
+       /* irq_idx: 344-347 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
-       /* irq_idx: 188-191 */
+       /* irq_idx: 348-351 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
-
-       /* BEGIN MAP_RANGE: 192-223 INTF_3_INTR */
-       /* irq_idx: 192-195 */
+       /* irq_idx: 352-383 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+       /* BEGIN MAP_RANGE: 384-447 INTF_3_INTR */
+       /* irq_idx: 384-387 */
        { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_3,
                DPU_INTR_VIDEO_INTO_STATIC, 6},
        { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_3,
@@ -580,7 +795,7 @@ static const struct dpu_irq_type dpu_irq_map[] = {
                DPU_INTR_DSICMD_0_INTO_STATIC, 6},
        { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_3,
                DPU_INTR_DSICMD_0_OUTOF_STATIC, 6},
-       /* irq_idx: 196-199 */
+       /* irq_idx: 388-391 */
        { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_3,
                DPU_INTR_DSICMD_1_INTO_STATIC, 6},
        { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_3,
@@ -589,39 +804,71 @@ static const struct dpu_irq_type dpu_irq_map[] = {
                DPU_INTR_DSICMD_2_INTO_STATIC, 6},
        { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_3,
                DPU_INTR_DSICMD_2_OUTOF_STATIC, 6},
-       /* irq_idx: 200-203 */
+       /* irq_idx: 392-395 */
        { DPU_IRQ_TYPE_PROG_LINE, INTF_3, DPU_INTR_PROG_LINE, 6},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
-       /* irq_idx: 204-207 */
+       /* irq_idx: 396-399 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
-       /* irq_idx: 208-211 */
+       /* irq_idx: 400-403 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
-       /* irq_idx: 212-215 */
+       /* irq_idx: 404-407 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
-       /* irq_idx: 216-219 */
+       /* irq_idx: 408-411 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
-       /* irq_idx: 220-223 */
+       /* irq_idx: 412-415 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
-
-       /* BEGIN MAP_RANGE: 224-255 INTF_4_INTR */
-       /* irq_idx: 224-227 */
+       /* irq_idx: 416-447 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+       /* BEGIN MAP_RANGE: 448-511 INTF_4_INTR */
+       /* irq_idx: 448-451 */
        { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_4,
                DPU_INTR_VIDEO_INTO_STATIC, 7},
        { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_4,
@@ -630,7 +877,7 @@ static const struct dpu_irq_type dpu_irq_map[] = {
                DPU_INTR_DSICMD_0_INTO_STATIC, 7},
        { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_4,
                DPU_INTR_DSICMD_0_OUTOF_STATIC, 7},
-       /* irq_idx: 228-231 */
+       /* irq_idx: 452-455 */
        { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_4,
                DPU_INTR_DSICMD_1_INTO_STATIC, 7},
        { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_4,
@@ -639,130 +886,474 @@ static const struct dpu_irq_type dpu_irq_map[] = {
                DPU_INTR_DSICMD_2_INTO_STATIC, 7},
        { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_4,
                DPU_INTR_DSICMD_2_OUTOF_STATIC, 7},
-       /* irq_idx: 232-235 */
+       /* irq_idx: 456-459 */
        { DPU_IRQ_TYPE_PROG_LINE, INTF_4, DPU_INTR_PROG_LINE, 7},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
-       /* irq_idx: 236-239 */
+       /* irq_idx: 460-463 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
-       /* irq_idx: 240-243 */
+       /* irq_idx: 464-467 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
-       /* irq_idx: 244-247 */
+       /* irq_idx: 468-471 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
-       /* irq_idx: 248-251 */
+       /* irq_idx: 472-475 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
-       /* irq_idx: 252-255 */
+       /* irq_idx: 476-479 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
-
-       /* BEGIN MAP_RANGE: 256-287 AD4_0_INTR */
-       /* irq_idx: 256-259 */
+       /* irq_idx: 480-511 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+       /* BEGIN MAP_RANGE: 512-575 AD4_0_INTR */
+       /* irq_idx: 512-515 */
        { DPU_IRQ_TYPE_AD4_BL_DONE, DSPP_0, DPU_INTR_BACKLIGHT_UPDATED, 8},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
-       /* irq_idx: 260-263 */
+       /* irq_idx: 516-519 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
-       /* irq_idx: 264-267 */
+       /* irq_idx: 520-523 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
-       /* irq_idx: 268-271 */
+       /* irq_idx: 524-527 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
-       /* irq_idx: 272-275 */
+       /* irq_idx: 528-531 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
-       /* irq_idx: 276-279 */
+       /* irq_idx: 532-535 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
-       /* irq_idx: 280-283 */
+       /* irq_idx: 536-539 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
-       /* irq_idx: 284-287 */
+       /* irq_idx: 540-543 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
-
-       /* BEGIN MAP_RANGE: 288-319 AD4_1_INTR */
-       /* irq_idx: 288-291 */
+       /* irq_idx: 544-575 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+       /* BEGIN MAP_RANGE: 576-639 AD4_1_INTR */
+       /* irq_idx: 576-579 */
        { DPU_IRQ_TYPE_AD4_BL_DONE, DSPP_1, DPU_INTR_BACKLIGHT_UPDATED, 9},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
-       /* irq_idx: 292-295 */
+       /* irq_idx: 580-583 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       /* irq_idx: 584-587 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
-       /* irq_idx: 296-299 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       /* irq_idx: 588-591 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
-       /* irq_idx: 300-303 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       /* irq_idx: 592-595 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
-       /* irq_idx: 304-307 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       /* irq_idx: 596-599 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
-       /* irq_idx: 308-311 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       /* irq_idx: 600-603 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
-       /* irq_idx: 312-315 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       /* irq_idx: 604-607 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
-       /* irq_idx: 315-319 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       /* irq_idx: 608-639 */
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
        { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+       /* BEGIN MAP_RANGE: 640-703 INTF_0_SC7280_INTR */
+       /* irq_idx: 640-643 */
+       { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_0,
+               DPU_INTR_VIDEO_INTO_STATIC, 10},
+       { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_0,
+               DPU_INTR_VIDEO_OUTOF_STATIC, 10},
+       { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_0,
+               DPU_INTR_DSICMD_0_INTO_STATIC, 10},
+       { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_0,
+               DPU_INTR_DSICMD_0_OUTOF_STATIC, 10},
+       /* irq_idx: 644-647 */
+       { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_0,
+               DPU_INTR_DSICMD_1_INTO_STATIC, 10},
+       { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_0,
+               DPU_INTR_DSICMD_1_OUTOF_STATIC, 10},
+       { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_0,
+               DPU_INTR_DSICMD_2_INTO_STATIC, 10},
+       { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_0,
+               DPU_INTR_DSICMD_2_OUTOF_STATIC, 10},
+       /* irq_idx: 648-651 */
+       { DPU_IRQ_TYPE_PROG_LINE, INTF_0, DPU_INTR_PROG_LINE, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       /* irq_idx: 652-655 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       /* irq_idx: 656-659 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       /* irq_idx: 660-663 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       /* irq_idx: 664-667 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       /* irq_idx: 668-671 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       /* irq_idx: 672-703 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 10},
+       /* BEGIN MAP_RANGE: 704-767 INTF_1_SC7280_INTR */
+       /* irq_idx: 704-707 */
+       { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_1,
+               DPU_INTR_VIDEO_INTO_STATIC, 11},
+       { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_1,
+               DPU_INTR_VIDEO_OUTOF_STATIC, 11},
+       { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_1,
+               DPU_INTR_DSICMD_0_INTO_STATIC, 11},
+       { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_1,
+               DPU_INTR_DSICMD_0_OUTOF_STATIC, 11},
+       /* irq_idx: 708-711 */
+       { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_1,
+               DPU_INTR_DSICMD_1_INTO_STATIC, 11},
+       { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_1,
+               DPU_INTR_DSICMD_1_OUTOF_STATIC, 11},
+       { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_1,
+               DPU_INTR_DSICMD_2_INTO_STATIC, 11},
+       { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_1,
+               DPU_INTR_DSICMD_2_OUTOF_STATIC, 11},
+       /* irq_idx: 712-715 */
+       { DPU_IRQ_TYPE_PROG_LINE, INTF_1, DPU_INTR_PROG_LINE, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       /* irq_idx: 716-719 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       /* irq_idx: 720-723 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       /* irq_idx: 724-727 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       /* irq_idx: 728-731 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       /* irq_idx: 732-735 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       /* irq_idx: 736-767 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 11},
+       /* BEGIN MAP_RANGE: 768-831 INTF_5_SC7280_INTR */
+       /* irq_idx: 768-771 */
+       { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_5,
+               DPU_INTR_VIDEO_INTO_STATIC, 12},
+       { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_5,
+               DPU_INTR_VIDEO_OUTOF_STATIC, 12},
+       { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_5,
+               DPU_INTR_DSICMD_0_INTO_STATIC, 12},
+       { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_5,
+               DPU_INTR_DSICMD_0_OUTOF_STATIC, 12},
+       /* irq_idx: 772-775 */
+       { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_5,
+               DPU_INTR_DSICMD_1_INTO_STATIC, 12},
+       { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_5,
+               DPU_INTR_DSICMD_1_OUTOF_STATIC, 12},
+       { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_5,
+               DPU_INTR_DSICMD_2_INTO_STATIC, 12},
+       { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_5,
+               DPU_INTR_DSICMD_2_OUTOF_STATIC, 12},
+       /* irq_idx: 776-779 */
+       { DPU_IRQ_TYPE_PROG_LINE, INTF_5, DPU_INTR_PROG_LINE, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       /* irq_idx: 780-783 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       /* irq_idx: 784-787 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       /* irq_idx: 788-791 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       /* irq_idx: 792-795 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       /* irq_idx: 796-799 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       /* irq_idx: 800-831 */
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
+       { DPU_IRQ_TYPE_RESERVED, 0, 0, 12},
 };
 
-static int dpu_hw_intr_irqidx_lookup(enum dpu_intr_type intr_type,
-               u32 instance_idx)
+static int dpu_hw_intr_irqidx_lookup(struct dpu_hw_intr *intr,
+       enum dpu_intr_type intr_type, u32 instance_idx)
 {
        int i;
 
        for (i = 0; i < ARRAY_SIZE(dpu_irq_map); i++) {
                if (intr_type == dpu_irq_map[i].intr_type &&
-                       instance_idx == dpu_irq_map[i].instance_idx)
+                       instance_idx == dpu_irq_map[i].instance_idx &&
+                       !(intr->obsolete_irq & BIT(dpu_irq_map[i].intr_type)))
                        return i;
        }
 
@@ -795,11 +1386,11 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
                irq_status = intr->save_irq_status[reg_idx];
 
                /*
-                * Each Interrupt register has a range of 32 indexes, and
+                * Each Interrupt register has a range of 64 indexes, and
                 * that is static for dpu_irq_map.
                 */
-               start_idx = reg_idx * 32;
-               end_idx = start_idx + 32;
+               start_idx = reg_idx * 64;
+               end_idx = start_idx + 64;
 
                if (!test_bit(reg_idx, &intr->irq_mask) ||
                        start_idx >= ARRAY_SIZE(dpu_irq_map))
@@ -814,7 +1405,9 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
                                (irq_idx < end_idx) && irq_status;
                                irq_idx++)
                        if ((irq_status & dpu_irq_map[irq_idx].irq_mask) &&
-                               (dpu_irq_map[irq_idx].reg_idx == reg_idx)) {
+                               (dpu_irq_map[irq_idx].reg_idx == reg_idx) &&
+                               !(intr->obsolete_irq &
+                               BIT(dpu_irq_map[irq_idx].intr_type))) {
                                /*
                                 * Once a match on irq mask, perform a callback
                                 * to the given cbfunc. cbfunc will take care
@@ -1126,6 +1719,8 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
        }
 
        intr->irq_mask = m->mdss_irqs;
+       intr->obsolete_irq = m->obsolete_irq;
+
        spin_lock_init(&intr->irq_lock);
 
        return intr;
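
The lookup and dispatch changes above share two assumptions: each interrupt status register now owns a fixed 64-entry window of dpu_irq_map, and interrupt types listed in the per-target obsolete_irq bitmask must be skipped even when their table entries match. A minimal sketch of that filtering, with a hypothetical helper name (the driver open-codes both checks inline):

    #define DPU_IRQS_PER_REG 64

    /* Hypothetical helper: is this table entry live on the current target? */
    static bool dpu_irq_entry_usable(const struct dpu_hw_intr *intr, int irq_idx)
    {
            int reg_idx = irq_idx / DPU_IRQS_PER_REG;   /* owning status register */

            return test_bit(reg_idx, &intr->irq_mask) &&
                   !(intr->obsolete_irq & BIT(dpu_irq_map[irq_idx].intr_type));
    }
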
index fc9c986..5d6f9a7 100644 (file)
@@ -83,11 +83,12 @@ struct dpu_hw_intr_ops {
        /**
         * irq_idx_lookup - Lookup IRQ index on the HW interrupt type
         *                 Used for all irq related ops
+        * @intr:               HW interrupt handle
         * @intr_type:          Interrupt type defined in dpu_intr_type
         * @instance_idx:       HW interrupt block instance
         * @return:             irq_idx or -EINVAL for lookup fail
         */
-       int (*irq_idx_lookup)(
+       int (*irq_idx_lookup)(struct dpu_hw_intr *intr,
                        enum dpu_intr_type intr_type,
                        u32 instance_idx);
 
@@ -179,6 +180,7 @@ struct dpu_hw_intr_ops {
  * @save_irq_status:  array of IRQ status reg storage created during init
  * @irq_idx_tbl_size: total number of irq_idx mapped in the hw_interrupts
  * @irq_lock:         spinlock for accessing IRQ resources
+ * @obsolete_irq:      irq types that are obsolete for a particular target
  */
 struct dpu_hw_intr {
        struct dpu_hw_blk_reg_map hw;
@@ -188,6 +190,7 @@ struct dpu_hw_intr {
        u32 irq_idx_tbl_size;
        spinlock_t irq_lock;
        unsigned long irq_mask;
+       unsigned long obsolete_irq;
 };
 
 /**
index 6f0f545..1599e3f 100644 (file)
@@ -31,6 +31,8 @@
 #define INTF_TEST_CTL                   0x054
 #define INTF_TP_COLOR0                  0x058
 #define INTF_TP_COLOR1                  0x05C
+#define INTF_CONFIG2                    0x060
+#define INTF_DISPLAY_DATA_HCTL          0x064
 #define INTF_FRAME_LINE_COUNT_EN        0x0A8
 #define INTF_FRAME_COUNT                0x0AC
 #define   INTF_LINE_COUNT               0x0B0
@@ -93,7 +95,7 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
        u32 active_hctl, display_hctl, hsync_ctl;
        u32 polarity_ctl, den_polarity, hsync_polarity, vsync_polarity;
        u32 panel_format;
-       u32 intf_cfg;
+       u32 intf_cfg, intf_cfg2 = 0, display_data_hctl = 0;
 
        /* read interface_cfg */
        intf_cfg = DPU_REG_READ(c, INTF_CONFIG);
@@ -178,6 +180,13 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
                                (COLOR_8BIT << 4) |
                                (0x21 << 8));
 
+       if (ctx->cap->features & BIT(DPU_DATA_HCTL_EN)) {
+               intf_cfg2 |= BIT(4);
+               display_data_hctl = display_hctl;
+               DPU_REG_WRITE(c, INTF_CONFIG2, intf_cfg2);
+               DPU_REG_WRITE(c, INTF_DISPLAY_DATA_HCTL, display_data_hctl);
+       }
+
        DPU_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl);
        DPU_REG_WRITE(c, INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period);
        DPU_REG_WRITE(c, INTF_VSYNC_PULSE_WIDTH_F0,
@@ -256,6 +265,7 @@ static void dpu_hw_intf_get_status(
        struct dpu_hw_blk_reg_map *c = &intf->hw;
 
        s->is_en = DPU_REG_READ(c, INTF_TIMING_ENGINE_EN);
+       s->is_prog_fetch_en = !!(DPU_REG_READ(c, INTF_CONFIG) & BIT(31));
        if (s->is_en) {
                s->frame_count = DPU_REG_READ(c, INTF_FRAME_COUNT);
                s->line_count = DPU_REG_READ(c, INTF_LINE_COUNT);
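
When the catalog advertises DPU_DATA_HCTL_EN, the timing-engine setup above sets bit 4 of INTF_CONFIG2 and mirrors the display HCTL value into INTF_DISPLAY_DATA_HCTL. A condensed sketch of just that step (the helper name is hypothetical; the offsets and enable bit come from the diff):

    static void intf_setup_data_hctl(struct dpu_hw_blk_reg_map *c, u32 display_hctl)
    {
            u32 intf_cfg2 = BIT(4);   /* data HCTL enable */

            DPU_REG_WRITE(c, INTF_CONFIG2, intf_cfg2);
            DPU_REG_WRITE(c, INTF_DISPLAY_DATA_HCTL, display_hctl);
    }
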
index 0ead64d..3568be8 100644 (file)
@@ -40,6 +40,7 @@ struct intf_prog_fetch {
 
 struct intf_status {
        u8 is_en;               /* interface timing engine is enabled or not */
+       u8 is_prog_fetch_en;    /* interface prog fetch counter is enabled or not */
        u32 frame_count;        /* frame count since timing engine enabled */
        u32 line_count;         /* current line count including blanking */
 };
index 8018fff..3aa10c8 100644 (file)
@@ -30,7 +30,7 @@ struct traffic_shaper_cfg {
 
 /**
  * struct split_pipe_cfg - pipe configuration for dual display panels
- * @en        : Enable/disable dual pipe confguration
+ * @en        : Enable/disable dual pipe configuration
  * @mode      : Panel interface mode
  * @intf      : Interface id for main control path
  * @split_flush_en: Allows both the paths to be flushed when master path is
@@ -76,7 +76,7 @@ struct dpu_vsync_source_cfg {
  * @setup_traffic_shaper : programs traffic shaper control
  */
 struct dpu_hw_mdp_ops {
-       /** setup_split_pipe() : Regsiters are not double buffered, thisk
+       /** setup_split_pipe() : Registers are not double buffered, this
         * function should be called before timing control enable
         * @mdp  : mdp top context driver
         * @cfg  : upper and lower part of pipe configuration
index 5a8e3e1..88e9cc3 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <drm/drm_crtc.h>
 #include <drm/drm_file.h>
+#include <drm/drm_vblank.h>
 
 #include "msm_drv.h"
 #include "msm_mmu.h"
@@ -43,6 +44,8 @@
 #define DPU_DEBUGFS_DIR "msm_dpu"
 #define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
 
+#define MIN_IB_BW      400000000ULL /* Min ib vote 400MB */
+
 static int dpu_kms_hw_init(struct msm_kms *kms);
 static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
 
@@ -931,6 +934,8 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
                DPU_DEBUG("REG_DMA is not defined");
        }
 
+       dpu_kms_parse_data_bus_icc_path(dpu_kms);
+
        pm_runtime_get_sync(&dpu_kms->pdev->dev);
 
        dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
@@ -1020,6 +1025,10 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
         */
        dev->mode_config.allow_fb_modifiers = true;
 
+       dev->max_vblank_count = 0xffffffff;
+       /* Disable vblank irqs aggressively for power-saving */
+       dev->vblank_disable_immediate = true;
+
        /*
         * _dpu_kms_drm_obj_init should create the DRM related objects
         * i.e. CRTCs, planes, encoders, connectors and so forth
@@ -1032,9 +1041,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
 
        dpu_vbif_init_memtypes(dpu_kms);
 
-       if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss"))
-               dpu_kms_parse_data_bus_icc_path(dpu_kms);
-
        pm_runtime_put_sync(&dpu_kms->pdev->dev);
 
        return 0;
@@ -1191,10 +1197,10 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
 
        ddev = dpu_kms->dev;
 
+       WARN_ON(!(dpu_kms->num_paths));
        /* Min vote of BW is required before turning on AXI clk */
        for (i = 0; i < dpu_kms->num_paths; i++)
-               icc_set_bw(dpu_kms->path[i], 0,
-                       dpu_kms->catalog->perf.min_dram_ib);
+               icc_set_bw(dpu_kms->path[i], 0, Bps_to_icc(MIN_IB_BW));
 
        rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
        if (rc) {
@@ -1219,6 +1225,9 @@ static const struct dev_pm_ops dpu_pm_ops = {
 static const struct of_device_id dpu_dt_match[] = {
        { .compatible = "qcom,sdm845-dpu", },
        { .compatible = "qcom,sc7180-dpu", },
+       { .compatible = "qcom,sc7280-dpu", },
+       { .compatible = "qcom,sm8150-dpu", },
+       { .compatible = "qcom,sm8250-dpu", },
        {}
 };
 MODULE_DEVICE_TABLE(of, dpu_dt_match);
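
dpu_runtime_resume now votes a fixed 400 MB/s instantaneous-bandwidth floor on every interconnect path before enabling the AXI clock, replacing the per-catalog min_dram_ib value. A minimal sketch of the vote (hypothetical wrapper function; Bps_to_icc converts bytes/s into the kBps units the interconnect framework expects):

    static void dpu_vote_min_ib(struct dpu_kms *dpu_kms)
    {
            int i;

            /* avg_bw = 0; peak ib = Bps_to_icc(400000000ULL) == 400000 kBps */
            for (i = 0; i < dpu_kms->num_paths; i++)
                    icc_set_bw(dpu_kms->path[i], 0, Bps_to_icc(MIN_IB_BW));
    }
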
index cd40788..06b56fe 100644 (file)
@@ -31,40 +31,8 @@ struct dpu_mdss {
        void __iomem *mmio;
        struct dss_module_power mp;
        struct dpu_irq_controller irq_controller;
-       struct icc_path *path[2];
-       u32 num_paths;
 };
 
-static int dpu_mdss_parse_data_bus_icc_path(struct drm_device *dev,
-                                               struct dpu_mdss *dpu_mdss)
-{
-       struct icc_path *path0 = of_icc_get(dev->dev, "mdp0-mem");
-       struct icc_path *path1 = of_icc_get(dev->dev, "mdp1-mem");
-
-       if (IS_ERR_OR_NULL(path0))
-               return PTR_ERR_OR_ZERO(path0);
-
-       dpu_mdss->path[0] = path0;
-       dpu_mdss->num_paths = 1;
-
-       if (!IS_ERR_OR_NULL(path1)) {
-               dpu_mdss->path[1] = path1;
-               dpu_mdss->num_paths++;
-       }
-
-       return 0;
-}
-
-static void dpu_mdss_icc_request_bw(struct msm_mdss *mdss)
-{
-       struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
-       int i;
-       u64 avg_bw = dpu_mdss->num_paths ? MAX_BW / dpu_mdss->num_paths : 0;
-
-       for (i = 0; i < dpu_mdss->num_paths; i++)
-               icc_set_bw(dpu_mdss->path[i], avg_bw, kBps_to_icc(MAX_BW));
-}
-
 static void dpu_mdss_irq(struct irq_desc *desc)
 {
        struct dpu_mdss *dpu_mdss = irq_desc_get_handler_data(desc);
@@ -178,8 +146,6 @@ static int dpu_mdss_enable(struct msm_mdss *mdss)
        struct dss_module_power *mp = &dpu_mdss->mp;
        int ret;
 
-       dpu_mdss_icc_request_bw(mdss);
-
        ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
        if (ret) {
                DPU_ERROR("clock enable failed, ret:%d\n", ret);
@@ -204,6 +170,9 @@ static int dpu_mdss_enable(struct msm_mdss *mdss)
        case DPU_HW_VER_620:
                writel_relaxed(0x1e, dpu_mdss->mmio + UBWC_STATIC);
                break;
+       case DPU_HW_VER_720:
+               writel_relaxed(0x101e, dpu_mdss->mmio + UBWC_STATIC);
+               break;
        }
 
        return ret;
@@ -213,15 +182,12 @@ static int dpu_mdss_disable(struct msm_mdss *mdss)
 {
        struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
        struct dss_module_power *mp = &dpu_mdss->mp;
-       int ret, i;
+       int ret;
 
        ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
        if (ret)
                DPU_ERROR("clock disable failed, ret:%d\n", ret);
 
-       for (i = 0; i < dpu_mdss->num_paths; i++)
-               icc_set_bw(dpu_mdss->path[i], 0, 0);
-
        return ret;
 }
 
@@ -232,7 +198,6 @@ static void dpu_mdss_destroy(struct drm_device *dev)
        struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
        struct dss_module_power *mp = &dpu_mdss->mp;
        int irq;
-       int i;
 
        pm_runtime_suspend(dev->dev);
        pm_runtime_disable(dev->dev);
@@ -242,9 +207,6 @@ static void dpu_mdss_destroy(struct drm_device *dev)
        msm_dss_put_clk(mp->clk_config, mp->num_clk);
        devm_kfree(&pdev->dev, mp->clk_config);
 
-       for (i = 0; i < dpu_mdss->num_paths; i++)
-               icc_put(dpu_mdss->path[i]);
-
        if (dpu_mdss->mmio)
                devm_iounmap(&pdev->dev, dpu_mdss->mmio);
        dpu_mdss->mmio = NULL;
@@ -276,12 +238,6 @@ int dpu_mdss_init(struct drm_device *dev)
 
        DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio);
 
-       if (!of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss")) {
-               ret = dpu_mdss_parse_data_bus_icc_path(dev, dpu_mdss);
-               if (ret)
-                       return ret;
-       }
-
        mp = &dpu_mdss->mp;
        ret = msm_dss_parse_clock(pdev, mp);
        if (ret) {
@@ -307,8 +263,6 @@ int dpu_mdss_init(struct drm_device *dev)
 
        pm_runtime_enable(dev->dev);
 
-       dpu_mdss_icc_request_bw(priv->mdss);
-
        return ret;
 
 irq_error:
index ff2c1d5..ec6c7b0 100644 (file)
@@ -20,7 +20,7 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
 {
        struct mdp5_kms *mdp5_kms = get_kms(encoder);
        struct device *dev = encoder->dev->dev;
-       u32 total_lines_x100, vclks_line, cfg;
+       u32 total_lines, vclks_line, cfg;
        long vsync_clk_speed;
        struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
        int pp_id = mixer->pp;
@@ -30,8 +30,8 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
                return -EINVAL;
        }
 
-       total_lines_x100 = mode->vtotal * drm_mode_vrefresh(mode);
-       if (!total_lines_x100) {
+       total_lines = mode->vtotal * drm_mode_vrefresh(mode);
+       if (!total_lines) {
                DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
                              __func__, mode->vtotal, drm_mode_vrefresh(mode));
                return -EINVAL;
@@ -43,15 +43,23 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
                                                        vsync_clk_speed);
                return -EINVAL;
        }
-       vclks_line = vsync_clk_speed * 100 / total_lines_x100;
+       vclks_line = vsync_clk_speed / total_lines;
 
        cfg = MDP5_PP_SYNC_CONFIG_VSYNC_COUNTER_EN
                | MDP5_PP_SYNC_CONFIG_VSYNC_IN_EN;
        cfg |= MDP5_PP_SYNC_CONFIG_VSYNC_COUNT(vclks_line);
 
+       /*
+        * Tearcheck emits a blanking signal every vclks_line * vtotal * 2 ticks on
+        * the vsync_clk, equating to roughly half the desired panel refresh rate.
+        * This is only necessary as a stability fallback if interrupts from the
+        * panel arrive too late or not at all, but is currently used by default
+        * because these panel interrupts are not wired up yet.
+        */
        mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_CONFIG_VSYNC(pp_id), cfg);
        mdp5_write(mdp5_kms,
-               REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), 0xfff0);
+               REG_MDP5_PP_SYNC_CONFIG_HEIGHT(pp_id), (2 * mode->vtotal));
+
        mdp5_write(mdp5_kms,
                REG_MDP5_PP_VSYNC_INIT_VAL(pp_id), mode->vdisplay);
        mdp5_write(mdp5_kms, REG_MDP5_PP_RD_PTR_IRQ(pp_id), mode->vdisplay + 1);
@@ -59,6 +67,7 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
        mdp5_write(mdp5_kms, REG_MDP5_PP_SYNC_THRESH(pp_id),
                        MDP5_PP_SYNC_THRESH_START(4) |
                        MDP5_PP_SYNC_THRESH_CONTINUE(4));
+       mdp5_write(mdp5_kms, REG_MDP5_PP_AUTOREFRESH_CONFIG(pp_id), 0x0);
 
        return 0;
 }
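
The tearcheck comment above can be sanity-checked with a concrete (hypothetical) mode, say a 60 Hz panel with vtotal = 2000 lines and a 19.2 MHz vsync_clk:

    total_lines = 2000 * 60;              /* = 120000 lines per second      */
    vclks_line  = 19200000 / total_lines; /* = 160 vsync_clk ticks per line */
    /*
     * SYNC_CONFIG_HEIGHT = 2 * vtotal = 4000 lines, so the counter rolls
     * over every 160 * 4000 = 640000 ticks; 19200000 / 640000 = 30 Hz,
     * i.e. roughly half the 60 Hz panel refresh rate, as the comment says.
     */
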
index 1c6e1d2..7c22bfe 100644 (file)
@@ -32,6 +32,8 @@ struct dp_aux_private {
        struct drm_dp_aux dp_aux;
 };
 
+#define MAX_AUX_RETRIES                        5
+
 static const char *dp_aux_get_error(u32 aux_error)
 {
        switch (aux_error) {
@@ -377,6 +379,11 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
        ret = dp_aux_cmd_fifo_tx(aux, msg);
 
        if (ret < 0) {
+               if (aux->native) {
+                       aux->retry_cnt++;
+                       if (!(aux->retry_cnt % MAX_AUX_RETRIES))
+                               dp_catalog_aux_update_cfg(aux->catalog);
+               }
                usleep_range(400, 500); /* at least 400us to next try */
                goto unlock_exit;
        }
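
The retry path above bumps a counter on each failed native AUX transfer and re-tunes the AUX hardware once every MAX_AUX_RETRIES consecutive failures; the !(retry_cnt % MAX_AUX_RETRIES) test fires on failures 5, 10, 15, and so on. A minimal sketch of the pattern (hypothetical helper name):

    static void dp_aux_note_native_failure(struct dp_aux_private *aux)
    {
            aux->retry_cnt++;
            if (!(aux->retry_cnt % MAX_AUX_RETRIES))
                    dp_catalog_aux_update_cfg(aux->catalog); /* every 5th failure */
    }
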
index 84670bc..2f6247e 100644 (file)
@@ -226,7 +226,7 @@ static int dp_test_data_show(struct seq_file *m, void *data)
                                        debug->link->test_video.test_h_width);
                        seq_printf(m, "vdisplay: %d\n",
                                        debug->link->test_video.test_v_height);
-                                       seq_printf(m, "bpc: %u\n",
+                       seq_printf(m, "bpc: %u\n",
                                        dp_link_bit_depth_to_bpc(bpc));
                } else
                        seq_puts(m, "0");
@@ -368,44 +368,21 @@ static int dp_debug_init(struct dp_debug *dp_debug, struct drm_minor *minor)
        int rc = 0;
        struct dp_debug_private *debug = container_of(dp_debug,
                        struct dp_debug_private, dp_debug);
-       struct dentry *file;
-       struct dentry *test_active;
-       struct dentry *test_data, *test_type;
 
-       file = debugfs_create_file("dp_debug", 0444, minor->debugfs_root,
+       debugfs_create_file("dp_debug", 0444, minor->debugfs_root,
                        debug, &dp_debug_fops);
-       if (IS_ERR_OR_NULL(file)) {
-               rc = PTR_ERR(file);
-               DRM_ERROR("[%s] debugfs create file failed, rc=%d\n",
-                                 DEBUG_NAME, rc);
-       }
 
-       test_active = debugfs_create_file("msm_dp_test_active", 0444,
+       debugfs_create_file("msm_dp_test_active", 0444,
                        minor->debugfs_root,
                        debug, &test_active_fops);
-       if (IS_ERR_OR_NULL(test_active)) {
-               rc = PTR_ERR(test_active);
-               DRM_ERROR("[%s] debugfs test_active failed, rc=%d\n",
-                                 DEBUG_NAME, rc);
-       }
 
-       test_data = debugfs_create_file("msm_dp_test_data", 0444,
+       debugfs_create_file("msm_dp_test_data", 0444,
                        minor->debugfs_root,
                        debug, &dp_test_data_fops);
-       if (IS_ERR_OR_NULL(test_data)) {
-               rc = PTR_ERR(test_data);
-               DRM_ERROR("[%s] debugfs test_data failed, rc=%d\n",
-                                 DEBUG_NAME, rc);
-       }
 
-       test_type = debugfs_create_file("msm_dp_test_type", 0444,
+       debugfs_create_file("msm_dp_test_type", 0444,
                        minor->debugfs_root,
                        debug, &dp_test_type_fops);
-       if (IS_ERR_OR_NULL(test_type)) {
-               rc = PTR_ERR(test_type);
-               DRM_ERROR("[%s] debugfs test_type failed, rc=%d\n",
-                                 DEBUG_NAME, rc);
-       }
 
        debug->root = minor->debugfs_root;
 
index 5b8fe32..e1c90fa 100644 (file)
@@ -34,8 +34,8 @@ int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd)
 
        dp_usbpd->hpd_high = hpd;
 
-       if (!hpd_priv->dp_cb && !hpd_priv->dp_cb->configure
-                               && !hpd_priv->dp_cb->disconnect) {
+       if (!hpd_priv->dp_cb || !hpd_priv->dp_cb->configure
+                               || !hpd_priv->dp_cb->disconnect) {
                pr_err("hpd dp_cb not initialized\n");
                return -EINVAL;
        }
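
The guard fix above is a De Morgan correction: with &&, a non-NULL dp_cb makes the first operand false and short-circuits the whole condition, so missing callbacks were never caught (and a NULL dp_cb would have been dereferenced by the second operand). With ||, any missing piece rejects the callback set:

    /* Fixed guard: bail out if ANY required piece is absent. */
    if (!hpd_priv->dp_cb || !hpd_priv->dp_cb->configure ||
        !hpd_priv->dp_cb->disconnect)
            return -EINVAL;
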
index 9c4ea00..3961ba4 100644 (file)
@@ -269,7 +269,7 @@ int dp_power_clk_enable(struct dp_power *dp_power,
                DRM_ERROR("failed to '%s' clks for: %s. err=%d\n",
                        enable ? "enable" : "disable",
                        dp_parser_pm_name(pm_type), rc);
-                       return rc;
+               return rc;
        }
 
        if (pm_type == DP_CORE_PM)
index 78ef5d4..7abfeab 100644 (file)
 struct msm_dsi_phy_shared_timings;
 struct msm_dsi_phy_clk_request;
 
-enum msm_dsi_phy_type {
-       MSM_DSI_PHY_28NM_HPM,
-       MSM_DSI_PHY_28NM_LP,
-       MSM_DSI_PHY_20NM,
-       MSM_DSI_PHY_28NM_8960,
-       MSM_DSI_PHY_14NM,
-       MSM_DSI_PHY_10NM,
-       MSM_DSI_PHY_7NM,
-       MSM_DSI_PHY_7NM_V4_1,
-       MSM_DSI_PHY_MAX
-};
-
 enum msm_dsi_phy_usecase {
        MSM_DSI_PHY_STANDALONE,
        MSM_DSI_PHY_MASTER,
@@ -104,45 +92,6 @@ static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi)
 
 struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi);
 
-/* dsi pll */
-struct msm_dsi_pll;
-#ifdef CONFIG_DRM_MSM_DSI_PLL
-struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
-                       enum msm_dsi_phy_type type, int dsi_id);
-void msm_dsi_pll_destroy(struct msm_dsi_pll *pll);
-int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
-       struct clk **byte_clk_provider, struct clk **pixel_clk_provider);
-void msm_dsi_pll_save_state(struct msm_dsi_pll *pll);
-int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll);
-int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
-                           enum msm_dsi_phy_usecase uc);
-#else
-static inline struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
-                        enum msm_dsi_phy_type type, int id) {
-       return ERR_PTR(-ENODEV);
-}
-static inline void msm_dsi_pll_destroy(struct msm_dsi_pll *pll)
-{
-}
-static inline int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
-       struct clk **byte_clk_provider, struct clk **pixel_clk_provider)
-{
-       return -ENODEV;
-}
-static inline void msm_dsi_pll_save_state(struct msm_dsi_pll *pll)
-{
-}
-static inline int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll)
-{
-       return 0;
-}
-static inline int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
-                                         enum msm_dsi_phy_usecase uc)
-{
-       return -ENODEV;
-}
-#endif
-
 /* dsi host */
 struct msm_dsi_host;
 int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
@@ -169,7 +118,7 @@ struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host);
 int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer);
 void msm_dsi_host_unregister(struct mipi_dsi_host *host);
 int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
-                       struct msm_dsi_pll *src_pll);
+                       struct msm_dsi_phy *src_phy);
 void msm_dsi_host_reset_phy(struct mipi_dsi_host *host);
 void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
        struct msm_dsi_phy_clk_request *clk_req,
@@ -213,14 +162,17 @@ struct msm_dsi_phy_clk_request {
 
 void msm_dsi_phy_driver_register(void);
 void msm_dsi_phy_driver_unregister(void);
-int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
                        struct msm_dsi_phy_clk_request *clk_req);
 void msm_dsi_phy_disable(struct msm_dsi_phy *phy);
 void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy,
                        struct msm_dsi_phy_shared_timings *shared_timing);
-struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy);
 void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
                             enum msm_dsi_phy_usecase uc);
+int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
+       struct clk **byte_clk_provider, struct clk **pixel_clk_provider);
+void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy);
+int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy);
 
 #endif /* __DSI_CONNECTOR_H__ */
 
index b2ff68a..f3f1c03 100644 (file)
@@ -106,12 +106,8 @@ static const struct msm_dsi_config msm8994_dsi_cfg = {
        .num_dsi = 2,
 };
 
-/*
- * TODO: core_mmss_clk fails to enable for some reason, but things work fine
- * without it too. Figure out why it doesn't enable and uncomment below
- */
 static const char * const dsi_8996_bus_clk_names[] = {
-       "mdp_core", "iface", "bus", /* "core_mmss", */
+       "mdp_core", "iface", "bus", "core_mmss",
 };
 
 static const struct msm_dsi_config msm8996_dsi_cfg = {
index ab281cb..8a10e43 100644 (file)
@@ -1826,8 +1826,6 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
 
        msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
        if (!msm_host) {
-               pr_err("%s: FAILED: cannot alloc dsi host\n",
-                      __func__);
                ret = -ENOMEM;
                goto fail;
        }
@@ -2226,13 +2224,13 @@ void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base,
 }
 
 int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
-       struct msm_dsi_pll *src_pll)
+       struct msm_dsi_phy *src_phy)
 {
        struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
        struct clk *byte_clk_provider, *pixel_clk_provider;
        int ret;
 
-       ret = msm_dsi_pll_get_clk_provider(src_pll,
+       ret = msm_dsi_phy_get_clk_provider(src_phy,
                                &byte_clk_provider, &pixel_clk_provider);
        if (ret) {
                pr_info("%s: can't get provider from pll, don't set parent\n",
index 1d28dfb..cd01657 100644 (file)
@@ -70,7 +70,6 @@ static int dsi_mgr_setup_components(int id)
        struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
        struct msm_dsi *clk_master_dsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
        struct msm_dsi *clk_slave_dsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
-       struct msm_dsi_pll *src_pll;
        int ret;
 
        if (!IS_DUAL_DSI()) {
@@ -79,10 +78,7 @@ static int dsi_mgr_setup_components(int id)
                        return ret;
 
                msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE);
-               src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
-               if (IS_ERR(src_pll))
-                       return PTR_ERR(src_pll);
-               ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll);
+               ret = msm_dsi_host_set_src_pll(msm_dsi->host, msm_dsi->phy);
        } else if (!other_dsi) {
                ret = 0;
        } else {
@@ -109,19 +105,16 @@ static int dsi_mgr_setup_components(int id)
                                        MSM_DSI_PHY_MASTER);
                msm_dsi_phy_set_usecase(clk_slave_dsi->phy,
                                        MSM_DSI_PHY_SLAVE);
-               src_pll = msm_dsi_phy_get_pll(clk_master_dsi->phy);
-               if (IS_ERR(src_pll))
-                       return PTR_ERR(src_pll);
-               ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll);
+               ret = msm_dsi_host_set_src_pll(msm_dsi->host, clk_master_dsi->phy);
                if (ret)
                        return ret;
-               ret = msm_dsi_host_set_src_pll(other_dsi->host, src_pll);
+               ret = msm_dsi_host_set_src_pll(other_dsi->host, clk_master_dsi->phy);
        }
 
        return ret;
 }
 
-static int enable_phy(struct msm_dsi *msm_dsi, int src_pll_id,
+static int enable_phy(struct msm_dsi *msm_dsi,
                      struct msm_dsi_phy_shared_timings *shared_timings)
 {
        struct msm_dsi_phy_clk_request clk_req;
@@ -130,7 +123,7 @@ static int enable_phy(struct msm_dsi *msm_dsi, int src_pll_id,
 
        msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req, is_dual_dsi);
 
-       ret = msm_dsi_phy_enable(msm_dsi->phy, src_pll_id, &clk_req);
+       ret = msm_dsi_phy_enable(msm_dsi->phy, &clk_req);
        msm_dsi_phy_get_shared_timings(msm_dsi->phy, shared_timings);
 
        return ret;
@@ -143,7 +136,6 @@ dsi_mgr_phy_enable(int id,
        struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
        struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
        struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE);
-       int src_pll_id = IS_DUAL_DSI() ? DSI_CLOCK_MASTER : id;
        int ret;
 
        /* In case of dual DSI, some registers in PHY1 have been programmed
@@ -156,11 +148,11 @@ dsi_mgr_phy_enable(int id,
                        msm_dsi_host_reset_phy(mdsi->host);
                        msm_dsi_host_reset_phy(sdsi->host);
 
-                       ret = enable_phy(mdsi, src_pll_id,
+                       ret = enable_phy(mdsi,
                                         &shared_timings[DSI_CLOCK_MASTER]);
                        if (ret)
                                return ret;
-                       ret = enable_phy(sdsi, src_pll_id,
+                       ret = enable_phy(sdsi,
                                         &shared_timings[DSI_CLOCK_SLAVE]);
                        if (ret) {
                                msm_dsi_phy_disable(mdsi->phy);
@@ -169,7 +161,7 @@ dsi_mgr_phy_enable(int id,
                }
        } else {
                msm_dsi_host_reset_phy(msm_dsi->host);
-               ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]);
+               ret = enable_phy(msm_dsi, &shared_timings[id]);
                if (ret)
                        return ret;
        }
@@ -505,7 +497,6 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
        struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
        struct mipi_dsi_host *host = msm_dsi->host;
        struct drm_panel *panel = msm_dsi->panel;
-       struct msm_dsi_pll *src_pll;
        bool is_dual_dsi = IS_DUAL_DSI();
        int ret;
 
@@ -539,9 +530,8 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
                                                                id, ret);
        }
 
-       /* Save PLL status if it is a clock source */
-       src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
-       msm_dsi_pll_save_state(src_pll);
+       /* Save PHY status if it is a clock source */
+       msm_dsi_phy_pll_save_state(msm_dsi->phy);
 
        ret = msm_dsi_host_power_off(host);
        if (ret)
index e8c1a72..f0a2ddf 100644 (file)
@@ -3,6 +3,7 @@
  * Copyright (c) 2015, The Linux Foundation. All rights reserved.
  */
 
+#include <linux/clk-provider.h>
 #include <linux/platform_device.h>
 
 #include "dsi_phy.h"
@@ -460,23 +461,6 @@ int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
        return 0;
 }
 
-void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
-                               u32 bit_mask)
-{
-       int phy_id = phy->id;
-       u32 val;
-
-       if ((phy_id >= DSI_MAX) || (pll_id >= DSI_MAX))
-               return;
-
-       val = dsi_phy_read(phy->base + reg);
-
-       if (phy->cfg->src_pll_truthtable[phy_id][pll_id])
-               dsi_phy_write(phy->base + reg, val | bit_mask);
-       else
-               dsi_phy_write(phy->base + reg, val & (~bit_mask));
-}
-
 static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
 {
        struct regulator_bulk_data *s = phy->supplies;
@@ -637,24 +621,6 @@ static int dsi_phy_get_id(struct msm_dsi_phy *phy)
        return -EINVAL;
 }
 
-int msm_dsi_phy_init_common(struct msm_dsi_phy *phy)
-{
-       struct platform_device *pdev = phy->pdev;
-       int ret = 0;
-
-       phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
-                               "DSI_PHY_REG");
-       if (IS_ERR(phy->reg_base)) {
-               DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy regulator base\n",
-                       __func__);
-               ret = -ENOMEM;
-               goto fail;
-       }
-
-fail:
-       return ret;
-}
-
 static int dsi_phy_driver_probe(struct platform_device *pdev)
 {
        struct msm_dsi_phy *phy;
@@ -670,6 +636,14 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
        if (!match)
                return -ENODEV;
 
+       phy->provided_clocks = devm_kzalloc(dev,
+                       struct_size(phy->provided_clocks, hws, NUM_PROVIDED_CLKS),
+                       GFP_KERNEL);
+       if (!phy->provided_clocks)
+               return -ENOMEM;
+
+       phy->provided_clocks->num = NUM_PROVIDED_CLKS;
+
        phy->cfg = match->data;
        phy->pdev = pdev;
 
@@ -691,6 +665,31 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
                goto fail;
        }
 
+       phy->pll_base = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
+       if (IS_ERR(phy->pll_base)) {
+               DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       if (phy->cfg->has_phy_lane) {
+               phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane", "DSI_PHY_LANE");
+               if (IS_ERR(phy->lane_base)) {
+                       DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n", __func__);
+                       ret = -ENOMEM;
+                       goto fail;
+               }
+       }
+
+       if (phy->cfg->has_phy_regulator) {
+               phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", "DSI_PHY_REG");
+               if (IS_ERR(phy->reg_base)) {
+                       DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy regulator base\n", __func__);
+                       ret = -ENOMEM;
+                       goto fail;
+               }
+       }
+
        ret = dsi_phy_regulator_init(phy);
        if (ret)
                goto fail;
@@ -702,12 +701,6 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
                goto fail;
        }
 
-       if (phy->cfg->ops.init) {
-               ret = phy->cfg->ops.init(phy);
-               if (ret)
-                       goto fail;
-       }
-
        /* PLL init will call into clk_register which requires
         * register access, so we need to enable power and ahb clock.
         */
@@ -715,12 +708,21 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
        if (ret)
                goto fail;
 
-       phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
-       if (IS_ERR_OR_NULL(phy->pll)) {
-               DRM_DEV_INFO(dev,
-                       "%s: pll init failed: %ld, need separate pll clk driver\n",
-                       __func__, PTR_ERR(phy->pll));
-               phy->pll = NULL;
+       if (phy->cfg->ops.pll_init) {
+               ret = phy->cfg->ops.pll_init(phy);
+               if (ret) {
+                       DRM_DEV_INFO(dev,
+                               "%s: pll init failed: %d, need separate pll clk driver\n",
+                               __func__, ret);
+                       goto fail;
+               }
+       }
+
+       ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+                                    phy->provided_clocks);
+       if (ret) {
+               DRM_DEV_ERROR(dev, "%s: failed to register clk provider: %d\n", __func__, ret);
+               goto fail;
        }
 
        dsi_phy_disable_resource(phy);
@@ -733,23 +735,8 @@ fail:
        return ret;
 }
 
-static int dsi_phy_driver_remove(struct platform_device *pdev)
-{
-       struct msm_dsi_phy *phy = platform_get_drvdata(pdev);
-
-       if (phy && phy->pll) {
-               msm_dsi_pll_destroy(phy->pll);
-               phy->pll = NULL;
-       }
-
-       platform_set_drvdata(pdev, NULL);
-
-       return 0;
-}
-
 static struct platform_driver dsi_phy_platform_driver = {
        .probe      = dsi_phy_driver_probe,
-       .remove     = dsi_phy_driver_remove,
        .driver     = {
                .name   = "msm_dsi_phy",
                .of_match_table = dsi_phy_dt_match,
@@ -766,7 +753,7 @@ void __exit msm_dsi_phy_driver_unregister(void)
        platform_driver_unregister(&dsi_phy_platform_driver);
 }
 
-int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
                        struct msm_dsi_phy_clk_request *clk_req)
 {
        struct device *dev = &phy->pdev->dev;
@@ -789,7 +776,7 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
                goto reg_en_fail;
        }
 
-       ret = phy->cfg->ops.enable(phy, src_pll_id, clk_req);
+       ret = phy->cfg->ops.enable(phy, clk_req);
        if (ret) {
                DRM_DEV_ERROR(dev, "%s: phy enable failed, %d\n", __func__, ret);
                goto phy_en_fail;
@@ -802,9 +789,9 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
         * source.
         */
        if (phy->usecase != MSM_DSI_PHY_SLAVE) {
-               ret = msm_dsi_pll_restore_state(phy->pll);
+               ret = msm_dsi_phy_pll_restore_state(phy);
                if (ret) {
-                       DRM_DEV_ERROR(dev, "%s: failed to restore pll state, %d\n",
+                       DRM_DEV_ERROR(dev, "%s: failed to restore phy state, %d\n",
                                __func__, ret);
                        goto pll_restor_fail;
                }
@@ -841,17 +828,43 @@ void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy,
               sizeof(*shared_timings));
 }
 
-struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy)
-{
-       if (!phy)
-               return NULL;
-
-       return phy->pll;
-}
-
 void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
                             enum msm_dsi_phy_usecase uc)
 {
        if (phy)
                phy->usecase = uc;
 }
+
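+/*
+ * Both clk handles come from the provided_clocks table registered in
+ * dsi_phy_driver_probe(). A caller would typically do (sketch only, the
+ * consumer side is not part of this patch):
+ *
+ *     struct clk *byte, *pixel;
+ *
+ *     msm_dsi_phy_get_clk_provider(phy, &byte, &pixel);
+ *
+ * and then reparent the host's byte/pixel clocks onto these handles.
+ */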
+int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
+       struct clk **byte_clk_provider, struct clk **pixel_clk_provider)
+{
+       if (byte_clk_provider)
+               *byte_clk_provider = phy->provided_clocks->hws[DSI_BYTE_PLL_CLK]->clk;
+       if (pixel_clk_provider)
+               *pixel_clk_provider = phy->provided_clocks->hws[DSI_PIXEL_PLL_CLK]->clk;
+
+       return 0;
+}
+
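+/*
+ * Save/restore pair for the PLL state: restore_pll_state() only acts when
+ * a prior save_pll_state() set state_saved, so calling restore without a
+ * matching save is a harmless no-op (see msm_dsi_phy_enable() above).
+ */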
+void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy)
+{
+       if (phy->cfg->ops.save_pll_state) {
+               phy->cfg->ops.save_pll_state(phy);
+               phy->state_saved = true;
+       }
+}
+
+int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy)
+{
+       int ret;
+
+       if (phy->cfg->ops.restore_pll_state && phy->state_saved) {
+               ret = phy->cfg->ops.restore_pll_state(phy);
+               if (ret)
+                       return ret;
+
+               phy->state_saved = false;
+       }
+
+       return 0;
+}
index d2bd74b..94a77ac 100644 (file)
@@ -6,37 +6,38 @@
 #ifndef __DSI_PHY_H__
 #define __DSI_PHY_H__
 
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
 #include <linux/regulator/consumer.h>
 
 #include "dsi.h"
 
 #define dsi_phy_read(offset) msm_readl((offset))
 #define dsi_phy_write(offset, data) msm_writel((data), (offset))
-
-/* v3.0.0 10nm implementation that requires the old timings settings */
-#define V3_0_0_10NM_OLD_TIMINGS_QUIRK  BIT(0)
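+/*
+ * Helpers for register writes that must be followed by a fixed delay.
+ * Wrapped in do-while(0) so each use expands to a single statement and
+ * remains safe in unbraced if/else bodies.
+ */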
+#define dsi_phy_write_udelay(offset, data, delay_us) do { msm_writel((data), (offset)); udelay(delay_us); } while (0)
+#define dsi_phy_write_ndelay(offset, data, delay_ns) do { msm_writel((data), (offset)); ndelay(delay_ns); } while (0)
 
 struct msm_dsi_phy_ops {
-       int (*init) (struct msm_dsi_phy *phy);
-       int (*enable)(struct msm_dsi_phy *phy, int src_pll_id,
+       int (*pll_init)(struct msm_dsi_phy *phy);
+       int (*enable)(struct msm_dsi_phy *phy,
                        struct msm_dsi_phy_clk_request *clk_req);
        void (*disable)(struct msm_dsi_phy *phy);
+       void (*save_pll_state)(struct msm_dsi_phy *phy);
+       int (*restore_pll_state)(struct msm_dsi_phy *phy);
 };
 
 struct msm_dsi_phy_cfg {
-       enum msm_dsi_phy_type type;
        struct dsi_reg_config reg_cfg;
        struct msm_dsi_phy_ops ops;
 
-       /*
-        * Each cell {phy_id, pll_id} of the truth table indicates
-        * if the source PLL selection bit should be set for each PHY.
-        * Fill default H/W values in illegal cells, eg. cell {0, 1}.
-        */
-       bool src_pll_truthtable[DSI_MAX][DSI_MAX];
+       unsigned long   min_pll_rate;
+       unsigned long   max_pll_rate;
+
        const resource_size_t io_start[DSI_MAX];
        const int num_dsi_phy;
        const int quirks;
+       bool has_phy_regulator;
+       bool has_phy_lane;
 };
 
 extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
@@ -74,9 +75,14 @@ struct msm_dsi_dphy_timing {
        u8 hs_halfbyte_en_ckln;
 };
 
+#define DSI_BYTE_PLL_CLK               0
+#define DSI_PIXEL_PLL_CLK              1
+#define NUM_PROVIDED_CLKS              2
+
 struct msm_dsi_phy {
        struct platform_device *pdev;
        void __iomem *base;
+       void __iomem *pll_base;
        void __iomem *reg_base;
        void __iomem *lane_base;
        int id;
@@ -90,7 +96,12 @@ struct msm_dsi_phy {
        enum msm_dsi_phy_usecase usecase;
        bool regulator_ldo_mode;
 
-       struct msm_dsi_pll *pll;
+       struct clk_hw *vco_hw;
+       bool pll_on;
+
+       struct clk_hw_onecell_data *provided_clocks;
+
+       bool state_saved;
 };
 
 /*
@@ -104,9 +115,5 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
                                struct msm_dsi_phy_clk_request *clk_req);
 int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
                                struct msm_dsi_phy_clk_request *clk_req);
-void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
-                               u32 bit_mask);
-int msm_dsi_phy_init_common(struct msm_dsi_phy *phy);
 
 #endif /* __DSI_PHY_H__ */
-
index d1b92d4..34bc935 100644 (file)
  * Copyright (c) 2018, The Linux Foundation
  */
 
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/iopoll.h>
 
 #include "dsi_phy.h"
 #include "dsi.xml.h"
 
+/*
+ * DSI PLL 10nm - clock diagram (eg: DSI0):
+ *
+ *           dsi0_pll_out_div_clk  dsi0_pll_bit_clk
+ *                              |                |
+ *                              |                |
+ *                 +---------+  |  +----------+  |  +----+
+ *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
+ *                 +---------+  |  +----------+  |  +----+
+ *                              |                |
+ *                              |                |         dsi0_pll_by_2_bit_clk
+ *                              |                |          |
+ *                              |                |  +----+  |  |\  dsi0_pclk_mux
+ *                              |                |--| /2 |--o--| \   |
+ *                              |                |  +----+     |  \  |  +---------+
+ *                              |                --------------|  |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
+ *                              |------------------------------|  /     +---------+
+ *                              |          +-----+             | /
+ *                              -----------| /4? |--o----------|/
+ *                                         +-----+  |           |
+ *                                                  |           |dsiclk_sel
+ *                                                  |
+ *                                                  dsi0_pll_post_out_div_clk
+ */
+
+#define VCO_REF_CLK_RATE               19200000
+#define FRAC_BITS 18
+
+/* v3.0.0 10nm implementation that requires the old timings settings */
+#define DSI_PHY_10NM_QUIRK_OLD_TIMINGS BIT(0)
+
+struct dsi_pll_config {
+       bool enable_ssc;
+       bool ssc_center;
+       u32 ssc_freq;
+       u32 ssc_offset;
+       u32 ssc_adj_per;
+
+       /* out */
+       u32 pll_prop_gain_rate;
+       u32 decimal_div_start;
+       u32 frac_div_start;
+       u32 pll_clock_inverters;
+       u32 ssc_stepsize;
+       u32 ssc_div_per;
+};
+
+struct pll_10nm_cached_state {
+       unsigned long vco_rate;
+       u8 bit_clk_div;
+       u8 pix_clk_div;
+       u8 pll_out_div;
+       u8 pll_mux;
+};
+
+struct dsi_pll_10nm {
+       struct clk_hw clk_hw;
+
+       struct msm_dsi_phy *phy;
+
+       u64 vco_current_rate;
+
+       /* protects REG_DSI_10nm_PHY_CMN_CLK_CFG0 register */
+       spinlock_t postdiv_lock;
+
+       struct pll_10nm_cached_state cached_state;
+
+       struct dsi_pll_10nm *slave;
+};
+
+#define to_pll_10nm(x) container_of(x, struct dsi_pll_10nm, clk_hw)
+
+/*
+ * Global list of private DSI PLL struct pointers. We need this for Dual DSI
+ * mode, where the master PLL's clk_ops needs access to the slave's private
+ * data.
+ */
+static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX];
+
+static void dsi_pll_setup_config(struct dsi_pll_config *config)
+{
+       config->ssc_freq = 31500;
+       config->ssc_offset = 5000;
+       config->ssc_adj_per = 2;
+
+       config->enable_ssc = false;
+       config->ssc_center = false;
+}
+
+static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
+{
+       u64 fref = VCO_REF_CLK_RATE;
+       u64 pll_freq;
+       u64 divider;
+       u64 dec, dec_multiple;
+       u32 frac;
+       u64 multiplier;
+
+       pll_freq = pll->vco_current_rate;
+
+       divider = fref * 2;
+
+       multiplier = 1 << FRAC_BITS;
+       dec_multiple = div_u64(pll_freq * multiplier, divider);
+       dec = div_u64_rem(dec_multiple, multiplier, &frac);
+
+       if (pll_freq <= 1900000000UL)
+               config->pll_prop_gain_rate = 8;
+       else if (pll_freq <= 3000000000UL)
+               config->pll_prop_gain_rate = 10;
+       else
+               config->pll_prop_gain_rate = 12;
+       if (pll_freq < 1100000000UL)
+               config->pll_clock_inverters = 8;
+       else
+               config->pll_clock_inverters = 0;
+
+       config->decimal_div_start = dec;
+       config->frac_div_start = frac;
+}
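+
+/*
+ * Worked example (illustrative rate, not from this patch): a 1.5 GHz VCO
+ * with fref = 19.2 MHz gives divider = 38.4 MHz, so
+ * dec_multiple = 1500000000 * 2^18 / 38400000 = 39.0625 * 2^18, yielding
+ * decimal_div_start = 39 and frac_div_start = 16384 (0.0625 * 2^18).
+ */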
+
+#define SSC_CENTER             BIT(0)
+#define SSC_EN                 BIT(1)
+
+static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
+{
+       u32 ssc_per;
+       u32 ssc_mod;
+       u64 ssc_step_size;
+       u64 frac;
+
+       if (!config->enable_ssc) {
+               DBG("SSC not enabled\n");
+               return;
+       }
+
+       ssc_per = DIV_ROUND_CLOSEST(VCO_REF_CLK_RATE, config->ssc_freq) / 2 - 1;
+       ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
+       ssc_per -= ssc_mod;
+
+       frac = config->frac_div_start;
+       ssc_step_size = config->decimal_div_start;
+       ssc_step_size *= (1 << FRAC_BITS);
+       ssc_step_size += frac;
+       ssc_step_size *= config->ssc_offset;
+       ssc_step_size *= (config->ssc_adj_per + 1);
+       ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
+       ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
+
+       config->ssc_div_per = ssc_per;
+       config->ssc_stepsize = ssc_step_size;
+
+       pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
+                config->decimal_div_start, frac, FRAC_BITS);
+       pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
+                ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
+}
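+
+/*
+ * With the defaults from dsi_pll_setup_config() (ssc_freq = 31500,
+ * ssc_adj_per = 2): ssc_per = DIV_ROUND_CLOSEST(19200000, 31500) / 2 - 1
+ * = 304, trimmed by ssc_mod = 305 % 3 = 2 down to ssc_per = 302.
+ */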
+
+static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
+{
+       void __iomem *base = pll->phy->pll_base;
+
+       if (config->enable_ssc) {
+               pr_debug("SSC is enabled\n");
+
+               dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
+                         config->ssc_stepsize & 0xff);
+               dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
+                         config->ssc_stepsize >> 8);
+               dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1,
+                         config->ssc_div_per & 0xff);
+               dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
+                         config->ssc_div_per >> 8);
+               dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1,
+                         config->ssc_adj_per & 0xff);
+               dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1,
+                         config->ssc_adj_per >> 8);
+               dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL,
+                         SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
+       }
+}
+
+static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll)
+{
+       void __iomem *base = pll->phy->pll_base;
+
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE, 0x80);
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DSM_DIVIDER, 0x00);
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE,
+                 0xba);
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00);
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00);
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08);
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0);
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0xfa);
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1,
+                 0x4c);
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29);
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f);
+}
+
+static void dsi_pll_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
+{
+       void __iomem *base = pll->phy->pll_base;
+
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1,
+                 config->decimal_div_start);
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1,
+                 config->frac_div_start & 0xff);
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1,
+                 (config->frac_div_start & 0xff00) >> 8);
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
+                 (config->frac_div_start & 0x30000) >> 16);
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 64);
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
+       dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
+                 config->pll_clock_inverters);
+}
+
+static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
+                                    unsigned long parent_rate)
+{
+       struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
+       struct dsi_pll_config config;
+
+       DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_10nm->phy->id, rate,
+           parent_rate);
+
+       pll_10nm->vco_current_rate = rate;
+
+       dsi_pll_setup_config(&config);
+
+       dsi_pll_calc_dec_frac(pll_10nm, &config);
+
+       dsi_pll_calc_ssc(pll_10nm, &config);
+
+       dsi_pll_commit(pll_10nm, &config);
+
+       dsi_pll_config_hzindep_reg(pll_10nm);
+
+       dsi_pll_ssc_commit(pll_10nm, &config);
+
+       /* flush, ensure all register writes are done */
+       wmb();
+
+       return 0;
+}
+
+static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
+{
+       struct device *dev = &pll->phy->pdev->dev;
+       int rc;
+       u32 status = 0;
+       u32 const delay_us = 100;
+       u32 const timeout_us = 5000;
+
+       rc = readl_poll_timeout_atomic(pll->phy->pll_base +
+                                      REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE,
+                                      status,
+                                      ((status & BIT(0)) > 0),
+                                      delay_us,
+                                      timeout_us);
+       if (rc)
+               DRM_DEV_ERROR(dev, "DSI PLL(%d) lock failed, status=0x%08x\n",
+                             pll->phy->id, status);
+
+       return rc;
+}
+
+static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll)
+{
+       u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);
+
+       dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0);
+       dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
+                 data & ~BIT(5));
+       ndelay(250);
+}
+
+static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll)
+{
+       u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);
+
+       dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
+                 data | BIT(5));
+       dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
+       ndelay(250);
+}
+
+static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll)
+{
+       u32 data;
+
+       data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+       dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
+                 data & ~BIT(5));
+}
+
+static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
+{
+       u32 data;
+
+       data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+       dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
+                 data | BIT(5));
+}
+
+static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
+{
+       struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
+       struct device *dev = &pll_10nm->phy->pdev->dev;
+       int rc;
+
+       dsi_pll_enable_pll_bias(pll_10nm);
+       if (pll_10nm->slave)
+               dsi_pll_enable_pll_bias(pll_10nm->slave);
+
+       rc = dsi_pll_10nm_vco_set_rate(hw, pll_10nm->vco_current_rate, 0);
+       if (rc) {
+               DRM_DEV_ERROR(dev, "vco_set_rate failed, rc=%d\n", rc);
+               return rc;
+       }
+
+       /* Start PLL */
+       dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
+                 0x01);
+
+       /*
+        * ensure all PLL configurations are written prior to checking
+        * for PLL lock.
+        */
+       wmb();
+
+       /* Check for PLL lock */
+       rc = dsi_pll_10nm_lock_status(pll_10nm);
+       if (rc) {
+               DRM_DEV_ERROR(dev, "PLL(%d) lock failed\n", pll_10nm->phy->id);
+               goto error;
+       }
+
+       pll_10nm->phy->pll_on = true;
+
+       dsi_pll_enable_global_clk(pll_10nm);
+       if (pll_10nm->slave)
+               dsi_pll_enable_global_clk(pll_10nm->slave);
+
+       dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL,
+                 0x01);
+       if (pll_10nm->slave)
+               dsi_phy_write(pll_10nm->slave->phy->base +
+                         REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);
+
+error:
+       return rc;
+}
+
+static void dsi_pll_disable_sub(struct dsi_pll_10nm *pll)
+{
+       dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0);
+       dsi_pll_disable_pll_bias(pll);
+}
+
+static void dsi_pll_10nm_vco_unprepare(struct clk_hw *hw)
+{
+       struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
+
+       /*
+        * To avoid any stray glitches while abruptly powering down the PLL
+        * make sure to gate the clock using the clock enable bit before
+        * powering down the PLL
+        */
+       dsi_pll_disable_global_clk(pll_10nm);
+       dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0);
+       dsi_pll_disable_sub(pll_10nm);
+       if (pll_10nm->slave) {
+               dsi_pll_disable_global_clk(pll_10nm->slave);
+               dsi_pll_disable_sub(pll_10nm->slave);
+       }
+       /* flush, ensure all register writes are done */
+       wmb();
+       pll_10nm->phy->pll_on = false;
+}
+
+static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
+                                                 unsigned long parent_rate)
+{
+       struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
+       void __iomem *base = pll_10nm->phy->pll_base;
+       u64 ref_clk = VCO_REF_CLK_RATE;
+       u64 vco_rate = 0x0;
+       u64 multiplier;
+       u32 frac;
+       u32 dec;
+       u64 pll_freq, tmp64;
+
+       dec = dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1);
+       dec &= 0xff;
+
+       frac = dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1);
+       frac |= ((dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1) &
+                 0xff) << 8);
+       frac |= ((dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
+                 0x3) << 16);
+
+       /*
+        * TODO:
+        *      1. Assumes prescaler is disabled
+        */
+       multiplier = 1 << FRAC_BITS;
+       pll_freq = dec * (ref_clk * 2);
+       tmp64 = (ref_clk * 2 * frac);
+       pll_freq += div_u64(tmp64, multiplier);
+
+       vco_rate = pll_freq;
+
+       DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
+           pll_10nm->phy->id, (unsigned long)vco_rate, dec, frac);
+
+       return (unsigned long)vco_rate;
+}
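+
+/*
+ * Reversing the earlier example: dec = 39, frac = 16384 recompute to
+ * 39 * 38400000 + (38400000 * 16384) / 2^18 = 1497600000 + 2400000,
+ * i.e. the 1.5 GHz originally programmed.
+ */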
+
+static long dsi_pll_10nm_clk_round_rate(struct clk_hw *hw,
+               unsigned long rate, unsigned long *parent_rate)
+{
+       struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
+
+       if      (rate < pll_10nm->phy->cfg->min_pll_rate)
+               return  pll_10nm->phy->cfg->min_pll_rate;
+       else if (rate > pll_10nm->phy->cfg->max_pll_rate)
+               return  pll_10nm->phy->cfg->max_pll_rate;
+       else
+               return rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_10nm_vco = {
+       .round_rate = dsi_pll_10nm_clk_round_rate,
+       .set_rate = dsi_pll_10nm_vco_set_rate,
+       .recalc_rate = dsi_pll_10nm_vco_recalc_rate,
+       .prepare = dsi_pll_10nm_vco_prepare,
+       .unprepare = dsi_pll_10nm_vco_unprepare,
+};
+
+/*
+ * PLL Callbacks
+ */
+
+static void dsi_10nm_pll_save_state(struct msm_dsi_phy *phy)
+{
+       struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
+       struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
+       void __iomem *phy_base = pll_10nm->phy->base;
+       u32 cmn_clk_cfg0, cmn_clk_cfg1;
+
+       cached->pll_out_div = dsi_phy_read(pll_10nm->phy->pll_base +
+                                      REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
+       cached->pll_out_div &= 0x3;
+
+       cmn_clk_cfg0 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);
+       cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
+       cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
+
+       cmn_clk_cfg1 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+       cached->pll_mux = cmn_clk_cfg1 & 0x3;
+
+       DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
+           pll_10nm->phy->id, cached->pll_out_div, cached->bit_clk_div,
+           cached->pix_clk_div, cached->pll_mux);
+}
+
+static int dsi_10nm_pll_restore_state(struct msm_dsi_phy *phy)
+{
+       struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
+       struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
+       void __iomem *phy_base = pll_10nm->phy->base;
+       u32 val;
+       int ret;
+
+       val = dsi_phy_read(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
+       val &= ~0x3;
+       val |= cached->pll_out_div;
+       dsi_phy_write(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val);
+
+       dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+                 cached->bit_clk_div | (cached->pix_clk_div << 4));
+
+       val = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+       val &= ~0x3;
+       val |= cached->pll_mux;
+       dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val);
+
+       ret = dsi_pll_10nm_vco_set_rate(phy->vco_hw,
+                       pll_10nm->vco_current_rate,
+                       VCO_REF_CLK_RATE);
+       if (ret) {
+               DRM_DEV_ERROR(&pll_10nm->phy->pdev->dev,
+                       "restore vco rate failed. ret=%d\n", ret);
+               return ret;
+       }
+
+       DBG("DSI PLL%d", pll_10nm->phy->id);
+
+       return 0;
+}
+
+static int dsi_10nm_set_usecase(struct msm_dsi_phy *phy)
+{
+       struct dsi_pll_10nm *pll_10nm = to_pll_10nm(phy->vco_hw);
+       void __iomem *base = phy->base;
+       u32 data = 0x0; /* internal PLL */
+
+       DBG("DSI PLL%d", pll_10nm->phy->id);
+
+       switch (phy->usecase) {
+       case MSM_DSI_PHY_STANDALONE:
+               break;
+       case MSM_DSI_PHY_MASTER:
+               pll_10nm->slave = pll_10nm_list[(pll_10nm->phy->id + 1) % DSI_MAX];
+               break;
+       case MSM_DSI_PHY_SLAVE:
+               data = 0x1; /* external PLL */
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* set PLL src */
+       dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, (data << 2));
+
+       return 0;
+}
+
+/*
+ * The post dividers and mux clocks are created using the standard divider and
+ * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
+ * state to follow the master PLL's divider/mux state. Therefore, we don't
+ * require special clock ops that also configure the slave PLL registers.
+ */
+static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm, struct clk_hw **provided_clocks)
+{
+       char clk_name[32], parent[32], vco_name[32];
+       char parent2[32], parent3[32], parent4[32];
+       struct clk_init_data vco_init = {
+               .parent_names = (const char *[]){ "xo" },
+               .num_parents = 1,
+               .name = vco_name,
+               .flags = CLK_IGNORE_UNUSED,
+               .ops = &clk_ops_dsi_pll_10nm_vco,
+       };
+       struct device *dev = &pll_10nm->phy->pdev->dev;
+       struct clk_hw *hw;
+       int ret;
+
+       DBG("DSI%d", pll_10nm->phy->id);
+
+       snprintf(vco_name, 32, "dsi%dvco_clk", pll_10nm->phy->id);
+       pll_10nm->clk_hw.init = &vco_init;
+
+       ret = devm_clk_hw_register(dev, &pll_10nm->clk_hw);
+       if (ret)
+               return ret;
+
+       snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
+       snprintf(parent, 32, "dsi%dvco_clk", pll_10nm->phy->id);
+
+       hw = devm_clk_hw_register_divider(dev, clk_name,
+                                    parent, CLK_SET_RATE_PARENT,
+                                    pll_10nm->phy->pll_base +
+                                    REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
+                                    0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto fail;
+       }
+
+       snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
+       snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
+
+       /* BIT CLK: DIV_CTRL_3_0 */
+       hw = devm_clk_hw_register_divider(dev, clk_name, parent,
+                                    CLK_SET_RATE_PARENT,
+                                    pll_10nm->phy->base +
+                                    REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+                                    0, 4, CLK_DIVIDER_ONE_BASED,
+                                    &pll_10nm->postdiv_lock);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto fail;
+       }
+
+       snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->phy->id);
+       snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
+
+       /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
+       hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
+                                         CLK_SET_RATE_PARENT, 1, 8);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto fail;
+       }
+
+       provided_clocks[DSI_BYTE_PLL_CLK] = hw;
+
+       snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
+       snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
+
+       hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
+                                         0, 1, 2);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto fail;
+       }
+
+       snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);
+       snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
+
+       hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
+                                         0, 1, 4);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto fail;
+       }
+
+       snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_10nm->phy->id);
+       snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->phy->id);
+       snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->phy->id);
+       snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_10nm->phy->id);
+       snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->phy->id);
+
+       hw = devm_clk_hw_register_mux(dev, clk_name,
+                                ((const char *[]){
+                                parent, parent2, parent3, parent4
+                                }), 4, 0, pll_10nm->phy->base +
+                                REG_DSI_10nm_PHY_CMN_CLK_CFG1,
+                                0, 2, 0, NULL);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto fail;
+       }
+
+       snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->phy->id);
+       snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->phy->id);
+
+       /* PIX CLK DIV : DIV_CTRL_7_4*/
+       hw = devm_clk_hw_register_divider(dev, clk_name, parent,
+                                    0, pll_10nm->phy->base +
+                                       REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+                                    4, 4, CLK_DIVIDER_ONE_BASED,
+                                    &pll_10nm->postdiv_lock);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto fail;
+       }
+
+       provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
+
+       return 0;
+
+fail:
+       return ret;
+}
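+
+/*
+ * Of the clocks registered above, only the two hws stored at
+ * DSI_BYTE_PLL_CLK and DSI_PIXEL_PLL_CLK are exported to DT consumers,
+ * via devm_of_clk_add_hw_provider() in dsi_phy_driver_probe(); the
+ * intermediate dividers and the mux stay internal to the PHY.
+ */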
+
+static int dsi_pll_10nm_init(struct msm_dsi_phy *phy)
+{
+       struct platform_device *pdev = phy->pdev;
+       struct dsi_pll_10nm *pll_10nm;
+       int ret;
+
+       pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL);
+       if (!pll_10nm)
+               return -ENOMEM;
+
+       DBG("DSI PLL%d", phy->id);
+
+       pll_10nm_list[phy->id] = pll_10nm;
+
+       spin_lock_init(&pll_10nm->postdiv_lock);
+
+       pll_10nm->phy = phy;
+
+       ret = pll_10nm_register(pll_10nm, phy->provided_clocks->hws);
+       if (ret) {
+               DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+               return ret;
+       }
+
+       phy->vco_hw = &pll_10nm->clk_hw;
+
+       /* TODO: Remove this when we have proper display handover support */
+       msm_dsi_phy_pll_save_state(phy);
+
+       return 0;
+}
+
 static int dsi_phy_hw_v3_0_is_pll_on(struct msm_dsi_phy *phy)
 {
        void __iomem *base = phy->base;
@@ -42,7 +746,7 @@ static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
        u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
        void __iomem *lane_base = phy->lane_base;
 
-       if (phy->cfg->quirks & V3_0_0_10NM_OLD_TIMINGS_QUIRK)
+       if (phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS)
                tx_dctrl[3] = 0x02;
 
        /* Strength ctrl settings */
@@ -77,14 +781,14 @@ static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
                              tx_dctrl[i]);
        }
 
-       if (!(phy->cfg->quirks & V3_0_0_10NM_OLD_TIMINGS_QUIRK)) {
+       if (!(phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS)) {
                /* Toggle BIT 0 to release freeze I/0 */
                dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x05);
                dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
        }
 }
 
-static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy,
                               struct msm_dsi_phy_clk_request *clk_req)
 {
        int ret;
@@ -175,7 +879,7 @@ static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
        /* Select full-rate mode */
        dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_2, 0x40);
 
-       ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
+       ret = dsi_10nm_set_usecase(phy);
        if (ret) {
                DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
                        __func__, ret);
@@ -216,24 +920,8 @@ static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy)
        DBG("DSI%d PHY disabled", phy->id);
 }
 
-static int dsi_10nm_phy_init(struct msm_dsi_phy *phy)
-{
-       struct platform_device *pdev = phy->pdev;
-
-       phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
-                                    "DSI_PHY_LANE");
-       if (IS_ERR(phy->lane_base)) {
-               DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
-                       __func__);
-               return -ENOMEM;
-       }
-
-       return 0;
-}
-
 const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
-       .type = MSM_DSI_PHY_10NM,
-       .src_pll_truthtable = { {false, false}, {true, false} },
+       .has_phy_lane = true,
        .reg_cfg = {
                .num = 1,
                .regs = {
@@ -243,15 +931,18 @@ const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
        .ops = {
                .enable = dsi_10nm_phy_enable,
                .disable = dsi_10nm_phy_disable,
-               .init = dsi_10nm_phy_init,
+               .pll_init = dsi_pll_10nm_init,
+               .save_pll_state = dsi_10nm_pll_save_state,
+               .restore_pll_state = dsi_10nm_pll_restore_state,
        },
+       .min_pll_rate = 1000000000UL,
+       .max_pll_rate = 3500000000UL,
        .io_start = { 0xae94400, 0xae96400 },
        .num_dsi_phy = 2,
 };
 
 const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs = {
-       .type = MSM_DSI_PHY_10NM,
-       .src_pll_truthtable = { {false, false}, {true, false} },
+       .has_phy_lane = true,
        .reg_cfg = {
                .num = 1,
                .regs = {
@@ -261,9 +952,13 @@ const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs = {
        .ops = {
                .enable = dsi_10nm_phy_enable,
                .disable = dsi_10nm_phy_disable,
-               .init = dsi_10nm_phy_init,
+               .pll_init = dsi_pll_10nm_init,
+               .save_pll_state = dsi_10nm_pll_save_state,
+               .restore_pll_state = dsi_10nm_pll_restore_state,
        },
+       .min_pll_rate = 1000000000UL,
+       .max_pll_rate = 3500000000UL,
        .io_start = { 0xc994400, 0xc996400 },
        .num_dsi_phy = 2,
-       .quirks = V3_0_0_10NM_OLD_TIMINGS_QUIRK,
+       .quirks = DSI_PHY_10NM_QUIRK_OLD_TIMINGS,
 };
index 5194005..65d68eb 100644 (file)
@@ -3,6 +3,8 @@
  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
  */
 
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/delay.h>
 
 #include "dsi_phy.h"
 
 #define PHY_14NM_CKLN_IDX      4
 
+/*
+ * DSI PLL 14nm - clock diagram (eg: DSI0):
+ *
+ *         dsi0n1_postdiv_clk
+ *                         |
+ *                         |
+ *                 +----+  |  +----+
+ *  dsi0vco_clk ---| n1 |--o--| /8 |-- dsi0pllbyte
+ *                 +----+  |  +----+
+ *                         |           dsi0n1_postdivby2_clk
+ *                         |   +----+  |
+ *                         o---| /2 |--o--|\
+ *                         |   +----+     | \   +----+
+ *                         |              |  |--| n2 |-- dsi0pll
+ *                         o--------------| /   +----+
+ *                                        |/
+ */
+
+#define POLL_MAX_READS                 15
+#define POLL_TIMEOUT_US                        1000
+
+#define VCO_REF_CLK_RATE               19200000
+#define VCO_MIN_RATE                   1300000000UL
+#define VCO_MAX_RATE                   2600000000UL
+
+struct dsi_pll_config {
+       u64 vco_current_rate;
+
+       u32 ssc_en;     /* SSC enable/disable */
+
+       /* fixed params */
+       u32 plllock_cnt;
+       u32 ssc_center;
+       u32 ssc_adj_period;
+       u32 ssc_spread;
+       u32 ssc_freq;
+
+       /* calculated */
+       u32 dec_start;
+       u32 div_frac_start;
+       u32 ssc_period;
+       u32 ssc_step_size;
+       u32 plllock_cmp;
+       u32 pll_vco_div_ref;
+       u32 pll_vco_count;
+       u32 pll_kvco_div_ref;
+       u32 pll_kvco_count;
+};
+
+struct pll_14nm_cached_state {
+       unsigned long vco_rate;
+       u8 n2postdiv;
+       u8 n1postdiv;
+};
+
+struct dsi_pll_14nm {
+       struct clk_hw clk_hw;
+
+       struct msm_dsi_phy *phy;
+
+       /* protects REG_DSI_14nm_PHY_CMN_CLK_CFG0 register */
+       spinlock_t postdiv_lock;
+
+       struct pll_14nm_cached_state cached_state;
+
+       struct dsi_pll_14nm *slave;
+};
+
+#define to_pll_14nm(x) container_of(x, struct dsi_pll_14nm, clk_hw)
+
+/*
+ * Private struct for N1/N2 post-divider clocks. These clocks are similar to
+ * the generic clk_divider class of clocks. The only difference is that they
+ * also set the slave DSI PLL's post-dividers if in Dual DSI mode.
+ */
+struct dsi_pll_14nm_postdiv {
+       struct clk_hw hw;
+
+       /* divider params */
+       u8 shift;
+       u8 width;
+       u8 flags; /* same flags as used by clk_divider struct */
+
+       struct dsi_pll_14nm *pll;
+};
+
+#define to_pll_14nm_postdiv(_hw) container_of(_hw, struct dsi_pll_14nm_postdiv, hw)
+
+/*
+ * Global list of private DSI PLL struct pointers. We need this for Dual DSI
+ * mode, where the master PLL's clk_ops needs access to the slave's private
+ * data.
+ */
+static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX];
+
+static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,
+                                   u32 nb_tries, u32 timeout_us)
+{
+       bool pll_locked = false;
+       void __iomem *base = pll_14nm->phy->pll_base;
+       u32 tries, val;
+
+       tries = nb_tries;
+       while (tries--) {
+               val = dsi_phy_read(base +
+                              REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
+               pll_locked = !!(val & BIT(5));
+
+               if (pll_locked)
+                       break;
+
+               udelay(timeout_us);
+       }
+
+       if (!pll_locked) {
+               tries = nb_tries;
+               while (tries--) {
+                       val = dsi_phy_read(base +
+                               REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
+                       pll_locked = !!(val & BIT(0));
+
+                       if (pll_locked)
+                               break;
+
+                       udelay(timeout_us);
+               }
+       }
+
+       DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
+
+       return pll_locked;
+}
+
+static void dsi_pll_14nm_config_init(struct dsi_pll_config *pconf)
+{
+       /* fixed input */
+       pconf->plllock_cnt = 1;
+
+       /*
+        * SSC is enabled by default. We might need DT props for configuring
+        * some SSC params like PPM and center/down spread etc.
+        */
+       pconf->ssc_en = 1;
+       pconf->ssc_center = 0;          /* down spread by default */
+       pconf->ssc_spread = 5;          /* PPM / 1000 */
+       pconf->ssc_freq = 31500;        /* default recommended */
+       pconf->ssc_adj_period = 37;
+}
+
+#define CEIL(x, y)             (((x) + ((y) - 1)) / (y))
+
+static void pll_14nm_ssc_calc(struct dsi_pll_14nm *pll, struct dsi_pll_config *pconf)
+{
+       u32 period, ssc_period;
+       u32 ref, rem;
+       u64 step_size;
+
+       DBG("vco=%lld ref=%d", pconf->vco_current_rate, VCO_REF_CLK_RATE);
+
+       ssc_period = pconf->ssc_freq / 500;
+       period = (u32)VCO_REF_CLK_RATE / 1000;
+       ssc_period  = CEIL(period, ssc_period);
+       ssc_period -= 1;
+       pconf->ssc_period = ssc_period;
+
+       DBG("ssc freq=%d spread=%d period=%d", pconf->ssc_freq,
+           pconf->ssc_spread, pconf->ssc_period);
+
+       step_size = (u32)pconf->vco_current_rate;
+       ref = VCO_REF_CLK_RATE;
+       ref /= 1000;
+       step_size = div_u64(step_size, ref);
+       step_size <<= 20;
+       step_size = div_u64(step_size, 1000);
+       step_size *= pconf->ssc_spread;
+       step_size = div_u64(step_size, 1000);
+       step_size *= (pconf->ssc_adj_period + 1);
+
+       rem = 0;
+       step_size = div_u64_rem(step_size, ssc_period + 1, &rem);
+       if (rem)
+               step_size++;
+
+       DBG("step_size=%lld", step_size);
+
+       step_size &= 0x0ffff;   /* take lower 16 bits */
+
+       pconf->ssc_step_size = step_size;
+}
+
+static void pll_14nm_dec_frac_calc(struct dsi_pll_14nm *pll, struct dsi_pll_config *pconf)
+{
+       u64 multiplier = BIT(20);
+       u64 dec_start_multiple, dec_start, pll_comp_val;
+       u32 duration, div_frac_start;
+       u64 vco_clk_rate = pconf->vco_current_rate;
+       u64 fref = VCO_REF_CLK_RATE;
+
+       DBG("vco_clk_rate=%lld ref_clk_rate=%lld", vco_clk_rate, fref);
+
+       dec_start_multiple = div_u64(vco_clk_rate * multiplier, fref);
+       div_u64_rem(dec_start_multiple, multiplier, &div_frac_start);
+
+       dec_start = div_u64(dec_start_multiple, multiplier);
+
+       pconf->dec_start = (u32)dec_start;
+       pconf->div_frac_start = div_frac_start;
+
+       if (pconf->plllock_cnt == 0)
+               duration = 1024;
+       else if (pconf->plllock_cnt == 1)
+               duration = 256;
+       else if (pconf->plllock_cnt == 2)
+               duration = 128;
+       else
+               duration = 32;
+
+       pll_comp_val = duration * dec_start_multiple;
+       pll_comp_val = div_u64(pll_comp_val, multiplier);
+       do_div(pll_comp_val, 10);
+
+       pconf->plllock_cmp = (u32)pll_comp_val;
+}
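+
+/*
+ * Example (illustrative): vco_clk_rate = 1.5 GHz and fref = 19.2 MHz give
+ * dec_start_multiple = 78.125 * 2^20, so dec_start = 78 and
+ * div_frac_start = 131072 (0.125 * 2^20); with plllock_cnt = 1
+ * (duration = 256), plllock_cmp = 256 * 78.125 / 10 = 2000.
+ */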
+
+static u32 pll_14nm_kvco_slop(u32 vrate)
+{
+       u32 slop = 0;
+
+       if (vrate > VCO_MIN_RATE && vrate <= 1800000000UL)
+               slop =  600;
+       else if (vrate > 1800000000UL && vrate < 2300000000UL)
+               slop = 400;
+       else if (vrate > 2300000000UL && vrate < VCO_MAX_RATE)
+               slop = 280;
+
+       return slop;
+}
+
+static void pll_14nm_calc_vco_count(struct dsi_pll_14nm *pll, struct dsi_pll_config *pconf)
+{
+       u64 vco_clk_rate = pconf->vco_current_rate;
+       u64 fref = VCO_REF_CLK_RATE;
+       u32 vco_measure_time = 5;
+       u32 kvco_measure_time = 5;
+       u64 data;
+       u32 cnt;
+
+       data = fref * vco_measure_time;
+       do_div(data, 1000000);
+       data &= 0x03ff; /* 10 bits */
+       data -= 2;
+       pconf->pll_vco_div_ref = data;
+
+       data = div_u64(vco_clk_rate, 1000000);  /* unit is Mhz */
+       data *= vco_measure_time;
+       do_div(data, 10);
+       pconf->pll_vco_count = data;
+
+       data = fref * kvco_measure_time;
+       do_div(data, 1000000);
+       data &= 0x03ff; /* 10 bits */
+       data -= 1;
+       pconf->pll_kvco_div_ref = data;
+
+       cnt = pll_14nm_kvco_slop(vco_clk_rate);
+       cnt *= 2;
+       cnt /= 100;
+       cnt *= kvco_measure_time;
+       pconf->pll_kvco_count = cnt;
+}
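+
+/*
+ * Continuing the 1.5 GHz example: pll_vco_div_ref = 96 - 2 = 94,
+ * pll_vco_count = 1500 * 5 / 10 = 750, pll_kvco_div_ref = 96 - 1 = 95,
+ * and with slop = 600, pll_kvco_count = (600 * 2 / 100) * 5 = 60.
+ */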
+
+static void pll_db_commit_ssc(struct dsi_pll_14nm *pll, struct dsi_pll_config *pconf)
+{
+       void __iomem *base = pll->phy->pll_base;
+       u8 data;
+
+       data = pconf->ssc_adj_period;
+       data &= 0x0ff;
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1, data);
+       data = (pconf->ssc_adj_period >> 8);
+       data &= 0x03;
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2, data);
+
+       data = pconf->ssc_period;
+       data &= 0x0ff;
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER1, data);
+       data = (pconf->ssc_period >> 8);
+       data &= 0x0ff;
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER2, data);
+
+       data = pconf->ssc_step_size;
+       data &= 0x0ff;
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1, data);
+       data = (pconf->ssc_step_size >> 8);
+       data &= 0x0ff;
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2, data);
+
+       data = (pconf->ssc_center & 0x01);
+       data <<= 1;
+       data |= 0x01; /* enable */
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER, data);
+
+       wmb();  /* make sure register committed */
+}
+
+static void pll_db_commit_common(struct dsi_pll_14nm *pll,
+                                struct dsi_pll_config *pconf)
+{
+       void __iomem *base = pll->phy->pll_base;
+       u8 data;
+
+       /* configure the non-frequency-dependent pll registers */
+       data = 0;
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET, data);
+
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_TXCLK_EN, 1);
+
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL, 48);
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2, 4 << 3); /* bandgap_timer */
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5, 5); /* pll_wakeup_timer */
+
+       data = pconf->pll_vco_div_ref & 0xff;
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1, data);
+       data = (pconf->pll_vco_div_ref >> 8) & 0x3;
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2, data);
+
+       data = pconf->pll_kvco_div_ref & 0xff;
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1, data);
+       data = (pconf->pll_kvco_div_ref >> 8) & 0x3;
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2, data);
+
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_MISC1, 16);
+
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_IE_TRIM, 4);
+
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_IP_TRIM, 4);
+
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_CP_SET_CUR, 1 << 3 | 1);
+
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPCSET, 0 << 3 | 0);
+
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPMSET, 0 << 3 | 0);
+
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICP_SET, 4 << 3 | 4);
+
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF1, 1 << 4 | 11);
+
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_IPTAT_TRIM, 7);
+
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_CRCTRL, 1 << 4 | 2);
+}
+
+static void pll_14nm_software_reset(struct dsi_pll_14nm *pll_14nm)
+{
+       void __iomem *cmn_base = pll_14nm->phy->base;
+
+       /* de-assert pll start and apply pll sw reset */
+
+       /* stop pll */
+       dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);
+
+       /* pll sw reset */
+       dsi_phy_write_udelay(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x20, 10);
+       wmb();  /* make sure register committed */
+
+       dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0);
+       wmb();  /* make sure register committed */
+}
+
+static void pll_db_commit_14nm(struct dsi_pll_14nm *pll,
+                              struct dsi_pll_config *pconf)
+{
+       void __iomem *base = pll->phy->pll_base;
+       void __iomem *cmn_base = pll->phy->base;
+       u8 data;
+
+       DBG("DSI%d PLL", pll->phy->id);
+
+       dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, 0x3c);
+
+       pll_db_commit_common(pll, pconf);
+
+       pll_14nm_software_reset(pll);
+
+       /* Use the /2 path in Mux */
+       dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG1, 1);
+
+       data = 0xff; /* data, clk, pll normal operation */
+       dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_0, data);
+
+       /* configure the frequency dependent pll registers */
+       data = pconf->dec_start;
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_DEC_START, data);
+
+       data = pconf->div_frac_start & 0xff;
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1, data);
+       data = (pconf->div_frac_start >> 8) & 0xff;
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2, data);
+       data = (pconf->div_frac_start >> 16) & 0xf;
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3, data);
+
+       data = pconf->plllock_cmp & 0xff;
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1, data);
+
+       data = (pconf->plllock_cmp >> 8) & 0xff;
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2, data);
+
+       data = (pconf->plllock_cmp >> 16) & 0x3;
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3, data);
+
+       data = pconf->plllock_cnt << 1 | 0 << 3; /* plllock_rng */
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN, data);
+
+       data = pconf->pll_vco_count & 0xff;
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT1, data);
+       data = (pconf->pll_vco_count >> 8) & 0xff;
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT2, data);
+
+       data = pconf->pll_kvco_count & 0xff;
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT1, data);
+       data = (pconf->pll_kvco_count >> 8) & 0x3;
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT2, data);
+
+       /*
+        * High nibble configures the post divider internal to the VCO. It's
+        * fixed to divide by 1 for now.
+        *
+        * 0: divided by 1
+        * 1: divided by 2
+        * 2: divided by 4
+        * 3: divided by 8
+        */
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV, 0 << 4 | 3);
+
+       if (pconf->ssc_en)
+               pll_db_commit_ssc(pll, pconf);
+
+       wmb();  /* make sure register committed */
+}
+
+/*
+ * VCO clock Callbacks
+ */
+static int dsi_pll_14nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
+                                    unsigned long parent_rate)
+{
+       struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
+       struct dsi_pll_config conf;
+
+       DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_14nm->phy->id, rate,
+           parent_rate);
+
+       dsi_pll_14nm_config_init(&conf);
+       conf.vco_current_rate = rate;
+
+       pll_14nm_dec_frac_calc(pll_14nm, &conf);
+
+       if (conf.ssc_en)
+               pll_14nm_ssc_calc(pll_14nm, &conf);
+
+       pll_14nm_calc_vco_count(pll_14nm, &conf);
+
+       /* commit the slave DSI PLL registers if we're master. Note that we
+        * don't lock the slave PLL. We just ensure that the PLL/PHY registers
+        * of the master and slave are identical
+        */
+       if (pll_14nm->phy->usecase == MSM_DSI_PHY_MASTER) {
+               struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
+
+               pll_db_commit_14nm(pll_14nm_slave, &conf);
+       }
+
+       pll_db_commit_14nm(pll_14nm, &conf);
+
+       return 0;
+}
+
+static unsigned long dsi_pll_14nm_vco_recalc_rate(struct clk_hw *hw,
+                                                 unsigned long parent_rate)
+{
+       struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
+       void __iomem *base = pll_14nm->phy->pll_base;
+       u64 vco_rate, multiplier = BIT(20);
+       u32 div_frac_start;
+       u32 dec_start;
+       u64 ref_clk = parent_rate;
+
+       dec_start = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_DEC_START);
+       dec_start &= 0x0ff;
+
+       DBG("dec_start = %x", dec_start);
+
+       div_frac_start = (dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3)
+                               & 0xf) << 16;
+       div_frac_start |= (dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2)
+                               & 0xff) << 8;
+       div_frac_start |= dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1)
+                               & 0xff;
+
+       DBG("div_frac_start = %x", div_frac_start);
+
+       vco_rate = ref_clk * dec_start;
+
+       vco_rate += ((ref_clk * div_frac_start) / multiplier);
+
+       /*
+        * Recalculating the rate from dec_start and frac_start doesn't end up
+        * the rate we originally set. Convert the freq to KHz, round it up and
+        * convert it back to MHz.
+        */
+       vco_rate = DIV_ROUND_UP_ULL(vco_rate, 1000) * 1000;
+
+       DBG("returning vco rate = %lu", (unsigned long)vco_rate);
+
+       return (unsigned long)vco_rate;
+}
+
+static int dsi_pll_14nm_vco_prepare(struct clk_hw *hw)
+{
+       struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
+       void __iomem *base = pll_14nm->phy->pll_base;
+       void __iomem *cmn_base = pll_14nm->phy->base;
+       bool locked;
+
+       DBG("");
+
+       if (unlikely(pll_14nm->phy->pll_on))
+               return 0;
+
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VREF_CFG1, 0x10);
+       dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 1);
+
+       locked = pll_14nm_poll_for_ready(pll_14nm, POLL_MAX_READS,
+                                        POLL_TIMEOUT_US);
+
+       if (unlikely(!locked)) {
+               DRM_DEV_ERROR(&pll_14nm->phy->pdev->dev, "DSI PLL lock failed\n");
+               return -EINVAL;
+       }
+
+       DBG("DSI PLL lock success");
+       pll_14nm->phy->pll_on = true;
+
+       return 0;
+}
+
+static void dsi_pll_14nm_vco_unprepare(struct clk_hw *hw)
+{
+       struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
+       void __iomem *cmn_base = pll_14nm->phy->base;
+
+       DBG("");
+
+       if (unlikely(!pll_14nm->phy->pll_on))
+               return;
+
+       dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);
+
+       pll_14nm->phy->pll_on = false;
+}
+
+static long dsi_pll_14nm_clk_round_rate(struct clk_hw *hw,
+               unsigned long rate, unsigned long *parent_rate)
+{
+       struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
+
+       if      (rate < pll_14nm->phy->cfg->min_pll_rate)
+               return  pll_14nm->phy->cfg->min_pll_rate;
+       else if (rate > pll_14nm->phy->cfg->max_pll_rate)
+               return  pll_14nm->phy->cfg->max_pll_rate;
+       else
+               return rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_14nm_vco = {
+       .round_rate = dsi_pll_14nm_clk_round_rate,
+       .set_rate = dsi_pll_14nm_vco_set_rate,
+       .recalc_rate = dsi_pll_14nm_vco_recalc_rate,
+       .prepare = dsi_pll_14nm_vco_prepare,
+       .unprepare = dsi_pll_14nm_vco_unprepare,
+};
+
+/*
+ * N1 and N2 post-divider clock callbacks
+ */
+#define div_mask(width)        ((1 << (width)) - 1)
+static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw,
+                                                     unsigned long parent_rate)
+{
+       struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
+       struct dsi_pll_14nm *pll_14nm = postdiv->pll;
+       void __iomem *base = pll_14nm->phy->base;
+       u8 shift = postdiv->shift;
+       u8 width = postdiv->width;
+       u32 val;
+
+       DBG("DSI%d PLL parent rate=%lu", pll_14nm->phy->id, parent_rate);
+
+       val = dsi_phy_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0) >> shift;
+       val &= div_mask(width);
+
+       return divider_recalc_rate(hw, parent_rate, val, NULL,
+                                  postdiv->flags, width);
+}
+
+static long dsi_pll_14nm_postdiv_round_rate(struct clk_hw *hw,
+                                           unsigned long rate,
+                                           unsigned long *prate)
+{
+       struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
+       struct dsi_pll_14nm *pll_14nm = postdiv->pll;
+
+       DBG("DSI%d PLL parent rate=%lu", pll_14nm->phy->id, rate);
+
+       return divider_round_rate(hw, rate, prate, NULL,
+                                 postdiv->width,
+                                 postdiv->flags);
+}
+
+static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+                                        unsigned long parent_rate)
+{
+       struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
+       struct dsi_pll_14nm *pll_14nm = postdiv->pll;
+       void __iomem *base = pll_14nm->phy->base;
+       spinlock_t *lock = &pll_14nm->postdiv_lock;
+       u8 shift = postdiv->shift;
+       u8 width = postdiv->width;
+       unsigned int value;
+       unsigned long flags = 0;
+       u32 val;
+
+       DBG("DSI%d PLL parent rate=%lu parent rate %lu", pll_14nm->phy->id, rate,
+           parent_rate);
+
+       value = divider_get_val(rate, parent_rate, NULL, postdiv->width,
+                               postdiv->flags);
+
+       spin_lock_irqsave(lock, flags);
+
+       val = dsi_phy_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
+       val &= ~(div_mask(width) << shift);
+
+       val |= value << shift;
+       dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);
+
+       /*
+        * If we're the master in dual DSI mode, the slave PLL's post-dividers
+        * follow the master's post-dividers.
+        */
+       if (pll_14nm->phy->usecase == MSM_DSI_PHY_MASTER) {
+               struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
+               void __iomem *slave_base = pll_14nm_slave->phy->base;
+
+               dsi_phy_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);
+       }
+
+       spin_unlock_irqrestore(lock, flags);
+
+       return 0;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_14nm_postdiv = {
+       .recalc_rate = dsi_pll_14nm_postdiv_recalc_rate,
+       .round_rate = dsi_pll_14nm_postdiv_round_rate,
+       .set_rate = dsi_pll_14nm_postdiv_set_rate,
+};
+
+/*
+ * PLL Callbacks
+ */
+
+static void dsi_14nm_pll_save_state(struct msm_dsi_phy *phy)
+{
+       struct dsi_pll_14nm *pll_14nm = to_pll_14nm(phy->vco_hw);
+       struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state;
+       void __iomem *cmn_base = pll_14nm->phy->base;
+       u32 data;
+
+       data = dsi_phy_read(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
+
+       cached_state->n1postdiv = data & 0xf;
+       cached_state->n2postdiv = (data >> 4) & 0xf;
+
+       DBG("DSI%d PLL save state %x %x", pll_14nm->phy->id,
+           cached_state->n1postdiv, cached_state->n2postdiv);
+
+       cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw);
+}
+
+static int dsi_14nm_pll_restore_state(struct msm_dsi_phy *phy)
+{
+       struct dsi_pll_14nm *pll_14nm = to_pll_14nm(phy->vco_hw);
+       struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state;
+       void __iomem *cmn_base = pll_14nm->phy->base;
+       u32 data;
+       int ret;
+
+       ret = dsi_pll_14nm_vco_set_rate(phy->vco_hw,
+                                       cached_state->vco_rate, 0);
+       if (ret) {
+               DRM_DEV_ERROR(&pll_14nm->phy->pdev->dev,
+                       "restore vco rate failed. ret=%d\n", ret);
+               return ret;
+       }
+
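+       /* N1 occupies bits 0-3 and N2 bits 4-7 of CLK_CFG0 */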
+       data = cached_state->n1postdiv | (cached_state->n2postdiv << 4);
+
+       DBG("DSI%d PLL restore state %x %x", pll_14nm->phy->id,
+           cached_state->n1postdiv, cached_state->n2postdiv);
+
+       dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);
+
+       /* also restore post-dividers for slave DSI PLL */
+       if (phy->usecase == MSM_DSI_PHY_MASTER) {
+               struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
+               void __iomem *slave_base = pll_14nm_slave->phy->base;
+
+               dsi_phy_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);
+       }
+
+       return 0;
+}
+
+static int dsi_14nm_set_usecase(struct msm_dsi_phy *phy)
+{
+       struct dsi_pll_14nm *pll_14nm = to_pll_14nm(phy->vco_hw);
+       void __iomem *base = phy->pll_base;
+       u32 clkbuflr_en, bandgap = 0;
+
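+       /*
+        * Program the PLL clock buffers for the usecase: 0x1 for standalone,
+        * 0x3 for the dual-DSI master (which also records its slave PLL),
+        * 0x0 for a slave, which also needs its PLL bandgap programmed.
+        */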
+       switch (phy->usecase) {
+       case MSM_DSI_PHY_STANDALONE:
+               clkbuflr_en = 0x1;
+               break;
+       case MSM_DSI_PHY_MASTER:
+               clkbuflr_en = 0x3;
+               pll_14nm->slave = pll_14nm_list[(pll_14nm->phy->id + 1) % DSI_MAX];
+               break;
+       case MSM_DSI_PHY_SLAVE:
+               clkbuflr_en = 0x0;
+               bandgap = 0x3;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN, clkbuflr_en);
+       if (bandgap)
+               dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_BANDGAP, bandgap);
+
+       return 0;
+}
+
+static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm,
+                                               const char *name,
+                                               const char *parent_name,
+                                               unsigned long flags,
+                                               u8 shift)
+{
+       struct dsi_pll_14nm_postdiv *pll_postdiv;
+       struct device *dev = &pll_14nm->phy->pdev->dev;
+       struct clk_init_data postdiv_init = {
+               .parent_names = (const char *[]) { parent_name },
+               .num_parents = 1,
+               .name = name,
+               .flags = flags,
+               .ops = &clk_ops_dsi_pll_14nm_postdiv,
+       };
+       int ret;
+
+       pll_postdiv = devm_kzalloc(dev, sizeof(*pll_postdiv), GFP_KERNEL);
+       if (!pll_postdiv)
+               return ERR_PTR(-ENOMEM);
+
+       pll_postdiv->pll = pll_14nm;
+       pll_postdiv->shift = shift;
+       /* both N1 and N2 postdividers are 4 bits wide */
+       pll_postdiv->width = 4;
+       /* range of each divider is from 1 to 15 */
+       pll_postdiv->flags = CLK_DIVIDER_ONE_BASED;
+       pll_postdiv->hw.init = &postdiv_init;
+
+       ret = devm_clk_hw_register(dev, &pll_postdiv->hw);
+       if (ret)
+               return ERR_PTR(ret);
+
+       return &pll_postdiv->hw;
+}
+
+static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm, struct clk_hw **provided_clocks)
+{
+       char clk_name[32], parent[32], vco_name[32];
+       struct clk_init_data vco_init = {
+               .parent_names = (const char *[]){ "xo" },
+               .num_parents = 1,
+               .name = vco_name,
+               .flags = CLK_IGNORE_UNUSED,
+               .ops = &clk_ops_dsi_pll_14nm_vco,
+       };
+       struct device *dev = &pll_14nm->phy->pdev->dev;
+       struct clk_hw *hw;
+       int ret;
+
+       DBG("DSI%d", pll_14nm->phy->id);
+
+       snprintf(vco_name, 32, "dsi%dvco_clk", pll_14nm->phy->id);
+       pll_14nm->clk_hw.init = &vco_init;
+
+       ret = devm_clk_hw_register(dev, &pll_14nm->clk_hw);
+       if (ret)
+               return ret;
+
+       snprintf(clk_name, 32, "dsi%dn1_postdiv_clk", pll_14nm->phy->id);
+       snprintf(parent, 32, "dsi%dvco_clk", pll_14nm->phy->id);
+
+       /* N1 postdiv, bits 0-3 in REG_DSI_14nm_PHY_CMN_CLK_CFG0 */
+       hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent,
+                                      CLK_SET_RATE_PARENT, 0);
+       if (IS_ERR(hw))
+               return PTR_ERR(hw);
+
+       snprintf(clk_name, 32, "dsi%dpllbyte", pll_14nm->phy->id);
+       snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->phy->id);
+
+       /* DSI Byte clock = VCO_CLK / N1 / 8 */
+       hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
+                                         CLK_SET_RATE_PARENT, 1, 8);
+       if (IS_ERR(hw))
+               return PTR_ERR(hw);
+
+       provided_clocks[DSI_BYTE_PLL_CLK] = hw;
+
+       snprintf(clk_name, 32, "dsi%dn1_postdivby2_clk", pll_14nm->phy->id);
+       snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->phy->id);
+
+       /*
+        * Skip the mux for now: force DSICLK_SEL to 1 and add a /2 divider
+        * along the way. Don't let it set the parent rate.
+        */
+       hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 2);
+       if (IS_ERR(hw))
+               return PTR_ERR(hw);
+
+       snprintf(clk_name, 32, "dsi%dpll", pll_14nm->phy->id);
+       snprintf(parent, 32, "dsi%dn1_postdivby2_clk", pll_14nm->phy->id);
+
+       /*
+        * DSI pixel clock = VCO_CLK / N1 / 2 / N2.
+        * This is the output of the N2 post-divider, bits 4-7 in
+        * REG_DSI_14nm_PHY_CMN_CLK_CFG0. Don't let it set the parent rate.
+        */
+       hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent, 0, 4);
+       if (IS_ERR(hw))
+               return PTR_ERR(hw);
+
+       provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
+
+       return 0;
+}
+
+static int dsi_pll_14nm_init(struct msm_dsi_phy *phy)
+{
+       struct platform_device *pdev = phy->pdev;
+       struct dsi_pll_14nm *pll_14nm;
+       int ret;
+
+       if (!pdev)
+               return -ENODEV;
+
+       pll_14nm = devm_kzalloc(&pdev->dev, sizeof(*pll_14nm), GFP_KERNEL);
+       if (!pll_14nm)
+               return -ENOMEM;
+
+       DBG("PLL%d", phy->id);
+
+       pll_14nm_list[phy->id] = pll_14nm;
+
+       spin_lock_init(&pll_14nm->postdiv_lock);
+
+       pll_14nm->phy = phy;
+
+       ret = pll_14nm_register(pll_14nm, phy->provided_clocks->hws);
+       if (ret) {
+               DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+               return ret;
+       }
+
+       phy->vco_hw = &pll_14nm->clk_hw;
+
+       return 0;
+}
+
 static void dsi_14nm_dphy_set_timing(struct msm_dsi_phy *phy,
                                     struct msm_dsi_dphy_timing *timing,
                                     int lane_idx)
@@ -47,7 +938,7 @@ static void dsi_14nm_dphy_set_timing(struct msm_dsi_phy *phy,
                      DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(0xa0));
 }
 
-static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy,
                               struct msm_dsi_phy_clk_request *clk_req)
 {
        struct msm_dsi_dphy_timing *timing = &phy->timing;
@@ -56,6 +947,7 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
        int ret;
        void __iomem *base = phy->base;
        void __iomem *lane_base = phy->lane_base;
+       u32 glbl_test_ctrl;
 
        if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) {
                DRM_DEV_ERROR(&phy->pdev->dev,
@@ -103,11 +995,13 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
        udelay(100);
        dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x00);
 
-       msm_dsi_phy_set_src_pll(phy, src_pll_id,
-                               REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL,
-                               DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL);
-
-       ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
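+       /* open-coded bit clock source selection, replacing msm_dsi_phy_set_src_pll() */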
+       glbl_test_ctrl = dsi_phy_read(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL);
+       if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_SLAVE)
+               glbl_test_ctrl |= DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL;
+       else
+               glbl_test_ctrl &= ~DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL;
+       dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, glbl_test_ctrl);
+       ret = dsi_14nm_set_usecase(phy);
        if (ret) {
                DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
                        __func__, ret);
@@ -129,24 +1023,8 @@ static void dsi_14nm_phy_disable(struct msm_dsi_phy *phy)
        wmb();
 }
 
-static int dsi_14nm_phy_init(struct msm_dsi_phy *phy)
-{
-       struct platform_device *pdev = phy->pdev;
-
-       phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
-                               "DSI_PHY_LANE");
-       if (IS_ERR(phy->lane_base)) {
-               DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
-                       __func__);
-               return -ENOMEM;
-       }
-
-       return 0;
-}
-
 const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = {
-       .type = MSM_DSI_PHY_14NM,
-       .src_pll_truthtable = { {false, false}, {true, false} },
+       .has_phy_lane = true,
        .reg_cfg = {
                .num = 1,
                .regs = {
@@ -156,15 +1034,18 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = {
        .ops = {
                .enable = dsi_14nm_phy_enable,
                .disable = dsi_14nm_phy_disable,
-               .init = dsi_14nm_phy_init,
+               .pll_init = dsi_pll_14nm_init,
+               .save_pll_state = dsi_14nm_pll_save_state,
+               .restore_pll_state = dsi_14nm_pll_restore_state,
        },
+       .min_pll_rate = VCO_MIN_RATE,
+       .max_pll_rate = VCO_MAX_RATE,
        .io_start = { 0x994400, 0x996400 },
        .num_dsi_phy = 2,
 };
 
 const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = {
-       .type = MSM_DSI_PHY_14NM,
-       .src_pll_truthtable = { {false, false}, {true, false} },
+       .has_phy_lane = true,
        .reg_cfg = {
                .num = 1,
                .regs = {
@@ -174,8 +1055,12 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = {
        .ops = {
                .enable = dsi_14nm_phy_enable,
                .disable = dsi_14nm_phy_disable,
-               .init = dsi_14nm_phy_init,
+               .pll_init = dsi_pll_14nm_init,
+               .save_pll_state = dsi_14nm_pll_save_state,
+               .restore_pll_state = dsi_14nm_pll_restore_state,
        },
+       .min_pll_rate = VCO_MIN_RATE,
+       .max_pll_rate = VCO_MAX_RATE,
        .io_start = { 0xc994400, 0xc996000 },
        .num_dsi_phy = 2,
 };
index eca86bf..e96d789 100644
@@ -63,13 +63,14 @@ static void dsi_20nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
        dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_0, 0x03);
 }
 
-static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy,
                                struct msm_dsi_phy_clk_request *clk_req)
 {
        struct msm_dsi_dphy_timing *timing = &phy->timing;
        int i;
        void __iomem *base = phy->base;
        u32 cfg_4[4] = {0x20, 0x40, 0x20, 0x00};
+       u32 val;
 
        DBG("");
 
@@ -83,9 +84,12 @@ static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
 
        dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_0, 0xff);
 
-       msm_dsi_phy_set_src_pll(phy, src_pll_id,
-                               REG_DSI_20nm_PHY_GLBL_TEST_CTRL,
-                               DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL);
+       val = dsi_phy_read(base + REG_DSI_20nm_PHY_GLBL_TEST_CTRL);
+       if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_STANDALONE)
+               val |= DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
+       else
+               val &= ~DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
+       dsi_phy_write(base + REG_DSI_20nm_PHY_GLBL_TEST_CTRL, val);
 
        for (i = 0; i < 4; i++) {
                dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_3(i),
@@ -125,8 +129,7 @@ static void dsi_20nm_phy_disable(struct msm_dsi_phy *phy)
 }
 
 const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
-       .type = MSM_DSI_PHY_20NM,
-       .src_pll_truthtable = { {false, true}, {false, true} },
+       .has_phy_regulator = true,
        .reg_cfg = {
                .num = 2,
                .regs = {
@@ -137,7 +140,6 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
        .ops = {
                .enable = dsi_20nm_phy_enable,
                .disable = dsi_20nm_phy_disable,
-               .init = msm_dsi_phy_init_common,
        },
        .io_start = { 0xfd998500, 0xfd9a0500 },
        .num_dsi_phy = 2,
index c3c580c..3304acd 100644
@@ -3,9 +3,621 @@
  * Copyright (c) 2015, The Linux Foundation. All rights reserved.
  */
 
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
 #include "dsi_phy.h"
 #include "dsi.xml.h"
 
+/*
+ * DSI PLL 28nm - clock diagram (eg: DSI0):
+ *
+ *         dsi0analog_postdiv_clk
+ *                             |         dsi0indirect_path_div2_clk
+ *                             |          |
+ *                   +------+  |  +----+  |  |\   dsi0byte_mux
+ *  dsi0vco_clk --o--| DIV1 |--o--| /2 |--o--| \   |
+ *                |  +------+     +----+     | m|  |  +----+
+ *                |                          | u|--o--| /4 |-- dsi0pllbyte
+ *                |                          | x|     +----+
+ *                o--------------------------| /
+ *                |                          |/
+ *                |          +------+
+ *                o----------| DIV3 |------------------------- dsi0pll
+ *                           +------+
+ */
+
+#define POLL_MAX_READS                 10
+#define POLL_TIMEOUT_US                50
+
+#define VCO_REF_CLK_RATE               19200000
+#define VCO_MIN_RATE                   350000000
+#define VCO_MAX_RATE                   750000000
+
+/* v2.0.0 28nm LP implementation */
+#define DSI_PHY_28NM_QUIRK_PHY_LP      BIT(0)
+
+#define LPFR_LUT_SIZE                  10
+struct lpfr_cfg {
+       unsigned long vco_rate;
+       u32 resistance;
+};
+
+/* Loop filter resistance, selected by the lowest vco_rate entry >= the target rate: */
+static const struct lpfr_cfg lpfr_lut[LPFR_LUT_SIZE] = {
+       { 479500000,  8 },
+       { 480000000, 11 },
+       { 575500000,  8 },
+       { 576000000, 12 },
+       { 610500000,  8 },
+       { 659500000,  9 },
+       { 671500000, 10 },
+       { 672000000, 14 },
+       { 708500000, 10 },
+       { 750000000, 11 },
+};
+
+struct pll_28nm_cached_state {
+       unsigned long vco_rate;
+       u8 postdiv3;
+       u8 postdiv1;
+       u8 byte_mux;
+};
+
+struct dsi_pll_28nm {
+       struct clk_hw clk_hw;
+
+       struct msm_dsi_phy *phy;
+
+       struct pll_28nm_cached_state cached_state;
+};
+
+#define to_pll_28nm(x) container_of(x, struct dsi_pll_28nm, clk_hw)
+
+static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
+                               u32 nb_tries, u32 timeout_us)
+{
+       bool pll_locked = false;
+       u32 val;
+
+       while (nb_tries--) {
+               val = dsi_phy_read(pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_STATUS);
+               pll_locked = !!(val & DSI_28nm_PHY_PLL_STATUS_PLL_RDY);
+
+               if (pll_locked)
+                       break;
+
+               udelay(timeout_us);
+       }
+       DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
+
+       return pll_locked;
+}
+
+static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)
+{
+       void __iomem *base = pll_28nm->phy->pll_base;
+
+       /*
+        * Add HW recommended delays after toggling the software
+        * reset bit off and back on.
+        */
+       dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG,
+                       DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);
+       dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, 0x00, 1);
+}
+
+/*
+ * Clock Callbacks
+ */
+static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long parent_rate)
+{
+       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+       struct device *dev = &pll_28nm->phy->pdev->dev;
+       void __iomem *base = pll_28nm->phy->pll_base;
+       unsigned long div_fbx1000, gen_vco_clk;
+       u32 refclk_cfg, frac_n_mode, frac_n_value;
+       u32 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3;
+       u32 cal_cfg10, cal_cfg11;
+       u32 rem;
+       int i;
+
+       VERB("rate=%lu, parent's=%lu", rate, parent_rate);
+
+       /* Force postdiv2 to be div-4 */
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG, 3);
+
+       /* Configure the Loop filter resistance */
+       for (i = 0; i < LPFR_LUT_SIZE; i++)
+               if (rate <= lpfr_lut[i].vco_rate)
+                       break;
+       if (i == LPFR_LUT_SIZE) {
+               DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n",
+                               rate);
+               return -EINVAL;
+       }
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFR_CFG, lpfr_lut[i].resistance);
+
+       /* Loop filter capacitance values : c1 and c2 */
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFC1_CFG, 0x70);
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFC2_CFG, 0x15);
+
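+       /*
+        * Use fractional-N mode (with the refclk doubler) when the target
+        * rate is not an integer multiple of the 19.2 MHz reference;
+        * div_fbx1000 is the feedback divider scaled by 1000 so the
+        * fractional part survives the integer math below.
+        */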
+       rem = rate % VCO_REF_CLK_RATE;
+       if (rem) {
+               refclk_cfg = DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
+               frac_n_mode = 1;
+               div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500);
+               gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 500);
+       } else {
+               refclk_cfg = 0x0;
+               frac_n_mode = 0;
+               div_fbx1000 = rate / (VCO_REF_CLK_RATE / 1000);
+               gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 1000);
+       }
+
+       DBG("refclk_cfg = %d", refclk_cfg);
+
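+       /* 16-bit fractional seed: the divider's fractional part in units of 1/65536 */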
+       rem = div_fbx1000 % 1000;
+       frac_n_value = (rem << 16) / 1000;
+
+       DBG("div_fb = %lu", div_fbx1000);
+       DBG("frac_n_value = %d", frac_n_value);
+
+       DBG("Generated VCO Clock: %lu", gen_vco_clk);
+       rem = 0;
+       sdm_cfg1 = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);
+       sdm_cfg1 &= ~DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
+       if (frac_n_mode) {
+               sdm_cfg0 = 0x0;
+               sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(0);
+               sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(
+                               (u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
+               sdm_cfg3 = frac_n_value >> 8;
+               sdm_cfg2 = frac_n_value & 0xff;
+       } else {
+               sdm_cfg0 = DSI_28nm_PHY_PLL_SDM_CFG0_BYP;
+               sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(
+                               (u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
+               sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(0);
+               sdm_cfg2 = 0;
+               sdm_cfg3 = 0;
+       }
+
+       DBG("sdm_cfg0=%d", sdm_cfg0);
+       DBG("sdm_cfg1=%d", sdm_cfg1);
+       DBG("sdm_cfg2=%d", sdm_cfg2);
+       DBG("sdm_cfg3=%d", sdm_cfg3);
+
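+       /* calibration target: the VCO rate in MHz, encoded as cal_cfg11 * 256 + cal_cfg10 */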
+       cal_cfg11 = (u32)(gen_vco_clk / (256 * 1000000));
+       cal_cfg10 = (u32)((gen_vco_clk % (256 * 1000000)) / 1000000);
+       DBG("cal_cfg10=%d, cal_cfg11=%d", cal_cfg10, cal_cfg11);
+
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG, 0x02);
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG3,    0x2b);
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG4,    0x06);
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,  0x0d);
+
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1, sdm_cfg1);
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2,
+               DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3,
+               DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG4, 0x00);
+
+       /* Add hardware recommended delay for correct PLL configuration */
+       if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
+               udelay(1000);
+       else
+               udelay(1);
+
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG, refclk_cfg);
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG, 0x00);
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_VCOLPF_CFG, 0x31);
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0,   sdm_cfg0);
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG0,   0x12);
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG6,   0x30);
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG7,   0x00);
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG8,   0x60);
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG9,   0x00);
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG10,  cal_cfg10 & 0xff);
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG11,  cal_cfg11 & 0xff);
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_EFUSE_CFG,  0x20);
+
+       return 0;
+}
+
+static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
+{
+       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+
+       return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
+                                       POLL_TIMEOUT_US);
+}
+
+static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
+               unsigned long parent_rate)
+{
+       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+       void __iomem *base = pll_28nm->phy->pll_base;
+       u32 sdm0, doubler, sdm_byp_div;
+       u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;
+       u32 ref_clk = VCO_REF_CLK_RATE;
+       unsigned long vco_rate;
+
+       VERB("parent_rate=%lu", parent_rate);
+
+       /* Check to see if the ref clk doubler is enabled */
+       doubler = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG) &
+                       DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
+       ref_clk += (doubler * VCO_REF_CLK_RATE);
+
+       /* see if it is integer mode or sdm mode */
+       sdm0 = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);
+       if (sdm0 & DSI_28nm_PHY_PLL_SDM_CFG0_BYP) {
+               /* integer mode */
+               sdm_byp_div = FIELD(
+                               dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0),
+                               DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV) + 1;
+               vco_rate = ref_clk * sdm_byp_div;
+       } else {
+               /* sdm mode */
+               sdm_dc_off = FIELD(
+                               dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1),
+                               DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET);
+               DBG("sdm_dc_off = %d", sdm_dc_off);
+               sdm2 = FIELD(dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2),
+                               DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0);
+               sdm3 = FIELD(dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3),
+                               DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8);
+               sdm_freq_seed = (sdm3 << 8) | sdm2;
+               DBG("sdm_freq_seed = %d", sdm_freq_seed);
+
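+               /*
+                * VCO = ref * (DC_OFFSET + 1) + ref * freq_seed / 2^16:
+                * the integer part plus the 16-bit sigma-delta fraction.
+                */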
+               vco_rate = (ref_clk * (sdm_dc_off + 1)) +
+                       mult_frac(ref_clk, sdm_freq_seed, BIT(16));
+               DBG("vco rate = %lu", vco_rate);
+       }
+
+       DBG("returning vco rate = %lu", vco_rate);
+
+       return vco_rate;
+}
+
+static int _dsi_pll_28nm_vco_prepare_hpm(struct dsi_pll_28nm *pll_28nm)
+{
+       struct device *dev = &pll_28nm->phy->pdev->dev;
+       void __iomem *base = pll_28nm->phy->pll_base;
+       u32 max_reads = 5, timeout_us = 100;
+       bool locked;
+       u32 val;
+       int i;
+
+       DBG("id=%d", pll_28nm->phy->id);
+
+       pll_28nm_software_reset(pll_28nm);
+
+       /*
+        * PLL power up sequence.
+        * Add necessary delays recommended by hardware.
+        */
+       val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
+       dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);
+
+       val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
+       dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
+
+       val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+       dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+       val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
+       dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
+
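+       /* poll for lock; on failure, reset and rerun the power-up sequence once */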
+       for (i = 0; i < 2; i++) {
+               /* DSI Uniphy lock detect setting */
+               dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,
+                               0x0c, 100);
+               dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
+
+               /* poll for PLL ready status */
+               locked = pll_28nm_poll_for_ready(pll_28nm,
+                                               max_reads, timeout_us);
+               if (locked)
+                       break;
+
+               pll_28nm_software_reset(pll_28nm);
+
+               /*
+                * PLL power up sequence.
+                * Add necessary delays recommended by hardware.
+                */
+               val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
+               dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);
+
+               val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
+               dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
+
+               val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+               dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 250);
+
+               val &= ~DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+               dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
+
+               val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+               dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+               val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
+               dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
+       }
+
+       if (unlikely(!locked))
+               DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
+       else
+               DBG("DSI PLL Lock success");
+
+       return locked ? 0 : -EINVAL;
+}
+
+static int dsi_pll_28nm_vco_prepare_hpm(struct clk_hw *hw)
+{
+       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+       int i, ret;
+
+       if (unlikely(pll_28nm->phy->pll_on))
+               return 0;
+
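+       /* retry the full prepare sequence up to three times before giving up */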
+       for (i = 0; i < 3; i++) {
+               ret = _dsi_pll_28nm_vco_prepare_hpm(pll_28nm);
+               if (!ret) {
+                       pll_28nm->phy->pll_on = true;
+                       return 0;
+               }
+       }
+
+       return ret;
+}
+
+static int dsi_pll_28nm_vco_prepare_lp(struct clk_hw *hw)
+{
+       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+       struct device *dev = &pll_28nm->phy->pdev->dev;
+       void __iomem *base = pll_28nm->phy->pll_base;
+       bool locked;
+       u32 max_reads = 10, timeout_us = 50;
+       u32 val;
+
+       DBG("id=%d", pll_28nm->phy->id);
+
+       if (unlikely(pll_28nm->phy->pll_on))
+               return 0;
+
+       pll_28nm_software_reset(pll_28nm);
+
+       /*
+        * PLL power up sequence.
+        * Add necessary delays recommended by hardware.
+        */
+       dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_CAL_CFG1, 0x34, 500);
+
+       val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
+       dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+       val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
+       dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+       val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B |
+               DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
+       dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+       /* DSI PLL toggle lock detect setting */
+       dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x04, 500);
+       dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x05, 512);
+
+       locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
+
+       if (unlikely(!locked)) {
+               DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
+               return -EINVAL;
+       }
+
+       DBG("DSI PLL lock success");
+       pll_28nm->phy->pll_on = true;
+
+       return 0;
+}
+
+static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw)
+{
+       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+
+       DBG("id=%d", pll_28nm->phy->id);
+
+       if (unlikely(!pll_28nm->phy->pll_on))
+               return;
+
+       dsi_phy_write(pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_GLB_CFG, 0x00);
+
+       pll_28nm->phy->pll_on = false;
+}
+
+static long dsi_pll_28nm_clk_round_rate(struct clk_hw *hw,
+               unsigned long rate, unsigned long *parent_rate)
+{
+       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+
+       if      (rate < pll_28nm->phy->cfg->min_pll_rate)
+               return  pll_28nm->phy->cfg->min_pll_rate;
+       else if (rate > pll_28nm->phy->cfg->max_pll_rate)
+               return  pll_28nm->phy->cfg->max_pll_rate;
+       else
+               return rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_28nm_vco_hpm = {
+       .round_rate = dsi_pll_28nm_clk_round_rate,
+       .set_rate = dsi_pll_28nm_clk_set_rate,
+       .recalc_rate = dsi_pll_28nm_clk_recalc_rate,
+       .prepare = dsi_pll_28nm_vco_prepare_hpm,
+       .unprepare = dsi_pll_28nm_vco_unprepare,
+       .is_enabled = dsi_pll_28nm_clk_is_enabled,
+};
+
+static const struct clk_ops clk_ops_dsi_pll_28nm_vco_lp = {
+       .round_rate = dsi_pll_28nm_clk_round_rate,
+       .set_rate = dsi_pll_28nm_clk_set_rate,
+       .recalc_rate = dsi_pll_28nm_clk_recalc_rate,
+       .prepare = dsi_pll_28nm_vco_prepare_lp,
+       .unprepare = dsi_pll_28nm_vco_unprepare,
+       .is_enabled = dsi_pll_28nm_clk_is_enabled,
+};
+
+/*
+ * PLL Callbacks
+ */
+
+static void dsi_28nm_pll_save_state(struct msm_dsi_phy *phy)
+{
+       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
+       struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
+       void __iomem *base = pll_28nm->phy->pll_base;
+
+       cached_state->postdiv3 =
+                       dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);
+       cached_state->postdiv1 =
+                       dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
+       cached_state->byte_mux = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
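+       /* only cache the VCO rate when the PLL is actually locked */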
+       if (dsi_pll_28nm_clk_is_enabled(phy->vco_hw))
+               cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw);
+       else
+               cached_state->vco_rate = 0;
+}
+
+static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
+{
+       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
+       struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
+       void __iomem *base = pll_28nm->phy->pll_base;
+       int ret;
+
+       ret = dsi_pll_28nm_clk_set_rate(phy->vco_hw,
+                                       cached_state->vco_rate, 0);
+       if (ret) {
+               DRM_DEV_ERROR(&pll_28nm->phy->pdev->dev,
+                       "restore vco rate failed. ret=%d\n", ret);
+               return ret;
+       }
+
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
+                       cached_state->postdiv3);
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
+                       cached_state->postdiv1);
+       dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
+                       cached_state->byte_mux);
+
+       return 0;
+}
+
+static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks)
+{
+       char clk_name[32], parent1[32], parent2[32], vco_name[32];
+       struct clk_init_data vco_init = {
+               .parent_names = (const char *[]){ "xo" },
+               .num_parents = 1,
+               .name = vco_name,
+               .flags = CLK_IGNORE_UNUSED,
+       };
+       struct device *dev = &pll_28nm->phy->pdev->dev;
+       struct clk_hw *hw;
+       int ret;
+
+       DBG("%d", pll_28nm->phy->id);
+
+       if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
+               vco_init.ops = &clk_ops_dsi_pll_28nm_vco_lp;
+       else
+               vco_init.ops = &clk_ops_dsi_pll_28nm_vco_hpm;
+
+       snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->phy->id);
+       pll_28nm->clk_hw.init = &vco_init;
+       ret = devm_clk_hw_register(dev, &pll_28nm->clk_hw);
+       if (ret)
+               return ret;
+
+       snprintf(clk_name, 32, "dsi%danalog_postdiv_clk", pll_28nm->phy->id);
+       snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->phy->id);
+       hw = devm_clk_hw_register_divider(dev, clk_name,
+                       parent1, CLK_SET_RATE_PARENT,
+                       pll_28nm->phy->pll_base +
+                       REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
+                       0, 4, 0, NULL);
+       if (IS_ERR(hw))
+               return PTR_ERR(hw);
+
+       snprintf(clk_name, 32, "dsi%dindirect_path_div2_clk", pll_28nm->phy->id);
+       snprintf(parent1, 32, "dsi%danalog_postdiv_clk", pll_28nm->phy->id);
+       hw = devm_clk_hw_register_fixed_factor(dev, clk_name,
+                       parent1, CLK_SET_RATE_PARENT,
+                       1, 2);
+       if (IS_ERR(hw))
+               return PTR_ERR(hw);
+
+       snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id);
+       snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->phy->id);
+       hw = devm_clk_hw_register_divider(dev, clk_name,
+                               parent1, 0, pll_28nm->phy->pll_base +
+                               REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
+                               0, 8, 0, NULL);
+       if (IS_ERR(hw))
+               return PTR_ERR(hw);
+       provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
+
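+       /* byte_mux: 2:1 mux between the VCO and the /2 path, bit 1 of VREG_CFG */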
+       snprintf(clk_name, 32, "dsi%dbyte_mux", pll_28nm->phy->id);
+       snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->phy->id);
+       snprintf(parent2, 32, "dsi%dindirect_path_div2_clk", pll_28nm->phy->id);
+       hw = devm_clk_hw_register_mux(dev, clk_name,
+                       ((const char *[]){
+                               parent1, parent2
+                       }), 2, CLK_SET_RATE_PARENT, pll_28nm->phy->pll_base +
+                       REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
+       if (IS_ERR(hw))
+               return PTR_ERR(hw);
+
+       snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id);
+       snprintf(parent1, 32, "dsi%dbyte_mux", pll_28nm->phy->id);
+       hw = devm_clk_hw_register_fixed_factor(dev, clk_name,
+                               parent1, CLK_SET_RATE_PARENT, 1, 4);
+       if (IS_ERR(hw))
+               return PTR_ERR(hw);
+       provided_clocks[DSI_BYTE_PLL_CLK] = hw;
+
+       return 0;
+}
+
+static int dsi_pll_28nm_init(struct msm_dsi_phy *phy)
+{
+       struct platform_device *pdev = phy->pdev;
+       struct dsi_pll_28nm *pll_28nm;
+       int ret;
+
+       if (!pdev)
+               return -ENODEV;
+
+       pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
+       if (!pll_28nm)
+               return -ENOMEM;
+
+       pll_28nm->phy = phy;
+
+       ret = pll_28nm_register(pll_28nm, phy->provided_clocks->hws);
+       if (ret) {
+               DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+               return ret;
+       }
+
+       phy->vco_hw = &pll_28nm->clk_hw;
+
+       return 0;
+}
+
 static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
                struct msm_dsi_dphy_timing *timing)
 {
@@ -66,7 +678,7 @@ static void dsi_28nm_phy_regulator_enable_ldo(struct msm_dsi_phy *phy)
        dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x1);
        dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
 
-       if (phy->cfg->type == MSM_DSI_PHY_28NM_LP)
+       if (phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
                dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x05);
        else
                dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x0d);
@@ -86,12 +698,13 @@ static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
                dsi_28nm_phy_regulator_enable_dcdc(phy);
 }
 
-static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
                                struct msm_dsi_phy_clk_request *clk_req)
 {
        struct msm_dsi_dphy_timing *timing = &phy->timing;
        int i;
        void __iomem *base = phy->base;
+       u32 val;
 
        DBG("");
 
@@ -131,9 +744,12 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
 
        dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
 
-       msm_dsi_phy_set_src_pll(phy, src_pll_id,
-                               REG_DSI_28nm_PHY_GLBL_TEST_CTRL,
-                               DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL);
+       val = dsi_phy_read(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL);
+       if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_SLAVE)
+               val &= ~DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
+       else
+               val |= DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
+       dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, val);
 
        return 0;
 }
@@ -151,8 +767,7 @@ static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
 }
 
 const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
-       .type = MSM_DSI_PHY_28NM_HPM,
-       .src_pll_truthtable = { {true, true}, {false, true} },
+       .has_phy_regulator = true,
        .reg_cfg = {
                .num = 1,
                .regs = {
@@ -162,15 +777,18 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
        .ops = {
                .enable = dsi_28nm_phy_enable,
                .disable = dsi_28nm_phy_disable,
-               .init = msm_dsi_phy_init_common,
+               .pll_init = dsi_pll_28nm_init,
+               .save_pll_state = dsi_28nm_pll_save_state,
+               .restore_pll_state = dsi_28nm_pll_restore_state,
        },
+       .min_pll_rate = VCO_MIN_RATE,
+       .max_pll_rate = VCO_MAX_RATE,
        .io_start = { 0xfd922b00, 0xfd923100 },
        .num_dsi_phy = 2,
 };
 
 const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = {
-       .type = MSM_DSI_PHY_28NM_HPM,
-       .src_pll_truthtable = { {true, true}, {false, true} },
+       .has_phy_regulator = true,
        .reg_cfg = {
                .num = 1,
                .regs = {
@@ -180,15 +798,18 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs = {
        .ops = {
                .enable = dsi_28nm_phy_enable,
                .disable = dsi_28nm_phy_disable,
-               .init = msm_dsi_phy_init_common,
+               .pll_init = dsi_pll_28nm_init,
+               .save_pll_state = dsi_28nm_pll_save_state,
+               .restore_pll_state = dsi_28nm_pll_restore_state,
        },
+       .min_pll_rate = VCO_MIN_RATE,
+       .max_pll_rate = VCO_MAX_RATE,
        .io_start = { 0x1a94400, 0x1a96400 },
        .num_dsi_phy = 2,
 };
 
 const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
-       .type = MSM_DSI_PHY_28NM_LP,
-       .src_pll_truthtable = { {true, true}, {true, true} },
+       .has_phy_regulator = true,
        .reg_cfg = {
                .num = 1,
                .regs = {
@@ -198,9 +819,14 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
        .ops = {
                .enable = dsi_28nm_phy_enable,
                .disable = dsi_28nm_phy_disable,
-               .init = msm_dsi_phy_init_common,
+               .pll_init = dsi_pll_28nm_init,
+               .save_pll_state = dsi_28nm_pll_save_state,
+               .restore_pll_state = dsi_28nm_pll_restore_state,
        },
+       .min_pll_rate = VCO_MIN_RATE,
+       .max_pll_rate = VCO_MAX_RATE,
        .io_start = { 0x1a98500 },
        .num_dsi_phy = 1,
+       .quirks = DSI_PHY_28NM_QUIRK_PHY_LP,
 };
 
index f225833..582b142 100644
  * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
  */
 
+#include <linux/clk-provider.h>
 #include <linux/delay.h>
 
 #include "dsi_phy.h"
 #include "dsi.xml.h"
 
+/*
+ * DSI PLL 28nm (8960/A family) - clock diagram (eg: DSI1):
+ *
+ *
+ *                        +------+
+ *  dsi1vco_clk ----o-----| DIV1 |---dsi1pllbit (not exposed as clock)
+ *  F * byte_clk    |     +------+
+ *                  | bit clock divider (F / 8)
+ *                  |
+ *                  |     +------+
+ *                  o-----| DIV2 |---dsi0pllbyte---o---> To byte RCG
+ *                  |     +------+                 | (sets parent rate)
+ *                  | byte clock divider (F)       |
+ *                  |                              |
+ *                  |                              o---> To esc RCG
+ *                  |                                (doesn't set parent rate)
+ *                  |
+ *                  |     +------+
+ *                  o-----| DIV3 |----dsi0pll------o---> To dsi RCG
+ *                        +------+                 | (sets parent rate)
+ *                  dsi clock divider (F * magic)  |
+ *                                                 |
+ *                                                 o---> To pixel rcg
+ *                                                  (doesn't set parent rate)
+ */
+
+#define POLL_MAX_READS         8000
+#define POLL_TIMEOUT_US                1
+
+#define VCO_REF_CLK_RATE       27000000
+#define VCO_MIN_RATE           600000000
+#define VCO_MAX_RATE           1200000000
+
+#define VCO_PREF_DIV_RATIO     27
+
+struct pll_28nm_cached_state {
+       unsigned long vco_rate;
+       u8 postdiv3;
+       u8 postdiv2;
+       u8 postdiv1;
+};
+
+struct clk_bytediv {
+       struct clk_hw hw;
+       void __iomem *reg;
+};
+
+struct dsi_pll_28nm {
+       struct clk_hw clk_hw;
+
+       struct msm_dsi_phy *phy;
+
+       struct pll_28nm_cached_state cached_state;
+};
+
+#define to_pll_28nm(x) container_of(x, struct dsi_pll_28nm, clk_hw)
+
+static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
+                                   int nb_tries, int timeout_us)
+{
+       bool pll_locked = false;
+       u32 val;
+
+       while (nb_tries--) {
+               val = dsi_phy_read(pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_RDY);
+               pll_locked = !!(val & DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY);
+
+               if (pll_locked)
+                       break;
+
+               udelay(timeout_us);
+       }
+       DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
+
+       return pll_locked;
+}
+
+/*
+ * Clock Callbacks
+ */
+static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+                                    unsigned long parent_rate)
+{
+       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+       void __iomem *base = pll_28nm->phy->pll_base;
+       u32 val, temp, fb_divider;
+
+       DBG("rate=%lu, parent's=%lu", rate, parent_rate);
+
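+       /*
+        * fb_divider = rate * VCO_PREF_DIV_RATIO / (2 * ref) - 1; rate and
+        * ref are pre-divided by 10 to keep the multiply within 32 bits.
+        */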
+       temp = rate / 10;
+       val = VCO_REF_CLK_RATE / 10;
+       fb_divider = (temp * VCO_PREF_DIV_RATIO) / val;
+       fb_divider = fb_divider / 2 - 1;
+       dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1,
+                       fb_divider & 0xff);
+
+       val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2);
+
+       val |= (fb_divider >> 8) & 0x07;
+
+       dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2,
+                       val);
+
+       val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
+
+       val |= (VCO_PREF_DIV_RATIO - 1) & 0x3f;
+
+       dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3,
+                       val);
+
+       dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_6,
+                       0xf);
+
+       val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
+       val |= 0x7 << 4;
+       dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
+                       val);
+
+       return 0;
+}
+
+static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
+{
+       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+
+       return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
+                                       POLL_TIMEOUT_US);
+}
+
+static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
+                                                 unsigned long parent_rate)
+{
+       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+       void __iomem *base = pll_28nm->phy->pll_base;
+       unsigned long vco_rate;
+       u32 status, fb_divider, temp, ref_divider;
+
+       VERB("parent_rate=%lu", parent_rate);
+
+       status = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0);
+
+       if (status & DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE) {
+               fb_divider = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1);
+               fb_divider &= 0xff;
+               temp = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2) & 0x07;
+               fb_divider = (temp << 8) | fb_divider;
+               fb_divider += 1;
+
+               ref_divider = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
+               ref_divider &= 0x3f;
+               ref_divider += 1;
+
+               /* multiply by 2 */
+               vco_rate = (parent_rate / ref_divider) * fb_divider * 2;
+       } else {
+               vco_rate = 0;
+       }
+
+       DBG("returning vco rate = %lu", vco_rate);
+
+       return vco_rate;
+}
+
+static int dsi_pll_28nm_vco_prepare(struct clk_hw *hw)
+{
+       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+       struct device *dev = &pll_28nm->phy->pdev->dev;
+       void __iomem *base = pll_28nm->phy->pll_base;
+       bool locked;
+       unsigned int bit_div, byte_div;
+       int max_reads = 1000, timeout_us = 100;
+       u32 val;
+
+       DBG("id=%d", pll_28nm->phy->id);
+
+       if (unlikely(pll_28nm->phy->pll_on))
+               return 0;
+
+       /*
+        * Before enabling the PLL, configure the bit clock divider, since we
+        * don't expose it as a clock to the outside world:
+        * 1: read back the byte clock divider that should already be set
+        * 2: divide by 8 to get the bit clock divider
+        * 3: write it to POSTDIV1
+        */
+       val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
+       byte_div = val + 1;
+       bit_div = byte_div / 8;
+
+       val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
+       val &= ~0xf;
+       val |= (bit_div - 1);
+       dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8, val);
+
+       /* enable the PLL */
+       dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0,
+                       DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE);
+
+       locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
+
+       if (unlikely(!locked)) {
+               DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
+               return -EINVAL;
+       }
+
+       DBG("DSI PLL lock success");
+       pll_28nm->phy->pll_on = true;
+
+       return 0;
+}
+
+static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw)
+{
+       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+
+       DBG("id=%d", pll_28nm->phy->id);
+
+       if (unlikely(!pll_28nm->phy->pll_on))
+               return;
+
+       dsi_phy_write(pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0, 0x00);
+
+       pll_28nm->phy->pll_on = false;
+}
+
+static long dsi_pll_28nm_clk_round_rate(struct clk_hw *hw,
+               unsigned long rate, unsigned long *parent_rate)
+{
+       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
+
+       if      (rate < pll_28nm->phy->cfg->min_pll_rate)
+               return  pll_28nm->phy->cfg->min_pll_rate;
+       else if (rate > pll_28nm->phy->cfg->max_pll_rate)
+               return  pll_28nm->phy->cfg->max_pll_rate;
+       else
+               return rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
+       .round_rate = dsi_pll_28nm_clk_round_rate,
+       .set_rate = dsi_pll_28nm_clk_set_rate,
+       .recalc_rate = dsi_pll_28nm_clk_recalc_rate,
+       .prepare = dsi_pll_28nm_vco_prepare,
+       .unprepare = dsi_pll_28nm_vco_unprepare,
+       .is_enabled = dsi_pll_28nm_clk_is_enabled,
+};
+
+/*
+ * Custom byte clock divider clk_ops
+ *
+ * This clock is the entry point to configuring the PLL. The user (dsi host)
+ * will set this clock's rate to the desired byte clock rate. The VCO lock
+ * frequency is a multiple of the byte clock rate. The multiplication factor
+ * (shown as F in the diagram above) is a function of the byte clock rate.
+ *
+ * This custom divider clock ensures that its parent (VCO) is set to the
+ * desired rate, and that the byte clock postdivider (POSTDIV2) is configured
+ * accordingly.
+ */
+#define to_clk_bytediv(_hw) container_of(_hw, struct clk_bytediv, hw)
+
+static unsigned long clk_bytediv_recalc_rate(struct clk_hw *hw,
+               unsigned long parent_rate)
+{
+       struct clk_bytediv *bytediv = to_clk_bytediv(hw);
+       unsigned int div;
+
+       div = dsi_phy_read(bytediv->reg) & 0xff;
+
+       return parent_rate / (div + 1);
+}
+
+/* find the multiplication factor (wrt the byte clock) at which the VCO should be set */
+static unsigned int get_vco_mul_factor(unsigned long byte_clk_rate)
+{
+       unsigned long bit_mhz;
+
+       /* convert to bit clock in MHz */
+       bit_mhz = (byte_clk_rate * 8) / 1000000;
+
+       if (bit_mhz < 125)
+               return 64;
+       else if (bit_mhz < 250)
+               return 32;
+       else if (bit_mhz < 600)
+               return 16;
+       else
+               return 8;
+}
+
+static long clk_bytediv_round_rate(struct clk_hw *hw, unsigned long rate,
+                                  unsigned long *prate)
+{
+       unsigned long best_parent;
+       unsigned int factor;
+
+       factor = get_vco_mul_factor(rate);
+
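+       /* round the VCO to F times the requested byte rate, then divide back */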
+       best_parent = rate * factor;
+       *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+
+       return *prate / factor;
+}
+
+static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate,
+                               unsigned long parent_rate)
+{
+       struct clk_bytediv *bytediv = to_clk_bytediv(hw);
+       u32 val;
+       unsigned int factor;
+
+       factor = get_vco_mul_factor(rate);
+
+       val = dsi_phy_read(bytediv->reg);
+       val |= (factor - 1) & 0xff;
+       dsi_phy_write(bytediv->reg, val);
+
+       return 0;
+}
+
+/* Our special byte clock divider ops */
+static const struct clk_ops clk_bytediv_ops = {
+       .round_rate = clk_bytediv_round_rate,
+       .set_rate = clk_bytediv_set_rate,
+       .recalc_rate = clk_bytediv_recalc_rate,
+};
+
+/*
+ * PLL Callbacks
+ */
+static void dsi_28nm_pll_save_state(struct msm_dsi_phy *phy)
+{
+       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
+       struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
+       void __iomem *base = pll_28nm->phy->pll_base;
+
+       cached_state->postdiv3 =
+                       dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10);
+       cached_state->postdiv2 =
+                       dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
+       cached_state->postdiv1 =
+                       dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
+
+       cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw);
+}
+
+static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
+{
+       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(phy->vco_hw);
+       struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
+       void __iomem *base = pll_28nm->phy->pll_base;
+       int ret;
+
+       ret = dsi_pll_28nm_clk_set_rate(phy->vco_hw,
+                                       cached_state->vco_rate, 0);
+       if (ret) {
+               DRM_DEV_ERROR(&pll_28nm->phy->pdev->dev,
+                       "restore vco rate failed. ret=%d\n", ret);
+               return ret;
+       }
+
+       dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
+                       cached_state->postdiv3);
+       dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9,
+                       cached_state->postdiv2);
+       dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
+                       cached_state->postdiv1);
+
+       return 0;
+}
+
+static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **provided_clocks)
+{
+       char *clk_name, *parent_name, *vco_name;
+       struct clk_init_data vco_init = {
+               .parent_names = (const char *[]){ "pxo" },
+               .num_parents = 1,
+               .flags = CLK_IGNORE_UNUSED,
+               .ops = &clk_ops_dsi_pll_28nm_vco,
+       };
+       struct device *dev = &pll_28nm->phy->pdev->dev;
+       struct clk_hw *hw;
+       struct clk_bytediv *bytediv;
+       struct clk_init_data bytediv_init = { };
+       int ret;
+
+       DBG("%d", pll_28nm->phy->id);
+
+       bytediv = devm_kzalloc(dev, sizeof(*bytediv), GFP_KERNEL);
+       if (!bytediv)
+               return -ENOMEM;
+
+       vco_name = devm_kzalloc(dev, 32, GFP_KERNEL);
+       if (!vco_name)
+               return -ENOMEM;
+
+       clk_name = devm_kzalloc(dev, 32, GFP_KERNEL);
+       if (!clk_name)
+               return -ENOMEM;
+
+       parent_name = devm_kzalloc(dev, 32, GFP_KERNEL);
+       if (!parent_name)
+               return -ENOMEM;
+
+       snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->phy->id);
+       vco_init.name = vco_name;
+
+       pll_28nm->clk_hw.init = &vco_init;
+
+       ret = devm_clk_hw_register(dev, &pll_28nm->clk_hw);
+       if (ret)
+               return ret;
+
+       /* prepare and register bytediv */
+       bytediv->hw.init = &bytediv_init;
+       bytediv->reg = pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;
+
+       snprintf(parent_name, 32, "dsi%dvco_clk", pll_28nm->phy->id);
+       snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->phy->id);
+
+       bytediv_init.name = clk_name;
+       bytediv_init.ops = &clk_bytediv_ops;
+       bytediv_init.flags = CLK_SET_RATE_PARENT;
+       bytediv_init.parent_names = (const char * const *) &parent_name;
+       bytediv_init.num_parents = 1;
+
+       /* DIV2 */
+       ret = devm_clk_hw_register(dev, &bytediv->hw);
+       if (ret)
+               return ret;
+       provided_clocks[DSI_BYTE_PLL_CLK] = &bytediv->hw;
+
+       snprintf(clk_name, 32, "dsi%dpll", pll_28nm->phy->id);
+       /* DIV3 */
+       hw = devm_clk_hw_register_divider(dev, clk_name,
+                               parent_name, 0, pll_28nm->phy->pll_base +
+                               REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
+                               0, 8, 0, NULL);
+       if (IS_ERR(hw))
+               return PTR_ERR(hw);
+       provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
+
+       return 0;
+}
+
+static int dsi_pll_28nm_8960_init(struct msm_dsi_phy *phy)
+{
+       struct platform_device *pdev = phy->pdev;
+       struct dsi_pll_28nm *pll_28nm;
+       int ret;
+
+       if (!pdev)
+               return -ENODEV;
+
+       pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
+       if (!pll_28nm)
+               return -ENOMEM;
+
+       pll_28nm->phy = phy;
+
+       ret = pll_28nm_register(pll_28nm, phy->provided_clocks->hws);
+       if (ret) {
+               DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+               return ret;
+       }
+
+       phy->vco_hw = &pll_28nm->clk_hw;
+
+       return 0;
+}
+
 static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
                struct msm_dsi_dphy_timing *timing)
 {
@@ -117,7 +585,7 @@ static void dsi_28nm_phy_lane_config(struct msm_dsi_phy *phy)
        dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR1, 0x88);
 }
 
-static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
                                struct msm_dsi_phy_clk_request *clk_req)
 {
        struct msm_dsi_dphy_timing *timing = &phy->timing;
@@ -174,8 +642,7 @@ static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
 }
 
 const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
-       .type = MSM_DSI_PHY_28NM_8960,
-       .src_pll_truthtable = { {true, true}, {false, true} },
+       .has_phy_regulator = true,
        .reg_cfg = {
                .num = 1,
                .regs = {
@@ -185,8 +652,12 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
        .ops = {
                .enable = dsi_28nm_phy_enable,
                .disable = dsi_28nm_phy_disable,
-               .init = msm_dsi_phy_init_common,
+               .pll_init = dsi_pll_28nm_8960_init,
+               .save_pll_state = dsi_28nm_pll_save_state,
+               .restore_pll_state = dsi_28nm_pll_restore_state,
        },
+       .min_pll_rate = VCO_MIN_RATE,
+       .max_pll_rate = VCO_MAX_RATE,
        .io_start = { 0x4700300, 0x5800300 },
        .num_dsi_phy = 2,
 };
index 79c034a..e76ce40 100644
  * Copyright (c) 2018, The Linux Foundation
  */
 
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/iopoll.h>
 
 #include "dsi_phy.h"
 #include "dsi.xml.h"
 
+/*
+ * DSI PLL 7nm - clock diagram (eg: DSI0): TODO: update diagram for CPHY
+ *
+ *           dsi0_pll_out_div_clk  dsi0_pll_bit_clk
+ *                              |                |
+ *                              |                |
+ *                 +---------+  |  +----------+  |  +----+
+ *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
+ *                 +---------+  |  +----------+  |  +----+
+ *                              |                |
+ *                              |                |         dsi0_pll_by_2_bit_clk
+ *                              |                |          |
+ *                              |                |  +----+  |  |\  dsi0_pclk_mux
+ *                              |                |--| /2 |--o--| \   |
+ *                              |                |  +----+     |  \  |  +---------+
+ *                              |                --------------|  |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
+ *                              |------------------------------|  /     +---------+
+ *                              |          +-----+             | /
+ *                              -----------| /4? |--o----------|/
+ *                                         +-----+  |           |
+ *                                                  |           |dsiclk_sel
+ *                                                  |
+ *                                                  dsi0_pll_post_out_div_clk
+ */
+
+#define VCO_REF_CLK_RATE               19200000
+#define FRAC_BITS 18
+
+/* Hardware is V4.1 */
+#define DSI_PHY_7NM_QUIRK_V4_1         BIT(0)
+
+struct dsi_pll_config {
+       bool enable_ssc;
+       bool ssc_center;
+       u32 ssc_freq;
+       u32 ssc_offset;
+       u32 ssc_adj_per;
+
+       /* out */
+       u32 decimal_div_start;
+       u32 frac_div_start;
+       u32 pll_clock_inverters;
+       u32 ssc_stepsize;
+       u32 ssc_div_per;
+};
+
+struct pll_7nm_cached_state {
+       unsigned long vco_rate;
+       u8 bit_clk_div;
+       u8 pix_clk_div;
+       u8 pll_out_div;
+       u8 pll_mux;
+};
+
+struct dsi_pll_7nm {
+       struct clk_hw clk_hw;
+
+       struct msm_dsi_phy *phy;
+
+       u64 vco_current_rate;
+
+       /* protects REG_DSI_7nm_PHY_CMN_CLK_CFG0 register */
+       spinlock_t postdiv_lock;
+
+       struct pll_7nm_cached_state cached_state;
+
+       struct dsi_pll_7nm *slave;
+};
+
+#define to_pll_7nm(x)  container_of(x, struct dsi_pll_7nm, clk_hw)
+
+/*
+ * Global list of private DSI PLL struct pointers. We need this for Dual DSI
+ * mode, where the master PLL's clk_ops needs access to the slave's private data
+ */
+static struct dsi_pll_7nm *pll_7nm_list[DSI_MAX];
+
+static void dsi_pll_setup_config(struct dsi_pll_config *config)
+{
+       config->ssc_freq = 31500;
+       config->ssc_offset = 4800;
+       config->ssc_adj_per = 2;
+
+       /* TODO: ssc enable */
+       config->enable_ssc = false;
+       config->ssc_center = 0;
+}
+
+static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config *config)
+{
+       u64 fref = VCO_REF_CLK_RATE;
+       u64 pll_freq;
+       u64 divider;
+       u64 dec, dec_multiple;
+       u32 frac;
+       u64 multiplier;
+
+       pll_freq = pll->vco_current_rate;
+
+       divider = fref * 2;
+
+       multiplier = 1 << FRAC_BITS;
+       dec_multiple = div_u64(pll_freq * multiplier, divider);
+       div_u64_rem(dec_multiple, multiplier, &frac);
+
+       dec = div_u64(dec_multiple, multiplier);
+
+       if (!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1))
+               config->pll_clock_inverters = 0x28;
+       else if (pll_freq <= 1000000000ULL)
+               config->pll_clock_inverters = 0xa0;
+       else if (pll_freq <= 2500000000ULL)
+               config->pll_clock_inverters = 0x20;
+       else if (pll_freq <= 3020000000ULL)
+               config->pll_clock_inverters = 0x00;
+       else
+               config->pll_clock_inverters = 0x40;
+
+       config->decimal_div_start = dec;
+       config->frac_div_start = frac;
+}
+
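The split above implements Fvco = 2 * Fref * (dec + frac / 2^18), with dec landing in the DECIMAL_DIV_START register and frac spread across the three FRAC_DIV_START registers written by dsi_pll_commit() below. A worked example, assuming the fixed 19.2 MHz reference: for a 1.5 GHz VCO target, dec + frac/2^18 = 1500000000 / 38400000 = 39.0625, so decimal_div_start = 39 and frac_div_start = 0.0625 * 262144 = 16384. A standalone check of the arithmetic (plain userspace C, purely illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint64_t fref = 19200000, fvco = 1500000000ULL;
            const uint64_t mult = 1 << 18;           /* FRAC_BITS */
            uint64_t dec_multiple = fvco * mult / (fref * 2);
            uint32_t frac = dec_multiple % mult;     /* div_u64_rem() */
            uint64_t dec = dec_multiple / mult;      /* div_u64() */

            /* prints: dec=39 frac=16384 */
            printf("dec=%llu frac=%u\n", (unsigned long long)dec, frac);
            return 0;
    }
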
+#define SSC_CENTER             BIT(0)
+#define SSC_EN                 BIT(1)
+
+static void dsi_pll_calc_ssc(struct dsi_pll_7nm *pll, struct dsi_pll_config *config)
+{
+       u32 ssc_per;
+       u32 ssc_mod;
+       u64 ssc_step_size;
+       u64 frac;
+
+       if (!config->enable_ssc) {
+               DBG("SSC not enabled\n");
+               return;
+       }
+
+       ssc_per = DIV_ROUND_CLOSEST(VCO_REF_CLK_RATE, config->ssc_freq) / 2 - 1;
+       ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
+       ssc_per -= ssc_mod;
+
+       frac = config->frac_div_start;
+       ssc_step_size = config->decimal_div_start;
+       ssc_step_size *= (1 << FRAC_BITS);
+       ssc_step_size += frac;
+       ssc_step_size *= config->ssc_offset;
+       ssc_step_size *= (config->ssc_adj_per + 1);
+       ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
+       ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
+
+       config->ssc_div_per = ssc_per;
+       config->ssc_stepsize = ssc_step_size;
+
+       pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
+                config->decimal_div_start, frac, FRAC_BITS);
+       pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
+                ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
+}
+
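Worked through with the defaults from dsi_pll_setup_config() (ssc_freq = 31500, ssc_offset = 4800, ssc_adj_per = 2) and the 1.5 GHz example above (dec = 39, frac = 16384), assuming SSC were enabled: ssc_per = DIV_ROUND_CLOSEST(19200000, 31500) / 2 - 1 = 304, trimmed by ssc_mod = 305 % 3 = 2 down to 302; the step size evaluates to ((39 * 2^18 + 16384) * 4800 * 3) / 303 = 486653465, which the final DIV_ROUND_CLOSEST_ULL(..., 1000000) reduces to 487. dsi_pll_ssc_commit() below would then program ssc_div_per = 302 and ssc_stepsize = 487, each split into low/high bytes.
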
+static void dsi_pll_ssc_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *config)
+{
+       void __iomem *base = pll->phy->pll_base;
+
+       if (config->enable_ssc) {
+               pr_debug("SSC is enabled\n");
+
+               dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
+                         config->ssc_stepsize & 0xff);
+               dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
+                         config->ssc_stepsize >> 8);
+               dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1,
+                         config->ssc_div_per & 0xff);
+               dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
+                         config->ssc_div_per >> 8);
+               dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1,
+                         config->ssc_adj_per & 0xff);
+               dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1,
+                         config->ssc_adj_per >> 8);
+               dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_CONTROL,
+                         SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
+       }
+}
+
+static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll)
+{
+       void __iomem *base = pll->phy->pll_base;
+       u8 analog_controls_five_1 = 0x01, vco_config_1 = 0x00;
+
+       if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
+               if (pll->vco_current_rate >= 3100000000ULL)
+                       analog_controls_five_1 = 0x03;
+
+               if (pll->vco_current_rate < 1520000000ULL)
+                       vco_config_1 = 0x08;
+               else if (pll->vco_current_rate < 2990000000ULL)
+                       vco_config_1 = 0x01;
+       }
+
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1,
+                 analog_controls_five_1);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1, vco_config_1);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE, 0x01);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_DSM_DIVIDER, 0x00);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE, 0xba);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_OUTDIV, 0x00);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE, 0x00);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x0a);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1, 0xc0);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x84);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x82);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x4c);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x29);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x2f);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_IFILT, 0x2a);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_IFILT,
+                 pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1 ? 0x3f : 0x22);
+
+       if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
+               dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
+               if (pll->slave)
+                       dsi_phy_write(pll->slave->phy->pll_base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
+       }
+}
+
+static void dsi_pll_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *config)
+{
+       void __iomem *base = pll->phy->pll_base;
+
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1, config->decimal_div_start);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1,
+                 config->frac_div_start & 0xff);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1,
+                 (config->frac_div_start & 0xff00) >> 8);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
+                 (config->frac_div_start & 0x30000) >> 16);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, 0x10); /* TODO: 0x00 for CPHY */
+       dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, config->pll_clock_inverters);
+}
+
+static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
+                                    unsigned long parent_rate)
+{
+       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw);
+       struct dsi_pll_config config;
+
+       DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_7nm->phy->id, rate,
+           parent_rate);
+
+       pll_7nm->vco_current_rate = rate;
+
+       dsi_pll_setup_config(&config);
+
+       dsi_pll_calc_dec_frac(pll_7nm, &config);
+
+       dsi_pll_calc_ssc(pll_7nm, &config);
+
+       dsi_pll_commit(pll_7nm, &config);
+
+       dsi_pll_config_hzindep_reg(pll_7nm);
+
+       dsi_pll_ssc_commit(pll_7nm, &config);
+
+       /* flush, ensure all register writes are done */
+       wmb();
+
+       return 0;
+}
+
+static int dsi_pll_7nm_lock_status(struct dsi_pll_7nm *pll)
+{
+       int rc;
+       u32 status = 0;
+       u32 const delay_us = 100;
+       u32 const timeout_us = 5000;
+
+       rc = readl_poll_timeout_atomic(pll->phy->pll_base +
+                                      REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE,
+                                      status,
+                                      ((status & BIT(0)) > 0),
+                                      delay_us,
+                                      timeout_us);
+       if (rc)
+               pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
+                      pll->phy->id, status);
+
+       return rc;
+}
+
+static void dsi_pll_disable_pll_bias(struct dsi_pll_7nm *pll)
+{
+       u32 data = dsi_phy_read(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+
+       dsi_phy_write(pll->phy->pll_base + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0);
+       dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0, data & ~BIT(5));
+       ndelay(250);
+}
+
+static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
+{
+       u32 data = dsi_phy_read(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+
+       dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0, data | BIT(5));
+       dsi_phy_write(pll->phy->pll_base + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
+       ndelay(250);
+}
+
+static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
+{
+       u32 data;
+
+       data = dsi_phy_read(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+       dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data & ~BIT(5));
+}
+
+static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll)
+{
+       u32 data;
+
+       dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x04);
+
+       data = dsi_phy_read(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+       dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1,
+                 data | BIT(5) | BIT(4));
+}
+
+static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)
+{
+       /*
+        * Reset the PHY digital domain. This would be needed when
+        * coming out of a CX or analog rail power collapse while
+        * ensuring that the pads maintain LP00 or LP11 state
+        */
+       dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, BIT(0));
+       wmb(); /* Ensure that the reset is deasserted */
+       dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, 0x0);
+       wmb(); /* Ensure that the reset is deasserted */
+}
+
+static int dsi_pll_7nm_vco_prepare(struct clk_hw *hw)
+{
+       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw);
+       int rc;
+
+       dsi_pll_enable_pll_bias(pll_7nm);
+       if (pll_7nm->slave)
+               dsi_pll_enable_pll_bias(pll_7nm->slave);
+
+       /* Start PLL */
+       dsi_phy_write(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0x01);
+
+       /*
+        * ensure all PLL configurations are written prior to checking
+        * for PLL lock.
+        */
+       wmb();
+
+       /* Check for PLL lock */
+       rc = dsi_pll_7nm_lock_status(pll_7nm);
+       if (rc) {
+               pr_err("PLL(%d) lock failed\n", pll_7nm->phy->id);
+               goto error;
+       }
+
+       pll_7nm->phy->pll_on = true;
+
+       /*
+        * assert power on reset for PHY digital in case the PLL is
+        * enabled after CX or analog domain power collapse. This needs
+        * to be done before enabling the global clk.
+        */
+       dsi_pll_phy_dig_reset(pll_7nm);
+       if (pll_7nm->slave)
+               dsi_pll_phy_dig_reset(pll_7nm->slave);
+
+       dsi_pll_enable_global_clk(pll_7nm);
+       if (pll_7nm->slave)
+               dsi_pll_enable_global_clk(pll_7nm->slave);
+
+error:
+       return rc;
+}
+
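The prepare/unprepare pair above is reached through ordinary clock-consumer calls once the tree built by pll_7nm_register() below is in place. A hedged illustration of the DSI-host side; the "byte" connection id and the /8 relation between bit and byte clock rates are assumptions for illustration, not taken from this diff:

    /* Illustrative consumer path; CLK_SET_RATE_PARENT propagates the
     * rate up the divider chain to dsi_pll_7nm_vco_set_rate(), and
     * clk_prepare_enable() lands in dsi_pll_7nm_vco_prepare() above. */
    struct clk *byte_clk = devm_clk_get(dev, "byte");

    if (IS_ERR(byte_clk))
            return PTR_ERR(byte_clk);

    ret = clk_set_rate(byte_clk, bitclk_rate / 8);
    if (ret)
            return ret;

    ret = clk_prepare_enable(byte_clk);
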
+static void dsi_pll_disable_sub(struct dsi_pll_7nm *pll)
+{
+       dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0);
+       dsi_pll_disable_pll_bias(pll);
+}
+
+static void dsi_pll_7nm_vco_unprepare(struct clk_hw *hw)
+{
+       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw);
+
+       /*
+        * To avoid any stray glitches while abruptly powering down the PLL
+        * make sure to gate the clock using the clock enable bit before
+        * powering down the PLL
+        */
+       dsi_pll_disable_global_clk(pll_7nm);
+       dsi_phy_write(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0);
+       dsi_pll_disable_sub(pll_7nm);
+       if (pll_7nm->slave) {
+               dsi_pll_disable_global_clk(pll_7nm->slave);
+               dsi_pll_disable_sub(pll_7nm->slave);
+       }
+       /* flush, ensure all register writes are done */
+       wmb();
+       pll_7nm->phy->pll_on = false;
+}
+
+static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
+                                                 unsigned long parent_rate)
+{
+       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw);
+       void __iomem *base = pll_7nm->phy->pll_base;
+       u64 ref_clk = VCO_REF_CLK_RATE;
+       u64 vco_rate = 0x0;
+       u64 multiplier;
+       u32 frac;
+       u32 dec;
+       u64 pll_freq, tmp64;
+
+       dec = dsi_phy_read(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1);
+       dec &= 0xff;
+
+       frac = dsi_phy_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1);
+       frac |= ((dsi_phy_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1) &
+                 0xff) << 8);
+       frac |= ((dsi_phy_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
+                 0x3) << 16);
+
+       /*
+        * TODO:
+        *      1. Assumes prescaler is disabled
+        */
+       multiplier = 1 << FRAC_BITS;
+       pll_freq = dec * (ref_clk * 2);
+       tmp64 = (ref_clk * 2 * frac);
+       pll_freq += div_u64(tmp64, multiplier);
+
+       vco_rate = pll_freq;
+
+       DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
+           pll_7nm->phy->id, (unsigned long)vco_rate, dec, frac);
+
+       return (unsigned long)vco_rate;
+}
+
+static long dsi_pll_7nm_clk_round_rate(struct clk_hw *hw,
+               unsigned long rate, unsigned long *parent_rate)
+{
+       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw);
+
+       if      (rate < pll_7nm->phy->cfg->min_pll_rate)
+               return  pll_7nm->phy->cfg->min_pll_rate;
+       else if (rate > pll_7nm->phy->cfg->max_pll_rate)
+               return  pll_7nm->phy->cfg->max_pll_rate;
+       else
+               return rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_7nm_vco = {
+       .round_rate = dsi_pll_7nm_clk_round_rate,
+       .set_rate = dsi_pll_7nm_vco_set_rate,
+       .recalc_rate = dsi_pll_7nm_vco_recalc_rate,
+       .prepare = dsi_pll_7nm_vco_prepare,
+       .unprepare = dsi_pll_7nm_vco_unprepare,
+};
+
+/*
+ * PLL Callbacks
+ */
+
+static void dsi_7nm_pll_save_state(struct msm_dsi_phy *phy)
+{
+       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
+       struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
+       void __iomem *phy_base = pll_7nm->phy->base;
+       u32 cmn_clk_cfg0, cmn_clk_cfg1;
+
+       cached->pll_out_div = dsi_phy_read(pll_7nm->phy->pll_base +
+                                      REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
+       cached->pll_out_div &= 0x3;
+
+       cmn_clk_cfg0 = dsi_phy_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
+       cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
+       cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
+
+       cmn_clk_cfg1 = dsi_phy_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+       cached->pll_mux = cmn_clk_cfg1 & 0x3;
+
+       DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
+           pll_7nm->phy->id, cached->pll_out_div, cached->bit_clk_div,
+           cached->pix_clk_div, cached->pll_mux);
+}
+
+static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
+{
+       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
+       struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
+       void __iomem *phy_base = pll_7nm->phy->base;
+       u32 val;
+       int ret;
+
+       val = dsi_phy_read(pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
+       val &= ~0x3;
+       val |= cached->pll_out_div;
+       dsi_phy_write(pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE, val);
+
+       dsi_phy_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+                 cached->bit_clk_div | (cached->pix_clk_div << 4));
+
+       val = dsi_phy_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+       val &= ~0x3;
+       val |= cached->pll_mux;
+       dsi_phy_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, val);
+
+       ret = dsi_pll_7nm_vco_set_rate(phy->vco_hw,
+                       pll_7nm->vco_current_rate,
+                       VCO_REF_CLK_RATE);
+       if (ret) {
+               DRM_DEV_ERROR(&pll_7nm->phy->pdev->dev,
+                       "restore vco rate failed. ret=%d\n", ret);
+               return ret;
+       }
+
+       DBG("DSI PLL%d", pll_7nm->phy->id);
+
+       return 0;
+}
+
+static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
+{
+       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
+       void __iomem *base = phy->base;
+       u32 data = 0x0; /* internal PLL */
+
+       DBG("DSI PLL%d", pll_7nm->phy->id);
+
+       switch (phy->usecase) {
+       case MSM_DSI_PHY_STANDALONE:
+               break;
+       case MSM_DSI_PHY_MASTER:
+               pll_7nm->slave = pll_7nm_list[(pll_7nm->phy->id + 1) % DSI_MAX];
+               break;
+       case MSM_DSI_PHY_SLAVE:
+               data = 0x1; /* external PLL */
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* set PLL src */
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, (data << 2));
+
+       return 0;
+}
+
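In bonded (dual) DSI mode, the switch above means the PHY configured as MSM_DSI_PHY_MASTER picks up its peer's private data from pll_7nm_list, while the MSM_DSI_PHY_SLAVE PHY points CLK_CFG1 at the external PLL. A hedged sketch of the manager side; msm_dsi_phy_set_usecase() is assumed to simply record phy->usecase, and the exact call site in dsi_manager.c is an assumption:

    /* Sketch: dual-DSI usecase assignment before the PHYs are enabled;
     * dsi_7nm_set_usecase() above then acts on phy->usecase. */
    msm_dsi_phy_set_usecase(phy0, MSM_DSI_PHY_MASTER);
    msm_dsi_phy_set_usecase(phy1, MSM_DSI_PHY_SLAVE);
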
+/*
+ * The post dividers and mux clocks are created using the standard divider and
+ * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
+ * state to follow the master PLL's divider/mux state. Therefore, we don't
+ * require special clock ops that also configure the slave PLL registers
+ */
+static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provided_clocks)
+{
+       char clk_name[32], parent[32], vco_name[32];
+       char parent2[32], parent3[32], parent4[32];
+       struct clk_init_data vco_init = {
+               .parent_names = (const char *[]){ "bi_tcxo" },
+               .num_parents = 1,
+               .name = vco_name,
+               .flags = CLK_IGNORE_UNUSED,
+               .ops = &clk_ops_dsi_pll_7nm_vco,
+       };
+       struct device *dev = &pll_7nm->phy->pdev->dev;
+       struct clk_hw *hw;
+       int ret;
+
+       DBG("DSI%d", pll_7nm->phy->id);
+
+       snprintf(vco_name, 32, "dsi%dvco_clk", pll_7nm->phy->id);
+       pll_7nm->clk_hw.init = &vco_init;
+
+       ret = devm_clk_hw_register(dev, &pll_7nm->clk_hw);
+       if (ret)
+               return ret;
+
+       snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
+       snprintf(parent, 32, "dsi%dvco_clk", pll_7nm->phy->id);
+
+       hw = devm_clk_hw_register_divider(dev, clk_name,
+                                    parent, CLK_SET_RATE_PARENT,
+                                    pll_7nm->phy->pll_base +
+                                    REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE,
+                                    0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto fail;
+       }
+
+       snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id);
+       snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
+
+       /* BIT CLK: DIV_CTRL_3_0 */
+       hw = devm_clk_hw_register_divider(dev, clk_name, parent,
+                                    CLK_SET_RATE_PARENT,
+                                    pll_7nm->phy->base +
+                                    REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+                                    0, 4, CLK_DIVIDER_ONE_BASED,
+                                    &pll_7nm->postdiv_lock);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto fail;
+       }
+
+       snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_7nm->phy->id);
+       snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id);
+
+       /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
+       hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
+                                         CLK_SET_RATE_PARENT, 1, 8);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto fail;
+       }
+
+       provided_clocks[DSI_BYTE_PLL_CLK] = hw;
+
+       snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id);
+       snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id);
+
+       hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
+                                         0, 1, 2);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto fail;
+       }
+
+       snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id);
+       snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
+
+       hw = devm_clk_hw_register_fixed_factor(dev, clk_name, parent,
+                                         0, 1, 4);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto fail;
+       }
+
+       snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_7nm->phy->id);
+       snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->phy->id);
+       snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->phy->id);
+       snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_7nm->phy->id);
+       snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->phy->id);
+
+       hw = devm_clk_hw_register_mux(dev, clk_name,
+                                ((const char *[]){
+                                parent, parent2, parent3, parent4
+                                }), 4, 0, pll_7nm->phy->base +
+                                REG_DSI_7nm_PHY_CMN_CLK_CFG1,
+                                0, 2, 0, NULL);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto fail;
+       }
+
+       snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_7nm->phy->id);
+       snprintf(parent, 32, "dsi%d_pclk_mux", pll_7nm->phy->id);
+
+       /* PIX CLK DIV: DIV_CTRL_7_4 */
+       hw = devm_clk_hw_register_divider(dev, clk_name, parent,
+                                    0, pll_7nm->phy->base +
+                                       REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+                                    4, 4, CLK_DIVIDER_ONE_BASED,
+                                    &pll_7nm->postdiv_lock);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto fail;
+       }
+
+       provided_clocks[DSI_PIXEL_PLL_CLK] = hw;
+
+       return 0;
+
+fail:
+
+       return ret;
+}
+
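pll_7nm_register() only fills the provided_clocks slots; a hedged sketch of the provider-side plumbing assumed to live in the common dsi_phy.c probe path (NUM_PROVIDED_CLKS == 2 and the DSI_BYTE_PLL_CLK/DSI_PIXEL_PLL_CLK indices match the definitions visible in the deleted dsi_pll_10nm.c below):

    /* Sketch only: common probe-side hookup, not shown in this hunk. */
    phy->provided_clocks = devm_kzalloc(dev,
                    struct_size(phy->provided_clocks, hws, NUM_PROVIDED_CLKS),
                    GFP_KERNEL);
    if (!phy->provided_clocks)
            return -ENOMEM;

    phy->provided_clocks->num = NUM_PROVIDED_CLKS;

    ret = phy->cfg->ops.pll_init(phy);      /* e.g. dsi_pll_7nm_init() */
    if (ret)
            return ret;

    ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
                                      phy->provided_clocks);
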
+static int dsi_pll_7nm_init(struct msm_dsi_phy *phy)
+{
+       struct platform_device *pdev = phy->pdev;
+       struct dsi_pll_7nm *pll_7nm;
+       int ret;
+
+       pll_7nm = devm_kzalloc(&pdev->dev, sizeof(*pll_7nm), GFP_KERNEL);
+       if (!pll_7nm)
+               return -ENOMEM;
+
+       DBG("DSI PLL%d", phy->id);
+
+       pll_7nm_list[phy->id] = pll_7nm;
+
+       spin_lock_init(&pll_7nm->postdiv_lock);
+
+       pll_7nm->phy = phy;
+
+       ret = pll_7nm_register(pll_7nm, phy->provided_clocks->hws);
+       if (ret) {
+               DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+               return ret;
+       }
+
+       phy->vco_hw = &pll_7nm->clk_hw;
+
+       /* TODO: Remove this when we have proper display handover support */
+       msm_dsi_phy_pll_save_state(phy);
+
+       return 0;
+}
+
 static int dsi_phy_hw_v4_0_is_pll_on(struct msm_dsi_phy *phy)
 {
        void __iomem *base = phy->base;
@@ -44,7 +776,7 @@ static void dsi_phy_hw_v4_0_lane_settings(struct msm_dsi_phy *phy)
        const u8 *tx_dctrl = tx_dctrl_0;
        void __iomem *lane_base = phy->lane_base;
 
-       if (phy->cfg->type == MSM_DSI_PHY_7NM_V4_1)
+       if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1)
                tx_dctrl = tx_dctrl_1;
 
        /* Strength ctrl settings */
@@ -69,7 +801,7 @@ static void dsi_phy_hw_v4_0_lane_settings(struct msm_dsi_phy *phy)
        }
 }
 
-static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
                              struct msm_dsi_phy_clk_request *clk_req)
 {
        int ret;
@@ -108,7 +840,7 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
        /* Alter PHY configurations if data rate less than 1.5GHZ*/
        less_than_1500_mhz = (clk_req->bitclk_rate <= 1500000000);
 
-       if (phy->cfg->type == MSM_DSI_PHY_7NM_V4_1) {
+       if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
                vreg_ctrl_0 = less_than_1500_mhz ? 0x53 : 0x52;
                glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d :  0x00;
                glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x39 :  0x3c;
@@ -165,7 +897,7 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
        /* Select full-rate mode */
        dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_2, 0x40);
 
-       ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
+       ret = dsi_7nm_set_usecase(phy);
        if (ret) {
                DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
                        __func__, ret);
@@ -224,24 +956,8 @@ static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
        DBG("DSI%d PHY disabled", phy->id);
 }
 
-static int dsi_7nm_phy_init(struct msm_dsi_phy *phy)
-{
-       struct platform_device *pdev = phy->pdev;
-
-       phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
-                                    "DSI_PHY_LANE");
-       if (IS_ERR(phy->lane_base)) {
-               DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
-                       __func__);
-               return -ENOMEM;
-       }
-
-       return 0;
-}
-
 const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = {
-       .type = MSM_DSI_PHY_7NM_V4_1,
-       .src_pll_truthtable = { {false, false}, {true, false} },
+       .has_phy_lane = true,
        .reg_cfg = {
                .num = 1,
                .regs = {
@@ -251,15 +967,19 @@ const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = {
        .ops = {
                .enable = dsi_7nm_phy_enable,
                .disable = dsi_7nm_phy_disable,
-               .init = dsi_7nm_phy_init,
+               .pll_init = dsi_pll_7nm_init,
+               .save_pll_state = dsi_7nm_pll_save_state,
+               .restore_pll_state = dsi_7nm_pll_restore_state,
        },
+       .min_pll_rate = 600000000UL,
+       .max_pll_rate = (5000000000ULL < ULONG_MAX) ? 5000000000ULL : ULONG_MAX,
        .io_start = { 0xae94400, 0xae96400 },
        .num_dsi_phy = 2,
+       .quirks = DSI_PHY_7NM_QUIRK_V4_1,
 };
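Note the clamp on max_pll_rate just above: clock rates travel through the framework as unsigned long, so on 32-bit builds (where ULONG_MAX is about 4.29e9) the 5 GHz ceiling would overflow and the cfg falls back to ULONG_MAX instead.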
 
 const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = {
-       .type = MSM_DSI_PHY_7NM,
-       .src_pll_truthtable = { {false, false}, {true, false} },
+       .has_phy_lane = true,
        .reg_cfg = {
                .num = 1,
                .regs = {
@@ -269,8 +989,12 @@ const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = {
        .ops = {
                .enable = dsi_7nm_phy_enable,
                .disable = dsi_7nm_phy_disable,
-               .init = dsi_7nm_phy_init,
+               .pll_init = dsi_pll_7nm_init,
+               .save_pll_state = dsi_7nm_pll_save_state,
+               .restore_pll_state = dsi_7nm_pll_restore_state,
        },
+       .min_pll_rate = 1000000000UL,
+       .max_pll_rate = 3500000000UL,
        .io_start = { 0xae94400, 0xae96400 },
        .num_dsi_phy = 2,
 };
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
deleted file mode 100644
index a45fe95..0000000
+++ /dev/null
@@ -1,184 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
- */
-
-#include "dsi_pll.h"
-
-static int dsi_pll_enable(struct msm_dsi_pll *pll)
-{
-       int i, ret = 0;
-
-       /*
-        * Certain PLLs do not allow VCO rate update when it is on.
-        * Keep track of their status to turn on/off after set rate success.
-        */
-       if (unlikely(pll->pll_on))
-               return 0;
-
-       /* Try all enable sequences until one succeeds */
-       for (i = 0; i < pll->en_seq_cnt; i++) {
-               ret = pll->enable_seqs[i](pll);
-               DBG("DSI PLL %s after sequence #%d",
-                       ret ? "unlocked" : "locked", i + 1);
-               if (!ret)
-                       break;
-       }
-
-       if (ret) {
-               DRM_ERROR("DSI PLL failed to lock\n");
-               return ret;
-       }
-
-       pll->pll_on = true;
-
-       return 0;
-}
-
-static void dsi_pll_disable(struct msm_dsi_pll *pll)
-{
-       if (unlikely(!pll->pll_on))
-               return;
-
-       pll->disable_seq(pll);
-
-       pll->pll_on = false;
-}
-
-/*
- * DSI PLL Helper functions
- */
-long msm_dsi_pll_helper_clk_round_rate(struct clk_hw *hw,
-               unsigned long rate, unsigned long *parent_rate)
-{
-       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-
-       if      (rate < pll->min_rate)
-               return  pll->min_rate;
-       else if (rate > pll->max_rate)
-               return  pll->max_rate;
-       else
-               return rate;
-}
-
-int msm_dsi_pll_helper_clk_prepare(struct clk_hw *hw)
-{
-       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-
-       return dsi_pll_enable(pll);
-}
-
-void msm_dsi_pll_helper_clk_unprepare(struct clk_hw *hw)
-{
-       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-
-       dsi_pll_disable(pll);
-}
-
-void msm_dsi_pll_helper_unregister_clks(struct platform_device *pdev,
-                                       struct clk **clks, u32 num_clks)
-{
-       of_clk_del_provider(pdev->dev.of_node);
-
-       if (!num_clks || !clks)
-               return;
-
-       do {
-               clk_unregister(clks[--num_clks]);
-               clks[num_clks] = NULL;
-       } while (num_clks);
-}
-
-/*
- * DSI PLL API
- */
-int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
-       struct clk **byte_clk_provider, struct clk **pixel_clk_provider)
-{
-       if (pll->get_provider)
-               return pll->get_provider(pll,
-                                       byte_clk_provider,
-                                       pixel_clk_provider);
-
-       return -EINVAL;
-}
-
-void msm_dsi_pll_destroy(struct msm_dsi_pll *pll)
-{
-       if (pll->destroy)
-               pll->destroy(pll);
-}
-
-void msm_dsi_pll_save_state(struct msm_dsi_pll *pll)
-{
-       if (pll->save_state) {
-               pll->save_state(pll);
-               pll->state_saved = true;
-       }
-}
-
-int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll)
-{
-       int ret;
-
-       if (pll->restore_state && pll->state_saved) {
-               ret = pll->restore_state(pll);
-               if (ret)
-                       return ret;
-
-               pll->state_saved = false;
-       }
-
-       return 0;
-}
-
-int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
-                           enum msm_dsi_phy_usecase uc)
-{
-       if (pll->set_usecase)
-               return pll->set_usecase(pll, uc);
-
-       return 0;
-}
-
-struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
-                       enum msm_dsi_phy_type type, int id)
-{
-       struct device *dev = &pdev->dev;
-       struct msm_dsi_pll *pll;
-
-       switch (type) {
-       case MSM_DSI_PHY_28NM_HPM:
-       case MSM_DSI_PHY_28NM_LP:
-               pll = msm_dsi_pll_28nm_init(pdev, type, id);
-               break;
-       case MSM_DSI_PHY_28NM_8960:
-               pll = msm_dsi_pll_28nm_8960_init(pdev, id);
-               break;
-       case MSM_DSI_PHY_14NM:
-               pll = msm_dsi_pll_14nm_init(pdev, id);
-               break;
-       case MSM_DSI_PHY_10NM:
-               pll = msm_dsi_pll_10nm_init(pdev, id);
-               break;
-       case MSM_DSI_PHY_7NM:
-       case MSM_DSI_PHY_7NM_V4_1:
-               pll = msm_dsi_pll_7nm_init(pdev, id);
-               break;
-       default:
-               pll = ERR_PTR(-ENXIO);
-               break;
-       }
-
-       if (IS_ERR(pll)) {
-               DRM_DEV_ERROR(dev, "%s: failed to init DSI PLL\n", __func__);
-               return pll;
-       }
-
-       pll->type = type;
-
-       DBG("DSI:%d PLL registered", id);
-
-       return pll;
-}
-
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
deleted file mode 100644
index 3405982..0000000
+++ /dev/null
@@ -1,130 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __DSI_PLL_H__
-#define __DSI_PLL_H__
-
-#include <linux/clk-provider.h>
-#include <linux/delay.h>
-
-#include "dsi.h"
-
-#define NUM_DSI_CLOCKS_MAX     6
-#define MAX_DSI_PLL_EN_SEQS    10
-
-struct msm_dsi_pll {
-       enum msm_dsi_phy_type type;
-
-       struct clk_hw   clk_hw;
-       bool            pll_on;
-       bool            state_saved;
-
-       unsigned long   min_rate;
-       unsigned long   max_rate;
-       u32             en_seq_cnt;
-
-       int (*enable_seqs[MAX_DSI_PLL_EN_SEQS])(struct msm_dsi_pll *pll);
-       void (*disable_seq)(struct msm_dsi_pll *pll);
-       int (*get_provider)(struct msm_dsi_pll *pll,
-                       struct clk **byte_clk_provider,
-                       struct clk **pixel_clk_provider);
-       void (*destroy)(struct msm_dsi_pll *pll);
-       void (*save_state)(struct msm_dsi_pll *pll);
-       int (*restore_state)(struct msm_dsi_pll *pll);
-       int (*set_usecase)(struct msm_dsi_pll *pll,
-                          enum msm_dsi_phy_usecase uc);
-};
-
-#define hw_clk_to_pll(x) container_of(x, struct msm_dsi_pll, clk_hw)
-
-static inline void pll_write(void __iomem *reg, u32 data)
-{
-       msm_writel(data, reg);
-}
-
-static inline u32 pll_read(const void __iomem *reg)
-{
-       return msm_readl(reg);
-}
-
-static inline void pll_write_udelay(void __iomem *reg, u32 data, u32 delay_us)
-{
-       pll_write(reg, data);
-       udelay(delay_us);
-}
-
-static inline void pll_write_ndelay(void __iomem *reg, u32 data, u32 delay_ns)
-{
-       pll_write((reg), data);
-       ndelay(delay_ns);
-}
-
-/*
- * DSI PLL Helper functions
- */
-
-/* clock callbacks */
-long msm_dsi_pll_helper_clk_round_rate(struct clk_hw *hw,
-               unsigned long rate, unsigned long *parent_rate);
-int msm_dsi_pll_helper_clk_prepare(struct clk_hw *hw);
-void msm_dsi_pll_helper_clk_unprepare(struct clk_hw *hw);
-/* misc */
-void msm_dsi_pll_helper_unregister_clks(struct platform_device *pdev,
-                                       struct clk **clks, u32 num_clks);
-
-/*
- * Initialization for Each PLL Type
- */
-#ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
-struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
-                                       enum msm_dsi_phy_type type, int id);
-#else
-static inline struct msm_dsi_pll *msm_dsi_pll_28nm_init(
-       struct platform_device *pdev, enum msm_dsi_phy_type type, int id)
-{
-       return ERR_PTR(-ENODEV);
-}
-#endif
-#ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY
-struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
-                                              int id);
-#else
-static inline struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(
-       struct platform_device *pdev, int id)
-{
-       return ERR_PTR(-ENODEV);
-}
-#endif
-
-#ifdef CONFIG_DRM_MSM_DSI_14NM_PHY
-struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id);
-#else
-static inline struct msm_dsi_pll *
-msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
-{
-       return ERR_PTR(-ENODEV);
-}
-#endif
-#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
-struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id);
-#else
-static inline struct msm_dsi_pll *
-msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
-{
-       return ERR_PTR(-ENODEV);
-}
-#endif
-#ifdef CONFIG_DRM_MSM_DSI_7NM_PHY
-struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id);
-#else
-static inline struct msm_dsi_pll *
-msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
-{
-       return ERR_PTR(-ENODEV);
-}
-#endif
-
-#endif /* __DSI_PLL_H__ */
-
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
deleted file mode 100644
index de3b802..0000000
+++ /dev/null
@@ -1,881 +0,0 @@
-/*
- * SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2018, The Linux Foundation
- */
-
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/iopoll.h>
-
-#include "dsi_pll.h"
-#include "dsi.xml.h"
-
-/*
- * DSI PLL 10nm - clock diagram (eg: DSI0):
- *
- *           dsi0_pll_out_div_clk  dsi0_pll_bit_clk
- *                              |                |
- *                              |                |
- *                 +---------+  |  +----------+  |  +----+
- *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
- *                 +---------+  |  +----------+  |  +----+
- *                              |                |
- *                              |                |         dsi0_pll_by_2_bit_clk
- *                              |                |          |
- *                              |                |  +----+  |  |\  dsi0_pclk_mux
- *                              |                |--| /2 |--o--| \   |
- *                              |                |  +----+     |  \  |  +---------+
- *                              |                --------------|  |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
- *                              |------------------------------|  /     +---------+
- *                              |          +-----+             | /
- *                              -----------| /4? |--o----------|/
- *                                         +-----+  |           |
- *                                                  |           |dsiclk_sel
- *                                                  |
- *                                                  dsi0_pll_post_out_div_clk
- */
-
-#define DSI_BYTE_PLL_CLK               0
-#define DSI_PIXEL_PLL_CLK              1
-#define NUM_PROVIDED_CLKS              2
-
-#define VCO_REF_CLK_RATE               19200000
-
-struct dsi_pll_regs {
-       u32 pll_prop_gain_rate;
-       u32 pll_lockdet_rate;
-       u32 decimal_div_start;
-       u32 frac_div_start_low;
-       u32 frac_div_start_mid;
-       u32 frac_div_start_high;
-       u32 pll_clock_inverters;
-       u32 ssc_stepsize_low;
-       u32 ssc_stepsize_high;
-       u32 ssc_div_per_low;
-       u32 ssc_div_per_high;
-       u32 ssc_adjper_low;
-       u32 ssc_adjper_high;
-       u32 ssc_control;
-};
-
-struct dsi_pll_config {
-       u32 ref_freq;
-       bool div_override;
-       u32 output_div;
-       bool ignore_frac;
-       bool disable_prescaler;
-       bool enable_ssc;
-       bool ssc_center;
-       u32 dec_bits;
-       u32 frac_bits;
-       u32 lock_timer;
-       u32 ssc_freq;
-       u32 ssc_offset;
-       u32 ssc_adj_per;
-       u32 thresh_cycles;
-       u32 refclk_cycles;
-};
-
-struct pll_10nm_cached_state {
-       unsigned long vco_rate;
-       u8 bit_clk_div;
-       u8 pix_clk_div;
-       u8 pll_out_div;
-       u8 pll_mux;
-};
-
-struct dsi_pll_10nm {
-       struct msm_dsi_pll base;
-
-       int id;
-       struct platform_device *pdev;
-
-       void __iomem *phy_cmn_mmio;
-       void __iomem *mmio;
-
-       u64 vco_ref_clk_rate;
-       u64 vco_current_rate;
-
-       /* protects REG_DSI_10nm_PHY_CMN_CLK_CFG0 register */
-       spinlock_t postdiv_lock;
-
-       int vco_delay;
-       struct dsi_pll_config pll_configuration;
-       struct dsi_pll_regs reg_setup;
-
-       /* private clocks: */
-       struct clk_hw *out_div_clk_hw;
-       struct clk_hw *bit_clk_hw;
-       struct clk_hw *byte_clk_hw;
-       struct clk_hw *by_2_bit_clk_hw;
-       struct clk_hw *post_out_div_clk_hw;
-       struct clk_hw *pclk_mux_hw;
-       struct clk_hw *out_dsiclk_hw;
-
-       /* clock-provider: */
-       struct clk_hw_onecell_data *hw_data;
-
-       struct pll_10nm_cached_state cached_state;
-
-       enum msm_dsi_phy_usecase uc;
-       struct dsi_pll_10nm *slave;
-};
-
-#define to_pll_10nm(x) container_of(x, struct dsi_pll_10nm, base)
-
-/*
- * Global list of private DSI PLL struct pointers. We need this for Dual DSI
- * mode, where the master PLL's clk_ops needs access the slave's private data
- */
-static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX];
-
-static void dsi_pll_setup_config(struct dsi_pll_10nm *pll)
-{
-       struct dsi_pll_config *config = &pll->pll_configuration;
-
-       config->ref_freq = pll->vco_ref_clk_rate;
-       config->output_div = 1;
-       config->dec_bits = 8;
-       config->frac_bits = 18;
-       config->lock_timer = 64;
-       config->ssc_freq = 31500;
-       config->ssc_offset = 5000;
-       config->ssc_adj_per = 2;
-       config->thresh_cycles = 32;
-       config->refclk_cycles = 256;
-
-       config->div_override = false;
-       config->ignore_frac = false;
-       config->disable_prescaler = false;
-
-       config->enable_ssc = false;
-       config->ssc_center = 0;
-}
-
-static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll)
-{
-       struct dsi_pll_config *config = &pll->pll_configuration;
-       struct dsi_pll_regs *regs = &pll->reg_setup;
-       u64 fref = pll->vco_ref_clk_rate;
-       u64 pll_freq;
-       u64 divider;
-       u64 dec, dec_multiple;
-       u32 frac;
-       u64 multiplier;
-
-       pll_freq = pll->vco_current_rate;
-
-       if (config->disable_prescaler)
-               divider = fref;
-       else
-               divider = fref * 2;
-
-       multiplier = 1 << config->frac_bits;
-       dec_multiple = div_u64(pll_freq * multiplier, divider);
-       dec = div_u64_rem(dec_multiple, multiplier, &frac);
-
-       if (pll_freq <= 1900000000UL)
-               regs->pll_prop_gain_rate = 8;
-       else if (pll_freq <= 3000000000UL)
-               regs->pll_prop_gain_rate = 10;
-       else
-               regs->pll_prop_gain_rate = 12;
-       if (pll_freq < 1100000000UL)
-               regs->pll_clock_inverters = 8;
-       else
-               regs->pll_clock_inverters = 0;
-
-       regs->pll_lockdet_rate = config->lock_timer;
-       regs->decimal_div_start = dec;
-       regs->frac_div_start_low = (frac & 0xff);
-       regs->frac_div_start_mid = (frac & 0xff00) >> 8;
-       regs->frac_div_start_high = (frac & 0x30000) >> 16;
-}
-
-#define SSC_CENTER             BIT(0)
-#define SSC_EN                 BIT(1)
-
-static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll)
-{
-       struct dsi_pll_config *config = &pll->pll_configuration;
-       struct dsi_pll_regs *regs = &pll->reg_setup;
-       u32 ssc_per;
-       u32 ssc_mod;
-       u64 ssc_step_size;
-       u64 frac;
-
-       if (!config->enable_ssc) {
-               DBG("SSC not enabled\n");
-               return;
-       }
-
-       ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;
-       ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
-       ssc_per -= ssc_mod;
-
-       frac = regs->frac_div_start_low |
-                       (regs->frac_div_start_mid << 8) |
-                       (regs->frac_div_start_high << 16);
-       ssc_step_size = regs->decimal_div_start;
-       ssc_step_size *= (1 << config->frac_bits);
-       ssc_step_size += frac;
-       ssc_step_size *= config->ssc_offset;
-       ssc_step_size *= (config->ssc_adj_per + 1);
-       ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
-       ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
-
-       regs->ssc_div_per_low = ssc_per & 0xFF;
-       regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;
-       regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);
-       regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);
-       regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;
-       regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;
-
-       regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;
-
-       pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
-                regs->decimal_div_start, frac, config->frac_bits);
-       pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
-                ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
-}
-
-static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll)
-{
-       void __iomem *base = pll->mmio;
-       struct dsi_pll_regs *regs = &pll->reg_setup;
-
-       if (pll->pll_configuration.enable_ssc) {
-               pr_debug("SSC is enabled\n");
-
-               pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
-                         regs->ssc_stepsize_low);
-               pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
-                         regs->ssc_stepsize_high);
-               pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1,
-                         regs->ssc_div_per_low);
-               pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
-                         regs->ssc_div_per_high);
-               pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1,
-                         regs->ssc_adjper_low);
-               pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1,
-                         regs->ssc_adjper_high);
-               pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL,
-                         SSC_EN | regs->ssc_control);
-       }
-}
-
-static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll)
-{
-       void __iomem *base = pll->mmio;
-
-       pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE, 0x80);
-       pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
-       pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
-       pll_write(base + REG_DSI_10nm_PHY_PLL_DSM_DIVIDER, 0x00);
-       pll_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
-       pll_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
-       pll_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE,
-                 0xba);
-       pll_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
-       pll_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00);
-       pll_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00);
-       pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
-       pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08);
-       pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0);
-       pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0xfa);
-       pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1,
-                 0x4c);
-       pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
-       pll_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29);
-       pll_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f);
-}
-
-static void dsi_pll_commit(struct dsi_pll_10nm *pll)
-{
-       void __iomem *base = pll->mmio;
-       struct dsi_pll_regs *reg = &pll->reg_setup;
-
-       pll_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
-       pll_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1,
-                 reg->decimal_div_start);
-       pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1,
-                 reg->frac_div_start_low);
-       pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1,
-                 reg->frac_div_start_mid);
-       pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
-                 reg->frac_div_start_high);
-       pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1,
-                 reg->pll_lockdet_rate);
-       pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
-       pll_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
-       pll_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
-                 reg->pll_clock_inverters);
-}
-
-static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
-                                    unsigned long parent_rate)
-{
-       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-       struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
-
-       DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_10nm->id, rate,
-           parent_rate);
-
-       pll_10nm->vco_current_rate = rate;
-       pll_10nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
-
-       dsi_pll_setup_config(pll_10nm);
-
-       dsi_pll_calc_dec_frac(pll_10nm);
-
-       dsi_pll_calc_ssc(pll_10nm);
-
-       dsi_pll_commit(pll_10nm);
-
-       dsi_pll_config_hzindep_reg(pll_10nm);
-
-       dsi_pll_ssc_commit(pll_10nm);
-
-       /* flush, ensure all register writes are done*/
-       wmb();
-
-       return 0;
-}
-
-static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
-{
-       struct device *dev = &pll->pdev->dev;
-       int rc;
-       u32 status = 0;
-       u32 const delay_us = 100;
-       u32 const timeout_us = 5000;
-
-       rc = readl_poll_timeout_atomic(pll->mmio +
-                                      REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE,
-                                      status,
-                                      ((status & BIT(0)) > 0),
-                                      delay_us,
-                                      timeout_us);
-       if (rc)
-               DRM_DEV_ERROR(dev, "DSI PLL(%d) lock failed, status=0x%08x\n",
-                             pll->id, status);
-
-       return rc;
-}
-
-static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll)
-{
-       u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0);
-
-       pll_write(pll->mmio + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0);
-       pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0,
-                 data & ~BIT(5));
-       ndelay(250);
-}
-
-static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll)
-{
-       u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0);
-
-       pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0,
-                 data | BIT(5));
-       pll_write(pll->mmio + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
-       ndelay(250);
-}
-
-static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll)
-{
-       u32 data;
-
-       data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
-       pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
-                 data & ~BIT(5));
-}
-
-static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
-{
-       u32 data;
-
-       data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
-       pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
-                 data | BIT(5));
-}
-
-static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
-{
-       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-       struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
-       struct device *dev = &pll_10nm->pdev->dev;
-       int rc;
-
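-       /* bring up the PLL bias (and the slave PHY's, in dual-DSI mode)
-        * before the rate is programmed and the PLL is started
-        */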
-       dsi_pll_enable_pll_bias(pll_10nm);
-       if (pll_10nm->slave)
-               dsi_pll_enable_pll_bias(pll_10nm->slave);
-
-       rc = dsi_pll_10nm_vco_set_rate(hw, pll_10nm->vco_current_rate, 0);
-       if (rc) {
-               DRM_DEV_ERROR(dev, "vco_set_rate failed, rc=%d\n", rc);
-               return rc;
-       }
-
-       /* Start PLL */
-       pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
-                 0x01);
-
-       /*
-        * ensure all PLL configurations are written prior to checking
-        * for PLL lock.
-        */
-       wmb();
-
-       /* Check for PLL lock */
-       rc = dsi_pll_10nm_lock_status(pll_10nm);
-       if (rc) {
-               DRM_DEV_ERROR(dev, "PLL(%d) lock failed\n", pll_10nm->id);
-               goto error;
-       }
-
-       pll->pll_on = true;
-
-       dsi_pll_enable_global_clk(pll_10nm);
-       if (pll_10nm->slave)
-               dsi_pll_enable_global_clk(pll_10nm->slave);
-
-       pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_RBUF_CTRL,
-                 0x01);
-       if (pll_10nm->slave)
-               pll_write(pll_10nm->slave->phy_cmn_mmio +
-                         REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);
-
-error:
-       return rc;
-}
-
-static void dsi_pll_disable_sub(struct dsi_pll_10nm *pll)
-{
-       pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0);
-       dsi_pll_disable_pll_bias(pll);
-}
-
-static void dsi_pll_10nm_vco_unprepare(struct clk_hw *hw)
-{
-       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-       struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
-
-       /*
-        * To avoid any stray glitches while abruptly powering down the PLL
-        * make sure to gate the clock using the clock enable bit before
-        * powering down the PLL
-        */
-       dsi_pll_disable_global_clk(pll_10nm);
-       pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0);
-       dsi_pll_disable_sub(pll_10nm);
-       if (pll_10nm->slave) {
-               dsi_pll_disable_global_clk(pll_10nm->slave);
-               dsi_pll_disable_sub(pll_10nm->slave);
-       }
-       /* flush, ensure all register writes are done */
-       wmb();
-       pll->pll_on = false;
-}
-
-static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
-                                                 unsigned long parent_rate)
-{
-       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-       struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
-       struct dsi_pll_config *config = &pll_10nm->pll_configuration;
-       void __iomem *base = pll_10nm->mmio;
-       u64 ref_clk = pll_10nm->vco_ref_clk_rate;
-       u64 vco_rate = 0x0;
-       u64 multiplier;
-       u32 frac;
-       u32 dec;
-       u64 pll_freq, tmp64;
-
-       dec = pll_read(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1);
-       dec &= 0xff;
-
-       frac = pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1);
-       frac |= ((pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1) &
-                 0xff) << 8);
-       frac |= ((pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
-                 0x3) << 16);
-
-       /* TODO: this assumes the prescaler is disabled */
-       multiplier = 1 << config->frac_bits;
-       pll_freq = dec * (ref_clk * 2);
-       tmp64 = (ref_clk * 2 * frac);
-       pll_freq += div_u64(tmp64, multiplier);
-
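-       /*
-        * i.e. vco = 2 * ref * (dec + frac / 2^frac_bits); with frac_bits
-        * = 18 (the value this driver configures for the 10 nm PHY),
-        * dec = 39 and frac = 16384 at a 19.2 MHz reference give
-        * 38.4 MHz * 39.0625 = 1.5 GHz.
-        */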
-       vco_rate = pll_freq;
-
-       DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
-           pll_10nm->id, (unsigned long)vco_rate, dec, frac);
-
-       return (unsigned long)vco_rate;
-}
-
-static const struct clk_ops clk_ops_dsi_pll_10nm_vco = {
-       .round_rate = msm_dsi_pll_helper_clk_round_rate,
-       .set_rate = dsi_pll_10nm_vco_set_rate,
-       .recalc_rate = dsi_pll_10nm_vco_recalc_rate,
-       .prepare = dsi_pll_10nm_vco_prepare,
-       .unprepare = dsi_pll_10nm_vco_unprepare,
-};
-
-/*
- * PLL Callbacks
- */
-
-static void dsi_pll_10nm_save_state(struct msm_dsi_pll *pll)
-{
-       struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
-       struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
-       void __iomem *phy_base = pll_10nm->phy_cmn_mmio;
-       u32 cmn_clk_cfg0, cmn_clk_cfg1;
-
-       cached->pll_out_div = pll_read(pll_10nm->mmio +
-                                      REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
-       cached->pll_out_div &= 0x3;
-
-       cmn_clk_cfg0 = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);
-       cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
-       cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
-
-       cmn_clk_cfg1 = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
-       cached->pll_mux = cmn_clk_cfg1 & 0x3;
-
-       DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
-           pll_10nm->id, cached->pll_out_div, cached->bit_clk_div,
-           cached->pix_clk_div, cached->pll_mux);
-}
-
-static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll)
-{
-       struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
-       struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
-       void __iomem *phy_base = pll_10nm->phy_cmn_mmio;
-       u32 val;
-       int ret;
-
-       val = pll_read(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
-       val &= ~0x3;
-       val |= cached->pll_out_div;
-       pll_write(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val);
-
-       pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
-                 cached->bit_clk_div | (cached->pix_clk_div << 4));
-
-       val = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
-       val &= ~0x3;
-       val |= cached->pll_mux;
-       pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val);
-
-       ret = dsi_pll_10nm_vco_set_rate(&pll->clk_hw, pll_10nm->vco_current_rate, pll_10nm->vco_ref_clk_rate);
-       if (ret) {
-               DRM_DEV_ERROR(&pll_10nm->pdev->dev,
-                       "restore vco rate failed. ret=%d\n", ret);
-               return ret;
-       }
-
-       DBG("DSI PLL%d", pll_10nm->id);
-
-       return 0;
-}
-
-static int dsi_pll_10nm_set_usecase(struct msm_dsi_pll *pll,
-                                   enum msm_dsi_phy_usecase uc)
-{
-       struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
-       void __iomem *base = pll_10nm->phy_cmn_mmio;
-       u32 data = 0x0; /* internal PLL */
-
-       DBG("DSI PLL%d", pll_10nm->id);
-
-       switch (uc) {
-       case MSM_DSI_PHY_STANDALONE:
-               break;
-       case MSM_DSI_PHY_MASTER:
-               pll_10nm->slave = pll_10nm_list[(pll_10nm->id + 1) % DSI_MAX];
-               break;
-       case MSM_DSI_PHY_SLAVE:
-               data = 0x1; /* external PLL */
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       /* set PLL src */
-       pll_write(base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, (data << 2));
-
-       pll_10nm->uc = uc;
-
-       return 0;
-}
-
-static int dsi_pll_10nm_get_provider(struct msm_dsi_pll *pll,
-                                    struct clk **byte_clk_provider,
-                                    struct clk **pixel_clk_provider)
-{
-       struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
-       struct clk_hw_onecell_data *hw_data = pll_10nm->hw_data;
-
-       DBG("DSI PLL%d", pll_10nm->id);
-
-       if (byte_clk_provider)
-               *byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;
-       if (pixel_clk_provider)
-               *pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;
-
-       return 0;
-}
-
-static void dsi_pll_10nm_destroy(struct msm_dsi_pll *pll)
-{
-       struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
-       struct device *dev = &pll_10nm->pdev->dev;
-
-       DBG("DSI PLL%d", pll_10nm->id);
-       of_clk_del_provider(dev->of_node);
-
-       clk_hw_unregister_divider(pll_10nm->out_dsiclk_hw);
-       clk_hw_unregister_mux(pll_10nm->pclk_mux_hw);
-       clk_hw_unregister_fixed_factor(pll_10nm->post_out_div_clk_hw);
-       clk_hw_unregister_fixed_factor(pll_10nm->by_2_bit_clk_hw);
-       clk_hw_unregister_fixed_factor(pll_10nm->byte_clk_hw);
-       clk_hw_unregister_divider(pll_10nm->bit_clk_hw);
-       clk_hw_unregister_divider(pll_10nm->out_div_clk_hw);
-       clk_hw_unregister(&pll_10nm->base.clk_hw);
-}
-
-/*
- * The post dividers and mux clocks are created using the standard divider and
- * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
- * state to follow the master PLL's divider/mux state. Therefore, we don't
- * require special clock ops that also configure the slave PLL registers
- */
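-/*
- * Summary of the clock tree registered below: dsi%dvco_clk feeds a
- * power-of-two out_div (/1../8), then a 4-bit one-based bit-clock
- * divider; the byte clock is a fixed /8 of the bit clock. The pixel
- * path muxes between bit_clk, bit_clk/2, out_div and out_div/4, then
- * runs through a 4-bit one-based pixel divider.
- */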
-static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
-{
-       char clk_name[32], parent[32], vco_name[32];
-       char parent2[32], parent3[32], parent4[32];
-       struct clk_init_data vco_init = {
-               .parent_names = (const char *[]){ "xo" },
-               .num_parents = 1,
-               .name = vco_name,
-               .flags = CLK_IGNORE_UNUSED,
-               .ops = &clk_ops_dsi_pll_10nm_vco,
-       };
-       struct device *dev = &pll_10nm->pdev->dev;
-       struct clk_hw_onecell_data *hw_data;
-       struct clk_hw *hw;
-       int ret;
-
-       DBG("DSI%d", pll_10nm->id);
-
-       hw_data = devm_kzalloc(dev, sizeof(*hw_data) +
-                              NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),
-                              GFP_KERNEL);
-       if (!hw_data)
-               return -ENOMEM;
-
-       snprintf(vco_name, 32, "dsi%dvco_clk", pll_10nm->id);
-       pll_10nm->base.clk_hw.init = &vco_init;
-
-       ret = clk_hw_register(dev, &pll_10nm->base.clk_hw);
-       if (ret)
-               return ret;
-
-       snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
-       snprintf(parent, 32, "dsi%dvco_clk", pll_10nm->id);
-
-       hw = clk_hw_register_divider(dev, clk_name,
-                                    parent, CLK_SET_RATE_PARENT,
-                                    pll_10nm->mmio +
-                                    REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
-                                    0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
-       if (IS_ERR(hw)) {
-               ret = PTR_ERR(hw);
-               goto err_base_clk_hw;
-       }
-
-       pll_10nm->out_div_clk_hw = hw;
-
-       snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
-       snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
-
-       /* BIT CLK: DIV_CTRL_3_0 */
-       hw = clk_hw_register_divider(dev, clk_name, parent,
-                                    CLK_SET_RATE_PARENT,
-                                    pll_10nm->phy_cmn_mmio +
-                                    REG_DSI_10nm_PHY_CMN_CLK_CFG0,
-                                    0, 4, CLK_DIVIDER_ONE_BASED,
-                                    &pll_10nm->postdiv_lock);
-       if (IS_ERR(hw)) {
-               ret = PTR_ERR(hw);
-               goto err_out_div_clk_hw;
-       }
-
-       pll_10nm->bit_clk_hw = hw;
-
-       snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->id);
-       snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
-
-       /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
-       hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
-                                         CLK_SET_RATE_PARENT, 1, 8);
-       if (IS_ERR(hw)) {
-               ret = PTR_ERR(hw);
-               goto err_bit_clk_hw;
-       }
-
-       pll_10nm->byte_clk_hw = hw;
-       hw_data->hws[DSI_BYTE_PLL_CLK] = hw;
-
-       snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id);
-       snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
-
-       hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
-                                         0, 1, 2);
-       if (IS_ERR(hw)) {
-               ret = PTR_ERR(hw);
-               goto err_byte_clk_hw;
-       }
-
-       pll_10nm->by_2_bit_clk_hw = hw;
-
-       snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);
-       snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
-
-       hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
-                                         0, 1, 4);
-       if (IS_ERR(hw)) {
-               ret = PTR_ERR(hw);
-               goto err_by_2_bit_clk_hw;
-       }
-
-       pll_10nm->post_out_div_clk_hw = hw;
-
-       snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_10nm->id);
-       snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
-       snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id);
-       snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
-       snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);
-
-       hw = clk_hw_register_mux(dev, clk_name,
-                                ((const char *[]){
-                                parent, parent2, parent3, parent4
-                                }), 4, 0, pll_10nm->phy_cmn_mmio +
-                                REG_DSI_10nm_PHY_CMN_CLK_CFG1,
-                                0, 2, 0, NULL);
-       if (IS_ERR(hw)) {
-               ret = PTR_ERR(hw);
-               goto err_post_out_div_clk_hw;
-       }
-
-       pll_10nm->pclk_mux_hw = hw;
-
-       snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->id);
-       snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id);
-
-       /* PIX CLK DIV: DIV_CTRL_7_4 */
-       hw = clk_hw_register_divider(dev, clk_name, parent,
-                                    0, pll_10nm->phy_cmn_mmio +
-                                       REG_DSI_10nm_PHY_CMN_CLK_CFG0,
-                                    4, 4, CLK_DIVIDER_ONE_BASED,
-                                    &pll_10nm->postdiv_lock);
-       if (IS_ERR(hw)) {
-               ret = PTR_ERR(hw);
-               goto err_pclk_mux_hw;
-       }
-
-       pll_10nm->out_dsiclk_hw = hw;
-       hw_data->hws[DSI_PIXEL_PLL_CLK] = hw;
-
-       hw_data->num = NUM_PROVIDED_CLKS;
-       pll_10nm->hw_data = hw_data;
-
-       ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
-                                    pll_10nm->hw_data);
-       if (ret) {
-               DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
-               goto err_dsiclk_hw;
-       }
-
-       return 0;
-
-err_dsiclk_hw:
-       clk_hw_unregister_divider(pll_10nm->out_dsiclk_hw);
-err_pclk_mux_hw:
-       clk_hw_unregister_mux(pll_10nm->pclk_mux_hw);
-err_post_out_div_clk_hw:
-       clk_hw_unregister_fixed_factor(pll_10nm->post_out_div_clk_hw);
-err_by_2_bit_clk_hw:
-       clk_hw_unregister_fixed_factor(pll_10nm->by_2_bit_clk_hw);
-err_byte_clk_hw:
-       clk_hw_unregister_fixed_factor(pll_10nm->byte_clk_hw);
-err_bit_clk_hw:
-       clk_hw_unregister_divider(pll_10nm->bit_clk_hw);
-err_out_div_clk_hw:
-       clk_hw_unregister_divider(pll_10nm->out_div_clk_hw);
-err_base_clk_hw:
-       clk_hw_unregister(&pll_10nm->base.clk_hw);
-
-       return ret;
-}
-
-struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
-{
-       struct dsi_pll_10nm *pll_10nm;
-       struct msm_dsi_pll *pll;
-       int ret;
-
-       pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL);
-       if (!pll_10nm)
-               return ERR_PTR(-ENOMEM);
-
-       DBG("DSI PLL%d", id);
-
-       pll_10nm->pdev = pdev;
-       pll_10nm->id = id;
-       pll_10nm_list[id] = pll_10nm;
-
-       pll_10nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
-       if (IS_ERR_OR_NULL(pll_10nm->phy_cmn_mmio)) {
-               DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
-               return ERR_PTR(-ENOMEM);
-       }
-
-       pll_10nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
-       if (IS_ERR_OR_NULL(pll_10nm->mmio)) {
-               DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
-               return ERR_PTR(-ENOMEM);
-       }
-
-       spin_lock_init(&pll_10nm->postdiv_lock);
-
-       pll = &pll_10nm->base;
-       pll->min_rate = 1000000000UL;
-       pll->max_rate = 3500000000UL;
-       pll->get_provider = dsi_pll_10nm_get_provider;
-       pll->destroy = dsi_pll_10nm_destroy;
-       pll->save_state = dsi_pll_10nm_save_state;
-       pll->restore_state = dsi_pll_10nm_restore_state;
-       pll->set_usecase = dsi_pll_10nm_set_usecase;
-
-       pll_10nm->vco_delay = 1;
-
-       ret = pll_10nm_register(pll_10nm);
-       if (ret) {
-               DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
-               return ERR_PTR(ret);
-       }
-
-       /* TODO: Remove this when we have proper display handover support */
-       msm_dsi_pll_save_state(pll);
-
-       return pll;
-}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
deleted file mode 100644 (file)
index f847376..0000000
+++ /dev/null
@@ -1,1096 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-
-#include "dsi_pll.h"
-#include "dsi.xml.h"
-
-/*
- * DSI PLL 14nm - clock diagram (e.g. DSI0):
- *
- *         dsi0n1_postdiv_clk
- *                         |
- *                         |
- *                 +----+  |  +----+
- *  dsi0vco_clk ---| n1 |--o--| /8 |-- dsi0pllbyte
- *                 +----+  |  +----+
- *                         |           dsi0n1_postdivby2_clk
- *                         |   +----+  |
- *                         o---| /2 |--o--|\
- *                         |   +----+     | \   +----+
- *                         |              |  |--| n2 |-- dsi0pll
- *                         o--------------| /   +----+
- *                                        |/
- */
-
-#define POLL_MAX_READS                 15
-#define POLL_TIMEOUT_US                        1000
-
-#define NUM_PROVIDED_CLKS              2
-
-#define VCO_REF_CLK_RATE               19200000
-#define VCO_MIN_RATE                   1300000000UL
-#define VCO_MAX_RATE                   2600000000UL
-
-#define DSI_BYTE_PLL_CLK               0
-#define DSI_PIXEL_PLL_CLK              1
-
-#define DSI_PLL_DEFAULT_VCO_POSTDIV    1
-
-struct dsi_pll_input {
-       u32 fref;       /* reference clk */
-       u32 fdata;      /* bit clock rate */
-       u32 dsiclk_sel; /* Mux configuration (see diagram) */
-       u32 ssc_en;     /* SSC enable/disable */
-       u32 ldo_en;
-
-       /* fixed params */
-       u32 refclk_dbler_en;
-       u32 vco_measure_time;
-       u32 kvco_measure_time;
-       u32 bandgap_timer;
-       u32 pll_wakeup_timer;
-       u32 plllock_cnt;
-       u32 plllock_rng;
-       u32 ssc_center;
-       u32 ssc_adj_period;
-       u32 ssc_spread;
-       u32 ssc_freq;
-       u32 pll_ie_trim;
-       u32 pll_ip_trim;
-       u32 pll_iptat_trim;
-       u32 pll_cpcset_cur;
-       u32 pll_cpmset_cur;
-
-       u32 pll_icpmset;
-       u32 pll_icpcset;
-
-       u32 pll_icpmset_p;
-       u32 pll_icpmset_m;
-
-       u32 pll_icpcset_p;
-       u32 pll_icpcset_m;
-
-       u32 pll_lpf_res1;
-       u32 pll_lpf_cap1;
-       u32 pll_lpf_cap2;
-       u32 pll_c3ctrl;
-       u32 pll_r3ctrl;
-};
-
-struct dsi_pll_output {
-       u32 pll_txclk_en;
-       u32 dec_start;
-       u32 div_frac_start;
-       u32 ssc_period;
-       u32 ssc_step_size;
-       u32 plllock_cmp;
-       u32 pll_vco_div_ref;
-       u32 pll_vco_count;
-       u32 pll_kvco_div_ref;
-       u32 pll_kvco_count;
-       u32 pll_misc1;
-       u32 pll_lpf2_postdiv;
-       u32 pll_resetsm_cntrl;
-       u32 pll_resetsm_cntrl2;
-       u32 pll_resetsm_cntrl5;
-       u32 pll_kvco_code;
-
-       u32 cmn_clk_cfg0;
-       u32 cmn_clk_cfg1;
-       u32 cmn_ldo_cntrl;
-
-       u32 pll_postdiv;
-       u32 fcvo;
-};
-
-struct pll_14nm_cached_state {
-       unsigned long vco_rate;
-       u8 n2postdiv;
-       u8 n1postdiv;
-};
-
-struct dsi_pll_14nm {
-       struct msm_dsi_pll base;
-
-       int id;
-       struct platform_device *pdev;
-
-       void __iomem *phy_cmn_mmio;
-       void __iomem *mmio;
-
-       int vco_delay;
-
-       struct dsi_pll_input in;
-       struct dsi_pll_output out;
-
-       /* protects REG_DSI_14nm_PHY_CMN_CLK_CFG0 register */
-       spinlock_t postdiv_lock;
-
-       u64 vco_current_rate;
-       u64 vco_ref_clk_rate;
-
-       /* private clocks: */
-       struct clk_hw *hws[NUM_DSI_CLOCKS_MAX];
-       u32 num_hws;
-
-       /* clock-provider: */
-       struct clk_hw_onecell_data *hw_data;
-
-       struct pll_14nm_cached_state cached_state;
-
-       enum msm_dsi_phy_usecase uc;
-       struct dsi_pll_14nm *slave;
-};
-
-#define to_pll_14nm(x) container_of(x, struct dsi_pll_14nm, base)
-
-/*
- * Private struct for N1/N2 post-divider clocks. These clocks are similar to
- * the generic clk_divider class of clocks. The only difference is that it
- * also sets the slave DSI PLL's post-dividers if in Dual DSI mode
- */
-struct dsi_pll_14nm_postdiv {
-       struct clk_hw hw;
-
-       /* divider params */
-       u8 shift;
-       u8 width;
-       u8 flags; /* same flags as used by clk_divider struct */
-
-       struct dsi_pll_14nm *pll;
-};
-
-#define to_pll_14nm_postdiv(_hw) container_of(_hw, struct dsi_pll_14nm_postdiv, hw)
-
-/*
- * Global list of private DSI PLL struct pointers. We need this for Dual DSI
- * mode, where the master PLL's clk_ops needs to access the slave's private data
- */
-static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX];
-
-static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,
-                                   u32 nb_tries, u32 timeout_us)
-{
-       bool pll_locked = false;
-       void __iomem *base = pll_14nm->mmio;
-       u32 tries, val;
-
-       tries = nb_tries;
-       while (tries--) {
-               val = pll_read(base +
-                              REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
-               pll_locked = !!(val & BIT(5));
-
-               if (pll_locked)
-                       break;
-
-               udelay(timeout_us);
-       }
-
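-       /* the ready bit never latched; retry the same number of polls
-        * against BIT(0) of the status register
-        */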
-       if (!pll_locked) {
-               tries = nb_tries;
-               while (tries--) {
-                       val = pll_read(base +
-                               REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
-                       pll_locked = !!(val & BIT(0));
-
-                       if (pll_locked)
-                               break;
-
-                       udelay(timeout_us);
-               }
-       }
-
-       DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
-
-       return pll_locked;
-}
-
-static void dsi_pll_14nm_input_init(struct dsi_pll_14nm *pll)
-{
-       pll->in.fref = pll->vco_ref_clk_rate;
-       pll->in.fdata = 0;
-       pll->in.dsiclk_sel = 1; /* Use the /2 path in Mux */
-       pll->in.ldo_en = 0;     /* disabled for now */
-
-       /* fixed input */
-       pll->in.refclk_dbler_en = 0;
-       pll->in.vco_measure_time = 5;
-       pll->in.kvco_measure_time = 5;
-       pll->in.bandgap_timer = 4;
-       pll->in.pll_wakeup_timer = 5;
-       pll->in.plllock_cnt = 1;
-       pll->in.plllock_rng = 0;
-
-       /*
-        * SSC is enabled by default. We might need DT props for configuring
-        * some SSC params like PPM and center/down spread etc.
-        */
-       pll->in.ssc_en = 1;
-       pll->in.ssc_center = 0;         /* down spread by default */
-       pll->in.ssc_spread = 5;         /* PPM / 1000 */
-       pll->in.ssc_freq = 31500;       /* default recommended */
-       pll->in.ssc_adj_period = 37;
-
-       pll->in.pll_ie_trim = 4;
-       pll->in.pll_ip_trim = 4;
-       pll->in.pll_cpcset_cur = 1;
-       pll->in.pll_cpmset_cur = 1;
-       pll->in.pll_icpmset = 4;
-       pll->in.pll_icpcset = 4;
-       pll->in.pll_icpmset_p = 0;
-       pll->in.pll_icpmset_m = 0;
-       pll->in.pll_icpcset_p = 0;
-       pll->in.pll_icpcset_m = 0;
-       pll->in.pll_lpf_res1 = 3;
-       pll->in.pll_lpf_cap1 = 11;
-       pll->in.pll_lpf_cap2 = 1;
-       pll->in.pll_iptat_trim = 7;
-       pll->in.pll_c3ctrl = 2;
-       pll->in.pll_r3ctrl = 1;
-}
-
-#define CEIL(x, y)             (((x) + ((y) - 1)) / (y))
-
-static void pll_14nm_ssc_calc(struct dsi_pll_14nm *pll)
-{
-       u32 period, ssc_period;
-       u32 ref, rem;
-       u64 step_size;
-
-       DBG("vco=%lld ref=%lld", pll->vco_current_rate, pll->vco_ref_clk_rate);
-
-       ssc_period = pll->in.ssc_freq / 500;
-       period = (u32)pll->vco_ref_clk_rate / 1000;
-       ssc_period  = CEIL(period, ssc_period);
-       ssc_period -= 1;
-       pll->out.ssc_period = ssc_period;
-
-       DBG("ssc freq=%d spread=%d period=%d", pll->in.ssc_freq,
-           pll->in.ssc_spread, pll->out.ssc_period);
-
-       step_size = (u32)pll->vco_current_rate;
-       ref = pll->vco_ref_clk_rate;
-       ref /= 1000;
-       step_size = div_u64(step_size, ref);
-       step_size <<= 20;
-       step_size = div_u64(step_size, 1000);
-       step_size *= pll->in.ssc_spread;
-       step_size = div_u64(step_size, 1000);
-       step_size *= (pll->in.ssc_adj_period + 1);
-
-       rem = 0;
-       step_size = div_u64_rem(step_size, ssc_period + 1, &rem);
-       if (rem)
-               step_size++;
-
-       DBG("step_size=%lld", step_size);
-
-       step_size &= 0x0ffff;   /* take lower 16 bits */
-
-       pll->out.ssc_step_size = step_size;
-}
-
-static void pll_14nm_dec_frac_calc(struct dsi_pll_14nm *pll)
-{
-       struct dsi_pll_input *pin = &pll->in;
-       struct dsi_pll_output *pout = &pll->out;
-       u64 multiplier = BIT(20);
-       u64 dec_start_multiple, dec_start, pll_comp_val;
-       u32 duration, div_frac_start;
-       u64 vco_clk_rate = pll->vco_current_rate;
-       u64 fref = pll->vco_ref_clk_rate;
-
-       DBG("vco_clk_rate=%lld ref_clk_rate=%lld", vco_clk_rate, fref);
-
-       dec_start_multiple = div_u64(vco_clk_rate * multiplier, fref);
-       div_u64_rem(dec_start_multiple, multiplier, &div_frac_start);
-
-       dec_start = div_u64(dec_start_multiple, multiplier);
-
-       pout->dec_start = (u32)dec_start;
-       pout->div_frac_start = div_frac_start;
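-       /* e.g. vco_clk_rate = 1.8 GHz and fref = 19.2 MHz give a ratio of
-        * 93.75, so dec_start = 93 and div_frac_start = 0.75 * 2^20 = 786432
-        */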
-
-       if (pin->plllock_cnt == 0)
-               duration = 1024;
-       else if (pin->plllock_cnt == 1)
-               duration = 256;
-       else if (pin->plllock_cnt == 2)
-               duration = 128;
-       else
-               duration = 32;
-
-       pll_comp_val = duration * dec_start_multiple;
-       pll_comp_val = div_u64(pll_comp_val, multiplier);
-       do_div(pll_comp_val, 10);
-
-       pout->plllock_cmp = (u32)pll_comp_val;
-
-       pout->pll_txclk_en = 1;
-       pout->cmn_ldo_cntrl = 0x3c;
-}
-
-static u32 pll_14nm_kvco_slop(u32 vrate)
-{
-       u32 slop = 0;
-
-       if (vrate > VCO_MIN_RATE && vrate <= 1800000000UL)
-               slop =  600;
-       else if (vrate > 1800000000UL && vrate < 2300000000UL)
-               slop = 400;
-       else if (vrate > 2300000000UL && vrate < VCO_MAX_RATE)
-               slop = 280;
-
-       return slop;
-}
-
-static void pll_14nm_calc_vco_count(struct dsi_pll_14nm *pll)
-{
-       struct dsi_pll_input *pin = &pll->in;
-       struct dsi_pll_output *pout = &pll->out;
-       u64 vco_clk_rate = pll->vco_current_rate;
-       u64 fref = pll->vco_ref_clk_rate;
-       u64 data;
-       u32 cnt;
-
-       data = fref * pin->vco_measure_time;
-       do_div(data, 1000000);
-       data &= 0x03ff; /* 10 bits */
-       data -= 2;
-       pout->pll_vco_div_ref = data;
-
-       data = div_u64(vco_clk_rate, 1000000);  /* unit is MHz */
-       data *= pin->vco_measure_time;
-       do_div(data, 10);
-       pout->pll_vco_count = data;
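-       /* e.g. fref = 19.2 MHz and vco_measure_time = 5 give
-        * pll_vco_div_ref = 96 - 2 = 94; at a 1.8 GHz VCO,
-        * pll_vco_count = 1800 * 5 / 10 = 900
-        */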
-
-       data = fref * pin->kvco_measure_time;
-       do_div(data, 1000000);
-       data &= 0x03ff; /* 10 bits */
-       data -= 1;
-       pout->pll_kvco_div_ref = data;
-
-       cnt = pll_14nm_kvco_slop(vco_clk_rate);
-       cnt *= 2;
-       cnt /= 100;
-       cnt *= pin->kvco_measure_time;
-       pout->pll_kvco_count = cnt;
-
-       pout->pll_misc1 = 16;
-       pout->pll_resetsm_cntrl = 48;
-       pout->pll_resetsm_cntrl2 = pin->bandgap_timer << 3;
-       pout->pll_resetsm_cntrl5 = pin->pll_wakeup_timer;
-       pout->pll_kvco_code = 0;
-}
-
-static void pll_db_commit_ssc(struct dsi_pll_14nm *pll)
-{
-       void __iomem *base = pll->mmio;
-       struct dsi_pll_input *pin = &pll->in;
-       struct dsi_pll_output *pout = &pll->out;
-       u8 data;
-
-       data = pin->ssc_adj_period;
-       data &= 0x0ff;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1, data);
-       data = (pin->ssc_adj_period >> 8);
-       data &= 0x03;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2, data);
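-       /* ssc_adj_period is 10 bits, written 8 + 2 across ADJ_PER1/2;
-        * ssc_period and ssc_step_size below are each split 8 + 8
-        */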
-
-       data = pout->ssc_period;
-       data &= 0x0ff;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER1, data);
-       data = (pout->ssc_period >> 8);
-       data &= 0x0ff;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER2, data);
-
-       data = pout->ssc_step_size;
-       data &= 0x0ff;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1, data);
-       data = (pout->ssc_step_size >> 8);
-       data &= 0x0ff;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2, data);
-
-       data = (pin->ssc_center & 0x01);
-       data <<= 1;
-       data |= 0x01; /* enable */
-       pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER, data);
-
-       wmb();  /* make sure register committed */
-}
-
-static void pll_db_commit_common(struct dsi_pll_14nm *pll,
-                                struct dsi_pll_input *pin,
-                                struct dsi_pll_output *pout)
-{
-       void __iomem *base = pll->mmio;
-       u8 data;
-
-       /* configure the non-frequency-dependent PLL registers */
-       data = 0;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET, data);
-
-       data = pout->pll_txclk_en;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_TXCLK_EN, data);
-
-       data = pout->pll_resetsm_cntrl;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL, data);
-       data = pout->pll_resetsm_cntrl2;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2, data);
-       data = pout->pll_resetsm_cntrl5;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5, data);
-
-       data = pout->pll_vco_div_ref & 0xff;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1, data);
-       data = (pout->pll_vco_div_ref >> 8) & 0x3;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2, data);
-
-       data = pout->pll_kvco_div_ref & 0xff;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1, data);
-       data = (pout->pll_kvco_div_ref >> 8) & 0x3;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2, data);
-
-       data = pout->pll_misc1;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_MISC1, data);
-
-       data = pin->pll_ie_trim;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_IE_TRIM, data);
-
-       data = pin->pll_ip_trim;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_IP_TRIM, data);
-
-       data = pin->pll_cpmset_cur << 3 | pin->pll_cpcset_cur;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_CP_SET_CUR, data);
-
-       data = pin->pll_icpcset_p << 3 | pin->pll_icpcset_m;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPCSET, data);
-
-       data = pin->pll_icpmset_p << 3 | pin->pll_icpmset_m;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPMSET, data);
-
-       data = pin->pll_icpmset << 3 | pin->pll_icpcset;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICP_SET, data);
-
-       data = pin->pll_lpf_cap2 << 4 | pin->pll_lpf_cap1;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF1, data);
-
-       data = pin->pll_iptat_trim;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_IPTAT_TRIM, data);
-
-       data = pin->pll_c3ctrl | pin->pll_r3ctrl << 4;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_CRCTRL, data);
-}
-
-static void pll_14nm_software_reset(struct dsi_pll_14nm *pll_14nm)
-{
-       void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
-
-       /* de-assert PLL start and apply PLL SW reset */
-
-       /* stop pll */
-       pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);
-
-       /* pll sw reset */
-       pll_write_udelay(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x20, 10);
-       wmb();  /* make sure register committed */
-
-       pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0);
-       wmb();  /* make sure register committed */
-}
-
-static void pll_db_commit_14nm(struct dsi_pll_14nm *pll,
-                              struct dsi_pll_input *pin,
-                              struct dsi_pll_output *pout)
-{
-       void __iomem *base = pll->mmio;
-       void __iomem *cmn_base = pll->phy_cmn_mmio;
-       u8 data;
-
-       DBG("DSI%d PLL", pll->id);
-
-       data = pout->cmn_ldo_cntrl;
-       pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, data);
-
-       pll_db_commit_common(pll, pin, pout);
-
-       pll_14nm_software_reset(pll);
-
-       data = pin->dsiclk_sel; /* set dsiclk_sel = 1  */
-       pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG1, data);
-
-       data = 0xff; /* data, clk, pll normal operation */
-       pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_0, data);
-
-       /* configure the frequency-dependent PLL registers */
-       data = pout->dec_start;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_DEC_START, data);
-
-       data = pout->div_frac_start & 0xff;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1, data);
-       data = (pout->div_frac_start >> 8) & 0xff;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2, data);
-       data = (pout->div_frac_start >> 16) & 0xf;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3, data);
-
-       data = pout->plllock_cmp & 0xff;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1, data);
-
-       data = (pout->plllock_cmp >> 8) & 0xff;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2, data);
-
-       data = (pout->plllock_cmp >> 16) & 0x3;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3, data);
-
-       data = pin->plllock_cnt << 1 | pin->plllock_rng << 3;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN, data);
-
-       data = pout->pll_vco_count & 0xff;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT1, data);
-       data = (pout->pll_vco_count >> 8) & 0xff;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT2, data);
-
-       data = pout->pll_kvco_count & 0xff;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT1, data);
-       data = (pout->pll_kvco_count >> 8) & 0x3;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT2, data);
-
-       data = (pout->pll_postdiv - 1) << 4 | pin->pll_lpf_res1;
-       pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV, data);
-
-       if (pin->ssc_en)
-               pll_db_commit_ssc(pll);
-
-       wmb();  /* make sure register committed */
-}
-
-/*
- * VCO clock Callbacks
- */
-static int dsi_pll_14nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
-                                    unsigned long parent_rate)
-{
-       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-       struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
-       struct dsi_pll_input *pin = &pll_14nm->in;
-       struct dsi_pll_output *pout = &pll_14nm->out;
-
-       DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_14nm->id, rate,
-           parent_rate);
-
-       pll_14nm->vco_current_rate = rate;
-       pll_14nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
-
-       dsi_pll_14nm_input_init(pll_14nm);
-
-       /*
-        * This configures the post divider internal to the VCO. It's
-        * fixed to divide by 1 for now.
-        *
-        * tx_band = pll_postdiv.
-        * 0: divided by 1
-        * 1: divided by 2
-        * 2: divided by 4
-        * 3: divided by 8
-        */
-       pout->pll_postdiv = DSI_PLL_DEFAULT_VCO_POSTDIV;
-
-       pll_14nm_dec_frac_calc(pll_14nm);
-
-       if (pin->ssc_en)
-               pll_14nm_ssc_calc(pll_14nm);
-
-       pll_14nm_calc_vco_count(pll_14nm);
-
-       /* commit the slave DSI PLL registers if we're master. Note that we
-        * don't lock the slave PLL. We just ensure that the PLL/PHY registers
-        * of the master and slave are identical
-        */
-       if (pll_14nm->uc == MSM_DSI_PHY_MASTER) {
-               struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
-
-               pll_db_commit_14nm(pll_14nm_slave, pin, pout);
-       }
-
-       pll_db_commit_14nm(pll_14nm, pin, pout);
-
-       return 0;
-}
-
-static unsigned long dsi_pll_14nm_vco_recalc_rate(struct clk_hw *hw,
-                                                 unsigned long parent_rate)
-{
-       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-       struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
-       void __iomem *base = pll_14nm->mmio;
-       u64 vco_rate, multiplier = BIT(20);
-       u32 div_frac_start;
-       u32 dec_start;
-       u64 ref_clk = parent_rate;
-
-       dec_start = pll_read(base + REG_DSI_14nm_PHY_PLL_DEC_START);
-       dec_start &= 0x0ff;
-
-       DBG("dec_start = %x", dec_start);
-
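-       /* div_frac_start is 20 bits wide, reassembled 4 + 8 + 8 from
-        * DIV_FRAC_START3/2/1
-        */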
-       div_frac_start = (pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3)
-                               & 0xf) << 16;
-       div_frac_start |= (pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2)
-                               & 0xff) << 8;
-       div_frac_start |= pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1)
-                               & 0xff;
-
-       DBG("div_frac_start = %x", div_frac_start);
-
-       vco_rate = ref_clk * dec_start;
-
-       vco_rate += ((ref_clk * div_frac_start) / multiplier);
-
-       /*
-        * Recalculating the rate from dec_start and div_frac_start doesn't
-        * yield exactly the rate we originally set. Convert the freq to kHz,
-        * round it up and convert it back to Hz.
-        */
-       vco_rate = DIV_ROUND_UP_ULL(vco_rate, 1000) * 1000;
-
-       DBG("returning vco rate = %lu", (unsigned long)vco_rate);
-
-       return (unsigned long)vco_rate;
-}
-
-static const struct clk_ops clk_ops_dsi_pll_14nm_vco = {
-       .round_rate = msm_dsi_pll_helper_clk_round_rate,
-       .set_rate = dsi_pll_14nm_vco_set_rate,
-       .recalc_rate = dsi_pll_14nm_vco_recalc_rate,
-       .prepare = msm_dsi_pll_helper_clk_prepare,
-       .unprepare = msm_dsi_pll_helper_clk_unprepare,
-};
-
-/*
- * N1 and N2 post-divider clock callbacks
- */
-#define div_mask(width)        ((1 << (width)) - 1)
-static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw,
-                                                     unsigned long parent_rate)
-{
-       struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
-       struct dsi_pll_14nm *pll_14nm = postdiv->pll;
-       void __iomem *base = pll_14nm->phy_cmn_mmio;
-       u8 shift = postdiv->shift;
-       u8 width = postdiv->width;
-       u32 val;
-
-       DBG("DSI%d PLL parent rate=%lu", pll_14nm->id, parent_rate);
-
-       val = pll_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0) >> shift;
-       val &= div_mask(width);
-
-       return divider_recalc_rate(hw, parent_rate, val, NULL,
-                                  postdiv->flags, width);
-}
-
-static long dsi_pll_14nm_postdiv_round_rate(struct clk_hw *hw,
-                                           unsigned long rate,
-                                           unsigned long *prate)
-{
-       struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
-       struct dsi_pll_14nm *pll_14nm = postdiv->pll;
-
-       DBG("DSI%d PLL parent rate=%lu", pll_14nm->id, rate);
-
-       return divider_round_rate(hw, rate, prate, NULL,
-                                 postdiv->width,
-                                 postdiv->flags);
-}
-
-static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
-                                        unsigned long parent_rate)
-{
-       struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
-       struct dsi_pll_14nm *pll_14nm = postdiv->pll;
-       void __iomem *base = pll_14nm->phy_cmn_mmio;
-       spinlock_t *lock = &pll_14nm->postdiv_lock;
-       u8 shift = postdiv->shift;
-       u8 width = postdiv->width;
-       unsigned int value;
-       unsigned long flags = 0;
-       u32 val;
-
-       DBG("DSI%d PLL parent rate=%lu parent rate %lu", pll_14nm->id, rate,
-           parent_rate);
-
-       value = divider_get_val(rate, parent_rate, NULL, postdiv->width,
-                               postdiv->flags);
-
-       spin_lock_irqsave(lock, flags);
-
-       val = pll_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
-       val &= ~(div_mask(width) << shift);
-
-       val |= value << shift;
-       pll_write(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);
-
-       /* If we're master in dual DSI mode, then the slave PLL's post-dividers
-        * follow the master's post dividers
-        */
-       if (pll_14nm->uc == MSM_DSI_PHY_MASTER) {
-               struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
-               void __iomem *slave_base = pll_14nm_slave->phy_cmn_mmio;
-
-               pll_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);
-       }
-
-       spin_unlock_irqrestore(lock, flags);
-
-       return 0;
-}
-
-static const struct clk_ops clk_ops_dsi_pll_14nm_postdiv = {
-       .recalc_rate = dsi_pll_14nm_postdiv_recalc_rate,
-       .round_rate = dsi_pll_14nm_postdiv_round_rate,
-       .set_rate = dsi_pll_14nm_postdiv_set_rate,
-};
-
-/*
- * PLL Callbacks
- */
-
-static int dsi_pll_14nm_enable_seq(struct msm_dsi_pll *pll)
-{
-       struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
-       void __iomem *base = pll_14nm->mmio;
-       void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
-       bool locked;
-
-       DBG("");
-
-       pll_write(base + REG_DSI_14nm_PHY_PLL_VREF_CFG1, 0x10);
-       pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 1);
-
-       locked = pll_14nm_poll_for_ready(pll_14nm, POLL_MAX_READS,
-                                        POLL_TIMEOUT_US);
-
-       if (unlikely(!locked))
-               DRM_DEV_ERROR(&pll_14nm->pdev->dev, "DSI PLL lock failed\n");
-       else
-               DBG("DSI PLL lock success");
-
-       return locked ? 0 : -EINVAL;
-}
-
-static void dsi_pll_14nm_disable_seq(struct msm_dsi_pll *pll)
-{
-       struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
-       void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
-
-       DBG("");
-
-       pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);
-}
-
-static void dsi_pll_14nm_save_state(struct msm_dsi_pll *pll)
-{
-       struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
-       struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state;
-       void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
-       u32 data;
-
-       data = pll_read(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
-
-       cached_state->n1postdiv = data & 0xf;
-       cached_state->n2postdiv = (data >> 4) & 0xf;
-
-       DBG("DSI%d PLL save state %x %x", pll_14nm->id,
-           cached_state->n1postdiv, cached_state->n2postdiv);
-
-       cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);
-}
-
-static int dsi_pll_14nm_restore_state(struct msm_dsi_pll *pll)
-{
-       struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
-       struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state;
-       void __iomem *cmn_base = pll_14nm->phy_cmn_mmio;
-       u32 data;
-       int ret;
-
-       ret = dsi_pll_14nm_vco_set_rate(&pll->clk_hw,
-                                       cached_state->vco_rate, 0);
-       if (ret) {
-               DRM_DEV_ERROR(&pll_14nm->pdev->dev,
-                       "restore vco rate failed. ret=%d\n", ret);
-               return ret;
-       }
-
-       data = cached_state->n1postdiv | (cached_state->n2postdiv << 4);
-
-       DBG("DSI%d PLL restore state %x %x", pll_14nm->id,
-           cached_state->n1postdiv, cached_state->n2postdiv);
-
-       pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);
-
-       /* also restore post-dividers for slave DSI PLL */
-       if (pll_14nm->uc == MSM_DSI_PHY_MASTER) {
-               struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
-               void __iomem *slave_base = pll_14nm_slave->phy_cmn_mmio;
-
-               pll_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);
-       }
-
-       return 0;
-}
-
-static int dsi_pll_14nm_set_usecase(struct msm_dsi_pll *pll,
-                                   enum msm_dsi_phy_usecase uc)
-{
-       struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
-       void __iomem *base = pll_14nm->mmio;
-       u32 clkbuflr_en, bandgap = 0;
-
-       switch (uc) {
-       case MSM_DSI_PHY_STANDALONE:
-               clkbuflr_en = 0x1;
-               break;
-       case MSM_DSI_PHY_MASTER:
-               clkbuflr_en = 0x3;
-               pll_14nm->slave = pll_14nm_list[(pll_14nm->id + 1) % DSI_MAX];
-               break;
-       case MSM_DSI_PHY_SLAVE:
-               clkbuflr_en = 0x0;
-               bandgap = 0x3;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       pll_write(base + REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN, clkbuflr_en);
-       if (bandgap)
-               pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_BANDGAP, bandgap);
-
-       pll_14nm->uc = uc;
-
-       return 0;
-}
-
-static int dsi_pll_14nm_get_provider(struct msm_dsi_pll *pll,
-                                    struct clk **byte_clk_provider,
-                                    struct clk **pixel_clk_provider)
-{
-       struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
-       struct clk_hw_onecell_data *hw_data = pll_14nm->hw_data;
-
-       if (byte_clk_provider)
-               *byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;
-       if (pixel_clk_provider)
-               *pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;
-
-       return 0;
-}
-
-static void dsi_pll_14nm_destroy(struct msm_dsi_pll *pll)
-{
-       struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll);
-       struct platform_device *pdev = pll_14nm->pdev;
-       int num_hws = pll_14nm->num_hws;
-
-       of_clk_del_provider(pdev->dev.of_node);
-
-       while (num_hws--)
-               clk_hw_unregister(pll_14nm->hws[num_hws]);
-}
-
-static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm,
-                                               const char *name,
-                                               const char *parent_name,
-                                               unsigned long flags,
-                                               u8 shift)
-{
-       struct dsi_pll_14nm_postdiv *pll_postdiv;
-       struct device *dev = &pll_14nm->pdev->dev;
-       struct clk_init_data postdiv_init = {
-               .parent_names = (const char *[]) { parent_name },
-               .num_parents = 1,
-               .name = name,
-               .flags = flags,
-               .ops = &clk_ops_dsi_pll_14nm_postdiv,
-       };
-       int ret;
-
-       pll_postdiv = devm_kzalloc(dev, sizeof(*pll_postdiv), GFP_KERNEL);
-       if (!pll_postdiv)
-               return ERR_PTR(-ENOMEM);
-
-       pll_postdiv->pll = pll_14nm;
-       pll_postdiv->shift = shift;
-       /* both N1 and N2 postdividers are 4 bits wide */
-       pll_postdiv->width = 4;
-       /* range of each divider is from 1 to 15 */
-       pll_postdiv->flags = CLK_DIVIDER_ONE_BASED;
-       pll_postdiv->hw.init = &postdiv_init;
-
-       ret = clk_hw_register(dev, &pll_postdiv->hw);
-       if (ret)
-               return ERR_PTR(ret);
-
-       return &pll_postdiv->hw;
-}
-
-static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm)
-{
-       char clk_name[32], parent[32], vco_name[32];
-       struct clk_init_data vco_init = {
-               .parent_names = (const char *[]){ "xo" },
-               .num_parents = 1,
-               .name = vco_name,
-               .flags = CLK_IGNORE_UNUSED,
-               .ops = &clk_ops_dsi_pll_14nm_vco,
-       };
-       struct device *dev = &pll_14nm->pdev->dev;
-       struct clk_hw **hws = pll_14nm->hws;
-       struct clk_hw_onecell_data *hw_data;
-       struct clk_hw *hw;
-       int num = 0;
-       int ret;
-
-       DBG("DSI%d", pll_14nm->id);
-
-       hw_data = devm_kzalloc(dev, sizeof(*hw_data) +
-                              NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),
-                              GFP_KERNEL);
-       if (!hw_data)
-               return -ENOMEM;
-
-       snprintf(vco_name, 32, "dsi%dvco_clk", pll_14nm->id);
-       pll_14nm->base.clk_hw.init = &vco_init;
-
-       ret = clk_hw_register(dev, &pll_14nm->base.clk_hw);
-       if (ret)
-               return ret;
-
-       hws[num++] = &pll_14nm->base.clk_hw;
-
-       snprintf(clk_name, 32, "dsi%dn1_postdiv_clk", pll_14nm->id);
-       snprintf(parent, 32, "dsi%dvco_clk", pll_14nm->id);
-
-       /* N1 postdiv, bits 0-3 in REG_DSI_14nm_PHY_CMN_CLK_CFG0 */
-       hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent,
-                                      CLK_SET_RATE_PARENT, 0);
-       if (IS_ERR(hw))
-               return PTR_ERR(hw);
-
-       hws[num++] = hw;
-
-       snprintf(clk_name, 32, "dsi%dpllbyte", pll_14nm->id);
-       snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->id);
-
-       /* DSI Byte clock = VCO_CLK / N1 / 8 */
-       hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
-                                         CLK_SET_RATE_PARENT, 1, 8);
-       if (IS_ERR(hw))
-               return PTR_ERR(hw);
-
-       hws[num++] = hw;
-       hw_data->hws[DSI_BYTE_PLL_CLK] = hw;
-
-       snprintf(clk_name, 32, "dsi%dn1_postdivby2_clk", pll_14nm->id);
-       snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->id);
-
-       /*
-        * Skip the mux for now, force DSICLK_SEL to 1 and add a /2 divider
-        * on the way. Don't let it set the parent.
-        */
-       hw = clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 2);
-       if (IS_ERR(hw))
-               return PTR_ERR(hw);
-
-       hws[num++] = hw;
-
-       snprintf(clk_name, 32, "dsi%dpll", pll_14nm->id);
-       snprintf(parent, 32, "dsi%dn1_postdivby2_clk", pll_14nm->id);
-
-       /* DSI pixel clock = VCO_CLK / N1 / 2 / N2
-        * This is the output of N2 post-divider, bits 4-7 in
-        * REG_DSI_14nm_PHY_CMN_CLK_CFG0. Don't let it set parent.
-        */
-       hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent, 0, 4);
-       if (IS_ERR(hw))
-               return PTR_ERR(hw);
-
-       hws[num++] = hw;
-       hw_data->hws[DSI_PIXEL_PLL_CLK] = hw;
-
-       pll_14nm->num_hws = num;
-
-       hw_data->num = NUM_PROVIDED_CLKS;
-       pll_14nm->hw_data = hw_data;
-
-       ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
-                                    pll_14nm->hw_data);
-       if (ret) {
-               DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
-               return ret;
-       }
-
-       return 0;
-}
-
-struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
-{
-       struct dsi_pll_14nm *pll_14nm;
-       struct msm_dsi_pll *pll;
-       int ret;
-
-       if (!pdev)
-               return ERR_PTR(-ENODEV);
-
-       pll_14nm = devm_kzalloc(&pdev->dev, sizeof(*pll_14nm), GFP_KERNEL);
-       if (!pll_14nm)
-               return ERR_PTR(-ENOMEM);
-
-       DBG("PLL%d", id);
-
-       pll_14nm->pdev = pdev;
-       pll_14nm->id = id;
-       pll_14nm_list[id] = pll_14nm;
-
-       pll_14nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
-       if (IS_ERR_OR_NULL(pll_14nm->phy_cmn_mmio)) {
-               DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
-               return ERR_PTR(-ENOMEM);
-       }
-
-       pll_14nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
-       if (IS_ERR_OR_NULL(pll_14nm->mmio)) {
-               DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
-               return ERR_PTR(-ENOMEM);
-       }
-
-       spin_lock_init(&pll_14nm->postdiv_lock);
-
-       pll = &pll_14nm->base;
-       pll->min_rate = VCO_MIN_RATE;
-       pll->max_rate = VCO_MAX_RATE;
-       pll->get_provider = dsi_pll_14nm_get_provider;
-       pll->destroy = dsi_pll_14nm_destroy;
-       pll->disable_seq = dsi_pll_14nm_disable_seq;
-       pll->save_state = dsi_pll_14nm_save_state;
-       pll->restore_state = dsi_pll_14nm_restore_state;
-       pll->set_usecase = dsi_pll_14nm_set_usecase;
-
-       pll_14nm->vco_delay = 1;
-
-       pll->en_seq_cnt = 1;
-       pll->enable_seqs[0] = dsi_pll_14nm_enable_seq;
-
-       ret = pll_14nm_register(pll_14nm);
-       if (ret) {
-               DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
-               return ERR_PTR(ret);
-       }
-
-       return pll;
-}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
deleted file mode 100644 (file)
index 37a1f99..0000000
+++ /dev/null
@@ -1,643 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-
-#include "dsi_pll.h"
-#include "dsi.xml.h"
-
-/*
- * DSI PLL 28nm - clock diagram (e.g. DSI0):
- *
- *         dsi0analog_postdiv_clk
- *                             |         dsi0indirect_path_div2_clk
- *                             |          |
- *                   +------+  |  +----+  |  |\   dsi0byte_mux
- *  dsi0vco_clk --o--| DIV1 |--o--| /2 |--o--| \   |
- *                |  +------+     +----+     | m|  |  +----+
- *                |                          | u|--o--| /4 |-- dsi0pllbyte
- *                |                          | x|     +----+
- *                o--------------------------| /
- *                |                          |/
- *                |          +------+
- *                o----------| DIV3 |------------------------- dsi0pll
- *                           +------+
- */
-
-#define POLL_MAX_READS                 10
-#define POLL_TIMEOUT_US                50
-
-#define NUM_PROVIDED_CLKS              2
-
-#define VCO_REF_CLK_RATE               19200000
-#define VCO_MIN_RATE                   350000000
-#define VCO_MAX_RATE                   750000000
-
-#define DSI_BYTE_PLL_CLK               0
-#define DSI_PIXEL_PLL_CLK              1
-
-#define LPFR_LUT_SIZE                  10
-struct lpfr_cfg {
-       unsigned long vco_rate;
-       u32 resistance;
-};
-
-/* Loop filter resistance: */
-static const struct lpfr_cfg lpfr_lut[LPFR_LUT_SIZE] = {
-       { 479500000,  8 },
-       { 480000000, 11 },
-       { 575500000,  8 },
-       { 576000000, 12 },
-       { 610500000,  8 },
-       { 659500000,  9 },
-       { 671500000, 10 },
-       { 672000000, 14 },
-       { 708500000, 10 },
-       { 750000000, 11 },
-};
-
-struct pll_28nm_cached_state {
-       unsigned long vco_rate;
-       u8 postdiv3;
-       u8 postdiv1;
-       u8 byte_mux;
-};
-
-struct dsi_pll_28nm {
-       struct msm_dsi_pll base;
-
-       int id;
-       struct platform_device *pdev;
-       void __iomem *mmio;
-
-       int vco_delay;
-
-       /* private clocks: */
-       struct clk *clks[NUM_DSI_CLOCKS_MAX];
-       u32 num_clks;
-
-       /* clock-provider: */
-       struct clk *provided_clks[NUM_PROVIDED_CLKS];
-       struct clk_onecell_data clk_data;
-
-       struct pll_28nm_cached_state cached_state;
-};
-
-#define to_pll_28nm(x) container_of(x, struct dsi_pll_28nm, base)
-
-static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
-                               u32 nb_tries, u32 timeout_us)
-{
-       bool pll_locked = false;
-       u32 val;
-
-       while (nb_tries--) {
-               val = pll_read(pll_28nm->mmio + REG_DSI_28nm_PHY_PLL_STATUS);
-               pll_locked = !!(val & DSI_28nm_PHY_PLL_STATUS_PLL_RDY);
-
-               if (pll_locked)
-                       break;
-
-               udelay(timeout_us);
-       }
-       DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
-
-       return pll_locked;
-}
-
-static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)
-{
-       void __iomem *base = pll_28nm->mmio;
-
-       /*
-        * Add HW recommended delays after toggling the software
-        * reset bit off and back on.
-        */
-       pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG,
-                       DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);
-       pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, 0x00, 1);
-}
-
-/*
- * Clock Callbacks
- */
-static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
-               unsigned long parent_rate)
-{
-       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-       struct device *dev = &pll_28nm->pdev->dev;
-       void __iomem *base = pll_28nm->mmio;
-       unsigned long div_fbx1000, gen_vco_clk;
-       u32 refclk_cfg, frac_n_mode, frac_n_value;
-       u32 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3;
-       u32 cal_cfg10, cal_cfg11;
-       u32 rem;
-       int i;
-
-       VERB("rate=%lu, parent's=%lu", rate, parent_rate);
-
-       /* Force postdiv2 to be div-4 */
-       pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG, 3);
-
-       /* Configure the Loop filter resistance */
-       for (i = 0; i < LPFR_LUT_SIZE; i++)
-               if (rate <= lpfr_lut[i].vco_rate)
-                       break;
-       if (i == LPFR_LUT_SIZE) {
-               DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n",
-                               rate);
-               return -EINVAL;
-       }
-       pll_write(base + REG_DSI_28nm_PHY_PLL_LPFR_CFG, lpfr_lut[i].resistance);
-
-       /* Loop filter capacitance values: c1 and c2 */
-       pll_write(base + REG_DSI_28nm_PHY_PLL_LPFC1_CFG, 0x70);
-       pll_write(base + REG_DSI_28nm_PHY_PLL_LPFC2_CFG, 0x15);
-
-       rem = rate % VCO_REF_CLK_RATE;
-       if (rem) {
-               refclk_cfg = DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
-               frac_n_mode = 1;
-               div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500);
-               gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 500);
-       } else {
-               refclk_cfg = 0x0;
-               frac_n_mode = 0;
-               div_fbx1000 = rate / (VCO_REF_CLK_RATE / 1000);
-               gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 1000);
-       }
-
-       DBG("refclk_cfg = %d", refclk_cfg);
-
-       rem = div_fbx1000 % 1000;
-       frac_n_value = (rem << 16) / 1000;
-
-       DBG("div_fb = %lu", div_fbx1000);
-       DBG("frac_n_value = %d", frac_n_value);
-
-       DBG("Generated VCO Clock: %lu", gen_vco_clk);
-       rem = 0;
-       sdm_cfg1 = pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);
-       sdm_cfg1 &= ~DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
-       if (frac_n_mode) {
-               sdm_cfg0 = 0x0;
-               sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(0);
-               sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(
-                               (u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
-               sdm_cfg3 = frac_n_value >> 8;
-               sdm_cfg2 = frac_n_value & 0xff;
-       } else {
-               sdm_cfg0 = DSI_28nm_PHY_PLL_SDM_CFG0_BYP;
-               sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(
-                               (u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
-               sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(0);
-               sdm_cfg2 = 0;
-               sdm_cfg3 = 0;
-       }
-
-       DBG("sdm_cfg0=%d", sdm_cfg0);
-       DBG("sdm_cfg1=%d", sdm_cfg1);
-       DBG("sdm_cfg2=%d", sdm_cfg2);
-       DBG("sdm_cfg3=%d", sdm_cfg3);
-
-       cal_cfg11 = (u32)(gen_vco_clk / (256 * 1000000));
-       cal_cfg10 = (u32)((gen_vco_clk % (256 * 1000000)) / 1000000);
-       DBG("cal_cfg10=%d, cal_cfg11=%d", cal_cfg10, cal_cfg11);
-
-       pll_write(base + REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG, 0x02);
-       pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG3,    0x2b);
-       pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG4,    0x06);
-       pll_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,  0x0d);
-
-       pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1, sdm_cfg1);
-       pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2,
-               DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));
-       pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3,
-               DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));
-       pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG4, 0x00);
-
-       /* Add hardware recommended delay for correct PLL configuration */
-       if (pll_28nm->vco_delay)
-               udelay(pll_28nm->vco_delay);
-
-       pll_write(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG, refclk_cfg);
-       pll_write(base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG, 0x00);
-       pll_write(base + REG_DSI_28nm_PHY_PLL_VCOLPF_CFG, 0x31);
-       pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0,   sdm_cfg0);
-       pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG0,   0x12);
-       pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG6,   0x30);
-       pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG7,   0x00);
-       pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG8,   0x60);
-       pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG9,   0x00);
-       pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG10,  cal_cfg10 & 0xff);
-       pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG11,  cal_cfg11 & 0xff);
-       pll_write(base + REG_DSI_28nm_PHY_PLL_EFUSE_CFG,  0x20);
-
-       return 0;
-}
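To make the fractional-N setup above concrete, here is a standalone recomputation of the key values for a 600 MHz VCO request against the 19.2 MHz reference (a sketch of the same arithmetic, not driver code):

    #include <stdio.h>

    #define VCO_REF_CLK_RATE 19200000UL

    int main(void)
    {
        unsigned long rate = 600000000UL, div_fbx1000, rem;
        unsigned int frac_n_value, dc_offset;

        /* 600 MHz is not a multiple of 19.2 MHz -> frac-N mode, DBLR set */
        rem = rate % VCO_REF_CLK_RATE;                  /* 4800000 */

        /* feedback divider scaled by 1000, against a 38.4 MHz doubled ref */
        div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500);  /* 15625 */

        rem = div_fbx1000 % 1000;                       /* 625 */
        frac_n_value = (rem << 16) / 1000;              /* 40960 = 0xa000 */
        dc_offset = ((div_fbx1000 / 1000) & 0x3f) - 1;  /* 15 - 1 = 14 */

        /* SDM_CFG2 gets 0xa000 & 0xff = 0x00, SDM_CFG3 gets 0xa000 >> 8 = 0xa0 */
        printf("div_fbx1000=%lu frac=%u dc_offset=%u\n",
               div_fbx1000, frac_n_value, dc_offset);
        return 0;
    }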
-
-static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
-{
-       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-
-       return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
-                                       POLL_TIMEOUT_US);
-}
-
-static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
-               unsigned long parent_rate)
-{
-       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-       void __iomem *base = pll_28nm->mmio;
-       u32 sdm0, doubler, sdm_byp_div;
-       u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;
-       u32 ref_clk = VCO_REF_CLK_RATE;
-       unsigned long vco_rate;
-
-       VERB("parent_rate=%lu", parent_rate);
-
-       /* Check to see if the ref clk doubler is enabled */
-       doubler = pll_read(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG) &
-                       DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
-       ref_clk += (doubler * VCO_REF_CLK_RATE);
-
-       /* see if it is integer mode or sdm mode */
-       sdm0 = pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);
-       if (sdm0 & DSI_28nm_PHY_PLL_SDM_CFG0_BYP) {
-               /* integer mode */
-               sdm_byp_div = FIELD(
-                               pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0),
-                               DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV) + 1;
-               vco_rate = ref_clk * sdm_byp_div;
-       } else {
-               /* sdm mode */
-               sdm_dc_off = FIELD(
-                               pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1),
-                               DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET);
-               DBG("sdm_dc_off = %d", sdm_dc_off);
-               sdm2 = FIELD(pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2),
-                               DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0);
-               sdm3 = FIELD(pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3),
-                               DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8);
-               sdm_freq_seed = (sdm3 << 8) | sdm2;
-               DBG("sdm_freq_seed = %d", sdm_freq_seed);
-
-               vco_rate = (ref_clk * (sdm_dc_off + 1)) +
-                       mult_frac(ref_clk, sdm_freq_seed, BIT(16));
-               DBG("vco rate = %lu", vco_rate);
-       }
-
-       DBG("returning vco rate = %lu", vco_rate);
-
-       return vco_rate;
-}
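The recalc path inverts that programming. Continuing the 600 MHz example: with the doubler bit set the effective reference is 38.4 MHz, the DC offset reads back as 14 and the frequency seed as 40960, so the rate reconstructs exactly (a sketch of the same math):

    #include <stdio.h>

    int main(void)
    {
        unsigned long ref_clk = 2 * 19200000UL;   /* doubler enabled */
        unsigned long sdm_dc_off = 14, sdm_freq_seed = 40960, vco;

        /* integer part plus 16-bit fractional part, as in recalc_rate */
        vco = ref_clk * (sdm_dc_off + 1) +
              (unsigned long)(((unsigned long long)ref_clk * sdm_freq_seed) >> 16);

        printf("vco = %lu\n", vco);   /* 600000000 */
        return 0;
    }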
-
-static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
-       .round_rate = msm_dsi_pll_helper_clk_round_rate,
-       .set_rate = dsi_pll_28nm_clk_set_rate,
-       .recalc_rate = dsi_pll_28nm_clk_recalc_rate,
-       .prepare = msm_dsi_pll_helper_clk_prepare,
-       .unprepare = msm_dsi_pll_helper_clk_unprepare,
-       .is_enabled = dsi_pll_28nm_clk_is_enabled,
-};
-
-/*
- * PLL Callbacks
- */
-static int dsi_pll_28nm_enable_seq_hpm(struct msm_dsi_pll *pll)
-{
-       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-       struct device *dev = &pll_28nm->pdev->dev;
-       void __iomem *base = pll_28nm->mmio;
-       u32 max_reads = 5, timeout_us = 100;
-       bool locked;
-       u32 val;
-       int i;
-
-       DBG("id=%d", pll_28nm->id);
-
-       pll_28nm_software_reset(pll_28nm);
-
-       /*
-        * PLL power up sequence.
-        * Add necessary delays recommended by hardware.
-        */
-       val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
-       pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);
-
-       val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
-       pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
-
-       val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
-       pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
-
-       val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
-       pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
-
-       for (i = 0; i < 2; i++) {
-               /* DSI Uniphy lock detect setting */
-               pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,
-                               0x0c, 100);
-               pll_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
-
-               /* poll for PLL ready status */
-               locked = pll_28nm_poll_for_ready(pll_28nm,
-                                               max_reads, timeout_us);
-               if (locked)
-                       break;
-
-               pll_28nm_software_reset(pll_28nm);
-
-               /*
-                * PLL power up sequence.
-                * Add necessary delays recommended by hardware.
-                */
-               val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
-               pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);
-
-               val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
-               pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
-
-               val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
-               pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 250);
-
-               val &= ~DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
-               pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
-
-               val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
-               pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
-
-               val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
-               pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
-       }
-
-       if (unlikely(!locked))
-               DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
-       else
-               DBG("DSI PLL Lock success");
-
-       return locked ? 0 : -EINVAL;
-}
-
-static int dsi_pll_28nm_enable_seq_lp(struct msm_dsi_pll *pll)
-{
-       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-       struct device *dev = &pll_28nm->pdev->dev;
-       void __iomem *base = pll_28nm->mmio;
-       bool locked;
-       u32 max_reads = 10, timeout_us = 50;
-       u32 val;
-
-       DBG("id=%d", pll_28nm->id);
-
-       pll_28nm_software_reset(pll_28nm);
-
-       /*
-        * PLL power up sequence.
-        * Add necessary delays recommended by hardware.
-        */
-       pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_CAL_CFG1, 0x34, 500);
-
-       val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
-       pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
-
-       val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
-       pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
-
-       val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B |
-               DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
-       pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
-
-       /* DSI PLL toggle lock detect setting */
-       pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x04, 500);
-       pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x05, 512);
-
-       locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
-
-       if (unlikely(!locked))
-               DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
-       else
-               DBG("DSI PLL lock success");
-
-       return locked ? 0 : -EINVAL;
-}
-
-static void dsi_pll_28nm_disable_seq(struct msm_dsi_pll *pll)
-{
-       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-
-       DBG("id=%d", pll_28nm->id);
-       pll_write(pll_28nm->mmio + REG_DSI_28nm_PHY_PLL_GLB_CFG, 0x00);
-}
-
-static void dsi_pll_28nm_save_state(struct msm_dsi_pll *pll)
-{
-       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-       struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
-       void __iomem *base = pll_28nm->mmio;
-
-       cached_state->postdiv3 =
-                       pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);
-       cached_state->postdiv1 =
-                       pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
-       cached_state->byte_mux = pll_read(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
-       if (dsi_pll_28nm_clk_is_enabled(&pll->clk_hw))
-               cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);
-       else
-               cached_state->vco_rate = 0;
-}
-
-static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
-{
-       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-       struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
-       void __iomem *base = pll_28nm->mmio;
-       int ret;
-
-       ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
-                                       cached_state->vco_rate, 0);
-       if (ret) {
-               DRM_DEV_ERROR(&pll_28nm->pdev->dev,
-                       "restore vco rate failed. ret=%d\n", ret);
-               return ret;
-       }
-
-       pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
-                       cached_state->postdiv3);
-       pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
-                       cached_state->postdiv1);
-       pll_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
-                       cached_state->byte_mux);
-
-       return 0;
-}
-
-static int dsi_pll_28nm_get_provider(struct msm_dsi_pll *pll,
-                               struct clk **byte_clk_provider,
-                               struct clk **pixel_clk_provider)
-{
-       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-
-       if (byte_clk_provider)
-               *byte_clk_provider = pll_28nm->provided_clks[DSI_BYTE_PLL_CLK];
-       if (pixel_clk_provider)
-               *pixel_clk_provider =
-                               pll_28nm->provided_clks[DSI_PIXEL_PLL_CLK];
-
-       return 0;
-}
-
-static void dsi_pll_28nm_destroy(struct msm_dsi_pll *pll)
-{
-       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-       int i;
-
-       msm_dsi_pll_helper_unregister_clks(pll_28nm->pdev,
-                                       pll_28nm->clks, pll_28nm->num_clks);
-
-       for (i = 0; i < NUM_PROVIDED_CLKS; i++)
-               pll_28nm->provided_clks[i] = NULL;
-
-       pll_28nm->num_clks = 0;
-       pll_28nm->clk_data.clks = NULL;
-       pll_28nm->clk_data.clk_num = 0;
-}
-
-static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
-{
-       char clk_name[32], parent1[32], parent2[32], vco_name[32];
-       struct clk_init_data vco_init = {
-               .parent_names = (const char *[]){ "xo" },
-               .num_parents = 1,
-               .name = vco_name,
-               .flags = CLK_IGNORE_UNUSED,
-               .ops = &clk_ops_dsi_pll_28nm_vco,
-       };
-       struct device *dev = &pll_28nm->pdev->dev;
-       struct clk **clks = pll_28nm->clks;
-       struct clk **provided_clks = pll_28nm->provided_clks;
-       int num = 0;
-       int ret;
-
-       DBG("%d", pll_28nm->id);
-
-       snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->id);
-       pll_28nm->base.clk_hw.init = &vco_init;
-       clks[num++] = clk_register(dev, &pll_28nm->base.clk_hw);
-
-       snprintf(clk_name, 32, "dsi%danalog_postdiv_clk", pll_28nm->id);
-       snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
-       clks[num++] = clk_register_divider(dev, clk_name,
-                       parent1, CLK_SET_RATE_PARENT,
-                       pll_28nm->mmio +
-                       REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
-                       0, 4, 0, NULL);
-
-       snprintf(clk_name, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);
-       snprintf(parent1, 32, "dsi%danalog_postdiv_clk", pll_28nm->id);
-       clks[num++] = clk_register_fixed_factor(dev, clk_name,
-                       parent1, CLK_SET_RATE_PARENT,
-                       1, 2);
-
-       snprintf(clk_name, 32, "dsi%dpll", pll_28nm->id);
-       snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
-       clks[num++] = provided_clks[DSI_PIXEL_PLL_CLK] =
-                       clk_register_divider(dev, clk_name,
-                               parent1, 0, pll_28nm->mmio +
-                               REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
-                               0, 8, 0, NULL);
-
-       snprintf(clk_name, 32, "dsi%dbyte_mux", pll_28nm->id);
-       snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
-       snprintf(parent2, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);
-       clks[num++] = clk_register_mux(dev, clk_name,
-                       ((const char *[]){
-                               parent1, parent2
-                       }), 2, CLK_SET_RATE_PARENT, pll_28nm->mmio +
-                       REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
-
-       snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);
-       snprintf(parent1, 32, "dsi%dbyte_mux", pll_28nm->id);
-       clks[num++] = provided_clks[DSI_BYTE_PLL_CLK] =
-                       clk_register_fixed_factor(dev, clk_name,
-                               parent1, CLK_SET_RATE_PARENT, 1, 4);
-
-       pll_28nm->num_clks = num;
-
-       pll_28nm->clk_data.clk_num = NUM_PROVIDED_CLKS;
-       pll_28nm->clk_data.clks = provided_clks;
-
-       ret = of_clk_add_provider(dev->of_node,
-                       of_clk_src_onecell_get, &pll_28nm->clk_data);
-       if (ret) {
-               DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
-               return ret;
-       }
-
-       return 0;
-}
-
-struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
-                                       enum msm_dsi_phy_type type, int id)
-{
-       struct dsi_pll_28nm *pll_28nm;
-       struct msm_dsi_pll *pll;
-       int ret;
-
-       if (!pdev)
-               return ERR_PTR(-ENODEV);
-
-       pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
-       if (!pll_28nm)
-               return ERR_PTR(-ENOMEM);
-
-       pll_28nm->pdev = pdev;
-       pll_28nm->id = id;
-
-       pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
-       if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
-               DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       pll = &pll_28nm->base;
-       pll->min_rate = VCO_MIN_RATE;
-       pll->max_rate = VCO_MAX_RATE;
-       pll->get_provider = dsi_pll_28nm_get_provider;
-       pll->destroy = dsi_pll_28nm_destroy;
-       pll->disable_seq = dsi_pll_28nm_disable_seq;
-       pll->save_state = dsi_pll_28nm_save_state;
-       pll->restore_state = dsi_pll_28nm_restore_state;
-
-       if (type == MSM_DSI_PHY_28NM_HPM) {
-               pll_28nm->vco_delay = 1;
-
-               pll->en_seq_cnt = 3;
-               pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_hpm;
-               pll->enable_seqs[1] = dsi_pll_28nm_enable_seq_hpm;
-               pll->enable_seqs[2] = dsi_pll_28nm_enable_seq_hpm;
-       } else if (type == MSM_DSI_PHY_28NM_LP) {
-               pll_28nm->vco_delay = 1000;
-
-               pll->en_seq_cnt = 1;
-               pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_lp;
-       } else {
-               DRM_DEV_ERROR(&pdev->dev, "phy type (%d) is not 28nm\n", type);
-               return ERR_PTR(-EINVAL);
-       }
-
-       ret = pll_28nm_register(pll_28nm);
-       if (ret) {
-               DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
-               return ERR_PTR(ret);
-       }
-
-       return pll;
-}
-
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
deleted file mode 100644 (file)
index a6e7a25..0000000
+++ /dev/null
@@ -1,526 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/clk-provider.h>
-
-#include "dsi_pll.h"
-#include "dsi.xml.h"
-
-/*
- * DSI PLL 28nm (8960/A family) - clock diagram (e.g. DSI1):
- *
- *
- *                        +------+
- *  dsi1vco_clk ----o-----| DIV1 |---dsi1pllbit (not exposed as clock)
- *  F * byte_clk    |     +------+
- *                  | bit clock divider (F / 8)
- *                  |
- *                  |     +------+
- *                  o-----| DIV2 |---dsi1pllbyte---o---> To byte RCG
- *                  |     +------+                 | (sets parent rate)
- *                  | byte clock divider (F)       |
- *                  |                              |
- *                  |                              o---> To esc RCG
- *                  |                                (doesn't set parent rate)
- *                  |
- *                  |     +------+
- *                  o-----| DIV3 |----dsi1pll------o---> To dsi RCG
- *                        +------+                 | (sets parent rate)
- *                  dsi clock divider (F * magic)  |
- *                                                 |
- *                                                 o---> To pixel rcg
- *                                                  (doesn't set parent rate)
- */
-
-#define POLL_MAX_READS         8000
-#define POLL_TIMEOUT_US                1
-
-#define NUM_PROVIDED_CLKS      2
-
-#define VCO_REF_CLK_RATE       27000000
-#define VCO_MIN_RATE           600000000
-#define VCO_MAX_RATE           1200000000
-
-#define DSI_BYTE_PLL_CLK       0
-#define DSI_PIXEL_PLL_CLK      1
-
-#define VCO_PREF_DIV_RATIO     27
-
-struct pll_28nm_cached_state {
-       unsigned long vco_rate;
-       u8 postdiv3;
-       u8 postdiv2;
-       u8 postdiv1;
-};
-
-struct clk_bytediv {
-       struct clk_hw hw;
-       void __iomem *reg;
-};
-
-struct dsi_pll_28nm {
-       struct msm_dsi_pll base;
-
-       int id;
-       struct platform_device *pdev;
-       void __iomem *mmio;
-
-       /* custom byte clock divider */
-       struct clk_bytediv *bytediv;
-
-       /* private clocks: */
-       struct clk *clks[NUM_DSI_CLOCKS_MAX];
-       u32 num_clks;
-
-       /* clock-provider: */
-       struct clk *provided_clks[NUM_PROVIDED_CLKS];
-       struct clk_onecell_data clk_data;
-
-       struct pll_28nm_cached_state cached_state;
-};
-
-#define to_pll_28nm(x) container_of(x, struct dsi_pll_28nm, base)
-
-static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
-                                   int nb_tries, int timeout_us)
-{
-       bool pll_locked = false;
-       u32 val;
-
-       while (nb_tries--) {
-               val = pll_read(pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_RDY);
-               pll_locked = !!(val & DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY);
-
-               if (pll_locked)
-                       break;
-
-               udelay(timeout_us);
-       }
-       DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
-
-       return pll_locked;
-}
-
-/*
- * Clock Callbacks
- */
-static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
-                                    unsigned long parent_rate)
-{
-       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-       void __iomem *base = pll_28nm->mmio;
-       u32 val, temp, fb_divider;
-
-       DBG("rate=%lu, parent's=%lu", rate, parent_rate);
-
-       temp = rate / 10;
-       val = VCO_REF_CLK_RATE / 10;
-       fb_divider = (temp * VCO_PREF_DIV_RATIO) / val;
-       fb_divider = fb_divider / 2 - 1;
-       pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1,
-                       fb_divider & 0xff);
-
-       val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2);
-
-       val |= (fb_divider >> 8) & 0x07;
-
-       pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2,
-                       val);
-
-       val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
-
-       val |= (VCO_PREF_DIV_RATIO - 1) & 0x3f;
-
-       pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3,
-                       val);
-
-       pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_6,
-                       0xf);
-
-       val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
-       val |= 0x7 << 4;
-       pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
-                       val);
-
-       return 0;
-}
-
-static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
-{
-       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-
-       return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
-                                       POLL_TIMEOUT_US);
-}
-
-static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
-                                                 unsigned long parent_rate)
-{
-       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-       void __iomem *base = pll_28nm->mmio;
-       unsigned long vco_rate;
-       u32 status, fb_divider, temp, ref_divider;
-
-       VERB("parent_rate=%lu", parent_rate);
-
-       status = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0);
-
-       if (status & DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE) {
-               fb_divider = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1);
-               fb_divider &= 0xff;
-               temp = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2) & 0x07;
-               fb_divider = (temp << 8) | fb_divider;
-               fb_divider += 1;
-
-               ref_divider = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
-               ref_divider &= 0x3f;
-               ref_divider += 1;
-
-               /* multiply by 2 */
-               vco_rate = (parent_rate / ref_divider) * fb_divider * 2;
-       } else {
-               vco_rate = 0;
-       }
-
-       DBG("returning vco rate = %lu", vco_rate);
-
-       return vco_rate;
-}
-
-static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
-       .round_rate = msm_dsi_pll_helper_clk_round_rate,
-       .set_rate = dsi_pll_28nm_clk_set_rate,
-       .recalc_rate = dsi_pll_28nm_clk_recalc_rate,
-       .prepare = msm_dsi_pll_helper_clk_prepare,
-       .unprepare = msm_dsi_pll_helper_clk_unprepare,
-       .is_enabled = dsi_pll_28nm_clk_is_enabled,
-};
-
-/*
- * Custom byte clock divider clk_ops
- *
- * This clock is the entry point to configuring the PLL. The user (dsi host)
- * will set this clock's rate to the desired byte clock rate. The VCO lock
- * frequency is a multiple of the byte clock rate. The multiplication factor
- * (shown as F in the diagram above) is a function of the byte clock rate.
- *
- * This custom divider clock ensures that its parent (VCO) is set to the
- * desired rate, and that the byte clock postdivider (POSTDIV2) is configured
- * accordingly.
- */
-#define to_clk_bytediv(_hw) container_of(_hw, struct clk_bytediv, hw)
-
-static unsigned long clk_bytediv_recalc_rate(struct clk_hw *hw,
-               unsigned long parent_rate)
-{
-       struct clk_bytediv *bytediv = to_clk_bytediv(hw);
-       unsigned int div;
-
-       div = pll_read(bytediv->reg) & 0xff;
-
-       return parent_rate / (div + 1);
-}
-
-/* find multiplication factor (wrt byte clock) at which the VCO should be set */
-static unsigned int get_vco_mul_factor(unsigned long byte_clk_rate)
-{
-       unsigned long bit_mhz;
-
-       /* convert to bit clock in MHz */
-       bit_mhz = (byte_clk_rate * 8) / 1000000;
-
-       if (bit_mhz < 125)
-               return 64;
-       else if (bit_mhz < 250)
-               return 32;
-       else if (bit_mhz < 600)
-               return 16;
-       else
-               return 8;
-}
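A quick worked example of the factor selection (standalone sketch reusing the thresholds above): a 150 MHz byte clock implies a 1200 MHz bit clock, which lands in the top bucket, so F = 8 and the VCO must be set to 150 MHz x 8 = 1.2 GHz, the upper limit of this PLL:

    #include <stdio.h>

    static unsigned int get_vco_mul_factor(unsigned long byte_clk_rate)
    {
        unsigned long bit_mhz = (byte_clk_rate * 8) / 1000000;

        if (bit_mhz < 125)
            return 64;
        else if (bit_mhz < 250)
            return 32;
        else if (bit_mhz < 600)
            return 16;
        return 8;
    }

    int main(void)
    {
        unsigned long byte_clk = 150000000UL;
        unsigned int f = get_vco_mul_factor(byte_clk);

        /* 1200 MHz bit clock -> F = 8 -> VCO = 1200000000 */
        printf("F=%u vco=%lu\n", f, byte_clk * f);
        return 0;
    }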
-
-static long clk_bytediv_round_rate(struct clk_hw *hw, unsigned long rate,
-                                  unsigned long *prate)
-{
-       unsigned long best_parent;
-       unsigned int factor;
-
-       factor = get_vco_mul_factor(rate);
-
-       best_parent = rate * factor;
-       *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
-
-       return *prate / factor;
-}
-
-static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate,
-                               unsigned long parent_rate)
-{
-       struct clk_bytediv *bytediv = to_clk_bytediv(hw);
-       u32 val;
-       unsigned int factor;
-
-       factor = get_vco_mul_factor(rate);
-
-       val = pll_read(bytediv->reg);
-       val |= (factor - 1) & 0xff;
-       pll_write(bytediv->reg, val);
-
-       return 0;
-}
-
-/* Our special byte clock divider ops */
-static const struct clk_ops clk_bytediv_ops = {
-       .round_rate = clk_bytediv_round_rate,
-       .set_rate = clk_bytediv_set_rate,
-       .recalc_rate = clk_bytediv_recalc_rate,
-};
-
-/*
- * PLL Callbacks
- */
-static int dsi_pll_28nm_enable_seq(struct msm_dsi_pll *pll)
-{
-       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-       struct device *dev = &pll_28nm->pdev->dev;
-       void __iomem *base = pll_28nm->mmio;
-       bool locked;
-       unsigned int bit_div, byte_div;
-       int max_reads = 1000, timeout_us = 100;
-       u32 val;
-
-       DBG("id=%d", pll_28nm->id);
-
-       /*
-        * before enabling the PLL, configure the bit clock divider since we
-        * don't expose it as a clock to the outside world
-        * 1: read back the byte clock divider that should already be set
-        * 2: divide by 8 to get bit clock divider
-        * 3: write it to POSTDIV1
-        */
-       val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
-       byte_div = val + 1;
-       bit_div = byte_div / 8;
-
-       val = pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
-       val &= ~0xf;
-       val |= (bit_div - 1);
-       pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8, val);
-
-       /* enable the PLL */
-       pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0,
-                       DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE);
-
-       locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
-
-       if (unlikely(!locked))
-               DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
-       else
-               DBG("DSI PLL lock success");
-
-       return locked ? 0 : -EINVAL;
-}
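The divider arithmetic in the comment above is small enough to check by hand (sketch, assuming an example CTRL_9 readback): if the byte clock divider field reads 7, the byte divider F is 8, the bit clock divider F / 8 is 1, and the low nibble of CTRL_8 is programmed with 0:

    #include <stdio.h>

    int main(void)
    {
        unsigned int val = 7;                 /* example CTRL_9 readback */
        unsigned int byte_div = val + 1;      /* F = 8 */
        unsigned int bit_div = byte_div / 8;  /* F / 8 = 1 */

        /* hardware fields hold divider - 1 */
        printf("byte_div=%u bit_div=%u field=%u\n",
               byte_div, bit_div, bit_div - 1);
        return 0;
    }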
-
-static void dsi_pll_28nm_disable_seq(struct msm_dsi_pll *pll)
-{
-       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-
-       DBG("id=%d", pll_28nm->id);
-       pll_write(pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_CTRL_0, 0x00);
-}
-
-static void dsi_pll_28nm_save_state(struct msm_dsi_pll *pll)
-{
-       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-       struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
-       void __iomem *base = pll_28nm->mmio;
-
-       cached_state->postdiv3 =
-                       pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10);
-       cached_state->postdiv2 =
-                       pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
-       cached_state->postdiv1 =
-                       pll_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
-
-       cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);
-}
-
-static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
-{
-       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-       struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
-       void __iomem *base = pll_28nm->mmio;
-       int ret;
-
-       ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
-                                       cached_state->vco_rate, 0);
-       if (ret) {
-               DRM_DEV_ERROR(&pll_28nm->pdev->dev,
-                       "restore vco rate failed. ret=%d\n", ret);
-               return ret;
-       }
-
-       pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
-                       cached_state->postdiv3);
-       pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9,
-                       cached_state->postdiv2);
-       pll_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
-                       cached_state->postdiv1);
-
-       return 0;
-}
-
-static int dsi_pll_28nm_get_provider(struct msm_dsi_pll *pll,
-                               struct clk **byte_clk_provider,
-                               struct clk **pixel_clk_provider)
-{
-       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-
-       if (byte_clk_provider)
-               *byte_clk_provider = pll_28nm->provided_clks[DSI_BYTE_PLL_CLK];
-       if (pixel_clk_provider)
-               *pixel_clk_provider =
-                               pll_28nm->provided_clks[DSI_PIXEL_PLL_CLK];
-
-       return 0;
-}
-
-static void dsi_pll_28nm_destroy(struct msm_dsi_pll *pll)
-{
-       struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-
-       msm_dsi_pll_helper_unregister_clks(pll_28nm->pdev,
-                                       pll_28nm->clks, pll_28nm->num_clks);
-}
-
-static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
-{
-       char *clk_name, *parent_name, *vco_name;
-       struct clk_init_data vco_init = {
-               .parent_names = (const char *[]){ "pxo" },
-               .num_parents = 1,
-               .flags = CLK_IGNORE_UNUSED,
-               .ops = &clk_ops_dsi_pll_28nm_vco,
-       };
-       struct device *dev = &pll_28nm->pdev->dev;
-       struct clk **clks = pll_28nm->clks;
-       struct clk **provided_clks = pll_28nm->provided_clks;
-       struct clk_bytediv *bytediv;
-       struct clk_init_data bytediv_init = { };
-       int ret, num = 0;
-
-       DBG("%d", pll_28nm->id);
-
-       bytediv = devm_kzalloc(dev, sizeof(*bytediv), GFP_KERNEL);
-       if (!bytediv)
-               return -ENOMEM;
-
-       vco_name = devm_kzalloc(dev, 32, GFP_KERNEL);
-       if (!vco_name)
-               return -ENOMEM;
-
-       parent_name = devm_kzalloc(dev, 32, GFP_KERNEL);
-       if (!parent_name)
-               return -ENOMEM;
-
-       clk_name = devm_kzalloc(dev, 32, GFP_KERNEL);
-       if (!clk_name)
-               return -ENOMEM;
-
-       pll_28nm->bytediv = bytediv;
-
-       snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->id);
-       vco_init.name = vco_name;
-
-       pll_28nm->base.clk_hw.init = &vco_init;
-
-       clks[num++] = clk_register(dev, &pll_28nm->base.clk_hw);
-
-       /* prepare and register bytediv */
-       bytediv->hw.init = &bytediv_init;
-       bytediv->reg = pll_28nm->mmio + REG_DSI_28nm_8960_PHY_PLL_CTRL_9;
-
-       snprintf(parent_name, 32, "dsi%dvco_clk", pll_28nm->id);
-       snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);
-
-       bytediv_init.name = clk_name;
-       bytediv_init.ops = &clk_bytediv_ops;
-       bytediv_init.flags = CLK_SET_RATE_PARENT;
-       bytediv_init.parent_names = (const char * const *) &parent_name;
-       bytediv_init.num_parents = 1;
-
-       /* DIV2 */
-       clks[num++] = provided_clks[DSI_BYTE_PLL_CLK] =
-                       clk_register(dev, &bytediv->hw);
-
-       snprintf(clk_name, 32, "dsi%dpll", pll_28nm->id);
-       /* DIV3 */
-       clks[num++] = provided_clks[DSI_PIXEL_PLL_CLK] =
-                       clk_register_divider(dev, clk_name,
-                               parent_name, 0, pll_28nm->mmio +
-                               REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
-                               0, 8, 0, NULL);
-
-       pll_28nm->num_clks = num;
-
-       pll_28nm->clk_data.clk_num = NUM_PROVIDED_CLKS;
-       pll_28nm->clk_data.clks = provided_clks;
-
-       ret = of_clk_add_provider(dev->of_node,
-                       of_clk_src_onecell_get, &pll_28nm->clk_data);
-       if (ret) {
-               DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
-               return ret;
-       }
-
-       return 0;
-}
-
-struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
-                                              int id)
-{
-       struct dsi_pll_28nm *pll_28nm;
-       struct msm_dsi_pll *pll;
-       int ret;
-
-       if (!pdev)
-               return ERR_PTR(-ENODEV);
-
-       pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
-       if (!pll_28nm)
-               return ERR_PTR(-ENOMEM);
-
-       pll_28nm->pdev = pdev;
-       pll_28nm->id = id + 1;
-
-       pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
-       if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
-               DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       pll = &pll_28nm->base;
-       pll->min_rate = VCO_MIN_RATE;
-       pll->max_rate = VCO_MAX_RATE;
-       pll->get_provider = dsi_pll_28nm_get_provider;
-       pll->destroy = dsi_pll_28nm_destroy;
-       pll->disable_seq = dsi_pll_28nm_disable_seq;
-       pll->save_state = dsi_pll_28nm_save_state;
-       pll->restore_state = dsi_pll_28nm_restore_state;
-
-       pll->en_seq_cnt = 1;
-       pll->enable_seqs[0] = dsi_pll_28nm_enable_seq;
-
-       ret = pll_28nm_register(pll_28nm);
-       if (ret) {
-               DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
-               return ERR_PTR(ret);
-       }
-
-       return pll;
-}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c
deleted file mode 100644 (file)
index 93bf142..0000000
+++ /dev/null
@@ -1,912 +0,0 @@
-/*
- * SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2018, The Linux Foundation
- */
-
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/iopoll.h>
-
-#include "dsi_pll.h"
-#include "dsi.xml.h"
-
-/*
- * DSI PLL 7nm - clock diagram (e.g. DSI0): TODO: update CPHY diagram
- *
- *           dsi0_pll_out_div_clk  dsi0_pll_bit_clk
- *                              |                |
- *                              |                |
- *                 +---------+  |  +----------+  |  +----+
- *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
- *                 +---------+  |  +----------+  |  +----+
- *                              |                |
- *                              |                |         dsi0_pll_by_2_bit_clk
- *                              |                |          |
- *                              |                |  +----+  |  |\  dsi0_pclk_mux
- *                              |                |--| /2 |--o--| \   |
- *                              |                |  +----+     |  \  |  +---------+
- *                              |                --------------|  |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
- *                              |------------------------------|  /     +---------+
- *                              |          +-----+             | /
- *                              -----------| /4? |--o----------|/
- *                                         +-----+  |           |
- *                                                  |           |dsiclk_sel
- *                                                  |
- *                                                  dsi0_pll_post_out_div_clk
- */
-
-#define DSI_BYTE_PLL_CLK               0
-#define DSI_PIXEL_PLL_CLK              1
-#define NUM_PROVIDED_CLKS              2
-
-#define VCO_REF_CLK_RATE               19200000
-
-struct dsi_pll_regs {
-       u32 pll_prop_gain_rate;
-       u32 pll_lockdet_rate;
-       u32 decimal_div_start;
-       u32 frac_div_start_low;
-       u32 frac_div_start_mid;
-       u32 frac_div_start_high;
-       u32 pll_clock_inverters;
-       u32 ssc_stepsize_low;
-       u32 ssc_stepsize_high;
-       u32 ssc_div_per_low;
-       u32 ssc_div_per_high;
-       u32 ssc_adjper_low;
-       u32 ssc_adjper_high;
-       u32 ssc_control;
-};
-
-struct dsi_pll_config {
-       u32 ref_freq;
-       bool div_override;
-       u32 output_div;
-       bool ignore_frac;
-       bool disable_prescaler;
-       bool enable_ssc;
-       bool ssc_center;
-       u32 dec_bits;
-       u32 frac_bits;
-       u32 lock_timer;
-       u32 ssc_freq;
-       u32 ssc_offset;
-       u32 ssc_adj_per;
-       u32 thresh_cycles;
-       u32 refclk_cycles;
-};
-
-struct pll_7nm_cached_state {
-       unsigned long vco_rate;
-       u8 bit_clk_div;
-       u8 pix_clk_div;
-       u8 pll_out_div;
-       u8 pll_mux;
-};
-
-struct dsi_pll_7nm {
-       struct msm_dsi_pll base;
-
-       int id;
-       struct platform_device *pdev;
-
-       void __iomem *phy_cmn_mmio;
-       void __iomem *mmio;
-
-       u64 vco_ref_clk_rate;
-       u64 vco_current_rate;
-
-       /* protects REG_DSI_7nm_PHY_CMN_CLK_CFG0 register */
-       spinlock_t postdiv_lock;
-
-       int vco_delay;
-       struct dsi_pll_config pll_configuration;
-       struct dsi_pll_regs reg_setup;
-
-       /* private clocks: */
-       struct clk_hw *out_div_clk_hw;
-       struct clk_hw *bit_clk_hw;
-       struct clk_hw *byte_clk_hw;
-       struct clk_hw *by_2_bit_clk_hw;
-       struct clk_hw *post_out_div_clk_hw;
-       struct clk_hw *pclk_mux_hw;
-       struct clk_hw *out_dsiclk_hw;
-
-       /* clock-provider: */
-       struct clk_hw_onecell_data *hw_data;
-
-       struct pll_7nm_cached_state cached_state;
-
-       enum msm_dsi_phy_usecase uc;
-       struct dsi_pll_7nm *slave;
-};
-
-#define to_pll_7nm(x)  container_of(x, struct dsi_pll_7nm, base)
-
-/*
- * Global list of private DSI PLL struct pointers. We need this for Dual DSI
- * mode, where the master PLL's clk_ops needs to access the slave's private data
- */
-static struct dsi_pll_7nm *pll_7nm_list[DSI_MAX];
-
-static void dsi_pll_setup_config(struct dsi_pll_7nm *pll)
-{
-       struct dsi_pll_config *config = &pll->pll_configuration;
-
-       config->ref_freq = pll->vco_ref_clk_rate;
-       config->output_div = 1;
-       config->dec_bits = 8;
-       config->frac_bits = 18;
-       config->lock_timer = 64;
-       config->ssc_freq = 31500;
-       config->ssc_offset = 4800;
-       config->ssc_adj_per = 2;
-       config->thresh_cycles = 32;
-       config->refclk_cycles = 256;
-
-       config->div_override = false;
-       config->ignore_frac = false;
-       config->disable_prescaler = false;
-
-       /* TODO: ssc enable */
-       config->enable_ssc = false;
-       config->ssc_center = 0;
-}
-
-static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll)
-{
-       struct dsi_pll_config *config = &pll->pll_configuration;
-       struct dsi_pll_regs *regs = &pll->reg_setup;
-       u64 fref = pll->vco_ref_clk_rate;
-       u64 pll_freq;
-       u64 divider;
-       u64 dec, dec_multiple;
-       u32 frac;
-       u64 multiplier;
-
-       pll_freq = pll->vco_current_rate;
-
-       if (config->disable_prescaler)
-               divider = fref;
-       else
-               divider = fref * 2;
-
-       multiplier = 1 << config->frac_bits;
-       dec_multiple = div_u64(pll_freq * multiplier, divider);
-       div_u64_rem(dec_multiple, multiplier, &frac);
-
-       dec = div_u64(dec_multiple, multiplier);
-
-       if (pll->base.type != MSM_DSI_PHY_7NM_V4_1)
-               regs->pll_clock_inverters = 0x28;
-       else if (pll_freq <= 1000000000ULL)
-               regs->pll_clock_inverters = 0xa0;
-       else if (pll_freq <= 2500000000ULL)
-               regs->pll_clock_inverters = 0x20;
-       else if (pll_freq <= 3020000000ULL)
-               regs->pll_clock_inverters = 0x00;
-       else
-               regs->pll_clock_inverters = 0x40;
-
-       regs->pll_lockdet_rate = config->lock_timer;
-       regs->decimal_div_start = dec;
-       regs->frac_div_start_low = (frac & 0xff);
-       regs->frac_div_start_mid = (frac & 0xff00) >> 8;
-       regs->frac_div_start_high = (frac & 0x30000) >> 16;
-}
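Worked numbers for the decimal/fractional split (standalone sketch, assuming the default 19.2 MHz reference, prescaler enabled and 18 fractional bits): a 1.5 GHz VCO divides down to dec = 39 (0x27) with frac = 16384, i.e. FRAC_DIV_START low/mid/high = 0x00/0x40/0x00:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t pll_freq = 1500000000ULL;
        uint64_t divider = 2 * 19200000ULL;  /* prescaler enabled */
        uint64_t multiplier = 1ULL << 18;    /* 18 fractional bits */
        uint64_t dec_multiple, dec;
        uint32_t frac;

        dec_multiple = pll_freq * multiplier / divider;  /* 10240000 */
        frac = dec_multiple % multiplier;                /* 16384 */
        dec = dec_multiple / multiplier;                 /* 39 */

        printf("dec=%llu low=0x%02x mid=0x%02x high=0x%02x\n",
               (unsigned long long)dec, frac & 0xff,
               (frac & 0xff00) >> 8, (frac & 0x30000) >> 16);
        return 0;
    }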
-
-#define SSC_CENTER             BIT(0)
-#define SSC_EN                 BIT(1)
-
-static void dsi_pll_calc_ssc(struct dsi_pll_7nm *pll)
-{
-       struct dsi_pll_config *config = &pll->pll_configuration;
-       struct dsi_pll_regs *regs = &pll->reg_setup;
-       u32 ssc_per;
-       u32 ssc_mod;
-       u64 ssc_step_size;
-       u64 frac;
-
-       if (!config->enable_ssc) {
-               DBG("SSC not enabled\n");
-               return;
-       }
-
-       ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;
-       ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
-       ssc_per -= ssc_mod;
-
-       frac = regs->frac_div_start_low |
-                       (regs->frac_div_start_mid << 8) |
-                       (regs->frac_div_start_high << 16);
-       ssc_step_size = regs->decimal_div_start;
-       ssc_step_size *= (1 << config->frac_bits);
-       ssc_step_size += frac;
-       ssc_step_size *= config->ssc_offset;
-       ssc_step_size *= (config->ssc_adj_per + 1);
-       ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
-       ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
-
-       regs->ssc_div_per_low = ssc_per & 0xFF;
-       regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;
-       regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);
-       regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);
-       regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;
-       regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;
-
-       regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;
-
-       pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
-                regs->decimal_div_start, frac, config->frac_bits);
-       pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
-                ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
-}
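Plugging in the defaults from dsi_pll_setup_config (ssc_freq = 31500, ssc_offset = 4800, ssc_adj_per = 2) together with the dec/frac pair from the 1.5 GHz example gives ssc_per = 302 (0x012e) and a step size of 487 (0x01e7); a standalone sketch of that arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t ref_freq = 19200000, ssc_freq = 31500;
        uint32_t ssc_offset = 4800, ssc_adj_per = 2;
        uint32_t dec = 39, frac = 16384;  /* from the 1.5 GHz example */
        uint32_t ssc_per, ssc_mod;
        uint64_t step;

        ssc_per = (ref_freq + ssc_freq / 2) / ssc_freq / 2 - 1;  /* 304 */
        ssc_mod = (ssc_per + 1) % (ssc_adj_per + 1);             /* 2 */
        ssc_per -= ssc_mod;                                      /* 302 */

        step = ((uint64_t)dec << 18) + frac;
        step = step * ssc_offset * (ssc_adj_per + 1) / (ssc_per + 1);
        step = (step + 500000) / 1000000;  /* round to closest: 487 */

        printf("ssc_per=0x%04x stepsize=0x%04x\n", ssc_per, (uint32_t)step);
        return 0;
    }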
-
-static void dsi_pll_ssc_commit(struct dsi_pll_7nm *pll)
-{
-       void __iomem *base = pll->mmio;
-       struct dsi_pll_regs *regs = &pll->reg_setup;
-
-       if (pll->pll_configuration.enable_ssc) {
-               pr_debug("SSC is enabled\n");
-
-               pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
-                         regs->ssc_stepsize_low);
-               pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
-                         regs->ssc_stepsize_high);
-               pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1,
-                         regs->ssc_div_per_low);
-               pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
-                         regs->ssc_div_per_high);
-               pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1,
-                         regs->ssc_adjper_low);
-               pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1,
-                         regs->ssc_adjper_high);
-               pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_CONTROL,
-                         SSC_EN | regs->ssc_control);
-       }
-}
-
-static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll)
-{
-       void __iomem *base = pll->mmio;
-       u8 analog_controls_five_1 = 0x01, vco_config_1 = 0x00;
-
-       if (pll->base.type == MSM_DSI_PHY_7NM_V4_1) {
-               if (pll->vco_current_rate >= 3100000000ULL)
-                       analog_controls_five_1 = 0x03;
-
-               if (pll->vco_current_rate < 1520000000ULL)
-                       vco_config_1 = 0x08;
-               else if (pll->vco_current_rate < 2990000000ULL)
-                       vco_config_1 = 0x01;
-       }
-
-       pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1,
-                 analog_controls_five_1);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1, vco_config_1);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE, 0x01);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_DSM_DIVIDER, 0x00);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE, 0xba);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_OUTDIV, 0x00);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE, 0x00);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x0a);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1, 0xc0);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x84);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x82);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x4c);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x29);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x2f);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_IFILT, 0x2a);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_IFILT,
-                 pll->base.type == MSM_DSI_PHY_7NM_V4_1 ? 0x3f : 0x22);
-
-       if (pll->base.type == MSM_DSI_PHY_7NM_V4_1) {
-               pll_write(base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
-               if (pll->slave)
-                       pll_write(pll->slave->mmio + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
-       }
-}
-
-static void dsi_pll_commit(struct dsi_pll_7nm *pll)
-{
-       void __iomem *base = pll->mmio;
-       struct dsi_pll_regs *reg = &pll->reg_setup;
-
-       pll_write(base + REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1, reg->decimal_div_start);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1, reg->frac_div_start_low);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1, reg->frac_div_start_mid);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1, reg->frac_div_start_high);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, 0x10); /* TODO: 0x00 for CPHY */
-       pll_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, reg->pll_clock_inverters);
-}
-
-static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
-                                    unsigned long parent_rate)
-{
-       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
-
-       DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_7nm->id, rate,
-           parent_rate);
-
-       pll_7nm->vco_current_rate = rate;
-       pll_7nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
-
-       dsi_pll_setup_config(pll_7nm);
-
-       dsi_pll_calc_dec_frac(pll_7nm);
-
-       dsi_pll_calc_ssc(pll_7nm);
-
-       dsi_pll_commit(pll_7nm);
-
-       dsi_pll_config_hzindep_reg(pll_7nm);
-
-       dsi_pll_ssc_commit(pll_7nm);
-
-       /* flush, ensure all register writes are done */
-       wmb();
-
-       return 0;
-}
-
-static int dsi_pll_7nm_lock_status(struct dsi_pll_7nm *pll)
-{
-       int rc;
-       u32 status = 0;
-       u32 const delay_us = 100;
-       u32 const timeout_us = 5000;
-
-       rc = readl_poll_timeout_atomic(pll->mmio +
-                                      REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE,
-                                      status,
-                                      ((status & BIT(0)) > 0),
-                                      delay_us,
-                                      timeout_us);
-       if (rc)
-               pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
-                      pll->id, status);
-
-       return rc;
-}
-
-static void dsi_pll_disable_pll_bias(struct dsi_pll_7nm *pll)
-{
-       u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0);
-
-       pll_write(pll->mmio + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0);
-       pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0, data & ~BIT(5));
-       ndelay(250);
-}
-
-static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
-{
-       u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0);
-
-       pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0, data | BIT(5));
-       pll_write(pll->mmio + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
-       ndelay(250);
-}
-
-static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
-{
-       u32 data;
-
-       data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
-       pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data & ~BIT(5));
-}
-
-static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll)
-{
-       u32 data;
-
-       pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x04);
-
-       data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
-       pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1,
-                 data | BIT(5) | BIT(4));
-}
-
-static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)
-{
-       /*
-        * Reset the PHY digital domain. This would be needed when
-        * coming out of a CX or analog rail power collapse while
-        * ensuring that the pads maintain LP00 or LP11 state
-        */
-       pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, BIT(0));
-       wmb(); /* Ensure that the reset is deasserted */
-       pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, 0x0);
-       wmb(); /* Ensure that the reset is deasserted */
-}
-
-static int dsi_pll_7nm_vco_prepare(struct clk_hw *hw)
-{
-       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
-       int rc;
-
-       dsi_pll_enable_pll_bias(pll_7nm);
-       if (pll_7nm->slave)
-               dsi_pll_enable_pll_bias(pll_7nm->slave);
-
-       /* Start PLL */
-       pll_write(pll_7nm->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0x01);
-
-       /*
-        * ensure all PLL configurations are written prior to checking
-        * for PLL lock.
-        */
-       wmb();
-
-       /* Check for PLL lock */
-       rc = dsi_pll_7nm_lock_status(pll_7nm);
-       if (rc) {
-               pr_err("PLL(%d) lock failed\n", pll_7nm->id);
-               goto error;
-       }
-
-       pll->pll_on = true;
-
-       /*
-        * assert power on reset for PHY digital in case the PLL is
-        * enabled after CX or analog domain power collapse. This needs
-        * to be done before enabling the global clk.
-        */
-       dsi_pll_phy_dig_reset(pll_7nm);
-       if (pll_7nm->slave)
-               dsi_pll_phy_dig_reset(pll_7nm->slave);
-
-       dsi_pll_enable_global_clk(pll_7nm);
-       if (pll_7nm->slave)
-               dsi_pll_enable_global_clk(pll_7nm->slave);
-
-error:
-       return rc;
-}
-
-static void dsi_pll_disable_sub(struct dsi_pll_7nm *pll)
-{
-       pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0);
-       dsi_pll_disable_pll_bias(pll);
-}
-
-static void dsi_pll_7nm_vco_unprepare(struct clk_hw *hw)
-{
-       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
-
-       /*
-        * To avoid any stray glitches while abruptly powering down the PLL
-        * make sure to gate the clock using the clock enable bit before
-        * powering down the PLL
-        */
-       dsi_pll_disable_global_clk(pll_7nm);
-       pll_write(pll_7nm->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0);
-       dsi_pll_disable_sub(pll_7nm);
-       if (pll_7nm->slave) {
-               dsi_pll_disable_global_clk(pll_7nm->slave);
-               dsi_pll_disable_sub(pll_7nm->slave);
-       }
-       /* flush, ensure all register writes are done */
-       wmb();
-       pll->pll_on = false;
-}
-
-static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
-                                                 unsigned long parent_rate)
-{
-       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
-       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
-       void __iomem *base = pll_7nm->mmio;
-       u64 ref_clk = pll_7nm->vco_ref_clk_rate;
-       u64 vco_rate = 0x0;
-       u64 multiplier;
-       u32 frac;
-       u32 dec;
-       u64 pll_freq, tmp64;
-
-       dec = pll_read(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1);
-       dec &= 0xff;
-
-       frac = pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1);
-       frac |= ((pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1) &
-                 0xff) << 8);
-       frac |= ((pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
-                 0x3) << 16);
-
-       /*
-        * TODO:
-        *      1. Assumes prescaler is disabled
-        *      2. Multiplier is 2^18; it should be 2^(num_of_frac_bits)
-        */
-       multiplier = 1 << 18;
-       pll_freq = dec * (ref_clk * 2);
-       tmp64 = (ref_clk * 2 * frac);
-       pll_freq += div_u64(tmp64, multiplier);
-
-       vco_rate = pll_freq;
-
-       DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
-           pll_7nm->id, (unsigned long)vco_rate, dec, frac);
-
-       return (unsigned long)vco_rate;
-}
-
-static const struct clk_ops clk_ops_dsi_pll_7nm_vco = {
-       .round_rate = msm_dsi_pll_helper_clk_round_rate,
-       .set_rate = dsi_pll_7nm_vco_set_rate,
-       .recalc_rate = dsi_pll_7nm_vco_recalc_rate,
-       .prepare = dsi_pll_7nm_vco_prepare,
-       .unprepare = dsi_pll_7nm_vco_unprepare,
-};
-
-/*
- * PLL Callbacks
- */
-
-static void dsi_pll_7nm_save_state(struct msm_dsi_pll *pll)
-{
-       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
-       struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
-       void __iomem *phy_base = pll_7nm->phy_cmn_mmio;
-       u32 cmn_clk_cfg0, cmn_clk_cfg1;
-
-       cached->pll_out_div = pll_read(pll_7nm->mmio +
-                                      REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
-       cached->pll_out_div &= 0x3;
-
-       cmn_clk_cfg0 = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
-       cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
-       cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
-
-       cmn_clk_cfg1 = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
-       cached->pll_mux = cmn_clk_cfg1 & 0x3;
-
-       DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
-           pll_7nm->id, cached->pll_out_div, cached->bit_clk_div,
-           cached->pix_clk_div, cached->pll_mux);
-}
-
-static int dsi_pll_7nm_restore_state(struct msm_dsi_pll *pll)
-{
-       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
-       struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
-       void __iomem *phy_base = pll_7nm->phy_cmn_mmio;
-       u32 val;
-       int ret;
-
-       val = pll_read(pll_7nm->mmio + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
-       val &= ~0x3;
-       val |= cached->pll_out_div;
-       pll_write(pll_7nm->mmio + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE, val);
-
-       pll_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
-                 cached->bit_clk_div | (cached->pix_clk_div << 4));
-
-       val = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
-       val &= ~0x3;
-       val |= cached->pll_mux;
-       pll_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, val);
-
-       ret = dsi_pll_7nm_vco_set_rate(&pll->clk_hw, pll_7nm->vco_current_rate, pll_7nm->vco_ref_clk_rate);
-       if (ret) {
-               DRM_DEV_ERROR(&pll_7nm->pdev->dev,
-                       "restore vco rate failed. ret=%d\n", ret);
-               return ret;
-       }
-
-       DBG("DSI PLL%d", pll_7nm->id);
-
-       return 0;
-}
-
-static int dsi_pll_7nm_set_usecase(struct msm_dsi_pll *pll,
-                                   enum msm_dsi_phy_usecase uc)
-{
-       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
-       void __iomem *base = pll_7nm->phy_cmn_mmio;
-       u32 data = 0x0; /* internal PLL */
-
-       DBG("DSI PLL%d", pll_7nm->id);
-
-       switch (uc) {
-       case MSM_DSI_PHY_STANDALONE:
-               break;
-       case MSM_DSI_PHY_MASTER:
-               pll_7nm->slave = pll_7nm_list[(pll_7nm->id + 1) % DSI_MAX];
-               break;
-       case MSM_DSI_PHY_SLAVE:
-               data = 0x1; /* external PLL */
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       /* set PLL src */
-       pll_write(base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, (data << 2));
-
-       pll_7nm->uc = uc;
-
-       return 0;
-}
-
-static int dsi_pll_7nm_get_provider(struct msm_dsi_pll *pll,
-                                    struct clk **byte_clk_provider,
-                                    struct clk **pixel_clk_provider)
-{
-       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
-       struct clk_hw_onecell_data *hw_data = pll_7nm->hw_data;
-
-       DBG("DSI PLL%d", pll_7nm->id);
-
-       if (byte_clk_provider)
-               *byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;
-       if (pixel_clk_provider)
-               *pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;
-
-       return 0;
-}
-
-static void dsi_pll_7nm_destroy(struct msm_dsi_pll *pll)
-{
-       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
-       struct device *dev = &pll_7nm->pdev->dev;
-
-       DBG("DSI PLL%d", pll_7nm->id);
-       of_clk_del_provider(dev->of_node);
-
-       clk_hw_unregister_divider(pll_7nm->out_dsiclk_hw);
-       clk_hw_unregister_mux(pll_7nm->pclk_mux_hw);
-       clk_hw_unregister_fixed_factor(pll_7nm->post_out_div_clk_hw);
-       clk_hw_unregister_fixed_factor(pll_7nm->by_2_bit_clk_hw);
-       clk_hw_unregister_fixed_factor(pll_7nm->byte_clk_hw);
-       clk_hw_unregister_divider(pll_7nm->bit_clk_hw);
-       clk_hw_unregister_divider(pll_7nm->out_div_clk_hw);
-       clk_hw_unregister(&pll_7nm->base.clk_hw);
-}
-
-/*
- * The post dividers and mux clocks are created using the standard divider and
- * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
- * state to follow the master PLL's divider/mux state. Therefore, we don't
- * require special clock ops that also configure the slave PLL registers.
- */
-static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm)
-{
-       char clk_name[32], parent[32], vco_name[32];
-       char parent2[32], parent3[32], parent4[32];
-       struct clk_init_data vco_init = {
-               .parent_names = (const char *[]){ "bi_tcxo" },
-               .num_parents = 1,
-               .name = vco_name,
-               .flags = CLK_IGNORE_UNUSED,
-               .ops = &clk_ops_dsi_pll_7nm_vco,
-       };
-       struct device *dev = &pll_7nm->pdev->dev;
-       struct clk_hw_onecell_data *hw_data;
-       struct clk_hw *hw;
-       int ret;
-
-       DBG("DSI%d", pll_7nm->id);
-
-       hw_data = devm_kzalloc(dev, sizeof(*hw_data) +
-                              NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),
-                              GFP_KERNEL);
-       if (!hw_data)
-               return -ENOMEM;
-
-       snprintf(vco_name, 32, "dsi%dvco_clk", pll_7nm->id);
-       pll_7nm->base.clk_hw.init = &vco_init;
-
-       ret = clk_hw_register(dev, &pll_7nm->base.clk_hw);
-       if (ret)
-               return ret;
-
-       snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
-       snprintf(parent, 32, "dsi%dvco_clk", pll_7nm->id);
-
-       hw = clk_hw_register_divider(dev, clk_name,
-                                    parent, CLK_SET_RATE_PARENT,
-                                    pll_7nm->mmio +
-                                    REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE,
-                                    0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
-       if (IS_ERR(hw)) {
-               ret = PTR_ERR(hw);
-               goto err_base_clk_hw;
-       }
-
-       pll_7nm->out_div_clk_hw = hw;
-
-       snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
-       snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
-
-       /* BIT CLK: DIV_CTRL_3_0 */
-       hw = clk_hw_register_divider(dev, clk_name, parent,
-                                    CLK_SET_RATE_PARENT,
-                                    pll_7nm->phy_cmn_mmio +
-                                    REG_DSI_7nm_PHY_CMN_CLK_CFG0,
-                                    0, 4, CLK_DIVIDER_ONE_BASED,
-                                    &pll_7nm->postdiv_lock);
-       if (IS_ERR(hw)) {
-               ret = PTR_ERR(hw);
-               goto err_out_div_clk_hw;
-       }
-
-       pll_7nm->bit_clk_hw = hw;
-
-       snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_7nm->id);
-       snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
-
-       /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
-       hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
-                                         CLK_SET_RATE_PARENT, 1, 8);
-       if (IS_ERR(hw)) {
-               ret = PTR_ERR(hw);
-               goto err_bit_clk_hw;
-       }
-
-       pll_7nm->byte_clk_hw = hw;
-       hw_data->hws[DSI_BYTE_PLL_CLK] = hw;
-
-       snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->id);
-       snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
-
-       hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
-                                         0, 1, 2);
-       if (IS_ERR(hw)) {
-               ret = PTR_ERR(hw);
-               goto err_byte_clk_hw;
-       }
-
-       pll_7nm->by_2_bit_clk_hw = hw;
-
-       snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->id);
-       snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
-
-       hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
-                                         0, 1, 4);
-       if (IS_ERR(hw)) {
-               ret = PTR_ERR(hw);
-               goto err_by_2_bit_clk_hw;
-       }
-
-       pll_7nm->post_out_div_clk_hw = hw;
-
-       snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_7nm->id);
-       snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
-       snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->id);
-       snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
-       snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->id);
-
-       hw = clk_hw_register_mux(dev, clk_name,
-                                ((const char *[]){
-                                parent, parent2, parent3, parent4
-                                }), 4, 0, pll_7nm->phy_cmn_mmio +
-                                REG_DSI_7nm_PHY_CMN_CLK_CFG1,
-                                0, 2, 0, NULL);
-       if (IS_ERR(hw)) {
-               ret = PTR_ERR(hw);
-               goto err_post_out_div_clk_hw;
-       }
-
-       pll_7nm->pclk_mux_hw = hw;
-
-       snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_7nm->id);
-       snprintf(parent, 32, "dsi%d_pclk_mux", pll_7nm->id);
-
-       /* PIX CLK DIV : DIV_CTRL_7_4 */
-       hw = clk_hw_register_divider(dev, clk_name, parent,
-                                    0, pll_7nm->phy_cmn_mmio +
-                                       REG_DSI_7nm_PHY_CMN_CLK_CFG0,
-                                    4, 4, CLK_DIVIDER_ONE_BASED,
-                                    &pll_7nm->postdiv_lock);
-       if (IS_ERR(hw)) {
-               ret = PTR_ERR(hw);
-               goto err_pclk_mux_hw;
-       }
-
-       pll_7nm->out_dsiclk_hw = hw;
-       hw_data->hws[DSI_PIXEL_PLL_CLK] = hw;
-
-       hw_data->num = NUM_PROVIDED_CLKS;
-       pll_7nm->hw_data = hw_data;
-
-       ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
-                                    pll_7nm->hw_data);
-       if (ret) {
-               DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
-               goto err_dsiclk_hw;
-       }
-
-       return 0;
-
-err_dsiclk_hw:
-       clk_hw_unregister_divider(pll_7nm->out_dsiclk_hw);
-err_pclk_mux_hw:
-       clk_hw_unregister_mux(pll_7nm->pclk_mux_hw);
-err_post_out_div_clk_hw:
-       clk_hw_unregister_fixed_factor(pll_7nm->post_out_div_clk_hw);
-err_by_2_bit_clk_hw:
-       clk_hw_unregister_fixed_factor(pll_7nm->by_2_bit_clk_hw);
-err_byte_clk_hw:
-       clk_hw_unregister_fixed_factor(pll_7nm->byte_clk_hw);
-err_bit_clk_hw:
-       clk_hw_unregister_divider(pll_7nm->bit_clk_hw);
-err_out_div_clk_hw:
-       clk_hw_unregister_divider(pll_7nm->out_div_clk_hw);
-err_base_clk_hw:
-       clk_hw_unregister(&pll_7nm->base.clk_hw);
-
-       return ret;
-}
-
-struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
-{
-       struct dsi_pll_7nm *pll_7nm;
-       struct msm_dsi_pll *pll;
-       int ret;
-
-       pll_7nm = devm_kzalloc(&pdev->dev, sizeof(*pll_7nm), GFP_KERNEL);
-       if (!pll_7nm)
-               return ERR_PTR(-ENOMEM);
-
-       DBG("DSI PLL%d", id);
-
-       pll_7nm->pdev = pdev;
-       pll_7nm->id = id;
-       pll_7nm_list[id] = pll_7nm;
-
-       pll_7nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
-       if (IS_ERR_OR_NULL(pll_7nm->phy_cmn_mmio)) {
-               DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
-               return ERR_PTR(-ENOMEM);
-       }
-
-       pll_7nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
-       if (IS_ERR_OR_NULL(pll_7nm->mmio)) {
-               DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
-               return ERR_PTR(-ENOMEM);
-       }
-
-       spin_lock_init(&pll_7nm->postdiv_lock);
-
-       pll = &pll_7nm->base;
-       pll->min_rate = 1000000000UL;
-       pll->max_rate = 3500000000UL;
-       if (pll->type == MSM_DSI_PHY_7NM_V4_1) {
-               pll->min_rate = 600000000UL;
-               pll->max_rate = (unsigned long)5000000000ULL;
-               /* workaround for max rate overflowing on 32-bit builds: */
-               pll->max_rate = max(pll->max_rate, 0xffffffffUL);
-       }
-       pll->get_provider = dsi_pll_7nm_get_provider;
-       pll->destroy = dsi_pll_7nm_destroy;
-       pll->save_state = dsi_pll_7nm_save_state;
-       pll->restore_state = dsi_pll_7nm_restore_state;
-       pll->set_usecase = dsi_pll_7nm_set_usecase;
-
-       pll_7nm->vco_delay = 1;
-
-       ret = pll_7nm_register(pll_7nm);
-       if (ret) {
-               DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
-               return ERR_PTR(ret);
-       }
-
-       /* TODO: Remove this when we have proper display handover support */
-       msm_dsi_pll_save_state(pll);
-
-       return pll;
-}
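
For readers reconstructing the arithmetic in dsi_pll_7nm_vco_recalc_rate() above: with the prescaler disabled, the recovered rate is vco = 2 * ref * (dec + frac / 2^18). A minimal standalone sketch of the same fixed-point computation; the 19.2 MHz reference and the dec/frac register values are hypothetical, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

/* Mirrors dsi_pll_7nm_vco_recalc_rate(): vco = 2 * ref * (dec + frac / 2^18).
 * ref_clk, dec and frac below are hypothetical example values.
 */
int main(void)
{
	uint64_t ref_clk = 19200000;	/* assumed 19.2 MHz TCXO */
	uint32_t dec = 0x4e;		/* DECIMAL_DIV_START_1 & 0xff */
	uint32_t frac = 0x2aaaa;	/* 18-bit fractional part, ~2/3 */
	uint64_t multiplier = 1 << 18;	/* 2^18, matching the TODO note */
	uint64_t pll_freq;

	pll_freq = dec * (ref_clk * 2);
	pll_freq += (ref_clk * 2 * frac) / multiplier;

	/* 0x4e = 78, so roughly 78.667 * 38.4 MHz = ~3.02 GHz */
	printf("vco rate = %llu Hz\n", (unsigned long long)pll_freq);
	return 0;
}
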
index e9c6544..fab09e7 100644 (file)
@@ -57,10 +57,13 @@ static void vblank_put(struct msm_kms *kms, unsigned crtc_mask)
 
 static void lock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
 {
+       int crtc_index;
        struct drm_crtc *crtc;
 
-       for_each_crtc_mask(kms->dev, crtc, crtc_mask)
-               mutex_lock(&kms->commit_lock[drm_crtc_index(crtc)]);
+       for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
+               crtc_index = drm_crtc_index(crtc);
+               mutex_lock_nested(&kms->commit_lock[crtc_index], crtc_index);
+       }
 }
 
 static void unlock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
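
The lock_crtcs() change just above addresses a lockdep false positive: all per-CRTC commit locks share one lock class, so acquiring several in a loop looks like recursive locking. Passing the CRTC index as the lockdep subclass marks the acquisitions as distinct; the real deadlock protection remains the fixed ascending-index order. A minimal sketch of the idiom, with a made-up lock array standing in for kms->commit_lock:

#include <linux/bits.h>
#include <linux/mutex.h>

#define NR_LOCKS 8	/* stand-in for MAX_CRTCS */

static struct mutex locks[NR_LOCKS];	/* mutex_init() each at probe time */

static void lock_all(unsigned int mask)
{
	int i;

	/* Always ascending index order; the subclass only informs lockdep,
	 * it does not enforce the ordering - the loop does.
	 */
	for (i = 0; i < NR_LOCKS; i++)
		if (mask & BIT(i))
			mutex_lock_nested(&locks[i], i);
}

static void unlock_all(unsigned int mask)
{
	int i;

	for (i = 0; i < NR_LOCKS; i++)
		if (mask & BIT(i))
			mutex_unlock(&locks[i]);
}
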
index 85ad0ba..d611cc8 100644 (file)
@@ -111,23 +111,15 @@ static const struct file_operations msm_gpu_fops = {
 static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
 {
        struct msm_drm_private *priv = dev->dev_private;
-       struct msm_gpu *gpu = priv->gpu;
        int ret;
 
-       ret = mutex_lock_interruptible(&priv->mm_lock);
+       ret = mutex_lock_interruptible(&priv->obj_lock);
        if (ret)
                return ret;
 
-       if (gpu) {
-               seq_printf(m, "Active Objects (%s):\n", gpu->name);
-               msm_gem_describe_objects(&gpu->active_list, m);
-       }
-
-       seq_printf(m, "Inactive Objects:\n");
-       msm_gem_describe_objects(&priv->inactive_dontneed, m);
-       msm_gem_describe_objects(&priv->inactive_willneed, m);
+       msm_gem_describe_objects(&priv->objects, m);
 
-       mutex_unlock(&priv->mm_lock);
+       mutex_unlock(&priv->obj_lock);
 
        return 0;
 }
index 94525ac..e1104d2 100644 (file)
@@ -39,6 +39,7 @@
  *           GEM object's debug name
  * - 1.5.0 - Add SUBMITQUERY_QUERY ioctl
  * - 1.6.0 - Syncobj support
+ * - 1.7.0 - Add MSM_PARAM_SUSPENDS to access suspend count
  */
 #define MSM_VERSION_MAJOR      1
 #define MSM_VERSION_MINOR      6
@@ -446,8 +447,12 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
 
        priv->wq = alloc_ordered_workqueue("msm", 0);
 
+       INIT_LIST_HEAD(&priv->objects);
+       mutex_init(&priv->obj_lock);
+
        INIT_LIST_HEAD(&priv->inactive_willneed);
        INIT_LIST_HEAD(&priv->inactive_dontneed);
+       INIT_LIST_HEAD(&priv->inactive_unpinned);
        mutex_init(&priv->mm_lock);
 
        /* Teach lockdep about lock ordering wrt. shrinker: */
@@ -570,6 +575,7 @@ err_free_priv:
        kfree(priv);
 err_put_drm_dev:
        drm_dev_put(ddev);
+       platform_set_drvdata(pdev, NULL);
        return ret;
 }
 
@@ -1072,6 +1078,10 @@ static int __maybe_unused msm_pm_resume(struct device *dev)
 static int __maybe_unused msm_pm_prepare(struct device *dev)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
+       struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;
+
+       if (!priv || !priv->kms)
+               return 0;
 
        return drm_mode_config_helper_suspend(ddev);
 }
@@ -1079,6 +1089,10 @@ static int __maybe_unused msm_pm_prepare(struct device *dev)
 static void __maybe_unused msm_pm_complete(struct device *dev)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
+       struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;
+
+       if (!priv || !priv->kms)
+               return;
 
        drm_mode_config_helper_resume(ddev);
 }
@@ -1173,10 +1187,11 @@ static int compare_name_mdp(struct device *dev, void *data)
        return (strstr(dev_name(dev), "mdp") != NULL);
 }
 
-static int add_display_components(struct device *dev,
+static int add_display_components(struct platform_device *pdev,
                                  struct component_match **matchptr)
 {
        struct device *mdp_dev;
+       struct device *dev = &pdev->dev;
        int ret;
 
        /*
@@ -1185,9 +1200,9 @@ static int add_display_components(struct device *dev,
         * Populate the children devices, find the MDP5/DPU node, and then add
         * the interfaces to our components list.
         */
-       if (of_device_is_compatible(dev->of_node, "qcom,mdss") ||
-           of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss") ||
-           of_device_is_compatible(dev->of_node, "qcom,sc7180-mdss")) {
+       switch (get_mdp_ver(pdev)) {
+       case KMS_MDP5:
+       case KMS_DPU:
                ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
                if (ret) {
                        DRM_DEV_ERROR(dev, "failed to populate children devices\n");
@@ -1206,9 +1221,11 @@ static int add_display_components(struct device *dev,
                /* add the MDP component itself */
                drm_of_component_match_add(dev, matchptr, compare_of,
                                           mdp_dev->of_node);
-       } else {
+               break;
+       case KMS_MDP4:
                /* MDP4 */
                mdp_dev = dev;
+               break;
        }
 
        ret = add_components_mdp(mdp_dev, matchptr);
@@ -1273,7 +1290,7 @@ static int msm_pdev_probe(struct platform_device *pdev)
        int ret;
 
        if (get_mdp_ver(pdev)) {
-               ret = add_display_components(&pdev->dev, &match);
+               ret = add_display_components(pdev, &match);
                if (ret)
                        return ret;
        }
@@ -1311,6 +1328,10 @@ static int msm_pdev_remove(struct platform_device *pdev)
 static void msm_pdev_shutdown(struct platform_device *pdev)
 {
        struct drm_device *drm = platform_get_drvdata(pdev);
+       struct msm_drm_private *priv = drm ? drm->dev_private : NULL;
+
+       if (!priv || !priv->kms)
+               return;
 
        drm_atomic_helper_shutdown(drm);
 }
@@ -1320,6 +1341,9 @@ static const struct of_device_id dt_match[] = {
        { .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
        { .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
        { .compatible = "qcom,sc7180-mdss", .data = (void *)KMS_DPU },
+       { .compatible = "qcom,sc7280-mdss", .data = (void *)KMS_DPU },
+       { .compatible = "qcom,sm8150-mdss", .data = (void *)KMS_DPU },
+       { .compatible = "qcom,sm8250-mdss", .data = (void *)KMS_DPU },
        {}
 };
 MODULE_DEVICE_TABLE(of, dt_match);
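
With add_display_components() now keyed off get_mdp_ver(), adding a new MDSS variant only requires a dt_match entry, as in the table above. The helper itself is not shown in this hunk; presumably it is just a match-data lookup, along these lines:

#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Assumed shape of the helper (not shown in this hunk): the KMS_MDP4/
 * KMS_MDP5/KMS_DPU value is stashed in of_device_id.data by dt_match[]
 * and recovered per device.
 */
static inline int get_mdp_ver(struct platform_device *pdev)
{
	return (int)(unsigned long)of_device_get_match_data(&pdev->dev);
}
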
index 591c47a..2668941 100644 (file)
@@ -174,20 +174,35 @@ struct msm_drm_private {
        struct msm_rd_state *hangrd;   /* debugfs to dump hanging submits */
        struct msm_perf_state *perf;
 
-       /*
-        * Lists of inactive GEM objects.  Every bo is either in one of the
+       /**
+        * List of all GEM objects (mainly for debugfs, protected by obj_lock,
+        * which should be acquired before any per-GEM-object lock)
+        */
+       struct list_head objects;
+       struct mutex obj_lock;
+
+       /**
+        * LRUs of inactive GEM objects.  Every bo is either in one of the
         * inactive lists (depending on whether or not it is shrinkable) or
-        * gpu->active_list (for the gpu it is active on[1])
+        * gpu->active_list (for the gpu it is active on[1]), or transiently
+        * on a temporary list as the shrinker is running.
+        *
+        * Note that inactive_willneed also contains pinned and vmap'd bos,
+        * but the number of pinned-but-not-active objects is small (scanout
+        * buffers, ringbuffer, etc).
         *
-        * These lists are protected by mm_lock.  If struct_mutex is involved, it
-        * should be aquired prior to mm_lock.  One should *not* hold mm_lock in
+        * These lists are protected by mm_lock (which should be acquired
+        * before per GEM object lock).  One should *not* hold mm_lock in
         * get_pages()/vmap()/etc paths, as they can trigger the shrinker.
         *
         * [1] if someone ever added support for the old 2d cores, there could be
         *     more than one gpu object
         */
-       struct list_head inactive_willneed;  /* inactive + !shrinkable */
-       struct list_head inactive_dontneed;  /* inactive +  shrinkable */
+       struct list_head inactive_willneed;  /* inactive + potentially unpin/evictable */
+       struct list_head inactive_dontneed;  /* inactive + shrinkable */
+       struct list_head inactive_unpinned;  /* inactive + purged or unpinned */
+       long shrinkable_count;               /* write access under mm_lock */
+       long evictable_count;                /* write access under mm_lock */
        struct mutex mm_lock;
 
        struct workqueue_struct *wq;
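
Taken together, the new fields split the bookkeeping in two: objects/obj_lock track every GEM object for debugfs, while mm_lock guards the three inactive LRUs plus two page counters that let the shrinker answer count() without walking any list. One reading of the intended invariants (an interpretation of the comments above, not text from the patch):

/* Invariants assumed to hold under mm_lock (sizes in pages):
 *
 *   shrinkable_count == sum of (size >> PAGE_SHIFT) over every bo on
 *                       inactive_dontneed (i.e. madv == MSM_MADV_DONTNEED)
 *   evictable_count  == sum of (size >> PAGE_SHIFT) over every evictable
 *                       bo on inactive_willneed
 *
 * The mark_purgeable()/mark_unpurgeable() and mark_evictable()/
 * mark_unevictable() helpers in msm_gem.h keep these counters in step
 * as objects move between the lists.
 */
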
index d42f066..91c0e49 100644 (file)
@@ -33,6 +33,7 @@ static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
 #ifdef CONFIG_DEBUG_FS
 void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
 {
+       struct msm_gem_stats stats = {};
        int i, n = fb->format->num_planes;
 
        seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
@@ -42,7 +43,7 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
        for (i = 0; i < n; i++) {
                seq_printf(m, "   %d: offset=%d pitch=%d, obj: ",
                                i, fb->offsets[i], fb->pitches[i]);
-               msm_gem_describe(fb->obj[i], m);
+               msm_gem_describe(fb->obj[i], m, &stats);
        }
 }
 #endif
index ad27036..cd59a59 100644 (file)
@@ -45,7 +45,7 @@ int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
        int ret;
 
        if (fence > fctx->last_fence) {
-               DRM_ERROR("%s: waiting on invalid fence: %u (of %u)\n",
+               DRM_ERROR_RATELIMITED("%s: waiting on invalid fence: %u (of %u)\n",
                                fctx->name, fence, fctx->last_fence);
                return -EINVAL;
        }
index f091c1e..b199942 100644 (file)
@@ -96,7 +96,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
-       WARN_ON(!msm_gem_is_locked(obj));
+       GEM_WARN_ON(!msm_gem_is_locked(obj));
 
        if (!msm_obj->pages) {
                struct drm_device *dev = obj->dev;
@@ -130,6 +130,9 @@ static struct page **get_pages(struct drm_gem_object *obj)
                 */
                if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
                        sync_for_device(msm_obj);
+
+               GEM_WARN_ON(msm_obj->active_count);
+               update_inactive(msm_obj);
        }
 
        return msm_obj->pages;
@@ -162,6 +165,7 @@ static void put_pages(struct drm_gem_object *obj)
 
                        sg_free_table(msm_obj->sgt);
                        kfree(msm_obj->sgt);
+                       msm_obj->sgt = NULL;
                }
 
                if (use_pages(obj))
@@ -180,7 +184,7 @@ struct page **msm_gem_get_pages(struct drm_gem_object *obj)
 
        msm_gem_lock(obj);
 
-       if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+       if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
                msm_gem_unlock(obj);
                return ERR_PTR(-EBUSY);
        }
@@ -256,7 +260,7 @@ static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
                goto out;
        }
 
-       if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+       if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
                msm_gem_unlock(obj);
                return VM_FAULT_SIGBUS;
        }
@@ -289,7 +293,7 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
        struct drm_device *dev = obj->dev;
        int ret;
 
-       WARN_ON(!msm_gem_is_locked(obj));
+       GEM_WARN_ON(!msm_gem_is_locked(obj));
 
        /* Make it mmapable */
        ret = drm_gem_create_mmap_offset(obj);
@@ -318,7 +322,7 @@ static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_gem_vma *vma;
 
-       WARN_ON(!msm_gem_is_locked(obj));
+       GEM_WARN_ON(!msm_gem_is_locked(obj));
 
        vma = kzalloc(sizeof(*vma), GFP_KERNEL);
        if (!vma)
@@ -337,7 +341,7 @@ static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_gem_vma *vma;
 
-       WARN_ON(!msm_gem_is_locked(obj));
+       GEM_WARN_ON(!msm_gem_is_locked(obj));
 
        list_for_each_entry(vma, &msm_obj->vmas, list) {
                if (vma->aspace == aspace)
@@ -356,19 +360,25 @@ static void del_vma(struct msm_gem_vma *vma)
        kfree(vma);
 }
 
-/* Called with msm_obj locked */
+/**
+ * If close is true, this also closes the VMA (releasing the allocated
+ * iova range) in addition to removing the iommu mapping.  In the eviction
+ * case (!close), we keep the iova allocated, but only remove the iommu
+ * mapping.
+ */
 static void
-put_iova_spaces(struct drm_gem_object *obj)
+put_iova_spaces(struct drm_gem_object *obj, bool close)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_gem_vma *vma;
 
-       WARN_ON(!msm_gem_is_locked(obj));
+       GEM_WARN_ON(!msm_gem_is_locked(obj));
 
        list_for_each_entry(vma, &msm_obj->vmas, list) {
                if (vma->aspace) {
                        msm_gem_purge_vma(vma->aspace, vma);
-                       msm_gem_close_vma(vma->aspace, vma);
+                       if (close)
+                               msm_gem_close_vma(vma->aspace, vma);
                }
        }
 }
@@ -380,7 +390,7 @@ put_iova_vmas(struct drm_gem_object *obj)
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_gem_vma *vma, *tmp;
 
-       WARN_ON(!msm_gem_is_locked(obj));
+       GEM_WARN_ON(!msm_gem_is_locked(obj));
 
        list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
                del_vma(vma);
@@ -394,7 +404,7 @@ static int get_iova_locked(struct drm_gem_object *obj,
        struct msm_gem_vma *vma;
        int ret = 0;
 
-       WARN_ON(!msm_gem_is_locked(obj));
+       GEM_WARN_ON(!msm_gem_is_locked(obj));
 
        vma = lookup_vma(obj, aspace);
 
@@ -421,7 +431,7 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_gem_vma *vma;
        struct page **pages;
-       int prot = IOMMU_READ;
+       int ret, prot = IOMMU_READ;
 
        if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
                prot |= IOMMU_WRITE;
@@ -429,21 +439,26 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
        if (msm_obj->flags & MSM_BO_MAP_PRIV)
                prot |= IOMMU_PRIV;
 
-       WARN_ON(!msm_gem_is_locked(obj));
+       GEM_WARN_ON(!msm_gem_is_locked(obj));
 
-       if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
+       if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
                return -EBUSY;
 
        vma = lookup_vma(obj, aspace);
-       if (WARN_ON(!vma))
+       if (GEM_WARN_ON(!vma))
                return -EINVAL;
 
        pages = get_pages(obj);
        if (IS_ERR(pages))
                return PTR_ERR(pages);
 
-       return msm_gem_map_vma(aspace, vma, prot,
+       ret = msm_gem_map_vma(aspace, vma, prot,
                        msm_obj->sgt, obj->size >> PAGE_SHIFT);
+
+       if (!ret)
+               msm_obj->pin_count++;
+
+       return ret;
 }
 
 static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
@@ -453,7 +468,7 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
        u64 local;
        int ret;
 
-       WARN_ON(!msm_gem_is_locked(obj));
+       GEM_WARN_ON(!msm_gem_is_locked(obj));
 
        ret = get_iova_locked(obj, aspace, &local,
                range_start, range_end);
@@ -524,7 +539,7 @@ uint64_t msm_gem_iova(struct drm_gem_object *obj,
        msm_gem_lock(obj);
        vma = lookup_vma(obj, aspace);
        msm_gem_unlock(obj);
-       WARN_ON(!vma);
+       GEM_WARN_ON(!vma);
 
        return vma ? vma->iova : 0;
 }
@@ -535,14 +550,21 @@ uint64_t msm_gem_iova(struct drm_gem_object *obj,
 void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace)
 {
+       struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct msm_gem_vma *vma;
 
-       WARN_ON(!msm_gem_is_locked(obj));
+       GEM_WARN_ON(!msm_gem_is_locked(obj));
 
        vma = lookup_vma(obj, aspace);
 
-       if (!WARN_ON(!vma))
+       if (!GEM_WARN_ON(!vma)) {
                msm_gem_unmap_vma(aspace, vma);
+
+               msm_obj->pin_count--;
+               GEM_WARN_ON(msm_obj->pin_count < 0);
+
+               update_inactive(msm_obj);
+       }
 }
 
 /*
@@ -593,12 +615,12 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int ret = 0;
 
-       WARN_ON(!msm_gem_is_locked(obj));
+       GEM_WARN_ON(!msm_gem_is_locked(obj));
 
        if (obj->import_attach)
                return ERR_PTR(-ENODEV);
 
-       if (WARN_ON(msm_obj->madv > madv)) {
+       if (GEM_WARN_ON(msm_obj->madv > madv)) {
                DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
                        msm_obj->madv, madv);
                return ERR_PTR(-EBUSY);
@@ -664,8 +686,8 @@ void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
-       WARN_ON(!msm_gem_is_locked(obj));
-       WARN_ON(msm_obj->vmap_count < 1);
+       GEM_WARN_ON(!msm_gem_is_locked(obj));
+       GEM_WARN_ON(msm_obj->vmap_count < 1);
 
        msm_obj->vmap_count--;
 }
@@ -707,20 +729,23 @@ void msm_gem_purge(struct drm_gem_object *obj)
        struct drm_device *dev = obj->dev;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
-       WARN_ON(!is_purgeable(msm_obj));
-       WARN_ON(obj->import_attach);
+       GEM_WARN_ON(!msm_gem_is_locked(obj));
+       GEM_WARN_ON(!is_purgeable(msm_obj));
 
-       put_iova_spaces(obj);
+       /* Get rid of any iommu mapping(s): */
+       put_iova_spaces(obj, true);
 
        msm_gem_vunmap(obj);
 
+       drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
+
        put_pages(obj);
 
        put_iova_vmas(obj);
 
        msm_obj->madv = __MSM_MADV_PURGED;
+       update_inactive(msm_obj);
 
-       drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
        drm_gem_free_mmap_offset(obj);
 
        /* Our goal here is to return as much of the memory as
@@ -734,13 +759,36 @@ void msm_gem_purge(struct drm_gem_object *obj)
                        0, (loff_t)-1);
 }
 
+/**
+ * Unpin the backing pages and make them available to be swapped out.
+ */
+void msm_gem_evict(struct drm_gem_object *obj)
+{
+       struct drm_device *dev = obj->dev;
+       struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+       GEM_WARN_ON(!msm_gem_is_locked(obj));
+       GEM_WARN_ON(is_unevictable(msm_obj));
+       GEM_WARN_ON(!msm_obj->evictable);
+       GEM_WARN_ON(msm_obj->active_count);
+
+       /* Get rid of any iommu mapping(s): */
+       put_iova_spaces(obj, false);
+
+       drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
+
+       put_pages(obj);
+
+       update_inactive(msm_obj);
+}
+
 void msm_gem_vunmap(struct drm_gem_object *obj)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
-       WARN_ON(!msm_gem_is_locked(obj));
+       GEM_WARN_ON(!msm_gem_is_locked(obj));
 
-       if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
+       if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
                return;
 
        vunmap(msm_obj->vaddr);
@@ -788,12 +836,16 @@ void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
        struct msm_drm_private *priv = obj->dev->dev_private;
 
        might_sleep();
-       WARN_ON(!msm_gem_is_locked(obj));
-       WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
+       GEM_WARN_ON(!msm_gem_is_locked(obj));
+       GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
+       GEM_WARN_ON(msm_obj->dontneed);
+       GEM_WARN_ON(!msm_obj->sgt);
 
        if (msm_obj->active_count++ == 0) {
                mutex_lock(&priv->mm_lock);
-               list_del_init(&msm_obj->mm_list);
+               if (msm_obj->evictable)
+                       mark_unevictable(msm_obj);
+               list_del(&msm_obj->mm_list);
                list_add_tail(&msm_obj->mm_list, &gpu->active_list);
                mutex_unlock(&priv->mm_lock);
        }
@@ -804,7 +856,7 @@ void msm_gem_active_put(struct drm_gem_object *obj)
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
        might_sleep();
-       WARN_ON(!msm_gem_is_locked(obj));
+       GEM_WARN_ON(!msm_gem_is_locked(obj));
 
        if (--msm_obj->active_count == 0) {
                update_inactive(msm_obj);
@@ -815,14 +867,29 @@ static void update_inactive(struct msm_gem_object *msm_obj)
 {
        struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
 
+       GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
+
+       if (msm_obj->active_count != 0)
+               return;
+
        mutex_lock(&priv->mm_lock);
-       WARN_ON(msm_obj->active_count != 0);
 
-       list_del_init(&msm_obj->mm_list);
-       if (msm_obj->madv == MSM_MADV_WILLNEED)
+       if (msm_obj->dontneed)
+               mark_unpurgeable(msm_obj);
+       if (msm_obj->evictable)
+               mark_unevictable(msm_obj);
+
+       list_del(&msm_obj->mm_list);
+       if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
                list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
-       else
+               mark_evictable(msm_obj);
+       } else if (msm_obj->madv == MSM_MADV_DONTNEED) {
                list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
+               mark_purgeable(msm_obj);
+       } else {
+               GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
+               list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
+       }
 
        mutex_unlock(&priv->mm_lock);
 }
@@ -863,7 +930,8 @@ static void describe_fence(struct dma_fence *fence, const char *type,
                                fence->seqno);
 }
 
-void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
+void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
+               struct msm_gem_stats *stats)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct dma_resv *robj = obj->resv;
@@ -875,11 +943,28 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 
        msm_gem_lock(obj);
 
+       stats->all.count++;
+       stats->all.size += obj->size;
+
+       if (is_active(msm_obj)) {
+               stats->active.count++;
+               stats->active.size += obj->size;
+       }
+
+       if (msm_obj->pages) {
+               stats->resident.count++;
+               stats->resident.size += obj->size;
+       }
+
        switch (msm_obj->madv) {
        case __MSM_MADV_PURGED:
+               stats->purged.count++;
+               stats->purged.size += obj->size;
                madv = " purged";
                break;
        case MSM_MADV_DONTNEED:
+               stats->purgeable.count++;
+               stats->purgeable.size += obj->size;
                madv = " purgeable";
                break;
        case MSM_MADV_WILLNEED:
@@ -946,20 +1031,26 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 
 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
 {
+       struct msm_gem_stats stats = {};
        struct msm_gem_object *msm_obj;
-       int count = 0;
-       size_t size = 0;
 
        seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
-       list_for_each_entry(msm_obj, list, mm_list) {
+       list_for_each_entry(msm_obj, list, node) {
                struct drm_gem_object *obj = &msm_obj->base;
                seq_puts(m, "   ");
-               msm_gem_describe(obj, m);
-               count++;
-               size += obj->size;
+               msm_gem_describe(obj, m, &stats);
        }
 
-       seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
+       seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
+                       stats.all.count, stats.all.size);
+       seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
+                       stats.active.count, stats.active.size);
+       seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
+                       stats.resident.count, stats.resident.size);
+       seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
+                       stats.purgeable.count, stats.purgeable.size);
+       seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
+                       stats.purged.count, stats.purged.size);
 }
 #endif
 
@@ -970,19 +1061,25 @@ void msm_gem_free_object(struct drm_gem_object *obj)
        struct drm_device *dev = obj->dev;
        struct msm_drm_private *priv = dev->dev_private;
 
+       mutex_lock(&priv->obj_lock);
+       list_del(&msm_obj->node);
+       mutex_unlock(&priv->obj_lock);
+
        mutex_lock(&priv->mm_lock);
+       if (msm_obj->dontneed)
+               mark_unpurgeable(msm_obj);
        list_del(&msm_obj->mm_list);
        mutex_unlock(&priv->mm_lock);
 
        msm_gem_lock(obj);
 
        /* object should not be on active list: */
-       WARN_ON(is_active(msm_obj));
+       GEM_WARN_ON(is_active(msm_obj));
 
-       put_iova_spaces(obj);
+       put_iova_spaces(obj, true);
 
        if (obj->import_attach) {
-               WARN_ON(msm_obj->vaddr);
+               GEM_WARN_ON(msm_obj->vaddr);
 
                /* Don't drop the pages for imported dmabuf, as they are not
                 * ours, just free the array we allocated:
@@ -1098,7 +1195,7 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
        else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
                use_vram = true;
 
-       if (WARN_ON(use_vram && !priv->vram.size))
+       if (GEM_WARN_ON(use_vram && !priv->vram.size))
                return ERR_PTR(-EINVAL);
 
        /* Disallow zero sized objects as they make the underlying
@@ -1153,10 +1250,13 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
        }
 
        mutex_lock(&priv->mm_lock);
-       /* Initially obj is idle, obj->madv == WILLNEED: */
-       list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
+       list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
        mutex_unlock(&priv->mm_lock);
 
+       mutex_lock(&priv->obj_lock);
+       list_add_tail(&msm_obj->node, &priv->objects);
+       mutex_unlock(&priv->obj_lock);
+
        return obj;
 
 fail:
@@ -1224,9 +1324,13 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
        msm_gem_unlock(obj);
 
        mutex_lock(&priv->mm_lock);
-       list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
+       list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
        mutex_unlock(&priv->mm_lock);
 
+       mutex_lock(&priv->obj_lock);
+       list_add_tail(&msm_obj->node, &priv->objects);
+       mutex_unlock(&priv->obj_lock);
+
        return obj;
 
 fail:
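
It is worth spelling out how the two reclaim paths in this file differ: msm_gem_purge() is terminal for MADV_DONTNEED objects (iova ranges closed, shmem truncated, madv becomes __MSM_MADV_PURGED), whereas msm_gem_evict() merely unpins WILLNEED objects so their pages can be swapped out, keeping the iova allocation so a later repin lands at the same GPU address. A condensed, illustrative decision helper; the real call sites are the purge()/evict() wrappers in msm_gem_shrinker.c further below:

/* Illustrative only - the real call sites are purge() and evict() in
 * msm_gem_shrinker.c.  Caller holds the object lock; the evictable-flag
 * bookkeeping that the shrinker also consults is omitted here.
 */
static bool try_reclaim(struct msm_gem_object *msm_obj)
{
	if (is_purgeable(msm_obj)) {
		msm_gem_purge(&msm_obj->base);	/* drop pages and iova ranges */
		return true;
	}
	if (!is_unevictable(msm_obj)) {
		msm_gem_evict(&msm_obj->base);	/* drop pages, keep iova */
		return true;
	}
	return false;
}
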
index b3a0a88..a6480d2 100644 (file)
 #include <linux/dma-resv.h>
 #include "msm_drv.h"
 
+/* Make all GEM-related WARN_ON()s ratelimited: when things go wrong they
+ * tend to go wrong 1000s of times in a short timespan.
+ */
+#define GEM_WARN_ON(x)  WARN_RATELIMIT(x, "%s", __stringify(x))
+
 /* Additional internal-use only BO flags: */
 #define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
 #define MSM_BO_MAP_PRIV      0x20000000    /* use IOMMU_PRIV when mapping */
@@ -50,18 +55,35 @@ struct msm_gem_object {
         */
        uint8_t madv;
 
+       /**
+        * Is object on inactive_dontneed list (ie. counted in priv->shrinkable_count)?
+        */
+       bool dontneed : 1;
+
+       /**
+        * Is object evictable (ie. counted in priv->evictable_count)?
+        */
+       bool evictable : 1;
+
        /**
         * count of active vmap'ing
         */
        uint8_t vmap_count;
 
-       /* And object is either:
-        *  inactive - on priv->inactive_list
+       /**
+        * Node in list of all objects (mainly for debugfs, protected by
+        * priv->obj_lock)
+        */
+       struct list_head node;
+
+       /**
+        * An object is either:
+        *  inactive - on one of the priv->inactive_* lists (which list
+        *     depends on purgeability/pin status)
         *  active   - on one of the gpu's active_list..  well, at
         *     least for now we don't have (I don't think) hw sync between
         *     2d and 3d on devices which have both, meaning we need to
         *     block on submit if a bo is already on the other ring
-        *
         */
        struct list_head mm_list;
 
@@ -78,8 +100,6 @@ struct msm_gem_object {
 
        struct list_head vmas;    /* list of msm_gem_vma */
 
-       struct llist_node freed;
-
        /* For physically contiguous buffers.  Used when we don't have
         * an IOMMU.  Also used for stolen/splashscreen buffer.
         */
@@ -88,6 +108,7 @@ struct msm_gem_object {
        char name[32]; /* Identifier to print for the debugfs files */
 
        int active_count;
+       int pin_count;
 };
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
 
@@ -147,8 +168,17 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
                struct dma_buf *dmabuf, struct sg_table *sgt);
 __printf(2, 3)
 void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
+
 #ifdef CONFIG_DEBUG_FS
-void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
+struct msm_gem_stats {
+       struct {
+               unsigned count;
+               size_t size;
+       } all, active, resident, purgeable, purged;
+};
+
+void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
+               struct msm_gem_stats *stats);
 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
 #endif
 
@@ -184,23 +214,101 @@ msm_gem_is_locked(struct drm_gem_object *obj)
 
 static inline bool is_active(struct msm_gem_object *msm_obj)
 {
-       WARN_ON(!msm_gem_is_locked(&msm_obj->base));
+       GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
        return msm_obj->active_count;
 }
 
+/* imported/exported objects are not purgeable: */
+static inline bool is_unpurgeable(struct msm_gem_object *msm_obj)
+{
+       return msm_obj->base.dma_buf || msm_obj->base.import_attach;
+}
+
 static inline bool is_purgeable(struct msm_gem_object *msm_obj)
 {
        return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
-                       !msm_obj->base.dma_buf && !msm_obj->base.import_attach;
+                       !is_unpurgeable(msm_obj);
 }
 
 static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
 {
-       WARN_ON(!msm_gem_is_locked(&msm_obj->base));
+       GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));
        return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
 }
 
+static inline void mark_purgeable(struct msm_gem_object *msm_obj)
+{
+       struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
+
+       GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));
+
+       if (is_unpurgeable(msm_obj))
+               return;
+
+       if (GEM_WARN_ON(msm_obj->dontneed))
+               return;
+
+       priv->shrinkable_count += msm_obj->base.size >> PAGE_SHIFT;
+       msm_obj->dontneed = true;
+}
+
+static inline void mark_unpurgeable(struct msm_gem_object *msm_obj)
+{
+       struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
+
+       GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));
+
+       if (is_unpurgeable(msm_obj))
+               return;
+
+       if (GEM_WARN_ON(!msm_obj->dontneed))
+               return;
+
+       priv->shrinkable_count -= msm_obj->base.size >> PAGE_SHIFT;
+       GEM_WARN_ON(priv->shrinkable_count < 0);
+       msm_obj->dontneed = false;
+}
+
+static inline bool is_unevictable(struct msm_gem_object *msm_obj)
+{
+       return is_unpurgeable(msm_obj) || msm_obj->pin_count || msm_obj->vaddr;
+}
+
+static inline void mark_evictable(struct msm_gem_object *msm_obj)
+{
+       struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
+
+       GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));
+
+       if (is_unevictable(msm_obj))
+               return;
+
+       if (GEM_WARN_ON(msm_obj->evictable))
+               return;
+
+       priv->evictable_count += msm_obj->base.size >> PAGE_SHIFT;
+       msm_obj->evictable = true;
+}
+
+static inline void mark_unevictable(struct msm_gem_object *msm_obj)
+{
+       struct msm_drm_private *priv = msm_obj->base.dev->dev_private;
+
+       GEM_WARN_ON(!mutex_is_locked(&priv->mm_lock));
+
+       if (is_unevictable(msm_obj))
+               return;
+
+       if (GEM_WARN_ON(!msm_obj->evictable))
+               return;
+
+       priv->evictable_count -= msm_obj->base.size >> PAGE_SHIFT;
+       GEM_WARN_ON(priv->evictable_count < 0);
+       msm_obj->evictable = false;
+}
+
 void msm_gem_purge(struct drm_gem_object *obj);
+void msm_gem_evict(struct drm_gem_object *obj);
 void msm_gem_vunmap(struct drm_gem_object *obj);
 
 /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
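
A subtlety in the mark_* helpers above: mark_purgeable() silently refuses imported/exported objects and mark_evictable() refuses pinned or vmap'd ones, so update_inactive() can call them unconditionally. The expected pairing, sketched as a hypothetical list transition (unmark before leaving a list, mark after joining one, all under mm_lock):

/* Hypothetical transition helper (the driver does this inside
 * update_inactive()): unmark before the list_del, mark after the
 * list_add, all while holding mm_lock.
 */
static void move_to_dontneed(struct msm_drm_private *priv,
			     struct msm_gem_object *msm_obj)
{
	mutex_lock(&priv->mm_lock);
	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	list_del(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
	mark_purgeable(msm_obj);
	mutex_unlock(&priv->mm_lock);
}
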
index 9d5248b..1187ecf 100644 (file)
 #include "msm_gpu.h"
 #include "msm_gpu_trace.h"
 
+/* Disabled by default for now, until it has had more testing on the
+ * different iommu combinations that can be paired with the driver:
+ */
+bool enable_eviction = false;
+MODULE_PARM_DESC(enable_eviction, "Enable swappable GEM buffers");
+module_param(enable_eviction, bool, 0600);
+
+static bool can_swap(void)
+{
+       return enable_eviction && get_nr_swap_pages() > 0;
+}
+
 static unsigned long
 msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
        struct msm_drm_private *priv =
                container_of(shrinker, struct msm_drm_private, shrinker);
-       struct msm_gem_object *msm_obj;
-       unsigned long count = 0;
+       unsigned count = priv->shrinkable_count;
 
-       mutex_lock(&priv->mm_lock);
+       if (can_swap())
+               count += priv->evictable_count;
 
-       list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) {
-               if (!msm_gem_trylock(&msm_obj->base))
-                       continue;
-               if (is_purgeable(msm_obj))
-                       count += msm_obj->base.size >> PAGE_SHIFT;
-               msm_gem_unlock(&msm_obj->base);
-       }
+       return count;
+}
 
-       mutex_unlock(&priv->mm_lock);
+static bool
+purge(struct msm_gem_object *msm_obj)
+{
+       if (!is_purgeable(msm_obj))
+               return false;
 
-       return count;
+       /*
+        * This will move the obj off of still_in_list and onto
+        * priv->inactive_unpinned (via update_inactive())
+        */
+       msm_gem_purge(&msm_obj->base);
+
+       return true;
+}
+
+static bool
+evict(struct msm_gem_object *msm_obj)
+{
+       if (is_unevictable(msm_obj))
+               return false;
+
+       msm_gem_evict(&msm_obj->base);
+
+       return true;
 }
 
 static unsigned long
-msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+scan(struct msm_drm_private *priv, unsigned nr_to_scan, struct list_head *list,
+               bool (*shrink)(struct msm_gem_object *msm_obj))
 {
-       struct msm_drm_private *priv =
-               container_of(shrinker, struct msm_drm_private, shrinker);
-       struct msm_gem_object *msm_obj;
-       unsigned long freed = 0;
+       unsigned freed = 0;
+       struct list_head still_in_list;
+
+       INIT_LIST_HEAD(&still_in_list);
 
        mutex_lock(&priv->mm_lock);
 
-       list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) {
-               if (freed >= sc->nr_to_scan)
+       while (freed < nr_to_scan) {
+               struct msm_gem_object *msm_obj = list_first_entry_or_null(
+                               list, typeof(*msm_obj), mm_list);
+
+               if (!msm_obj)
                        break;
-               if (!msm_gem_trylock(&msm_obj->base))
+
+               list_move_tail(&msm_obj->mm_list, &still_in_list);
+
+               /*
+                * If it is in the process of being freed, msm_gem_free_object
+                * can be blocked on mm_lock waiting to remove it.  So just
+                * skip it.
+                */
+               if (!kref_get_unless_zero(&msm_obj->base.refcount))
                        continue;
-               if (is_purgeable(msm_obj)) {
-                       msm_gem_purge(&msm_obj->base);
+
+               /*
+                * Now that we own a reference, we can drop mm_lock for the
+                * rest of the loop body, to reduce contention with the
+                * retire_submit path (which could make more objects purgeable)
+                */
+
+               mutex_unlock(&priv->mm_lock);
+
+               /*
+                * Note that this still needs to be trylock, since we can
+                * hit shrinker in response to trying to get backing pages
+                * for this obj (ie. while its lock is already held)
+                */
+               if (!msm_gem_trylock(&msm_obj->base))
+                       goto tail;
+
+               if (shrink(msm_obj))
                        freed += msm_obj->base.size >> PAGE_SHIFT;
-               }
+
                msm_gem_unlock(&msm_obj->base);
+
+tail:
+               drm_gem_object_put(&msm_obj->base);
+               mutex_lock(&priv->mm_lock);
        }
 
+       list_splice_tail(&still_in_list, list);
        mutex_unlock(&priv->mm_lock);
 
+       return freed;
+}
+
+static unsigned long
+msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+       struct msm_drm_private *priv =
+               container_of(shrinker, struct msm_drm_private, shrinker);
+       unsigned long freed;
+
+       freed = scan(priv, sc->nr_to_scan, &priv->inactive_dontneed, purge);
+
        if (freed > 0)
                trace_msm_gem_purge(freed << PAGE_SHIFT);
 
-       return freed;
+       if (can_swap() && freed < sc->nr_to_scan) {
+               int evicted = scan(priv, sc->nr_to_scan - freed,
+                               &priv->inactive_willneed, evict);
+
+               if (evicted > 0)
+                       trace_msm_gem_evict(evicted << PAGE_SHIFT);
+
+               freed += evicted;
+       }
+
+       return (freed > 0) ? freed : SHRINK_STOP;
 }
 
 /* since we don't know any better, lets bail after a few
@@ -68,26 +151,15 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
  */
 static const int vmap_shrink_limit = 15;
 
-static unsigned
-vmap_shrink(struct list_head *mm_list)
+static bool
+vmap_shrink(struct msm_gem_object *msm_obj)
 {
-       struct msm_gem_object *msm_obj;
-       unsigned unmapped = 0;
+       if (!is_vunmapable(msm_obj))
+               return false;
 
-       list_for_each_entry(msm_obj, mm_list, mm_list) {
-               if (!msm_gem_trylock(&msm_obj->base))
-                       continue;
-               if (is_vunmapable(msm_obj)) {
-                       msm_gem_vunmap(&msm_obj->base);
-                       unmapped++;
-               }
-               msm_gem_unlock(&msm_obj->base);
+       msm_gem_vunmap(&msm_obj->base);
 
-               if (++unmapped >= vmap_shrink_limit)
-                       break;
-       }
-
-       return unmapped;
+       return true;
 }
 
 static int
@@ -103,17 +175,11 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
        };
        unsigned idx, unmapped = 0;
 
-       mutex_lock(&priv->mm_lock);
-
-       for (idx = 0; mm_lists[idx]; idx++) {
-               unmapped += vmap_shrink(mm_lists[idx]);
-
-               if (unmapped >= vmap_shrink_limit)
-                       break;
+       for (idx = 0; mm_lists[idx] && unmapped < vmap_shrink_limit; idx++) {
+               unmapped += scan(priv, vmap_shrink_limit - unmapped,
+                               mm_lists[idx], vmap_shrink);
        }
 
-       mutex_unlock(&priv->mm_lock);
-
        *(unsigned long *)ptr += unmapped;
 
        if (unmapped > 0)
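
The count()/scan() pair above is invoked by core memory reclaim once the shrinker is registered. The registration itself is not part of this hunk; a sketch of the assumed wiring (the msm driver's actual init helper may differ in detail):

/* Assumed registration (not shown in this hunk): */
void msm_gem_shrinker_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	priv->shrinker.count_objects = msm_gem_shrinker_count;
	priv->shrinker.scan_objects = msm_gem_shrinker_scan;
	priv->shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&priv->shrinker));
}

Note also that because module_param() above uses mode 0600, the enable_eviction knob should be toggleable at runtime, normally via /sys/module/msm/parameters/enable_eviction.
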
index ab7c167..9dd1c58 100644 (file)
@@ -251,6 +251,8 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
        if (ret)
                return ret;
 
+       gpu->suspend_count++;
+
        return 0;
 }
 
index d7cd02c..18baf93 100644 (file)
@@ -152,6 +152,8 @@ struct msm_gpu {
                ktime_t time;
        } devfreq;
 
+       uint32_t suspend_count;
+
        struct msm_gpu_state *crashstate;
        /* True if the hardware supports expanded apriv (a650 and newer) */
        bool hw_apriv;
index 03e0c25..ca0b08d 100644 (file)
@@ -128,6 +128,19 @@ TRACE_EVENT(msm_gem_purge,
 );
 
 
+TRACE_EVENT(msm_gem_evict,
+               TP_PROTO(u32 bytes),
+               TP_ARGS(bytes),
+               TP_STRUCT__entry(
+                       __field(u32, bytes)
+                       ),
+               TP_fast_assign(
+                       __entry->bytes = bytes;
+                       ),
+               TP_printk("Evicting %u bytes", __entry->bytes)
+);
+
+
 TRACE_EVENT(msm_gem_purge_vmaps,
                TP_PROTO(u32 unmapped),
                TP_ARGS(unmapped),
index 4735251..d8151a8 100644 (file)
@@ -157,7 +157,6 @@ struct msm_kms {
         * from the crtc's pending_timer close to end of the frame:
         */
        struct mutex commit_lock[MAX_CRTCS];
-       struct lock_class_key commit_lock_keys[MAX_CRTCS];
        unsigned pending_crtc_mask;
        struct msm_pending_timer pending_timers[MAX_CRTCS];
 };
@@ -167,11 +166,8 @@ static inline int msm_kms_init(struct msm_kms *kms,
 {
        unsigned i, ret;
 
-       for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++) {
-               lockdep_register_key(&kms->commit_lock_keys[i]);
-               __mutex_init(&kms->commit_lock[i], "&kms->commit_lock[i]",
-                            &kms->commit_lock_keys[i]);
-       }
+       for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++)
+               mutex_init(&kms->commit_lock[i]);
 
        kms->funcs = funcs;
 
index 196612a..1c9c0cd 100644 (file)
@@ -2693,9 +2693,20 @@ nv50_display_create(struct drm_device *dev)
        else
                nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
 
-       if (disp->disp->object.oclass >= GK104_DISP) {
+       /* FIXME: 256x256 cursors are supported on Kepler; however, unlike Maxwell and later
+        * generations, Kepler requires that we use small pages (4K) for cursor scanout surfaces. The
+        * proper fix for this is to teach nouveau to migrate fbs being used for the cursor plane to
+        * small page allocations in prepare_fb(). When this is implemented, we should also force
+        * large pages (128K) for ovly fbs in order to fix Kepler ovlys.
+        * But until then, just limit cursors to 128x128 - which is small enough to avoid ever using
+        * large pages.
+        */
+       if (disp->disp->object.oclass >= GM107_DISP) {
                dev->mode_config.cursor_width = 256;
                dev->mode_config.cursor_height = 256;
+       } else if (disp->disp->object.oclass >= GK104_DISP) {
+               dev->mode_config.cursor_width = 128;
+               dev->mode_config.cursor_height = 128;
        } else {
                dev->mode_config.cursor_width = 64;
                dev->mode_config.cursor_height = 64;
index 2d5d68f..3e09df0 100644 (file)
@@ -548,6 +548,10 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
 
        if (!ttm_dma)
                return;
+       if (!ttm_dma->pages) {
+               NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
+               return;
+       }
 
        /* Don't waste time looping if the object is coherent */
        if (nvbo->force_coherent)
@@ -580,6 +584,10 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 
        if (!ttm_dma)
                return;
+       if (!ttm_dma->pages) {
+               NV_DEBUG(drm, "ttm_dma 0x%p: pages NULL\n", ttm_dma);
+               return;
+       }
 
        /* Don't waste time looping if the object is coherent */
        if (nvbo->force_coherent)
index 022a8d5..5f1722b 100644 (file)
@@ -2149,12 +2149,12 @@ static int dsi_vc_send_short(struct dsi_data *dsi, int vc,
                             const struct mipi_dsi_msg *msg)
 {
        struct mipi_dsi_packet pkt;
-       int err;
+       int ret;
        u32 r;
 
-       err = mipi_dsi_create_packet(&pkt, msg);
-       if (err)
-               return err;
+       ret = mipi_dsi_create_packet(&pkt, msg);
+       if (ret < 0)
+               return ret;
 
        WARN_ON(!dsi_bus_is_locked(dsi));
 
index af381d7..5fbfb71 100644 (file)
@@ -37,6 +37,7 @@ struct dsic_panel_data {
        u32 height_mm;
        u32 max_hs_rate;
        u32 max_lp_rate;
+       bool te_support;
 };
 
 struct panel_drv_data {
@@ -334,9 +335,11 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
        if (r)
                goto err;
 
-       r = mipi_dsi_dcs_set_tear_on(ddata->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
-       if (r)
-               goto err;
+       if (ddata->panel_data->te_support) {
+               r = mipi_dsi_dcs_set_tear_on(ddata->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+               if (r)
+                       goto err;
+       }
 
        /* possible panel bug */
        msleep(100);
@@ -619,6 +622,7 @@ static const struct dsic_panel_data taal_data = {
        .height_mm = 0,
        .max_hs_rate = 300000000,
        .max_lp_rate = 10000000,
+       .te_support = true,
 };
 
 static const struct dsic_panel_data himalaya_data = {
@@ -629,6 +633,7 @@ static const struct dsic_panel_data himalaya_data = {
        .height_mm = 88,
        .max_hs_rate = 300000000,
        .max_lp_rate = 10000000,
+       .te_support = false,
 };
 
 static const struct dsic_panel_data droid4_data = {
@@ -639,6 +644,7 @@ static const struct dsic_panel_data droid4_data = {
        .height_mm = 89,
        .max_hs_rate = 300000000,
        .max_lp_rate = 10000000,
+       .te_support = false,
 };
 
 static const struct of_device_id dsicm_of_match[] = {
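
With the flag plumbed through, a future panel that does support the tearing-effect signal only needs its dsic_panel_data entry to set it. A hypothetical entry, showing only the fields visible above:

	static const struct dsic_panel_data example_panel_data = {
		/* timing fields elided */
		.height_mm = 89,
		.max_hs_rate = 300000000,
		.max_lp_rate = 10000000,
		.te_support = true,	/* issue DCS set_tear_on at power-on */
	};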
index 49e6c93..bacaf1b 100644 (file)
@@ -186,7 +186,7 @@ static ssize_t vmirror_show(struct device *dev, struct device_attribute *attr,
 {
        struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", lcd->vmirror);
+       return sysfs_emit(buf, "%d\n", lcd->vmirror);
 }
 
 static ssize_t vmirror_store(struct device *dev, struct device_attribute *attr,
@@ -214,7 +214,7 @@ static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
 {
        struct td043mtea1_panel *lcd = dev_get_drvdata(dev);
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", lcd->mode);
+       return sysfs_emit(buf, "%d\n", lcd->mode);
 }
 
 static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
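
These conversions replace snprintf(buf, PAGE_SIZE, ...) with sysfs_emit(), which knows sysfs show() buffers are exactly one page, verifies the buffer is page-aligned, and clamps output accordingly. The resulting callback shape (a sketch based on the vmirror_show/mode_show handlers above):

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		struct td043mtea1_panel *lcd = dev_get_drvdata(dev);

		/* sysfs_emit() bounds the write to PAGE_SIZE internally */
		return sysfs_emit(buf, "%d\n", lcd->mode);
	}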
index a0459b1..7444dc0 100644 (file)
@@ -2569,6 +2569,7 @@ int r600_init_microcode(struct radeon_device *rdev)
                pr_err("r600_cp: Bogus length %zu in firmware \"%s\"\n",
                       rdev->me_fw->size, fw_name);
                err = -EINVAL;
+               goto out;
        }
 
        snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
@@ -2579,6 +2580,7 @@ int r600_init_microcode(struct radeon_device *rdev)
                pr_err("r600_rlc: Bogus length %zu in firmware \"%s\"\n",
                       rdev->rlc_fw->size, fw_name);
                err = -EINVAL;
+               goto out;
        }
 
        if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
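
Both hunks cure the same error-path bug: err was set to -EINVAL on a bogus firmware length, but execution fell through and continued loading the next blob. The fixed pattern, sketched (identifiers approximate the r600 code above):

	if (rdev->me_fw->size != me_req_size) {
		pr_err("r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
		goto out;	/* previously missing: fell through with err set */
	}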
index 34b7c6f..8be4799 100644 (file)
@@ -38,7 +38,7 @@ extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes,
 
 
 struct r600_cs_track {
-       /* configuration we miror so that we use same code btw kms/ums */
+       /* configuration we mirror so that we use same code btw kms/ums */
        u32                     group_size;
        u32                     nbanks;
        u32                     npipes;
@@ -963,7 +963,7 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
  *
  * This function will test against r600_reg_safe_bm and return 0
  * if register is safe. If register is not flag as safe this function
- * will test it against a list of register needind special handling.
+ * will test it against a list of registers needing special handling.
  */
 static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 {
@@ -2336,7 +2336,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
 /**
  * r600_dma_cs_next_reloc() - parse next reloc
  * @p:         parser structure holding parsing context.
- * @cs_reloc:          reloc informations
+ * @cs_reloc:          reloc information
  *
  * Return the next reloc, do bo validation and compute
  * GPU offset using the provided start.
index 66a0e73..59cf1d2 100644 (file)
@@ -241,6 +241,9 @@ radeon_dp_mst_detect(struct drm_connector *connector,
                to_radeon_connector(connector);
        struct radeon_connector *master = radeon_connector->mst_port;
 
+       if (drm_connector_is_unregistered(connector))
+               return connector_status_disconnected;
+
        return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
                                      radeon_connector->port);
 }
index 804f7a4..cee11c5 100644 (file)
@@ -380,6 +380,8 @@ int radeon_bo_evict_vram(struct radeon_device *rdev)
        }
 #endif
        man = ttm_manager_type(bdev, TTM_PL_VRAM);
+       if (!man)
+               return 0;
        return ttm_resource_manager_evict_all(bdev, man);
 }
 
index b6737be..0c1950f 100644 (file)
@@ -360,11 +360,10 @@ static ssize_t radeon_get_pm_profile(struct device *dev,
        struct radeon_device *rdev = ddev->dev_private;
        int cp = rdev->pm.profile;
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",
-                       (cp == PM_PROFILE_AUTO) ? "auto" :
-                       (cp == PM_PROFILE_LOW) ? "low" :
-                       (cp == PM_PROFILE_MID) ? "mid" :
-                       (cp == PM_PROFILE_HIGH) ? "high" : "default");
+       return sysfs_emit(buf, "%s\n", (cp == PM_PROFILE_AUTO) ? "auto" :
+                         (cp == PM_PROFILE_LOW) ? "low" :
+                         (cp == PM_PROFILE_MID) ? "mid" :
+                         (cp == PM_PROFILE_HIGH) ? "high" : "default");
 }
 
 static ssize_t radeon_set_pm_profile(struct device *dev,
@@ -415,9 +414,8 @@ static ssize_t radeon_get_pm_method(struct device *dev,
        struct radeon_device *rdev = ddev->dev_private;
        int pm = rdev->pm.pm_method;
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",
-                       (pm == PM_METHOD_DYNPM) ? "dynpm" :
-                       (pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
+       return sysfs_emit(buf, "%s\n", (pm == PM_METHOD_DYNPM) ? "dynpm" :
+                         (pm == PM_METHOD_PROFILE) ? "profile" : "dpm");
 }
 
 static ssize_t radeon_set_pm_method(struct device *dev,
@@ -472,9 +470,9 @@ static ssize_t radeon_get_dpm_state(struct device *dev,
        struct radeon_device *rdev = ddev->dev_private;
        enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",
-                       (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
-                       (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
+       return sysfs_emit(buf, "%s\n",
+                         (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
+                         (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
 }
 
 static ssize_t radeon_set_dpm_state(struct device *dev,
@@ -518,11 +516,11 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
 
        if  ((rdev->flags & RADEON_IS_PX) &&
             (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
-               return snprintf(buf, PAGE_SIZE, "off\n");
+               return sysfs_emit(buf, "off\n");
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",
-                       (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
-                       (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
+       return sysfs_emit(buf, "%s\n",
+                         (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
+                         (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
 }
 
 static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
@@ -685,7 +683,7 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev,
        else
                temp = 0;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+       return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
@@ -701,7 +699,7 @@ static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
        else
                temp = rdev->pm.dpm.thermal.max_temp;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+       return sysfs_emit(buf, "%d\n", temp);
 }
 
 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
@@ -731,7 +729,7 @@ static ssize_t radeon_hwmon_show_sclk(struct device *dev,
           for hwmon */
        sclk *= 10000;
 
-       return snprintf(buf, PAGE_SIZE, "%u\n", sclk);
+       return sysfs_emit(buf, "%u\n", sclk);
 }
 
 static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, radeon_hwmon_show_sclk, NULL,
@@ -752,7 +750,7 @@ static ssize_t radeon_hwmon_show_vddc(struct device *dev,
        if (rdev->asic->dpm.get_current_vddc)
                vddc = rdev->asic->dpm.get_current_vddc(rdev);
 
-       return snprintf(buf, PAGE_SIZE, "%u\n", vddc);
+       return sysfs_emit(buf, "%u\n", vddc);
 }
 
 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, radeon_hwmon_show_vddc, NULL,
index 476ce9c..380b300 100644 (file)
@@ -360,7 +360,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm
        if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
                /* check that we only pin down anonymous memory
                   to prevent problems with writeback */
-               unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
+               unsigned long end = gtt->userptr + (u64)ttm->num_pages * PAGE_SIZE;
                struct vm_area_struct *vma;
                vma = find_vma(gtt->usermm, gtt->userptr);
                if (!vma || vma->vm_file || vma->vm_end < end)
@@ -382,7 +382,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm
        } while (pinned < ttm->num_pages);
 
        r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
-                                     ttm->num_pages << PAGE_SHIFT,
+                                     (u64)ttm->num_pages << PAGE_SHIFT,
                                      GFP_KERNEL);
        if (r)
                goto release_sg;
@@ -415,7 +415,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_device *bdev, struct ttm_tt *
                DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
 
        /* double check that we don't free the table twice */
-       if (!ttm->sg->sgl)
+       if (!ttm->sg || !ttm->sg->sgl)
                return;
 
        /* free the sg table and pages again */
@@ -481,13 +481,14 @@ static void radeon_ttm_backend_unbind(struct ttm_device *bdev, struct ttm_tt *tt
        struct radeon_ttm_tt *gtt = (void *)ttm;
        struct radeon_device *rdev = radeon_get_rdev(bdev);
 
+       if (gtt->userptr)
+               radeon_ttm_tt_unpin_userptr(bdev, ttm);
+
        if (!gtt->bound)
                return;
 
        radeon_gart_unbind(rdev, gtt->offset, ttm->num_pages);
 
-       if (gtt->userptr)
-               radeon_ttm_tt_unpin_userptr(bdev, ttm);
        gtt->bound = false;
 }
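
The (u64) casts in the earlier hunks matter because ttm->num_pages is a 32-bit count: num_pages * PAGE_SIZE and num_pages << PAGE_SHIFT are otherwise evaluated in 32 bits and wrap for buffers of 4 GiB and up. A worked example of the failure mode (assuming 4 KiB pages, so PAGE_SHIFT == 12):

	u32 num_pages = 0x100000;			/* 1Mi pages = 4 GiB */
	u64 wrong = num_pages << PAGE_SHIFT;		/* 32-bit shift wraps to 0 */
	u64 right = (u64)num_pages << PAGE_SHIFT;	/* 0x100000000 as intended */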
 
index ba8c603..ca37617 100644 (file)
@@ -48,21 +48,12 @@ static unsigned int rcar_du_encoder_count_ports(struct device_node *node)
 static const struct drm_encoder_funcs rcar_du_encoder_funcs = {
 };
 
-static void rcar_du_encoder_release(struct drm_device *dev, void *res)
-{
-       struct rcar_du_encoder *renc = res;
-
-       drm_encoder_cleanup(&renc->base);
-       kfree(renc);
-}
-
 int rcar_du_encoder_init(struct rcar_du_device *rcdu,
                         enum rcar_du_output output,
                         struct device_node *enc_node)
 {
        struct rcar_du_encoder *renc;
        struct drm_bridge *bridge;
-       int ret;
 
        /*
         * Locate the DRM bridge from the DT node. For the DPAD outputs, if the
@@ -101,26 +92,16 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
                        return -ENOLINK;
        }
 
-       renc = kzalloc(sizeof(*renc), GFP_KERNEL);
-       if (renc == NULL)
-               return -ENOMEM;
-
-       renc->output = output;
-
        dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n",
                enc_node, output);
 
-       ret = drm_encoder_init(&rcdu->ddev, &renc->base, &rcar_du_encoder_funcs,
-                              DRM_MODE_ENCODER_NONE, NULL);
-       if (ret < 0) {
-               kfree(renc);
-               return ret;
-       }
+       renc = drmm_encoder_alloc(&rcdu->ddev, struct rcar_du_encoder, base,
+                                 &rcar_du_encoder_funcs, DRM_MODE_ENCODER_NONE,
+                                 NULL);
+       if (!renc)
+               return -ENOMEM;
 
-       ret = drmm_add_action_or_reset(&rcdu->ddev, rcar_du_encoder_release,
-                                      renc);
-       if (ret)
-               return ret;
+       renc->output = output;
 
        /*
         * Attach the bridge to the encoder. The bridge will create the
index d82a7eb..92d8de2 100644 (file)
@@ -361,40 +361,16 @@ static void drm_sched_job_timedout(struct work_struct *work)
   */
 void drm_sched_increase_karma(struct drm_sched_job *bad)
 {
-       int i;
-       struct drm_sched_entity *tmp;
-       struct drm_sched_entity *entity;
-       struct drm_gpu_scheduler *sched = bad->sched;
-
-       /* don't increase @bad's karma if it's from KERNEL RQ,
-        * because sometimes GPU hang would cause kernel jobs (like VM updating jobs)
-        * corrupt but keep in mind that kernel jobs always considered good.
-        */
-       if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
-               atomic_inc(&bad->karma);
-               for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
-                    i++) {
-                       struct drm_sched_rq *rq = &sched->sched_rq[i];
-
-                       spin_lock(&rq->lock);
-                       list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
-                               if (bad->s_fence->scheduled.context ==
-                                   entity->fence_context) {
-                                       if (atomic_read(&bad->karma) >
-                                           bad->sched->hang_limit)
-                                               if (entity->guilty)
-                                                       atomic_set(entity->guilty, 1);
-                                       break;
-                               }
-                       }
-                       spin_unlock(&rq->lock);
-                       if (&entity->list != &rq->entities)
-                               break;
-               }
-       }
+       drm_sched_increase_karma_ext(bad, 1);
 }
 EXPORT_SYMBOL(drm_sched_increase_karma);
 
+void drm_sched_reset_karma(struct drm_sched_job *bad)
+{
+       drm_sched_increase_karma_ext(bad, 0);
+}
+EXPORT_SYMBOL(drm_sched_reset_karma);
+
 /**
  * drm_sched_stop - stop the scheduler
  *
@@ -533,15 +509,32 @@ EXPORT_SYMBOL(drm_sched_start);
  *
  */
 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
+{
+       drm_sched_resubmit_jobs_ext(sched, INT_MAX);
+}
+EXPORT_SYMBOL(drm_sched_resubmit_jobs);
+
+/**
+ * drm_sched_resubmit_jobs_ext - helper to relaunch a certain number of jobs from the pending list
+ *
+ * @sched: scheduler instance
+ * @max: number of jobs to relaunch
+ *
+ */
+void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max)
 {
        struct drm_sched_job *s_job, *tmp;
        uint64_t guilty_context;
        bool found_guilty = false;
        struct dma_fence *fence;
+       int i = 0;
 
        list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
                struct drm_sched_fence *s_fence = s_job->s_fence;
 
+               if (i >= max)
+                       break;
+
                if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
                        found_guilty = true;
                        guilty_context = s_job->s_fence->scheduled.context;
@@ -552,6 +545,7 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
 
                dma_fence_put(s_job->s_fence->parent);
                fence = sched->ops->run_job(s_job);
+               i++;
 
                if (IS_ERR_OR_NULL(fence)) {
                        if (IS_ERR(fence))
@@ -563,7 +557,7 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
                }
        }
 }
-EXPORT_SYMBOL(drm_sched_resubmit_jobs);
+EXPORT_SYMBOL(drm_sched_resubmit_jobs_ext);
 
 /**
  * drm_sched_job_init - init a scheduler job
@@ -903,3 +897,48 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
        sched->ready = false;
 }
 EXPORT_SYMBOL(drm_sched_fini);
+
+/**
+ * drm_sched_increase_karma_ext - Update sched_entity guilty flag
+ *
+ * @bad: the job guilty of the timeout
+ * @type: 1 to increase the job's karma, 0 to reset it
+ *
+ */
+void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type)
+{
+       int i;
+       struct drm_sched_entity *tmp;
+       struct drm_sched_entity *entity;
+       struct drm_gpu_scheduler *sched = bad->sched;
+
+       /* don't change @bad's karma if it's from the KERNEL RQ, because a GPU
+        * hang can corrupt kernel jobs (like VM updating jobs); keep in mind
+        * that kernel jobs are always considered good.
+        */
+       if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
+               if (type == 0)
+                       atomic_set(&bad->karma, 0);
+               else if (type == 1)
+                       atomic_inc(&bad->karma);
+
+               for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
+                    i++) {
+                       struct drm_sched_rq *rq = &sched->sched_rq[i];
+
+                       spin_lock(&rq->lock);
+                       list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
+                               if (bad->s_fence->scheduled.context ==
+                                   entity->fence_context) {
+                                       if (entity->guilty)
+                                               atomic_set(entity->guilty, type);
+                                       break;
+                               }
+                       }
+                       spin_unlock(&rq->lock);
+                       if (&entity->list != &rq->entities)
+                               break;
+               }
+       }
+}
+EXPORT_SYMBOL(drm_sched_increase_karma_ext);
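
With both _ext helpers exported, a driver reset path can replay only part of the pending list and later clear the offender's karma once recovery succeeds. A hypothetical recovery sequence (driver-side sketch, error handling elided):

	drm_sched_stop(sched, bad_job);
	drm_sched_resubmit_jobs_ext(sched, 2);	/* replay only the first two jobs */
	drm_sched_start(sched, true);

	/* once the hang is confirmed resolved, forgive the job */
	drm_sched_reset_karma(bad_job);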
index da6afe7..f9120dc 100644 (file)
@@ -832,10 +832,14 @@ static struct drm_plane *tegra_primary_plane_create(struct drm_device *drm,
        return &plane->base;
 }
 
-static const u32 tegra_cursor_plane_formats[] = {
+static const u32 tegra_legacy_cursor_plane_formats[] = {
        DRM_FORMAT_RGBA8888,
 };
 
+static const u32 tegra_cursor_plane_formats[] = {
+       DRM_FORMAT_ARGB8888,
+};
+
 static int tegra_cursor_atomic_check(struct drm_plane *plane,
                                     struct drm_atomic_state *state)
 {
@@ -875,12 +879,24 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
                                                                           plane);
        struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
        struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
-       u32 value = CURSOR_CLIP_DISPLAY;
+       struct tegra_drm *tegra = plane->dev->dev_private;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+       u64 dma_mask = *dc->dev->dma_mask;
+#endif
+       unsigned int x, y;
+       u32 value = 0;
 
        /* rien ne va plus */
        if (!new_state->crtc || !new_state->fb)
                return;
 
+       /*
+        * Legacy display supports hardware clipping of the cursor, but
+        * nvdisplay relies on software to clip the cursor to the screen.
+        */
+       if (!dc->soc->has_nvdisplay)
+               value |= CURSOR_CLIP_DISPLAY;
+
        switch (new_state->crtc_w) {
        case 32:
                value |= CURSOR_SIZE_32x32;
@@ -908,7 +924,7 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
        tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR);
 
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-       value = (tegra_plane_state->iova[0] >> 32) & 0x3;
+       value = (tegra_plane_state->iova[0] >> 32) & (dma_mask >> 32);
        tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR_HI);
 #endif
 
@@ -920,15 +936,39 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
        value = tegra_dc_readl(dc, DC_DISP_BLEND_CURSOR_CONTROL);
        value &= ~CURSOR_DST_BLEND_MASK;
        value &= ~CURSOR_SRC_BLEND_MASK;
-       value |= CURSOR_MODE_NORMAL;
+
+       if (dc->soc->has_nvdisplay)
+               value &= ~CURSOR_COMPOSITION_MODE_XOR;
+       else
+               value |= CURSOR_MODE_NORMAL;
+
        value |= CURSOR_DST_BLEND_NEG_K1_TIMES_SRC;
        value |= CURSOR_SRC_BLEND_K1_TIMES_SRC;
        value |= CURSOR_ALPHA;
        tegra_dc_writel(dc, value, DC_DISP_BLEND_CURSOR_CONTROL);
 
+       /* nvdisplay relies on software for clipping */
+       if (dc->soc->has_nvdisplay) {
+               struct drm_rect src;
+
+               x = new_state->dst.x1;
+               y = new_state->dst.y1;
+
+               drm_rect_fp_to_int(&src, &new_state->src);
+
+               value = (src.y1 & tegra->vmask) << 16 | (src.x1 & tegra->hmask);
+               tegra_dc_writel(dc, value, DC_DISP_PCALC_HEAD_SET_CROPPED_POINT_IN_CURSOR);
+
+               value = (drm_rect_height(&src) & tegra->vmask) << 16 |
+                       (drm_rect_width(&src) & tegra->hmask);
+               tegra_dc_writel(dc, value, DC_DISP_PCALC_HEAD_SET_CROPPED_SIZE_IN_CURSOR);
+       } else {
+               x = new_state->crtc_x;
+               y = new_state->crtc_y;
+       }
+
        /* position the cursor */
-       value = (new_state->crtc_y & 0x3fff) << 16 |
-               (new_state->crtc_x & 0x3fff);
+       value = ((y & tegra->vmask) << 16) | (x & tegra->hmask);
        tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
 }
 
@@ -982,8 +1022,13 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
        plane->index = 6;
        plane->dc = dc;
 
-       num_formats = ARRAY_SIZE(tegra_cursor_plane_formats);
-       formats = tegra_cursor_plane_formats;
+       if (!dc->soc->has_nvdisplay) {
+               num_formats = ARRAY_SIZE(tegra_legacy_cursor_plane_formats);
+               formats = tegra_legacy_cursor_plane_formats;
+       } else {
+               num_formats = ARRAY_SIZE(tegra_cursor_plane_formats);
+               formats = tegra_cursor_plane_formats;
+       }
 
        err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
                                       &tegra_plane_funcs, formats,
@@ -1700,6 +1745,11 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
                        dev_err(dc->dev,
                                "failed to set clock rate to %lu Hz\n",
                                state->pclk);
+
+               err = clk_set_rate(dc->clk, state->pclk);
+               if (err < 0)
+                       dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n",
+                               dc->clk, state->pclk, err);
        }
 
        DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk),
@@ -1710,11 +1760,6 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
                value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1;
                tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
        }
-
-       err = clk_set_rate(dc->clk, state->pclk);
-       if (err < 0)
-               dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n",
-                       dc->clk, state->pclk, err);
 }
 
 static void tegra_dc_stop(struct tegra_dc *dc)
@@ -2035,6 +2080,16 @@ static bool tegra_dc_has_window_groups(struct tegra_dc *dc)
        return false;
 }
 
+static int tegra_dc_early_init(struct host1x_client *client)
+{
+       struct drm_device *drm = dev_get_drvdata(client->host);
+       struct tegra_drm *tegra = drm->dev_private;
+
+       tegra->num_crtcs++;
+
+       return 0;
+}
+
 static int tegra_dc_init(struct host1x_client *client)
 {
        struct drm_device *drm = dev_get_drvdata(client->host);
@@ -2045,6 +2100,12 @@ static int tegra_dc_init(struct host1x_client *client)
        struct drm_plane *cursor = NULL;
        int err;
 
+       /*
+        * DC has been reset by now, so the VBLANK syncpoint can be released
+        * for general use.
+        */
+       host1x_syncpt_release_vblank_reservation(client, 26 + dc->pipe);
+
        /*
         * XXX do not register DCs with no window groups because we cannot
         * assign a primary plane to them, which in turn will cause KMS to
@@ -2111,6 +2172,12 @@ static int tegra_dc_init(struct host1x_client *client)
        if (dc->soc->pitch_align > tegra->pitch_align)
                tegra->pitch_align = dc->soc->pitch_align;
 
+       /* track maximum resolution */
+       if (dc->soc->has_nvdisplay)
+               drm->mode_config.max_width = drm->mode_config.max_height = 16384;
+       else
+               drm->mode_config.max_width = drm->mode_config.max_height = 4096;
+
        err = tegra_dc_rgb_init(drm, dc);
        if (err < 0 && err != -ENODEV) {
                dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
@@ -2141,7 +2208,7 @@ cleanup:
                drm_plane_cleanup(primary);
 
        host1x_client_iommu_detach(client);
-       host1x_syncpt_free(dc->syncpt);
+       host1x_syncpt_put(dc->syncpt);
 
        return err;
 }
@@ -2166,7 +2233,17 @@ static int tegra_dc_exit(struct host1x_client *client)
        }
 
        host1x_client_iommu_detach(client);
-       host1x_syncpt_free(dc->syncpt);
+       host1x_syncpt_put(dc->syncpt);
+
+       return 0;
+}
+
+static int tegra_dc_late_exit(struct host1x_client *client)
+{
+       struct drm_device *drm = dev_get_drvdata(client->host);
+       struct tegra_drm *tegra = drm->dev_private;
+
+       tegra->num_crtcs--;
 
        return 0;
 }
@@ -2235,8 +2312,10 @@ put_rpm:
 }
 
 static const struct host1x_client_ops dc_client_ops = {
+       .early_init = tegra_dc_early_init,
        .init = tegra_dc_init,
        .exit = tegra_dc_exit,
+       .late_exit = tegra_dc_late_exit,
        .suspend = tegra_dc_runtime_suspend,
        .resume = tegra_dc_runtime_resume,
 };
@@ -2246,6 +2325,7 @@ static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
        .supports_interlacing = false,
        .supports_cursor = false,
        .supports_block_linear = false,
+       .supports_sector_layout = false,
        .has_legacy_blending = true,
        .pitch_align = 8,
        .has_powergate = false,
@@ -2265,6 +2345,7 @@ static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
        .supports_interlacing = false,
        .supports_cursor = false,
        .supports_block_linear = false,
+       .supports_sector_layout = false,
        .has_legacy_blending = true,
        .pitch_align = 8,
        .has_powergate = false,
@@ -2284,6 +2365,7 @@ static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
        .supports_interlacing = false,
        .supports_cursor = false,
        .supports_block_linear = false,
+       .supports_sector_layout = false,
        .has_legacy_blending = true,
        .pitch_align = 64,
        .has_powergate = true,
@@ -2303,6 +2385,7 @@ static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
        .supports_interlacing = true,
        .supports_cursor = true,
        .supports_block_linear = true,
+       .supports_sector_layout = false,
        .has_legacy_blending = false,
        .pitch_align = 64,
        .has_powergate = true,
@@ -2322,6 +2405,7 @@ static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
        .supports_interlacing = true,
        .supports_cursor = true,
        .supports_block_linear = true,
+       .supports_sector_layout = false,
        .has_legacy_blending = false,
        .pitch_align = 64,
        .has_powergate = true,
@@ -2375,6 +2459,7 @@ static const struct tegra_dc_soc_info tegra186_dc_soc_info = {
        .supports_interlacing = true,
        .supports_cursor = true,
        .supports_block_linear = true,
+       .supports_sector_layout = false,
        .has_legacy_blending = false,
        .pitch_align = 64,
        .has_powergate = false,
@@ -2423,6 +2508,7 @@ static const struct tegra_dc_soc_info tegra194_dc_soc_info = {
        .supports_interlacing = true,
        .supports_cursor = true,
        .supports_block_linear = true,
+       .supports_sector_layout = true,
        .has_legacy_blending = false,
        .pitch_align = 64,
        .has_powergate = false,
@@ -2513,22 +2599,18 @@ static int tegra_dc_couple(struct tegra_dc *dc)
         * POWER_CONTROL registers during CRTC enabling.
         */
        if (dc->soc->coupled_pm && dc->pipe == 1) {
-               u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
-               struct device_link *link;
-               struct device *partner;
+               struct device *companion;
+               struct tegra_dc *parent;
 
-               partner = driver_find_device(dc->dev->driver, NULL, NULL,
-                                            tegra_dc_match_by_pipe);
-               if (!partner)
+               companion = driver_find_device(dc->dev->driver, NULL, (const void *)0,
+                                              tegra_dc_match_by_pipe);
+               if (!companion)
                        return -EPROBE_DEFER;
 
-               link = device_link_add(dc->dev, partner, flags);
-               if (!link) {
-                       dev_err(dc->dev, "failed to link controllers\n");
-                       return -EINVAL;
-               }
+               parent = dev_get_drvdata(companion);
+               dc->client.parent = &parent->client;
 
-               dev_dbg(dc->dev, "coupled to %s\n", dev_name(partner));
+               dev_dbg(dc->dev, "coupled to %s\n", dev_name(companion));
        }
 
        return 0;
@@ -2536,9 +2618,16 @@ static int tegra_dc_couple(struct tegra_dc *dc)
 
 static int tegra_dc_probe(struct platform_device *pdev)
 {
+       u64 dma_mask = dma_get_mask(pdev->dev.parent);
        struct tegra_dc *dc;
        int err;
 
+       err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+               return err;
+       }
+
        dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
        if (!dc)
                return -ENOMEM;
index 051d03d..29f19c3 100644 (file)
@@ -52,6 +52,7 @@ struct tegra_dc_soc_info {
        bool supports_interlacing;
        bool supports_cursor;
        bool supports_block_linear;
+       bool supports_sector_layout;
        bool has_legacy_blending;
        unsigned int pitch_align;
        bool has_powergate;
@@ -511,6 +512,8 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc);
 
 #define DC_DISP_CURSOR_START_ADDR_HI           0x4ec
 #define DC_DISP_BLEND_CURSOR_CONTROL           0x4f1
+#define CURSOR_COMPOSITION_MODE_BLEND          (0 << 25)
+#define CURSOR_COMPOSITION_MODE_XOR            (1 << 25)
 #define CURSOR_MODE_LEGACY                     (0 << 24)
 #define CURSOR_MODE_NORMAL                     (1 << 24)
 #define CURSOR_DST_BLEND_ZERO                  (0 << 16)
@@ -705,6 +708,9 @@ int tegra_dc_rgb_exit(struct tegra_dc *dc);
 #define PROTOCOL_MASK (0xf << 8)
 #define PROTOCOL_SINGLE_TMDS_A (0x1 << 8)
 
+#define DC_DISP_PCALC_HEAD_SET_CROPPED_POINT_IN_CURSOR 0x442
+#define DC_DISP_PCALC_HEAD_SET_CROPPED_SIZE_IN_CURSOR  0x446
+
 #define DC_WIN_CORE_WINDOWGROUP_SET_CONTROL    0x702
 #define OWNER_MASK (0xf << 0)
 #define OWNER(x) (((x) & 0xf) << 0)
index 105fb9c..ea56c6e 100644 (file)
@@ -534,9 +534,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
        dpaux->aux.transfer = tegra_dpaux_transfer;
        dpaux->aux.dev = &pdev->dev;
 
-       err = drm_dp_aux_register(&dpaux->aux);
-       if (err < 0)
-               return err;
+       drm_dp_aux_init(&dpaux->aux);
 
        /*
         * Assume that by default the DPAUX/I2C pads will be used for HDMI,
@@ -589,8 +587,6 @@ static int tegra_dpaux_remove(struct platform_device *pdev)
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
-       drm_dp_aux_unregister(&dpaux->aux);
-
        mutex_lock(&dpaux_lock);
        list_del(&dpaux->list);
        mutex_unlock(&dpaux_lock);
@@ -723,6 +719,10 @@ int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output)
        unsigned long timeout;
        int err;
 
+       err = drm_dp_aux_register(aux);
+       if (err < 0)
+               return err;
+
        output->connector.polled = DRM_CONNECTOR_POLL_HPD;
        dpaux->output = output;
 
@@ -760,6 +760,7 @@ int drm_dp_aux_detach(struct drm_dp_aux *aux)
        unsigned long timeout;
        int err;
 
+       drm_dp_aux_unregister(aux);
        disable_irq(dpaux->irq);
 
        if (dpaux->output->panel) {
index 90709c3..0c350b0 100644 (file)
@@ -174,7 +174,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
        struct drm_tegra_syncpt syncpt;
        struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
        struct drm_gem_object **refs;
-       struct host1x_syncpt *sp;
+       struct host1x_syncpt *sp = NULL;
        struct host1x_job *job;
        unsigned int num_refs;
        int err;
@@ -301,8 +301,8 @@ int tegra_drm_submit(struct tegra_drm_context *context,
                goto fail;
        }
 
-       /* check whether syncpoint ID is valid */
-       sp = host1x_syncpt_get(host1x, syncpt.id);
+       /* Syncpoint ref will be dropped on job release. */
+       sp = host1x_syncpt_get_by_id(host1x, syncpt.id);
        if (!sp) {
                err = -ENOENT;
                goto fail;
@@ -311,7 +311,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
        job->is_addr_reg = context->client->ops->is_addr_reg;
        job->is_valid_class = context->client->ops->is_valid_class;
        job->syncpt_incrs = syncpt.incrs;
-       job->syncpt_id = syncpt.id;
+       job->syncpt = sp;
        job->timeout = 10000;
 
        if (args->timeout && args->timeout < 10000)
@@ -383,7 +383,7 @@ static int tegra_syncpt_read(struct drm_device *drm, void *data,
        struct drm_tegra_syncpt_read *args = data;
        struct host1x_syncpt *sp;
 
-       sp = host1x_syncpt_get(host, args->id);
+       sp = host1x_syncpt_get_by_id_noref(host, args->id);
        if (!sp)
                return -EINVAL;
 
@@ -398,7 +398,7 @@ static int tegra_syncpt_incr(struct drm_device *drm, void *data,
        struct drm_tegra_syncpt_incr *args = data;
        struct host1x_syncpt *sp;
 
-       sp = host1x_syncpt_get(host1x, args->id);
+       sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
        if (!sp)
                return -EINVAL;
 
@@ -412,7 +412,7 @@ static int tegra_syncpt_wait(struct drm_device *drm, void *data,
        struct drm_tegra_syncpt_wait *args = data;
        struct host1x_syncpt *sp;
 
-       sp = host1x_syncpt_get(host1x, args->id);
+       sp = host1x_syncpt_get_by_id_noref(host1x, args->id);
        if (!sp)
                return -EINVAL;
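
The rename splits the old lookup in two: host1x_syncpt_get_by_id() takes a reference that the job path holds until job release, while host1x_syncpt_get_by_id_noref() serves the read/incr/wait ioctls that only need a transient pointer. The submit-side pairing, roughly (sketch):

	sp = host1x_syncpt_get_by_id(host1x, id);	/* +1 reference */
	if (!sp)
		return -ENOENT;
	job->syncpt = sp;
	/* ... the reference is dropped via host1x_syncpt_put() on job release */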
 
@@ -1121,9 +1121,8 @@ static int host1x_drm_probe(struct host1x_device *dev)
 
        drm->mode_config.min_width = 0;
        drm->mode_config.min_height = 0;
-
-       drm->mode_config.max_width = 4096;
-       drm->mode_config.max_height = 4096;
+       drm->mode_config.max_width = 0;
+       drm->mode_config.max_height = 0;
 
        drm->mode_config.allow_fb_modifiers = true;
 
@@ -1142,6 +1141,14 @@ static int host1x_drm_probe(struct host1x_device *dev)
        if (err < 0)
                goto fbdev;
 
+       /*
+        * Now that all display controllers have been initialized, the maximum
+        * supported resolution is known and the bitmasks for the horizontal
+        * and vertical position bitfields can be computed.
+        */
+       tegra->hmask = drm->mode_config.max_width - 1;
+       tegra->vmask = drm->mode_config.max_height - 1;
+
        if (tegra->use_explicit_iommu) {
                u64 carveout_start, carveout_end, gem_start, gem_end;
                u64 dma_mask = dma_get_mask(&dev->dev);
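
Because the mode limits are powers of two, max - 1 is exactly the all-ones mask for the hardware position bitfields, replacing the hardcoded 0x3fff masks removed from the cursor-position code earlier in this series. Worked out:

	4096 - 1  = 0x0fff	/* 12-bit field, pre-nvdisplay DCs */
	16384 - 1 = 0x3fff	/* 14-bit field, nvdisplay */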
index f38de08..87df251 100644 (file)
@@ -24,6 +24,9 @@
 #include "hub.h"
 #include "trace.h"
 
+/* XXX move to include/uapi/drm/drm_fourcc.h? */
+#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT(22)
+
 struct reset_control;
 
 #ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -54,7 +57,9 @@ struct tegra_drm {
        struct tegra_fbdev *fbdev;
 #endif
 
+       unsigned int hmask, vmask;
        unsigned int pitch_align;
+       unsigned int num_crtcs;
 
        struct tegra_display_hub *hub;
 };
index 01939c5..cae8b8c 100644 (file)
@@ -44,6 +44,15 @@ int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
 {
        uint64_t modifier = framebuffer->modifier;
 
+       if ((modifier >> 56) == DRM_FORMAT_MOD_VENDOR_NVIDIA) {
+               if ((modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) == 0)
+                       tiling->sector_layout = TEGRA_BO_SECTOR_LAYOUT_TEGRA;
+               else
+                       tiling->sector_layout = TEGRA_BO_SECTOR_LAYOUT_GPU;
+
+               modifier &= ~DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT;
+       }
+
        switch (modifier) {
        case DRM_FORMAT_MOD_LINEAR:
                tiling->mode = TEGRA_BO_TILING_MODE_PITCH;
@@ -86,6 +95,7 @@ int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
                break;
 
        default:
+               DRM_DEBUG_KMS("unknown format modifier: %llx\n", modifier);
                return -EINVAL;
        }
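
The decode works because bit 22 of the NVIDIA vendor modifiers now carries the sector layout: the code checks the vendor in the top byte, records the layout, then clears the bit so the existing block-linear switch still matches. Composing such a modifier from the macros in this series would look like:

	u64 modifier = DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4) |
		       DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT;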
 
index fafb572..c15fd99 100644 (file)
@@ -21,9 +21,15 @@ enum tegra_bo_tiling_mode {
        TEGRA_BO_TILING_MODE_BLOCK,
 };
 
+enum tegra_bo_sector_layout {
+       TEGRA_BO_SECTOR_LAYOUT_TEGRA,
+       TEGRA_BO_SECTOR_LAYOUT_GPU,
+};
+
 struct tegra_bo_tiling {
        enum tegra_bo_tiling_mode mode;
        unsigned long value;
+       enum tegra_bo_sector_layout sector_layout;
 };
 
 struct tegra_bo {
index adbe2dd..de288cb 100644 (file)
@@ -67,7 +67,7 @@ static int gr2d_init(struct host1x_client *client)
 detach:
        host1x_client_iommu_detach(client);
 free:
-       host1x_syncpt_free(client->syncpts[0]);
+       host1x_syncpt_put(client->syncpts[0]);
 put:
        host1x_channel_put(gr2d->channel);
        return err;
@@ -86,7 +86,7 @@ static int gr2d_exit(struct host1x_client *client)
                return err;
 
        host1x_client_iommu_detach(client);
-       host1x_syncpt_free(client->syncpts[0]);
+       host1x_syncpt_put(client->syncpts[0]);
        host1x_channel_put(gr2d->channel);
 
        return 0;
index b0b8154..24442ad 100644 (file)
@@ -76,7 +76,7 @@ static int gr3d_init(struct host1x_client *client)
 detach:
        host1x_client_iommu_detach(client);
 free:
-       host1x_syncpt_free(client->syncpts[0]);
+       host1x_syncpt_put(client->syncpts[0]);
 put:
        host1x_channel_put(gr3d->channel);
        return err;
@@ -94,7 +94,7 @@ static int gr3d_exit(struct host1x_client *client)
                return err;
 
        host1x_client_iommu_detach(client);
-       host1x_syncpt_free(client->syncpts[0]);
+       host1x_syncpt_put(client->syncpts[0]);
        host1x_channel_put(gr3d->channel);
 
        return 0;
index 8e6d329..79bff8b 100644 (file)
@@ -55,6 +55,18 @@ static const u64 tegra_shared_plane_modifiers[] = {
        DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
        DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
        DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
+       /*
+        * The GPU sector layout is only supported on Tegra194, but these will
+        * be filtered out later on by ->format_mod_supported() on SoCs where
+        * it isn't supported.
+        */
+       DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
+       DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
+       DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
+       DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
+       DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
+       DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
+       /* sentinel */
        DRM_FORMAT_MOD_INVALID
 };
 
@@ -366,6 +378,12 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
                return -EINVAL;
        }
 
+       if (tiling->sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU &&
+           !dc->soc->supports_sector_layout) {
+               DRM_ERROR("hardware doesn't support GPU sector layout\n");
+               return -EINVAL;
+       }
+
        /*
         * Tegra doesn't support different strides for U and V planes so we
         * error out if the user tries to display a framebuffer with such a
@@ -485,6 +503,16 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
 
        base = tegra_plane_state->iova[0] + fb->offsets[0];
 
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+       /*
+        * Physical address bit 39 in Tegra194 is used as a switch for special
+        * logic that swizzles the memory using either the legacy Tegra or the
+        * dGPU sector layout.
+        */
+       if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
+               base |= BIT(39);
+#endif
+
        tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
        tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);
 
@@ -562,9 +590,8 @@ struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
        enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
        struct tegra_drm *tegra = drm->dev_private;
        struct tegra_display_hub *hub = tegra->hub;
-       /* planes can be assigned to arbitrary CRTCs */
-       unsigned int possible_crtcs = 0x7;
        struct tegra_shared_plane *plane;
+       unsigned int possible_crtcs;
        unsigned int num_formats;
        const u64 *modifiers;
        struct drm_plane *p;
@@ -583,6 +610,9 @@ struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
 
        p = &plane->base.base;
 
+       /* planes can be assigned to arbitrary CRTCs */
+       possible_crtcs = BIT(tegra->num_crtcs) - 1;
+
        num_formats = ARRAY_SIZE(tegra_shared_plane_formats);
        formats = tegra_shared_plane_formats;
        modifiers = tegra_shared_plane_modifiers;
@@ -848,12 +878,19 @@ static const struct host1x_client_ops tegra_display_hub_ops = {
 
 static int tegra_display_hub_probe(struct platform_device *pdev)
 {
+       u64 dma_mask = dma_get_mask(pdev->dev.parent);
        struct device_node *child = NULL;
        struct tegra_display_hub *hub;
        struct clk *clk;
        unsigned int i;
        int err;
 
+       err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+               return err;
+       }
+
        hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL);
        if (!hub)
                return -ENOMEM;
index 19e8847..2e11b4b 100644 (file)
@@ -83,6 +83,22 @@ static void tegra_plane_atomic_destroy_state(struct drm_plane *plane,
        kfree(state);
 }
 
+static bool tegra_plane_supports_sector_layout(struct drm_plane *plane)
+{
+       struct drm_crtc *crtc;
+
+       drm_for_each_crtc(crtc, plane->dev) {
+               if (plane->possible_crtcs & drm_crtc_mask(crtc)) {
+                       struct tegra_dc *dc = to_tegra_dc(crtc);
+
+                       if (!dc->soc->supports_sector_layout)
+                               return false;
+               }
+       }
+
+       return true;
+}
+
 static bool tegra_plane_format_mod_supported(struct drm_plane *plane,
                                             uint32_t format,
                                             uint64_t modifier)
@@ -92,6 +108,14 @@ static bool tegra_plane_format_mod_supported(struct drm_plane *plane,
        if (modifier == DRM_FORMAT_MOD_LINEAR)
                return true;
 
+       /* check for the sector layout bit */
+       if ((modifier >> 56) == DRM_FORMAT_MOD_VENDOR_NVIDIA) {
+               if (modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) {
+                       if (!tegra_plane_supports_sector_layout(plane))
+                               return false;
+               }
+       }
+
        if (info->num_planes == 1)
                return true;
 
@@ -119,6 +143,14 @@ static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
                dma_addr_t phys_addr, *phys;
                struct sg_table *sgt;
 
+               /*
+                * If we're not attached to a domain, we already stored the
+                * physical address when the buffer was allocated. If we're
+                * part of a group that's shared between all display
+                * controllers, we've also already mapped the framebuffer
+                * through the SMMU. In both cases we can short-circuit the
+                * code below and retrieve the stored IOV address.
+                */
                if (!domain || dc->client.group)
                        phys = &phys_addr;
                else
index f02a035..7b88261 100644 (file)
@@ -3115,6 +3115,12 @@ static int tegra_sor_init(struct host1x_client *client)
         * kernel is possible.
         */
        if (sor->rst) {
+               err = pm_runtime_resume_and_get(sor->dev);
+               if (err < 0) {
+                       dev_err(sor->dev, "failed to get runtime PM: %d\n", err);
+                       return err;
+               }
+
                err = reset_control_acquire(sor->rst);
                if (err < 0) {
                        dev_err(sor->dev, "failed to acquire SOR reset: %d\n",
@@ -3148,6 +3154,7 @@ static int tegra_sor_init(struct host1x_client *client)
                }
 
                reset_control_release(sor->rst);
+               pm_runtime_put(sor->dev);
        }
 
        err = clk_prepare_enable(sor->clk_safe);
index 77e1288..72aea1c 100644 (file)
@@ -214,7 +214,7 @@ static int vic_init(struct host1x_client *client)
        return 0;
 
 free_syncpt:
-       host1x_syncpt_free(client->syncpts[0]);
+       host1x_syncpt_put(client->syncpts[0]);
 free_channel:
        host1x_channel_put(vic->channel);
 detach:
@@ -238,7 +238,7 @@ static int vic_exit(struct host1x_client *client)
        if (err < 0)
                return err;
 
-       host1x_syncpt_free(client->syncpts[0]);
+       host1x_syncpt_put(client->syncpts[0]);
        host1x_channel_put(vic->channel);
        host1x_client_iommu_detach(client);
 
index 6ab7b66..cfd0b92 100644 (file)
@@ -705,8 +705,9 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
                return 0;
 
        if (no_wait_gpu) {
+               ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
                dma_fence_put(fence);
-               return -EBUSY;
+               return ret;
        }
 
        dma_resv_add_shared_fence(bo->base.resv, fence);
index a2a17c8..efb7e9c 100644 (file)
@@ -91,6 +91,10 @@ static int ttm_resource_ioremap(struct ttm_device *bdev,
 
                if (mem->bus.caching == ttm_write_combined)
                        addr = ioremap_wc(mem->bus.offset, bus_size);
+#ifdef CONFIG_X86
+               else if (mem->bus.caching == ttm_cached)
+                       addr = ioremap_cache(mem->bus.offset, bus_size);
+#endif
                else
                        addr = ioremap(mem->bus.offset, bus_size);
                if (!addr) {
@@ -371,6 +375,11 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
                if (mem->bus.caching == ttm_write_combined)
                        map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
                                                  size);
+#ifdef CONFIG_X86
+               else if (mem->bus.caching == ttm_cached)
+                       map->virtual = ioremap_cache(bo->mem.bus.offset + offset,
+                                                 size);
+#endif
                else
                        map->virtual = ioremap(bo->mem.bus.offset + offset,
                                               size);
@@ -489,6 +498,11 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
                else if (mem->bus.caching == ttm_write_combined)
                        vaddr_iomem = ioremap_wc(mem->bus.offset,
                                                 bo->base.size);
+#ifdef CONFIG_X86
+               else if (mem->bus.caching == ttm_cached)
+                       vaddr_iomem = ioremap_cache(mem->bus.offset,
+                                                 bo->base.size);
+#endif
                else
                        vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);
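
The same three-way caching decision now appears in all three mapping paths above, so a small helper would make the policy explicit. A hypothetical consolidation (not part of this series; ioremap_cache() is only used behind CONFIG_X86 here, hence the guard):

	static void __iomem *ttm_io_map(phys_addr_t offset, size_t size,
					enum ttm_caching caching)
	{
		if (caching == ttm_write_combined)
			return ioremap_wc(offset, size);
	#ifdef CONFIG_X86
		if (caching == ttm_cached)
			return ioremap_cache(offset, size);
	#endif
		return ioremap(offset, size);
	}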
 
index 269390b..76657dc 100644 (file)
@@ -210,6 +210,7 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
 {
        const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc);
        const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
+       struct vc4_dev *vc4 = to_vc4_dev(vc4_crtc->base.dev);
        u32 fifo_len_bytes = pv_data->fifo_depth;
 
        /*
@@ -238,6 +239,22 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
                if (crtc_data->hvs_output == 5)
                        return 32;
 
+               /*
+                * It looks like in some situations we can overflow
+                * the PixelValve FIFO (bit 10 of the PV STAT register
+                * gets set) and stall the HVS / PV, eventually
+                * resulting in a page flip timeout.
+                *
+                * Displaying the video overlay during playback with
+                * Kodi on an RPi3 is a reliable way to reproduce the
+                * problem, with a failure rate of around 50%.
+                *
+                * Lowering the FIFO full level by one, however,
+                * seems to make the issue disappear entirely.
+                */
+               if (!vc4->hvs->hvs5)
+                       return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX - 1;
+
                return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX;
        }
 }
index c76e73a..19161b6 100644 (file)
@@ -1150,7 +1150,6 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
        plane->state->src_y = new_plane_state->src_y;
        plane->state->src_w = new_plane_state->src_w;
        plane->state->src_h = new_plane_state->src_h;
-       plane->state->src_h = new_plane_state->src_h;
        plane->state->alpha = new_plane_state->alpha;
        plane->state->pixel_blend_mode = new_plane_state->pixel_blend_mode;
        plane->state->rotation = new_plane_state->rotation;
index 30d9adf..9f14d99 100644 (file)
@@ -521,7 +521,7 @@ static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
        drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
        if (IS_ERR(drm_dev)) {
                ret = PTR_ERR(drm_dev);
-               goto fail;
+               goto fail_dev;
        }
 
        drm_info->drm_dev = drm_dev;
@@ -551,8 +551,10 @@ fail_modeset:
        drm_kms_helper_poll_fini(drm_dev);
        drm_mode_config_cleanup(drm_dev);
        drm_dev_put(drm_dev);
-fail:
+fail_dev:
        kfree(drm_info);
+       front_info->drm_info = NULL;
+fail:
        return ret;
 }
 
index 3adacba..e5f4314 100644 (file)
@@ -16,7 +16,6 @@
 struct drm_connector;
 struct xen_drm_front_drm_info;
 
-struct xen_drm_front_drm_info;
 
 int xen_drm_front_conn_init(struct xen_drm_front_drm_info *drm_info,
                            struct drm_connector *connector);
index 99158ee..59d1fb0 100644 (file)
@@ -866,7 +866,7 @@ static int zynqmp_dp_train(struct zynqmp_dp *dp)
                return ret;
 
        zynqmp_dp_write(dp, ZYNQMP_DP_SCRAMBLING_DISABLE, 1);
-       memset(dp->train_set, 0, 4);
+       memset(dp->train_set, 0, sizeof(dp->train_set));
        ret = zynqmp_dp_link_train_cr(dp);
        if (ret)
                return ret;
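
The zynqmp change swaps a hardcoded byte count for sizeof(dp->train_set), so the cleared length always matches the array's real size, whatever that is. As a general pattern:

	u8 train_set[MAX_LANES];	/* MAX_LANES: hypothetical */

	memset(train_set, 0, sizeof(train_set));	/* not a literal count */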
index 347fb96..46f69c5 100644 (file)
@@ -196,6 +196,17 @@ int host1x_device_init(struct host1x_device *device)
 
        mutex_lock(&device->clients_lock);
 
+       list_for_each_entry(client, &device->clients, list) {
+               if (client->ops && client->ops->early_init) {
+                       err = client->ops->early_init(client);
+                       if (err < 0) {
+                               dev_err(&device->dev, "failed to early initialize %s: %d\n",
+                                       dev_name(client->dev), err);
+                               goto teardown_late;
+                       }
+               }
+       }
+
        list_for_each_entry(client, &device->clients, list) {
                if (client->ops && client->ops->init) {
                        err = client->ops->init(client);
@@ -217,6 +228,14 @@ teardown:
                if (client->ops->exit)
                        client->ops->exit(client);
 
+       /* reset client to end of list for late teardown */
+       client = list_entry(&device->clients, struct host1x_client, list);
+
+teardown_late:
+       list_for_each_entry_continue_reverse(client, &device->clients, list)
+               if (client->ops->late_exit)
+                       client->ops->late_exit(client);
+
        mutex_unlock(&device->clients_lock);
        return err;
 }
@@ -251,6 +270,18 @@ int host1x_device_exit(struct host1x_device *device)
                }
        }
 
+       list_for_each_entry_reverse(client, &device->clients, list) {
+               if (client->ops && client->ops->late_exit) {
+                       err = client->ops->late_exit(client);
+                       if (err < 0) {
+                               dev_err(&device->dev, "failed to late cleanup %s: %d\n",
+                                       dev_name(client->dev), err);
+                               mutex_unlock(&device->clients_lock);
+                               return err;
+                       }
+               }
+       }
+
        mutex_unlock(&device->clients_lock);
 
        return 0;
@@ -705,8 +736,9 @@ void host1x_driver_unregister(struct host1x_driver *driver)
 EXPORT_SYMBOL(host1x_driver_unregister);
 
 /**
- * host1x_client_register() - register a host1x client
+ * __host1x_client_register() - register a host1x client
  * @client: host1x client
+ * @key: lock class key for the client-specific mutex
  *
  * Registers a host1x client with each host1x controller instance. Note that
  * each client will only match their parent host1x controller and will only be
@@ -715,13 +747,14 @@ EXPORT_SYMBOL(host1x_driver_unregister);
  * device and call host1x_device_init(), which will in turn call each client's
  * &host1x_client_ops.init implementation.
  */
-int host1x_client_register(struct host1x_client *client)
+int __host1x_client_register(struct host1x_client *client,
+                            struct lock_class_key *key)
 {
        struct host1x *host1x;
        int err;
 
        INIT_LIST_HEAD(&client->list);
-       mutex_init(&client->lock);
+       __mutex_init(&client->lock, "host1x client lock", key);
        client->usecount = 0;
 
        mutex_lock(&devices_lock);
@@ -742,7 +775,7 @@ int host1x_client_register(struct host1x_client *client)
 
        return 0;
 }
-EXPORT_SYMBOL(host1x_client_register);
+EXPORT_SYMBOL(__host1x_client_register);
 
 /**
  * host1x_client_unregister() - unregister a host1x client
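Renaming the function to __host1x_client_register() and threading a struct lock_class_key through it suggests the public entry point becomes a macro that mints one static key per call site, so lockdep can tell the per-client mutexes apart. A sketch of the usual shape of such a wrapper (the real macro lives in include/linux/host1x.h and may differ):

    #define host1x_client_register(client)                          \
            ({                                                      \
                    static struct lock_class_key __key;             \
                    __host1x_client_register(client, &__key);       \
            })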
index e8d3fda..6e6ca77 100644 (file)
@@ -273,15 +273,13 @@ static int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x,
 static void cdma_start_timer_locked(struct host1x_cdma *cdma,
                                    struct host1x_job *job)
 {
-       struct host1x *host = cdma_to_host1x(cdma);
-
        if (cdma->timeout.client) {
                /* timer already started */
                return;
        }
 
        cdma->timeout.client = job->client;
-       cdma->timeout.syncpt = host1x_syncpt_get(host, job->syncpt_id);
+       cdma->timeout.syncpt = job->syncpt;
        cdma->timeout.syncpt_val = job->syncpt_end;
        cdma->timeout.start_ktime = ktime_get();
 
@@ -312,7 +310,6 @@ static void stop_cdma_timer_locked(struct host1x_cdma *cdma)
 static void update_cdma_locked(struct host1x_cdma *cdma)
 {
        bool signal = false;
-       struct host1x *host1x = cdma_to_host1x(cdma);
        struct host1x_job *job, *n;
 
        /* If CDMA is stopped, queue is cleared and we can return */
@@ -324,8 +321,7 @@ static void update_cdma_locked(struct host1x_cdma *cdma)
         * to consume as many sync queue entries as possible without blocking
         */
        list_for_each_entry_safe(job, n, &cdma->sync_queue, list) {
-               struct host1x_syncpt *sp =
-                       host1x_syncpt_get(host1x, job->syncpt_id);
+               struct host1x_syncpt *sp = job->syncpt;
 
                /* Check whether this syncpt has completed, and bail if not */
                if (!host1x_syncpt_is_expired(sp, job->syncpt_end)) {
@@ -499,8 +495,7 @@ int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
                if (!cdma->timeout.initialized) {
                        int err;
 
-                       err = host1x_hw_cdma_timeout_init(host1x, cdma,
-                                                         job->syncpt_id);
+                       err = host1x_hw_cdma_timeout_init(host1x, cdma);
                        if (err) {
                                mutex_unlock(&cdma->lock);
                                return err;
index 1b4997b..8a14880 100644 (file)
@@ -69,6 +69,7 @@ static int show_channel(struct host1x_channel *ch, void *data, bool show_fifo)
 
 static void show_syncpts(struct host1x *m, struct output *o)
 {
+       struct list_head *pos;
        unsigned int i;
 
        host1x_debug_output(o, "---- syncpts ----\n");
@@ -76,12 +77,19 @@ static void show_syncpts(struct host1x *m, struct output *o)
        for (i = 0; i < host1x_syncpt_nb_pts(m); i++) {
                u32 max = host1x_syncpt_read_max(m->syncpt + i);
                u32 min = host1x_syncpt_load(m->syncpt + i);
+               unsigned int waiters = 0;
 
-               if (!min && !max)
+               spin_lock(&m->syncpt[i].intr.lock);
+               list_for_each(pos, &m->syncpt[i].intr.wait_head)
+                       waiters++;
+               spin_unlock(&m->syncpt[i].intr.lock);
+
+               if (!min && !max && !waiters)
                        continue;
 
-               host1x_debug_output(o, "id %u (%s) min %d max %d\n",
-                                   i, m->syncpt[i].name, min, max);
+               host1x_debug_output(o,
+                                   "id %u (%s) min %d max %d (%d waiters)\n",
+                                   i, m->syncpt[i].name, min, max, waiters);
        }
 
        for (i = 0; i < host1x_syncpt_nb_bases(m); i++) {
index d0ebb70..fbb6447 100644 (file)
@@ -77,6 +77,7 @@ static const struct host1x_info host1x01_info = {
        .has_hypervisor = false,
        .num_sid_entries = 0,
        .sid_table = NULL,
+       .reserve_vblank_syncpts = true,
 };
 
 static const struct host1x_info host1x02_info = {
@@ -91,6 +92,7 @@ static const struct host1x_info host1x02_info = {
        .has_hypervisor = false,
        .num_sid_entries = 0,
        .sid_table = NULL,
+       .reserve_vblank_syncpts = true,
 };
 
 static const struct host1x_info host1x04_info = {
@@ -105,6 +107,7 @@ static const struct host1x_info host1x04_info = {
        .has_hypervisor = false,
        .num_sid_entries = 0,
        .sid_table = NULL,
+       .reserve_vblank_syncpts = false,
 };
 
 static const struct host1x_info host1x05_info = {
@@ -119,6 +122,7 @@ static const struct host1x_info host1x05_info = {
        .has_hypervisor = false,
        .num_sid_entries = 0,
        .sid_table = NULL,
+       .reserve_vblank_syncpts = false,
 };
 
 static const struct host1x_sid_entry tegra186_sid_table[] = {
@@ -142,6 +146,7 @@ static const struct host1x_info host1x06_info = {
        .has_hypervisor = true,
        .num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
        .sid_table = tegra186_sid_table,
+       .reserve_vblank_syncpts = false,
 };
 
 static const struct host1x_sid_entry tegra194_sid_table[] = {
@@ -165,6 +170,7 @@ static const struct host1x_info host1x07_info = {
        .has_hypervisor = true,
        .num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
        .sid_table = tegra194_sid_table,
+       .reserve_vblank_syncpts = false,
 };
 
 static const struct of_device_id host1x_of_match[] = {
index f781a9b..fa6d4bc 100644 (file)
@@ -37,7 +37,7 @@ struct host1x_cdma_ops {
        void (*start)(struct host1x_cdma *cdma);
        void (*stop)(struct host1x_cdma *cdma);
        void (*flush)(struct  host1x_cdma *cdma);
-       int (*timeout_init)(struct host1x_cdma *cdma, unsigned int syncpt);
+       int (*timeout_init)(struct host1x_cdma *cdma);
        void (*timeout_destroy)(struct host1x_cdma *cdma);
        void (*freeze)(struct host1x_cdma *cdma);
        void (*resume)(struct host1x_cdma *cdma, u32 getptr);
@@ -101,6 +101,12 @@ struct host1x_info {
        bool has_hypervisor; /* has hypervisor registers */
        unsigned int num_sid_entries;
        const struct host1x_sid_entry *sid_table;
+       /*
+        * On T20-T148, the boot chain may set up DC to increment syncpoints
+        * 26/27 on VBLANK. As such we cannot use these syncpoints until
+        * the display driver disables VBLANK increments.
+        */
+       bool reserve_vblank_syncpts;
 };
 
 struct host1x {
@@ -261,10 +267,9 @@ static inline void host1x_hw_cdma_flush(struct host1x *host,
 }
 
 static inline int host1x_hw_cdma_timeout_init(struct host1x *host,
-                                             struct host1x_cdma *cdma,
-                                             unsigned int syncpt)
+                                             struct host1x_cdma *cdma)
 {
-       return host->cdma_op->timeout_init(cdma, syncpt);
+       return host->cdma_op->timeout_init(cdma);
 }
 
 static inline void host1x_hw_cdma_timeout_destroy(struct host1x *host,
index 2f3bf94..e49cd5b 100644 (file)
@@ -295,7 +295,7 @@ static void cdma_timeout_handler(struct work_struct *work)
 /*
  * Init timeout resources
  */
-static int cdma_timeout_init(struct host1x_cdma *cdma, unsigned int syncpt)
+static int cdma_timeout_init(struct host1x_cdma *cdma)
 {
        INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler);
        cdma->timeout.initialized = true;
index 5eaa29d..d4c28fa 100644 (file)
@@ -86,8 +86,7 @@ static void submit_gathers(struct host1x_job *job)
 
 static inline void synchronize_syncpt_base(struct host1x_job *job)
 {
-       struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
-       struct host1x_syncpt *sp = host->syncpt + job->syncpt_id;
+       struct host1x_syncpt *sp = job->syncpt;
        unsigned int id;
        u32 value;
 
@@ -118,7 +117,7 @@ static void host1x_channel_set_streamid(struct host1x_channel *channel)
 static int channel_submit(struct host1x_job *job)
 {
        struct host1x_channel *ch = job->channel;
-       struct host1x_syncpt *sp;
+       struct host1x_syncpt *sp = job->syncpt;
        u32 user_syncpt_incrs = job->syncpt_incrs;
        u32 prev_max = 0;
        u32 syncval;
@@ -126,10 +125,9 @@ static int channel_submit(struct host1x_job *job)
        struct host1x_waitlist *completed_waiter = NULL;
        struct host1x *host = dev_get_drvdata(ch->dev->parent);
 
-       sp = host->syncpt + job->syncpt_id;
        trace_host1x_channel_submit(dev_name(ch->dev),
                                    job->num_gathers, job->num_relocs,
-                                   job->syncpt_id, job->syncpt_incrs);
+                                   job->syncpt->id, job->syncpt_incrs);
 
        /* before error checks, return current max */
        prev_max = job->syncpt_end = host1x_syncpt_read_max(sp);
@@ -163,7 +161,7 @@ static int channel_submit(struct host1x_job *job)
                host1x_cdma_push(&ch->cdma,
                                 host1x_opcode_setclass(HOST1X_CLASS_HOST1X,
                                        host1x_uclass_wait_syncpt_r(), 1),
-                                host1x_class_host_wait_syncpt(job->syncpt_id,
+                                host1x_class_host_wait_syncpt(job->syncpt->id,
                                        host1x_syncpt_read_max(sp)));
        }
 
index f31bcfa..ceb4822 100644 (file)
@@ -204,7 +204,7 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
                unsigned int i;
 
                host1x_debug_output(o, "\n%p: JOB, syncpt_id=%d, syncpt_val=%d, first_get=%08x, timeout=%d num_slots=%d, num_handles=%d\n",
-                                   job, job->syncpt_id, job->syncpt_end,
+                                   job, job->syncpt->id, job->syncpt_end,
                                    job->first_get, job->timeout,
                                    job->num_slots, job->num_unpins);
 
index 3058b3c..b766851 100644 (file)
@@ -29,6 +29,6 @@
 #define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(x)   (0x652c + 4 * (x))
 #define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(x)       (0x6590 + 4 * (x))
 #define HOST1X_SYNC_SYNCPT(x)                          (0x8080 + 4 * (x))
-#define HOST1X_SYNC_SYNCPT_INT_THRESH(x)               (0x8d00 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_INT_THRESH(x)               (0x9980 + 4 * (x))
 #define HOST1X_SYNC_SYNCPT_CH_APP(x)                   (0xa604 + 4 * (x))
 #define HOST1X_SYNC_SYNCPT_CH_APP_CH(v)                        (((v) & 0x3f) << 8)
index 9245add..6d1f3c0 100644 (file)
@@ -235,25 +235,37 @@ int host1x_intr_add_action(struct host1x *host, struct host1x_syncpt *syncpt,
                        host1x_hw_intr_enable_syncpt_intr(host, syncpt->id);
        }
 
-       spin_unlock(&syncpt->intr.lock);
-
        if (ref)
                *ref = waiter;
+
+       spin_unlock(&syncpt->intr.lock);
+
        return 0;
 }
 
-void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref)
+void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref,
+                        bool flush)
 {
        struct host1x_waitlist *waiter = ref;
        struct host1x_syncpt *syncpt;
 
-       while (atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED) ==
-              WLS_REMOVED)
-               schedule();
+       atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED);
 
        syncpt = host->syncpt + id;
-       (void)process_wait_list(host, syncpt,
-                               host1x_syncpt_load(host->syncpt + id));
+
+       spin_lock(&syncpt->intr.lock);
+       if (atomic_cmpxchg(&waiter->state, WLS_CANCELLED, WLS_HANDLED) ==
+           WLS_CANCELLED) {
+               list_del(&waiter->list);
+               kref_put(&waiter->refcount, waiter_release);
+       }
+       spin_unlock(&syncpt->intr.lock);
+
+       if (flush) {
+               /* Wait until any concurrently executing handler has finished. */
+               while (atomic_read(&waiter->state) != WLS_HANDLED)
+                       schedule();
+       }
 
        kref_put(&waiter->refcount, waiter_release);
 }
index aac3819..6ea55e6 100644 (file)
@@ -74,8 +74,10 @@ int host1x_intr_add_action(struct host1x *host, struct host1x_syncpt *syncpt,
  * Unreference an action submitted to host1x_intr_add_action().
  * You must call this if you passed non-NULL as ref.
  * @ref the ref returned from host1x_intr_add_action()
+ * @flush wait until any pending handlers have completed before returning.
  */
-void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref);
+void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref,
+                        bool flush);
 
 /* Initialize host1x sync point interrupt */
 int host1x_intr_init(struct host1x *host, unsigned int irq_sync);
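With flush=true the call may loop in schedule() until a concurrently running handler finishes, so it is only safe from process context; flush=false returns immediately after cancelling, leaving a handler possibly still in flight. A hedged usage sketch (the wrapper function is an assumption):

    /* Process context: guarantees no handler still runs on return. */
    static void cancel_wait_sync(struct host1x *host,
                                 struct host1x_syncpt *sp, void *ref)
    {
            host1x_intr_put_ref(host, sp->id, ref, true);
    }

    /* From atomic context, pass false instead and tolerate a late handler. */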
index 82d0a60..adbdc22 100644 (file)
@@ -79,6 +79,9 @@ static void job_free(struct kref *ref)
 {
        struct host1x_job *job = container_of(ref, struct host1x_job, ref);
 
+       if (job->syncpt)
+               host1x_syncpt_put(job->syncpt);
+
        kfree(job);
 }
 
@@ -674,7 +677,7 @@ EXPORT_SYMBOL(host1x_job_unpin);
  */
 void host1x_job_dump(struct device *dev, struct host1x_job *job)
 {
-       dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt_id);
+       dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt->id);
        dev_dbg(dev, "    SYNCPT_VAL  %d\n", job->syncpt_end);
        dev_dbg(dev, "    FIRST_GET   0x%x\n", job->first_get);
        dev_dbg(dev, "    TIMEOUT     %d\n", job->timeout);
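job_free() now drops a syncpoint reference, which implies the submission path takes one when it stores job->syncpt (the allocation site lives in the Tegra DRM driver, not shown here). A sketch of the assumed pairing:

    /* at submit time: hold the syncpoint for the job's whole lifetime */
    job->syncpt = host1x_syncpt_get(sp);

    /* in job_free(), as in the hunk above: release it with the job */
    if (job->syncpt)
            host1x_syncpt_put(job->syncpt);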
index fce7892..e648ebb 100644 (file)
@@ -42,17 +42,32 @@ static void host1x_syncpt_base_free(struct host1x_syncpt_base *base)
                base->requested = false;
 }
 
-static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
-                                                struct host1x_client *client,
-                                                unsigned long flags)
+/**
+ * host1x_syncpt_alloc() - allocate a syncpoint
+ * @host: host1x device data
+ * @flags: bitfield of HOST1X_SYNCPT_* flags
+ * @name: name for the syncpoint for use in debug prints
+ *
+ * Allocates a hardware syncpoint for the caller's use. The caller then has
+ * the sole authority to mutate the syncpoint's value until it is freed again.
+ *
+ * If no free syncpoints are available, or a NULL name was specified, returns
+ * NULL.
+ */
+struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
+                                         unsigned long flags,
+                                         const char *name)
 {
        struct host1x_syncpt *sp = host->syncpt;
+       char *full_name;
        unsigned int i;
-       char *name;
+
+       if (!name)
+               return NULL;
 
        mutex_lock(&host->syncpt_mutex);
 
-       for (i = 0; i < host->info->nb_pts && sp->name; i++, sp++)
+       for (i = 0; i < host->info->nb_pts && kref_read(&sp->ref); i++, sp++)
                ;
 
        if (i >= host->info->nb_pts)
@@ -64,19 +79,19 @@ static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
                        goto unlock;
        }
 
-       name = kasprintf(GFP_KERNEL, "%02u-%s", sp->id,
-                        client ? dev_name(client->dev) : NULL);
-       if (!name)
+       full_name = kasprintf(GFP_KERNEL, "%u-%s", sp->id, name);
+       if (!full_name)
                goto free_base;
 
-       sp->client = client;
-       sp->name = name;
+       sp->name = full_name;
 
        if (flags & HOST1X_SYNCPT_CLIENT_MANAGED)
                sp->client_managed = true;
        else
                sp->client_managed = false;
 
+       kref_init(&sp->ref);
+
        mutex_unlock(&host->syncpt_mutex);
        return sp;
 
@@ -87,6 +102,7 @@ unlock:
        mutex_unlock(&host->syncpt_mutex);
        return NULL;
 }
+EXPORT_SYMBOL(host1x_syncpt_alloc);
 
 /**
  * host1x_syncpt_id() - retrieve syncpoint ID
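A hedged usage sketch for the now-exported allocator, pairing it with host1x_syncpt_put() from later in this patch:

    static int example_alloc(struct host1x *host)
    {
            struct host1x_syncpt *sp;

            sp = host1x_syncpt_alloc(host, HOST1X_SYNCPT_CLIENT_MANAGED,
                                     "example");
            if (!sp)
                    return -EBUSY;  /* no free slot, or name was NULL */

            /* ... sole writer of the syncpoint's value while held ... */

            host1x_syncpt_put(sp);
            return 0;
    }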
@@ -294,7 +310,7 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
                }
        }
 
-       host1x_intr_put_ref(sp->host, sp->id, ref);
+       host1x_intr_put_ref(sp->host, sp->id, ref, true);
 
 done:
        return err;
@@ -307,59 +323,12 @@ EXPORT_SYMBOL(host1x_syncpt_wait);
 bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
 {
        u32 current_val;
-       u32 future_val;
 
        smp_rmb();
 
        current_val = (u32)atomic_read(&sp->min_val);
-       future_val = (u32)atomic_read(&sp->max_val);
-
-       /* Note the use of unsigned arithmetic here (mod 1<<32).
-        *
-        * c = current_val = min_val    = the current value of the syncpoint.
-        * t = thresh                   = the value we are checking
-        * f = future_val  = max_val    = the value c will reach when all
-        *                                outstanding increments have completed.
-        *
-        * Note that c always chases f until it reaches f.
-        *
-        * Dtf = (f - t)
-        * Dtc = (c - t)
-        *
-        *  Consider all cases:
-        *
-        *      A) .....c..t..f.....    Dtf < Dtc       need to wait
-        *      B) .....c.....f..t..    Dtf > Dtc       expired
-        *      C) ..t..c.....f.....    Dtf > Dtc       expired    (Dct very large)
-        *
-        *  Any case where f==c: always expired (for any t).    Dtf == Dcf
-        *  Any case where t==c: always expired (for any f).    Dtf >= Dtc (because Dtc==0)
-        *  Any case where t==f!=c: always wait.                Dtf <  Dtc (because Dtf==0,
-        *                                                      Dtc!=0)
-        *
-        *  Other cases:
-        *
-        *      A) .....t..f..c.....    Dtf < Dtc       need to wait
-        *      A) .....f..c..t.....    Dtf < Dtc       need to wait
-        *      A) .....f..t..c.....    Dtf > Dtc       expired
-        *
-        *   So:
-        *         Dtf >= Dtc implies EXPIRED   (return true)
-        *         Dtf <  Dtc implies WAIT      (return false)
-        *
-        * Note: If t is expired then we *cannot* wait on it. We would wait
-        * forever (hang the system).
-        *
-        * Note: do NOT get clever and remove the -thresh from both sides. It
-        * is NOT the same.
-        *
-        * If future valueis zero, we have a client managed sync point. In that
-        * case we do a direct comparison.
-        */
-       if (!host1x_syncpt_client_managed(sp))
-               return future_val - thresh >= current_val - thresh;
-       else
-               return (s32)(current_val - thresh) >= 0;
+
+       return ((current_val - thresh) & 0x80000000U) == 0U;
 }
 
 int host1x_syncpt_init(struct host1x *host)
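The new expiry test keeps the mod-2^32 reasoning from the deleted comment but reduces it to a sign-bit check: thresh counts as expired once current_val has passed it by less than 2^31 (mod 2^32). Worked values for the equivalent signed form:

    /* Same check as above, written with signed arithmetic. */
    static bool syncpt_expired(u32 current_val, u32 thresh)
    {
            return (s32)(current_val - thresh) >= 0;
    }

    /* e.g. syncpt_expired(3, 0xfffffffd) -> true  (3 - 0xfffffffd == 6)
     *      syncpt_expired(3, 9)          -> false (3 - 9 == 0xfffffffa,
     *                                              sign bit set)         */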
@@ -401,10 +370,15 @@ int host1x_syncpt_init(struct host1x *host)
        host1x_hw_syncpt_enable_protection(host);
 
        /* Allocate sync point to use for clearing waits for expired fences */
-       host->nop_sp = host1x_syncpt_alloc(host, NULL, 0);
+       host->nop_sp = host1x_syncpt_alloc(host, 0, "reserved-nop");
        if (!host->nop_sp)
                return -ENOMEM;
 
+       if (host->info->reserve_vblank_syncpts) {
+               kref_init(&host->syncpt[26].ref);
+               kref_init(&host->syncpt[27].ref);
+       }
+
        return 0;
 }
 
@@ -416,44 +390,50 @@ int host1x_syncpt_init(struct host1x *host)
  * host1x client drivers can use this function to allocate a syncpoint for
  * subsequent use. A syncpoint returned by this function will be reserved for
  * use by the client exclusively. When no longer using a syncpoint, a host1x
- * client driver needs to release it using host1x_syncpt_free().
+ * client driver needs to release it using host1x_syncpt_put().
  */
 struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
                                            unsigned long flags)
 {
        struct host1x *host = dev_get_drvdata(client->host->parent);
 
-       return host1x_syncpt_alloc(host, client, flags);
+       return host1x_syncpt_alloc(host, flags, dev_name(client->dev));
 }
 EXPORT_SYMBOL(host1x_syncpt_request);
 
-/**
- * host1x_syncpt_free() - free a requested syncpoint
- * @sp: host1x syncpoint
- *
- * Release a syncpoint previously allocated using host1x_syncpt_request(). A
- * host1x client driver should call this when the syncpoint is no longer in
- * use. Note that client drivers must ensure that the syncpoint doesn't remain
- * under the control of hardware after calling this function, otherwise two
- * clients may end up trying to access the same syncpoint concurrently.
- */
-void host1x_syncpt_free(struct host1x_syncpt *sp)
+static void syncpt_release(struct kref *ref)
 {
-       if (!sp)
-               return;
+       struct host1x_syncpt *sp = container_of(ref, struct host1x_syncpt, ref);
+
+       atomic_set(&sp->max_val, host1x_syncpt_read(sp));
 
        mutex_lock(&sp->host->syncpt_mutex);
 
        host1x_syncpt_base_free(sp->base);
        kfree(sp->name);
        sp->base = NULL;
-       sp->client = NULL;
        sp->name = NULL;
        sp->client_managed = false;
 
        mutex_unlock(&sp->host->syncpt_mutex);
 }
-EXPORT_SYMBOL(host1x_syncpt_free);
+
+/**
+ * host1x_syncpt_put() - free a requested syncpoint
+ * @sp: host1x syncpoint
+ *
+ * Release a syncpoint previously allocated using host1x_syncpt_request(). A
+ * host1x client driver should call this when the syncpoint is no longer in
+ * use.
+ */
+void host1x_syncpt_put(struct host1x_syncpt *sp)
+{
+       if (!sp)
+               return;
+
+       kref_put(&sp->ref, syncpt_release);
+}
+EXPORT_SYMBOL(host1x_syncpt_put);
 
 void host1x_syncpt_deinit(struct host1x *host)
 {
@@ -520,16 +500,48 @@ unsigned int host1x_syncpt_nb_mlocks(struct host1x *host)
 }
 
 /**
- * host1x_syncpt_get() - obtain a syncpoint by ID
+ * host1x_syncpt_get_by_id() - obtain a syncpoint by ID
  * @host: host1x controller
  * @id: syncpoint ID
  */
-struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, unsigned int id)
+struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host,
+                                             unsigned int id)
 {
        if (id >= host->info->nb_pts)
                return NULL;
 
-       return host->syncpt + id;
+       if (kref_get_unless_zero(&host->syncpt[id].ref))
+               return &host->syncpt[id];
+       else
+               return NULL;
+}
+EXPORT_SYMBOL(host1x_syncpt_get_by_id);
+
+/**
+ * host1x_syncpt_get_by_id_noref() - obtain a syncpoint by ID but don't
+ *     increase the refcount.
+ * @host: host1x controller
+ * @id: syncpoint ID
+ */
+struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host,
+                                                   unsigned int id)
+{
+       if (id >= host->info->nb_pts)
+               return NULL;
+
+       return &host->syncpt[id];
+}
+EXPORT_SYMBOL(host1x_syncpt_get_by_id_noref);
+
+/**
+ * host1x_syncpt_get() - increment syncpoint refcount
+ * @sp: syncpoint
+ */
+struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp)
+{
+       kref_get(&sp->ref);
+
+       return sp;
 }
 EXPORT_SYMBOL(host1x_syncpt_get);
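A sketch of how the by-ID lookup pairs with the refcounting: kref_get_unless_zero() fails for slots whose refcount already hit zero, i.e. free syncpoints, so a successful lookup pins the syncpoint until the matching put.

    static int example_lookup(struct host1x *host, unsigned int id)
    {
            struct host1x_syncpt *sp;

            sp = host1x_syncpt_get_by_id(host, id);
            if (!sp)
                    return -EINVAL; /* out of range, or slot currently free */

            /* ... sp cannot be released underneath us here ... */

            host1x_syncpt_put(sp);  /* drop the lookup reference */
            return 0;
    }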
 
@@ -552,3 +564,31 @@ u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base)
        return base->id;
 }
 EXPORT_SYMBOL(host1x_syncpt_base_id);
+
+static void do_nothing(struct kref *ref)
+{
+}
+
+/**
+ * host1x_syncpt_release_vblank_reservation() - Make VBLANK syncpoint
+ *   available for allocation
+ *
+ * @client: host1x bus client
+ * @syncpt_id: syncpoint ID to make available
+ *
+ * Makes VBLANK<i> syncpoint available for allocation if it was
+ * reserved at initialization time. This should be called by the display
+ * driver after it has ensured that any VBLANK increment programming configured
+ * by the boot chain has been disabled.
+ */
+void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
+                                             u32 syncpt_id)
+{
+       struct host1x *host = dev_get_drvdata(client->host->parent);
+
+       if (!host->info->reserve_vblank_syncpts)
+               return;
+
+       kref_put(&host->syncpt[syncpt_id].ref, do_nothing);
+}
+EXPORT_SYMBOL(host1x_syncpt_release_vblank_reservation);
index 8e1d04d..a6766f8 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/atomic.h>
 #include <linux/host1x.h>
 #include <linux/kernel.h>
+#include <linux/kref.h>
 #include <linux/sched.h>
 
 #include "intr.h"
@@ -26,6 +27,8 @@ struct host1x_syncpt_base {
 };
 
 struct host1x_syncpt {
+       struct kref ref;
+
        unsigned int id;
        atomic_t min_val;
        atomic_t max_val;
@@ -33,7 +36,6 @@ struct host1x_syncpt {
        const char *name;
        bool client_managed;
        struct host1x *host;
-       struct host1x_client *client;
        struct host1x_syncpt_base *base;
 
        /* interrupt data */
index dd27b9d..873ef38 100644 (file)
@@ -129,6 +129,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
                if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
                        != DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
                        dev_err(dev->dev, "High Speed not supported!\n");
+                       t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
                        dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
                        dev->master_cfg |= DW_IC_CON_SPEED_FAST;
                        dev->hs_hcnt = 0;
index 5ac30d9..97d4f3a 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * i2c-exynos5.c - Samsung Exynos5 I2C Controller Driver
  *
  * Copyright (C) 2013 Samsung Electronics Co., Ltd.
index c45f226..aa00ba8 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright (c) 2014 Linaro Ltd.
- * Copyright (c) 2014 Hisilicon Limited.
+ * Copyright (c) 2014 HiSilicon Limited.
  *
  * Now only support 7 bit address.
  */
index 8509c5f..55177eb 100644 (file)
@@ -525,8 +525,8 @@ static irqreturn_t jz4780_i2c_irq(int irqno, void *dev_id)
                                i2c_sta = jz4780_i2c_readw(i2c, JZ4780_I2C_STA);
                                data = *i2c->wbuf;
                                data &= ~JZ4780_I2C_DC_READ;
-                               if ((!i2c->stop_hold) && (i2c->cdata->version >=
-                                               ID_X1000))
+                               if ((i2c->wt_len == 1) && (!i2c->stop_hold) &&
+                                               (i2c->cdata->version >= ID_X1000))
                                        data |= X1000_I2C_DC_STOP;
                                jz4780_i2c_writew(i2c, JZ4780_I2C_DC, data);
                                i2c->wbuf++;
index 937c2c8..4933fc8 100644 (file)
@@ -534,7 +534,7 @@ static void stm32f4_i2c_handle_rx_addr(struct stm32f4_i2c_dev *i2c_dev)
        default:
                /*
                 * N-byte reception:
-                * Enable ACK, reset POS (ACK postion) and clear ADDR flag.
+                * Enable ACK, reset POS (ACK position) and clear ADDR flag.
                 * In that way, ACK will be sent as soon as the current byte
                 * will be received in the shift register
                 */
index 63ebf72..f213623 100644 (file)
@@ -378,7 +378,7 @@ static int i2c_gpio_init_recovery(struct i2c_adapter *adap)
 static int i2c_init_recovery(struct i2c_adapter *adap)
 {
        struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
-       char *err_str;
+       char *err_str, *err_level = KERN_ERR;
 
        if (!bri)
                return 0;
@@ -387,7 +387,8 @@ static int i2c_init_recovery(struct i2c_adapter *adap)
                return -EPROBE_DEFER;
 
        if (!bri->recover_bus) {
-               err_str = "no recover_bus() found";
+               err_str = "no suitable method provided";
+               err_level = KERN_DEBUG;
                goto err;
        }
 
@@ -414,7 +415,7 @@ static int i2c_init_recovery(struct i2c_adapter *adap)
 
        return 0;
  err:
-       dev_err(&adap->dev, "Not using recovery: %s\n", err_str);
+       dev_printk(err_level, &adap->dev, "Not using recovery: %s\n", err_str);
        adap->bus_recovery_info = NULL;
 
        return -EINVAL;
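dev_printk() takes the log level as a runtime argument, which is what lets this single exit path demote the benign "no suitable method provided" case to KERN_DEBUG while real misconfigurations stay at KERN_ERR. The shape of the idiom:

    char *err_level = KERN_ERR;         /* default: genuine errors */

    if (!bri->recover_bus)
            err_level = KERN_DEBUG;     /* missing recovery is not an error */

    dev_printk(err_level, &adap->dev, "Not using recovery: %s\n", err_str);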
index bf7d22f..e0667c4 100644 (file)
@@ -266,6 +266,8 @@ config ADI_AXI_ADC
        select IIO_BUFFER
        select IIO_BUFFER_HW_CONSUMER
        select IIO_BUFFER_DMAENGINE
+       depends on HAS_IOMEM
+       depends on OF
        help
          Say yes here to build support for Analog Devices Generic
          AXI ADC IP core. The IP core is used for interfacing with
@@ -923,6 +925,7 @@ config STM32_ADC_CORE
        depends on ARCH_STM32 || COMPILE_TEST
        depends on OF
        depends on REGULATOR
+       depends on HAS_IOMEM
        select IIO_BUFFER
        select MFD_STM32_TIMERS
        select IIO_STM32_TIMER_TRIGGER
index 6f9a3e2..7b5212b 100644 (file)
@@ -918,7 +918,7 @@ static int ab8500_gpadc_read_raw(struct iio_dev *indio_dev,
                        return processed;
 
                /* Return millivolt or milliamps or millicentigrades */
-               *val = processed * 1000;
+               *val = processed;
                return IIO_VAL_INT;
        }
 
index 5d597e5..1b4b320 100644 (file)
@@ -91,7 +91,7 @@ static int ad7949_spi_read_channel(struct ad7949_adc_chip *ad7949_adc, int *val,
        int ret;
        int i;
        int bits_per_word = ad7949_adc->resolution;
-       int mask = GENMASK(ad7949_adc->resolution, 0);
+       int mask = GENMASK(ad7949_adc->resolution - 1, 0);
        struct spi_message msg;
        struct spi_transfer tx[] = {
                {
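GENMASK(h, l) sets bits h down to l inclusive, so an N-bit field needs GENMASK(N - 1, 0); the replaced line built an (N + 1)-bit mask and let one stray bit through. For a 14-bit ADC result:

    #include <linux/bits.h>

    u32 ok  = GENMASK(13, 0);   /* 0x3fff - correct 14-bit mask */
    u32 bad = GENMASK(14, 0);   /* 0x7fff - 15 bits, off by one */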
index 05ff948..07b1a99 100644 (file)
@@ -597,7 +597,7 @@ static const struct vadc_channels vadc_chans[] = {
        VADC_CHAN_NO_SCALE(P_MUX16_1_3, 1)
 
        VADC_CHAN_NO_SCALE(LR_MUX1_BAT_THERM, 0)
-       VADC_CHAN_NO_SCALE(LR_MUX2_BAT_ID, 0)
+       VADC_CHAN_VOLT(LR_MUX2_BAT_ID, 0, SCALE_DEFAULT)
        VADC_CHAN_NO_SCALE(LR_MUX3_XO_THERM, 0)
        VADC_CHAN_NO_SCALE(LR_MUX4_AMUX_THM1, 0)
        VADC_CHAN_NO_SCALE(LR_MUX5_AMUX_THM2, 0)
index dfa31a2..ac90be0 100644 (file)
@@ -551,6 +551,8 @@ static irqreturn_t mpu3050_trigger_handler(int irq, void *p)
                                               MPU3050_FIFO_R,
                                               &fifo_values[offset],
                                               toread);
+                       if (ret)
+                               goto out_trigger_unlock;
 
                        dev_dbg(mpu3050->dev,
                                "%04x %04x %04x %04x %04x\n",
index 52f6051..d627054 100644 (file)
 struct hid_humidity_state {
        struct hid_sensor_common common_attributes;
        struct hid_sensor_hub_attribute_info humidity_attr;
-       s32 humidity_data;
+       struct {
+               s32 humidity_data;
+               u64 timestamp __aligned(8);
+       } scan;
        int scale_pre_decml;
        int scale_post_decml;
        int scale_precision;
@@ -125,9 +128,8 @@ static int humidity_proc_event(struct hid_sensor_hub_device *hsdev,
        struct hid_humidity_state *humid_st = iio_priv(indio_dev);
 
        if (atomic_read(&humid_st->common_attributes.data_ready))
-               iio_push_to_buffers_with_timestamp(indio_dev,
-                                       &humid_st->humidity_data,
-                                       iio_get_time_ns(indio_dev));
+               iio_push_to_buffers_with_timestamp(indio_dev, &humid_st->scan,
+                                                  iio_get_time_ns(indio_dev));
 
        return 0;
 }
@@ -142,7 +144,7 @@ static int humidity_capture_sample(struct hid_sensor_hub_device *hsdev,
 
        switch (usage_id) {
        case HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY:
-               humid_st->humidity_data = *(s32 *)raw_data;
+               humid_st->scan.humidity_data = *(s32 *)raw_data;
 
                return 0;
        default:
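iio_push_to_buffers_with_timestamp() writes an 8-byte timestamp at the end of the buffer it is handed, so the buffer must be sized and aligned to receive it; pushing a bare s32 (as before this hunk) lets the core scribble past the value. The pattern the fix follows:

    struct {
            s32 data;
            u64 timestamp __aligned(8); /* slot the IIO core fills in */
    } scan;

    scan.data = raw_value;
    iio_push_to_buffers_with_timestamp(indio_dev, &scan,
                                       iio_get_time_ns(indio_dev));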
index 54af2ed..785a4ce 100644 (file)
@@ -462,8 +462,7 @@ static int adis16400_initial_setup(struct iio_dev *indio_dev)
                if (ret)
                        goto err_ret;
 
-               ret = sscanf(indio_dev->name, "adis%u\n", &device_id);
-               if (ret != 1) {
+               if (sscanf(indio_dev->name, "adis%u\n", &device_id) != 1) {
                        ret = -EINVAL;
                        goto err_ret;
                }
index 330cf35..e9e00ce 100644 (file)
@@ -23,6 +23,9 @@ struct prox_state {
        struct hid_sensor_common common_attributes;
        struct hid_sensor_hub_attribute_info prox_attr;
        u32 human_presence;
+       int scale_pre_decml;
+       int scale_post_decml;
+       int scale_precision;
 };
 
 /* Channel definitions */
@@ -93,8 +96,9 @@ static int prox_read_raw(struct iio_dev *indio_dev,
                ret_type = IIO_VAL_INT;
                break;
        case IIO_CHAN_INFO_SCALE:
-               *val = prox_state->prox_attr.units;
-               ret_type = IIO_VAL_INT;
+               *val = prox_state->scale_pre_decml;
+               *val2 = prox_state->scale_post_decml;
+               ret_type = prox_state->scale_precision;
                break;
        case IIO_CHAN_INFO_OFFSET:
                *val = hid_sensor_convert_exponent(
@@ -234,6 +238,11 @@ static int prox_parse_report(struct platform_device *pdev,
                        HID_USAGE_SENSOR_HUMAN_PRESENCE,
                        &st->common_attributes.sensitivity);
 
+       st->scale_precision = hid_sensor_format_scale(
+                               hsdev->usage,
+                               &st->prox_attr,
+                               &st->scale_pre_decml, &st->scale_post_decml);
+
        return ret;
 }
 
index 81688f1..da9a247 100644 (file)
 struct temperature_state {
        struct hid_sensor_common common_attributes;
        struct hid_sensor_hub_attribute_info temperature_attr;
-       s32 temperature_data;
+       struct {
+               s32 temperature_data;
+               u64 timestamp __aligned(8);
+       } scan;
        int scale_pre_decml;
        int scale_post_decml;
        int scale_precision;
@@ -32,7 +35,7 @@ static const struct iio_chan_spec temperature_channels[] = {
                        BIT(IIO_CHAN_INFO_SAMP_FREQ) |
                        BIT(IIO_CHAN_INFO_HYSTERESIS),
        },
-       IIO_CHAN_SOFT_TIMESTAMP(3),
+       IIO_CHAN_SOFT_TIMESTAMP(1),
 };
 
 /* Adjust channel real bits based on report descriptor */
@@ -123,9 +126,8 @@ static int temperature_proc_event(struct hid_sensor_hub_device *hsdev,
        struct temperature_state *temp_st = iio_priv(indio_dev);
 
        if (atomic_read(&temp_st->common_attributes.data_ready))
-               iio_push_to_buffers_with_timestamp(indio_dev,
-                               &temp_st->temperature_data,
-                               iio_get_time_ns(indio_dev));
+               iio_push_to_buffers_with_timestamp(indio_dev, &temp_st->scan,
+                                                  iio_get_time_ns(indio_dev));
 
        return 0;
 }
@@ -140,7 +142,7 @@ static int temperature_capture_sample(struct hid_sensor_hub_device *hsdev,
 
        switch (usage_id) {
        case HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE:
-               temp_st->temperature_data = *(s32 *)raw_data;
+               temp_st->scan.temperature_data = *(s32 *)raw_data;
                return 0;
        default:
                return -EINVAL;
index 0abce00..65e3e7d 100644 (file)
@@ -76,7 +76,9 @@ static struct workqueue_struct *addr_wq;
 
 static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
        [LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
-               .len = sizeof(struct rdma_nla_ls_gid)},
+               .len = sizeof(struct rdma_nla_ls_gid),
+               .validation_type = NLA_VALIDATE_MIN,
+               .min = sizeof(struct rdma_nla_ls_gid)},
 };
 
 static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
index 8769e7a..e42c812 100644 (file)
@@ -3610,13 +3610,14 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
            ep->com.local_addr.ss_family == AF_INET) {
                err = cxgb4_remove_server_filter(
                        ep->com.dev->rdev.lldi.ports[0], ep->stid,
-                       ep->com.dev->rdev.lldi.rxq_ids[0], 0);
+                       ep->com.dev->rdev.lldi.rxq_ids[0], false);
        } else {
                struct sockaddr_in6 *sin6;
                c4iw_init_wr_wait(ep->com.wr_waitp);
                err = cxgb4_remove_server(
                                ep->com.dev->rdev.lldi.ports[0], ep->stid,
-                               ep->com.dev->rdev.lldi.rxq_ids[0], 0);
+                               ep->com.dev->rdev.lldi.rxq_ids[0],
+                               ep->com.local_addr.ss_family == AF_INET6);
                if (err)
                        goto done;
                err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
index 2a91b8d..04b1e8f 100644 (file)
@@ -632,22 +632,11 @@ static void _dev_comp_vect_cpu_mask_clean_up(struct hfi1_devdata *dd,
  */
 int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
 {
-       int node = pcibus_to_node(dd->pcidev->bus);
        struct hfi1_affinity_node *entry;
        const struct cpumask *local_mask;
        int curr_cpu, possible, i, ret;
        bool new_entry = false;
 
-       /*
-        * If the BIOS does not have the NUMA node information set, select
-        * NUMA 0 so we get consistent performance.
-        */
-       if (node < 0) {
-               dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
-               node = 0;
-       }
-       dd->node = node;
-
        local_mask = cpumask_of_node(dd->node);
        if (cpumask_first(local_mask) >= nr_cpu_ids)
                local_mask = topology_core_cpumask(0);
@@ -660,7 +649,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
         * create an entry in the global affinity structure and initialize it.
         */
        if (!entry) {
-               entry = node_affinity_allocate(node);
+               entry = node_affinity_allocate(dd->node);
                if (!entry) {
                        dd_dev_err(dd,
                                   "Unable to allocate global affinity node\n");
@@ -751,6 +740,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
        if (new_entry)
                node_affinity_add_tail(entry);
 
+       dd->affinity_entry = entry;
        mutex_unlock(&node_affinity.lock);
 
        return 0;
@@ -766,10 +756,9 @@ void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
 {
        struct hfi1_affinity_node *entry;
 
-       if (dd->node < 0)
-               return;
-
        mutex_lock(&node_affinity.lock);
+       if (!dd->affinity_entry)
+               goto unlock;
        entry = node_affinity_lookup(dd->node);
        if (!entry)
                goto unlock;
@@ -780,8 +769,8 @@ void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
         */
        _dev_comp_vect_cpu_mask_clean_up(dd, entry);
 unlock:
+       dd->affinity_entry = NULL;
        mutex_unlock(&node_affinity.lock);
-       dd->node = NUMA_NO_NODE;
 }
 
 /*
index e09e824..2a9a040 100644 (file)
@@ -1409,6 +1409,7 @@ struct hfi1_devdata {
        spinlock_t irq_src_lock;
        int vnic_num_vports;
        struct net_device *dummy_netdev;
+       struct hfi1_affinity_node *affinity_entry;
 
        /* Keeps track of IPoIB RSM rule users */
        atomic_t ipoib_rsm_usr_num;
index cb7ad12..786c631 100644 (file)
@@ -1277,7 +1277,6 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
        dd->pport = (struct hfi1_pportdata *)(dd + 1);
        dd->pcidev = pdev;
        pci_set_drvdata(pdev, dd);
-       dd->node = NUMA_NO_NODE;
 
        ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b,
                        GFP_KERNEL);
@@ -1287,6 +1286,15 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
                goto bail;
        }
        rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);
+       /*
+        * If the BIOS does not have the NUMA node information set, select
+        * NUMA 0 so we get consistent performance.
+        */
+       dd->node = pcibus_to_node(pdev->bus);
+       if (dd->node == NUMA_NO_NODE) {
+               dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
+               dd->node = 0;
+       }
 
        /*
         * Initialize all locks for the device. This needs to be as early as
index 1fb6e1a..1bcab99 100644 (file)
@@ -173,8 +173,7 @@ u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
                return 0;
        }
 
-       cpumask_and(node_cpu_mask, cpu_mask,
-                   cpumask_of_node(pcibus_to_node(dd->pcidev->bus)));
+       cpumask_and(node_cpu_mask, cpu_mask, cpumask_of_node(dd->node));
 
        available_cpus = cpumask_weight(node_cpu_mask);
 
index c3934ab..ce26f97 100644 (file)
@@ -1194,8 +1194,10 @@ static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
                           upper_32_bits(dma));
                roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
                           (u32)ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
-               roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
+
+               /* Make sure to write tail first and then head */
                roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
+               roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
        } else {
                roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
                roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
index de3c2fc..07b8350 100644 (file)
@@ -1116,7 +1116,7 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
        case MLX5_CMD_OP_CREATE_MKEY:
                MLX5_SET(destroy_mkey_in, din, opcode,
                         MLX5_CMD_OP_DESTROY_MKEY);
-               MLX5_SET(destroy_mkey_in, in, mkey_index, *obj_id);
+               MLX5_SET(destroy_mkey_in, din, mkey_index, *obj_id);
                break;
        case MLX5_CMD_OP_CREATE_CQ:
                MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
index ec4b3f6..f5a52a6 100644 (file)
@@ -1078,7 +1078,7 @@ static int _create_kernel_qp(struct mlx5_ib_dev *dev,
 
        qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
        MLX5_SET(qpc, qpc, uar_page, uar_index);
-       MLX5_SET(qpc, qpc, ts_format, MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT);
+       MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev));
        MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
 
        /* Set "fast registration enabled" for all kernel QPs */
@@ -1188,7 +1188,8 @@ static int get_rq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
                }
                return MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING;
        }
-       return MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT;
+       return fr_supported ? MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING :
+                             MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT;
 }
 
 static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
@@ -1206,7 +1207,8 @@ static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
                }
                return MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING;
        }
-       return MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT;
+       return fr_supported ? MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING :
+                             MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT;
 }
 
 static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq,
@@ -1217,7 +1219,8 @@ static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq,
                        MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
                MLX5_CAP_ROCE(dev->mdev, qp_ts_format) ==
                        MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
-       int ts_format = MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT;
+       int ts_format = fr_supported ? MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING :
+                                      MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT;
 
        if (recv_cq &&
            recv_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION)
@@ -1930,6 +1933,7 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        if (qp->flags & IB_QP_CREATE_MANAGED_RECV)
                MLX5_SET(qpc, qpc, cd_slave_receive, 1);
 
+       MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev));
        MLX5_SET(qpc, qpc, rq_type, MLX5_SRQ_RQ);
        MLX5_SET(qpc, qpc, no_sq, 1);
        MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
@@ -4873,6 +4877,7 @@ static int  create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
        struct mlx5_ib_dev *dev;
        int has_net_offloads;
        __be64 *rq_pas0;
+       int ts_format;
        void *in;
        void *rqc;
        void *wq;
@@ -4881,6 +4886,10 @@ static int  create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
 
        dev = to_mdev(pd->device);
 
+       ts_format = get_rq_ts_format(dev, to_mcq(init_attr->cq));
+       if (ts_format < 0)
+               return ts_format;
+
        inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
@@ -4890,6 +4899,7 @@ static int  create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
        rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
        MLX5_SET(rqc,  rqc, mem_rq_type,
                 MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
+       MLX5_SET(rqc, rqc, ts_format, ts_format);
        MLX5_SET(rqc, rqc, user_index, rwq->user_index);
        MLX5_SET(rqc,  rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn);
        MLX5_SET(rqc,  rqc, state, MLX5_RQC_STATE_RST);
index 0eb6a7a..9ea5422 100644 (file)
@@ -1244,7 +1244,8 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
         * TGT QP isn't associated with RQ/SQ
         */
        if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) &&
-           (attrs->qp_type != IB_QPT_XRC_TGT)) {
+           (attrs->qp_type != IB_QPT_XRC_TGT) &&
+           (attrs->qp_type != IB_QPT_XRC_INI)) {
                struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq);
                struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);
 
index 0a08b4b..6734329 100644 (file)
@@ -2720,8 +2720,8 @@ void rtrs_clt_close(struct rtrs_clt *clt)
 
        /* Now it is safe to iterate over all paths without locks */
        list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
-               rtrs_clt_destroy_sess_files(sess, NULL);
                rtrs_clt_close_conns(sess, true);
+               rtrs_clt_destroy_sess_files(sess, NULL);
                kobject_put(&sess->kobj);
        }
        free_clt(clt);
index 430dc69..da8963a 100644 (file)
@@ -26,7 +26,6 @@
 
 MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
 MODULE_DESCRIPTION("Joystick device interfaces");
-MODULE_SUPPORTED_DEVICE("input/js");
 MODULE_LICENSE("GPL");
 
 #define JOYDEV_MINOR_BASE      0
index 73e2c8d..448cc53 100644 (file)
@@ -53,7 +53,7 @@ void icc_bulk_put(int num_paths, struct icc_bulk_data *paths)
 EXPORT_SYMBOL_GPL(icc_bulk_put);
 
 /**
- * icc_bulk_set() - set bandwidth to a set of paths
+ * icc_bulk_set_bw() - set bandwidth to a set of paths
  * @num_paths: the number of icc_bulk_data
  * @paths: the icc_bulk_data table containing the paths and bandwidth
  *
index 5ad519c..8a1e70e 100644 (file)
@@ -942,6 +942,8 @@ int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
                       GFP_KERNEL);
        if (new)
                src->links = new;
+       else
+               ret = -ENOMEM;
 
 out:
        mutex_unlock(&icc_lock);
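krealloc() returns NULL on failure and leaves the original allocation untouched, so the caller must keep the old pointer and report the error rather than continue as if the resize happened. The generic shape:

    void *tmp;

    tmp = krealloc(buf, new_size, GFP_KERNEL);
    if (!tmp)
            return -ENOMEM;     /* buf is still valid and still owned */
    buf = tmp;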
index dfbec30..20f31a1 100644 (file)
@@ -131,7 +131,7 @@ DEFINE_QNODE(mas_pcnoc_sdcc_1, MSM8939_MASTER_SDCC_1, 8, -1, -1, MSM8939_PNOC_IN
 DEFINE_QNODE(mas_pcnoc_sdcc_2, MSM8939_MASTER_SDCC_2, 8, -1, -1, MSM8939_PNOC_INT_1);
 DEFINE_QNODE(mas_qdss_bam, MSM8939_MASTER_QDSS_BAM, 8, -1, -1, MSM8939_SNOC_QDSS_INT);
 DEFINE_QNODE(mas_qdss_etr, MSM8939_MASTER_QDSS_ETR, 8, -1, -1, MSM8939_SNOC_QDSS_INT);
-DEFINE_QNODE(mas_snoc_cfg, MSM8939_MASTER_SNOC_CFG, 4, 20, -1, MSM8939_SLAVE_SRVC_SNOC);
+DEFINE_QNODE(mas_snoc_cfg, MSM8939_MASTER_SNOC_CFG, 4, -1, -1, MSM8939_SLAVE_SRVC_SNOC);
 DEFINE_QNODE(mas_spdm, MSM8939_MASTER_SPDM, 4, -1, -1, MSM8939_PNOC_MAS_0);
 DEFINE_QNODE(mas_tcu0, MSM8939_MASTER_TCU0, 16, -1, -1, MSM8939_SLAVE_EBI_CH0, MSM8939_BIMC_SNOC_MAS, MSM8939_SLAVE_AMPSS_L2);
 DEFINE_QNODE(mas_usb_hs1, MSM8939_MASTER_USB_HS1, 4, -1, -1, MSM8939_PNOC_MAS_1);
@@ -156,14 +156,14 @@ DEFINE_QNODE(pcnoc_snoc_mas, MSM8939_PNOC_SNOC_MAS, 8, 29, -1, MSM8939_PNOC_SNOC
 DEFINE_QNODE(pcnoc_snoc_slv, MSM8939_PNOC_SNOC_SLV, 8, -1, 45, MSM8939_SNOC_INT_0, MSM8939_SNOC_INT_BIMC, MSM8939_SNOC_INT_1);
 DEFINE_QNODE(qdss_int, MSM8939_SNOC_QDSS_INT, 8, -1, -1, MSM8939_SNOC_INT_0, MSM8939_SNOC_INT_BIMC);
 DEFINE_QNODE(slv_apps_l2, MSM8939_SLAVE_AMPSS_L2, 16, -1, -1, 0);
-DEFINE_QNODE(slv_apss, MSM8939_SLAVE_APSS, 4, -1, 20, 0);
+DEFINE_QNODE(slv_apss, MSM8939_SLAVE_APSS, 4, -1, -1, 0);
 DEFINE_QNODE(slv_audio, MSM8939_SLAVE_LPASS, 4, -1, -1, 0);
 DEFINE_QNODE(slv_bimc_cfg, MSM8939_SLAVE_BIMC_CFG, 4, -1, -1, 0);
 DEFINE_QNODE(slv_blsp_1, MSM8939_SLAVE_BLSP_1, 4, -1, -1, 0);
 DEFINE_QNODE(slv_boot_rom, MSM8939_SLAVE_BOOT_ROM, 4, -1, -1, 0);
 DEFINE_QNODE(slv_camera_cfg, MSM8939_SLAVE_CAMERA_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_cats_0, MSM8939_SLAVE_CATS_128, 16, -1, 106, 0);
-DEFINE_QNODE(slv_cats_1, MSM8939_SLAVE_OCMEM_64, 8, -1, 107, 0);
+DEFINE_QNODE(slv_cats_0, MSM8939_SLAVE_CATS_128, 16, -1, -1, 0);
+DEFINE_QNODE(slv_cats_1, MSM8939_SLAVE_OCMEM_64, 8, -1, -1, 0);
 DEFINE_QNODE(slv_clk_ctl, MSM8939_SLAVE_CLK_CTL, 4, -1, -1, 0);
 DEFINE_QNODE(slv_crypto_0_cfg, MSM8939_SLAVE_CRYPTO_0_CFG, 4, -1, -1, 0);
 DEFINE_QNODE(slv_dehr_cfg, MSM8939_SLAVE_DEHR_CFG, 4, -1, -1, 0);
@@ -187,20 +187,20 @@ DEFINE_QNODE(slv_sdcc_2, MSM8939_SLAVE_SDCC_2, 4, -1, -1, 0);
 DEFINE_QNODE(slv_security, MSM8939_SLAVE_SECURITY, 4, -1, -1, 0);
 DEFINE_QNODE(slv_snoc_cfg, MSM8939_SLAVE_SNOC_CFG, 4, -1, -1, 0);
 DEFINE_QNODE(slv_spdm, MSM8939_SLAVE_SPDM, 4, -1, -1, 0);
-DEFINE_QNODE(slv_srvc_snoc, MSM8939_SLAVE_SRVC_SNOC, 8, -1, 29, 0);
+DEFINE_QNODE(slv_srvc_snoc, MSM8939_SLAVE_SRVC_SNOC, 8, -1, -1, 0);
 DEFINE_QNODE(slv_tcsr, MSM8939_SLAVE_TCSR, 4, -1, -1, 0);
 DEFINE_QNODE(slv_tlmm, MSM8939_SLAVE_TLMM, 4, -1, -1, 0);
 DEFINE_QNODE(slv_usb_hs1, MSM8939_SLAVE_USB_HS1, 4, -1, -1, 0);
 DEFINE_QNODE(slv_usb_hs2, MSM8939_SLAVE_USB_HS2, 4, -1, -1, 0);
 DEFINE_QNODE(slv_venus_cfg, MSM8939_SLAVE_VENUS_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(snoc_bimc_0_mas, MSM8939_SNOC_BIMC_0_MAS, 16, 3, -1, MSM8939_SNOC_BIMC_0_SLV);
-DEFINE_QNODE(snoc_bimc_0_slv, MSM8939_SNOC_BIMC_0_SLV, 16, -1, 24, MSM8939_SLAVE_EBI_CH0);
+DEFINE_QNODE(snoc_bimc_0_mas, MSM8939_SNOC_BIMC_0_MAS, 16, -1, -1, MSM8939_SNOC_BIMC_0_SLV);
+DEFINE_QNODE(snoc_bimc_0_slv, MSM8939_SNOC_BIMC_0_SLV, 16, -1, -1, MSM8939_SLAVE_EBI_CH0);
 DEFINE_QNODE(snoc_bimc_1_mas, MSM8939_SNOC_BIMC_1_MAS, 16, 76, -1, MSM8939_SNOC_BIMC_1_SLV);
 DEFINE_QNODE(snoc_bimc_1_slv, MSM8939_SNOC_BIMC_1_SLV, 16, -1, 104, MSM8939_SLAVE_EBI_CH0);
 DEFINE_QNODE(snoc_bimc_2_mas, MSM8939_SNOC_BIMC_2_MAS, 16, -1, -1, MSM8939_SNOC_BIMC_2_SLV);
 DEFINE_QNODE(snoc_bimc_2_slv, MSM8939_SNOC_BIMC_2_SLV, 16, -1, -1, MSM8939_SLAVE_EBI_CH0);
 DEFINE_QNODE(snoc_int_0, MSM8939_SNOC_INT_0, 8, 99, 130, MSM8939_SLAVE_QDSS_STM, MSM8939_SLAVE_IMEM, MSM8939_SNOC_PNOC_MAS);
-DEFINE_QNODE(snoc_int_1, MSM8939_SNOC_INT_1, 8, 100, 131, MSM8939_SLAVE_APSS, MSM8939_SLAVE_CATS_128, MSM8939_SLAVE_OCMEM_64);
+DEFINE_QNODE(snoc_int_1, MSM8939_SNOC_INT_1, 8, -1, -1, MSM8939_SLAVE_APSS, MSM8939_SLAVE_CATS_128, MSM8939_SLAVE_OCMEM_64);
 DEFINE_QNODE(snoc_int_bimc, MSM8939_SNOC_INT_BIMC, 8, 101, 132, MSM8939_SNOC_BIMC_1_MAS);
 DEFINE_QNODE(snoc_pcnoc_mas, MSM8939_SNOC_PNOC_MAS, 8, -1, -1, MSM8939_SNOC_PNOC_SLV);
 DEFINE_QNODE(snoc_pcnoc_slv, MSM8939_SNOC_PNOC_SLV, 8, -1, -1, MSM8939_PNOC_INT_0);
index 9126efc..321f590 100644 (file)
@@ -2714,7 +2714,6 @@ static int __init early_amd_iommu_init(void)
        struct acpi_table_header *ivrs_base;
        int i, remap_cache_sz, ret;
        acpi_status status;
-       u32 pci_id;
 
        if (!amd_iommu_detected)
                return -ENODEV;
@@ -2804,16 +2803,6 @@ static int __init early_amd_iommu_init(void)
        if (ret)
                goto out;
 
-       /* Disable IOMMU if there's Stoney Ridge graphics */
-       for (i = 0; i < 32; i++) {
-               pci_id = read_pci_config(0, i, 0, 0);
-               if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
-                       pr_info("Disable IOMMU on Stoney Ridge\n");
-                       amd_iommu_disabled = true;
-                       break;
-               }
-       }
-
        /* Disable any previously enabled IOMMUs */
        if (!is_kdump_kernel() || amd_iommu_disabled)
                disable_iommus();
@@ -2880,6 +2869,7 @@ static bool detect_ivrs(void)
 {
        struct acpi_table_header *ivrs_base;
        acpi_status status;
+       int i;
 
        status = acpi_get_table("IVRS", 0, &ivrs_base);
        if (status == AE_NOT_FOUND)
@@ -2892,6 +2882,17 @@ static bool detect_ivrs(void)
 
        acpi_put_table(ivrs_base);
 
+       /* Don't use IOMMU if there is Stoney Ridge graphics */
+       for (i = 0; i < 32; i++) {
+               u32 pci_id;
+
+               pci_id = read_pci_config(0, i, 0, 0);
+               if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
+                       pr_info("Disable IOMMU on Stoney Ridge\n");
+                       return false;
+               }
+       }
+
        /* Make sure ACS will be enabled during PCI probe */
        pci_request_acs();
 
@@ -2918,12 +2919,12 @@ static int __init state_next(void)
                }
                break;
        case IOMMU_IVRS_DETECTED:
-               ret = early_amd_iommu_init();
-               init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
-               if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
-                       pr_info("AMD IOMMU disabled\n");
+               if (amd_iommu_disabled) {
                        init_state = IOMMU_CMDLINE_DISABLED;
                        ret = -EINVAL;
+               } else {
+                       ret = early_amd_iommu_init();
+                       init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
                }
                break;
        case IOMMU_ACPI_FINISHED:
@@ -3001,8 +3002,11 @@ int __init amd_iommu_prepare(void)
        amd_iommu_irq_remap = true;
 
        ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
-       if (ret)
+       if (ret) {
+               amd_iommu_irq_remap = false;
                return ret;
+       }
+
        return amd_iommu_irq_remap ? 0 : -ENODEV;
 }
 
index 97eb62f..602aab9 100644 (file)
@@ -849,12 +849,11 @@ static struct iommu_device *tegra_smmu_probe_device(struct device *dev)
                smmu = tegra_smmu_find(args.np);
                if (smmu) {
                        err = tegra_smmu_configure(smmu, dev, &args);
-                       of_node_put(args.np);
 
-                       if (err < 0)
+                       if (err < 0) {
+                               of_node_put(args.np);
                                return ERR_PTR(err);
-
-                       break;
+                       }
                }
 
                of_node_put(args.np);
index 7168778..cb0afe8 100644 (file)
@@ -721,7 +721,7 @@ u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb)
  * Return value: CAPI result code
  */
 
-u16 capi20_get_manufacturer(u32 contr, u8 *buf)
+u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN])
 {
        struct capi_ctr *ctr;
        u16 ret;
@@ -787,7 +787,7 @@ u16 capi20_get_version(u32 contr, struct capi_version *verp)
  * Return value: CAPI result code
  */
 
-u16 capi20_get_serial(u32 contr, u8 *serial)
+u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN])
 {
        struct capi_ctr *ctr;
        u16 ret;
index ec47508..39f841b 100644 (file)
@@ -694,7 +694,7 @@ isac_release(struct isac_hw *isac)
 {
        if (isac->type & IPAC_TYPE_ISACX)
                WriteISAC(isac, ISACX_MASK, 0xff);
-       else
+       else if (isac->type != 0)
                WriteISAC(isac, ISAC_MASK, 0xff);
        if (isac->dch.timer.function != NULL) {
                del_timer(&isac->dch.timer);
index 7b2f4d0..2f9a289 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * CZ.NIC's Turris Omnia LEDs driver
  *
- * 2020 by Marek Behun <marek.behun@nic.cz>
+ * 2020 by Marek Behún <kabel@kernel.org>
  */
 
 #include <linux/i2c.h>
@@ -287,6 +287,6 @@ static struct i2c_driver omnia_leds_driver = {
 
 module_i2c_driver(omnia_leds_driver);
 
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
 MODULE_DESCRIPTION("CZ.NIC's Turris Omnia LEDs");
 MODULE_LICENSE("GPL v2");
index 9f2ce7f..456a117 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * rWTM BIU Mailbox driver for Armada 37xx
  *
- * Author: Marek Behun <marek.behun@nic.cz>
+ * Author: Marek Behún <kabel@kernel.org>
  */
 
 #include <linux/device.h>
@@ -203,4 +203,4 @@ module_platform_driver(armada_37xx_mbox_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("rWTM BIU Mailbox driver for Armada 37xx");
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
index 5e306bb..1ca65b4 100644 (file)
@@ -529,7 +529,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
         * Grab our output buffer.
         */
        nl = orig_nl = get_result_buffer(param, param_size, &len);
-       if (len < needed) {
+       if (len < needed || len < sizeof(nl->dev)) {
                param->flags |= DM_BUFFER_FULL_FLAG;
                goto out;
        }
index 95391f7..e5f0f17 100644 (file)
@@ -1594,6 +1594,13 @@ static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
        return blk_queue_zoned_model(q) != *zoned_model;
 }
 
+/*
+ * Check the device zoned model based on the target feature flag. If the target
+ * has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are
+ * also accepted but all devices must have the same zoned model. If the target
+ * has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any
+ * zoned model with all zoned devices having the same zone size.
+ */
 static bool dm_table_supports_zoned_model(struct dm_table *t,
                                          enum blk_zoned_model zoned_model)
 {
@@ -1603,13 +1610,15 @@ static bool dm_table_supports_zoned_model(struct dm_table *t,
        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                ti = dm_table_get_target(t, i);
 
-               if (zoned_model == BLK_ZONED_HM &&
-                   !dm_target_supports_zoned_hm(ti->type))
-                       return false;
-
-               if (!ti->type->iterate_devices ||
-                   ti->type->iterate_devices(ti, device_not_zoned_model, &zoned_model))
-                       return false;
+               if (dm_target_supports_zoned_hm(ti->type)) {
+                       if (!ti->type->iterate_devices ||
+                           ti->type->iterate_devices(ti, device_not_zoned_model,
+                                                     &zoned_model))
+                               return false;
+               } else if (!dm_target_supports_mixed_zoned_model(ti->type)) {
+                       if (zoned_model == BLK_ZONED_HM)
+                               return false;
+               }
        }
 
        return true;
@@ -1621,9 +1630,17 @@ static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *
        struct request_queue *q = bdev_get_queue(dev->bdev);
        unsigned int *zone_sectors = data;
 
+       if (!blk_queue_is_zoned(q))
+               return 0;
+
        return blk_queue_zone_sectors(q) != *zone_sectors;
 }
 
+/*
+ * Check consistency of zoned model and zone sectors across all targets. For
+ * zone sectors, if the destination device is a zoned block device, it shall
+ * have the specified zone_sectors.
+ */
 static int validate_hardware_zoned_model(struct dm_table *table,
                                         enum blk_zoned_model zoned_model,
                                         unsigned int zone_sectors)
@@ -1642,7 +1659,7 @@ static int validate_hardware_zoned_model(struct dm_table *table,
                return -EINVAL;
 
        if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) {
-               DMERR("%s: zone sectors is not consistent across all devices",
+               DMERR("%s: zone sectors is not consistent across all zoned devices",
                      dm_device_name(table->md));
                return -EINVAL;
        }
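
A hedged summary of the decision the rewritten loop above implements, per target (the flag helpers are the ones named in the hunk):

	/*
	 *   dm_target_supports_zoned_hm(t)           -> every underlying device
	 *                                               must match zoned_model
	 *   dm_target_supports_mixed_zoned_model(t)  -> any mix of models is
	 *                                               accepted
	 *   neither flag                             -> reject when the table's
	 *                                               model is BLK_ZONED_HM
	 */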
index 6b8e5bd..808a98e 100644 (file)
@@ -34,7 +34,7 @@
 #define DM_VERITY_OPT_IGN_ZEROES       "ignore_zero_blocks"
 #define DM_VERITY_OPT_AT_MOST_ONCE     "check_at_most_once"
 
-#define DM_VERITY_OPTS_MAX             (2 + DM_VERITY_OPTS_FEC + \
+#define DM_VERITY_OPTS_MAX             (3 + DM_VERITY_OPTS_FEC + \
                                         DM_VERITY_ROOT_HASH_VERIFICATION_OPTS)
 
 static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
index 697f9de..7e88df6 100644 (file)
@@ -1143,7 +1143,7 @@ static int dmz_message(struct dm_target *ti, unsigned int argc, char **argv,
 static struct target_type dmz_type = {
        .name            = "zoned",
        .version         = {2, 0, 0},
-       .features        = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM,
+       .features        = DM_TARGET_SINGLETON | DM_TARGET_MIXED_ZONED_MODEL,
        .module          = THIS_MODULE,
        .ctr             = dmz_ctr,
        .dtr             = dmz_dtr,
index 50b693d..3f3be94 100644 (file)
@@ -2036,7 +2036,10 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
        if (size != dm_get_size(md))
                memset(&md->geometry, 0, sizeof(md->geometry));
 
-       set_capacity_and_notify(md->disk, size);
+       if (!get_capacity(md->disk))
+               set_capacity(md->disk, size);
+       else
+               set_capacity_and_notify(md->disk, size);
 
        dm_table_event_callback(t, event_callback, md);
 
index 8a85852..5f6e97a 100644 (file)
@@ -430,4 +430,3 @@ MODULE_AUTHOR("Andreas Monitzer <andy@monitzer.com>");
 MODULE_AUTHOR("Ben Backx <ben@bbackx.com>");
 MODULE_DESCRIPTION("FireDTV DVB Driver");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("FireDTV DVB");
index 692b95a..9a82e68 100644 (file)
@@ -41,7 +41,6 @@ MODULE_PARM_DESC(debug,
 
 MODULE_AUTHOR("Andy Walls");
 MODULE_DESCRIPTION("CX23418 ALSA Interface");
-MODULE_SUPPORTED_DEVICE("CX23418 MPEG2 encoder");
 MODULE_LICENSE("GPL");
 
 MODULE_VERSION(CX18_VERSION);
index 95aed00..f2440eb 100644 (file)
@@ -232,7 +232,6 @@ MODULE_PARM_DESC(cx18_first_minor,
 
 MODULE_AUTHOR("Hans Verkuil");
 MODULE_DESCRIPTION("CX23418 driver");
-MODULE_SUPPORTED_DEVICE("CX23418 MPEG2 encoder");
 MODULE_LICENSE("GPL");
 
 MODULE_VERSION(CX18_VERSION);
index 608fbaf..8797d85 100644 (file)
@@ -104,7 +104,6 @@ MODULE_PARM_DESC(index, "Index value for cx25821 capture interface(s).");
 MODULE_DESCRIPTION("ALSA driver module for cx25821 based capture cards");
 MODULE_AUTHOR("Hiep Huynh");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Conexant,25821}");  /* "{{Conexant,23881}," */
 
 static unsigned int debug;
 module_param(debug, int, 0644);
index 95e0cbb..c83814c 100644 (file)
@@ -98,7 +98,6 @@ MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
 MODULE_LICENSE("GPL v2");
 MODULE_VERSION(CX88_VERSION);
 
-MODULE_SUPPORTED_DEVICE("{{Conexant,23881},{{Conexant,23882},{{Conexant,23883}");
 static unsigned int debug;
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "enable debug messages");
index 39029b8..4cefdb2 100644 (file)
@@ -38,7 +38,6 @@ MODULE_PARM_DESC(index,
 
 MODULE_AUTHOR("Andy Walls");
 MODULE_DESCRIPTION("CX23415/CX23416 ALSA Interface");
-MODULE_SUPPORTED_DEVICE("CX23415/CX23416 MPEG2 encoder");
 MODULE_LICENSE("GPL");
 
 MODULE_VERSION(IVTV_VERSION);
index 6e448cb..942b8c2 100644 (file)
@@ -275,9 +275,6 @@ MODULE_PARM_DESC(ivtv_first_minor, "Set device node number assigned to first car
 
 MODULE_AUTHOR("Kevin Thayer, Chris Kennedy, Hans Verkuil");
 MODULE_DESCRIPTION("CX23415/CX23416 driver");
-MODULE_SUPPORTED_DEVICE
-    ("CX23415/CX23416 MPEG2 encoder (WinTV PVR-150/250/350/500,\n"
-               "\t\t\tYuan MPG series and similar)");
 MODULE_LICENSE("GPL");
 
 MODULE_VERSION(IVTV_VERSION);
index 336df65..524912f 100644 (file)
@@ -1269,6 +1269,5 @@ late_initcall_sync(sta2x11_vip_init_module);
 MODULE_DESCRIPTION("STA2X11 Video Input Port driver");
 MODULE_AUTHOR("Wind River");
 MODULE_LICENSE("GPL v2");
-MODULE_SUPPORTED_DEVICE("sta2x11 video input");
 MODULE_VERSION(DRV_VERSION);
 MODULE_DEVICE_TABLE(pci, sta2x11_vip_pci_tbl);
index 0514be6..e392b3e 100644 (file)
@@ -1363,4 +1363,3 @@ module_platform_driver(atmel_isi_driver);
 MODULE_AUTHOR("Josh Wu <josh.wu@atmel.com>");
 MODULE_DESCRIPTION("The V4L2 driver for Atmel Linux");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("video");
index 0b78fec..61d9885 100644 (file)
@@ -330,4 +330,3 @@ module_platform_driver(atmel_isc_driver);
 MODULE_AUTHOR("Songjun Wu");
 MODULE_DESCRIPTION("The V4L2 driver for Atmel-ISC");
 MODULE_LICENSE("GPL v2");
-MODULE_SUPPORTED_DEVICE("video");
index 9c94a8b..baac86f 100644 (file)
 MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
 MODULE_DESCRIPTION("Marvell 88ALP01 CMOS Camera Controller driver");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("Video");
-
-
-
 
 struct cafe_camera {
        int registered;                 /* Fully initialized? */
index bbcc225..d9b4ad0 100644 (file)
@@ -2149,4 +2149,3 @@ MODULE_AUTHOR("Yannick Fertre <yannick.fertre@st.com>");
 MODULE_AUTHOR("Hugues Fruchet <hugues.fruchet@st.com>");
 MODULE_DESCRIPTION("STMicroelectronics STM32 Digital Camera Memory Interface driver");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("video");
index e488e78..69d5c62 100644 (file)
@@ -56,7 +56,6 @@ MODULE_PARM_DESC(flicker_mode, "Flicker frequency (0 (disabled), " __stringify(5
 
 MODULE_AUTHOR("Steve Miller (STMicroelectronics) <steve.miller@st.com>");
 MODULE_DESCRIPTION("V4L-driver for STMicroelectronics CPiA2 based cameras");
-MODULE_SUPPORTED_DEVICE("video");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(CPIA_VERSION);
 
index 3a2df36..a19a467 100644 (file)
@@ -51,7 +51,6 @@ MODULE_PARM_DESC(index, "Index value for tm6000x capture interface(s).");
 MODULE_DESCRIPTION("ALSA driver module for tm5600/tm6000/tm6010 based TV cards");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
 MODULE_LICENSE("GPL v2");
-MODULE_SUPPORTED_DEVICE("{{Trident,tm5600},{{Trident,tm6000},{{Trident,tm6010}");
 static unsigned int debug;
 module_param(debug, int, 0644);
 MODULE_PARM_DESC(debug, "enable debug messages");
index 293a460..4990fa8 100644 (file)
@@ -23,8 +23,6 @@ MODULE_DESCRIPTION("DVB driver extension module for tm5600/6000/6010 based TV ca
 MODULE_AUTHOR("Mauro Carvalho Chehab");
 MODULE_LICENSE("GPL");
 
-MODULE_SUPPORTED_DEVICE("{{Trident, tm5600},{{Trident, tm6000},{{Trident, tm6010}");
-
 static int debug;
 
 module_param(debug, int, 0644);
index fe8ca94..b67cb0a 100644 (file)
@@ -72,7 +72,8 @@ static const struct dmi_system_id dmi_platform_info[] = {
        {}
 };
 
-static const struct resource intel_quark_i2c_res[] = {
+/* This is used as a placeholder and will be modified at run time */
+static struct resource intel_quark_i2c_res[] = {
        [INTEL_QUARK_IORES_MEM] = {
                .flags = IORESOURCE_MEM,
        },
@@ -85,7 +86,8 @@ static struct mfd_cell_acpi_match intel_quark_acpi_match_i2c = {
        .adr = MFD_ACPI_MATCH_I2C,
 };
 
-static const struct resource intel_quark_gpio_res[] = {
+/* This is used as a placeholder and will be modified at run time */
+static struct resource intel_quark_gpio_res[] = {
        [INTEL_QUARK_IORES_MEM] = {
                .flags = IORESOURCE_MEM,
        },
index 4378a9b..2cc370a 100644 (file)
@@ -2286,8 +2286,8 @@ int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
        if (buffer_id == 0)
                return -EINVAL;
 
-       if (!mei_cl_is_connected(cl))
-               return -ENODEV;
+       if (mei_cl_is_connected(cl))
+               return -EPROTO;
 
        if (cl->dma_mapped)
                return -EPROTO;
@@ -2327,9 +2327,7 @@ int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
 
        mutex_unlock(&dev->device_lock);
        wait_event_timeout(cl->wait,
-                          cl->dma_mapped ||
-                          cl->status ||
-                          !mei_cl_is_connected(cl),
+                          cl->dma_mapped || cl->status,
                           mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
        mutex_lock(&dev->device_lock);
 
@@ -2376,8 +2374,9 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
                return -EOPNOTSUPP;
        }
 
-       if (!mei_cl_is_connected(cl))
-               return -ENODEV;
+       /* do not allow unmap for connected client */
+       if (mei_cl_is_connected(cl))
+               return -EPROTO;
 
        if (!cl->dma_mapped)
                return -EPROTO;
@@ -2405,9 +2404,7 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
 
        mutex_unlock(&dev->device_lock);
        wait_event_timeout(cl->wait,
-                          !cl->dma_mapped ||
-                          cl->status ||
-                          !mei_cl_is_connected(cl),
+                          !cl->dma_mapped || cl->status,
                           mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
        mutex_lock(&dev->device_lock);
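
The inverted connection checks above change the DMA buffer lifecycle: mapping and unmapping are now only allowed while the client is disconnected, which is why the wait conditions no longer need to watch for a disconnect. A hedged ordering sketch (the connect and disconnect steps are described, not literal calls):

	/*
	 *   mei_cl_dma_alloc_and_map(cl, fp, buffer_id, size);  <- before connect
	 *   ... connect the client and exchange data ...
	 *   ... disconnect the client ...
	 *   mei_cl_dma_unmap(cl, fp);                           <- after disconnect
	 */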
 
index eb72582..f9cfb08 100644 (file)
@@ -32,7 +32,6 @@
 
 MODULE_AUTHOR("Eric Brower <ebrower@usa.net>");
 MODULE_DESCRIPTION("User-programmable flash device on Sun Microsystems boardsets");
-MODULE_SUPPORTED_DEVICE(DRIVER_NAME);
 MODULE_LICENSE("GPL");
 MODULE_VERSION("2.1");
 
index 8bdc44b..3c8f665 100644 (file)
@@ -127,6 +127,8 @@ static int com20020pci_probe(struct pci_dev *pdev,
        int i, ioaddr, ret;
        struct resource *r;
 
+       ret = 0;
+
        if (pci_enable_device(pdev))
                return -EIO;
 
@@ -139,6 +141,8 @@ static int com20020pci_probe(struct pci_dev *pdev,
        priv->ci = ci;
        mm = &ci->misc_map;
 
+       pci_set_drvdata(pdev, priv);
+
        INIT_LIST_HEAD(&priv->list_dev);
 
        if (mm->size) {
@@ -161,7 +165,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
                dev = alloc_arcdev(device);
                if (!dev) {
                        ret = -ENOMEM;
-                       goto out_port;
+                       break;
                }
                dev->dev_port = i;
 
@@ -178,7 +182,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
                        pr_err("IO region %xh-%xh already allocated\n",
                               ioaddr, ioaddr + cm->size - 1);
                        ret = -EBUSY;
-                       goto out_port;
+                       goto err_free_arcdev;
                }
 
                /* Dummy access after Reset
@@ -216,18 +220,18 @@ static int com20020pci_probe(struct pci_dev *pdev,
                if (arcnet_inb(ioaddr, COM20020_REG_R_STATUS) == 0xFF) {
                        pr_err("IO address %Xh is empty!\n", ioaddr);
                        ret = -EIO;
-                       goto out_port;
+                       goto err_free_arcdev;
                }
                if (com20020_check(dev)) {
                        ret = -EIO;
-                       goto out_port;
+                       goto err_free_arcdev;
                }
 
                card = devm_kzalloc(&pdev->dev, sizeof(struct com20020_dev),
                                    GFP_KERNEL);
                if (!card) {
                        ret = -ENOMEM;
-                       goto out_port;
+                       goto err_free_arcdev;
                }
 
                card->index = i;
@@ -253,29 +257,29 @@ static int com20020pci_probe(struct pci_dev *pdev,
 
                ret = devm_led_classdev_register(&pdev->dev, &card->tx_led);
                if (ret)
-                       goto out_port;
+                       goto err_free_arcdev;
 
                ret = devm_led_classdev_register(&pdev->dev, &card->recon_led);
                if (ret)
-                       goto out_port;
+                       goto err_free_arcdev;
 
                dev_set_drvdata(&dev->dev, card);
 
                ret = com20020_found(dev, IRQF_SHARED);
                if (ret)
-                       goto out_port;
+                       goto err_free_arcdev;
 
                devm_arcnet_led_init(dev, dev->dev_id, i);
 
                list_add(&card->list, &priv->list_dev);
-       }
+               continue;
 
-       pci_set_drvdata(pdev, priv);
-
-       return 0;
-
-out_port:
-       com20020pci_remove(pdev);
+err_free_arcdev:
+               free_arcdev(dev);
+               break;
+       }
+       if (ret)
+               com20020pci_remove(pdev);
        return ret;
 }
 
index 456315b..74cbbb2 100644 (file)
@@ -3978,15 +3978,11 @@ static int bond_neigh_init(struct neighbour *n)
 
        rcu_read_lock();
        slave = bond_first_slave_rcu(bond);
-       if (!slave) {
-               ret = -EINVAL;
+       if (!slave)
                goto out;
-       }
        slave_ops = slave->dev->netdev_ops;
-       if (!slave_ops->ndo_neigh_setup) {
-               ret = -EINVAL;
+       if (!slave_ops->ndo_neigh_setup)
                goto out;
-       }
 
        /* TODO: find another way [1] to implement this.
         * Passing a zeroed structure is fragile,
index ef474ba..6958830 100644 (file)
@@ -212,18 +212,6 @@ static const struct can_bittiming_const c_can_bittiming_const = {
        .brp_inc = 1,
 };
 
-static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv)
-{
-       if (priv->device)
-               pm_runtime_enable(priv->device);
-}
-
-static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv)
-{
-       if (priv->device)
-               pm_runtime_disable(priv->device);
-}
-
 static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv)
 {
        if (priv->device)
@@ -1335,7 +1323,6 @@ static const struct net_device_ops c_can_netdev_ops = {
 
 int register_c_can_dev(struct net_device *dev)
 {
-       struct c_can_priv *priv = netdev_priv(dev);
        int err;
 
        /* Deactivate pins to prevent DRA7 DCAN IP from being
@@ -1345,28 +1332,19 @@ int register_c_can_dev(struct net_device *dev)
         */
        pinctrl_pm_select_sleep_state(dev->dev.parent);
 
-       c_can_pm_runtime_enable(priv);
-
        dev->flags |= IFF_ECHO; /* we support local echo */
        dev->netdev_ops = &c_can_netdev_ops;
 
        err = register_candev(dev);
-       if (err)
-               c_can_pm_runtime_disable(priv);
-       else
+       if (!err)
                devm_can_led_init(dev);
-
        return err;
 }
 EXPORT_SYMBOL_GPL(register_c_can_dev);
 
 void unregister_c_can_dev(struct net_device *dev)
 {
-       struct c_can_priv *priv = netdev_priv(dev);
-
        unregister_candev(dev);
-
-       c_can_pm_runtime_disable(priv);
 }
 EXPORT_SYMBOL_GPL(unregister_c_can_dev);
 
index 406b484..7efb60b 100644 (file)
@@ -239,12 +239,13 @@ static void c_can_pci_remove(struct pci_dev *pdev)
 {
        struct net_device *dev = pci_get_drvdata(pdev);
        struct c_can_priv *priv = netdev_priv(dev);
+       void __iomem *addr = priv->base;
 
        unregister_c_can_dev(dev);
 
        free_c_can_dev(dev);
 
-       pci_iounmap(pdev, priv->base);
+       pci_iounmap(pdev, addr);
        pci_disable_msi(pdev);
        pci_clear_master(pdev);
        pci_release_regions(pdev);
index 05f425c..47b251b 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/list.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/clk.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -386,6 +387,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
 
+       pm_runtime_enable(priv->device);
        ret = register_c_can_dev(dev);
        if (ret) {
                dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
@@ -398,6 +400,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
        return 0;
 
 exit_free_device:
+       pm_runtime_disable(priv->device);
        free_c_can_dev(dev);
 exit:
        dev_err(&pdev->dev, "probe failed\n");
@@ -408,9 +411,10 @@ exit:
 static int c_can_plat_remove(struct platform_device *pdev)
 {
        struct net_device *dev = platform_get_drvdata(pdev);
+       struct c_can_priv *priv = netdev_priv(dev);
 
        unregister_c_can_dev(dev);
-
+       pm_runtime_disable(priv->device);
        free_c_can_dev(dev);
 
        return 0;
index 867f6be..f5d79e6 100644 (file)
@@ -355,6 +355,7 @@ static void can_dellink(struct net_device *dev, struct list_head *head)
 
 struct rtnl_link_ops can_link_ops __read_mostly = {
        .kind           = "can",
+       .netns_refund   = true,
        .maxtype        = IFLA_CAN_MAX,
        .policy         = can_policy,
        .setup          = can_setup,
index 134c057..57f3635 100644 (file)
@@ -697,9 +697,15 @@ static int flexcan_chip_disable(struct flexcan_priv *priv)
 static int flexcan_chip_freeze(struct flexcan_priv *priv)
 {
        struct flexcan_regs __iomem *regs = priv->regs;
-       unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate;
+       unsigned int timeout;
+       u32 bitrate = priv->can.bittiming.bitrate;
        u32 reg;
 
+       if (bitrate)
+               timeout = 1000 * 1000 * 10 / bitrate;
+       else
+               timeout = FLEXCAN_TIMEOUT_US / 10;
+
        reg = priv->read(&regs->mcr);
        reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT;
        priv->write(reg, &regs->mcr);
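
Worked numbers for the guarded computation above: the freeze timeout is ten million divided by the bitrate, i.e. roughly ten bit times expressed in microseconds, and a bitrate of zero (nothing configured yet) previously divided by zero:

	/* 1 Mbit/s   -> 10 * 1000 * 1000 / 1000000 = 10
	 * 500 kbit/s -> 10 * 1000 * 1000 /  500000 = 20
	 * 125 kbit/s -> 10 * 1000 * 1000 /  125000 = 80
	 * bitrate 0  -> fall back to FLEXCAN_TIMEOUT_US / 10
	 */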
index 37e0501..74d9899 100644 (file)
@@ -57,6 +57,7 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
 #define KVASER_PCIEFD_KCAN_STAT_REG 0x418
 #define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
 #define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
+#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424
 #define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
 #define KVASER_PCIEFD_KCAN_PWM_REG 0x430
 /* Loopback control register */
@@ -949,6 +950,9 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
                timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer,
                            0);
 
+               /* Disable Bus load reporting */
+               iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG);
+
                tx_npackets = ioread32(can->reg_base +
                                       KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
                if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) &
index 3752520..0c8d36b 100644 (file)
@@ -501,9 +501,6 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota)
        }
 
        while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) {
-               if (rxfs & RXFS_RFL)
-                       netdev_warn(dev, "Rx FIFO 0 Message Lost\n");
-
                m_can_read_fifo(dev, rxfs);
 
                quota--;
@@ -876,7 +873,7 @@ static int m_can_rx_peripheral(struct net_device *dev)
 {
        struct m_can_classdev *cdev = netdev_priv(dev);
 
-       m_can_rx_handler(dev, 1);
+       m_can_rx_handler(dev, M_CAN_NAPI_WEIGHT);
 
        m_can_enable_all_interrupts(cdev);
 
index 0df1cdf..1df3c4b 100644 (file)
@@ -21,7 +21,6 @@
 
 MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
 MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe/M.2 FD family cards");
-MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe/M.2 FD CAN cards");
 MODULE_LICENSE("GPL v2");
 
 #define PCIEFD_DRV_NAME                "peak_pciefd"
index 6f88c99..4ab9175 100644 (file)
@@ -21,7 +21,6 @@
 
 MODULE_AUTHOR("Sebastian Haas <haas@ems-wuenche.com>");
 MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-PCI/PCIe/104P CAN cards");
-MODULE_SUPPORTED_DEVICE("EMS CPC-PCI/PCIe/104P CAN card");
 MODULE_LICENSE("GPL v2");
 
 #define EMS_PCI_V1_MAX_CHAN 2
index 770304e..e21b169 100644 (file)
@@ -21,7 +21,6 @@
 
 MODULE_AUTHOR("Markus Plessing <plessing@ems-wuensche.com>");
 MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-CARD cards");
-MODULE_SUPPORTED_DEVICE("EMS CPC-CARD CAN card");
 MODULE_LICENSE("GPL v2");
 
 #define EMS_PCMCIA_MAX_CHAN 2
index 0ea6b71..95fe9ee 100644 (file)
@@ -33,7 +33,6 @@
 
 MODULE_AUTHOR("Per Dalen <per.dalen@cnw.se>");
 MODULE_DESCRIPTION("Socket-CAN driver for KVASER PCAN PCI cards");
-MODULE_SUPPORTED_DEVICE("KVASER PCAN PCI CAN card");
 MODULE_LICENSE("GPL v2");
 
 #define MAX_NO_OF_CHANNELS        4 /* max no of channels on a single card */
index 4713921..84eac8c 100644 (file)
@@ -24,8 +24,6 @@
 
 MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
 MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCI family cards");
-MODULE_SUPPORTED_DEVICE("PEAK PCAN PCI/PCIe/PCIeC miniPCI CAN cards");
-MODULE_SUPPORTED_DEVICE("PEAK PCAN miniPCIe/cPCI PC/104+ PCI/104e CAN Cards");
 MODULE_LICENSE("GPL v2");
 
 #define DRV_NAME  "peak_pci"
index cf951a7..131a084 100644 (file)
@@ -22,7 +22,6 @@
 MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
 MODULE_DESCRIPTION("CAN driver for PEAK-System PCAN-PC Cards");
 MODULE_LICENSE("GPL v2");
-MODULE_SUPPORTED_DEVICE("PEAK PCAN-PC Card");
 
 /* PEAK-System PCMCIA driver name */
 #define PCC_NAME               "peak_pcmcia"
index 8567958..5de1ebb 100644 (file)
 MODULE_AUTHOR("Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su>");
 MODULE_DESCRIPTION("Socket-CAN driver for PLX90xx PCI-bridge cards with "
                   "the SJA1000 chips");
-MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
-                       "Adlink PCI-7841/cPCI-7841 SE, "
-                       "Marathon CAN-bus-PCI, "
-                       "Marathon CAN-bus-PCIe, "
-                       "TEWS TECHNOLOGIES TPMC810, "
-                       "esd CAN-PCI/CPCI/PCI104/200, "
-                       "esd CAN-PCI/PMC/266, "
-                       "esd CAN-PCIe/2000, "
-                       "Connect Tech Inc. CANpro/104-Plus Opto (CRG001), "
-                       "IXXAT PC-I 04/PCI, "
-                       "ELCUS CAN-200-PCI, "
-                       "ASEM DUAL CAN-RAW")
 MODULE_LICENSE("GPL v2");
 
 #define PLX_PCI_MAX_CHAN 2
index f69fb42..a57da43 100644 (file)
@@ -314,6 +314,18 @@ static int mcp251x_spi_trans(struct spi_device *spi, int len)
        return ret;
 }
 
+static int mcp251x_spi_write(struct spi_device *spi, int len)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+       int ret;
+
+       ret = spi_write(spi, priv->spi_tx_buf, len);
+       if (ret)
+               dev_err(&spi->dev, "spi write failed: ret = %d\n", ret);
+
+       return ret;
+}
+
 static u8 mcp251x_read_reg(struct spi_device *spi, u8 reg)
 {
        struct mcp251x_priv *priv = spi_get_drvdata(spi);
@@ -361,7 +373,7 @@ static void mcp251x_write_reg(struct spi_device *spi, u8 reg, u8 val)
        priv->spi_tx_buf[1] = reg;
        priv->spi_tx_buf[2] = val;
 
-       mcp251x_spi_trans(spi, 3);
+       mcp251x_spi_write(spi, 3);
 }
 
 static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2)
@@ -373,7 +385,7 @@ static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2)
        priv->spi_tx_buf[2] = v1;
        priv->spi_tx_buf[3] = v2;
 
-       mcp251x_spi_trans(spi, 4);
+       mcp251x_spi_write(spi, 4);
 }
 
 static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
@@ -386,7 +398,7 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
        priv->spi_tx_buf[2] = mask;
        priv->spi_tx_buf[3] = val;
 
-       mcp251x_spi_trans(spi, 4);
+       mcp251x_spi_write(spi, 4);
 }
 
 static u8 mcp251x_read_stat(struct spi_device *spi)
@@ -618,7 +630,7 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
                                          buf[i]);
        } else {
                memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len);
-               mcp251x_spi_trans(spi, TXBDAT_OFF + len);
+               mcp251x_spi_write(spi, TXBDAT_OFF + len);
        }
 }
 
@@ -650,7 +662,7 @@ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
 
        /* use INSTRUCTION_RTS, to avoid "repeated frame problem" */
        priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx);
-       mcp251x_spi_trans(priv->spi, 1);
+       mcp251x_spi_write(priv->spi, 1);
 }
 
 static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
@@ -888,7 +900,7 @@ static int mcp251x_hw_reset(struct spi_device *spi)
        mdelay(MCP251X_OST_DELAY_MS);
 
        priv->spi_tx_buf[0] = INSTRUCTION_RESET;
-       ret = mcp251x_spi_trans(spi, 1);
+       ret = mcp251x_spi_write(spi, 1);
        if (ret)
                return ret;
 
index c1e5d5b..538f4d9 100644 (file)
@@ -73,6 +73,7 @@ config CAN_KVASER_USB
            - Kvaser Memorator Pro 5xHS
            - Kvaser USBcan Light 4xHS
            - Kvaser USBcan Pro 2xHS v2
+           - Kvaser USBcan Pro 4xHS
            - Kvaser USBcan Pro 5xHS
            - Kvaser U100
            - Kvaser U100P
index 2b7efd2..4e97da8 100644 (file)
@@ -86,8 +86,9 @@
 #define USB_U100_PRODUCT_ID                    273
 #define USB_U100P_PRODUCT_ID                   274
 #define USB_U100S_PRODUCT_ID                   275
+#define USB_USBCAN_PRO_4HS_PRODUCT_ID          276
 #define USB_HYDRA_PRODUCT_ID_END \
-       USB_U100S_PRODUCT_ID
+       USB_USBCAN_PRO_4HS_PRODUCT_ID
 
 static inline bool kvaser_is_leaf(const struct usb_device_id *id)
 {
@@ -193,6 +194,7 @@ static const struct usb_device_id kvaser_usb_table[] = {
        { USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID) },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID) },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID) },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_4HS_PRODUCT_ID) },
        { }
 };
 MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
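
USB_HYDRA_PRODUCT_ID_END bounds a contiguous range of Hydra-family product IDs, which is why adding the 4xHS entry also bumps the end marker. A hedged sketch of how such an end marker is typically consumed (USB_HYDRA_PRODUCT_ID_BEGIN is a hypothetical name for the range's lower bound):

	static inline bool kvaser_is_hydra(const struct usb_device_id *id)
	{
		return id->idProduct >= USB_HYDRA_PRODUCT_ID_BEGIN && /* hypothetical */
		       id->idProduct <= USB_HYDRA_PRODUCT_ID_END;
	}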
index e6c1e5d..e393e84 100644 (file)
@@ -18,8 +18,6 @@
 
 #include "pcan_usb_core.h"
 
-MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB adapter");
-
 /* PCAN-USB Endpoints */
 #define PCAN_USB_EP_CMDOUT             1
 #define PCAN_USB_EP_CMDIN              (PCAN_USB_EP_CMDOUT | USB_DIR_IN)
index 573b115..28e916a 100644 (file)
@@ -857,7 +857,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
        if (dev->adapter->dev_set_bus) {
                err = dev->adapter->dev_set_bus(dev, 0);
                if (err)
-                       goto lbl_unregister_candev;
+                       goto adap_dev_free;
        }
 
        /* get device number early */
@@ -869,6 +869,10 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
 
        return 0;
 
+adap_dev_free:
+       if (dev->adapter->dev_free)
+               dev->adapter->dev_free(dev);
+
 lbl_unregister_candev:
        unregister_candev(netdev);
 
index f347ecc..bae0785 100644 (file)
@@ -16,9 +16,6 @@
 #include "pcan_usb_core.h"
 #include "pcan_usb_pro.h"
 
-MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB FD adapter");
-MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro FD adapter");
-
 #define PCAN_USBPROFD_CHANNEL_COUNT    2
 #define PCAN_USBFD_CHANNEL_COUNT       1
 
index 275087c..18fa180 100644 (file)
@@ -17,8 +17,6 @@
 #include "pcan_usb_core.h"
 #include "pcan_usb_pro.h"
 
-MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro adapter");
-
 #define PCAN_USBPRO_CHANNEL_COUNT      2
 
 /* PCAN-USB Pro adapter internal clock (MHz) */
index a162499..eb44372 100644 (file)
@@ -1105,13 +1105,6 @@ static int b53_setup(struct dsa_switch *ds)
                        b53_disable_port(ds, port);
        }
 
-       /* Let DSA handle the case where multiple bridges span the same switch
-        * device and different VLAN awareness settings are requested, which
-        * would be breaking filtering semantics for any of the other bridge
-        * devices. (not hardware supported)
-        */
-       ds->vlan_filtering_is_global = true;
-
        return b53_setup_devlink_resources(ds);
 }
 
@@ -2664,6 +2657,13 @@ struct b53_device *b53_switch_alloc(struct device *base,
        ds->ops = &b53_switch_ops;
        ds->untag_bridge_pvid = true;
        dev->vlan_enabled = true;
+       /* Let DSA handle the case where multiple bridges span the same switch
+        * device and different VLAN awareness settings are requested, which
+        * would be breaking filtering semantics for any of the other bridge
+        * devices. (not hardware supported)
+        */
+       ds->vlan_filtering_is_global = true;
+
        mutex_init(&dev->reg_mutex);
        mutex_init(&dev->stats_mutex);
 
index f277df9..ba5d546 100644 (file)
@@ -114,7 +114,10 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
                /* Force link status for IMP port */
                reg = core_readl(priv, offset);
                reg |= (MII_SW_OR | LINK_STS);
-               reg &= ~GMII_SPEED_UP_2G;
+               if (priv->type == BCM4908_DEVICE_ID)
+                       reg |= GMII_SPEED_UP_2G;
+               else
+                       reg &= ~GMII_SPEED_UP_2G;
                core_writel(priv, reg, offset);
 
                /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
@@ -585,8 +588,10 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
         * in bits 15:8 and the patch level in bits 7:0 which is exactly what
         * the REG_PHY_REVISION register layout is.
         */
-
-       return priv->hw_params.gphy_rev;
+       if (priv->int_phy_mask & BIT(port))
+               return priv->hw_params.gphy_rev;
+       else
+               return 0;
 }
 
 static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
index 52e865a..bf5c62e 100644 (file)
 
 /* GSWIP MII Registers */
 #define GSWIP_MII_CFGp(p)              (0x2 * (p))
+#define  GSWIP_MII_CFG_RESET           BIT(15)
 #define  GSWIP_MII_CFG_EN              BIT(14)
+#define  GSWIP_MII_CFG_ISOLATE         BIT(13)
 #define  GSWIP_MII_CFG_LDCLKDIS                BIT(12)
+#define  GSWIP_MII_CFG_RGMII_IBS       BIT(8)
+#define  GSWIP_MII_CFG_RMII_CLK                BIT(7)
 #define  GSWIP_MII_CFG_MODE_MIIP       0x0
 #define  GSWIP_MII_CFG_MODE_MIIM       0x1
 #define  GSWIP_MII_CFG_MODE_RMIIP      0x2
 #define GSWIP_PCE_DEFPVID(p)           (0x486 + ((p) * 0xA))
 
 #define GSWIP_MAC_FLEN                 0x8C5
+#define GSWIP_MAC_CTRL_0p(p)           (0x903 + ((p) * 0xC))
+#define  GSWIP_MAC_CTRL_0_PADEN                BIT(8)
+#define  GSWIP_MAC_CTRL_0_FCS_EN       BIT(7)
+#define  GSWIP_MAC_CTRL_0_FCON_MASK    0x0070
+#define  GSWIP_MAC_CTRL_0_FCON_AUTO    0x0000
+#define  GSWIP_MAC_CTRL_0_FCON_RX      0x0010
+#define  GSWIP_MAC_CTRL_0_FCON_TX      0x0020
+#define  GSWIP_MAC_CTRL_0_FCON_RXTX    0x0030
+#define  GSWIP_MAC_CTRL_0_FCON_NONE    0x0040
+#define  GSWIP_MAC_CTRL_0_FDUP_MASK    0x000C
+#define  GSWIP_MAC_CTRL_0_FDUP_AUTO    0x0000
+#define  GSWIP_MAC_CTRL_0_FDUP_EN      0x0004
+#define  GSWIP_MAC_CTRL_0_FDUP_DIS     0x000C
+#define  GSWIP_MAC_CTRL_0_GMII_MASK    0x0003
+#define  GSWIP_MAC_CTRL_0_GMII_AUTO    0x0000
+#define  GSWIP_MAC_CTRL_0_GMII_MII     0x0001
+#define  GSWIP_MAC_CTRL_0_GMII_RGMII   0x0002
 #define GSWIP_MAC_CTRL_2p(p)           (0x905 + ((p) * 0xC))
 #define GSWIP_MAC_CTRL_2_MLEN          BIT(3) /* Maximum Untagged Frame Length */
 
@@ -653,16 +674,13 @@ static int gswip_port_enable(struct dsa_switch *ds, int port,
                          GSWIP_SDMA_PCTRLp(port));
 
        if (!dsa_is_cpu_port(ds, port)) {
-               u32 macconf = GSWIP_MDIO_PHY_LINK_AUTO |
-                             GSWIP_MDIO_PHY_SPEED_AUTO |
-                             GSWIP_MDIO_PHY_FDUP_AUTO |
-                             GSWIP_MDIO_PHY_FCONTX_AUTO |
-                             GSWIP_MDIO_PHY_FCONRX_AUTO |
-                             (phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK);
-
-               gswip_mdio_w(priv, macconf, GSWIP_MDIO_PHYp(port));
-               /* Activate MDIO auto polling */
-               gswip_mdio_mask(priv, 0, BIT(port), GSWIP_MDIO_MDC_CFG0);
+               u32 mdio_phy = 0;
+
+               if (phydev)
+                       mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
+
+               gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
+                               GSWIP_MDIO_PHYp(port));
        }
 
        return 0;
@@ -675,14 +693,6 @@ static void gswip_port_disable(struct dsa_switch *ds, int port)
        if (!dsa_is_user_port(ds, port))
                return;
 
-       if (!dsa_is_cpu_port(ds, port)) {
-               gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_DOWN,
-                               GSWIP_MDIO_PHY_LINK_MASK,
-                               GSWIP_MDIO_PHYp(port));
-               /* Deactivate MDIO auto polling */
-               gswip_mdio_mask(priv, BIT(port), 0, GSWIP_MDIO_MDC_CFG0);
-       }
-
        gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
                          GSWIP_FDMA_PCTRLp(port));
        gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
@@ -794,14 +804,32 @@ static int gswip_setup(struct dsa_switch *ds)
        gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
        gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);
 
-       /* disable PHY auto polling */
+       /* Deactivate MDIO PHY auto polling. Some PHYs, such as the AR8030,
+        * have an interoperability problem with this auto polling mechanism
+        * because their status registers report a different link state than
+        * the actual one. The AR8030 has the BMSR_ESTATEN bit set as well as
+        * ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. This makes the auto
+        * polling state machine consider the link to be negotiated at
+        * 1Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY this
+        * leads to the switch port being completely dead (RX and TX are both
+        * not working).
+        * Also with various other PHY / port combinations (PHY11G GPHY,
+        * PHY22F GPHY, external RGMII PEF7071/7072) any traffic would stop.
+        * Sometimes it would work fine for a few minutes to hours and then
+        * stop; on other devices no traffic could be sent or received at
+        * all. Testing shows that these problems go away when PHY auto
+        * polling is disabled.
+        */
        gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);
+
        /* Configure the MDIO Clock 2.5 MHz */
        gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
 
-       /* Disable the xMII link */
+       /* Disable the xMII interface and clear its isolation bit */
        for (i = 0; i < priv->hw_info->max_ports; i++)
-               gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, i);
+               gswip_mii_mask_cfg(priv,
+                                  GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
+                                  0, i);
 
        /* enable special tag insertion on cpu port */
        gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
@@ -1450,6 +1478,112 @@ unsupported:
        return;
 }
 
+static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
+{
+       u32 mdio_phy;
+
+       if (link)
+               mdio_phy = GSWIP_MDIO_PHY_LINK_UP;
+       else
+               mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN;
+
+       gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy,
+                       GSWIP_MDIO_PHYp(port));
+}
+
+static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
+                                phy_interface_t interface)
+{
+       u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0;
+
+       switch (speed) {
+       case SPEED_10:
+               mdio_phy = GSWIP_MDIO_PHY_SPEED_M10;
+
+               if (interface == PHY_INTERFACE_MODE_RMII)
+                       mii_cfg = GSWIP_MII_CFG_RATE_M50;
+               else
+                       mii_cfg = GSWIP_MII_CFG_RATE_M2P5;
+
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
+               break;
+
+       case SPEED_100:
+               mdio_phy = GSWIP_MDIO_PHY_SPEED_M100;
+
+               if (interface == PHY_INTERFACE_MODE_RMII)
+                       mii_cfg = GSWIP_MII_CFG_RATE_M50;
+               else
+                       mii_cfg = GSWIP_MII_CFG_RATE_M25;
+
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
+               break;
+
+       case SPEED_1000:
+               mdio_phy = GSWIP_MDIO_PHY_SPEED_G1;
+
+               mii_cfg = GSWIP_MII_CFG_RATE_M125;
+
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII;
+               break;
+       }
+
+       gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy,
+                       GSWIP_MDIO_PHYp(port));
+       gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port);
+       gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0,
+                         GSWIP_MAC_CTRL_0p(port));
+}
+
+static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex)
+{
+       u32 mac_ctrl_0, mdio_phy;
+
+       if (duplex == DUPLEX_FULL) {
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN;
+               mdio_phy = GSWIP_MDIO_PHY_FDUP_EN;
+       } else {
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS;
+               mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS;
+       }
+
+       gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0,
+                         GSWIP_MAC_CTRL_0p(port));
+       gswip_mdio_mask(priv, GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy,
+                       GSWIP_MDIO_PHYp(port));
+}
+
+static void gswip_port_set_pause(struct gswip_priv *priv, int port,
+                                bool tx_pause, bool rx_pause)
+{
+       u32 mac_ctrl_0, mdio_phy;
+
+       if (tx_pause && rx_pause) {
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX;
+               mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
+                          GSWIP_MDIO_PHY_FCONRX_EN;
+       } else if (tx_pause) {
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX;
+               mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
+                          GSWIP_MDIO_PHY_FCONRX_DIS;
+       } else if (rx_pause) {
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX;
+               mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
+                          GSWIP_MDIO_PHY_FCONRX_EN;
+       } else {
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE;
+               mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
+                          GSWIP_MDIO_PHY_FCONRX_DIS;
+       }
+
+       gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK,
+                         mac_ctrl_0, GSWIP_MAC_CTRL_0p(port));
+       gswip_mdio_mask(priv,
+                       GSWIP_MDIO_PHY_FCONTX_MASK |
+                       GSWIP_MDIO_PHY_FCONRX_MASK,
+                       mdio_phy, GSWIP_MDIO_PHYp(port));
+}
+
 static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
                                     unsigned int mode,
                                     const struct phylink_link_state *state)
@@ -1469,6 +1603,9 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
                break;
        case PHY_INTERFACE_MODE_RMII:
                miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
+
+               /* Configure the RMII clock as output: */
+               miicfg |= GSWIP_MII_CFG_RMII_CLK;
                break;
        case PHY_INTERFACE_MODE_RGMII:
        case PHY_INTERFACE_MODE_RGMII_ID:
@@ -1481,7 +1618,11 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
                        "Unsupported interface: %d\n", state->interface);
                return;
        }
-       gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_MODE_MASK, miicfg, port);
+
+       gswip_mii_mask_cfg(priv,
+                          GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
+                          GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS,
+                          miicfg, port);
 
        switch (state->interface) {
        case PHY_INTERFACE_MODE_RGMII_ID:
@@ -1506,6 +1647,9 @@ static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
        struct gswip_priv *priv = ds->priv;
 
        gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
+
+       if (!dsa_is_cpu_port(ds, port))
+               gswip_port_set_link(priv, port, false);
 }
 
 static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
@@ -1517,6 +1661,13 @@ static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
 {
        struct gswip_priv *priv = ds->priv;
 
+       if (!dsa_is_cpu_port(ds, port)) {
+               gswip_port_set_link(priv, port, true);
+               gswip_port_set_speed(priv, port, speed, interface);
+               gswip_port_set_duplex(priv, port, duplex);
+               gswip_port_set_pause(priv, port, tx_pause, rx_pause);
+       }
+
        gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
 }
 
index f06f5fa..9871d7c 100644 (file)
@@ -436,34 +436,32 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
                             TD_DM_DRVP(8) | TD_DM_DRVN(8));
 
        /* Setup core clock for MT7530 */
-       if (!trgint) {
-               /* Disable MT7530 core clock */
-               core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
-
-               /* Disable PLL, since phy_device has not yet been created
-                * provided for phy_[read,write]_mmd_indirect is called, we
-                * provide our own core_write_mmd_indirect to complete this
-                * function.
-                */
-               core_write_mmd_indirect(priv,
-                                       CORE_GSWPLL_GRP1,
-                                       MDIO_MMD_VEND2,
-                                       0);
-
-               /* Set core clock into 500Mhz */
-               core_write(priv, CORE_GSWPLL_GRP2,
-                          RG_GSWPLL_POSDIV_500M(1) |
-                          RG_GSWPLL_FBKDIV_500M(25));
+       /* Disable MT7530 core clock */
+       core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
 
-               /* Enable PLL */
-               core_write(priv, CORE_GSWPLL_GRP1,
-                          RG_GSWPLL_EN_PRE |
-                          RG_GSWPLL_POSDIV_200M(2) |
-                          RG_GSWPLL_FBKDIV_200M(32));
-
-               /* Enable MT7530 core clock */
-               core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
-       }
+       /* Disable PLL, since phy_device has not yet been created
+        * provided for phy_[read,write]_mmd_indirect is called, we
+        * provide our own core_write_mmd_indirect to complete this
+        * function.
+        */
+       core_write_mmd_indirect(priv,
+                               CORE_GSWPLL_GRP1,
+                               MDIO_MMD_VEND2,
+                               0);
+
+       /* Set core clock into 500Mhz */
+       core_write(priv, CORE_GSWPLL_GRP2,
+                  RG_GSWPLL_POSDIV_500M(1) |
+                  RG_GSWPLL_FBKDIV_500M(25));
+
+       /* Enable PLL */
+       core_write(priv, CORE_GSWPLL_GRP1,
+                  RG_GSWPLL_EN_PRE |
+                  RG_GSWPLL_POSDIV_200M(2) |
+                  RG_GSWPLL_FBKDIV_200M(32));
+
+       /* Enable MT7530 core clock */
+       core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
 
        /* Setup the MT7530 TRGMII Tx Clock */
        core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
index 187b0b9..f78daba 100644 (file)
@@ -1534,8 +1534,7 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
        pci_set_master(pdev);
 
-       ioaddr = pci_resource_start(pdev, 0);
-       if (!ioaddr) {
+       if (!pci_resource_len(pdev, 0)) {
                if (pcnet32_debug & NETIF_MSG_PROBE)
                        pr_err("card has no PCI IO resources, aborting\n");
                err = -ENODEV;
@@ -1548,6 +1547,8 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
                        pr_err("architecture does not support 32bit PCI busmaster DMA\n");
                goto err_disable_dev;
        }
+
+       ioaddr = pci_resource_start(pdev, 0);
        if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
                if (pcnet32_debug & NETIF_MSG_PROBE)
                        pr_err("io address range already allocated\n");
index ba8321e..3305979 100644 (file)
 #define XGBE_DMA_SYS_AWCR      0x30303030
 
 /* DMA cache settings - PCI device */
-#define XGBE_DMA_PCI_ARCR      0x00000003
-#define XGBE_DMA_PCI_AWCR      0x13131313
-#define XGBE_DMA_PCI_AWARCR    0x00000313
+#define XGBE_DMA_PCI_ARCR      0x000f0f0f
+#define XGBE_DMA_PCI_AWCR      0x0f0f0f0f
+#define XGBE_DMA_PCI_AWARCR    0x00000f0f
 
 /* DMA channel interrupt modes */
 #define XGBE_IRQ_MODE_EDGE     0
index f8a168b..cb88ffb 100644 (file)
@@ -54,7 +54,7 @@ config B44_PCI
 config BCM4908_ENET
        tristate "Broadcom BCM4908 internal mac support"
        depends on ARCH_BCM4908 || COMPILE_TEST
-       default y
+       default y if ARCH_BCM4908
        help
          This driver supports Ethernet controller integrated into Broadcom
          BCM4908 family SoCs.
index 98cf82d..6598193 100644 (file)
@@ -172,6 +172,7 @@ static int bcm4908_dma_alloc_buf_descs(struct bcm4908_enet *enet,
 
 err_free_buf_descs:
        dma_free_coherent(dev, size, ring->cpu_addr, ring->dma_addr);
+       ring->cpu_addr = NULL;
        return -ENOMEM;
 }
 
index 15362d0..6e5cf49 100644 (file)
@@ -3239,6 +3239,9 @@ static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
        bool cmp_b = false;
        bool cmp_c = false;
 
+       if (!macb_is_gem(bp))
+               return;
+
        tp4sp_v = &(fs->h_u.tcp_ip4_spec);
        tp4sp_m = &(fs->m_u.tcp_ip4_spec);
 
@@ -3607,6 +3610,7 @@ static void macb_restore_features(struct macb *bp)
 {
        struct net_device *netdev = bp->dev;
        netdev_features_t features = netdev->features;
+       struct ethtool_rx_fs_item *item;
 
        /* TX checksum offload */
        macb_set_txcsum_feature(bp, features);
@@ -3615,6 +3619,9 @@ static void macb_restore_features(struct macb *bp)
        macb_set_rxcsum_feature(bp, features);
 
        /* RX Flow Filters */
+       list_for_each_entry(item, &bp->rx_fs_list.list, list)
+               gem_prog_cmp_regs(bp, &item->fs);
+
        macb_set_rxflow_feature(bp, features);
 }
 
index 6c85a10..23a2ebd 100644 (file)
@@ -1794,11 +1794,25 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
        struct cudbg_buffer temp_buff = { 0 };
        struct sge_qbase_reg_field *sge_qbase;
        struct ireg_buf *ch_sge_dbg;
+       u8 padap_running = 0;
        int i, rc;
+       u32 size;
 
-       rc = cudbg_get_buff(pdbg_init, dbg_buff,
-                           sizeof(*ch_sge_dbg) * 2 + sizeof(*sge_qbase),
-                           &temp_buff);
+       /* Accessing SGE_QBASE_MAP[0-3] and SGE_QBASE_INDEX regs can
+        * lead to SGE missing doorbells under heavy traffic. So, only
+        * collect them when adapter is idle.
+        */
+       for_each_port(padap, i) {
+               padap_running = netif_running(padap->port[i]);
+               if (padap_running)
+                       break;
+       }
+
+       size = sizeof(*ch_sge_dbg) * 2;
+       if (!padap_running)
+               size += sizeof(*sge_qbase);
+
+       rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
        if (rc)
                return rc;
 
@@ -1820,7 +1834,8 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
                ch_sge_dbg++;
        }
 
-       if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
+       if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5 &&
+           !padap_running) {
                sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg;
                /* 1 addr reg SGE_QBASE_INDEX and 4 data reg
                 * SGE_QBASE_MAP[0-3]
index 98829e4..80882cf 100644 (file)
@@ -2090,7 +2090,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
                0x1190, 0x1194,
                0x11a0, 0x11a4,
                0x11b0, 0x11b4,
-               0x11fc, 0x1274,
+               0x11fc, 0x123c,
+               0x1254, 0x1274,
                0x1280, 0x133c,
                0x1800, 0x18fc,
                0x3000, 0x302c,
index 169e10c..1115b8f 100644 (file)
@@ -722,7 +722,7 @@ static int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
                kvfree(tx_info);
                return 0;
        }
-       tx_info->open_state = false;
+       tx_info->open_state = CH_KTLS_OPEN_SUCCESS;
        spin_unlock(&tx_info->lock);
 
        complete(&tx_info->completion);
index 88bfe21..04421ae 100644 (file)
@@ -1337,6 +1337,7 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget)
         */
        if (unlikely(priv->need_mac_restart)) {
                ftgmac100_start_hw(priv);
+               priv->need_mac_restart = false;
 
                /* Re-enable "bad" interrupts */
                iowrite32(FTGMAC100_INT_BAD,
index 1cf8ef7..3ec4d9f 100644 (file)
@@ -363,7 +363,11 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
 
 static int gfar_set_mac_addr(struct net_device *dev, void *p)
 {
-       eth_mac_addr(dev, p);
+       int ret;
+
+       ret = eth_mac_addr(dev, p);
+       if (ret)
+               return ret;
 
        gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
 
index e3f81c7..b0dbe6d 100644 (file)
@@ -3966,7 +3966,6 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
         *    normalcy is to reset.
         * 2. A new reset request from the stack due to timeout
         *
-        * For the first case,error event might not have ae handle available.
         * check if this is a new reset request and we are not here just because
         * last reset attempt did not succeed and watchdog hit us again. We will
         * know this if last reset request did not occur very recently (watchdog
@@ -3976,14 +3975,14 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
         * want to make sure we throttle the reset request. Therefore, we will
         * not allow it again before 3*HZ times.
         */
-       if (!handle)
-               handle = &hdev->vport[0].nic;
 
        if (time_before(jiffies, (hdev->last_reset_time +
                                  HCLGE_RESET_INTERVAL))) {
                mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
                return;
-       } else if (hdev->default_reset_request) {
+       }
+
+       if (hdev->default_reset_request) {
                hdev->reset_level =
                        hclge_get_reset_level(ae_dev,
                                              &hdev->default_reset_request);
@@ -11211,7 +11210,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
        if (ret)
                return ret;
 
-       /* RSS indirection table has been configuared by user */
+       /* RSS indirection table has been configured by user */
        if (rxfh_configured)
                goto out;
 
index 700e068..e295d35 100644 (file)
@@ -2193,7 +2193,7 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
 
        if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
                               &hdev->reset_state)) {
-               /* PF has initmated that it is about to reset the hardware.
+               /* PF has intimated that it is about to reset the hardware.
                 * We now have to poll & check if hardware has actually
                 * completed the reset sequence. On hardware reset completion,
                 * VF needs to reset the client and ae device.
@@ -2624,14 +2624,14 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
 {
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 
+       clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
        hclgevf_reset_tqp_stats(handle);
 
        hclgevf_request_link_info(hdev);
 
        hclgevf_update_link_mode(hdev);
 
-       clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
-
        return 0;
 }
 
@@ -3497,7 +3497,7 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
        if (ret)
                return ret;
 
-       /* RSS indirection table has been configuared by user */
+       /* RSS indirection table has been configured by user */
        if (rxfh_configured)
                goto out;
 
index 88faf05..0b1e890 100644 (file)
@@ -899,6 +899,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
        } else {
                data &= ~IGP02E1000_PM_D0_LPLU;
                ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
+               if (ret_val)
+                       return ret_val;
                /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
                 * during Dx states where the power conservation is most
                 * important.  During driver activity we should enable
index 69a2329..db79c4e 100644
@@ -1,8 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright(c) 1999 - 2018 Intel Corporation. */
 
-#ifndef _E1000_HW_H_
-#define _E1000_HW_H_
+#ifndef _E1000E_HW_H_
+#define _E1000E_HW_H_
 
 #include "regs.h"
 #include "defines.h"
@@ -714,4 +714,4 @@ struct e1000_hw {
 #include "80003es2lan.h"
 #include "ich8lan.h"
 
-#endif
+#endif /* _E1000E_HW_H_ */
index e9b82c2..a094800 100644
@@ -5974,15 +5974,19 @@ static void e1000_reset_task(struct work_struct *work)
        struct e1000_adapter *adapter;
        adapter = container_of(work, struct e1000_adapter, reset_task);
 
+       rtnl_lock();
        /* don't run the task if already down */
-       if (test_bit(__E1000_DOWN, &adapter->state))
+       if (test_bit(__E1000_DOWN, &adapter->state)) {
+               rtnl_unlock();
                return;
+       }
 
        if (!(adapter->flags & FLAG_RESTART_NOW)) {
                e1000e_dump(adapter);
                e_err("Reset adapter unexpectedly\n");
        }
        e1000e_reinit_locked(adapter);
+       rtnl_unlock();
 }
 
 /**
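The e1000e hunk above widens the rtnl lock to cover both the __E1000_DOWN check and the reinit, so the device state cannot change between the test and the reset. A minimal pthread sketch of the same check-under-lock pattern (a hypothetical model, not the rtnl API):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER; /* stands in for rtnl_lock */
static bool device_down;

/* hypothetical reset worker: the DOWN test must happen under the lock,
 * or the device could be taken down between the test and the reinit */
static void reset_task(void)
{
        pthread_mutex_lock(&cfg_lock);
        if (device_down) {
                pthread_mutex_unlock(&cfg_lock);        /* unlock on every exit path */
                return;
        }
        /* ... reinitialize the device while still holding the lock ... */
        pthread_mutex_unlock(&cfg_lock);
}

int main(void)
{
        device_down = true;
        reset_task();           /* returns immediately, lock released */
        return 0;
}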
index cd53981..15f93b3 100644
@@ -142,6 +142,7 @@ enum i40e_state_t {
        __I40E_VIRTCHNL_OP_PENDING,
        __I40E_RECOVERY_MODE,
        __I40E_VF_RESETS_DISABLED,      /* disable resets during i40e_remove */
+       __I40E_VFS_RELEASING,
        /* This must be last as it determines the size of the BITMAP */
        __I40E_STATE_SIZE__,
 };
index d7c13ca..d627b59 100644
@@ -578,6 +578,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
        case RING_TYPE_XDP:
                ring = kmemdup(vsi->xdp_rings[ring_id], sizeof(*ring), GFP_KERNEL);
                break;
+       default:
+               ring = NULL;
+               break;
        }
        if (!ring)
                return;
index c70dec6..0e92668 100644
@@ -232,6 +232,8 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
        I40E_STAT(struct i40e_vsi, _name, _stat)
 #define I40E_VEB_STAT(_name, _stat) \
        I40E_STAT(struct i40e_veb, _name, _stat)
+#define I40E_VEB_TC_STAT(_name, _stat) \
+       I40E_STAT(struct i40e_cp_veb_tc_stats, _name, _stat)
 #define I40E_PFC_STAT(_name, _stat) \
        I40E_STAT(struct i40e_pfc_stats, _name, _stat)
 #define I40E_QUEUE_STAT(_name, _stat) \
@@ -266,11 +268,18 @@ static const struct i40e_stats i40e_gstrings_veb_stats[] = {
        I40E_VEB_STAT("veb.rx_unknown_protocol", stats.rx_unknown_protocol),
 };
 
+struct i40e_cp_veb_tc_stats {
+       u64 tc_rx_packets;
+       u64 tc_rx_bytes;
+       u64 tc_tx_packets;
+       u64 tc_tx_bytes;
+};
+
 static const struct i40e_stats i40e_gstrings_veb_tc_stats[] = {
-       I40E_VEB_STAT("veb.tc_%u_tx_packets", tc_stats.tc_tx_packets),
-       I40E_VEB_STAT("veb.tc_%u_tx_bytes", tc_stats.tc_tx_bytes),
-       I40E_VEB_STAT("veb.tc_%u_rx_packets", tc_stats.tc_rx_packets),
-       I40E_VEB_STAT("veb.tc_%u_rx_bytes", tc_stats.tc_rx_bytes),
+       I40E_VEB_TC_STAT("veb.tc_%u_tx_packets", tc_tx_packets),
+       I40E_VEB_TC_STAT("veb.tc_%u_tx_bytes", tc_tx_bytes),
+       I40E_VEB_TC_STAT("veb.tc_%u_rx_packets", tc_rx_packets),
+       I40E_VEB_TC_STAT("veb.tc_%u_rx_bytes", tc_rx_bytes),
 };
 
 static const struct i40e_stats i40e_gstrings_misc_stats[] = {
@@ -1101,6 +1110,7 @@ static int i40e_get_link_ksettings(struct net_device *netdev,
 
        /* Set flow control settings */
        ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
+       ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause);
 
        switch (hw->fc.requested_mode) {
        case I40E_FC_FULL:
@@ -2216,6 +2226,29 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
        }
 }
 
+/**
+ * i40e_get_veb_tc_stats - copy VEB TC statistics to formatted structure
+ * @tc: the TC statistics in VEB structure (veb->tc_stats)
+ * @i: the index of traffic class in (veb->tc_stats) structure to copy
+ *
+ * Copy VEB TC statistics from the structure of arrays (veb->tc_stats) into
+ * a flat i40e_cp_veb_tc_stats structure holding the statistics for the
+ * given TC.
+ **/
+static struct i40e_cp_veb_tc_stats
+i40e_get_veb_tc_stats(struct i40e_veb_tc_stats *tc, unsigned int i)
+{
+       struct i40e_cp_veb_tc_stats veb_tc = {
+               .tc_rx_packets = tc->tc_rx_packets[i],
+               .tc_rx_bytes = tc->tc_rx_bytes[i],
+               .tc_tx_packets = tc->tc_tx_packets[i],
+               .tc_tx_bytes = tc->tc_tx_bytes[i],
+       };
+
+       return veb_tc;
+}
+
 /**
  * i40e_get_pfc_stats - copy HW PFC statistics to formatted structure
  * @pf: the PF device structure
@@ -2300,8 +2333,16 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
                               i40e_gstrings_veb_stats);
 
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
-               i40e_add_ethtool_stats(&data, veb_stats ? veb : NULL,
-                                      i40e_gstrings_veb_tc_stats);
+               if (veb_stats) {
+                       struct i40e_cp_veb_tc_stats veb_tc =
+                               i40e_get_veb_tc_stats(&veb->tc_stats, i);
+
+                       i40e_add_ethtool_stats(&data, &veb_tc,
+                                              i40e_gstrings_veb_tc_stats);
+               } else {
+                       i40e_add_ethtool_stats(&data, NULL,
+                                              i40e_gstrings_veb_tc_stats);
+               }
 
        i40e_add_ethtool_stats(&data, pf, i40e_gstrings_stats);
 
@@ -5439,7 +5480,7 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
 
                status = i40e_aq_get_phy_register(hw,
                                I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
-                               true, addr, offset, &value, NULL);
+                               addr, true, offset, &value, NULL);
                if (status)
                        return -EIO;
                data[i] = value;
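The i40e_cp_veb_tc_stats changes above exist because the generic stat walker reads members at fixed offsets, which per-TC arrays cannot satisfy; each TC is first copied into a flat struct. A standalone sketch of that SoA-to-flat copy (hypothetical types, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define MAX_TC 8

/* storage layout in the driver: structure of arrays, indexed by TC */
struct tc_stats_soa {
        uint64_t rx_packets[MAX_TC];
        uint64_t tx_packets[MAX_TC];
};

/* layout the generic stat walker expects: one flat struct per TC */
struct tc_stats_flat {
        uint64_t rx_packets;
        uint64_t tx_packets;
};

/* copy one TC out of the SoA layout, mirroring i40e_get_veb_tc_stats() */
static struct tc_stats_flat get_tc(const struct tc_stats_soa *s, unsigned int i)
{
        struct tc_stats_flat f = {
                .rx_packets = s->rx_packets[i],
                .tx_packets = s->tx_packets[i],
        };

        return f;
}

int main(void)
{
        struct tc_stats_soa soa = { .rx_packets = {10, 20}, .tx_packets = {1, 2} };
        struct tc_stats_flat f = get_tc(&soa, 1);

        printf("tc1: rx=%llu tx=%llu\n",
               (unsigned long long)f.rx_packets,
               (unsigned long long)f.tx_packets);
        return 0;
}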
index 353deae..30ad7c0 100644
@@ -2560,8 +2560,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
                                 i40e_stat_str(hw, aq_ret),
                                 i40e_aq_str(hw, hw->aq.asq_last_status));
                } else {
-                       dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n",
-                                vsi->netdev->name,
+                       dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
                                 cur_multipromisc ? "entering" : "leaving");
                }
        }
@@ -3258,6 +3257,17 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
        return 0;
 }
 
+/**
+ * i40e_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the offset value for ring into the data buffer.
+ */
+static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
+{
+       return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
+}
+
 /**
  * i40e_configure_rx_ring - Configure a receive ring context
  * @ring: The Rx ring to configure
@@ -3369,6 +3379,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
        else
                set_ring_build_skb_enabled(ring);
 
+       ring->rx_offset = i40e_rx_offset(ring);
+
        /* cache tail for quicker writes, and clear the reg before use */
        ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
        writel(0, ring->tail);
@@ -6725,9 +6737,9 @@ out:
                        set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
                        set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
                }
-       /* registers are set, lets apply */
-       if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
-               ret = i40e_hw_set_dcb_config(pf, new_cfg);
+               /* registers are set, lets apply */
+               if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
+                       ret = i40e_hw_set_dcb_config(pf, new_cfg);
        }
 
 err:
@@ -10560,12 +10572,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
                goto end_core_reset;
        }
 
-       if (!lock_acquired)
-               rtnl_lock();
-       ret = i40e_setup_pf_switch(pf, reinit);
-       if (ret)
-               goto end_unlock;
-
 #ifdef CONFIG_I40E_DCB
        /* Enable FW to write a default DCB config on link-up
         * unless I40E_FLAG_TC_MQPRIO was enabled or DCB
@@ -10580,7 +10586,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
                        i40e_aq_set_dcb_parameters(hw, false, NULL);
                        dev_warn(&pf->pdev->dev,
                                 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
-                                pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+                       pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
                } else {
                        i40e_aq_set_dcb_parameters(hw, true, NULL);
                        ret = i40e_init_pf_dcb(pf);
@@ -10594,6 +10600,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
        }
 
 #endif /* CONFIG_I40E_DCB */
+       if (!lock_acquired)
+               rtnl_lock();
+       ret = i40e_setup_pf_switch(pf, reinit);
+       if (ret)
+               goto end_unlock;
 
        /* The driver only wants link up/down and module qualification
         * reports from firmware.  Note the negative logic.
@@ -15127,12 +15138,16 @@ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
         * in order to register the netdev
         */
        v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
-       if (v_idx < 0)
+       if (v_idx < 0) {
+               err = v_idx;
                goto err_switch_setup;
+       }
        pf->lan_vsi = v_idx;
        vsi = pf->vsi[v_idx];
-       if (!vsi)
+       if (!vsi) {
+               err = -EFAULT;
                goto err_switch_setup;
+       }
        vsi->alloc_queue_pairs = 1;
        err = i40e_config_netdev(vsi);
        if (err)
index 627794b..06b4271 100644
@@ -1569,17 +1569,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
        }
 }
 
-/**
- * i40e_rx_offset - Return expected offset into page to access data
- * @rx_ring: Ring we are requesting offset of
- *
- * Returns the offset value for ring into the data buffer.
- */
-static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
-{
-       return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
-}
-
 /**
  * i40e_setup_rx_descriptors - Allocate Rx descriptors
  * @rx_ring: Rx descriptor ring (for a specific queue) to setup
@@ -1608,7 +1597,6 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
        rx_ring->next_to_alloc = 0;
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
-       rx_ring->rx_offset = i40e_rx_offset(rx_ring);
 
        /* XDP RX-queue info only needed for RX rings exposed to XDP */
        if (rx_ring->vsi->type == I40E_VSI_MAIN) {
@@ -2307,8 +2295,7 @@ int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
  * @rx_ring: Rx ring being processed
  * @xdp: XDP buffer containing the frame
  **/
-static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
-                                   struct xdp_buff *xdp)
+static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
 {
        int err, result = I40E_XDP_PASS;
        struct i40e_ring *xdp_ring;
@@ -2347,7 +2334,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
        }
 xdp_out:
        rcu_read_unlock();
-       return ERR_PTR(-result);
+       return result;
 }
 
 /**
@@ -2460,6 +2447,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
        unsigned int xdp_xmit = 0;
        bool failure = false;
        struct xdp_buff xdp;
+       int xdp_res = 0;
 
 #if (PAGE_SIZE < 8192)
        frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
@@ -2525,12 +2513,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                        /* At larger PAGE_SIZE, frame_sz depend on len size */
                        xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
 #endif
-                       skb = i40e_run_xdp(rx_ring, &xdp);
+                       xdp_res = i40e_run_xdp(rx_ring, &xdp);
                }
 
-               if (IS_ERR(skb)) {
-                       unsigned int xdp_res = -PTR_ERR(skb);
-
+               if (xdp_res) {
                        if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
                                xdp_xmit |= xdp_res;
                                i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
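The i40e_run_xdp() hunks above stop encoding the XDP verdict inside an ERR_PTR()-wrapped skb pointer and return a plain bitmask instead, keeping the pointer and the verdict in separate channels. A minimal sketch of the verdict-as-int pattern (flag values are hypothetical, not the driver's):

#include <stdio.h>

/* hypothetical XDP verdict bits, modeled on I40E_XDP_* */
#define XDP_VERDICT_PASS   0x0
#define XDP_VERDICT_TX     0x2
#define XDP_VERDICT_REDIR  0x4

/* returning the verdict as a plain int avoids smuggling bits through
 * ERR_PTR() and then decoding them back out of a fake skb pointer */
static int run_xdp(int want_tx)
{
        return want_tx ? XDP_VERDICT_TX : XDP_VERDICT_PASS;
}

int main(void)
{
        int xdp_res = run_xdp(1);

        if (xdp_res & (XDP_VERDICT_TX | XDP_VERDICT_REDIR))
                printf("frame consumed by XDP, flip the buffer\n");
        else
                printf("frame passes on to the stack\n");
        return 0;
}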
index 1b6ec9b..5d301a4 100644
@@ -137,6 +137,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
  **/
 static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
 {
+       struct i40e_pf *pf = vf->pf;
        int i;
 
        i40e_vc_notify_vf_reset(vf);
@@ -147,6 +148,11 @@ static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
         * ensure a reset.
         */
        for (i = 0; i < 20; i++) {
+               /* If the PF is in the VFs releasing state, resetting the VF
+                * is impossible, so leave it.
+                */
+               if (test_bit(__I40E_VFS_RELEASING, pf->state))
+                       return;
                if (i40e_reset_vf(vf, false))
                        return;
                usleep_range(10000, 20000);
@@ -1574,6 +1580,8 @@ void i40e_free_vfs(struct i40e_pf *pf)
 
        if (!pf->vf)
                return;
+
+       set_bit(__I40E_VFS_RELEASING, pf->state);
        while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
                usleep_range(1000, 2000);
 
@@ -1631,6 +1639,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
                }
        }
        clear_bit(__I40E_VF_DISABLE, pf->state);
+       clear_bit(__I40E_VFS_RELEASING, pf->state);
 }
 
 #ifdef CONFIG_PCI_IOV
index fc32c50..12ca841 100644
@@ -471,7 +471,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
 
        nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, descs, budget);
        if (!nb_pkts)
-               return false;
+               return true;
 
        if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
                nb_processed = xdp_ring->count - xdp_ring->next_to_use;
@@ -488,7 +488,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
 
        i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes);
 
-       return true;
+       return nb_pkts < budget;
 }
 
 /**
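The i40e_xmit_zc() change above flips the return-value semantics: the function now reports whether the queue was fully drained, and consuming the whole budget means the NAPI poll must run again. A small sketch of that budget convention (a hypothetical helper, not the driver function):

#include <stdbool.h>
#include <stdio.h>

/* hypothetical zero-copy transmit poll: returns true when the queue is
 * drained (fewer packets than budget), false when more work may remain */
static bool xmit_zc(unsigned int pending, unsigned int budget)
{
        unsigned int nb_pkts = pending < budget ? pending : budget;

        if (!nb_pkts)
                return true;    /* nothing queued: done, not "failed" */

        /* ... send nb_pkts descriptors ... */

        return nb_pkts < budget;        /* full budget used => poll again */
}

int main(void)
{
        printf("drained: %d\n", xmit_zc(4, 64));        /* 1: under budget */
        printf("drained: %d\n", xmit_zc(100, 64));      /* 0: budget exhausted */
        return 0;
}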
index 3577064..17101c4 100644
@@ -196,7 +196,6 @@ enum ice_state {
        __ICE_NEEDS_RESTART,
        __ICE_PREPARED_FOR_RESET,       /* set by driver when prepared */
        __ICE_RESET_OICR_RECV,          /* set by driver after rcv reset OICR */
-       __ICE_DCBNL_DEVRESET,           /* set by dcbnl devreset */
        __ICE_PFR_REQ,                  /* set by driver and peers */
        __ICE_CORER_REQ,                /* set by driver and peers */
        __ICE_GLOBR_REQ,                /* set by driver and peers */
@@ -624,7 +623,7 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
 void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
 const char *ice_stat_str(enum ice_status stat_err);
 const char *ice_aq_str(enum ice_aq_err aq_err);
-bool ice_is_wol_supported(struct ice_pf *pf);
+bool ice_is_wol_supported(struct ice_hw *hw);
 int
 ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
                    bool is_tun);
@@ -642,6 +641,7 @@ int ice_fdir_create_dflt_rules(struct ice_pf *pf);
 int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
                          struct ice_rq_event_info *event);
 int ice_open(struct net_device *netdev);
+int ice_open_internal(struct net_device *netdev);
 int ice_stop(struct net_device *netdev);
 void ice_service_task_schedule(struct ice_pf *pf);
 
index 3124a3b..1148d76 100644
@@ -274,6 +274,22 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
        tlan_ctx->legacy_int = ICE_TX_LEGACY;
 }
 
+/**
+ * ice_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the offset value for ring into the data buffer.
+ */
+static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
+{
+       if (ice_ring_uses_build_skb(rx_ring))
+               return ICE_SKB_PAD;
+       else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
+               return XDP_PACKET_HEADROOM;
+
+       return 0;
+}
+
 /**
  * ice_setup_rx_ctx - Configure a receive ring context
  * @ring: The Rx ring to configure
@@ -413,11 +429,15 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
        else
                ice_set_ring_build_skb_ena(ring);
 
+       ring->rx_offset = ice_rx_offset(ring);
+
        /* init queue specific tail register */
        ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
        writel(0, ring->tail);
 
        if (ring->xsk_pool) {
+               bool ok;
+
                if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
                        dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
                                 num_bufs, ring->q_index);
@@ -426,8 +446,8 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
                        return 0;
                }
 
-               err = ice_alloc_rx_bufs_zc(ring, num_bufs);
-               if (err)
+               ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
+               if (!ok)
                        dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
                                 ring->q_index, pf_q);
                return 0;
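ice_rx_offset() above moves into the ring-configure path because the payload offset depends on how the ring delivers frames (build_skb padding vs. XDP headroom), which is only settled at configure time. A standalone sketch of the selection logic (the constants are made up, not the driver's values):

#include <stdio.h>

#define SKB_PAD         64      /* hypothetical build_skb headroom */
#define XDP_HEADROOM    256     /* hypothetical XDP headroom */

/* sketch of ice_rx_offset(): the payload offset inside the page depends
 * on the delivery path, so it is computed once at ring-configure time */
static unsigned int rx_offset(int uses_build_skb, int xdp_enabled)
{
        if (uses_build_skb)
                return SKB_PAD;
        if (xdp_enabled)
                return XDP_HEADROOM;
        return 0;
}

int main(void)
{
        printf("%u %u %u\n", rx_offset(1, 0), rx_offset(0, 1), rx_offset(0, 0));
        return 0;
}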
index 3d9475e..a20edf1 100644
@@ -717,8 +717,8 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
 
                        if (!data) {
                                data = devm_kcalloc(ice_hw_to_dev(hw),
-                                                   sizeof(*data),
                                                    ICE_AQC_FW_LOG_ID_MAX,
+                                                   sizeof(*data),
                                                    GFP_KERNEL);
                                if (!data)
                                        return ICE_ERR_NO_MEMORY;
index faaa08e..68866f4 100644
@@ -31,8 +31,8 @@ enum ice_ctl_q {
        ICE_CTL_Q_MAILBOX,
 };
 
-/* Control Queue timeout settings - max delay 250ms */
-#define ICE_CTL_Q_SQ_CMD_TIMEOUT       2500  /* Count 2500 times */
+/* Control Queue timeout settings - max delay 1s */
+#define ICE_CTL_Q_SQ_CMD_TIMEOUT       10000 /* Count 10000 times */
 #define ICE_CTL_Q_SQ_CMD_USEC          100   /* Check every 100usec */
 #define ICE_CTL_Q_ADMIN_INIT_TIMEOUT   10    /* Count 10 times */
 #define ICE_CTL_Q_ADMIN_INIT_MSEC      100   /* Check every 100msec */
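The control-queue hunk above raises the command timeout by increasing the poll count while leaving the per-iteration delay alone; the cap is simply count times interval (10000 x 100us = 1s). A userspace sketch of the same bounded poll (macro names shortened from ICE_CTL_Q_SQ_CMD_*, completion flag hypothetical):

#include <stdbool.h>
#include <unistd.h>

#define SQ_CMD_TIMEOUT  10000   /* iterations */
#define SQ_CMD_USEC     100     /* delay per iteration: 10000 * 100us = 1s */

/* hypothetical completion poll: the total wait is the product of the two
 * constants, so raising the count from 2500 to 10000 moves the cap from
 * 250ms to 1s without changing the polling granularity */
static bool wait_for_done(volatile const int *done)
{
        unsigned int i;

        for (i = 0; i < SQ_CMD_TIMEOUT; i++) {
                if (*done)
                        return true;
                usleep(SQ_CMD_USEC);
        }
        return false;           /* timed out */
}

int main(void)
{
        int done = 1;

        return wait_for_done(&done) ? 0 : 1;
}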
index e427279..211ac6f 100644
@@ -738,22 +738,27 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
 /**
  * ice_cee_to_dcb_cfg
  * @cee_cfg: pointer to CEE configuration struct
- * @dcbcfg: DCB configuration struct
+ * @pi: port information structure
  *
  * Convert CEE configuration from firmware to DCB configuration
  */
 static void
 ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
-                  struct ice_dcbx_cfg *dcbcfg)
+                  struct ice_port_info *pi)
 {
        u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
        u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift;
+       u8 i, j, err, sync, oper, app_index, ice_app_sel_type;
        u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
-       u8 i, err, sync, oper, app_index, ice_app_sel_type;
        u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift;
+       struct ice_dcbx_cfg *cmp_dcbcfg, *dcbcfg;
        u16 ice_app_prot_id_type;
 
-       /* CEE PG data to ETS config */
+       dcbcfg = &pi->qos_cfg.local_dcbx_cfg;
+       dcbcfg->dcbx_mode = ICE_DCBX_MODE_CEE;
+       dcbcfg->tlv_status = tlv_status;
+
+       /* CEE PG data */
        dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
 
        /* Note that the FW creates the oper_prio_tc nibbles reversed
@@ -780,10 +785,16 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
                }
        }
 
-       /* CEE PFC data to ETS config */
+       /* CEE PFC data */
        dcbcfg->pfc.pfcena = cee_cfg->oper_pfc_en;
        dcbcfg->pfc.pfccap = ICE_MAX_TRAFFIC_CLASS;
 
+       /* CEE APP TLV data */
+       if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING)
+               cmp_dcbcfg = &pi->qos_cfg.desired_dcbx_cfg;
+       else
+               cmp_dcbcfg = &pi->qos_cfg.remote_dcbx_cfg;
+
        app_index = 0;
        for (i = 0; i < 3; i++) {
                if (i == 0) {
@@ -802,6 +813,18 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
                        ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_ISCSI_S;
                        ice_app_sel_type = ICE_APP_SEL_TCPIP;
                        ice_app_prot_id_type = ICE_APP_PROT_ID_ISCSI;
+
+                       for (j = 0; j < cmp_dcbcfg->numapps; j++) {
+                               u16 prot_id = cmp_dcbcfg->app[j].prot_id;
+                               u8 sel = cmp_dcbcfg->app[j].selector;
+
+                               if (sel == ICE_APP_SEL_TCPIP &&
+                                    (prot_id == ICE_APP_PROT_ID_ISCSI ||
+                                     prot_id == ICE_APP_PROT_ID_ISCSI_860)) {
+                                       ice_app_prot_id_type = prot_id;
+                                       break;
+                               }
+                       }
                } else {
                        /* FIP APP */
                        ice_aqc_cee_status_mask = ICE_AQC_CEE_FIP_STATUS_M;
@@ -892,11 +915,8 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
        ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
        if (!ret) {
                /* CEE mode */
-               dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
-               dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_CEE;
-               dcbx_cfg->tlv_status = le32_to_cpu(cee_cfg.tlv_status);
-               ice_cee_to_dcb_cfg(&cee_cfg, dcbx_cfg);
                ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE);
+               ice_cee_to_dcb_cfg(&cee_cfg, pi);
        } else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) {
                /* CEE mode not enabled try querying IEEE data */
                dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
index 468a63f..4180f1f 100644
@@ -18,12 +18,10 @@ static void ice_dcbnl_devreset(struct net_device *netdev)
        while (ice_is_reset_in_progress(pf->state))
                usleep_range(1000, 2000);
 
-       set_bit(__ICE_DCBNL_DEVRESET, pf->state);
        dev_close(netdev);
        netdev_state_change(netdev);
        dev_open(netdev, NULL);
        netdev_state_change(netdev);
-       clear_bit(__ICE_DCBNL_DEVRESET, pf->state);
 }
 
 /**
index 2dcfa68..32ba71a 100644
@@ -3472,7 +3472,7 @@ static void ice_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
                netdev_warn(netdev, "Wake on LAN is not supported on this interface!\n");
 
        /* Get WoL settings based on the HW capability */
-       if (ice_is_wol_supported(pf)) {
+       if (ice_is_wol_supported(&pf->hw)) {
                wol->supported = WAKE_MAGIC;
                wol->wolopts = pf->wol_ena ? WAKE_MAGIC : 0;
        } else {
@@ -3492,7 +3492,7 @@ static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
        struct ice_vsi *vsi = np->vsi;
        struct ice_pf *pf = vsi->back;
 
-       if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(pf))
+       if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(&pf->hw))
                return -EOPNOTSUPP;
 
        /* only magic packet is supported */
index 8d4e2ad..d13c7fc 100644
@@ -2620,7 +2620,7 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
                        if (!locked)
                                rtnl_lock();
 
-                       err = ice_open(vsi->netdev);
+                       err = ice_open_internal(vsi->netdev);
 
                        if (!locked)
                                rtnl_unlock();
@@ -2649,7 +2649,7 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
                        if (!locked)
                                rtnl_lock();
 
-                       ice_stop(vsi->netdev);
+                       ice_vsi_close(vsi);
 
                        if (!locked)
                                rtnl_unlock();
@@ -3078,7 +3078,6 @@ err_vsi:
 bool ice_is_reset_in_progress(unsigned long *state)
 {
        return test_bit(__ICE_RESET_OICR_RECV, state) ||
-              test_bit(__ICE_DCBNL_DEVRESET, state) ||
               test_bit(__ICE_PFR_REQ, state) ||
               test_bit(__ICE_CORER_REQ, state) ||
               test_bit(__ICE_GLOBR_REQ, state);
index 2c23c8f..d821c68 100644
@@ -3537,15 +3537,14 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
 }
 
 /**
- * ice_is_wol_supported - get NVM state of WoL
- * @pf: board private structure
+ * ice_is_wol_supported - check if WoL is supported
+ * @hw: pointer to hardware info
  *
  * Check if WoL is supported based on the HW configuration.
  * Returns true if NVM supports and enables WoL for this port, false otherwise
  */
-bool ice_is_wol_supported(struct ice_pf *pf)
+bool ice_is_wol_supported(struct ice_hw *hw)
 {
-       struct ice_hw *hw = &pf->hw;
        u16 wol_ctrl;
 
        /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
@@ -3554,7 +3553,7 @@ bool ice_is_wol_supported(struct ice_pf *pf)
        if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
                return false;
 
-       return !(BIT(hw->pf_id) & wol_ctrl);
+       return !(BIT(hw->port_info->lport) & wol_ctrl);
 }
 
 /**
@@ -4192,28 +4191,25 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
                goto err_send_version_unroll;
        }
 
+       /* not a fatal error if this fails */
        err = ice_init_nvm_phy_type(pf->hw.port_info);
-       if (err) {
+       if (err)
                dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
-               goto err_send_version_unroll;
-       }
 
+       /* not a fatal error if this fails */
        err = ice_update_link_info(pf->hw.port_info);
-       if (err) {
+       if (err)
                dev_err(dev, "ice_update_link_info failed: %d\n", err);
-               goto err_send_version_unroll;
-       }
 
        ice_init_link_dflt_override(pf->hw.port_info);
 
        /* if media available, initialize PHY settings */
        if (pf->hw.port_info->phy.link_info.link_info &
            ICE_AQ_MEDIA_AVAILABLE) {
+               /* not a fatal error if this fails */
                err = ice_init_phy_user_cfg(pf->hw.port_info);
-               if (err) {
+               if (err)
                        dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
-                       goto err_send_version_unroll;
-               }
 
                if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
                        struct ice_vsi *vsi = ice_get_main_vsi(pf);
@@ -4568,6 +4564,7 @@ static int __maybe_unused ice_suspend(struct device *dev)
                        continue;
                ice_vsi_free_q_vectors(pf->vsi[v]);
        }
+       ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
        ice_clear_interrupt_scheme(pf);
 
        pci_save_state(pdev);
@@ -6635,6 +6632,28 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
  * Returns 0 on success, negative value on failure
  */
 int ice_open(struct net_device *netdev)
+{
+       struct ice_netdev_priv *np = netdev_priv(netdev);
+       struct ice_pf *pf = np->vsi->back;
+
+       if (ice_is_reset_in_progress(pf->state)) {
+               netdev_err(netdev, "can't open net device while reset is in progress");
+               return -EBUSY;
+       }
+
+       return ice_open_internal(netdev);
+}
+
+/**
+ * ice_open_internal - Called when a network interface becomes active
+ * @netdev: network interface device structure
+ *
+ * Internal ice_open implementation. Should not be called directly except by
+ * ice_open and the reset handling routine.
+ *
+ * Returns 0 on success, negative value on failure
+ */
+int ice_open_internal(struct net_device *netdev)
 {
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_vsi *vsi = np->vsi;
@@ -6715,6 +6734,12 @@ int ice_stop(struct net_device *netdev)
 {
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_vsi *vsi = np->vsi;
+       struct ice_pf *pf = vsi->back;
+
+       if (ice_is_reset_in_progress(pf->state)) {
+               netdev_err(netdev, "can't stop net device while reset is in progress");
+               return -EBUSY;
+       }
 
        ice_vsi_close(vsi);
 
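The ice_open()/ice_open_internal() split above gives user-triggered opens a reset guard while letting the reset path, which already owns the state, call the internal variant. A minimal sketch of the guarded-entry-point pattern (hypothetical functions, not the ice API):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool reset_in_progress;

/* internal open: used by the reset path, which already owns the state */
static int dev_open_internal(void)
{
        /* ... bring the interface up ... */
        return 0;
}

/* public open: reject user-triggered opens while a reset is running */
static int dev_open(void)
{
        if (reset_in_progress)
                return -EBUSY;
        return dev_open_internal();
}

int main(void)
{
        reset_in_progress = true;
        printf("open during reset: %d\n", dev_open());  /* -EBUSY */
        reset_in_progress = false;
        printf("open after reset:  %d\n", dev_open());  /* 0 */
        return 0;
}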
index 67c965a..834cbd3 100644
@@ -1238,6 +1238,9 @@ ice_add_update_vsi_list(struct ice_hw *hw,
                        ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
                                                vsi_list_id);
 
+               if (!m_entry->vsi_list_info)
+                       return ICE_ERR_NO_MEMORY;
+
                /* If this entry was large action then the large action needs
                 * to be updated to point to FWD to VSI list
                 */
@@ -2220,6 +2223,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
        return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
                 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
                (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
+                fm_entry->vsi_list_info &&
                 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
 }
 
@@ -2292,14 +2296,12 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
                return ICE_ERR_PARAM;
 
        list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
-               struct ice_fltr_info *fi;
-
-               fi = &fm_entry->fltr_info;
-               if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
+               if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
                        continue;
 
                status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
-                                                       vsi_list_head, fi);
+                                                       vsi_list_head,
+                                                       &fm_entry->fltr_info);
                if (status)
                        return status;
        }
@@ -2622,7 +2624,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
                                          &remove_list_head);
        mutex_unlock(rule_lock);
        if (status)
-               return;
+               goto free_fltr_list;
 
        switch (lkup) {
        case ICE_SW_LKUP_MAC:
@@ -2645,6 +2647,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
                break;
        }
 
+free_fltr_list:
        list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
                list_del(&fm_entry->list_entry);
                devm_kfree(ice_hw_to_dev(hw), fm_entry);
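The ice_remove_vsi_lkup_fltr() hunk above converts an early return into a jump to the shared teardown label so the remove list is freed on the error path too. A compact sketch of that goto-cleanup idiom (hypothetical list type):

#include <stdlib.h>

struct node {
        struct node *next;
};

/* hypothetical sketch of the free_fltr_list pattern: on error, jump to
 * the same teardown the success path uses instead of leaking the list */
static int process_list(struct node *head, int fail)
{
        int status = 0;

        if (fail) {
                status = -1;
                goto free_list; /* previously: bare return, leaking every node */
        }

        /* ... act on the entries ... */

free_list:
        while (head) {
                struct node *tmp = head->next;

                free(head);
                head = tmp;
        }
        return status;
}

int main(void)
{
        struct node *n = calloc(1, sizeof(*n));

        process_list(n, 1);     /* list is freed even on the error path */
        return 0;
}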
index b7dc25d..b91dcfd 100644
@@ -443,22 +443,6 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
        }
 }
 
-/**
- * ice_rx_offset - Return expected offset into page to access data
- * @rx_ring: Ring we are requesting offset of
- *
- * Returns the offset value for ring into the data buffer.
- */
-static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
-{
-       if (ice_ring_uses_build_skb(rx_ring))
-               return ICE_SKB_PAD;
-       else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
-               return XDP_PACKET_HEADROOM;
-
-       return 0;
-}
-
 /**
  * ice_setup_rx_ring - Allocate the Rx descriptors
  * @rx_ring: the Rx ring to set up
@@ -493,7 +477,6 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
 
        rx_ring->next_to_use = 0;
        rx_ring->next_to_clean = 0;
-       rx_ring->rx_offset = ice_rx_offset(rx_ring);
 
        if (ice_is_xdp_ena_vsi(rx_ring->vsi))
                WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
index a6cb0c3..266036b 100644
@@ -535,6 +535,7 @@ struct ice_dcb_app_priority_table {
 #define ICE_TLV_STATUS_ERR     0x4
 #define ICE_APP_PROT_ID_FCOE   0x8906
 #define ICE_APP_PROT_ID_ISCSI  0x0cbc
+#define ICE_APP_PROT_ID_ISCSI_860 0x035c
 #define ICE_APP_PROT_ID_FIP    0x8914
 #define ICE_APP_SEL_ETHTYPE    0x1
 #define ICE_APP_SEL_TCPIP      0x2
index 83f3c95..9f94d91 100644
@@ -358,18 +358,18 @@ xsk_pool_if_up:
  * This function allocates a number of Rx buffers from the fill ring
  * or the internal recycle mechanism and places them on the Rx ring.
  *
- * Returns false if all allocations were successful, true if any fail.
+ * Returns true if all allocations were successful, false if any fail.
  */
 bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
 {
        union ice_32b_rx_flex_desc *rx_desc;
        u16 ntu = rx_ring->next_to_use;
        struct ice_rx_buf *rx_buf;
-       bool ret = false;
+       bool ok = true;
        dma_addr_t dma;
 
        if (!count)
-               return false;
+               return true;
 
        rx_desc = ICE_RX_DESC(rx_ring, ntu);
        rx_buf = &rx_ring->rx_buf[ntu];
@@ -377,7 +377,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
        do {
                rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
                if (!rx_buf->xdp) {
-                       ret = true;
+                       ok = false;
                        break;
                }
 
@@ -402,7 +402,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
                ice_release_rx_desc(rx_ring, ntu);
        }
 
-       return ret;
+       return ok;
 }
 
 /**
index 5d87957..44111f6 100644
@@ -1,8 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright(c) 2007 - 2018 Intel Corporation. */
 
-#ifndef _E1000_HW_H_
-#define _E1000_HW_H_
+#ifndef _E1000_IGB_HW_H_
+#define _E1000_IGB_HW_H_
 
 #include <linux/types.h>
 #include <linux/delay.h>
@@ -551,4 +551,4 @@ s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
 
 void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
 void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
-#endif /* _E1000_HW_H_ */
+#endif /* _E1000_IGB_HW_H_ */
index aaa954a..7bda8c5 100644
@@ -748,8 +748,8 @@ void igb_ptp_suspend(struct igb_adapter *adapter);
 void igb_ptp_rx_hang(struct igb_adapter *adapter);
 void igb_ptp_tx_hang(struct igb_adapter *adapter);
 void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
-void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
-                        struct sk_buff *skb);
+int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
+                       struct sk_buff *skb);
 int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
 int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
 void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
index 878b31d..a45cd2b 100644
@@ -8214,7 +8214,8 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
        new_buff->pagecnt_bias  = old_buff->pagecnt_bias;
 }
 
-static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
+static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
+                                 int rx_buf_pgcnt)
 {
        unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
        struct page *page = rx_buffer->page;
@@ -8225,7 +8226,7 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
 
 #if (PAGE_SIZE < 8192)
        /* if we are only owner of page we can reuse it */
-       if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+       if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
                return false;
 #else
 #define IGB_LAST_OFFSET \
@@ -8301,9 +8302,10 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
                return NULL;
 
        if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
-               igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb);
-               xdp->data += IGB_TS_HDR_LEN;
-               size -= IGB_TS_HDR_LEN;
+               if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb)) {
+                       xdp->data += IGB_TS_HDR_LEN;
+                       size -= IGB_TS_HDR_LEN;
+               }
        }
 
        /* Determine available headroom for copy */
@@ -8364,8 +8366,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
 
        /* pull timestamp out of packet data */
        if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-               igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
-               __skb_pull(skb, IGB_TS_HDR_LEN);
+               if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb))
+                       __skb_pull(skb, IGB_TS_HDR_LEN);
        }
 
        /* update buffer offset */
@@ -8614,11 +8616,17 @@ static unsigned int igb_rx_offset(struct igb_ring *rx_ring)
 }
 
 static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
-                                              const unsigned int size)
+                                              const unsigned int size, int *rx_buf_pgcnt)
 {
        struct igb_rx_buffer *rx_buffer;
 
        rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+       *rx_buf_pgcnt =
+#if (PAGE_SIZE < 8192)
+               page_count(rx_buffer->page);
+#else
+               0;
+#endif
        prefetchw(rx_buffer->page);
 
        /* we are reusing so sync this buffer for CPU use */
@@ -8634,9 +8642,9 @@ static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
 }
 
 static void igb_put_rx_buffer(struct igb_ring *rx_ring,
-                             struct igb_rx_buffer *rx_buffer)
+                             struct igb_rx_buffer *rx_buffer, int rx_buf_pgcnt)
 {
-       if (igb_can_reuse_rx_page(rx_buffer)) {
+       if (igb_can_reuse_rx_page(rx_buffer, rx_buf_pgcnt)) {
                /* hand second half of page back to the ring */
                igb_reuse_rx_page(rx_ring, rx_buffer);
        } else {
@@ -8664,6 +8672,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
        unsigned int xdp_xmit = 0;
        struct xdp_buff xdp;
        u32 frame_sz = 0;
+       int rx_buf_pgcnt;
 
        /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
 #if (PAGE_SIZE < 8192)
@@ -8693,7 +8702,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
                 */
                dma_rmb();
 
-               rx_buffer = igb_get_rx_buffer(rx_ring, size);
+               rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
 
                /* retrieve a buffer from the ring */
                if (!skb) {
@@ -8736,7 +8745,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
                        break;
                }
 
-               igb_put_rx_buffer(rx_ring, rx_buffer);
+               igb_put_rx_buffer(rx_ring, rx_buffer, rx_buf_pgcnt);
                cleaned_count++;
 
                /* fetch next buffer in frame if non-eop */
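The igb hunks above snapshot page_count() once, when the buffer is fetched, and hand that value down to the reuse check, so the decision is made against a consistent count. A standalone sketch of the snapshot-then-decide pattern (hypothetical structure, not struct igb_rx_buffer):

#include <stdio.h>

/* hypothetical buffer with a bias counter, modeled on igb's rx buffer */
struct rx_buffer {
        int page_refcount;      /* stands in for page_count(page) */
        int pagecnt_bias;
};

/* the reuse check consumes a refcount snapshot taken when the buffer was
 * fetched, so the decision stays consistent even if the live count moves */
static int can_reuse_page(const struct rx_buffer *b, int pgcnt_snapshot)
{
        return (pgcnt_snapshot - b->pagecnt_bias) <= 1; /* sole owner */
}

int main(void)
{
        struct rx_buffer b = { .page_refcount = 2, .pagecnt_bias = 1 };
        int snapshot = b.page_refcount; /* taken once, up front */

        printf("reuse: %d\n", can_reuse_page(&b, snapshot));
        return 0;
}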
index 7cc5428..86a5762 100644
@@ -856,6 +856,9 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
        dev_kfree_skb_any(skb);
 }
 
+#define IGB_RET_PTP_DISABLED 1
+#define IGB_RET_PTP_INVALID 2
+
 /**
  * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
  * @q_vector: Pointer to interrupt specific structure
@@ -864,19 +867,29 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
  *
  * This function is meant to retrieve a timestamp from the first buffer of an
  * incoming frame.  The value is stored in little endian format starting on
- * byte 8.
+ * byte 8.
+ *
+ * Returns: 0 on success, nonzero on failure
  **/
-void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
-                        struct sk_buff *skb)
+int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
+                       struct sk_buff *skb)
 {
-       __le64 *regval = (__le64 *)va;
        struct igb_adapter *adapter = q_vector->adapter;
+       __le64 *regval = (__le64 *)va;
        int adjust = 0;
 
+       if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
+               return IGB_RET_PTP_DISABLED;
+
        /* The timestamp is recorded in little endian format.
         * DWORD: 0        1        2        3
         * Field: Reserved Reserved SYSTIML  SYSTIMH
         */
+
+       /* check reserved dwords are zero; byte order is irrelevant for zero */
+       if (regval[0])
+               return IGB_RET_PTP_INVALID;
+
        igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
                                   le64_to_cpu(regval[1]));
 
@@ -896,6 +909,8 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
        }
        skb_hwtstamps(skb)->hwtstamp =
                ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
+
+       return 0;
 }
 
 /**
@@ -906,13 +921,15 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
  * This function is meant to retrieve a timestamp from the internal registers
  * of the adapter and store it in the skb.
  **/
-void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
-                        struct sk_buff *skb)
+void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
 {
        struct igb_adapter *adapter = q_vector->adapter;
        struct e1000_hw *hw = &adapter->hw;
-       u64 regval;
        int adjust = 0;
+       u64 regval;
+
+       if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
+               return;
 
        /* If this bit is set, then the RX registers contain the time stamp. No
         * other packet will be time stamped until we read these registers, so
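igb_ptp_rx_pktstamp() above now returns a status and rejects buffers whose reserved leading dwords are non-zero, since those cannot start with a valid TSIP timestamp. A minimal sketch of the validate-before-use check (layout simplified; the kernel code additionally converts from little endian):

#include <stdint.h>
#include <stdio.h>

#define RET_PTP_INVALID 2

/* hypothetical layout: the first 64-bit word of a TSIP header is reserved
 * and must be zero; anything else means no timestamp is present */
static int read_pktstamp(const uint64_t *words, uint64_t *ts_out)
{
        if (words[0])                   /* reserved dwords non-zero: bogus */
                return RET_PTP_INVALID;
        *ts_out = words[1];             /* SYSTIML/SYSTIMH pair */
        return 0;
}

int main(void)
{
        uint64_t buf[2] = { 0, 0x1234 }, ts;

        if (!read_pktstamp(buf, &ts))
                printf("ts=%llx\n", (unsigned long long)ts);
        return 0;
}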
index 5d2809d..1b08a7d 100644
@@ -547,7 +547,7 @@ void igc_ptp_init(struct igc_adapter *adapter);
 void igc_ptp_reset(struct igc_adapter *adapter);
 void igc_ptp_suspend(struct igc_adapter *adapter);
 void igc_ptp_stop(struct igc_adapter *adapter);
-void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
+void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va,
                         struct sk_buff *skb);
 int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
 int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
index 824a6c4..8722294 100644
@@ -1711,6 +1711,9 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
                                                     Autoneg);
        }
 
+       /* Set pause flow control settings */
+       ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+
        switch (hw->fc.requested_mode) {
        case igc_fc_full:
                ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
@@ -1725,9 +1728,7 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
                                                     Asym_Pause);
                break;
        default:
-               ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
-               ethtool_link_ksettings_add_link_mode(cmd, advertising,
-                                                    Asym_Pause);
+               break;
        }
 
        status = pm_runtime_suspended(&adapter->pdev->dev) ?
index 7ac9597..4d989eb 100644
@@ -3831,10 +3831,19 @@ static void igc_reset_task(struct work_struct *work)
 
        adapter = container_of(work, struct igc_adapter, reset_task);
 
+       rtnl_lock();
+       /* If we're already down or resetting, just bail */
+       if (test_bit(__IGC_DOWN, &adapter->state) ||
+           test_bit(__IGC_RESETTING, &adapter->state)) {
+               rtnl_unlock();
+               return;
+       }
+
        igc_rings_dump(adapter);
        igc_regs_dump(adapter);
        netdev_err(adapter->netdev, "Reset adapter\n");
        igc_reinit_locked(adapter);
+       rtnl_unlock();
 }
 
 /**
index ac0b9c8..545f4d0 100644
@@ -152,46 +152,54 @@ static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
 }
 
 /**
- * igc_ptp_rx_pktstamp - retrieve Rx per packet timestamp
+ * igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer
  * @q_vector: Pointer to interrupt specific structure
  * @va: Pointer to address containing Rx buffer
  * @skb: Buffer containing timestamp and packet
  *
- * This function is meant to retrieve the first timestamp from the
- * first buffer of an incoming frame. The value is stored in little
- * endian format starting on byte 0. There's a second timestamp
- * starting on byte 8.
- **/
-void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
+ * This function retrieves the timestamp saved at the beginning of the packet
+ * buffer. While two timestamps are available, one in timer0 reference and the
+ * other in timer1 reference, this function considers only the timestamp in
+ * timer0 reference.
+ */
+void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va,
                         struct sk_buff *skb)
 {
        struct igc_adapter *adapter = q_vector->adapter;
-       __le64 *regval = (__le64 *)va;
-       int adjust = 0;
-
-       /* The timestamp is recorded in little endian format.
-        * DWORD: | 0          | 1           | 2          | 3
-        * Field: | Timer0 Low | Timer0 High | Timer1 Low | Timer1 High
+       u64 regval;
+       int adjust;
+
+       /* Timestamps are saved in little endian at the beginning of the packet
+        * buffer following the layout:
+        *
+        * DWORD: | 0              | 1              | 2              | 3              |
+        * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH |
+        *
+        * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds
+        * part of the timestamp.
         */
-       igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
-                                  le64_to_cpu(regval[0]));
-
-       /* adjust timestamp for the RX latency based on link speed */
-       if (adapter->hw.mac.type == igc_i225) {
-               switch (adapter->link_speed) {
-               case SPEED_10:
-                       adjust = IGC_I225_RX_LATENCY_10;
-                       break;
-               case SPEED_100:
-                       adjust = IGC_I225_RX_LATENCY_100;
-                       break;
-               case SPEED_1000:
-                       adjust = IGC_I225_RX_LATENCY_1000;
-                       break;
-               case SPEED_2500:
-                       adjust = IGC_I225_RX_LATENCY_2500;
-                       break;
-               }
+       regval = le32_to_cpu(va[2]);
+       regval |= (u64)le32_to_cpu(va[3]) << 32;
+       igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+
+       /* Adjust timestamp for the RX latency based on link speed */
+       switch (adapter->link_speed) {
+       case SPEED_10:
+               adjust = IGC_I225_RX_LATENCY_10;
+               break;
+       case SPEED_100:
+               adjust = IGC_I225_RX_LATENCY_100;
+               break;
+       case SPEED_1000:
+               adjust = IGC_I225_RX_LATENCY_1000;
+               break;
+       case SPEED_2500:
+               adjust = IGC_I225_RX_LATENCY_2500;
+               break;
+       default:
+               adjust = 0;
+               netdev_warn_once(adapter->netdev, "Imprecise timestamp\n");
+               break;
        }
        skb_hwtstamps(skb)->hwtstamp =
                ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
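The igc hunk above reads the Timer0 stamp as two little-endian 32-bit words (SYSTIML in dword 2, SYSTIMH in dword 3) and assembles a 64-bit value. A sketch of that assembly, assuming a little-endian host so the le32_to_cpu() conversions can be omitted:

#include <stdint.h>
#include <stdio.h>

/* rebuild the Timer0 stamp from dword 2 (SYSTIML, nanoseconds part) and
 * dword 3 (SYSTIMH, seconds part), as the hunk above does */
static uint64_t read_timer0(const uint32_t *va)
{
        uint64_t regval = va[2];

        regval |= (uint64_t)va[3] << 32;
        return regval;
}

int main(void)
{
        uint32_t buf[4] = { 0, 0, 0x89abcdef, 0x01234567 };

        printf("%llx\n", (unsigned long long)read_timer0(buf));
        return 0;
}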
index 9f3f12e..03d9aad 100644
@@ -4118,6 +4118,8 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 #endif
        }
 
+       ring->rx_offset = ixgbe_rx_offset(ring);
+
        if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) {
                u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
 
@@ -6578,7 +6580,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
 
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
-       rx_ring->rx_offset = ixgbe_rx_offset(rx_ring);
 
        /* XDP RX-queue info */
        if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
index 7fe15a3..fe0989c 100644
@@ -6,7 +6,7 @@
 config NET_VENDOR_MARVELL
        bool "Marvell devices"
        default y
-       depends on PCI || CPU_PXA168 || MV64X60 || PPC32 || PLAT_ORION || INET || COMPILE_TEST
+       depends on PCI || CPU_PXA168 || PPC32 || PLAT_ORION || INET || COMPILE_TEST
        help
          If you have a network (Ethernet) card belonging to this class, say Y.
 
@@ -19,7 +19,7 @@ if NET_VENDOR_MARVELL
 
 config MV643XX_ETH
        tristate "Marvell Discovery (643XX) and Orion ethernet support"
-       depends on MV64X60 || PPC32 || PLAT_ORION || COMPILE_TEST
+       depends on PPC32 || PLAT_ORION || COMPILE_TEST
        depends on INET
        select PHYLIB
        select MVMDIO
index 90e6111..3bfb659 100644
@@ -2684,7 +2684,7 @@ static const struct of_device_id mv643xx_eth_shared_ids[] = {
 MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
 #endif
 
-#if defined(CONFIG_OF_IRQ) && !defined(CONFIG_MV64X60)
+#ifdef CONFIG_OF_IRQ
 #define mv643xx_eth_property(_np, _name, _v)                           \
        do {                                                            \
                u32 tmp;                                                \
index b192692..5c372d2 100644
@@ -13499,8 +13499,6 @@ static struct npc_mcam_kex npc_mkex_default = {
                        [NPC_LT_LC_IP] = {
                                /* SIP+DIP: 8 bytes, KW2[63:0] */
                                KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10),
-                               /* TOS: 1 byte, KW1[63:56] */
-                               KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf),
                        },
                        /* Layer C: IPv6 */
                        [NPC_LT_LC_IP6] = {
index d9a1a71..ab24a5e 100644
@@ -2462,8 +2462,10 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
                    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
 
        for (irq = 0; irq < rvu->num_vec; irq++) {
-               if (rvu->irq_allocated[irq])
+               if (rvu->irq_allocated[irq]) {
                        free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
+                       rvu->irq_allocated[irq] = false;
+               }
        }
 
        pci_free_irq_vectors(rvu->pdev);
@@ -2975,8 +2977,8 @@ static void rvu_remove(struct pci_dev *pdev)
        struct rvu *rvu = pci_get_drvdata(pdev);
 
        rvu_dbg_exit(rvu);
-       rvu_unregister_interrupts(rvu);
        rvu_unregister_dl(rvu);
+       rvu_unregister_interrupts(rvu);
        rvu_flr_wq_destroy(rvu);
        rvu_cgx_exit(rvu);
        rvu_fwdata_exit(rvu);
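The rvu_unregister_interrupts() hunk above clears each irq_allocated flag as the vector is freed, which, together with the reordered teardown in rvu_remove(), keeps a second pass from freeing the same vector twice. A small sketch of the clear-on-free idiom (flags array hypothetical):

#include <stdbool.h>

#define NUM_VEC 4

static bool irq_allocated[NUM_VEC];

/* clear each flag as the vector is released, so a second teardown pass
 * (or a re-entry) cannot free the same vector twice */
static void unregister_interrupts(void)
{
        int irq;

        for (irq = 0; irq < NUM_VEC; irq++) {
                if (irq_allocated[irq]) {
                        /* free_irq(...) would go here */
                        irq_allocated[irq] = false;
                }
        }
}

int main(void)
{
        irq_allocated[0] = true;
        unregister_interrupts();
        unregister_interrupts();        /* idempotent: flags already cleared */
        return 0;
}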
index fa6e46e..76f3992 100644
@@ -678,6 +678,7 @@ void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
                         u8 *intf, u8 *ena);
 bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
 u32  rvu_cgx_get_fifolen(struct rvu *rvu);
+void *rvu_first_cgx_pdata(struct rvu *rvu);
 
 /* CPT APIs */
 int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot);
index e668e48..6e2bf4f 100644
@@ -89,6 +89,21 @@ void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
        return rvu->cgx_idmap[cgx_id];
 }
 
+/* Return the first enabled CGX instance; if none are enabled, return NULL */
+void *rvu_first_cgx_pdata(struct rvu *rvu)
+{
+       int first_enabled_cgx = 0;
+       void *cgxd = NULL;
+
+       for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) {
+               cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu);
+               if (cgxd)
+                       break;
+       }
+
+       return cgxd;
+}
+
 /* Based on P2X connectivity find mapped NIX block for a PF */
 static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
                                  int cgx_id, int lmac_id)
@@ -711,10 +726,9 @@ int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
 u32 rvu_cgx_get_fifolen(struct rvu *rvu)
 {
        struct mac_ops *mac_ops;
-       int rvu_def_cgx_id = 0;
        u32 fifo_len;
 
-       mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu));
+       mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
        fifo_len = mac_ops ? mac_ops->fifo_len : 0;
 
        return fifo_len;
index aa2ca87..de3968d 100644
@@ -234,12 +234,14 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
                                          char __user *buffer,
                                          size_t count, loff_t *ppos)
 {
-       int index, off = 0, flag = 0, go_back = 0, off_prev;
+       int index, off = 0, flag = 0, go_back = 0, len = 0;
        struct rvu *rvu = filp->private_data;
        int lf, pf, vf, pcifunc;
        struct rvu_block block;
        int bytes_not_copied;
+       int lf_str_size = 12;
        int buf_size = 2048;
+       char *lfs;
        char *buf;
 
        /* don't allow partial reads */
@@ -249,12 +251,20 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
        buf = kzalloc(buf_size, GFP_KERNEL);
        if (!buf)
                return -ENOSPC;
-       off +=  scnprintf(&buf[off], buf_size - 1 - off, "\npcifunc\t\t");
+
+       lfs = kzalloc(lf_str_size, GFP_KERNEL);
+       if (!lfs) {
+               kfree(buf);
+               return -ENOMEM;
+       }
+       off +=  scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
+                         "pcifunc");
        for (index = 0; index < BLK_COUNT; index++)
-               if (strlen(rvu->hw->block[index].name))
-                       off +=  scnprintf(&buf[off], buf_size - 1 - off,
-                                         "%*s\t", (index - 1) * 2,
-                                         rvu->hw->block[index].name);
+               if (strlen(rvu->hw->block[index].name)) {
+                       off += scnprintf(&buf[off], buf_size - 1 - off,
+                                        "%-*s", lf_str_size,
+                                        rvu->hw->block[index].name);
+               }
        off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
        for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
                for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
@@ -263,14 +273,15 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
                                continue;
 
                        if (vf) {
+                               sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
                                go_back = scnprintf(&buf[off],
                                                    buf_size - 1 - off,
-                                                   "PF%d:VF%d\t\t", pf,
-                                                   vf - 1);
+                                                   "%-*s", lf_str_size, lfs);
                        } else {
+                               sprintf(lfs, "PF%d", pf);
                                go_back = scnprintf(&buf[off],
                                                    buf_size - 1 - off,
-                                                   "PF%d\t\t", pf);
+                                                   "%-*s", lf_str_size, lfs);
                        }
 
                        off += go_back;
@@ -278,20 +289,22 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
                                block = rvu->hw->block[index];
                                if (!strlen(block.name))
                                        continue;
-                               off_prev = off;
+                               len = 0;
+                               lfs[len] = '\0';
                                for (lf = 0; lf < block.lf.max; lf++) {
                                        if (block.fn_map[lf] != pcifunc)
                                                continue;
                                        flag = 1;
-                                       off += scnprintf(&buf[off], buf_size - 1
-                                                       - off, "%3d,", lf);
+                                       len += sprintf(&lfs[len], "%d,", lf);
                                }
-                               if (flag && off_prev != off)
-                                       off--;
-                               else
-                                       go_back++;
+
+                               if (flag)
+                                       len--;
+                               lfs[len] = '\0';
                                off += scnprintf(&buf[off], buf_size - 1 - off,
-                                               "\t");
+                                                "%-*s", lf_str_size, lfs);
+                               if (!strlen(lfs))
+                                       go_back += lf_str_size;
                        }
                        if (!flag)
                                off -= go_back;
@@ -303,6 +316,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
        }
 
        bytes_not_copied = copy_to_user(buffer, buf, off);
+       kfree(lfs);
        kfree(buf);
 
        if (bytes_not_copied)
@@ -319,7 +333,6 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
        struct rvu *rvu = filp->private;
        struct pci_dev *pdev = NULL;
        struct mac_ops *mac_ops;
-       int rvu_def_cgx_id = 0;
        char cgx[10], lmac[10];
        struct rvu_pfvf *pfvf;
        int pf, domain, blkid;
@@ -327,7 +340,10 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
        u16 pcifunc;
 
        domain = 2;
-       mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu));
+       mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
+       /* There can be no CGX devices at all */
+       if (!mac_ops)
+               return 0;
        seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
                   mac_ops->name);
        for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
@@ -1818,7 +1834,6 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
 {
        struct mac_ops *mac_ops;
        unsigned long lmac_bmap;
-       int rvu_def_cgx_id = 0;
        int i, lmac_id;
        char dname[20];
        void *cgx;
@@ -1826,7 +1841,7 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
        if (!cgx_get_cgxcnt_max())
                return;
 
-       mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu));
+       mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
        if (!mac_ops)
                return;
 
index d300019..3d068b7 100644 (file)
@@ -2629,7 +2629,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
        struct nix_rx_flowkey_alg *field;
        struct nix_rx_flowkey_alg tmp;
        u32 key_type, valid_key;
-       int l4_key_offset;
+       int l4_key_offset = 0;
 
        if (!alg)
                return -EINVAL;
index 04bb080..0bd49c7 100644 (file)
@@ -2490,10 +2490,10 @@ int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
                index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
                if (index >= mcam->bmap_entries)
                        break;
+               entry = index + 1;
                if (mcam->entry2cntr_map[index] != req->cntr)
                        continue;
 
-               entry = index + 1;
                npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
                                              index, req->cntr);
        }
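
The rvu_npc hunk above moves the "entry = index + 1" advance ahead of the continue, so a bitmap entry whose counter does not match the request can no longer stall the scan. A standalone sketch of the same loop shape, with find_next_set() standing in for the kernel's find_next_bit():

#include <stdio.h>

static int find_next_set(const unsigned long *map, int size, int start)
{
	for (; start < size; start++)
		if (*map & (1UL << start))
			return start;
	return size;
}

int main(void)
{
	unsigned long bmap = 0x2d;	/* bits 0, 2, 3, 5 set */
	int cntr_map[6] = { 7, 0, 7, 9, 0, 9 };
	int entry = 0, index;

	while ((index = find_next_set(&bmap, 6, entry)) < 6) {
		entry = index + 1;	/* advance unconditionally */
		if (cntr_map[index] != 9)
			continue;	/* safe: entry already moved on */
		printf("unmap entry %d\n", index);
	}
	return 0;
}

With the advance after the filter, as in the old code, index 0 would be found again on every iteration and the loop would never terminate.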
index 0dbbf38..dc17784 100644 (file)
@@ -257,17 +257,19 @@ int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
 int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
                       u32 *rule_locs)
 {
+       u32 rule_cnt = nfc->rule_cnt;
        u32 location = 0;
        int idx = 0;
        int err = 0;
 
        nfc->data = pfvf->flow_cfg->ntuple_max_flows;
-       while ((!err || err == -ENOENT) && idx < nfc->rule_cnt) {
+       while ((!err || err == -ENOENT) && idx < rule_cnt) {
                err = otx2_get_flow(pfvf, nfc, location);
                if (!err)
                        rule_locs[idx++] = location;
                location++;
        }
+       nfc->rule_cnt = rule_cnt;
 
        return err;
 }
index 53ab181..2fd3d23 100644 (file)
@@ -1672,6 +1672,7 @@ int otx2_stop(struct net_device *netdev)
        struct otx2_nic *pf = netdev_priv(netdev);
        struct otx2_cq_poll *cq_poll = NULL;
        struct otx2_qset *qset = &pf->qset;
+       struct otx2_rss_info *rss;
        int qidx, vec, wrk;
 
        netif_carrier_off(netdev);
@@ -1684,6 +1685,10 @@ int otx2_stop(struct net_device *netdev)
        /* First stop packet Rx/Tx */
        otx2_rxtx_enable(pf, false);
 
+       /* Clear RSS enable flag */
+       rss = &pf->hw.rss_info;
+       rss->enable = false;
+
        /* Cleanup Queue IRQ */
        vec = pci_irq_vector(pf->pdev,
                             pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
index d1e4d42..3712e17 100644 (file)
@@ -1544,8 +1544,8 @@ static int pxa168_eth_remove(struct platform_device *pdev)
        clk_disable_unprepare(pep->clk);
        mdiobus_unregister(pep->smi_bus);
        mdiobus_free(pep->smi_bus);
-       unregister_netdev(dev);
        cancel_work_sync(&pep->tx_timeout_task);
+       unregister_netdev(dev);
        free_netdev(dev);
        return 0;
 }
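
The pxa168 hunk reorders teardown so the tx_timeout work is cancelled before the netdev is unregistered: deferred work must be stopped before the object it touches goes away, and remove paths generally mirror setup in reverse. A toy sketch of that ordering, with stand-ins for cancel_work_sync() and unregister_netdev():

#include <stdio.h>
#include <stdlib.h>

/* Names and types here are illustrative stand-ins, not kernel API. */
struct work { int pending; };
struct netdev { struct work tx_timeout; };

static void cancel_work_sync(struct work *w) { w->pending = 0; }
static void unregister(struct netdev *d) { printf("unregister %p\n", (void *)d); }

int main(void)
{
	struct netdev *dev = calloc(1, sizeof(*dev));

	if (!dev)
		return 1;
	dev->tx_timeout.pending = 1;
	/* Wrong order would let tx_timeout run against a half-dead device. */
	cancel_work_sync(&dev->tx_timeout);	/* 1: stop async users */
	unregister(dev);			/* 2: drop external visibility */
	free(dev);				/* 3: release memory */
	return 0;
}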
index b051417..9153c9b 100644 (file)
@@ -191,12 +191,12 @@ static bool is_ib_supported(struct mlx5_core_dev *dev)
 }
 
 enum {
-       MLX5_INTERFACE_PROTOCOL_ETH_REP,
        MLX5_INTERFACE_PROTOCOL_ETH,
+       MLX5_INTERFACE_PROTOCOL_ETH_REP,
 
+       MLX5_INTERFACE_PROTOCOL_IB,
        MLX5_INTERFACE_PROTOCOL_IB_REP,
        MLX5_INTERFACE_PROTOCOL_MPIB,
-       MLX5_INTERFACE_PROTOCOL_IB,
 
        MLX5_INTERFACE_PROTOCOL_VNET,
 };
index 7435fe6..bc6f77e 100644 (file)
@@ -92,14 +92,15 @@ struct page_pool;
                                    MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
 #define MLX5_MPWRQ_PAGES_PER_WQE               BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
 
-#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
+#define MLX5_ALIGN_MTTS(mtts)          (ALIGN(mtts, 8))
+#define MLX5_ALIGNED_MTTS_OCTW(mtts)   ((mtts) / 2)
+#define MLX5_MTT_OCTW(mtts)            (MLX5_ALIGNED_MTTS_OCTW(MLX5_ALIGN_MTTS(mtts)))
 /* Add another page to MLX5E_REQUIRED_WQE_MTTS as a buffer between
 * WQEs. This page will absorb write overflow by the hardware when
  * receiving packets larger than MTU. These oversize packets are
  * dropped by the driver at a later stage.
  */
-#define MLX5E_REQUIRED_WQE_MTTS                (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE + 1, 8))
-#define MLX5E_LOG_ALIGNED_MPWQE_PPW    (ilog2(MLX5E_REQUIRED_WQE_MTTS))
+#define MLX5E_REQUIRED_WQE_MTTS                (MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1))
 #define MLX5E_REQUIRED_MTTS(wqes)      (wqes * MLX5E_REQUIRED_WQE_MTTS)
 #define MLX5E_MAX_RQ_NUM_MTTS  \
        ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
@@ -515,6 +516,7 @@ struct mlx5e_icosq {
        struct mlx5_wq_cyc         wq;
        void __iomem              *uar_map;
        u32                        sqn;
+       u16                        reserved_room;
        unsigned long              state;
 
        /* control path */
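
The en.h hunk splits MLX5_MTT_OCTW into an explicit align-to-8 step and a halving step. The arithmetic restated in standalone C, with ALIGN8 playing the role of the kernel's ALIGN(x, 8):

#include <stdio.h>

#define ALIGN8(x)		(((x) + 7) & ~7U)
#define ALIGNED_MTTS_OCTW(mtts)	((mtts) / 2)
#define MTT_OCTW(mtts)		ALIGNED_MTTS_OCTW(ALIGN8(mtts))

int main(void)
{
	unsigned int mtts;

	for (mtts = 1; mtts <= 17; mtts += 8)
		printf("mtts=%2u aligned=%2u octw=%u\n",
		       mtts, ALIGN8(mtts), MTT_OCTW(mtts));
	return 0;
}

Factoring the alignment out lets en_rx.c compute the UMR xlt_offset from the same aligned MTT count, which is why MLX5E_LOG_ALIGNED_MPWQE_PPW can be deleted.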
index f3f6eb0..68e54cc 100644 (file)
@@ -185,6 +185,28 @@ mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry)
        return !!(entry->tuple_nat_node.next);
 }
 
+static int
+mlx5_get_label_mapping(struct mlx5_tc_ct_priv *ct_priv,
+                      u32 *labels, u32 *id)
+{
+       if (!memchr_inv(labels, 0, sizeof(u32) * 4)) {
+               *id = 0;
+               return 0;
+       }
+
+       if (mapping_add(ct_priv->labels_mapping, labels, id))
+               return -EOPNOTSUPP;
+
+       return 0;
+}
+
+static void
+mlx5_put_label_mapping(struct mlx5_tc_ct_priv *ct_priv, u32 id)
+{
+       if (id)
+               mapping_remove(ct_priv->labels_mapping, id);
+}
+
 static int
 mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
 {
@@ -436,7 +458,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
        mlx5_tc_rule_delete(netdev_priv(ct_priv->netdev), zone_rule->rule, attr);
        mlx5e_mod_hdr_detach(ct_priv->dev,
                             ct_priv->mod_hdr_tbl, zone_rule->mh);
-       mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+       mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
        kfree(attr);
 }
 
@@ -639,8 +661,8 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
        if (!meta)
                return -EOPNOTSUPP;
 
-       err = mapping_add(ct_priv->labels_mapping, meta->ct_metadata.labels,
-                         &attr->ct_attr.ct_labels_id);
+       err = mlx5_get_label_mapping(ct_priv, meta->ct_metadata.labels,
+                                    &attr->ct_attr.ct_labels_id);
        if (err)
                return -EOPNOTSUPP;
        if (nat) {
@@ -677,7 +699,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
 
 err_mapping:
        dealloc_mod_hdr_actions(&mod_acts);
-       mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+       mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
        return err;
 }
 
@@ -745,7 +767,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 err_rule:
        mlx5e_mod_hdr_detach(ct_priv->dev,
                             ct_priv->mod_hdr_tbl, zone_rule->mh);
-       mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+       mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
 err_mod_hdr:
        kfree(attr);
 err_attr:
@@ -1181,7 +1203,8 @@ int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec)
 
        mlx5e_tc_match_to_reg_get_match(spec, CTSTATE_TO_REG,
                                        &ctstate, &ctstate_mask);
-       if (ctstate_mask)
+
+       if ((ctstate & ctstate_mask) == MLX5_CT_STATE_TRK_BIT)
                return -EOPNOTSUPP;
 
        ctstate_mask |= MLX5_CT_STATE_TRK_BIT;
@@ -1196,7 +1219,7 @@ void mlx5_tc_ct_match_del(struct mlx5_tc_ct_priv *priv, struct mlx5_ct_attr *ct_
        if (!priv || !ct_attr->ct_labels_id)
                return;
 
-       mapping_remove(priv->labels_mapping, ct_attr->ct_labels_id);
+       mlx5_put_label_mapping(priv, ct_attr->ct_labels_id);
 }
 
 int
@@ -1279,7 +1302,7 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
                ct_labels[1] = key->ct_labels[1] & mask->ct_labels[1];
                ct_labels[2] = key->ct_labels[2] & mask->ct_labels[2];
                ct_labels[3] = key->ct_labels[3] & mask->ct_labels[3];
-               if (mapping_add(priv->labels_mapping, ct_labels, &ct_attr->ct_labels_id))
+               if (mlx5_get_label_mapping(priv, ct_labels, &ct_attr->ct_labels_id))
                        return -EOPNOTSUPP;
                mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG, ct_attr->ct_labels_id,
                                            MLX5_CT_LABELS_MASK);
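
The tc_ct.c hunks wrap mapping_add/mapping_remove so an all-zero ct_labels key maps to the reserved id 0 without allocating, and the matching put is a no-op for id 0. A self-contained sketch of that get/put pattern; the kernel uses memchr_inv() for the zero test, and memcmp stands in here:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int next_id = 1;

/* Stand-ins for the mlx5 mapping API. */
static int mapping_add(const uint32_t *labels, uint32_t *id)
{
	(void)labels;
	*id = next_id++;
	return 0;
}

static void mapping_remove(uint32_t id) { printf("removed id %u\n", id); }

static int get_label_mapping(const uint32_t labels[4], uint32_t *id)
{
	uint32_t zero[4] = { 0 };

	if (!memcmp(labels, zero, sizeof(zero))) {
		*id = 0;		/* reserved: "no labels" */
		return 0;
	}
	return mapping_add(labels, id);
}

static void put_label_mapping(uint32_t id)
{
	if (id)				/* id 0 was never allocated */
		mapping_remove(id);
}

int main(void)
{
	uint32_t a[4] = { 0 }, b[4] = { 1, 0, 0, 0 }, id_a, id_b;

	get_label_mapping(a, &id_a);
	get_label_mapping(b, &id_b);
	printf("id_a=%u id_b=%u\n", id_a, id_b);
	put_label_mapping(id_a);	/* no-op */
	put_label_mapping(id_b);
	return 0;
}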
index f8075a6..172e047 100644 (file)
@@ -685,14 +685,14 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
        u16 vport_num;
        int err = 0;
 
-       if (flow_attr->ip_version == 4) {
+       if (flow_attr->tun_ip_version == 4) {
                /* Addresses are swapped for decap */
                attr.fl.fl4.saddr = esw_attr->rx_tun_attr->dst_ip.v4;
                attr.fl.fl4.daddr = esw_attr->rx_tun_attr->src_ip.v4;
                err = mlx5e_route_lookup_ipv4_get(priv, priv->netdev, &attr);
        }
 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
-       else if (flow_attr->ip_version == 6) {
+       else if (flow_attr->tun_ip_version == 6) {
                /* Addresses are swapped for decap */
                attr.fl.fl6.saddr = esw_attr->rx_tun_attr->dst_ip.v6;
                attr.fl.fl6.daddr = esw_attr->rx_tun_attr->src_ip.v6;
@@ -718,10 +718,10 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
        esw_attr->rx_tun_attr->decap_vport = vport_num;
 
 out:
-       if (flow_attr->ip_version == 4)
+       if (flow_attr->tun_ip_version == 4)
                mlx5e_route_lookup_ipv4_put(&attr);
 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
-       else if (flow_attr->ip_version == 6)
+       else if (flow_attr->tun_ip_version == 6)
                mlx5e_route_lookup_ipv6_put(&attr);
 #endif
        return err;
index 67de2bf..e127199 100644 (file)
@@ -21,6 +21,11 @@ enum {
        MLX5E_TC_TUNNEL_TYPE_MPLSOUDP,
 };
 
+struct mlx5e_encap_key {
+       const struct ip_tunnel_key *ip_tun_key;
+       struct mlx5e_tc_tunnel     *tc_tunnel;
+};
+
 struct mlx5e_tc_tunnel {
        int tunnel_type;
        enum mlx5_flow_match_level match_level;
@@ -44,6 +49,8 @@ struct mlx5e_tc_tunnel {
                            struct flow_cls_offload *f,
                            void *headers_c,
                            void *headers_v);
+       bool (*encap_info_equal)(struct mlx5e_encap_key *a,
+                                struct mlx5e_encap_key *b);
 };
 
 extern struct mlx5e_tc_tunnel vxlan_tunnel;
@@ -101,6 +108,9 @@ int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv,
                                 void *headers_c,
                                 void *headers_v);
 
+bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a,
+                                          struct mlx5e_encap_key *b);
+
 #endif /* CONFIG_MLX5_ESWITCH */
 
 #endif //__MLX5_EN_TC_TUNNEL_H__
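
tc_tun.h adds a per-tunnel-type encap_info_equal callback to the ops structure, with a generic implementation for tunnels that need nothing beyond a key compare. A minimal sketch of the callback-in-ops pattern; the struct layout and values are illustrative only:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct key { int tunnel_type; char ip_key[16]; };

struct tunnel_ops {
	int tunnel_type;
	bool (*encap_info_equal)(const struct key *a, const struct key *b);
};

static bool equal_generic(const struct key *a, const struct key *b)
{
	return a->tunnel_type == b->tunnel_type &&
	       memcmp(a->ip_key, b->ip_key, sizeof(a->ip_key)) == 0;
}

static const struct tunnel_ops vxlan_ops = {
	.tunnel_type      = 1,
	.encap_info_equal = equal_generic,	/* geneve would override this */
};

int main(void)
{
	struct key k1 = { 1, "10.0.0.1" }, k2 = { 1, "10.0.0.1" };

	printf("equal: %d\n", vxlan_ops.encap_info_equal(&k1, &k2));
	return 0;
}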
index 6a11633..9f16ad2 100644 (file)
@@ -89,6 +89,7 @@ int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow,
         * required to establish routing.
         */
        flow_flag_set(flow, TUN_RX);
+       flow->attr->tun_ip_version = ip_version;
        return 0;
 }
 
@@ -475,16 +476,11 @@ void mlx5e_detach_decap(struct mlx5e_priv *priv,
        mlx5e_decap_dealloc(priv, d);
 }
 
-struct encap_key {
-       const struct ip_tunnel_key *ip_tun_key;
-       struct mlx5e_tc_tunnel *tc_tunnel;
-};
-
-static int cmp_encap_info(struct encap_key *a,
-                         struct encap_key *b)
+bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a,
+                                          struct mlx5e_encap_key *b)
 {
-       return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
-               a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
+       return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) == 0 &&
+               a->tc_tunnel->tunnel_type == b->tc_tunnel->tunnel_type;
 }
 
 static int cmp_decap_info(struct mlx5e_decap_key *a,
@@ -493,7 +489,7 @@ static int cmp_decap_info(struct mlx5e_decap_key *a,
        return memcmp(&a->key, &b->key, sizeof(b->key));
 }
 
-static int hash_encap_info(struct encap_key *key)
+static int hash_encap_info(struct mlx5e_encap_key *key)
 {
        return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
                     key->tc_tunnel->tunnel_type);
@@ -515,18 +511,18 @@ static bool mlx5e_decap_take(struct mlx5e_decap_entry *e)
 }
 
 static struct mlx5e_encap_entry *
-mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
+mlx5e_encap_get(struct mlx5e_priv *priv, struct mlx5e_encap_key *key,
                uintptr_t hash_key)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+       struct mlx5e_encap_key e_key;
        struct mlx5e_encap_entry *e;
-       struct encap_key e_key;
 
        hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
                                   encap_hlist, hash_key) {
                e_key.ip_tun_key = &e->tun_info->key;
                e_key.tc_tunnel = e->tunnel;
-               if (!cmp_encap_info(&e_key, key) &&
+               if (e->tunnel->encap_info_equal(&e_key, key) &&
                    mlx5e_encap_take(e))
                        return e;
        }
@@ -693,8 +689,8 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
        struct mlx5_flow_attr *attr = flow->attr;
        const struct ip_tunnel_info *tun_info;
        unsigned long tbl_time_before = 0;
-       struct encap_key key;
        struct mlx5e_encap_entry *e;
+       struct mlx5e_encap_key key;
        bool entry_created = false;
        unsigned short family;
        uintptr_t hash_key;
@@ -1091,7 +1087,7 @@ int mlx5e_attach_decap_route(struct mlx5e_priv *priv,
        if (err || !esw_attr->rx_tun_attr->decap_vport)
                goto out;
 
-       key.ip_version = attr->ip_version;
+       key.ip_version = attr->tun_ip_version;
        if (key.ip_version == 4)
                key.endpoint_ip.v4 = esw_attr->rx_tun_attr->dst_ip.v4;
        else
index e472ed0..f5b26f5 100644 (file)
@@ -227,6 +227,10 @@ static int mlx5e_tc_tun_parse_geneve_options(struct mlx5e_priv *priv,
        option_key = (struct geneve_opt *)&enc_opts.key->data[0];
        option_mask = (struct geneve_opt *)&enc_opts.mask->data[0];
 
+       if (option_mask->opt_class == 0 && option_mask->type == 0 &&
+           !memchr_inv(option_mask->opt_data, 0, option_mask->length * 4))
+               return 0;
+
        if (option_key->length > max_tlv_option_data_len) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Matching on GENEVE options: unsupported option len");
@@ -325,6 +329,34 @@ static int mlx5e_tc_tun_parse_geneve(struct mlx5e_priv *priv,
        return mlx5e_tc_tun_parse_geneve_options(priv, spec, f);
 }
 
+static bool mlx5e_tc_tun_encap_info_equal_geneve(struct mlx5e_encap_key *a,
+                                                struct mlx5e_encap_key *b)
+{
+       struct ip_tunnel_info *a_info;
+       struct ip_tunnel_info *b_info;
+       bool a_has_opts, b_has_opts;
+
+       if (!mlx5e_tc_tun_encap_info_equal_generic(a, b))
+               return false;
+
+       a_has_opts = !!(a->ip_tun_key->tun_flags & TUNNEL_GENEVE_OPT);
+       b_has_opts = !!(b->ip_tun_key->tun_flags & TUNNEL_GENEVE_OPT);
+
+       /* keys are equal when both don't have any options attached */
+       if (!a_has_opts && !b_has_opts)
+               return true;
+
+       if (a_has_opts != b_has_opts)
+               return false;
+
+       /* geneve options stored in memory next to ip_tunnel_info struct */
+       a_info = container_of(a->ip_tun_key, struct ip_tunnel_info, key);
+       b_info = container_of(b->ip_tun_key, struct ip_tunnel_info, key);
+
+       return a_info->options_len == b_info->options_len &&
+               memcmp(a_info + 1, b_info + 1, a_info->options_len) == 0;
+}
+
 struct mlx5e_tc_tunnel geneve_tunnel = {
        .tunnel_type          = MLX5E_TC_TUNNEL_TYPE_GENEVE,
        .match_level          = MLX5_MATCH_L4,
@@ -334,4 +366,5 @@ struct mlx5e_tc_tunnel geneve_tunnel = {
        .generate_ip_tun_hdr  = mlx5e_gen_ip_tunnel_header_geneve,
        .parse_udp_ports      = mlx5e_tc_tun_parse_udp_ports_geneve,
        .parse_tunnel         = mlx5e_tc_tun_parse_geneve,
+       .encap_info_equal     = mlx5e_tc_tun_encap_info_equal_geneve,
 };
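
The geneve comparator relies on two layout facts: options live in memory immediately after the ip_tunnel_info struct (so info + 1 addresses them), and container_of() recovers the info struct from a pointer to its embedded key. A standalone sketch under those assumptions, with simplified stand-in types:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tun_key  { unsigned int id; };
struct tun_info { struct tun_key key; size_t options_len; /* options follow */ };

static bool opts_equal(const struct tun_key *a_key, const struct tun_key *b_key)
{
	const struct tun_info *a = container_of(a_key, struct tun_info, key);
	const struct tun_info *b = container_of(b_key, struct tun_info, key);

	return a->options_len == b->options_len &&
	       memcmp(a + 1, b + 1, a->options_len) == 0;
}

static struct tun_info *make(unsigned int id, const char *opts, size_t len)
{
	struct tun_info *info = malloc(sizeof(*info) + len);

	info->key.id = id;
	info->options_len = len;
	memcpy(info + 1, opts, len);	/* options stored right after the struct */
	return info;
}

int main(void)
{
	struct tun_info *a = make(5, "abc", 3), *b = make(5, "abc", 3);

	printf("options equal: %d\n", opts_equal(&a->key, &b->key));
	free(a);
	free(b);
	return 0;
}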
index 2805416..ada14f0 100644 (file)
@@ -94,4 +94,5 @@ struct mlx5e_tc_tunnel gre_tunnel = {
        .generate_ip_tun_hdr  = mlx5e_gen_ip_tunnel_header_gretap,
        .parse_udp_ports      = NULL,
        .parse_tunnel         = mlx5e_tc_tun_parse_gretap,
+       .encap_info_equal     = mlx5e_tc_tun_encap_info_equal_generic,
 };
index 3479672..60952b3 100644 (file)
@@ -131,4 +131,5 @@ struct mlx5e_tc_tunnel mplsoudp_tunnel = {
        .generate_ip_tun_hdr  = generate_ip_tun_hdr,
        .parse_udp_ports      = parse_udp_ports,
        .parse_tunnel         = parse_tunnel,
+       .encap_info_equal     = mlx5e_tc_tun_encap_info_equal_generic,
 };
index 038a0f1..4267f3a 100644 (file)
@@ -150,4 +150,5 @@ struct mlx5e_tc_tunnel vxlan_tunnel = {
        .generate_ip_tun_hdr  = mlx5e_gen_ip_tunnel_header_vxlan,
        .parse_udp_ports      = mlx5e_tc_tun_parse_udp_ports_vxlan,
        .parse_tunnel         = mlx5e_tc_tun_parse_vxlan,
+       .encap_info_equal     = mlx5e_tc_tun_encap_info_equal_generic,
 };
index 2371b83..055c3bc 100644 (file)
@@ -441,4 +441,10 @@ static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size)
        return wqe_size * 2 - 1;
 }
 
+static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
+{
+       u16 room = sq->reserved_room + mlx5e_stop_room_for_wqe(wqe_size);
+
+       return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room);
+}
 #endif
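
txrx.h introduces mlx5e_icosq_can_post_wqe(), which folds a per-SQ reserved_room into the usual stop-room check so latency-critical WQEs always have space left. A toy model of the check; the ring size and counter values are invented, and wrap bookkeeping is reduced to unsigned subtraction:

#include <stdbool.h>
#include <stdio.h>

#define WQ_SZ 64

static bool has_room_for(unsigned int cc, unsigned int pc, unsigned int n)
{
	return WQ_SZ - (pc - cc) >= n;	/* free slots vs. requested room */
}

static bool can_post(unsigned int cc, unsigned int pc,
		     unsigned int reserved_room, unsigned int wqe_size)
{
	/* Same worst-case-wrap formula as mlx5e_stop_room_for_wqe() above. */
	unsigned int stop_room = wqe_size * 2 - 1;

	return has_room_for(cc, pc, reserved_room + stop_room);
}

int main(void)
{
	printf("%d\n", can_post(0, 60, 4, 2));	/* 4 free < 4 + 3 -> 0 */
	printf("%d\n", can_post(0, 40, 4, 2));	/* 24 free >= 7   -> 1 */
	return 0;
}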
index d06532d..19d22a6 100644 (file)
@@ -46,7 +46,8 @@ struct mlx5e_ktls_offload_context_rx {
        struct tls12_crypto_info_aes_gcm_128 crypto_info;
        struct accel_rule rule;
        struct sock *sk;
-       struct mlx5e_rq_stats *stats;
+       struct mlx5e_rq_stats *rq_stats;
+       struct mlx5e_tls_sw_stats *sw_stats;
        struct completion add_ctx;
        u32 tirn;
        u32 key_id;
@@ -137,11 +138,10 @@ post_static_params(struct mlx5e_icosq *sq,
 {
        struct mlx5e_set_tls_static_params_wqe *wqe;
        struct mlx5e_icosq_wqe_info wi;
-       u16 pi, num_wqebbs, room;
+       u16 pi, num_wqebbs;
 
        num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
-       room = mlx5e_stop_room_for_wqe(num_wqebbs);
-       if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room)))
+       if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
                return ERR_PTR(-ENOSPC);
 
        pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
@@ -168,11 +168,10 @@ post_progress_params(struct mlx5e_icosq *sq,
 {
        struct mlx5e_set_tls_progress_params_wqe *wqe;
        struct mlx5e_icosq_wqe_info wi;
-       u16 pi, num_wqebbs, room;
+       u16 pi, num_wqebbs;
 
        num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
-       room = mlx5e_stop_room_for_wqe(num_wqebbs);
-       if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room)))
+       if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
                return ERR_PTR(-ENOSPC);
 
        pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
@@ -218,7 +217,7 @@ unlock:
        return err;
 
 err_out:
-       priv_rx->stats->tls_resync_req_skip++;
+       priv_rx->rq_stats->tls_resync_req_skip++;
        err = PTR_ERR(cseg);
        complete(&priv_rx->add_ctx);
        goto unlock;
@@ -277,17 +276,15 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
 
        buf->priv_rx = priv_rx;
 
-       BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1);
-
        spin_lock_bh(&sq->channel->async_icosq_lock);
 
-       if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
+       if (unlikely(!mlx5e_icosq_can_post_wqe(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS))) {
                spin_unlock_bh(&sq->channel->async_icosq_lock);
                err = -ENOSPC;
                goto err_dma_unmap;
        }
 
-       pi = mlx5e_icosq_get_next_pi(sq, 1);
+       pi = mlx5e_icosq_get_next_pi(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS);
        wqe = MLX5E_TLS_FETCH_GET_PROGRESS_PARAMS_WQE(sq, pi);
 
 #define GET_PSV_DS_CNT (DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS))
@@ -307,7 +304,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
 
        wi = (struct mlx5e_icosq_wqe_info) {
                .wqe_type = MLX5E_ICOSQ_WQE_GET_PSV_TLS,
-               .num_wqebbs = 1,
+               .num_wqebbs = MLX5E_KTLS_GET_PROGRESS_WQEBBS,
                .tls_get_params.buf = buf,
        };
        icosq_fill_wi(sq, pi, &wi);
@@ -322,7 +319,7 @@ err_dma_unmap:
 err_free:
        kfree(buf);
 err_out:
-       priv_rx->stats->tls_resync_req_skip++;
+       priv_rx->rq_stats->tls_resync_req_skip++;
        return err;
 }
 
@@ -378,13 +375,13 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx
 
        cseg = post_static_params(sq, priv_rx);
        if (IS_ERR(cseg)) {
-               priv_rx->stats->tls_resync_res_skip++;
+               priv_rx->rq_stats->tls_resync_res_skip++;
                err = PTR_ERR(cseg);
                goto unlock;
        }
        /* Do not increment priv_rx refcnt, CQE handling is empty */
        mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
-       priv_rx->stats->tls_resync_res_ok++;
+       priv_rx->rq_stats->tls_resync_res_ok++;
 unlock:
        spin_unlock_bh(&c->async_icosq_lock);
 
@@ -420,13 +417,13 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
        auth_state = MLX5_GET(tls_progress_params, ctx, auth_state);
        if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING ||
            auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) {
-               priv_rx->stats->tls_resync_req_skip++;
+               priv_rx->rq_stats->tls_resync_req_skip++;
                goto out;
        }
 
        hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn);
        tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
-       priv_rx->stats->tls_resync_req_end++;
+       priv_rx->rq_stats->tls_resync_req_end++;
 out:
        mlx5e_ktls_priv_rx_put(priv_rx);
        dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
@@ -609,7 +606,8 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
        priv_rx->rxq = rxq;
        priv_rx->sk = sk;
 
-       priv_rx->stats = &priv->channel_stats[rxq].rq;
+       priv_rx->rq_stats = &priv->channel_stats[rxq].rq;
+       priv_rx->sw_stats = &priv->tls->sw_stats;
        mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);
 
        rqtn = priv->direct_tir[rxq].rqt.rqtn;
@@ -630,7 +628,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
        if (err)
                goto err_post_wqes;
 
-       priv_rx->stats->tls_ctx++;
+       atomic64_inc(&priv_rx->sw_stats->rx_tls_ctx);
 
        return 0;
 
@@ -666,7 +664,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
        if (cancel_work_sync(&resync->work))
                mlx5e_ktls_priv_rx_put(priv_rx);
 
-       priv_rx->stats->tls_del++;
+       atomic64_inc(&priv_rx->sw_stats->rx_tls_del);
        if (priv_rx->rule.rule)
                mlx5e_accel_fs_del_sk(priv_rx->rule.rule);
 
index d16def6..51bdf71 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 // Copyright (c) 2019 Mellanox Technologies.
 
+#include "en_accel/tls.h"
 #include "en_accel/ktls_txrx.h"
 #include "en_accel/ktls_utils.h"
 
@@ -50,6 +51,7 @@ static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
 struct mlx5e_ktls_offload_context_tx {
        struct tls_offload_context_tx *tx_ctx;
        struct tls12_crypto_info_aes_gcm_128 crypto_info;
+       struct mlx5e_tls_sw_stats *sw_stats;
        u32 expected_seq;
        u32 tisn;
        u32 key_id;
@@ -99,6 +101,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
        if (err)
                goto err_create_key;
 
+       priv_tx->sw_stats = &priv->tls->sw_stats;
        priv_tx->expected_seq = start_offload_tcp_sn;
        priv_tx->crypto_info  =
                *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
@@ -111,6 +114,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
                goto err_create_tis;
 
        priv_tx->ctx_post_pending = true;
+       atomic64_inc(&priv_tx->sw_stats->tx_tls_ctx);
 
        return 0;
 
@@ -452,7 +456,6 @@ bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *s
 
        if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
                mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
-               stats->tls_ctx++;
        }
 
        seq = ntohl(tcp_hdr(skb)->seq);
index bd270a8..4c9274d 100644 (file)
 #include "en.h"
 
 struct mlx5e_tls_sw_stats {
+       atomic64_t tx_tls_ctx;
        atomic64_t tx_tls_drop_metadata;
        atomic64_t tx_tls_drop_resync_alloc;
        atomic64_t tx_tls_drop_no_sync_data;
        atomic64_t tx_tls_drop_bypass_required;
+       atomic64_t rx_tls_ctx;
+       atomic64_t rx_tls_del;
        atomic64_t rx_tls_drop_resync_request;
        atomic64_t rx_tls_resync_request;
        atomic64_t rx_tls_resync_reply;
index b949b9a..29463bd 100644 (file)
@@ -45,49 +45,60 @@ static const struct counter_desc mlx5e_tls_sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_bypass_required) },
 };
 
+static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {
+       { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_ctx) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_del) },
+};
+
 #define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
        atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
 
-#define NUM_TLS_SW_COUNTERS ARRAY_SIZE(mlx5e_tls_sw_stats_desc)
-
-static bool is_tls_atomic_stats(struct mlx5e_priv *priv)
+static const struct counter_desc *get_tls_atomic_stats(struct mlx5e_priv *priv)
 {
-       return priv->tls && !mlx5_accel_is_ktls_device(priv->mdev);
+       if (!priv->tls)
+               return NULL;
+       if (mlx5_accel_is_ktls_device(priv->mdev))
+               return mlx5e_ktls_sw_stats_desc;
+       return mlx5e_tls_sw_stats_desc;
 }
 
 int mlx5e_tls_get_count(struct mlx5e_priv *priv)
 {
-       if (!is_tls_atomic_stats(priv))
+       if (!priv->tls)
                return 0;
-
-       return NUM_TLS_SW_COUNTERS;
+       if (mlx5_accel_is_ktls_device(priv->mdev))
+               return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc);
+       return ARRAY_SIZE(mlx5e_tls_sw_stats_desc);
 }
 
 int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
 {
-       unsigned int i, idx = 0;
+       const struct counter_desc *stats_desc;
+       unsigned int i, n, idx = 0;
 
-       if (!is_tls_atomic_stats(priv))
-               return 0;
+       stats_desc = get_tls_atomic_stats(priv);
+       n = mlx5e_tls_get_count(priv);
 
-       for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
+       for (i = 0; i < n; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
-                      mlx5e_tls_sw_stats_desc[i].format);
+                      stats_desc[i].format);
 
-       return NUM_TLS_SW_COUNTERS;
+       return n;
 }
 
 int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data)
 {
-       int i, idx = 0;
+       const struct counter_desc *stats_desc;
+       unsigned int i, n, idx = 0;
 
-       if (!is_tls_atomic_stats(priv))
-               return 0;
+       stats_desc = get_tls_atomic_stats(priv);
+       n = mlx5e_tls_get_count(priv);
 
-       for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
+       for (i = 0; i < n; i++)
                data[idx++] =
                    MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
-                                           mlx5e_tls_sw_stats_desc, i);
+                                           stats_desc, i);
 
-       return NUM_TLS_SW_COUNTERS;
+       return n;
 }
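
The tls_stats.c rework keeps the descriptor-table pattern: one array of {name, offset} pairs drives both the string listing and the value dump, and selecting kTLS vs. plain TLS just swaps tables and lengths. A self-contained restatement with plain uint64_t reads in place of MLX5E_READ_CTR_ATOMIC64:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sw_stats { uint64_t tx_ctx; uint64_t rx_ctx; uint64_t rx_del; };

struct counter_desc { const char *format; size_t offset; };

static const struct counter_desc ktls_desc[] = {
	{ "tx_tls_ctx", offsetof(struct sw_stats, tx_ctx) },
	{ "rx_tls_ctx", offsetof(struct sw_stats, rx_ctx) },
	{ "rx_tls_del", offsetof(struct sw_stats, rx_del) },
};

#define READ_CTR(ptr, dsc, i) \
	(*(uint64_t *)((char *)(ptr) + (dsc)[i].offset))

int main(void)
{
	struct sw_stats s = { .tx_ctx = 3, .rx_ctx = 2, .rx_del = 1 };
	size_t i;

	for (i = 0; i < sizeof(ktls_desc) / sizeof(ktls_desc[0]); i++)
		printf("%s: %llu\n", ktls_desc[i].format,
		       (unsigned long long)READ_CTR(&s, ktls_desc, i));
	return 0;
}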
index abdf721..53802e1 100644 (file)
@@ -758,11 +758,11 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
        return 0;
 }
 
-static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings,
-                                                  u32 eth_proto_cap,
-                                                  u8 connector_type, bool ext)
+static void ptys2ethtool_supported_advertised_port(struct mlx5_core_dev *mdev,
+                                                  struct ethtool_link_ksettings *link_ksettings,
+                                                  u32 eth_proto_cap, u8 connector_type)
 {
-       if ((!connector_type && !ext) || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
+       if (!MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type)) {
                if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
                                   | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
                                   | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
@@ -898,9 +898,9 @@ static int ptys2connector_type[MLX5E_CONNECTOR_TYPE_NUMBER] = {
                [MLX5E_PORT_OTHER]              = PORT_OTHER,
        };
 
-static u8 get_connector_port(u32 eth_proto, u8 connector_type, bool ext)
+static u8 get_connector_port(struct mlx5_core_dev *mdev, u32 eth_proto, u8 connector_type)
 {
-       if ((connector_type || ext) && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
+       if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type))
                return ptys2connector_type[connector_type];
 
        if (eth_proto &
@@ -1001,11 +1001,11 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
                         data_rate_oper, link_ksettings);
 
        eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
-
-       link_ksettings->base.port = get_connector_port(eth_proto_oper,
-                                                      connector_type, ext);
-       ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin,
-                                              connector_type, ext);
+       connector_type = connector_type < MLX5E_CONNECTOR_TYPE_NUMBER ?
+                        connector_type : MLX5E_PORT_UNKNOWN;
+       link_ksettings->base.port = get_connector_port(mdev, eth_proto_oper, connector_type);
+       ptys2ethtool_supported_advertised_port(mdev, link_ksettings, eth_proto_admin,
+                                              connector_type);
        get_lp_advertising(mdev, eth_proto_lp, link_ksettings);
 
        if (an_status == MLX5_AN_COMPLETE)
@@ -1887,6 +1887,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
+       int err;
 
        if (!MLX5_CAP_GEN(mdev, cqe_compression))
                return -EOPNOTSUPP;
@@ -1896,7 +1897,10 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
                return -EINVAL;
        }
 
-       mlx5e_modify_rx_cqe_compression_locked(priv, enable);
+       err = mlx5e_modify_rx_cqe_compression_locked(priv, enable);
+       if (err)
+               return err;
+
        priv->channels.params.rx_cqe_compress_def = enable;
 
        return 0;
@@ -2014,8 +2018,13 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
         */
 
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+               struct mlx5e_params old_params;
+
+               old_params = priv->channels.params;
                priv->channels.params = new_channels.params;
                err = mlx5e_num_channels_changed(priv);
+               if (err)
+                       priv->channels.params = old_params;
                goto out;
        }
 
index ec2fcb2..5db63b9 100644 (file)
@@ -334,9 +334,9 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq
                                     rq->wqe_overflow.addr);
 }
 
-static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
+static u64 mlx5e_get_mpwqe_offset(u16 wqe_ix)
 {
-       return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT;
+       return MLX5E_REQUIRED_MTTS(wqe_ix) << PAGE_SHIFT;
 }
 
 static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
@@ -577,7 +577,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
                                mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
                        u32 byte_count =
                                rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
-                       u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);
+                       u64 dma_offset = mlx5e_get_mpwqe_offset(i);
 
                        wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom);
                        wqe->data[0].byte_count = cpu_to_be32(byte_count);
@@ -1091,6 +1091,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
 
        sq->channel   = c;
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
+       sq->reserved_room = param->stop_room;
 
        param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -2350,6 +2351,24 @@ void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
        mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp);
 }
 
+static void mlx5e_build_async_icosq_param(struct mlx5e_priv *priv,
+                                         struct mlx5e_params *params,
+                                         u8 log_wq_size,
+                                         struct mlx5e_sq_param *param)
+{
+       void *sqc = param->sqc;
+       void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+       mlx5e_build_sq_param_common(priv, param);
+
+       /* async_icosq is used by XSK only if xdp_prog is active */
+       if (params->xdp_prog)
+               param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */
+       MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
+       MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
+       mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp);
+}
+
 void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
                             struct mlx5e_params *params,
                             struct mlx5e_sq_param *param)
@@ -2368,8 +2387,9 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
 {
        switch (params->rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-               return order_base_2(MLX5E_UMR_WQEBBS) +
-                       mlx5e_get_rq_log_wq_sz(rqp->rqc);
+               return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE,
+                            order_base_2(MLX5E_UMR_WQEBBS) +
+                            mlx5e_get_rq_log_wq_sz(rqp->rqc));
        default: /* MLX5_WQ_TYPE_CYCLIC */
                return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
        }
@@ -2397,7 +2417,7 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
        mlx5e_build_sq_param(priv, params, &cparam->txq_sq);
        mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
        mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
-       mlx5e_build_icosq_param(priv, async_icosq_log_wq_sz, &cparam->async_icosq);
+       mlx5e_build_async_icosq_param(priv, params, async_icosq_log_wq_sz, &cparam->async_icosq);
 }
 
 int mlx5e_open_channels(struct mlx5e_priv *priv,
@@ -2502,8 +2522,10 @@ void mlx5e_close_channels(struct mlx5e_channels *chs)
 {
        int i;
 
-       if (chs->port_ptp)
+       if (chs->port_ptp) {
                mlx5e_port_ptp_close(chs->port_ptp);
+               chs->port_ptp = NULL;
+       }
 
        for (i = 0; i < chs->num; i++)
                mlx5e_close_channel(chs->c[i]);
@@ -3810,6 +3832,15 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
                for (j = 0; j < priv->max_opened_tc; j++) {
                        struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
 
+                       s->tx_packets    += sq_stats->packets;
+                       s->tx_bytes      += sq_stats->bytes;
+                       s->tx_dropped    += sq_stats->dropped;
+               }
+       }
+       if (priv->port_ptp_opened) {
+               for (i = 0; i < priv->max_opened_tc; i++) {
+                       struct mlx5e_sq_stats *sq_stats = &priv->port_ptp_stats.sq[i];
+
                        s->tx_packets    += sq_stats->packets;
                        s->tx_bytes      += sq_stats->bytes;
                        s->tx_dropped    += sq_stats->dropped;
@@ -3834,10 +3865,17 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
        }
 
        if (mlx5e_is_uplink_rep(priv)) {
+               struct mlx5e_vport_stats *vstats = &priv->stats.vport;
+
                stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
                stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
                stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
                stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
+
+               /* vport multicast also counts packets that are dropped due to steering
+                * or rx out of buffer
+                */
+               stats->multicast = VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
        } else {
                mlx5e_fold_sw_stats64(priv, stats);
        }
@@ -4683,8 +4721,10 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
                struct mlx5e_channel *c = priv->channels.c[i];
 
                mlx5e_rq_replace_xdp_prog(&c->rq, prog);
-               if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
+               if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) {
+                       bpf_prog_inc(prog);
                        mlx5e_rq_replace_xdp_prog(&c->xskrq, prog);
+               }
        }
 
 unlock:
@@ -4958,6 +4998,11 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
                                     priv->max_nch);
        params->num_tc       = 1;
 
+       /* Set an initial non-zero value, so that mlx5e_select_queue won't
+        * divide by zero if called before first activating channels.
+        */
+       priv->num_tc_x_num_ch = params->num_channels * params->num_tc;
+
        /* SQ */
        params->log_sq_size = is_kdump_kernel() ?
                MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
@@ -5474,8 +5519,6 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
                    struct net_device *netdev,
                    struct mlx5_core_dev *mdev)
 {
-       memset(priv, 0, sizeof(*priv));
-
        /* priv init */
        priv->mdev        = mdev;
        priv->netdev      = netdev;
@@ -5508,12 +5551,18 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
 {
        int i;
 
+       /* bail if change profile failed and also rollback failed */
+       if (!priv->mdev)
+               return;
+
        destroy_workqueue(priv->wq);
        free_cpumask_var(priv->scratchpad.cpumask);
 
        for (i = 0; i < priv->htb.max_qos_sqs; i++)
                kfree(priv->htb.qos_sq_stats[i]);
        kvfree(priv->htb.qos_sq_stats);
+
+       memset(priv, 0, sizeof(*priv));
 }
 
 struct net_device *
@@ -5630,11 +5679,10 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
 }
 
 static int
-mlx5e_netdev_attach_profile(struct mlx5e_priv *priv,
+mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
                            const struct mlx5e_profile *new_profile, void *new_ppriv)
 {
-       struct net_device *netdev = priv->netdev;
-       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;
 
        err = mlx5e_priv_init(priv, netdev, mdev);
@@ -5647,10 +5695,16 @@ mlx5e_netdev_attach_profile(struct mlx5e_priv *priv,
        priv->ppriv = new_ppriv;
        err = new_profile->init(priv->mdev, priv->netdev);
        if (err)
-               return err;
+               goto priv_cleanup;
        err = mlx5e_attach_netdev(priv);
        if (err)
-               new_profile->cleanup(priv);
+               goto profile_cleanup;
+       return err;
+
+profile_cleanup:
+       new_profile->cleanup(priv);
+priv_cleanup:
+       mlx5e_priv_cleanup(priv);
        return err;
 }
 
@@ -5659,13 +5713,14 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
 {
        unsigned int new_max_nch = mlx5e_calc_max_nch(priv, new_profile);
        const struct mlx5e_profile *orig_profile = priv->profile;
+       struct net_device *netdev = priv->netdev;
+       struct mlx5_core_dev *mdev = priv->mdev;
        void *orig_ppriv = priv->ppriv;
        int err, rollback_err;
 
        /* sanity */
        if (new_max_nch != priv->max_nch) {
-               netdev_warn(priv->netdev,
-                           "%s: Replacing profile with different max channels\n",
+               netdev_warn(netdev, "%s: Replacing profile with different max channels\n",
                            __func__);
                return -EINVAL;
        }
@@ -5675,22 +5730,19 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
        priv->profile->cleanup(priv);
        mlx5e_priv_cleanup(priv);
 
-       err = mlx5e_netdev_attach_profile(priv, new_profile, new_ppriv);
+       err = mlx5e_netdev_attach_profile(netdev, mdev, new_profile, new_ppriv);
        if (err) { /* roll back to original profile */
-               netdev_warn(priv->netdev, "%s: new profile init failed, %d\n",
-                           __func__, err);
+               netdev_warn(netdev, "%s: new profile init failed, %d\n", __func__, err);
                goto rollback;
        }
 
        return 0;
 
 rollback:
-       rollback_err = mlx5e_netdev_attach_profile(priv, orig_profile, orig_ppriv);
-       if (rollback_err) {
-               netdev_err(priv->netdev,
-                          "%s: failed to rollback to orig profile, %d\n",
+       rollback_err = mlx5e_netdev_attach_profile(netdev, mdev, orig_profile, orig_ppriv);
+       if (rollback_err)
+               netdev_err(netdev, "%s: failed to rollback to orig profile, %d\n",
                           __func__, rollback_err);
-       }
        return err;
 }
 
index a132fff..8d39bfe 100644 (file)
@@ -1107,8 +1107,9 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
 
        mlx5e_rep_tc_enable(priv);
 
-       mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
-                                     0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
+       if (MLX5_CAP_GEN(mdev, uplink_follow))
+               mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
+                                             0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
        mlx5_lag_add(mdev, netdev);
        priv->events_nb.notifier_call = uplink_rep_async_event;
        mlx5_notifier_register(mdev, &priv->events_nb);
index 1b6ad94..249d890 100644 (file)
@@ -500,7 +500,6 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
        struct mlx5e_icosq *sq = rq->icosq;
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5e_umr_wqe *umr_wqe;
-       u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
        u16 pi;
        int err;
        int i;
@@ -531,7 +530,8 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
        umr_wqe->ctrl.opmod_idx_opcode =
                cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
                            MLX5_OPCODE_UMR);
-       umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
+       umr_wqe->uctrl.xlt_offset =
+               cpu_to_be16(MLX5_ALIGNED_MTTS_OCTW(MLX5E_REQUIRED_MTTS(ix)));
 
        sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
                .wqe_type   = MLX5E_ICOSQ_WQE_UMR_RX,
index 92c5b81..88a01c5 100644 (file)
@@ -116,7 +116,6 @@ static const struct counter_desc sw_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
@@ -180,8 +179,6 @@ static const struct counter_desc sw_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_ctx) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_del) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
@@ -342,8 +339,6 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
 #ifdef CONFIG_MLX5_EN_TLS
        s->rx_tls_decrypted_packets   += rq_stats->tls_decrypted_packets;
        s->rx_tls_decrypted_bytes     += rq_stats->tls_decrypted_bytes;
-       s->rx_tls_ctx                 += rq_stats->tls_ctx;
-       s->rx_tls_del                 += rq_stats->tls_del;
        s->rx_tls_resync_req_pkt      += rq_stats->tls_resync_req_pkt;
        s->rx_tls_resync_req_start    += rq_stats->tls_resync_req_start;
        s->rx_tls_resync_req_end      += rq_stats->tls_resync_req_end;
@@ -390,7 +385,6 @@ static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
 #ifdef CONFIG_MLX5_EN_TLS
        s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
        s->tx_tls_encrypted_bytes   += sq_stats->tls_encrypted_bytes;
-       s->tx_tls_ctx               += sq_stats->tls_ctx;
        s->tx_tls_ooo               += sq_stats->tls_ooo;
        s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
        s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
@@ -1622,8 +1616,6 @@ static const struct counter_desc rq_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
-       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_ctx) },
-       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_del) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
@@ -1650,7 +1642,6 @@ static const struct counter_desc sq_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
-       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
@@ -1776,7 +1767,6 @@ static const struct counter_desc qos_sq_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
-       { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
index 93c4131..adf9b7b 100644 (file)
@@ -191,7 +191,6 @@ struct mlx5e_sw_stats {
 #ifdef CONFIG_MLX5_EN_TLS
        u64 tx_tls_encrypted_packets;
        u64 tx_tls_encrypted_bytes;
-       u64 tx_tls_ctx;
        u64 tx_tls_ooo;
        u64 tx_tls_dump_packets;
        u64 tx_tls_dump_bytes;
@@ -202,8 +201,6 @@ struct mlx5e_sw_stats {
 
        u64 rx_tls_decrypted_packets;
        u64 rx_tls_decrypted_bytes;
-       u64 rx_tls_ctx;
-       u64 rx_tls_del;
        u64 rx_tls_resync_req_pkt;
        u64 rx_tls_resync_req_start;
        u64 rx_tls_resync_req_end;
@@ -334,8 +331,6 @@ struct mlx5e_rq_stats {
 #ifdef CONFIG_MLX5_EN_TLS
        u64 tls_decrypted_packets;
        u64 tls_decrypted_bytes;
-       u64 tls_ctx;
-       u64 tls_del;
        u64 tls_resync_req_pkt;
        u64 tls_resync_req_start;
        u64 tls_resync_req_end;
@@ -364,7 +359,6 @@ struct mlx5e_sq_stats {
 #ifdef CONFIG_MLX5_EN_TLS
        u64 tls_encrypted_packets;
        u64 tls_encrypted_bytes;
-       u64 tls_ctx;
        u64 tls_ooo;
        u64 tls_dump_packets;
        u64 tls_dump_bytes;
index 0da69b9..df2a0af 100644 (file)
@@ -2296,6 +2296,16 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                        *match_level = MLX5_MATCH_L4;
        }
 
+       /* Currently supported only for MPLS over UDP */
+       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) &&
+           !netif_is_bareudp(filter_dev)) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Matching on MPLS is supported only for MPLS over UDP");
+               netdev_err(priv->netdev,
+                          "Matching on MPLS is supported only for MPLS over UDP\n");
+               return -EOPNOTSUPP;
+       }
+
        return 0;
 }
 
@@ -2899,6 +2909,37 @@ static int is_action_keys_supported(const struct flow_action_entry *act,
        return 0;
 }
 
+static bool modify_tuple_supported(bool modify_tuple, bool ct_clear,
+                                  bool ct_flow, struct netlink_ext_ack *extack,
+                                  struct mlx5e_priv *priv,
+                                  struct mlx5_flow_spec *spec)
+{
+       if (!modify_tuple || ct_clear)
+               return true;
+
+       if (ct_flow) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "can't offload tuple modification with non-clear ct()");
+               netdev_info(priv->netdev,
+                           "can't offload tuple modification with non-clear ct()");
+               return false;
+       }
+
+       /* Add ct_state=-trk match so it will be offloaded for non ct flows
+        * (or after clear action), as otherwise, since the tuple is changed,
+        * we can't restore ct state
+        */
+       if (mlx5_tc_ct_add_no_trk_match(spec)) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "can't offload tuple modification with ct matches and no ct(clear) action");
+               netdev_info(priv->netdev,
+                           "can't offload tuple modification with ct matches and no ct(clear) action");
+               return false;
+       }
+
+       return true;
+}
+
 static bool modify_header_match_supported(struct mlx5e_priv *priv,
                                          struct mlx5_flow_spec *spec,
                                          struct flow_action *flow_action,
@@ -2937,18 +2978,9 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv,
                        return err;
        }
 
-       /* Add ct_state=-trk match so it will be offloaded for non ct flows
-        * (or after clear action), as otherwise, since the tuple is changed,
-        *  we can't restore ct state
-        */
-       if (!ct_clear && modify_tuple &&
-           mlx5_tc_ct_add_no_trk_match(spec)) {
-               NL_SET_ERR_MSG_MOD(extack,
-                                  "can't offload tuple modify header with ct matches");
-               netdev_info(priv->netdev,
-                           "can't offload tuple modify header with ct matches");
+       if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack,
+                                   priv, spec))
                return false;
-       }
 
        ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
        if (modify_ip_header && ip_proto != IPPROTO_TCP &&
@@ -4445,7 +4477,8 @@ static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
         */
        if (rate) {
                rate = (rate * BITS_PER_BYTE) + 500000;
-               rate_mbps = max_t(u64, do_div(rate, 1000000), 1);
+               do_div(rate, 1000000);
+               rate_mbps = max_t(u32, rate, 1);
        }
 
        err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
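
The apply_police_params() hunk fixes a classic do_div() misuse: the kernel macro divides its first argument in place and returns the remainder, so the old code assigned the remainder, not the rate in Mbps. A userspace stand-in (GCC statement-expression macro) showing both readings:

#include <stdint.h>
#include <stdio.h>

#define do_div(n, base) ({ uint32_t rem = (n) % (base); (n) /= (base); rem; })

int main(void)
{
	uint64_t rate = (12345678ULL * 8) + 500000;	/* bits/sec, rounded */
	uint64_t q = rate;
	uint32_t rem;

	rem = do_div(q, 1000000);	/* old code used this return value */
	printf("remainder (old, wrong): %u\n", rem);
	printf("rate_mbps (fixed, quotient): %llu\n", (unsigned long long)q);
	return 0;
}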
index 89003ae..25c0917 100644 (file)
@@ -79,6 +79,7 @@ struct mlx5_flow_attr {
        u8 inner_match_level;
        u8 outer_match_level;
        u8 ip_version;
+       u8 tun_ip_version;
        u32 flags;
        union {
                struct mlx5_esw_flow_attr esw_attr[0];
index 174dfbc..1fa9c18 100644 (file)
@@ -931,13 +931,24 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
        mutex_unlock(&table->lock);
 }
 
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+#define MLX5_MAX_ASYNC_EQS 4
+#else
+#define MLX5_MAX_ASYNC_EQS 3
+#endif
+
 int mlx5_eq_table_create(struct mlx5_core_dev *dev)
 {
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;
+       int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
+                     MLX5_CAP_GEN(dev, max_num_eqs) :
+                     1 << MLX5_CAP_GEN(dev, log_max_eq);
        int err;
 
        eq_table->num_comp_eqs =
-               mlx5_irq_get_num_comp(eq_table->irq_table);
+               min_t(int,
+                     mlx5_irq_get_num_comp(eq_table->irq_table),
+                     num_eqs - MLX5_MAX_ASYNC_EQS);
 
        err = create_async_eqs(dev);
        if (err) {
index 6f6772b..3da7bec 100644 (file)
@@ -248,7 +248,7 @@ err_mod_hdr_regc0:
 err_ethertype:
        kfree(rule);
 out:
-       kfree(rule_spec);
+       kvfree(rule_spec);
        return err;
 }
 
@@ -328,7 +328,7 @@ static int mlx5_create_indir_recirc_group(struct mlx5_eswitch *esw,
        e->recirc_cnt = 0;
 
 out:
-       kfree(in);
+       kvfree(in);
        return err;
 }
 
@@ -347,7 +347,7 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
 
        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
-               kfree(in);
+               kvfree(in);
                return -ENOMEM;
        }
 
@@ -371,8 +371,8 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
        }
 
 err_out:
-       kfree(spec);
-       kfree(in);
+       kvfree(spec);
+       kvfree(in);
        return err;
 }
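
The hunks above fix mismatched allocator/free pairs: these buffers come from kvzalloc(), which may fall back to vmalloc for large sizes, so they must be released with kvfree(). A minimal sketch of the correct pairing:

        struct mlx5_flow_spec *spec;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL); /* slab- or vmalloc-backed */
        if (!spec)
                return -ENOMEM;
        /* ... use spec ... */
        kvfree(spec); /* correct for both backings; kfree() is not */
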
 
index 94cb021..d4a2f8d 100644 (file)
@@ -537,6 +537,14 @@ esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *
        return i;
 }
 
+static bool
+esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
+{
+       return MLX5_CAP_GEN(esw->dev, reg_c_preserve) &&
+              mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+              MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
+}
+
 static int
 esw_setup_dests(struct mlx5_flow_destination *dest,
                struct mlx5_flow_act *flow_act,
@@ -550,8 +558,7 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
        int err = 0;
 
        if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
-           MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve) &&
-           mlx5_eswitch_vport_match_metadata_enabled(esw))
+           esw_src_port_rewrite_supported(esw))
                attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;
 
        if (attr->dest_ft) {
@@ -1715,36 +1722,40 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
        }
        esw->fdb_table.offloads.send_to_vport_grp = g;
 
-       /* meta send to vport */
-       memset(flow_group_in, 0, inlen);
-       MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
-                MLX5_MATCH_MISC_PARAMETERS_2);
-
-       match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+       if (esw_src_port_rewrite_supported(esw)) {
+               /* meta send to vport */
+               memset(flow_group_in, 0, inlen);
+               MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+                        MLX5_MATCH_MISC_PARAMETERS_2);
 
-       MLX5_SET(fte_match_param, match_criteria,
-                misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
-       MLX5_SET(fte_match_param, match_criteria,
-                misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
+               match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
 
-       num_vfs = esw->esw_funcs.num_vfs;
-       if (num_vfs) {
-               MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
-               MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + num_vfs - 1);
-               ix += num_vfs;
+               MLX5_SET(fte_match_param, match_criteria,
+                        misc_parameters_2.metadata_reg_c_0,
+                        mlx5_eswitch_get_vport_metadata_mask());
+               MLX5_SET(fte_match_param, match_criteria,
+                        misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
 
-               g = mlx5_create_flow_group(fdb, flow_group_in);
-               if (IS_ERR(g)) {
-                       err = PTR_ERR(g);
-                       esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
-                                err);
-                       goto send_vport_meta_err;
+               num_vfs = esw->esw_funcs.num_vfs;
+               if (num_vfs) {
+                       MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
+                       MLX5_SET(create_flow_group_in, flow_group_in,
+                                end_flow_index, ix + num_vfs - 1);
+                       ix += num_vfs;
+
+                       g = mlx5_create_flow_group(fdb, flow_group_in);
+                       if (IS_ERR(g)) {
+                               err = PTR_ERR(g);
+                               esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
+                                        err);
+                               goto send_vport_meta_err;
+                       }
+                       esw->fdb_table.offloads.send_to_vport_meta_grp = g;
+
+                       err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
+                       if (err)
+                               goto meta_rule_err;
                }
-               esw->fdb_table.offloads.send_to_vport_meta_grp = g;
-
-               err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
-               if (err)
-                       goto meta_rule_err;
        }
 
        if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
index 80da50e..bd66ab2 100644 (file)
@@ -575,6 +575,7 @@ static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn,
        MLX5_SET(qpc, qpc, log_sq_size, ilog2(conn->qp.sq.size));
        MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
        MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
+       MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev));
        MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma);
        if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
                MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
index 1eeca45..6f7cef4 100644 (file)
@@ -233,6 +233,7 @@ int mlx5i_create_underlay_qp(struct mlx5e_priv *priv)
        }
 
        qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+       MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(priv->mdev));
        MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD);
        MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
        MLX5_SET(qpc, qpc, ulp_stateless_offload_mode,
@@ -694,6 +695,7 @@ static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev)
 static void mlx5_rdma_netdev_free(struct net_device *netdev)
 {
        struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+       struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5i_priv *ipriv = priv->ppriv;
        const struct mlx5e_profile *profile = priv->profile;
 
@@ -702,7 +704,7 @@ static void mlx5_rdma_netdev_free(struct net_device *netdev)
 
        if (!ipriv->sub_interface) {
                mlx5i_pkey_qpn_ht_cleanup(netdev);
-               mlx5e_destroy_mdev_resources(priv->mdev);
+               mlx5e_destroy_mdev_resources(mdev);
        }
 }
 
index b0e129d..1e7f26b 100644 (file)
@@ -495,15 +495,15 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
                return -EINVAL;
 
        field_select = MLX5_MTPPS_FS_ENABLE;
+       pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index);
+       if (pin < 0)
+               return -EBUSY;
+
        if (on) {
                bool rt_mode = mlx5_real_time_mode(mdev);
                u32 nsec;
                s64 sec;
 
-               pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index);
-               if (pin < 0)
-                       return -EBUSY;
-
                pin_mode = MLX5_PIN_MODE_OUT;
                pattern = MLX5_OUT_PATTERN_PERIODIC;
                ts.tv_sec = rq->perout.period.sec;
index b265f27..90b524c 100644 (file)
@@ -181,15 +181,13 @@ static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table)
        u16 max_functions;
        u16 function_id;
        int err = 0;
-       bool ecpu;
        int i;
 
        max_functions = mlx5_sf_max_functions(dev);
        function_id = MLX5_CAP_GEN(dev, sf_base_id);
-       ecpu = mlx5_read_embedded_cpu(dev);
        /* Arm the vhca context as the vhca event notifier */
        for (i = 0; i < max_functions; i++) {
-               err = mlx5_vhca_event_arm(dev, function_id, ecpu);
+               err = mlx5_vhca_event_arm(dev, function_id);
                if (err)
                        return err;
 
index 58b6be0..a5a0f60 100644 (file)
@@ -6,7 +6,7 @@
 #include "sf.h"
 #include "mlx5_ifc_vhca_event.h"
 #include "vhca_event.h"
-#include "ecpf.h"
+#include "mlx5_core.h"
 
 struct mlx5_sf_hw {
        u32 usr_sfnum;
@@ -18,7 +18,6 @@ struct mlx5_sf_hw_table {
        struct mlx5_core_dev *dev;
        struct mlx5_sf_hw *sfs;
        int max_local_functions;
-       u8 ecpu: 1;
        struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */
        struct notifier_block vhca_nb;
 };
@@ -64,7 +63,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum)
        }
        if (sw_id == -ENOSPC) {
                err = -ENOSPC;
-               goto err;
+               goto exist_err;
        }
 
        hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sw_id);
@@ -72,7 +71,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum)
        if (err)
                goto err;
 
-       err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, table->ecpu, usr_sfnum);
+       err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, usr_sfnum);
        if (err)
                goto vhca_err;
 
@@ -118,7 +117,7 @@ void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id)
 
        hw_fn_id = mlx5_sf_sw_to_hw_id(dev, id);
        mutex_lock(&table->table_lock);
-       err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, table->ecpu, out, sizeof(out));
+       err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, out, sizeof(out));
        if (err)
                goto err;
        state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state);
@@ -164,7 +163,6 @@ int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
        table->dev = dev;
        table->sfs = sfs;
        table->max_local_functions = max_functions;
-       table->ecpu = mlx5_read_embedded_cpu(dev);
        dev->priv.sf_hw_table = table;
        mlx5_core_dbg(dev, "SF HW table: max sfs = %d\n", max_functions);
        return 0;
index 1daf5a1..4fc8701 100644 (file)
@@ -20,7 +20,7 @@ struct mlx5_ifc_vhca_state_context_bits {
 
        u8         sw_function_id[0x20];
 
-       u8         reserved_at_40[0x80];
+       u8         reserved_at_40[0x40];
 };
 
 struct mlx5_ifc_query_vhca_state_out_bits {
index af2f2dd..28b14b0 100644 (file)
@@ -19,52 +19,51 @@ struct mlx5_vhca_event_work {
        struct mlx5_vhca_state_event event;
 };
 
-int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
-                             bool ecpu, u32 *out, u32 outlen)
+int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, u32 *out, u32 outlen)
 {
        u32 in[MLX5_ST_SZ_DW(query_vhca_state_in)] = {};
 
        MLX5_SET(query_vhca_state_in, in, opcode, MLX5_CMD_OP_QUERY_VHCA_STATE);
        MLX5_SET(query_vhca_state_in, in, function_id, function_id);
-       MLX5_SET(query_vhca_state_in, in, embedded_cpu_function, ecpu);
+       MLX5_SET(query_vhca_state_in, in, embedded_cpu_function, 0);
 
        return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
 }
 
 static int mlx5_cmd_modify_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
-                                     bool ecpu, u32 *in, u32 inlen)
+                                     u32 *in, u32 inlen)
 {
        u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {};
 
        MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE);
        MLX5_SET(modify_vhca_state_in, in, function_id, function_id);
-       MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu);
+       MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, 0);
 
        return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 }
 
-int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id)
+int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, u32 sw_fn_id)
 {
        u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {};
        u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {};
 
        MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE);
        MLX5_SET(modify_vhca_state_in, in, function_id, function_id);
-       MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu);
+       MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, 0);
        MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.sw_function_id, 1);
        MLX5_SET(modify_vhca_state_in, in, vhca_state_context.sw_function_id, sw_fn_id);
 
        return mlx5_cmd_exec_inout(dev, modify_vhca_state, in, out);
 }
 
-int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu)
+int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id)
 {
        u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {};
 
        MLX5_SET(modify_vhca_state_in, in, vhca_state_context.arm_change_event, 1);
        MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.arm_change_event, 1);
 
-       return mlx5_cmd_modify_vhca_state(dev, function_id, ecpu, in, sizeof(in));
+       return mlx5_cmd_modify_vhca_state(dev, function_id, in, sizeof(in));
 }
 
 static void
@@ -73,7 +72,7 @@ mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event *
        u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
        int err;
 
-       err = mlx5_cmd_query_vhca_state(dev, event->function_id, event->ecpu, out, sizeof(out));
+       err = mlx5_cmd_query_vhca_state(dev, event->function_id, out, sizeof(out));
        if (err)
                return;
 
@@ -82,7 +81,7 @@ mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event *
        event->new_vhca_state = MLX5_GET(query_vhca_state_out, out,
                                         vhca_state_context.vhca_state);
 
-       mlx5_vhca_event_arm(dev, event->function_id, event->ecpu);
+       mlx5_vhca_event_arm(dev, event->function_id);
 
        blocking_notifier_call_chain(&dev->priv.vhca_state_notifier->n_head, 0, event);
 }
@@ -94,6 +93,7 @@ static void mlx5_vhca_state_work_handler(struct work_struct *_work)
        struct mlx5_core_dev *dev = notifier->dev;
 
        mlx5_vhca_event_notify(dev, &work->event);
+       kfree(work);
 }
 
 static int
@@ -110,7 +110,6 @@ mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, v
        INIT_WORK(&work->work, &mlx5_vhca_state_work_handler);
        work->notifier = notifier;
        work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id);
-       work->event.ecpu = be16_to_cpu(eqe->data.vhca_state.ec_function);
        mlx5_events_work_enqueue(notifier->dev, &work->work);
        return NOTIFY_OK;
 }
index 1fe1ec6..013cdfe 100644 (file)
@@ -10,7 +10,6 @@ struct mlx5_vhca_state_event {
        u16 function_id;
        u16 sw_function_id;
        u8 new_vhca_state;
-       bool ecpu;
 };
 
 static inline bool mlx5_vhca_event_supported(const struct mlx5_core_dev *dev)
@@ -25,10 +24,10 @@ void mlx5_vhca_event_start(struct mlx5_core_dev *dev);
 void mlx5_vhca_event_stop(struct mlx5_core_dev *dev);
 int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
 void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
-int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id);
-int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu);
+int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, u32 sw_fn_id);
+int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id);
 int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
-                             bool ecpu, u32 *out, u32 outlen);
+                             u32 *out, u32 outlen);
 #else
 
 static inline void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)
index 83c4c87..8a6a56f 100644 (file)
@@ -169,6 +169,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
        MLX5_SET(qpc, qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt));
        MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
        MLX5_SET(qpc, qpc, log_sq_size, ilog2(dr_qp->sq.wqe_cnt));
+       MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev));
        MLX5_SET64(qpc, qpc, dbr_addr, dr_qp->wq_ctrl.db.dma);
        if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
                MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
index 4088d6e..9143ec3 100644 (file)
@@ -264,8 +264,8 @@ static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
 static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
 {
        u64 index =
-               (MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) |
-                MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32) << 26);
+               ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) |
+                ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32)) << 26);
 
        return index << 6;
 }
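
The casts added above matter because MLX5_GET() yields a 32-bit value: without widening, the shift of miss_address_39_32 is performed in 32-bit arithmetic and its top bits are lost before the result ever reaches the u64. A worked example with an illustrative value:

        u32 hi = 0xff;             /* miss_address_39_32 */
        u64 wrong = hi << 26;      /* 32-bit shift: 0xfc000000, top bits lost */
        u64 right = (u64)hi << 26; /* 0x3fc000000, as intended */
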
index d9d9e1f..ba28ac7 100644 (file)
@@ -21,6 +21,7 @@
 #include <net/red.h>
 #include <net/vxlan.h>
 #include <net/flow_offload.h>
+#include <net/inet_ecn.h>
 
 #include "port.h"
 #include "core.h"
@@ -347,6 +348,20 @@ struct mlxsw_sp_port_type_speed_ops {
        u32 (*ptys_proto_cap_masked_get)(u32 eth_proto_cap);
 };
 
+static inline u8 mlxsw_sp_tunnel_ecn_decap(u8 outer_ecn, u8 inner_ecn,
+                                          bool *trap_en)
+{
+       bool set_ce = false;
+
+       *trap_en = !!__INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
+       if (set_ce)
+               return INET_ECN_CE;
+       else if (outer_ecn == INET_ECN_ECT_1 && inner_ecn == INET_ECN_ECT_0)
+               return INET_ECN_ECT_1;
+       else
+               return inner_ecn;
+}
+
 static inline struct net_device *
 mlxsw_sp_bridge_vxlan_dev_find(struct net_device *br_dev)
 {
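
A hedged usage sketch of the new helper, with outcomes following the RFC 6040 decapsulation rules it wraps; the IP-in-IP and NVE decap paths below are both converted to it:

        bool trap;
        u8 ecn;

        /* outer CE on an ECT inner: congestion propagates, no trap */
        ecn = mlxsw_sp_tunnel_ecn_decap(INET_ECN_CE, INET_ECN_ECT_0, &trap);
        /* ecn == INET_ECN_CE, trap == false */

        /* outer CE on a not-ECT inner is invalid: the packet is trapped */
        ecn = mlxsw_sp_tunnel_ecn_decap(INET_ECN_CE, INET_ECN_NOT_ECT, &trap);
        /* ecn == INET_ECN_NOT_ECT, trap == true */
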
index 0bd6416..078601d 100644 (file)
@@ -1230,16 +1230,22 @@ mlxsw_sp1_from_ptys_link_mode(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
                              u32 ptys_eth_proto,
                              struct ethtool_link_ksettings *cmd)
 {
+       struct mlxsw_sp1_port_link_mode link;
        int i;
 
-       cmd->link_mode = -1;
+       cmd->base.speed = SPEED_UNKNOWN;
+       cmd->base.duplex = DUPLEX_UNKNOWN;
+       cmd->lanes = 0;
 
        if (!carrier_ok)
                return;
 
        for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
-               if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
-                       cmd->link_mode = mlxsw_sp1_port_link_mode[i].mask_ethtool;
+               if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) {
+                       link = mlxsw_sp1_port_link_mode[i];
+                       ethtool_params_from_link_mode(cmd,
+                                                     link.mask_ethtool);
+               }
        }
 }
 
@@ -1672,7 +1678,9 @@ mlxsw_sp2_from_ptys_link_mode(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
        struct mlxsw_sp2_port_link_mode link;
        int i;
 
-       cmd->link_mode = -1;
+       cmd->base.speed = SPEED_UNKNOWN;
+       cmd->base.duplex = DUPLEX_UNKNOWN;
+       cmd->lanes = 0;
 
        if (!carrier_ok)
                return;
@@ -1680,7 +1688,8 @@ mlxsw_sp2_from_ptys_link_mode(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
        for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
                if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) {
                        link = mlxsw_sp2_port_link_mode[i];
-                       cmd->link_mode = link.mask_ethtool[1];
+                       ethtool_params_from_link_mode(cmd,
+                                                     link.mask_ethtool[1]);
                }
        }
 }
index 6ccca39..64a8f83 100644 (file)
@@ -335,12 +335,11 @@ static int mlxsw_sp_ipip_ecn_decap_init_one(struct mlxsw_sp *mlxsw_sp,
                                            u8 inner_ecn, u8 outer_ecn)
 {
        char tidem_pl[MLXSW_REG_TIDEM_LEN];
-       bool trap_en, set_ce = false;
        u8 new_inner_ecn;
+       bool trap_en;
 
-       trap_en = __INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
-       new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn;
-
+       new_inner_ecn = mlxsw_sp_tunnel_ecn_decap(outer_ecn, inner_ecn,
+                                                 &trap_en);
        mlxsw_reg_tidem_pack(tidem_pl, outer_ecn, inner_ecn, new_inner_ecn,
                             trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tidem), tidem_pl);
index e5ec595..9eba8fa 100644 (file)
@@ -909,12 +909,11 @@ static int __mlxsw_sp_nve_ecn_decap_init(struct mlxsw_sp *mlxsw_sp,
                                         u8 inner_ecn, u8 outer_ecn)
 {
        char tndem_pl[MLXSW_REG_TNDEM_LEN];
-       bool trap_en, set_ce = false;
        u8 new_inner_ecn;
+       bool trap_en;
 
-       trap_en = !!__INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
-       new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn;
-
+       new_inner_ecn = mlxsw_sp_tunnel_ecn_decap(outer_ecn, inner_ecn,
+                                                 &trap_en);
        mlxsw_reg_tndem_pack(tndem_pl, outer_ecn, inner_ecn, new_inner_ecn,
                             trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tndem), tndem_pl);
index 1c3e204..7b6794a 100644 (file)
@@ -885,8 +885,8 @@ static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
        }
 
        mac_rx &= ~(MAC_RX_MAX_SIZE_MASK_);
-       mac_rx |= (((new_mtu + ETH_HLEN + 4) << MAC_RX_MAX_SIZE_SHIFT_) &
-                 MAC_RX_MAX_SIZE_MASK_);
+       mac_rx |= (((new_mtu + ETH_HLEN + ETH_FCS_LEN)
+                 << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
        lan743x_csr_write(adapter, MAC_RX, mac_rx);
 
        if (enabled) {
@@ -1944,7 +1944,7 @@ static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index)
        struct sk_buff *skb;
        dma_addr_t dma_ptr;
 
-       buffer_length = netdev->mtu + ETH_HLEN + 4 + RX_HEAD_PADDING;
+       buffer_length = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + RX_HEAD_PADDING;
 
        descriptor = &rx->ring_cpu_ptr[index];
        buffer_info = &rx->buffer_info[index];
@@ -2040,7 +2040,7 @@ lan743x_rx_trim_skb(struct sk_buff *skb, int frame_length)
                dev_kfree_skb_irq(skb);
                return NULL;
        }
-       frame_length = max_t(int, 0, frame_length - RX_HEAD_PADDING - 4);
+       frame_length = max_t(int, 0, frame_length - ETH_FCS_LEN);
        if (skb->len > frame_length) {
                skb->tail -= skb->len - frame_length;
                skb->len = frame_length;
index 1634ca6..c84c8bf 100644 (file)
@@ -2897,7 +2897,7 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
                        dev_kfree_skb_any(curr);
                        if (segs != NULL) {
                                curr = segs;
-                               segs = segs->next;
+                               segs = next;
                                curr->next = NULL;
                                dev_kfree_skb_any(segs);
                        }
index 0e2db6e..2ec62c8 100644 (file)
@@ -454,6 +454,7 @@ void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
                        dev_consume_skb_any(skb);
                else
                        dev_kfree_skb_any(skb);
+               return;
        }
 
        nfp_ccm_rx(&bpf->ccm, skb);
index caf12ee..56833a4 100644 (file)
@@ -190,6 +190,7 @@ struct nfp_fl_internal_ports {
  * @qos_rate_limiters: Current active qos rate limiters
  * @qos_stats_lock:    Lock on qos stats updates
  * @pre_tun_rule_cnt:  Number of pre-tunnel rules offloaded
+ * @merge_table:       Hash table to store merged flows
  */
 struct nfp_flower_priv {
        struct nfp_app *app;
@@ -223,6 +224,7 @@ struct nfp_flower_priv {
        unsigned int qos_rate_limiters;
        spinlock_t qos_stats_lock; /* Protect the qos stats */
        int pre_tun_rule_cnt;
+       struct rhashtable merge_table;
 };
 
 /**
@@ -350,6 +352,12 @@ struct nfp_fl_payload_link {
 };
 
 extern const struct rhashtable_params nfp_flower_table_params;
+extern const struct rhashtable_params merge_table_params;
+
+struct nfp_merge_info {
+       u64 parent_ctx;
+       struct rhash_head ht_node;
+};
 
 struct nfp_fl_stats_frame {
        __be32 stats_con_id;
index 5defd31..327bb56 100644 (file)
@@ -327,8 +327,14 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
                goto err_free_ctx_entry;
        }
 
+       /* Do not allocate a mask-id for pre_tun_rules. These flows are used to
+        * configure the pre_tun table and are never actually sent to the
+        * firmware as an add-flow message. This causes the mask-id allocation
+        * on the firmware to get out of sync if allocated here.
+        */
        new_mask_id = 0;
-       if (!nfp_check_mask_add(app, nfp_flow->mask_data,
+       if (!nfp_flow->pre_tun_rule.dev &&
+           !nfp_check_mask_add(app, nfp_flow->mask_data,
                                nfp_flow->meta.mask_len,
                                &nfp_flow->meta.flags, &new_mask_id)) {
                NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id");
@@ -359,7 +365,8 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
                        goto err_remove_mask;
                }
 
-               if (!nfp_check_mask_remove(app, nfp_flow->mask_data,
+               if (!nfp_flow->pre_tun_rule.dev &&
+                   !nfp_check_mask_remove(app, nfp_flow->mask_data,
                                           nfp_flow->meta.mask_len,
                                           NULL, &new_mask_id)) {
                        NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release mask id");
@@ -374,8 +381,10 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
        return 0;
 
 err_remove_mask:
-       nfp_check_mask_remove(app, nfp_flow->mask_data, nfp_flow->meta.mask_len,
-                             NULL, &new_mask_id);
+       if (!nfp_flow->pre_tun_rule.dev)
+               nfp_check_mask_remove(app, nfp_flow->mask_data,
+                                     nfp_flow->meta.mask_len,
+                                     NULL, &new_mask_id);
 err_remove_rhash:
        WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
                                            &ctx_entry->ht_node,
@@ -406,9 +415,10 @@ int nfp_modify_flow_metadata(struct nfp_app *app,
 
        __nfp_modify_flow_metadata(priv, nfp_flow);
 
-       nfp_check_mask_remove(app, nfp_flow->mask_data,
-                             nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
-                             &new_mask_id);
+       if (!nfp_flow->pre_tun_rule.dev)
+               nfp_check_mask_remove(app, nfp_flow->mask_data,
+                                     nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
+                                     &new_mask_id);
 
        /* Update flow payload with mask ids. */
        nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
@@ -480,6 +490,12 @@ const struct rhashtable_params nfp_flower_table_params = {
        .automatic_shrinking    = true,
 };
 
+const struct rhashtable_params merge_table_params = {
+       .key_offset     = offsetof(struct nfp_merge_info, parent_ctx),
+       .head_offset    = offsetof(struct nfp_merge_info, ht_node),
+       .key_len        = sizeof(u64),
+};
+
 int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
                             unsigned int host_num_mems)
 {
@@ -496,6 +512,10 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
        if (err)
                goto err_free_flow_table;
 
+       err = rhashtable_init(&priv->merge_table, &merge_table_params);
+       if (err)
+               goto err_free_stats_ctx_table;
+
        get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
 
        /* Init ring buffer and unallocated mask_ids. */
@@ -503,7 +523,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
                kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
                              NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
        if (!priv->mask_ids.mask_id_free_list.buf)
-               goto err_free_stats_ctx_table;
+               goto err_free_merge_table;
 
        priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
 
@@ -540,6 +560,8 @@ err_free_last_used:
        kfree(priv->mask_ids.last_used);
 err_free_mask_id:
        kfree(priv->mask_ids.mask_id_free_list.buf);
+err_free_merge_table:
+       rhashtable_destroy(&priv->merge_table);
 err_free_stats_ctx_table:
        rhashtable_destroy(&priv->stats_ctx_table);
 err_free_flow_table:
@@ -558,6 +580,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
                                    nfp_check_rhashtable_empty, NULL);
        rhashtable_free_and_destroy(&priv->stats_ctx_table,
                                    nfp_check_rhashtable_empty, NULL);
+       rhashtable_free_and_destroy(&priv->merge_table,
+                                   nfp_check_rhashtable_empty, NULL);
        kvfree(priv->stats);
        kfree(priv->mask_ids.mask_id_free_list.buf);
        kfree(priv->mask_ids.last_used);
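
The merge table registered above keys on a single u64 built from the two sub-flows' host context IDs. A hedged sketch of the insert path used by offload.c below, with hypothetical id variables:

        u64 key = (u64)be32_to_cpu(id1) << 32 | be32_to_cpu(id2);
        struct nfp_merge_info *mi;

        mi = kmalloc(sizeof(*mi), GFP_KERNEL);
        if (!mi)
                return -ENOMEM;
        mi->parent_ctx = key;
        err = rhashtable_insert_fast(&priv->merge_table, &mi->ht_node,
                                     merge_table_params);
        if (err)
                kfree(mi); /* nothing was inserted on failure */
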
index 1c59aff..e95969c 100644 (file)
@@ -1009,6 +1009,8 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
        struct netlink_ext_ack *extack = NULL;
        struct nfp_fl_payload *merge_flow;
        struct nfp_fl_key_ls merge_key_ls;
+       struct nfp_merge_info *merge_info;
+       u64 parent_ctx = 0;
        int err;
 
        ASSERT_RTNL();
@@ -1019,6 +1021,15 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
            nfp_flower_is_merge_flow(sub_flow2))
                return -EINVAL;
 
+       /* check if the two flows are already merged */
+       parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32;
+       parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id));
+       if (rhashtable_lookup_fast(&priv->merge_table,
+                                  &parent_ctx, merge_table_params)) {
+               nfp_flower_cmsg_warn(app, "The two flows are already merged.\n");
+               return 0;
+       }
+
        err = nfp_flower_can_merge(sub_flow1, sub_flow2);
        if (err)
                return err;
@@ -1060,16 +1071,33 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
        if (err)
                goto err_release_metadata;
 
+       merge_info = kmalloc(sizeof(*merge_info), GFP_KERNEL);
+       if (!merge_info) {
+               err = -ENOMEM;
+               goto err_remove_rhash;
+       }
+       merge_info->parent_ctx = parent_ctx;
+       err = rhashtable_insert_fast(&priv->merge_table, &merge_info->ht_node,
+                                    merge_table_params);
+       if (err)
+               goto err_destroy_merge_info;
+
        err = nfp_flower_xmit_flow(app, merge_flow,
                                   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
        if (err)
-               goto err_remove_rhash;
+               goto err_remove_merge_info;
 
        merge_flow->in_hw = true;
        sub_flow1->in_hw = false;
 
        return 0;
 
+err_remove_merge_info:
+       WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
+                                           &merge_info->ht_node,
+                                           merge_table_params));
+err_destroy_merge_info:
+       kfree(merge_info);
 err_remove_rhash:
        WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
                                            &merge_flow->fl_node,
@@ -1142,6 +1170,12 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
                return -EOPNOTSUPP;
        }
 
+       if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
+           !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
+               NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on ipv4/ipv6 eth_type must be present");
+               return -EOPNOTSUPP;
+       }
+
        /* Skip fields known to exist. */
        mask += sizeof(struct nfp_flower_meta_tci);
        ext += sizeof(struct nfp_flower_meta_tci);
@@ -1152,6 +1186,13 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
        mask += sizeof(struct nfp_flower_in_port);
        ext += sizeof(struct nfp_flower_in_port);
 
+       /* Ensure destination MAC address matches pre_tun_dev. */
+       mac = (struct nfp_flower_mac_mpls *)ext;
+       if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr, 6)) {
+               NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC must match output dev MAC");
+               return -EOPNOTSUPP;
+       }
+
        /* Ensure destination MAC address is fully matched. */
        mac = (struct nfp_flower_mac_mpls *)mask;
        if (!is_broadcast_ether_addr(&mac->mac_dst[0])) {
@@ -1159,6 +1200,11 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
                return -EOPNOTSUPP;
        }
 
+       if (mac->mpls_lse) {
+               NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MPLS not supported");
+               return -EOPNOTSUPP;
+       }
+
        mask += sizeof(struct nfp_flower_mac_mpls);
        ext += sizeof(struct nfp_flower_mac_mpls);
        if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
@@ -1341,7 +1387,9 @@ nfp_flower_remove_merge_flow(struct nfp_app *app,
 {
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_payload_link *link, *temp;
+       struct nfp_merge_info *merge_info;
        struct nfp_fl_payload *origin;
+       u64 parent_ctx = 0;
        bool mod = false;
        int err;
 
@@ -1378,8 +1426,22 @@ nfp_flower_remove_merge_flow(struct nfp_app *app,
 err_free_links:
        /* Clean any links connected with the merged flow. */
        list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
-                                merge_flow.list)
+                                merge_flow.list) {
+               u32 ctx_id = be32_to_cpu(link->sub_flow.flow->meta.host_ctx_id);
+
+               parent_ctx = (parent_ctx << 32) | (u64)(ctx_id);
                nfp_flower_unlink_flow(link);
+       }
+
+       merge_info = rhashtable_lookup_fast(&priv->merge_table,
+                                           &parent_ctx,
+                                           merge_table_params);
+       if (merge_info) {
+               WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
+                                                   &merge_info->ht_node,
+                                                   merge_table_params));
+               kfree(merge_info);
+       }
 
        kfree(merge_flow->action_data);
        kfree(merge_flow->mask_data);
index 7248d24..d19c02e 100644 (file)
@@ -16,8 +16,9 @@
 #define NFP_FL_MAX_ROUTES               32
 
 #define NFP_TUN_PRE_TUN_RULE_LIMIT     32
-#define NFP_TUN_PRE_TUN_RULE_DEL       0x1
-#define NFP_TUN_PRE_TUN_IDX_BIT                0x8
+#define NFP_TUN_PRE_TUN_RULE_DEL       BIT(0)
+#define NFP_TUN_PRE_TUN_IDX_BIT                BIT(3)
+#define NFP_TUN_PRE_TUN_IPV6_BIT       BIT(7)
 
 /**
  * struct nfp_tun_pre_run_rule - rule matched before decap
@@ -1268,6 +1269,7 @@ int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
 {
        struct nfp_flower_priv *app_priv = app->priv;
        struct nfp_tun_offloaded_mac *mac_entry;
+       struct nfp_flower_meta_tci *key_meta;
        struct nfp_tun_pre_tun_rule payload;
        struct net_device *internal_dev;
        int err;
@@ -1290,6 +1292,15 @@ int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
        if (!mac_entry)
                return -ENOENT;
 
+       /* Set or clear the IPv6 bit; after the cpu_to_be16() swap it lands
+        * in the MSB of port_idx.
+        */
+       key_meta = (struct nfp_flower_meta_tci *)flow->unmasked_data;
+       if (key_meta->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV6)
+               mac_entry->index |= NFP_TUN_PRE_TUN_IPV6_BIT;
+       else
+               mac_entry->index &= ~NFP_TUN_PRE_TUN_IPV6_BIT;
+
        payload.port_idx = cpu_to_be16(mac_entry->index);
 
        /* Copy mac id and vlan to flow - dev may not exist at delete time. */
index 162a1ff..4087311 100644 (file)
@@ -1079,15 +1079,17 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
 {
        int sg_elems = q->lif->qtype_info[IONIC_QTYPE_TXQ].max_sg_elems;
        struct ionic_tx_stats *stats = q_to_tx_stats(q);
+       int ndescs;
        int err;
 
-       /* If TSO, need roundup(skb->len/mss) descs */
+       /* Each descriptor covers at most mss bytes, so use one per gso_seg */
        if (skb_is_gso(skb))
-               return (skb->len / skb_shinfo(skb)->gso_size) + 1;
+               ndescs = skb_shinfo(skb)->gso_segs;
+       else
+               ndescs = 1;
 
-       /* If non-TSO, just need 1 desc and nr_frags sg elems */
        if (skb_shinfo(skb)->nr_frags <= sg_elems)
-               return 1;
+               return ndescs;
 
        /* Too many frags, so linearize */
        err = skb_linearize(skb);
@@ -1096,8 +1098,7 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
 
        stats->linearize++;
 
-       /* Need 1 desc and zero sg elems */
-       return 1;
+       return ndescs;
 }
 
 static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
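
A worked example of why the descriptor count above switches to gso_segs: when the payload is an exact multiple of the MSS, the old skb->len / gso_size + 1 formula overcounts, while gso_segs is exact:

        unsigned int len = 3000, mss = 1500;
        unsigned int old_count = len / mss + 1;      /* 3 descriptors */
        unsigned int exact = DIV_ROUND_UP(len, mss); /* 2, what gso_segs holds */
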
index 7760a33..7ecb3df 100644 (file)
@@ -1425,6 +1425,7 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
 
        if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
                vfree(fw_dump->tmpl_hdr);
+               fw_dump->tmpl_hdr = NULL;
 
                if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
                        extended = !qlcnic_83xx_extend_md_capab(adapter);
@@ -1443,6 +1444,8 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
                        struct qlcnic_83xx_dump_template_hdr *hdr;
 
                        hdr = fw_dump->tmpl_hdr;
+                       if (!hdr)
+                               return;
                        hdr->drv_cap_mask = 0x1f;
                        fw_dump->cap_mask = 0x1f;
                        dev_info(&pdev->dev,
index 7aad0ba..581a92f 100644 (file)
@@ -4646,6 +4646,9 @@ static void rtl8169_down(struct rtl8169_private *tp)
 
        rtl8169_update_counters(tp);
 
+       pci_clear_master(tp->pci_dev);
+       rtl_pci_commit(tp);
+
        rtl8169_cleanup(tp, true);
 
        rtl_prepare_power_down(tp);
@@ -4653,6 +4656,7 @@ static void rtl8169_down(struct rtl8169_private *tp)
 
 static void rtl8169_up(struct rtl8169_private *tp)
 {
+       pci_set_master(tp->pci_dev);
        phy_resume(tp->phydev);
        rtl8169_init_phy(tp);
        napi_enable(&tp->napi);
@@ -5307,8 +5311,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        rtl_hw_reset(tp);
 
-       pci_set_master(pdev);
-
        rc = rtl_alloc_irq(tp);
        if (rc < 0) {
                dev_err(&pdev->dev, "Can't allocate interrupt\n");
index 3c53051..200785e 100644 (file)
@@ -1715,14 +1715,17 @@ static int netsec_netdev_init(struct net_device *ndev)
                goto err1;
 
        /* set phy power down */
-       data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR) |
-               BMCR_PDOWN;
-       netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);
+       data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR);
+       netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR,
+                        data | BMCR_PDOWN);
 
        ret = netsec_reset_hardware(priv, true);
        if (ret)
                goto err2;
 
+       /* Restore phy power state */
+       netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);
+
        spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock);
        spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock);
 
index 6b75cf2..e62efd1 100644 (file)
@@ -1214,6 +1214,8 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
        plat_dat->init = sun8i_dwmac_init;
        plat_dat->exit = sun8i_dwmac_exit;
        plat_dat->setup = sun8i_dwmac_setup;
+       plat_dat->tx_fifo_size = 4096;
+       plat_dat->rx_fifo_size = 16384;
 
        ret = sun8i_dwmac_set_syscon(&pdev->dev, plat_dat);
        if (ret)
index 1e966a3..aca7f82 100644 (file)
@@ -504,6 +504,18 @@ static inline u32 axinet_ior_read_mcr(struct axienet_local *lp)
        return axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
 }
 
+static inline void axienet_lock_mii(struct axienet_local *lp)
+{
+       if (lp->mii_bus)
+               mutex_lock(&lp->mii_bus->mdio_lock);
+}
+
+static inline void axienet_unlock_mii(struct axienet_local *lp)
+{
+       if (lp->mii_bus)
+               mutex_unlock(&lp->mii_bus->mdio_lock);
+}
+
 /**
  * axienet_iow - Memory mapped Axi Ethernet register write
  * @lp:         Pointer to axienet local structure
index 3a8775e..f8f8654 100644 (file)
@@ -1053,9 +1053,9 @@ static int axienet_open(struct net_device *ndev)
         * including the MDIO. MDIO must be disabled before resetting.
         * Hold MDIO bus lock to avoid MDIO accesses during the reset.
         */
-       mutex_lock(&lp->mii_bus->mdio_lock);
+       axienet_lock_mii(lp);
        ret = axienet_device_reset(ndev);
-       mutex_unlock(&lp->mii_bus->mdio_lock);
+       axienet_unlock_mii(lp);
 
        ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
        if (ret) {
@@ -1148,9 +1148,9 @@ static int axienet_stop(struct net_device *ndev)
        }
 
        /* Do a reset to ensure DMA is really stopped */
-       mutex_lock(&lp->mii_bus->mdio_lock);
+       axienet_lock_mii(lp);
        __axienet_device_reset(lp);
-       mutex_unlock(&lp->mii_bus->mdio_lock);
+       axienet_unlock_mii(lp);
 
        cancel_work_sync(&lp->dma_err_task);
 
@@ -1709,9 +1709,9 @@ static void axienet_dma_err_handler(struct work_struct *work)
         * including the MDIO. MDIO must be disabled before resetting.
         * Hold MDIO bus lock to avoid MDIO accesses during the reset.
         */
-       mutex_lock(&lp->mii_bus->mdio_lock);
+       axienet_lock_mii(lp);
        __axienet_device_reset(lp);
-       mutex_unlock(&lp->mii_bus->mdio_lock);
+       axienet_unlock_mii(lp);
 
        for (i = 0; i < lp->tx_bd_num; i++) {
                cur_p = &lp->tx_bd_v[i];
@@ -1880,7 +1880,7 @@ static int axienet_probe(struct platform_device *pdev)
        if (IS_ERR(lp->regs)) {
                dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
                ret = PTR_ERR(lp->regs);
-               goto free_netdev;
+               goto cleanup_clk;
        }
        lp->regs_start = ethres->start;
 
@@ -1958,18 +1958,18 @@ static int axienet_probe(struct platform_device *pdev)
                        break;
                default:
                        ret = -EINVAL;
-                       goto free_netdev;
+                       goto cleanup_clk;
                }
        } else {
                ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
                if (ret)
-                       goto free_netdev;
+                       goto cleanup_clk;
        }
        if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
            lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
                dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
                ret = -EINVAL;
-               goto free_netdev;
+               goto cleanup_clk;
        }
 
        /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
@@ -1982,7 +1982,7 @@ static int axienet_probe(struct platform_device *pdev)
                        dev_err(&pdev->dev,
                                "unable to get DMA resource\n");
                        of_node_put(np);
-                       goto free_netdev;
+                       goto cleanup_clk;
                }
                lp->dma_regs = devm_ioremap_resource(&pdev->dev,
                                                     &dmares);
@@ -2002,12 +2002,12 @@ static int axienet_probe(struct platform_device *pdev)
        if (IS_ERR(lp->dma_regs)) {
                dev_err(&pdev->dev, "could not map DMA regs\n");
                ret = PTR_ERR(lp->dma_regs);
-               goto free_netdev;
+               goto cleanup_clk;
        }
        if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
                dev_err(&pdev->dev, "could not determine irqs\n");
                ret = -ENOMEM;
-               goto free_netdev;
+               goto cleanup_clk;
        }
 
        /* Autodetect the need for 64-bit DMA pointers.
@@ -2037,7 +2037,7 @@ static int axienet_probe(struct platform_device *pdev)
        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
        if (ret) {
                dev_err(&pdev->dev, "No suitable DMA available\n");
-               goto free_netdev;
+               goto cleanup_clk;
        }
 
        /* Check for Ethernet core IRQ (optional) */
@@ -2068,12 +2068,12 @@ static int axienet_probe(struct platform_device *pdev)
                if (!lp->phy_node) {
                        dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n");
                        ret = -EINVAL;
-                       goto free_netdev;
+                       goto cleanup_mdio;
                }
                lp->pcs_phy = of_mdio_find_device(lp->phy_node);
                if (!lp->pcs_phy) {
                        ret = -EPROBE_DEFER;
-                       goto free_netdev;
+                       goto cleanup_mdio;
                }
                lp->phylink_config.pcs_poll = true;
        }
@@ -2087,17 +2087,30 @@ static int axienet_probe(struct platform_device *pdev)
        if (IS_ERR(lp->phylink)) {
                ret = PTR_ERR(lp->phylink);
                dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
-               goto free_netdev;
+               goto cleanup_mdio;
        }
 
        ret = register_netdev(lp->ndev);
        if (ret) {
                dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
-               goto free_netdev;
+               goto cleanup_phylink;
        }
 
        return 0;
 
+cleanup_phylink:
+       phylink_destroy(lp->phylink);
+
+cleanup_mdio:
+       if (lp->pcs_phy)
+               put_device(&lp->pcs_phy->dev);
+       if (lp->mii_bus)
+               axienet_mdio_teardown(lp);
+       of_node_put(lp->phy_node);
+
+cleanup_clk:
+       clk_disable_unprepare(lp->clk);
+
 free_netdev:
        free_netdev(ndev);
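
The relabeled error paths above restore the usual unwind discipline: each failure jumps to the label that releases everything acquired so far, and the labels run in reverse order of acquisition. A condensed sketch of the shape (the MDIO step is elided here):

        ret = clk_prepare_enable(lp->clk);
        if (ret)
                goto free_netdev;
        /* ... map registers, set up MDIO and phylink ... */
        ret = register_netdev(lp->ndev);
        if (ret)
                goto cleanup_phylink;
        return 0;

cleanup_phylink:
        phylink_destroy(lp->phylink);
cleanup_clk: /* each label falls through, undoing one more step */
        clk_disable_unprepare(lp->clk);
free_netdev:
        free_netdev(ndev);
        return ret;
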
 
index 4ac0373..d5b1e48 100644 (file)
@@ -908,8 +908,16 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 
                info = skb_tunnel_info(skb);
                if (info) {
-                       info->key.u.ipv4.dst = fl4.saddr;
-                       info->key.u.ipv4.src = fl4.daddr;
+                       struct ip_tunnel_info *unclone;
+
+                       unclone = skb_tunnel_info_unclone(skb);
+                       if (unlikely(!unclone)) {
+                               dst_release(&rt->dst);
+                               return -ENOMEM;
+                       }
+
+                       unclone->key.u.ipv4.dst = fl4.saddr;
+                       unclone->key.u.ipv4.src = fl4.daddr;
                }
 
                if (!pskb_may_pull(skb, ETH_HLEN)) {
@@ -993,8 +1001,16 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                struct ip_tunnel_info *info = skb_tunnel_info(skb);
 
                if (info) {
-                       info->key.u.ipv6.dst = fl6.saddr;
-                       info->key.u.ipv6.src = fl6.daddr;
+                       struct ip_tunnel_info *unclone;
+
+                       unclone = skb_tunnel_info_unclone(skb);
+                       if (unlikely(!unclone)) {
+                               dst_release(dst);
+                               return -ENOMEM;
+                       }
+
+                       unclone->key.u.ipv6.dst = fl6.saddr;
+                       unclone->key.u.ipv6.src = fl6.daddr;
                }
 
                if (!pskb_may_pull(skb, ETH_HLEN)) {
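
Both hunks above fix the same bug: skb_tunnel_info() can point at tunnel metadata that is shared beyond this skb, so writing the reply tuple through it corrupted state seen by other packets. skb_tunnel_info_unclone() gives the skb a private copy first. The pattern, in brief:

        struct ip_tunnel_info *unclone;

        unclone = skb_tunnel_info_unclone(skb); /* copy-on-write */
        if (unlikely(!unclone))
                return -ENOMEM; /* after releasing any held references */
        unclone->key.u.ipv4.dst = fl4.saddr; /* safe: private to this skb */
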
index 36eeb80..4690c6a 100644 (file)
@@ -2167,7 +2167,6 @@ static void __exit scc_cleanup_driver(void)
 
 MODULE_AUTHOR("Joerg Reuter <jreuter@yaina.de>");
 MODULE_DESCRIPTION("AX.25 Device Driver for Z8530 based HDLC cards");
-MODULE_SUPPORTED_DEVICE("Z8530 based SCC cards for Amateur Radio");
 MODULE_LICENSE("GPL");
 module_init(scc_init_driver);
 module_exit(scc_cleanup_driver);
index 0dd0ba9..23ee0b1 100644 (file)
@@ -365,6 +365,7 @@ static int atusb_alloc_urbs(struct atusb *atusb, int n)
                        return -ENOMEM;
                }
                usb_anchor_urb(urb, &atusb->idle_urbs);
+               usb_free_urb(urb);
                n--;
        }
        return 0;
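
The one-line fix above plugs a reference leak: usb_alloc_urb() hands the caller a reference, and usb_anchor_urb() takes its own, so the caller's reference must be dropped once the URB is parked on the anchor. In outline:

        urb = usb_alloc_urb(0, GFP_KERNEL); /* caller holds one reference */
        if (!urb)
                return -ENOMEM;
        usb_anchor_urb(urb, &atusb->idle_urbs); /* anchor takes its own */
        usb_free_urb(urb); /* drop ours; the anchor keeps the URB alive */
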
index 35e3585..d73b03a 100644 (file)
@@ -175,21 +175,23 @@ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
                            : field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
        if (mem->offset > offset_max ||
            ipa->mem_offset > offset_max - mem->offset) {
-               dev_err(dev, "IPv%c %s%s table region offset too large "
-                             "(0x%04x + 0x%04x > 0x%04x)\n",
-                             ipv6 ? '6' : '4', hashed ? "hashed " : "",
-                             route ? "route" : "filter",
-                             ipa->mem_offset, mem->offset, offset_max);
+               dev_err(dev, "IPv%c %s%s table region offset too large\n",
+                       ipv6 ? '6' : '4', hashed ? "hashed " : "",
+                       route ? "route" : "filter");
+               dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
+                       ipa->mem_offset, mem->offset, offset_max);
+
                return false;
        }
 
        if (mem->offset > ipa->mem_size ||
            mem->size > ipa->mem_size - mem->offset) {
-               dev_err(dev, "IPv%c %s%s table region out of range "
-                             "(0x%04x + 0x%04x > 0x%04x)\n",
-                             ipv6 ? '6' : '4', hashed ? "hashed " : "",
-                             route ? "route" : "filter",
-                             mem->offset, mem->size, ipa->mem_size);
+               dev_err(dev, "IPv%c %s%s table region out of range\n",
+                       ipv6 ? '6' : '4', hashed ? "hashed " : "",
+                       route ? "route" : "filter");
+               dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
+                       mem->offset, mem->size, ipa->mem_size);
+
                return false;
        }
 
@@ -205,22 +207,36 @@ static bool ipa_cmd_header_valid(struct ipa *ipa)
        u32 size_max;
        u32 size;
 
+       /* In ipa_cmd_hdr_init_local_add() we record the offset and size
+        * of the header table memory area.  Make sure the offset and size
+        * fit in the fields that need to hold them, and that the entire
+        * range is within the overall IPA memory range.
+        */
        offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
        if (mem->offset > offset_max ||
            ipa->mem_offset > offset_max - mem->offset) {
-               dev_err(dev, "header table region offset too large "
-                             "(0x%04x + 0x%04x > 0x%04x)\n",
-                             ipa->mem_offset + mem->offset, offset_max);
+               dev_err(dev, "header table region offset too large\n");
+               dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
+                       ipa->mem_offset, mem->offset, offset_max);
+
                return false;
        }
 
        size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
        size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
        size += ipa->mem[IPA_MEM_AP_HEADER].size;
-       if (mem->offset > ipa->mem_size || size > ipa->mem_size - mem->offset) {
-               dev_err(dev, "header table region out of range "
-                             "(0x%04x + 0x%04x > 0x%04x)\n",
-                             mem->offset, size, ipa->mem_size);
+
+       if (size > size_max) {
+               dev_err(dev, "header table region size too large\n");
+               dev_err(dev, "    (0x%04x > 0x%08x)\n", size, size_max);
+
+               return false;
+       }
+       if (size > ipa->mem_size || mem->offset > ipa->mem_size - size) {
+               dev_err(dev, "header table region out of range\n");
+               dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
+                       mem->offset, size, ipa->mem_size);
+
                return false;
        }
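
The new size check follows the overflow-safe idiom used throughout this file: rather than computing offset + size, which can wrap, the limit is compared by subtraction. A minimal helper capturing the idiom, as a sketch:

static bool region_fits(u32 offset, u32 size, u32 limit)
{
        /* equivalent to offset + size <= limit, without the wraparound */
        return size <= limit && offset <= limit - size;
}
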
 
index 2fc6448..e594bf3 100644 (file)
@@ -249,6 +249,7 @@ static const struct qmi_msg_handler ipa_server_msg_handlers[] = {
                .decoded_size   = IPA_QMI_DRIVER_INIT_COMPLETE_REQ_SZ,
                .fn             = ipa_server_driver_init_complete,
        },
+       { },
 };
 
 /* Handle an INIT_DRIVER response message from the modem. */
@@ -269,6 +270,7 @@ static const struct qmi_msg_handler ipa_client_msg_handlers[] = {
                .decoded_size   = IPA_QMI_INIT_DRIVER_RSP_SZ,
                .fn             = ipa_client_init_driver,
        },
+       { },
 };
 
 /* Return a pointer to an init modem driver request structure, which contains
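
The empty entries added above are sentinels: the QMI core walks a handler array until it finds an entry whose ->fn is NULL, so an unterminated array sends it off the end of the table. Shape of a correctly terminated table, with hypothetical handler names:

static const struct qmi_msg_handler example_handlers[] = {
        {
                .type           = QMI_REQUEST,
                .msg_id         = 0x35,                /* hypothetical */
                .ei             = example_req_ei,      /* hypothetical */
                .decoded_size   = sizeof(struct example_req),
                .fn             = example_req_fn,
        },
        { },    /* sentinel: terminates the walk */
};
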
index 53282a6..287cccf 100644 (file)
@@ -369,7 +369,7 @@ EXPORT_SYMBOL_GPL(bcm_phy_enable_apd);
 
 int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
 {
-       int val;
+       int val, mask = 0;
 
        /* Enable EEE at PHY level */
        val = phy_read_mmd(phydev, MDIO_MMD_AN, BRCM_CL45VEN_EEE_CONTROL);
@@ -388,10 +388,17 @@ int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
        if (val < 0)
                return val;
 
+       if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+                             phydev->supported))
+               mask |= MDIO_EEE_1000T;
+       if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+                             phydev->supported))
+               mask |= MDIO_EEE_100TX;
+
        if (enable)
-               val |= (MDIO_EEE_100TX | MDIO_EEE_1000T);
+               val |= mask;
        else
-               val &= ~(MDIO_EEE_100TX | MDIO_EEE_1000T);
+               val &= ~mask;
 
        phy_write_mmd(phydev, MDIO_MMD_AN, BCM_CL45VEN_EEE_ADV, (u32)val);
 
index fa0be59..82fe5f4 100644 (file)
@@ -342,6 +342,10 @@ static int bcm54xx_config_init(struct phy_device *phydev)
        bcm54xx_adjust_rxrefclk(phydev);
 
        switch (BRCM_PHY_MODEL(phydev)) {
+       case PHY_ID_BCM50610:
+       case PHY_ID_BCM50610M:
+               err = bcm54xx_config_clock_delay(phydev);
+               break;
        case PHY_ID_BCM54210E:
                err = bcm54210e_config_init(phydev);
                break;
@@ -399,6 +403,11 @@ static int bcm54xx_resume(struct phy_device *phydev)
        if (ret < 0)
                return ret;
 
+       /* Upon exiting power down, the PHY remains in an internal reset state
+        * for 40us
+        */
+       fsleep(40);
+
        return bcm54xx_config_init(phydev);
 }
 
index 053c92e..dc2800b 100644 (file)
@@ -476,7 +476,7 @@ static void phylink_major_config(struct phylink *pl, bool restart,
                err = pl->mac_ops->mac_finish(pl->config, pl->cur_link_an_mode,
                                              state->interface);
                if (err < 0)
-                       phylink_err(pl, "mac_prepare failed: %pe\n",
+                       phylink_err(pl, "mac_finish failed: %pe\n",
                                    ERR_PTR(err));
        }
 }
index fc86da7..4cf38be 100644 (file)
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
 #include <linux/mutex.h>
+#include <linux/ieee802154.h>
+#include <linux/if_ltalk.h>
+#include <uapi/linux/if_fddi.h>
+#include <uapi/linux/if_hippi.h>
+#include <uapi/linux/if_fc.h>
+#include <net/ax25.h>
+#include <net/rose.h>
+#include <net/6lowpan.h>
 
 #include <linux/uaccess.h>
 #include <linux/proc_fs.h>
@@ -2919,6 +2927,45 @@ static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
        return __tun_set_ebpf(tun, prog_p, prog);
 }
 
+/* Return correct value for tun->dev->addr_len based on tun->dev->type. */
+static unsigned char tun_get_addr_len(unsigned short type)
+{
+       switch (type) {
+       case ARPHRD_IP6GRE:
+       case ARPHRD_TUNNEL6:
+               return sizeof(struct in6_addr);
+       case ARPHRD_IPGRE:
+       case ARPHRD_TUNNEL:
+       case ARPHRD_SIT:
+               return 4;
+       case ARPHRD_ETHER:
+               return ETH_ALEN;
+       case ARPHRD_IEEE802154:
+       case ARPHRD_IEEE802154_MONITOR:
+               return IEEE802154_EXTENDED_ADDR_LEN;
+       case ARPHRD_PHONET_PIPE:
+       case ARPHRD_PPP:
+       case ARPHRD_NONE:
+               return 0;
+       case ARPHRD_6LOWPAN:
+               return EUI64_ADDR_LEN;
+       case ARPHRD_FDDI:
+               return FDDI_K_ALEN;
+       case ARPHRD_HIPPI:
+               return HIPPI_ALEN;
+       case ARPHRD_IEEE802:
+               return FC_ALEN;
+       case ARPHRD_ROSE:
+               return ROSE_ADDR_LEN;
+       case ARPHRD_NETROM:
+               return AX25_ADDR_LEN;
+       case ARPHRD_LOCALTLK:
+               return LTALK_ALEN;
+       default:
+               return 0;
+       }
+}
+
 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                            unsigned long arg, int ifreq_len)
 {
@@ -3082,6 +3129,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                                break;
                        }
                        tun->dev->type = (int) arg;
+                       tun->dev->addr_len = tun_get_addr_len(tun->dev->type);
                        netif_info(tun, drv, tun->dev, "linktype set to %d\n",
                                   tun->dev->type);
                        call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
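TUNSETLINK previously changed dev->type without touching dev->addr_len, leaving a hardware-address length that no longer matched the link type; the new helper derives the length from the type so the two are always updated together. Roughly the same shape in plain C, using an illustrative subset of types and lengths:

#include <stdio.h>

/* Illustrative subset of link types and their address lengths. */
enum { LT_ETHER, LT_TUNNEL4, LT_TUNNEL6, LT_NONE };

static unsigned char addr_len_for(int type)
{
        switch (type) {
        case LT_ETHER:
                return 6;       /* like ETH_ALEN */
        case LT_TUNNEL4:
                return 4;       /* an IPv4 address */
        case LT_TUNNEL6:
                return 16;      /* like sizeof(struct in6_addr) */
        default:
                return 0;       /* point-to-point types carry no address */
        }
}

int main(void)
{
        int type = LT_TUNNEL6;                          /* as if set via an ioctl */
        unsigned char addr_len = addr_len_for(type);    /* keep both in sync */

        printf("type=%d addr_len=%u\n", type, addr_len);
        return 0;
}
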
index 02e6bbb..8d1f69d 100644 (file)
@@ -387,6 +387,8 @@ static int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *i
 
        err = register_netdev(dev);
        if (err) {
+               /* Set disconnected flag so that disconnect() returns early. */
+               pnd->disconnected = 1;
                usb_driver_release_interface(&usbpn_driver, data_intf);
                goto out;
        }
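Setting the flag before releasing the interface turns the later disconnect() callback into a no-op, so it cannot tear down state that the failed probe has already released. The guard-flag pattern in miniature (all names hypothetical):

#include <stdio.h>
#include <stdbool.h>

struct dev_priv {
        bool disconnected;      /* set once teardown has been done */
};

static void release_resources(struct dev_priv *p)
{
        printf("resources released\n");
}

static void disconnect(struct dev_priv *p)
{
        if (p->disconnected)    /* probe already cleaned up: return early */
                return;
        release_resources(p);
}

static int probe(struct dev_priv *p)
{
        int err = -1;           /* pretend device registration failed */

        if (err) {
                p->disconnected = true; /* disarm the disconnect() path */
                release_resources(p);
                return err;
        }
        return 0;
}

int main(void)
{
        struct dev_priv p = { 0 };

        probe(&p);
        disconnect(&p); /* no double release: returns early */
        return 0;
}
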
index 31d5134..9bc58e6 100644 (file)
@@ -611,7 +611,7 @@ static struct hso_serial *get_serial_by_index(unsigned index)
        return serial;
 }
 
-static int get_free_serial_index(void)
+static int obtain_minor(struct hso_serial *serial)
 {
        int index;
        unsigned long flags;
@@ -619,8 +619,10 @@ static int get_free_serial_index(void)
        spin_lock_irqsave(&serial_table_lock, flags);
        for (index = 0; index < HSO_SERIAL_TTY_MINORS; index++) {
                if (serial_table[index] == NULL) {
+                       serial_table[index] = serial->parent;
+                       serial->minor = index;
                        spin_unlock_irqrestore(&serial_table_lock, flags);
-                       return index;
+                       return 0;
                }
        }
        spin_unlock_irqrestore(&serial_table_lock, flags);
@@ -629,15 +631,12 @@ static int get_free_serial_index(void)
        return -1;
 }
 
-static void set_serial_by_index(unsigned index, struct hso_serial *serial)
+static void release_minor(struct hso_serial *serial)
 {
        unsigned long flags;
 
        spin_lock_irqsave(&serial_table_lock, flags);
-       if (serial)
-               serial_table[index] = serial->parent;
-       else
-               serial_table[index] = NULL;
+       serial_table[serial->minor] = NULL;
        spin_unlock_irqrestore(&serial_table_lock, flags);
 }
 
@@ -2230,6 +2229,7 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
 static void hso_serial_tty_unregister(struct hso_serial *serial)
 {
        tty_unregister_device(tty_drv, serial->minor);
+       release_minor(serial);
 }
 
 static void hso_serial_common_free(struct hso_serial *serial)
@@ -2253,24 +2253,22 @@ static void hso_serial_common_free(struct hso_serial *serial)
 static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
                                    int rx_size, int tx_size)
 {
-       int minor;
        int i;
 
        tty_port_init(&serial->port);
 
-       minor = get_free_serial_index();
-       if (minor < 0)
+       if (obtain_minor(serial))
                goto exit2;
 
        /* register our minor number */
        serial->parent->dev = tty_port_register_device_attr(&serial->port,
-                       tty_drv, minor, &serial->parent->interface->dev,
+                       tty_drv, serial->minor, &serial->parent->interface->dev,
                        serial->parent, hso_serial_dev_groups);
-       if (IS_ERR(serial->parent->dev))
+       if (IS_ERR(serial->parent->dev)) {
+               release_minor(serial);
                goto exit2;
+       }
 
-       /* fill in specific data for later use */
-       serial->minor = minor;
        serial->magic = HSO_SERIAL_MAGIC;
        spin_lock_init(&serial->serial_lock);
        serial->num_rx_urbs = num_urbs;
@@ -2667,9 +2665,6 @@ static struct hso_device *hso_create_bulk_serial_device(
 
        serial->write_data = hso_std_serial_write_data;
 
-       /* and record this serial */
-       set_serial_by_index(serial->minor, serial);
-
        /* setup the proc dirs and files if needed */
        hso_log_port(hso_dev);
 
@@ -2726,9 +2721,6 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface,
        serial->shared_int->ref_count++;
        mutex_unlock(&serial->shared_int->shared_int_lock);
 
-       /* and record this serial */
-       set_serial_by_index(serial->minor, serial);
-
        /* setup the proc dirs and files if needed */
        hso_log_port(hso_dev);
 
@@ -3113,7 +3105,6 @@ static void hso_free_interface(struct usb_interface *interface)
                        cancel_work_sync(&serial_table[i]->async_get_intf);
                        hso_serial_tty_unregister(serial);
                        kref_put(&serial_table[i]->ref, hso_serial_ref_free);
-                       set_serial_by_index(i, NULL);
                }
        }
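The core of the hso fix: the old get_free_serial_index() returned an index after dropping the lock, and the entry was only written into serial_table later, so the same minor could in principle be handed out twice. obtain_minor() claims the slot inside the same critical section that finds it, and release_minor() gives it back. A pthread sketch of the claim-before-unlock idea (sizes and names illustrative, with a mutex standing in for the spinlock):

#include <stdio.h>
#include <pthread.h>

#define NUM_MINORS 4

static void *table[NUM_MINORS];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Find a free slot and claim it before dropping the lock, so no other
 * thread can be handed the same index.
 */
static int obtain_minor(void *owner)
{
        int i;

        pthread_mutex_lock(&table_lock);
        for (i = 0; i < NUM_MINORS; i++) {
                if (!table[i]) {
                        table[i] = owner;
                        pthread_mutex_unlock(&table_lock);
                        return i;
                }
        }
        pthread_mutex_unlock(&table_lock);
        return -1;
}

static void release_minor(int minor)
{
        pthread_mutex_lock(&table_lock);
        table[minor] = NULL;
        pthread_mutex_unlock(&table_lock);
}

int main(void)
{
        int a = 1, b = 2;
        int ma = obtain_minor(&a);
        int mb = obtain_minor(&b);      /* guaranteed != ma */

        printf("minors %d and %d\n", ma, mb);
        release_minor(ma);
        release_minor(mb);
        return 0;
}
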
 
index 90f1c02..20fb563 100644 (file)
@@ -6553,7 +6553,10 @@ static int rtl_ops_init(struct r8152 *tp)
                ops->in_nway            = rtl8153_in_nway;
                ops->hw_phy_cfg         = r8153_hw_phy_cfg;
                ops->autosuspend_en     = rtl8153_runtime_enable;
-               tp->rx_buf_sz           = 32 * 1024;
+               if (tp->udev->speed < USB_SPEED_SUPER)
+                       tp->rx_buf_sz   = 16 * 1024;
+               else
+                       tp->rx_buf_sz   = 32 * 1024;
                tp->eee_en              = true;
                tp->eee_adv             = MDIO_EEE_1000T | MDIO_EEE_100TX;
                break;
index aa1a66a..34e49c7 100644 (file)
@@ -302,8 +302,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
        if (rxq < rcv->real_num_rx_queues) {
                rq = &rcv_priv->rq[rxq];
                rcv_xdp = rcu_access_pointer(rq->xdp_prog);
-               if (rcv_xdp)
-                       skb_record_rx_queue(skb, rxq);
+               skb_record_rx_queue(skb, rxq);
        }
 
        skb_tx_timestamp(skb);
index 82e520d..0824e69 100644 (file)
@@ -406,9 +406,13 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
        offset += hdr_padded_len;
        p += hdr_padded_len;
 
-       copy = len;
-       if (copy > skb_tailroom(skb))
-               copy = skb_tailroom(skb);
+       /* Copy the whole frame if it fits in skb->head; otherwise
+        * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
+        */
+       if (len <= skb_tailroom(skb))
+               copy = len;
+       else
+               copy = ETH_HLEN + metasize;
        skb_put_data(skb, p, copy);
 
        if (metasize) {
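The old code copied as much of the frame as the linear area could hold; the fix copies either the whole frame, when it fits, or just the Ethernet header plus metadata, leaving the payload in fragments for virtio_net_hdr_to_skb() and GRO to pull as needed. The length choice itself reduces to:

#include <stdio.h>

#define ETH_HLEN 14

/* Copy the whole frame into the linear area only when it fits there;
 * otherwise copy just the headers and leave the payload in fragments.
 */
static unsigned int copy_len(unsigned int len, unsigned int tailroom,
                             unsigned int metasize)
{
        return len <= tailroom ? len : ETH_HLEN + metasize;
}

int main(void)
{
        printf("%u\n", copy_len(60, 128, 0));   /* small frame: 60, all copied */
        printf("%u\n", copy_len(1500, 128, 8)); /* big frame: 22, headers only */
        return 0;
}
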
index 666dd20..53dbc67 100644 (file)
@@ -2725,12 +2725,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                        goto tx_error;
                } else if (err) {
                        if (info) {
+                               struct ip_tunnel_info *unclone;
                                struct in_addr src, dst;
 
+                               unclone = skb_tunnel_info_unclone(skb);
+                               if (unlikely(!unclone))
+                                       goto tx_error;
+
                                src = remote_ip.sin.sin_addr;
                                dst = local_ip.sin.sin_addr;
-                               info->key.u.ipv4.src = src.s_addr;
-                               info->key.u.ipv4.dst = dst.s_addr;
+                               unclone->key.u.ipv4.src = src.s_addr;
+                               unclone->key.u.ipv4.dst = dst.s_addr;
                        }
                        vxlan_encap_bypass(skb, vxlan, vxlan, vni, false);
                        dst_release(ndst);
@@ -2781,12 +2786,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                        goto tx_error;
                } else if (err) {
                        if (info) {
+                               struct ip_tunnel_info *unclone;
                                struct in6_addr src, dst;
 
+                               unclone = skb_tunnel_info_unclone(skb);
+                               if (unlikely(!unclone))
+                                       goto tx_error;
+
                                src = remote_ip.sin6.sin6_addr;
                                dst = local_ip.sin6.sin6_addr;
-                               info->key.u.ipv6.src = src;
-                               info->key.u.ipv6.dst = dst;
+                               unclone->key.u.ipv6.src = src;
+                               unclone->key.u.ipv6.dst = dst;
                        }
 
                        vxlan_encap_bypass(skb, vxlan, vxlan, vni, false);
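The tunnel info attached to the skb can be shared with other users of the same metadata dst, so writing the encap-bypass addresses through the original info pointer would corrupt their view; the fix takes a private, uncloned copy first and aborts the transmit if that allocation fails. Copy-before-write on shared state, reduced to a refcount sketch (much simplified relative to the real metadata-dst handling):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct tun_info {
        int refcnt;             /* >1 means shared: must not write in place */
        unsigned int key;
};

/* Return a privately owned copy if the info is shared, else the original.
 * NULL on allocation failure: the caller must abort the transmit path.
 */
static struct tun_info *unclone(struct tun_info **slot)
{
        struct tun_info *info = *slot, *copy;

        if (info->refcnt == 1)
                return info;

        copy = malloc(sizeof(*copy));
        if (!copy)
                return NULL;
        memcpy(copy, info, sizeof(*copy));
        copy->refcnt = 1;
        info->refcnt--;
        *slot = copy;
        return copy;
}

int main(void)
{
        struct tun_info shared = { .refcnt = 2, .key = 1 };
        struct tun_info *slot = &shared;
        struct tun_info *priv = unclone(&slot);

        if (!priv)
                return 1;       /* the diff's goto tx_error */
        priv->key = 42;         /* the other holder still sees key == 1 */
        printf("shared=%u private=%u\n", shared.key, priv->key);
        if (priv != &shared)
                free(priv);
        return 0;
}
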
index 0720f5f..4d9dc7d 100644 (file)
@@ -415,7 +415,7 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
 
                if (pad > 0) { /* Pad the frame with zeros */
                        if (__skb_pad(skb, pad, false))
-                               goto drop;
+                               goto out;
                        skb_put(skb, pad);
                }
        }
@@ -448,8 +448,9 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 
 drop:
-       dev->stats.tx_dropped++;
        kfree_skb(skb);
+out:
+       dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
 }
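After this change a padding failure is still counted in tx_dropped but is no longer funneled through kfree_skb; the split drop/out labels keep one free site for paths that still own the skb and one shared counter for every dropped frame. A plain-C rendering of that label layout, assuming (as the redirect implies) that the padding helper consumes the buffer when it fails; all names are hypothetical:

#include <stdio.h>
#include <stdlib.h>

static unsigned long tx_dropped;

/* Frees *buf on failure, the way the redirected goto assumes the skb
 * is consumed on the padding error path.
 */
static int pad_frame(char **buf)
{
        free(*buf);
        *buf = NULL;
        return -1;
}

static void xmit(char *buf, int err)
{
        if (err == 1 && pad_frame(&buf))
                goto out;       /* already freed: must not free again */

        if (err == 2)
                goto drop;      /* we still own the buffer here */

        free(buf);              /* "transmit" consumes the buffer */
        return;

drop:
        free(buf);              /* single free site for owned buffers */
out:
        tx_dropped++;           /* every dropped frame is counted */
}

int main(void)
{
        xmit(malloc(64), 1);    /* padding-failure path */
        xmit(malloc(64), 2);    /* generic drop path */
        printf("tx_dropped=%lu\n", tx_dropped);
        return 0;
}
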
 
index 4aaa638..5a6a945 100644 (file)
@@ -23,6 +23,8 @@
 
 struct x25_state {
        x25_hdlc_proto settings;
+       bool up;
+       spinlock_t up_lock; /* Protects "up" */
 };
 
 static int x25_ioctl(struct net_device *dev, struct ifreq *ifr);
@@ -104,6 +106,8 @@ static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb)
 
 static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
 {
+       hdlc_device *hdlc = dev_to_hdlc(dev);
+       struct x25_state *x25st = state(hdlc);
        int result;
 
        /* There should be a pseudo header of 1 byte added by upper layers.
@@ -114,11 +118,19 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_OK;
        }
 
+       spin_lock_bh(&x25st->up_lock);
+       if (!x25st->up) {
+               spin_unlock_bh(&x25st->up_lock);
+               kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
+
        switch (skb->data[0]) {
        case X25_IFACE_DATA:    /* Data to be transmitted */
                skb_pull(skb, 1);
                if ((result = lapb_data_request(dev, skb)) != LAPB_OK)
                        dev_kfree_skb(skb);
+               spin_unlock_bh(&x25st->up_lock);
                return NETDEV_TX_OK;
 
        case X25_IFACE_CONNECT:
@@ -147,6 +159,7 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
                break;
        }
 
+       spin_unlock_bh(&x25st->up_lock);
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
 }
@@ -164,6 +177,7 @@ static int x25_open(struct net_device *dev)
                .data_transmit = x25_data_transmit,
        };
        hdlc_device *hdlc = dev_to_hdlc(dev);
+       struct x25_state *x25st = state(hdlc);
        struct lapb_parms_struct params;
        int result;
 
@@ -190,6 +204,10 @@ static int x25_open(struct net_device *dev)
        if (result != LAPB_OK)
                return -EINVAL;
 
+       spin_lock_bh(&x25st->up_lock);
+       x25st->up = true;
+       spin_unlock_bh(&x25st->up_lock);
+
        return 0;
 }
 
@@ -197,6 +215,13 @@ static int x25_open(struct net_device *dev)
 
 static void x25_close(struct net_device *dev)
 {
+       hdlc_device *hdlc = dev_to_hdlc(dev);
+       struct x25_state *x25st = state(hdlc);
+
+       spin_lock_bh(&x25st->up_lock);
+       x25st->up = false;
+       spin_unlock_bh(&x25st->up_lock);
+
        lapb_unregister(dev);
 }
 
@@ -205,15 +230,28 @@ static void x25_close(struct net_device *dev)
 static int x25_rx(struct sk_buff *skb)
 {
        struct net_device *dev = skb->dev;
+       hdlc_device *hdlc = dev_to_hdlc(dev);
+       struct x25_state *x25st = state(hdlc);
 
        if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
                dev->stats.rx_dropped++;
                return NET_RX_DROP;
        }
 
-       if (lapb_data_received(dev, skb) == LAPB_OK)
+       spin_lock_bh(&x25st->up_lock);
+       if (!x25st->up) {
+               spin_unlock_bh(&x25st->up_lock);
+               kfree_skb(skb);
+               dev->stats.rx_dropped++;
+               return NET_RX_DROP;
+       }
+
+       if (lapb_data_received(dev, skb) == LAPB_OK) {
+               spin_unlock_bh(&x25st->up_lock);
                return NET_RX_SUCCESS;
+       }
 
+       spin_unlock_bh(&x25st->up_lock);
        dev->stats.rx_errors++;
        dev_kfree_skb_any(skb);
        return NET_RX_DROP;
@@ -298,6 +336,8 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
                        return result;
 
                memcpy(&state(hdlc)->settings, &new_settings, size);
+               state(hdlc)->up = false;
+               spin_lock_init(&state(hdlc)->up_lock);
 
                /* There's no header_ops so hard_header_len should be 0. */
                dev->hard_header_len = 0;
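Transmit and receive can race with x25_open()/x25_close(), so the new up flag is read and written only under up_lock, and frames that arrive while LAPB is unregistered are dropped instead of being handed to torn-down state. The gating pattern, with a pthread mutex standing in for the spinlock:

#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>

static bool up;
static pthread_mutex_t up_lock = PTHREAD_MUTEX_INITIALIZER;

static void dev_open(void)
{
        /* ... register with the protocol layer first ... */
        pthread_mutex_lock(&up_lock);
        up = true;
        pthread_mutex_unlock(&up_lock);
}

static void dev_close(void)
{
        pthread_mutex_lock(&up_lock);
        up = false;
        pthread_mutex_unlock(&up_lock);
        /* ... now safe to unregister: no xmit/rx can pass the gate ... */
}

static int xmit(const char *frame)
{
        int ret = -1;

        pthread_mutex_lock(&up_lock);
        if (up) {
                printf("tx: %s\n", frame);      /* hand to protocol layer */
                ret = 0;
        }
        pthread_mutex_unlock(&up_lock);
        return ret;             /* -1: dropped, device not up */
}

int main(void)
{
        dev_open();
        xmit("hello");          /* delivered */
        dev_close();
        return xmit("late") == -1 ? 0 : 1;      /* dropped after close */
}
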
index c41e725..2db9c94 100644 (file)
@@ -28,7 +28,6 @@
 MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
 MODULE_AUTHOR("Jouni Malinen <j@w1.fi>");
 MODULE_DESCRIPTION("Driver for IEEE 802.11b wireless cards based on ADMtek ADM8211");
-MODULE_SUPPORTED_DEVICE("ADM8211");
 MODULE_LICENSE("GPL");
 
 static unsigned int tx_ring_size __read_mostly = 16;
index 4c6e57f..cef17f3 100644 (file)
@@ -90,7 +90,6 @@ MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state");
 MODULE_AUTHOR("Jiri Slaby");
 MODULE_AUTHOR("Nick Kossifidis");
 MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
-MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
 MODULE_LICENSE("Dual BSD/GPL");
 
 static int ath5k_init(struct ieee80211_hw *hw);
index b66eeb5..5abc2a5 100644 (file)
@@ -34,7 +34,6 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
 
 MODULE_AUTHOR("Atheros Communications");
 MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
-MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
 MODULE_LICENSE("Dual BSD/GPL");
 
 static void ath9k_hw_set_clockrate(struct ath_hw *ah)
index 42a2087..01f9c26 100644 (file)
@@ -37,7 +37,6 @@ static char *dev_info = "ath9k";
 
 MODULE_AUTHOR("Atheros Communications");
 MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
-MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
 MODULE_LICENSE("Dual BSD/GPL");
 
 static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
index 707fe66..febce4e 100644 (file)
@@ -75,7 +75,6 @@
 MODULE_AUTHOR("Simon Kelley");
 MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("Atmel at76c50x wireless cards");
 
 /* The name of the firmware file to be loaded
    over-rides any automatic selection */
index 368eebe..453bb84 100644 (file)
@@ -57,7 +57,6 @@
 MODULE_AUTHOR("Simon Kelley");
 MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("Atmel at76c50x PCMCIA cards");
 
 /*====================================================================*/
 
index 47f7ccb..f428dc7 100644 (file)
@@ -16,7 +16,6 @@
 MODULE_AUTHOR("Simon Kelley");
 MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("Atmel at76c506 PCI wireless cards");
 
 static const struct pci_device_id card_ids[] = {
        { 0x1114, 0x0506, PCI_ANY_ID, PCI_ANY_ID },
index 6d30a0f..34cd8a7 100644 (file)
@@ -2439,7 +2439,7 @@ void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool locked)
        vif = ifp->vif;
        cfg = wdev_to_cfg(&vif->wdev);
        cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
-       if (locked) {
+       if (!locked) {
                rtnl_lock();
                wiphy_lock(cfg->wiphy);
                cfg80211_unregister_wdev(&vif->wdev);
index 818e523..39f3af2 100644 (file)
@@ -87,7 +87,6 @@ static int n_adapters_found;
 
 MODULE_AUTHOR("Broadcom Corporation");
 MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver.");
-MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards");
 MODULE_LICENSE("Dual BSD/GPL");
 /* This needs to be adjusted when brcms_firmwares changes */
 MODULE_FIRMWARE("brcm/bcm43xx-0.fw");
index 4c84c30..e87e68c 100644 (file)
@@ -12,7 +12,6 @@
 
 MODULE_AUTHOR("Broadcom Corporation");
 MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver utilities.");
-MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards");
 MODULE_LICENSE("Dual BSD/GPL");
 
 struct sk_buff *brcmu_pkt_buf_get_skb(uint len)
index e35e138..60db38c 100644 (file)
@@ -251,7 +251,6 @@ MODULE_AUTHOR("Benjamin Reed");
 MODULE_DESCRIPTION("Support for Cisco/Aironet 802.11 wireless ethernet cards.  "
                   "Direct support for ISA/PCI/MPI cards and support for PCMCIA when used with airo_cs.");
 MODULE_LICENSE("Dual BSD/GPL");
-MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340/350");
 module_param_hw_array(io, int, ioport, NULL, 0);
 module_param_hw_array(irq, int, irq, NULL, 0);
 module_param_array(rates, int, NULL, 0);
index 3718f95..fcfe4c6 100644 (file)
@@ -47,7 +47,6 @@ MODULE_DESCRIPTION("Support for Cisco/Aironet 802.11 wireless ethernet "
                   "cards.  This is the module that links the PCMCIA card "
                   "with the airo module.");
 MODULE_LICENSE("Dual BSD/GPL");
-MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340 PCMCIA cards");
 
 /*====================================================================*/
 
index 3dbc6f3..231d251 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2005-2014 Intel Corporation
+ * Copyright (C) 2005-2014, 2021 Intel Corporation
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
  */
 #include <linux/sched.h>
@@ -26,7 +26,7 @@ bool iwl_notification_wait(struct iwl_notif_wait_data *notif_wait,
        if (!list_empty(&notif_wait->notif_waits)) {
                struct iwl_notification_wait *w;
 
-               spin_lock(&notif_wait->notif_wait_lock);
+               spin_lock_bh(&notif_wait->notif_wait_lock);
                list_for_each_entry(w, &notif_wait->notif_waits, list) {
                        int i;
                        bool found = false;
@@ -59,7 +59,7 @@ bool iwl_notification_wait(struct iwl_notif_wait_data *notif_wait,
                                triggered = true;
                        }
                }
-               spin_unlock(&notif_wait->notif_wait_lock);
+               spin_unlock_bh(&notif_wait->notif_wait_lock);
        }
 
        return triggered;
@@ -70,10 +70,10 @@ void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
 {
        struct iwl_notification_wait *wait_entry;
 
-       spin_lock(&notif_wait->notif_wait_lock);
+       spin_lock_bh(&notif_wait->notif_wait_lock);
        list_for_each_entry(wait_entry, &notif_wait->notif_waits, list)
                wait_entry->aborted = true;
-       spin_unlock(&notif_wait->notif_wait_lock);
+       spin_unlock_bh(&notif_wait->notif_wait_lock);
 
        wake_up_all(&notif_wait->notif_waitq);
 }
index 75f99ff..c4f5da7 100644 (file)
@@ -414,6 +414,7 @@ struct iwl_cfg {
 #define IWL_CFG_MAC_TYPE_QNJ           0x36
 #define IWL_CFG_MAC_TYPE_SO            0x37
 #define IWL_CFG_MAC_TYPE_SNJ           0x42
+#define IWL_CFG_MAC_TYPE_SOF           0x43
 #define IWL_CFG_MAC_TYPE_MA            0x44
 
 #define IWL_CFG_RF_TYPE_TH             0x105
index af684f8..c5a1e84 100644 (file)
@@ -232,7 +232,7 @@ enum iwl_reg_capa_flags_v2 {
        REG_CAPA_V2_MCS_9_ALLOWED       = BIT(6),
        REG_CAPA_V2_WEATHER_DISABLED    = BIT(7),
        REG_CAPA_V2_40MHZ_ALLOWED       = BIT(8),
-       REG_CAPA_V2_11AX_DISABLED       = BIT(13),
+       REG_CAPA_V2_11AX_DISABLED       = BIT(10),
 };
 
 /*
index 1307605..34ddef9 100644 (file)
@@ -1786,10 +1786,13 @@ static ssize_t iwl_dbgfs_rfi_freq_table_write(struct iwl_mvm *mvm, char *buf,
                return -EINVAL;
 
        /* value zero triggers re-sending the default table to the device */
-       if (!op_id)
+       if (!op_id) {
+               mutex_lock(&mvm->mutex);
                ret = iwl_rfi_send_config_cmd(mvm, NULL);
-       else
+               mutex_unlock(&mvm->mutex);
+       } else {
                ret = -EOPNOTSUPP; /* in the future a new table will be added */
+       }
 
        return ret ?: count;
 }
index 8739190..0b81806 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2020 Intel Corporation
+ * Copyright (C) 2020 - 2021 Intel Corporation
  */
 
 #include "mvm.h"
@@ -66,6 +66,8 @@ int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_t
        if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT))
                return -EOPNOTSUPP;
 
+       lockdep_assert_held(&mvm->mutex);
+
        /* in case no table is passed, use the default one */
        if (!rfi_table) {
                memcpy(cmd.table, iwl_rfi_table, sizeof(cmd.table));
@@ -75,9 +77,7 @@ int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_t
                cmd.oem = 1;
        }
 
-       mutex_lock(&mvm->mutex);
        ret = iwl_mvm_send_cmd(mvm, &hcmd);
-       mutex_unlock(&mvm->mutex);
 
        if (ret)
                IWL_ERR(mvm, "Failed to send RFI config cmd %d\n", ret);
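The locking for the RFI config command moves out of iwl_rfi_send_config_cmd() and into its callers, with lockdep_assert_held() documenting and enforcing the new contract; the debugfs path above now takes mvm->mutex itself before calling in. A userspace analogue of caller-holds-the-lock plus a held assertion (the held flag is a crude, single-thread stand-in for lockdep):

#include <assert.h>
#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static int big_lock_held;       /* crude lockdep stand-in for this demo */

static void big_lock_acquire(void)
{
        pthread_mutex_lock(&big_lock);
        big_lock_held = 1;
}

static void big_lock_release(void)
{
        big_lock_held = 0;
        pthread_mutex_unlock(&big_lock);
}

/* The callee requires the lock but never takes it, so every call site,
 * including new ones, is forced to serialize explicitly.
 */
static int send_config_cmd(void)
{
        assert(big_lock_held);  /* the lockdep_assert_held() analogue */
        printf("config command sent under the lock\n");
        return 0;
}

int main(void)
{
        big_lock_acquire();     /* the caller owns the locking now */
        send_config_cmd();
        big_lock_release();
        return 0;
}
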
index c21736f..af5a6dd 100644 (file)
@@ -272,10 +272,10 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
        rx_status->chain_signal[2] = S8_MIN;
 }
 
-static int iwl_mvm_rx_mgmt_crypto(struct ieee80211_sta *sta,
-                                 struct ieee80211_hdr *hdr,
-                                 struct iwl_rx_mpdu_desc *desc,
-                                 u32 status)
+static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
+                               struct ieee80211_hdr *hdr,
+                               struct iwl_rx_mpdu_desc *desc,
+                               u32 status)
 {
        struct iwl_mvm_sta *mvmsta;
        struct iwl_mvm_vif *mvmvif;
@@ -285,6 +285,9 @@ static int iwl_mvm_rx_mgmt_crypto(struct ieee80211_sta *sta,
        u32 len = le16_to_cpu(desc->mpdu_len);
        const u8 *frame = (void *)hdr;
 
+       if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_NONE)
+               return 0;
+
        /*
         * For non-beacon, we don't really care. But beacons may
         * be filtered out, and we thus need the firmware's replay
@@ -356,6 +359,10 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
            IWL_RX_MPDU_STATUS_SEC_UNKNOWN && !mvm->monitor_on)
                return -1;
 
+       if (unlikely(ieee80211_is_mgmt(hdr->frame_control) &&
+                    !ieee80211_has_protected(hdr->frame_control)))
+               return iwl_mvm_rx_mgmt_prot(sta, hdr, desc, status);
+
        if (!ieee80211_has_protected(hdr->frame_control) ||
            (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
            IWL_RX_MPDU_STATUS_SEC_NONE)
@@ -411,7 +418,7 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                stats->flag |= RX_FLAG_DECRYPTED;
                return 0;
        case RX_MPDU_RES_STATUS_SEC_CMAC_GMAC_ENC:
-               return iwl_mvm_rx_mgmt_crypto(sta, hdr, desc, status);
+               break;
        default:
                /*
                 * Sometimes we can get frames that were not decrypted
index 8fba190..cecc32e 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 #include "iwl-trans.h"
 #include "iwl-fh.h"
@@ -75,15 +75,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
                                 const struct fw_img *fw)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
-                     u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
-                                     CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
-                     u32_encode_bits(250,
-                                     CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
-                     CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
-                     u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
-                                     CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
-                     u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
        struct iwl_context_info_gen3 *ctxt_info_gen3;
        struct iwl_prph_scratch *prph_scratch;
        struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
@@ -217,26 +208,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
        iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
                    CSR_AUTO_FUNC_BOOT_ENA);
 
-       /*
-        * To workaround hardware latency issues during the boot process,
-        * initialize the LTR to ~250 usec (see ltr_val above).
-        * The firmware initializes this again later (to a smaller value).
-        */
-       if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
-            trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
-           !trans->trans_cfg->integrated) {
-               iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
-       } else if (trans->trans_cfg->integrated &&
-                  trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
-               iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
-               iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
-       }
-
-       if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
-               iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
-       else
-               iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
-
        return 0;
 
 err_free_ctxt_info:
index d1bb273..74ce31f 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
  * Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 #include "iwl-trans.h"
 #include "iwl-fh.h"
@@ -240,7 +240,6 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
 
        /* kick FW self load */
        iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr);
-       iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
 
        /* Context info will be released upon alive or failure to get one */
 
index ffaf973..558a0b2 100644 (file)
@@ -592,6 +592,7 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
        IWL_DEV_INFO(0x4DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
        IWL_DEV_INFO(0x4DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
        IWL_DEV_INFO(0x4DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
+       IWL_DEV_INFO(0x4DF0, 0x6074, iwl_ax201_cfg_qu_hr, NULL),
 
        /* So with HR */
        IWL_DEV_INFO(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0, NULL),
@@ -1040,7 +1041,31 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
                      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
                      IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
                      IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-                     iwl_cfg_so_a0_hr_a0, iwl_ax201_name)
+                     iwl_cfg_so_a0_hr_a0, iwl_ax201_name),
+
+/* So-F with Hr */
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+                     IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+                     iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
+                     IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+                     iwl_cfg_so_a0_hr_a0, iwl_ax101_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+                     IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+                     iwl_cfg_so_a0_hr_a0, iwl_ax201_name),
+
+/* So-F with Gf */
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
+                     IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+                     iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
 
 #endif /* CONFIG_IWLMVM */
 };
index 497ef34..94ffc1a 100644 (file)
@@ -266,6 +266,34 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
        mutex_unlock(&trans_pcie->mutex);
 }
 
+static void iwl_pcie_set_ltr(struct iwl_trans *trans)
+{
+       u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
+                     u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+                                     CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
+                     u32_encode_bits(250,
+                                     CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
+                     CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
+                     u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+                                     CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
+                     u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
+
+       /*
+        * To work around hardware latency issues during the boot process,
+        * initialize the LTR to ~250 usec (see ltr_val above).
+        * The firmware initializes this again later (to a smaller value).
+        */
+       if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
+            trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
+           !trans->trans_cfg->integrated) {
+               iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
+       } else if (trans->trans_cfg->integrated &&
+                  trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
+               iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
+               iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
+       }
+}
+
 int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
                                 const struct fw_img *fw, bool run_in_rfkill)
 {
@@ -332,6 +360,13 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
        if (ret)
                goto out;
 
+       iwl_pcie_set_ltr(trans);
+
+       if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+               iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
+       else
+               iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
+
        /* re-check RF-Kill state since we may have missed the interrupt */
        hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
        if (hw_rfkill && !run_in_rfkill)
index 381e8f9..7ae3249 100644 (file)
@@ -928,6 +928,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
        u32 cmd_pos;
        const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
        u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
+       unsigned long flags;
 
        if (WARN(!trans->wide_cmd_header &&
                 group_id > IWL_ALWAYS_LONG_GROUP,
@@ -1011,10 +1012,10 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
                goto free_dup_buf;
        }
 
-       spin_lock_bh(&txq->lock);
+       spin_lock_irqsave(&txq->lock, flags);
 
        if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
-               spin_unlock_bh(&txq->lock);
+               spin_unlock_irqrestore(&txq->lock, flags);
 
                IWL_ERR(trans, "No space in command queue\n");
                iwl_op_mode_cmd_queue_full(trans->op_mode);
@@ -1174,7 +1175,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
  unlock_reg:
        spin_unlock(&trans_pcie->reg_lock);
  out:
-       spin_unlock_bh(&txq->lock);
+       spin_unlock_irqrestore(&txq->lock, flags);
  free_dup_buf:
        if (idx < 0)
                kfree(dup_buf);
index 1a74867..ec7db2b 100644 (file)
@@ -26,7 +26,6 @@ static char *dev_info = "hostap_cs";
 MODULE_AUTHOR("Jouni Malinen");
 MODULE_DESCRIPTION("Support for Intersil Prism2-based 802.11 wireless LAN "
                   "cards (PC Card).");
-MODULE_SUPPORTED_DEVICE("Intersil Prism2-based WLAN cards (PC Card)");
 MODULE_LICENSE("GPL");
 
 
index 101887e..52d7750 100644 (file)
@@ -27,7 +27,6 @@ static char *dev_info = "hostap_pci";
 MODULE_AUTHOR("Jouni Malinen");
 MODULE_DESCRIPTION("Support for Intersil Prism2.5-based 802.11 wireless LAN "
                   "PCI cards.");
-MODULE_SUPPORTED_DEVICE("Intersil Prism2.5-based WLAN PCI cards");
 MODULE_LICENSE("GPL");
 
 
index 841cfc6..5824729 100644 (file)
@@ -30,7 +30,6 @@ static char *dev_info = "hostap_plx";
 MODULE_AUTHOR("Jouni Malinen");
 MODULE_DESCRIPTION("Support for Intersil Prism2-based 802.11 wireless LAN "
                   "cards (PLX).");
-MODULE_SUPPORTED_DEVICE("Intersil Prism2-based WLAN cards (PLX)");
 MODULE_LICENSE("GPL");
 
 
index 18980bb..6dad7f6 100644 (file)
 
 #define MT_WTBLON_TOP_BASE             0x34000
 #define MT_WTBLON_TOP(ofs)             (MT_WTBLON_TOP_BASE + (ofs))
-#define MT_WTBLON_TOP_WDUCR            MT_WTBLON_TOP(0x0)
+#define MT_WTBLON_TOP_WDUCR            MT_WTBLON_TOP(0x200)
 #define MT_WTBLON_TOP_WDUCR_GROUP      GENMASK(2, 0)
 
-#define MT_WTBL_UPDATE                 MT_WTBLON_TOP(0x030)
+#define MT_WTBL_UPDATE                 MT_WTBLON_TOP(0x230)
 #define MT_WTBL_UPDATE_WLAN_IDX                GENMASK(9, 0)
 #define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12)
 #define MT_WTBL_UPDATE_BUSY            BIT(31)
index 8f860c1..dec6ffd 100644 (file)
@@ -1821,7 +1821,6 @@ static const struct pci_device_id rt2400pci_device_table[] = {
 MODULE_AUTHOR(DRV_PROJECT);
 MODULE_VERSION(DRV_VERSION);
 MODULE_DESCRIPTION("Ralink RT2400 PCI & PCMCIA Wireless LAN driver.");
-MODULE_SUPPORTED_DEVICE("Ralink RT2460 PCI & PCMCIA chipset based cards");
 MODULE_DEVICE_TABLE(pci, rt2400pci_device_table);
 MODULE_LICENSE("GPL");
 
index e940443..8faa0a8 100644 (file)
@@ -2119,7 +2119,6 @@ static const struct pci_device_id rt2500pci_device_table[] = {
 MODULE_AUTHOR(DRV_PROJECT);
 MODULE_VERSION(DRV_VERSION);
 MODULE_DESCRIPTION("Ralink RT2500 PCI & PCMCIA Wireless LAN driver.");
-MODULE_SUPPORTED_DEVICE("Ralink RT2560 PCI & PCMCIA chipset based cards");
 MODULE_DEVICE_TABLE(pci, rt2500pci_device_table);
 MODULE_LICENSE("GPL");
 
index fce05fc..bb5ed66 100644 (file)
@@ -1956,7 +1956,6 @@ static const struct usb_device_id rt2500usb_device_table[] = {
 MODULE_AUTHOR(DRV_PROJECT);
 MODULE_VERSION(DRV_VERSION);
 MODULE_DESCRIPTION("Ralink RT2500 USB Wireless LAN driver.");
-MODULE_SUPPORTED_DEVICE("Ralink RT2570 USB chipset based cards");
 MODULE_DEVICE_TABLE(usb, rt2500usb_device_table);
 MODULE_LICENSE("GPL");
 
index 9a33baa..1fde0e7 100644 (file)
@@ -439,7 +439,6 @@ static const struct pci_device_id rt2800pci_device_table[] = {
 MODULE_AUTHOR(DRV_PROJECT);
 MODULE_VERSION(DRV_VERSION);
 MODULE_DESCRIPTION("Ralink RT2800 PCI & PCMCIA Wireless LAN driver.");
-MODULE_SUPPORTED_DEVICE("Ralink RT2860 PCI & PCMCIA chipset based cards");
 MODULE_FIRMWARE(FIRMWARE_RT2860);
 MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
 MODULE_LICENSE("GPL");
index 36ac18c..b5c67f6 100644 (file)
@@ -1248,7 +1248,6 @@ static const struct usb_device_id rt2800usb_device_table[] = {
 MODULE_AUTHOR(DRV_PROJECT);
 MODULE_VERSION(DRV_VERSION);
 MODULE_DESCRIPTION("Ralink RT2800 USB Wireless LAN driver.");
-MODULE_SUPPORTED_DEVICE("Ralink RT2870 USB chipset based cards");
 MODULE_DEVICE_TABLE(usb, rt2800usb_device_table);
 MODULE_FIRMWARE(FIRMWARE_RT2870);
 MODULE_LICENSE("GPL");
index 02da5dd..82cfc2a 100644 (file)
@@ -2993,8 +2993,6 @@ static const struct pci_device_id rt61pci_device_table[] = {
 MODULE_AUTHOR(DRV_PROJECT);
 MODULE_VERSION(DRV_VERSION);
 MODULE_DESCRIPTION("Ralink RT61 PCI & PCMCIA Wireless LAN driver.");
-MODULE_SUPPORTED_DEVICE("Ralink RT2561, RT2561s & RT2661 "
-                       "PCI & PCMCIA chipset based cards");
 MODULE_DEVICE_TABLE(pci, rt61pci_device_table);
 MODULE_FIRMWARE(FIRMWARE_RT2561);
 MODULE_FIRMWARE(FIRMWARE_RT2561s);
index e697937..5ff2c74 100644 (file)
@@ -2513,7 +2513,6 @@ static const struct usb_device_id rt73usb_device_table[] = {
 MODULE_AUTHOR(DRV_PROJECT);
 MODULE_VERSION(DRV_VERSION);
 MODULE_DESCRIPTION("Ralink RT73 USB Wireless LAN driver.");
-MODULE_SUPPORTED_DEVICE("Ralink RT2571W & RT2671 USB chipset based cards");
 MODULE_DEVICE_TABLE(usb, rt73usb_device_table);
 MODULE_FIRMWARE(FIRMWARE_RT2571);
 MODULE_LICENSE("GPL");
index 9a3d243..d984832 100644 (file)
@@ -441,6 +441,5 @@ module_init(rsi_91x_hal_module_init);
 module_exit(rsi_91x_hal_module_exit);
 MODULE_AUTHOR("Redpine Signals Inc");
 MODULE_DESCRIPTION("Station driver for RSI 91x devices");
-MODULE_SUPPORTED_DEVICE("RSI-91x");
 MODULE_VERSION("0.1");
 MODULE_LICENSE("Dual BSD/GPL");
index 592e9da..fe0287b 100644 (file)
@@ -1571,7 +1571,6 @@ module_exit(rsi_module_exit);
 
 MODULE_AUTHOR("Redpine Signals Inc");
 MODULE_DESCRIPTION("Common SDIO layer for RSI drivers");
-MODULE_SUPPORTED_DEVICE("RSI-91x");
 MODULE_DEVICE_TABLE(sdio, rsi_dev_table);
 MODULE_FIRMWARE(FIRMWARE_RSI9113);
 MODULE_VERSION("0.1");
index a4a533c..3fbe2a3 100644 (file)
@@ -928,7 +928,6 @@ module_usb_driver(rsi_driver);
 
 MODULE_AUTHOR("Redpine Signals Inc");
 MODULE_DESCRIPTION("Common USB layer for RSI drivers");
-MODULE_SUPPORTED_DEVICE("RSI-91x");
 MODULE_DEVICE_TABLE(usb, rsi_dev_table);
 MODULE_FIRMWARE(FIRMWARE_RSI9113);
 MODULE_VERSION("0.1");
index c878097..1df9595 100644 (file)
@@ -12,6 +12,7 @@
 #include <net/cfg80211.h>
 #include <net/rtnetlink.h>
 #include <linux/etherdevice.h>
+#include <linux/math64.h>
 #include <linux/module.h>
 
 static struct wiphy *common_wiphy;
@@ -168,11 +169,11 @@ static void virt_wifi_scan_result(struct work_struct *work)
                             scan_result.work);
        struct wiphy *wiphy = priv_to_wiphy(priv);
        struct cfg80211_scan_info scan_info = { .aborted = false };
+       u64 tsf = div_u64(ktime_get_boottime_ns(), 1000);
 
        informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
                                           CFG80211_BSS_FTYPE_PRESP,
-                                          fake_router_bssid,
-                                          ktime_get_boottime_ns(),
+                                          fake_router_bssid, tsf,
                                           WLAN_CAPABILITY_ESS, 0,
                                           (void *)&ssid, sizeof(ssid),
                                           DBM_TO_MBM(-50), GFP_KERNEL);
index a565389..0896e21 100644 (file)
@@ -1226,28 +1226,12 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
                queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
 }
 
-static int nvme_keep_alive(struct nvme_ctrl *ctrl)
-{
-       struct request *rq;
-
-       rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,
-                       BLK_MQ_REQ_RESERVED);
-       if (IS_ERR(rq))
-               return PTR_ERR(rq);
-
-       rq->timeout = ctrl->kato * HZ;
-       rq->end_io_data = ctrl;
-
-       blk_execute_rq_nowait(NULL, rq, 0, nvme_keep_alive_end_io);
-
-       return 0;
-}
-
 static void nvme_keep_alive_work(struct work_struct *work)
 {
        struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
                        struct nvme_ctrl, ka_work);
        bool comp_seen = ctrl->comp_seen;
+       struct request *rq;
 
        if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
                dev_dbg(ctrl->device,
@@ -1257,12 +1241,18 @@ static void nvme_keep_alive_work(struct work_struct *work)
                return;
        }
 
-       if (nvme_keep_alive(ctrl)) {
+       rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd,
+                               BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+       if (IS_ERR(rq)) {
                /* allocation failure, reset the controller */
-               dev_err(ctrl->device, "keep-alive failed\n");
+               dev_err(ctrl->device, "keep-alive failed: %ld\n", PTR_ERR(rq));
                nvme_reset_ctrl(ctrl);
                return;
        }
+
+       rq->timeout = ctrl->kato * HZ;
+       rq->end_io_data = ctrl;
+       blk_execute_rq_nowait(NULL, rq, 0, nvme_keep_alive_end_io);
 }
 
 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
@@ -1964,30 +1954,18 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
                blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
 }
 
-static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
+/*
+ * Even though the NVMe spec explicitly states that MDTS is not applicable
+ * to write-zeroes, we are cautious and limit the size to the controller's
+ * max_hw_sectors value, which is based on the MDTS field and possibly other
+ * limiting factors.
+ */
+static void nvme_config_write_zeroes(struct request_queue *q,
+               struct nvme_ctrl *ctrl)
 {
-       u64 max_blocks;
-
-       if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) ||
-           (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
-               return;
-       /*
-        * Even though NVMe spec explicitly states that MDTS is not
-        * applicable to the write-zeroes:- "The restriction does not apply to
-        * commands that do not transfer data between the host and the
-        * controller (e.g., Write Uncorrectable ro Write Zeroes command).".
-        * In order to be more cautious use controller's max_hw_sectors value
-        * to configure the maximum sectors for the write-zeroes which is
-        * configured based on the controller's MDTS field in the
-        * nvme_init_identify() if available.
-        */
-       if (ns->ctrl->max_hw_sectors == UINT_MAX)
-               max_blocks = (u64)USHRT_MAX + 1;
-       else
-               max_blocks = ns->ctrl->max_hw_sectors + 1;
-
-       blk_queue_max_write_zeroes_sectors(disk->queue,
-                                          nvme_lba_to_sect(ns, max_blocks));
+       if ((ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) &&
+           !(ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
+               blk_queue_max_write_zeroes_sectors(q, ctrl->max_hw_sectors);
 }
 
 static bool nvme_ns_ids_valid(struct nvme_ns_ids *ids)
@@ -2159,7 +2137,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
        set_capacity_and_notify(disk, capacity);
 
        nvme_config_discard(disk, ns);
-       nvme_config_write_zeroes(disk, ns);
+       nvme_config_write_zeroes(disk->queue, ns->ctrl);
 
        set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) ||
                test_bit(NVME_NS_FORCE_RO, &ns->flags));
index 733010d..888b108 100644 (file)
 /* default is -1: the fail fast mechanism is disabled  */
 #define NVMF_DEF_FAIL_FAST_TMO         -1
 
+/*
+ * Reserve one command for internal use.  This command is used for sending
+ * the connect command, as well as for the keep-alive command on the admin
+ * queue once live.
+ */
+#define NVMF_RESERVED_TAGS     1
+
 /*
  * Define a host as seen by the target.  We allocate one at boot, but also
  * allow the override it when creating controllers.  This is both to provide
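Centralizing the reserved-tag count in NVMF_RESERVED_TAGS keeps all of the fabrics transports consistent, and one reserved slot suffices because, per the comment above, the connect command and the keep-alive command are not outstanding at the same time: keep-alive only starts once the admin queue is live. A sketch of reserved-versus-normal tag allocation (semantics simplified relative to blk-mq's reserved tag sets):

#include <stdio.h>
#include <stdbool.h>

#define QUEUE_DEPTH    8
#define RESERVED_TAGS  1        /* connect, then keep-alive once live */

static bool in_use[QUEUE_DEPTH];

/* Reserved allocations may use any slot; normal ones must leave
 * RESERVED_TAGS slots free so internal commands can always proceed.
 */
static int alloc_tag(bool reserved)
{
        int first = reserved ? 0 : RESERVED_TAGS;
        int i;

        for (i = first; i < QUEUE_DEPTH; i++) {
                if (!in_use[i]) {
                        in_use[i] = true;
                        return i;
                }
        }
        return -1;
}

int main(void)
{
        int i;

        for (i = RESERVED_TAGS; i < QUEUE_DEPTH; i++)
                alloc_tag(false);       /* saturate the normal tags */

        printf("reserved alloc: %d\n", alloc_tag(true));        /* still succeeds */
        printf("normal alloc:   %d\n", alloc_tag(false));       /* -1, pool full */
        return 0;
}
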
index 73d0737..6ffa8de 100644 (file)
@@ -2863,7 +2863,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
        memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
        ctrl->tag_set.ops = &nvme_fc_mq_ops;
        ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
-       ctrl->tag_set.reserved_tags = 1; /* fabric connect */
+       ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS;
        ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
        ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        ctrl->tag_set.cmd_size =
@@ -3485,7 +3485,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
        memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
        ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
        ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
-       ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
+       ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS;
        ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
        ctrl->admin_tag_set.cmd_size =
                struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
index 53ac4d7..be905d4 100644 (file)
@@ -736,8 +736,11 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
                return ret;
 
        ctrl->ctrl.queue_count = nr_io_queues + 1;
-       if (ctrl->ctrl.queue_count < 2)
-               return 0;
+       if (ctrl->ctrl.queue_count < 2) {
+               dev_err(ctrl->ctrl.device,
+                       "unable to set any I/O queues\n");
+               return -ENOMEM;
+       }
 
        dev_info(ctrl->ctrl.device,
                "creating %d I/O queues.\n", nr_io_queues);
@@ -798,7 +801,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
                memset(set, 0, sizeof(*set));
                set->ops = &nvme_rdma_admin_mq_ops;
                set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
-               set->reserved_tags = 2; /* connect + keep-alive */
+               set->reserved_tags = NVMF_RESERVED_TAGS;
                set->numa_node = nctrl->numa_node;
                set->cmd_size = sizeof(struct nvme_rdma_request) +
                                NVME_RDMA_DATA_SGL_SIZE;
@@ -811,7 +814,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
                memset(set, 0, sizeof(*set));
                set->ops = &nvme_rdma_mq_ops;
                set->queue_depth = nctrl->sqsize + 1;
-               set->reserved_tags = 1; /* fabric connect */
+               set->reserved_tags = NVMF_RESERVED_TAGS;
                set->numa_node = nctrl->numa_node;
                set->flags = BLK_MQ_F_SHOULD_MERGE;
                set->cmd_size = sizeof(struct nvme_rdma_request) +
index 69f59d2..a0f00cb 100644 (file)
@@ -287,7 +287,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
         * directly, otherwise queue io_work. Also, only do that if we
         * are on the same cpu, so we don't introduce contention.
         */
-       if (queue->io_cpu == __smp_processor_id() &&
+       if (queue->io_cpu == raw_smp_processor_id() &&
            sync && empty && mutex_trylock(&queue->send_mutex)) {
                queue->more_requests = !last;
                nvme_tcp_send_all(queue);
@@ -568,6 +568,13 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
        req->pdu_len = le32_to_cpu(pdu->r2t_length);
        req->pdu_sent = 0;
 
+       if (unlikely(!req->pdu_len)) {
+               dev_err(queue->ctrl->ctrl.device,
+                       "req %d r2t len is %u, probably a bug...\n",
+                       rq->tag, req->pdu_len);
+               return -EPROTO;
+       }
+
        if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
                dev_err(queue->ctrl->ctrl.device,
                        "req %d r2t len %u exceeded data len %u (%zu sent)\n",
@@ -1575,7 +1582,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
                memset(set, 0, sizeof(*set));
                set->ops = &nvme_tcp_admin_mq_ops;
                set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
-               set->reserved_tags = 2; /* connect + keep-alive */
+               set->reserved_tags = NVMF_RESERVED_TAGS;
                set->numa_node = nctrl->numa_node;
                set->flags = BLK_MQ_F_BLOCKING;
                set->cmd_size = sizeof(struct nvme_tcp_request);
@@ -1587,7 +1594,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
                memset(set, 0, sizeof(*set));
                set->ops = &nvme_tcp_mq_ops;
                set->queue_depth = nctrl->sqsize + 1;
-               set->reserved_tags = 1; /* fabric connect */
+               set->reserved_tags = NVMF_RESERVED_TAGS;
                set->numa_node = nctrl->numa_node;
                set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
                set->cmd_size = sizeof(struct nvme_tcp_request);
@@ -1745,8 +1752,11 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
                return ret;
 
        ctrl->queue_count = nr_io_queues + 1;
-       if (ctrl->queue_count < 2)
-               return 0;
+       if (ctrl->queue_count < 2) {
+               dev_err(ctrl->device,
+                       "unable to set any I/O queues\n");
+               return -ENOMEM;
+       }
 
        dev_info(ctrl->device,
                "creating %d I/O queues.\n", nr_io_queues);
index be6fcda..a027433 100644 (file)
@@ -1118,9 +1118,20 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
 {
        lockdep_assert_held(&ctrl->lock);
 
-       if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
-           nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
-           nvmet_cc_mps(ctrl->cc) != 0 ||
+       /*
+        * Only I/O controllers should verify iosqes and iocqes.
+        * Strictly speaking, the spec says a discovery controller
+        * should verify that iosqes and iocqes are zeroed; however,
+        * that would break backwards compatibility, so don't enforce it.
+        */
+       if (ctrl->subsys->type != NVME_NQN_DISC &&
+           (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
+            nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
+               ctrl->csts = NVME_CSTS_CFS;
+               return;
+       }
+
+       if (nvmet_cc_mps(ctrl->cc) != 0 ||
            nvmet_cc_ams(ctrl->cc) != 0 ||
            nvmet_cc_css(ctrl->cc) != 0) {
                ctrl->csts = NVME_CSTS_CFS;
index cb6f865..3e189e7 100644 (file)
@@ -349,7 +349,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
        memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
        ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
        ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
-       ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
+       ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS;
        ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
        ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
                NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
@@ -520,7 +520,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
        memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
        ctrl->tag_set.ops = &nvme_loop_mq_ops;
        ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
-       ctrl->tag_set.reserved_tags = 1; /* fabric connect */
+       ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS;
        ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
        ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
index 8b0485a..d658c6e 100644 (file)
@@ -1098,11 +1098,11 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
                cmd->rbytes_done += ret;
        }
 
+       nvmet_tcp_unmap_pdu_iovec(cmd);
        if (queue->data_digest) {
                nvmet_tcp_prep_recv_ddgst(cmd);
                return 0;
        }
-       nvmet_tcp_unmap_pdu_iovec(cmd);
 
        if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
            cmd->rbytes_done == cmd->req.transfer_len) {
index dcc1dd9..adb26af 100644 (file)
@@ -205,7 +205,7 @@ static void populate_properties(const void *blob,
                *pprev = NULL;
 }
 
-static bool populate_node(const void *blob,
+static int populate_node(const void *blob,
                          int offset,
                          void **mem,
                          struct device_node *dad,
@@ -214,24 +214,24 @@ static bool populate_node(const void *blob,
 {
        struct device_node *np;
        const char *pathp;
-       unsigned int l, allocl;
+       int len;
 
-       pathp = fdt_get_name(blob, offset, &l);
+       pathp = fdt_get_name(blob, offset, &len);
        if (!pathp) {
                *pnp = NULL;
-               return false;
+               return len;
        }
 
-       allocl = ++l;
+       len++;
 
-       np = unflatten_dt_alloc(mem, sizeof(struct device_node) + allocl,
+       np = unflatten_dt_alloc(mem, sizeof(struct device_node) + len,
                                __alignof__(struct device_node));
        if (!dryrun) {
                char *fn;
                of_node_init(np);
                np->full_name = fn = ((char *)np) + sizeof(*np);
 
-               memcpy(fn, pathp, l);
+               memcpy(fn, pathp, len);
 
                if (dad != NULL) {
                        np->parent = dad;
@@ -295,6 +295,7 @@ static int unflatten_dt_nodes(const void *blob,
        struct device_node *nps[FDT_MAX_DEPTH];
        void *base = mem;
        bool dryrun = !base;
+       int ret;
 
        if (nodepp)
                *nodepp = NULL;
@@ -322,9 +323,10 @@ static int unflatten_dt_nodes(const void *blob,
                    !of_fdt_device_is_available(blob, offset))
                        continue;
 
-               if (!populate_node(blob, offset, &mem, nps[depth],
-                                  &nps[depth+1], dryrun))
-                       return mem - base;
+               ret = populate_node(blob, offset, &mem, nps[depth],
+                                  &nps[depth+1], dryrun);
+               if (ret < 0)
+                       return ret;
 
                if (!dryrun && nodepp && !*nodepp)
                        *nodepp = nps[depth+1];
@@ -372,6 +374,10 @@ void *__unflatten_device_tree(const void *blob,
 {
        int size;
        void *mem;
+       int ret;
+
+       if (mynodes)
+               *mynodes = NULL;
 
        pr_debug(" -> unflatten_device_tree()\n");
 
@@ -392,7 +398,7 @@ void *__unflatten_device_tree(const void *blob,
 
        /* First pass, scan for size */
        size = unflatten_dt_nodes(blob, NULL, dad, NULL);
-       if (size < 0)
+       if (size <= 0)
                return NULL;
 
        size = ALIGN(size, 4);
@@ -410,12 +416,16 @@ void *__unflatten_device_tree(const void *blob,
        pr_debug("  unflattening %p...\n", mem);
 
        /* Second pass, do actual unflattening */
-       unflatten_dt_nodes(blob, mem, dad, mynodes);
+       ret = unflatten_dt_nodes(blob, mem, dad, mynodes);
+
        if (be32_to_cpup(mem + size) != 0xdeadbeef)
                pr_warn("End of tree marker overwritten: %08x\n",
                        be32_to_cpup(mem + size));
 
-       if (detached && mynodes) {
+       if (ret <= 0)
+               return NULL;
+
+       if (detached && mynodes && *mynodes) {
                of_node_set_flag(*mynodes, OF_DETACHED);
                pr_debug("unflattened tree is detached\n");
        }
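populate_node() previously returned a bool, so unflatten_dt_nodes() could not tell an out-of-memory condition from a corrupt FDT and handed back a bogus size instead of an error. Returning the negative libfdt code from fdt_get_name() lets the failure propagate up to __unflatten_device_tree(), which now bails out cleanly. The shape of the bool-to-errno conversion, with a faked name lookup standing in for fdt_get_name():

#include <stdio.h>

#define ERR_BADSTRUCTURE (-3)   /* stand-in for a negative libfdt error code */

/* Returns 0 on success or a negative error code, instead of true/false,
 * so the caller can forward the real reason for the failure.
 */
static int populate_node(int offset, int *len_out)
{
        int len = (offset >= 0) ? 4 : ERR_BADSTRUCTURE; /* fake fdt_get_name() */

        if (len < 0)
                return len;     /* propagate; do not flatten to "false" */
        *len_out = len;
        return 0;
}

static int unflatten(int offset)
{
        int len, ret;

        ret = populate_node(offset, &len);
        if (ret < 0)
                return ret;     /* the error bubbles all the way up */
        return len;             /* success: a real size */
}

int main(void)
{
        printf("good: %d\n", unflatten(0));     /* prints 4 */
        printf("bad:  %d\n", unflatten(-1));    /* prints -3, not a bogus size */
        return 0;
}
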
index d9e6a32..d717efb 100644 (file)
@@ -8,6 +8,8 @@
  * Copyright (C) 1996-2005 Paul Mackerras.
  */
 
+#define FDT_ALIGN_SIZE 8
+
 /**
  * struct alias_prop - Alias property in 'aliases' node
  * @link:      List node to link the structure in aliases_lookup list
index 50bbe0e..23effe5 100644 (file)
@@ -57,7 +57,7 @@ struct fragment {
  * struct overlay_changeset
  * @id:                        changeset identifier
  * @ovcs_list:         list on which we are located
- * @fdt:               FDT that was unflattened to create @overlay_tree
+ * @fdt:               base of memory allocated to hold aligned FDT that was unflattened to create @overlay_tree
  * @overlay_tree:      expanded device tree that contains the fragment nodes
  * @count:             count of fragment structures
  * @fragments:         fragment nodes in the overlay expanded device tree
@@ -719,8 +719,8 @@ static struct device_node *find_target(struct device_node *info_node)
 /**
  * init_overlay_changeset() - initialize overlay changeset from overlay tree
  * @ovcs:      Overlay changeset to build
- * @fdt:       the FDT that was unflattened to create @tree
- * @tree:      Contains all the overlay fragments and overlay fixup nodes
+ * @fdt:       base of memory allocated to hold aligned FDT that was unflattened to create @tree
+ * @tree:      Contains the overlay fragments and overlay fixup nodes
  *
  * Initialize @ovcs.  Populate @ovcs->fragments with node information from
  * the top level of @tree.  The relevant top level nodes are the fragment
@@ -873,7 +873,7 @@ static void free_overlay_changeset(struct overlay_changeset *ovcs)
  * internal documentation
  *
  * of_overlay_apply() - Create and apply an overlay changeset
- * @fdt:       the FDT that was unflattened to create @tree
+ * @fdt:       base of memory allocated to hold the aligned FDT
  * @tree:      Expanded overlay device tree
  * @ovcs_id:   Pointer to overlay changeset id
  *
@@ -953,7 +953,9 @@ static int of_overlay_apply(const void *fdt, struct device_node *tree,
        /*
         * after overlay_notify(), ovcs->overlay_tree related pointers may have
         * leaked to drivers, so can not kfree() tree, aka ovcs->overlay_tree;
-        * and can not free fdt, aka ovcs->fdt
+        * and can not free memory containing aligned fdt.  The aligned fdt
+        * is contained within the memory at ovcs->fdt, possibly at an offset
+        * from ovcs->fdt.
         */
        ret = overlay_notify(ovcs, OF_OVERLAY_PRE_APPLY);
        if (ret) {
@@ -1014,10 +1016,11 @@ out:
 int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
                         int *ovcs_id)
 {
-       const void *new_fdt;
+       void *new_fdt;
+       void *new_fdt_align;
        int ret;
        u32 size;
-       struct device_node *overlay_root;
+       struct device_node *overlay_root = NULL;
 
        *ovcs_id = 0;
        ret = 0;
@@ -1036,11 +1039,14 @@ int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
         * Must create permanent copy of FDT because of_fdt_unflatten_tree()
         * will create pointers to the passed in FDT in the unflattened tree.
         */
-       new_fdt = kmemdup(overlay_fdt, size, GFP_KERNEL);
+       new_fdt = kmalloc(size + FDT_ALIGN_SIZE, GFP_KERNEL);
        if (!new_fdt)
                return -ENOMEM;
 
-       of_fdt_unflatten_tree(new_fdt, NULL, &overlay_root);
+       new_fdt_align = PTR_ALIGN(new_fdt, FDT_ALIGN_SIZE);
+       memcpy(new_fdt_align, overlay_fdt, size);
+
+       of_fdt_unflatten_tree(new_fdt_align, NULL, &overlay_root);
        if (!overlay_root) {
                pr_err("unable to unflatten overlay_fdt\n");
                ret = -EINVAL;
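
kmemdup() guarantees no particular alignment, while the unflattening code wants the FDT blob 8-byte aligned, hence the switch to over-allocating by FDT_ALIGN_SIZE and placing the copy at an aligned offset inside the buffer. A self-contained userspace sketch of the pattern (ptr_align() is a stand-in for the kernel's PTR_ALIGN()):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define FDT_ALIGN_SIZE 8

/* Round p up to the next multiple of a (a must be a power of two). */
static void *ptr_align(void *p, uintptr_t a)
{
	return (void *)(((uintptr_t)p + a - 1) & ~(a - 1));
}

/* Copy src into a fresh buffer so the copy starts 8-byte aligned.
 * *base receives the pointer that must later be passed to free(). */
static void *dup_aligned(const void *src, size_t size, void **base)
{
	void *buf = malloc(size + FDT_ALIGN_SIZE);
	void *aligned;

	if (!buf)
		return NULL;

	aligned = ptr_align(buf, FDT_ALIGN_SIZE);
	memcpy(aligned, src, size);
	*base = buf;
	return aligned;
}

This is also why the @fdt kerneldoc now says "base of memory allocated": kfree() must be given the allocator's original pointer, not the possibly-offset aligned copy.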
index 5036a36..78427c8 100644 (file)
@@ -1262,7 +1262,16 @@ DEFINE_SIMPLE_PROP(pinctrl7, "pinctrl-7", NULL)
 DEFINE_SIMPLE_PROP(pinctrl8, "pinctrl-8", NULL)
 DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
 DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
-DEFINE_SUFFIX_PROP(gpios, "-gpios", "#gpio-cells")
+
+static struct device_node *parse_gpios(struct device_node *np,
+                                      const char *prop_name, int index)
+{
+       if (!strcmp_suffix(prop_name, ",nr-gpios"))
+               return NULL;
+
+       return parse_suffix_prop_cells(np, prop_name, index, "-gpios",
+                                      "#gpio-cells");
+}
 
 static struct device_node *parse_iommu_maps(struct device_node *np,
                                            const char *prop_name, int index)
index eb10062..819a20a 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/slab.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
+#include <linux/kernel.h>
 
 #include <linux/i2c.h>
 #include <linux/i2c-mux.h>
@@ -1408,7 +1409,8 @@ static void attach_node_and_children(struct device_node *np)
 static int __init unittest_data_add(void)
 {
        void *unittest_data;
-       struct device_node *unittest_data_node, *np;
+       void *unittest_data_align;
+       struct device_node *unittest_data_node = NULL, *np;
        /*
         * __dtb_testcases_begin[] and __dtb_testcases_end[] are magically
         * created by cmd_dt_S_dtb in scripts/Makefile.lib
@@ -1417,21 +1419,29 @@ static int __init unittest_data_add(void)
        extern uint8_t __dtb_testcases_end[];
        const int size = __dtb_testcases_end - __dtb_testcases_begin;
        int rc;
+       void *ret;
 
        if (!size) {
-               pr_warn("%s: No testcase data to attach; not running tests\n",
-                       __func__);
+               pr_warn("%s: testcases is empty\n", __func__);
                return -ENODATA;
        }
 
        /* creating copy */
-       unittest_data = kmemdup(__dtb_testcases_begin, size, GFP_KERNEL);
+       unittest_data = kmalloc(size + FDT_ALIGN_SIZE, GFP_KERNEL);
        if (!unittest_data)
                return -ENOMEM;
 
-       of_fdt_unflatten_tree(unittest_data, NULL, &unittest_data_node);
+       unittest_data_align = PTR_ALIGN(unittest_data, FDT_ALIGN_SIZE);
+       memcpy(unittest_data_align, __dtb_testcases_begin, size);
+
+       ret = of_fdt_unflatten_tree(unittest_data_align, NULL, &unittest_data_node);
+       if (!ret) {
+               pr_warn("%s: unflatten testcases tree failed\n", __func__);
+               kfree(unittest_data);
+               return -ENODATA;
+       }
        if (!unittest_data_node) {
-               pr_warn("%s: No tree to attach; not running tests\n", __func__);
+               pr_warn("%s: testcases tree is empty\n", __func__);
                kfree(unittest_data);
                return -ENODATA;
        }
index 1e88bcf..84d5701 100644 (file)
@@ -241,6 +241,5 @@ module_platform_driver_probe(amiga_parallel_driver, amiga_parallel_probe);
 
 MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>");
 MODULE_DESCRIPTION("Parport Driver for Amiga builtin Port");
-MODULE_SUPPORTED_DEVICE("Amiga builtin Parallel Port");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:amiga-parallel");
index 2ff0fe0..1623f01 100644 (file)
@@ -218,7 +218,6 @@ static void __exit parport_atari_exit(void)
 
 MODULE_AUTHOR("Andreas Schwab");
 MODULE_DESCRIPTION("Parport Driver for Atari builtin Port");
-MODULE_SUPPORTED_DEVICE("Atari builtin Parallel Port");
 MODULE_LICENSE("GPL");
 
 module_init(parport_atari_init)
index 9228e8f..1e43b3f 100644 (file)
@@ -41,7 +41,6 @@
 
 MODULE_AUTHOR("Helge Deller <deller@gmx.de>");
 MODULE_DESCRIPTION("HP-PARISC PC-style parallel port driver");
-MODULE_SUPPORTED_DEVICE("integrated PC-style parallel port");
 MODULE_LICENSE("GPL");
 
 
index d6bbe84..f4d0da7 100644 (file)
@@ -359,7 +359,6 @@ static void __exit parport_mfc3_exit(void)
 
 MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>");
 MODULE_DESCRIPTION("Parport Driver for Multiface 3 expansion cards Parallel Port");
-MODULE_SUPPORTED_DEVICE("Multiface 3 Parallel Port");
 MODULE_LICENSE("GPL");
 
 module_init(parport_mfc3_init)
index e840c1b..865fc41 100644 (file)
@@ -377,6 +377,5 @@ module_platform_driver(bpp_sbus_driver);
 
 MODULE_AUTHOR("Derrick J Brashear");
 MODULE_DESCRIPTION("Parport Driver for Sparc bidirectional Port");
-MODULE_SUPPORTED_DEVICE("Sparc Bidirectional Parallel Port");
 MODULE_VERSION("2.0");
 MODULE_LICENSE("GPL");
index cdbfa5d..dbfa0b5 100644 (file)
@@ -34,12 +34,11 @@ static ssize_t add_slot_store(struct kobject *kobj, struct kobj_attribute *attr,
        if (nbytes >= MAX_DRC_NAME_LEN)
                return 0;
 
-       memcpy(drc_name, buf, nbytes);
+       strscpy(drc_name, buf, nbytes + 1);
 
        end = strchr(drc_name, '\n');
-       if (!end)
-               end = &drc_name[nbytes];
-       *end = '\0';
+       if (end)
+               *end = '\0';
 
        rc = dlpar_add_slot(drc_name);
        if (rc)
@@ -65,12 +64,11 @@ static ssize_t remove_slot_store(struct kobject *kobj,
        if (nbytes >= MAX_DRC_NAME_LEN)
                return 0;
 
-       memcpy(drc_name, buf, nbytes);
+       strscpy(drc_name, buf, nbytes + 1);
 
        end = strchr(drc_name, '\n');
-       if (!end)
-               end = &drc_name[nbytes];
-       *end = '\0';
+       if (end)
+               *end = '\0';
 
        rc = dlpar_remove_slot(drc_name);
        if (rc)
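
strscpy() copies at most size - 1 characters, always NUL-terminates the destination, and returns -E2BIG on truncation, so the manual terminator write the old memcpy() path needed disappears and only the optional newline trim remains. A userspace illustration (strscpy_demo() is a simplified stand-in, not the kernel implementation):

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the kernel's strscpy(): copy at most size - 1
 * characters and always NUL-terminate the destination. */
static void strscpy_demo(char *dst, const char *src, size_t size)
{
	size_t n = strnlen(src, size - 1);

	memcpy(dst, src, n);
	dst[n] = '\0';
}

int main(void)
{
	char drc_name[16];
	const char *buf = "PHB 23\n";	/* hypothetical sysfs input */
	size_t nbytes = strlen(buf);
	char *end;

	strscpy_demo(drc_name, buf, nbytes + 1);
	end = strchr(drc_name, '\n');
	if (end)
		*end = '\0';
	printf("'%s'\n", drc_name);	/* prints 'PHB 23' */
	return 0;
}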
index c9e790c..a047c42 100644 (file)
@@ -93,8 +93,9 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
                pci_dev_put(pdev);
                return -EBUSY;
        }
+       pci_dev_put(pdev);
 
-       zpci_remove_device(zdev);
+       zpci_remove_device(zdev, false);
 
        rc = zpci_disable_device(zdev);
        if (rc)
index 8085782..9f3361c 100644 (file)
@@ -1357,6 +1357,7 @@ static int intel_pinctrl_add_padgroups_by_gpps(struct intel_pinctrl *pctrl,
                                gpps[i].gpio_base = 0;
                                break;
                        case INTEL_GPIO_BASE_NOMAP:
+                               break;
                        default:
                                break;
                }
@@ -1393,6 +1394,7 @@ static int intel_pinctrl_add_padgroups_by_size(struct intel_pinctrl *pctrl,
                gpps[i].size = min(gpp_size, npins);
                npins -= gpps[i].size;
 
+               gpps[i].gpio_base = gpps[i].base;
                gpps[i].padown_num = padown_num;
 
                /*
@@ -1491,8 +1493,13 @@ static int intel_pinctrl_probe(struct platform_device *pdev,
                if (IS_ERR(regs))
                        return PTR_ERR(regs);
 
-               /* Determine community features based on the revision */
+               /*
+                * Determine community features based on the revision.
+                * A value of all ones means the device is not present.
+                */
                value = readl(regs + REVID);
+               if (value == ~0u)
+                       return -ENODEV;
                if (((value & REVID_MASK) >> REVID_SHIFT) >= 0x94) {
                        community->features |= PINCTRL_FEATURE_DEBOUNCE;
                        community->features |= PINCTRL_FEATURE_1K_PD;
index f35edb0..c12fa57 100644 (file)
@@ -572,7 +572,7 @@ static void microchip_sgpio_irq_settype(struct irq_data *data,
        /* Type value spread over 2 registers sets: low, high bit */
        sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER, addr.bit,
                         BIT(addr.port), (!!(type & 0x1)) << addr.port);
-       sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER + SGPIO_MAX_BITS, addr.bit,
+       sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER, SGPIO_MAX_BITS + addr.bit,
                         BIT(addr.port), (!!(type & 0x2)) << addr.port);
 
        if (type == SGPIO_INT_TRG_LEVEL)
index aa1a1c8..53a0bad 100644 (file)
@@ -3727,12 +3727,15 @@ static int __maybe_unused rockchip_pinctrl_suspend(struct device *dev)
 static int __maybe_unused rockchip_pinctrl_resume(struct device *dev)
 {
        struct rockchip_pinctrl *info = dev_get_drvdata(dev);
-       int ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX,
-                              rk3288_grf_gpio6c_iomux |
-                              GPIO6C6_SEL_WRITE_ENABLE);
+       int ret;
 
-       if (ret)
-               return ret;
+       if (info->ctrl->type == RK3288) {
+               ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX,
+                                  rk3288_grf_gpio6c_iomux |
+                                  GPIO6C6_SEL_WRITE_ENABLE);
+               if (ret)
+                       return ret;
+       }
 
        return pinctrl_force_default(info->pctl_dev);
 }
index 369ee20..2f19ab4 100644 (file)
@@ -392,7 +392,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
                          unsigned long *configs, unsigned int nconfs)
 {
        struct lpi_pinctrl *pctrl = dev_get_drvdata(pctldev->dev);
-       unsigned int param, arg, pullup, strength;
+       unsigned int param, arg, pullup = LPI_GPIO_BIAS_DISABLE, strength = 2;
        bool value, output_enabled = false;
        const struct lpi_pingroup *g;
        unsigned long sval;
index 8daccd5..9d41abf 100644 (file)
@@ -1439,14 +1439,14 @@ static const struct msm_pingroup sc7280_groups[] = {
        [172] = PINGROUP(172, qdss, _, _, _, _, _, _, _, _),
        [173] = PINGROUP(173, qdss, _, _, _, _, _, _, _, _),
        [174] = PINGROUP(174, qdss, _, _, _, _, _, _, _, _),
-       [175] = UFS_RESET(ufs_reset, 0x1be000),
-       [176] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x1b3000, 15, 0),
-       [177] = SDC_QDSD_PINGROUP(sdc1_clk, 0x1b3000, 13, 6),
-       [178] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x1b3000, 11, 3),
-       [179] = SDC_QDSD_PINGROUP(sdc1_data, 0x1b3000, 9, 0),
-       [180] = SDC_QDSD_PINGROUP(sdc2_clk, 0x1b4000, 14, 6),
-       [181] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x1b4000, 11, 3),
-       [182] = SDC_QDSD_PINGROUP(sdc2_data, 0x1b4000, 9, 0),
+       [175] = UFS_RESET(ufs_reset, 0xbe000),
+       [176] = SDC_QDSD_PINGROUP(sdc1_rclk, 0xb3004, 0, 6),
+       [177] = SDC_QDSD_PINGROUP(sdc1_clk, 0xb3000, 13, 6),
+       [178] = SDC_QDSD_PINGROUP(sdc1_cmd, 0xb3000, 11, 3),
+       [179] = SDC_QDSD_PINGROUP(sdc1_data, 0xb3000, 9, 0),
+       [180] = SDC_QDSD_PINGROUP(sdc2_clk, 0xb4000, 14, 6),
+       [181] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xb4000, 11, 3),
+       [182] = SDC_QDSD_PINGROUP(sdc2_data, 0xb4000, 9, 0),
 };
 
 static const struct msm_pinctrl_soc_data sc7280_pinctrl = {
index 2b5b0e2..5aaf57b 100644 (file)
@@ -423,7 +423,7 @@ static const char * const gpio_groups[] = {
 
 static const char * const qdss_stm_groups[] = {
        "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", "gpio12", "gpio13",
-       "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19" "gpio20", "gpio21", "gpio22",
+       "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
        "gpio23", "gpio44", "gpio45", "gpio52", "gpio53", "gpio56", "gpio57", "gpio61", "gpio62",
        "gpio63", "gpio64", "gpio65", "gpio66",
 };
index ad4e630..461ec61 100644 (file)
@@ -1173,15 +1173,20 @@ config INTEL_PMC_CORE
        depends on PCI
        help
          The Intel Platform Controller Hub for Intel Core SoCs provides access
-         to Power Management Controller registers via a PCI interface. This
+         to Power Management Controller registers via various interfaces. This
          driver can utilize debugging capabilities and supported features as
-         exposed by the Power Management Controller.
+         exposed by the Power Management Controller. It may also perform some
+         tasks in the PMC in order to enable transition into the SLPS0 state.
+         It should be selected on all Intel platforms supported by the driver.
 
          Supported features:
                - SLP_S0_RESIDENCY counter
                - PCH IP Power Gating status
-               - LTR Ignore
+               - LTR Ignore / LTR Show
                - MPHY/PLL gating status (Sunrisepoint PCH only)
+               - SLPS0 Debug registers (Cannonlake/Icelake PCH)
+               - Low Power Mode registers (Tigerlake and beyond)
+               - PMC quirks as needed to enable SLPS0/S0ix
 
 config INTEL_PMT_CLASS
        tristate
index 80f4b77..091e48c 100644 (file)
@@ -185,5 +185,8 @@ void exit_enum_attributes(void)
                        sysfs_remove_group(wmi_priv.enumeration_data[instance_id].attr_name_kobj,
                                                                &enumeration_attr_group);
        }
+       wmi_priv.enumeration_instances_count = 0;
+
        kfree(wmi_priv.enumeration_data);
+       wmi_priv.enumeration_data = NULL;
 }
index 75aedbb..8a49ba6 100644 (file)
@@ -175,5 +175,8 @@ void exit_int_attributes(void)
                        sysfs_remove_group(wmi_priv.integer_data[instance_id].attr_name_kobj,
                                                                &integer_attr_group);
        }
+       wmi_priv.integer_instances_count = 0;
+
        kfree(wmi_priv.integer_data);
+       wmi_priv.integer_data = NULL;
 }
index 3abcd95..834b3e8 100644 (file)
@@ -183,5 +183,8 @@ void exit_po_attributes(void)
                        sysfs_remove_group(wmi_priv.po_data[instance_id].attr_name_kobj,
                                                                &po_attr_group);
        }
+       wmi_priv.po_instances_count = 0;
+
        kfree(wmi_priv.po_data);
+       wmi_priv.po_data = NULL;
 }
index ac75dce..5525378 100644 (file)
@@ -155,5 +155,8 @@ void exit_str_attributes(void)
                        sysfs_remove_group(wmi_priv.str_data[instance_id].attr_name_kobj,
                                                                &str_attr_group);
        }
+       wmi_priv.str_instances_count = 0;
+
        kfree(wmi_priv.str_data);
+       wmi_priv.str_data = NULL;
 }
index cb81010..7410cca 100644 (file)
@@ -210,25 +210,17 @@ static struct kobj_attribute pending_reboot = __ATTR_RO(pending_reboot);
  */
 static int create_attributes_level_sysfs_files(void)
 {
-       int ret = sysfs_create_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr);
+       int ret;
 
-       if (ret) {
-               pr_debug("could not create reset_bios file\n");
+       ret = sysfs_create_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr);
+       if (ret)
                return ret;
-       }
 
        ret = sysfs_create_file(&wmi_priv.main_dir_kset->kobj, &pending_reboot.attr);
-       if (ret) {
-               pr_debug("could not create changing_pending_reboot file\n");
-               sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr);
-       }
-       return ret;
-}
+       if (ret)
+               return ret;
 
-static void release_reset_bios_data(void)
-{
-       sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr);
-       sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &pending_reboot.attr);
+       return 0;
 }
 
 static ssize_t wmi_sysman_attr_show(struct kobject *kobj, struct attribute *attr,
@@ -373,8 +365,6 @@ static void destroy_attribute_objs(struct kset *kset)
  */
 static void release_attributes_data(void)
 {
-       release_reset_bios_data();
-
        mutex_lock(&wmi_priv.mutex);
        exit_enum_attributes();
        exit_int_attributes();
@@ -386,11 +376,13 @@ static void release_attributes_data(void)
                wmi_priv.authentication_dir_kset = NULL;
        }
        if (wmi_priv.main_dir_kset) {
+               sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr);
+               sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &pending_reboot.attr);
                destroy_attribute_objs(wmi_priv.main_dir_kset);
                kset_unregister(wmi_priv.main_dir_kset);
+               wmi_priv.main_dir_kset = NULL;
        }
        mutex_unlock(&wmi_priv.mutex);
-
 }
 
 /**
@@ -497,7 +489,6 @@ nextobj:
 
 err_attr_init:
        mutex_unlock(&wmi_priv.mutex);
-       release_attributes_data();
        kfree(obj);
        return retval;
 }
@@ -513,102 +504,91 @@ static int __init sysman_init(void)
        }
 
        ret = init_bios_attr_set_interface();
-       if (ret || !wmi_priv.bios_attr_wdev) {
-               pr_debug("failed to initialize set interface\n");
-               goto fail_set_interface;
-       }
+       if (ret)
+               return ret;
 
        ret = init_bios_attr_pass_interface();
-       if (ret || !wmi_priv.password_attr_wdev) {
-               pr_debug("failed to initialize pass interface\n");
-               goto fail_pass_interface;
+       if (ret)
+               goto err_exit_bios_attr_set_interface;
+
+       if (!wmi_priv.bios_attr_wdev || !wmi_priv.password_attr_wdev) {
+               pr_debug("failed to find set or pass interface\n");
+               ret = -ENODEV;
+               goto err_exit_bios_attr_pass_interface;
        }
 
        ret = class_register(&firmware_attributes_class);
        if (ret)
-               goto fail_class;
+               goto err_exit_bios_attr_pass_interface;
 
        wmi_priv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0),
                                  NULL, "%s", DRIVER_NAME);
        if (IS_ERR(wmi_priv.class_dev)) {
                ret = PTR_ERR(wmi_priv.class_dev);
-               goto fail_classdev;
+               goto err_unregister_class;
        }
 
        wmi_priv.main_dir_kset = kset_create_and_add("attributes", NULL,
                                                     &wmi_priv.class_dev->kobj);
        if (!wmi_priv.main_dir_kset) {
                ret = -ENOMEM;
-               goto fail_main_kset;
+               goto err_destroy_classdev;
        }
 
        wmi_priv.authentication_dir_kset = kset_create_and_add("authentication", NULL,
                                                                &wmi_priv.class_dev->kobj);
        if (!wmi_priv.authentication_dir_kset) {
                ret = -ENOMEM;
-               goto fail_authentication_kset;
+               goto err_release_attributes_data;
        }
 
        ret = create_attributes_level_sysfs_files();
        if (ret) {
                pr_debug("could not create reset BIOS attribute\n");
-               goto fail_reset_bios;
+               goto err_release_attributes_data;
        }
 
        ret = init_bios_attributes(ENUM, DELL_WMI_BIOS_ENUMERATION_ATTRIBUTE_GUID);
        if (ret) {
                pr_debug("failed to populate enumeration type attributes\n");
-               goto fail_create_group;
+               goto err_release_attributes_data;
        }
 
        ret = init_bios_attributes(INT, DELL_WMI_BIOS_INTEGER_ATTRIBUTE_GUID);
        if (ret) {
                pr_debug("failed to populate integer type attributes\n");
-               goto fail_create_group;
+               goto err_release_attributes_data;
        }
 
        ret = init_bios_attributes(STR, DELL_WMI_BIOS_STRING_ATTRIBUTE_GUID);
        if (ret) {
                pr_debug("failed to populate string type attributes\n");
-               goto fail_create_group;
+               goto err_release_attributes_data;
        }
 
        ret = init_bios_attributes(PO, DELL_WMI_BIOS_PASSOBJ_ATTRIBUTE_GUID);
        if (ret) {
                pr_debug("failed to populate pass object type attributes\n");
-               goto fail_create_group;
+               goto err_release_attributes_data;
        }
 
        return 0;
 
-fail_create_group:
+err_release_attributes_data:
        release_attributes_data();
 
-fail_reset_bios:
-       if (wmi_priv.authentication_dir_kset) {
-               kset_unregister(wmi_priv.authentication_dir_kset);
-               wmi_priv.authentication_dir_kset = NULL;
-       }
-
-fail_authentication_kset:
-       if (wmi_priv.main_dir_kset) {
-               kset_unregister(wmi_priv.main_dir_kset);
-               wmi_priv.main_dir_kset = NULL;
-       }
-
-fail_main_kset:
+err_destroy_classdev:
        device_destroy(&firmware_attributes_class, MKDEV(0, 0));
 
-fail_classdev:
+err_unregister_class:
        class_unregister(&firmware_attributes_class);
 
-fail_class:
+err_exit_bios_attr_pass_interface:
        exit_bios_attr_pass_interface();
 
-fail_pass_interface:
+err_exit_bios_attr_set_interface:
        exit_bios_attr_set_interface();
 
-fail_set_interface:
        return ret;
 }
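
The init path above is reworked into the standard kernel goto-unwind shape: each failure label undoes exactly the steps that already succeeded, in reverse order, and labels are named after what they undo rather than where they are reached from. A self-contained sketch with illustrative names:

#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }	/* force a failure for the demo */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int example_init(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;	/* nothing to undo yet */

	ret = step_b();
	if (ret)
		goto err_undo_a;

	ret = step_c();
	if (ret)
		goto err_undo_b;

	return 0;

err_undo_b:
	undo_b();
err_undo_a:
	undo_a();
	return ret;
}

int main(void)
{
	printf("init -> %d\n", example_init());	/* undo b, undo a, init -> -1 */
	return 0;
}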
 
index 2f5b8d0..078648a 100644 (file)
@@ -90,6 +90,13 @@ static const struct dmi_system_id button_array_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x2 Detachable"),
                },
        },
+       {
+               .ident = "Lenovo ThinkPad X1 Tablet Gen 2",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Tablet Gen 2"),
+               },
+       },
        { }
 };
 
@@ -476,11 +483,16 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
                        goto wakeup;
 
                /*
-                * Switch events will wake the device and report the new switch
-                * position to the input subsystem.
+                * Some devices send (duplicate) tablet-mode events when moved
+                * around even though the mode has not changed, and they do this
+                * even when suspended.
+                * Update the switch state in case it changed and then return
+                * without waking up to avoid spurious wakeups.
                 */
-               if (priv->switches && (event == 0xcc || event == 0xcd))
-                       goto wakeup;
+               if (event == 0xcc || event == 0xcd) {
+                       report_tablet_mode_event(priv->switches, event);
+                       return;
+               }
 
                /* Wake up on 5-button array events only. */
                if (event == 0xc0 || !priv->array)
@@ -494,9 +506,6 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
 wakeup:
                pm_wakeup_hard_event(&device->dev);
 
-               if (report_tablet_mode_event(priv->switches, event))
-                       return;
-
                return;
        }
 
index 8a8017f..3fdf4cb 100644 (file)
@@ -48,8 +48,16 @@ static const struct key_entry intel_vbtn_keymap[] = {
 };
 
 static const struct key_entry intel_vbtn_switchmap[] = {
-       { KE_SW,     0xCA, { .sw = { SW_DOCK, 1 } } },          /* Docked */
-       { KE_SW,     0xCB, { .sw = { SW_DOCK, 0 } } },          /* Undocked */
+       /*
+        * SW_DOCK should only be reported for docking stations, but DSDTs using the
+        * intel-vbtn code always seem to use this for 2-in-1s / convertibles and set
+        * SW_DOCK=1 when in laptop-mode (in tandem with setting SW_TABLET_MODE=0).
+        * This causes userspace to think the laptop is docked to a port-replicator
+        * and to disable suspend-on-lid-close, which is undesirable.
+        * Map the dock events to KEY_IGNORE to avoid this broken SW_DOCK reporting.
+        */
+       { KE_IGNORE, 0xCA, { .sw = { SW_DOCK, 1 } } },          /* Docked */
+       { KE_IGNORE, 0xCB, { .sw = { SW_DOCK, 0 } } },          /* Undocked */
        { KE_SW,     0xCC, { .sw = { SW_TABLET_MODE, 1 } } },   /* Tablet */
        { KE_SW,     0xCD, { .sw = { SW_TABLET_MODE, 0 } } },   /* Laptop */
        { KE_END }
index ee2f757..b5888ae 100644 (file)
@@ -863,34 +863,45 @@ out_unlock:
 }
 DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
 
-static ssize_t pmc_core_ltr_ignore_write(struct file *file,
-                                        const char __user *userbuf,
-                                        size_t count, loff_t *ppos)
+static int pmc_core_send_ltr_ignore(u32 value)
 {
        struct pmc_dev *pmcdev = &pmc;
        const struct pmc_reg_map *map = pmcdev->map;
-       u32 val, buf_size, fd;
-       int err;
-
-       buf_size = count < 64 ? count : 64;
-
-       err = kstrtou32_from_user(userbuf, buf_size, 10, &val);
-       if (err)
-               return err;
+       u32 reg;
+       int err = 0;
 
        mutex_lock(&pmcdev->lock);
 
-       if (val > map->ltr_ignore_max) {
+       if (value > map->ltr_ignore_max) {
                err = -EINVAL;
                goto out_unlock;
        }
 
-       fd = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset);
-       fd |= (1U << val);
-       pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, fd);
+       reg = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset);
+       reg |= BIT(value);
+       pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, reg);
 
 out_unlock:
        mutex_unlock(&pmcdev->lock);
+
+       return err;
+}
+
+static ssize_t pmc_core_ltr_ignore_write(struct file *file,
+                                        const char __user *userbuf,
+                                        size_t count, loff_t *ppos)
+{
+       u32 buf_size, value;
+       int err;
+
+       buf_size = min_t(u32, count, 64);
+
+       err = kstrtou32_from_user(userbuf, buf_size, 10, &value);
+       if (err)
+               return err;
+
+       err = pmc_core_send_ltr_ignore(value);
+
        return err == 0 ? count : err;
 }
 
@@ -1244,6 +1255,15 @@ static int pmc_core_probe(struct platform_device *pdev)
        pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
        dmi_check_system(pmc_core_dmi_table);
 
+       /*
+        * On TGL, due to a hardware limitation, the GBE LTR blocks PC10 when
+        * a cable is attached. Tell the PMC to ignore it.
+        */
+       if (pmcdev->map == &tgl_reg_map) {
+               dev_dbg(&pdev->dev, "ignoring GBE LTR\n");
+               pmc_core_send_ltr_ignore(3);
+       }
+
        pmc_core_dbgfs_register(pmcdev);
 
        device_initialized = true;
index c8939fb..ee2b3bb 100644 (file)
@@ -173,7 +173,7 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry,
                                  struct intel_pmt_namespace *ns,
                                  struct device *parent)
 {
-       struct resource res;
+       struct resource res = {0};
        struct device *dev;
        int ret;
 
index 97dd749..92d315a 100644 (file)
 #define CRASH_TYPE_OOBMSM      1
 
 /* Control Flags */
-#define CRASHLOG_FLAG_DISABLE          BIT(27)
+#define CRASHLOG_FLAG_DISABLE          BIT(28)
 
 /*
- * Bits 28 and 29 control the state of bit 31.
+ * Bits 29 and 30 control the state of bit 31.
  *
- * Bit 28 will clear bit 31, if set, allowing a new crashlog to be captured.
- * Bit 29 will immediately trigger a crashlog to be generated, setting bit 31.
- * Bit 30 is read-only and reserved as 0.
+ * Bit 29 will clear bit 31, if set, allowing a new crashlog to be captured.
+ * Bit 30 will immediately trigger a crashlog to be generated, setting bit 31.
  * Bit 31 is the read-only status with a 1 indicating log is complete.
  */
-#define CRASHLOG_FLAG_TRIGGER_CLEAR    BIT(28)
-#define CRASHLOG_FLAG_TRIGGER_EXECUTE  BIT(29)
+#define CRASHLOG_FLAG_TRIGGER_CLEAR    BIT(29)
+#define CRASHLOG_FLAG_TRIGGER_EXECUTE  BIT(30)
 #define CRASHLOG_FLAG_TRIGGER_COMPLETE BIT(31)
 #define CRASHLOG_FLAG_TRIGGER_MASK     GENMASK(31, 28)
 
index b881044..0d9e2dd 100644 (file)
@@ -4081,13 +4081,19 @@ static bool hotkey_notify_6xxx(const u32 hkey,
 
        case TP_HKEY_EV_KEY_NUMLOCK:
        case TP_HKEY_EV_KEY_FN:
-       case TP_HKEY_EV_KEY_FN_ESC:
                /* key press events, we just ignore them as long as the EC
                 * is still reporting them in the normal keyboard stream */
                *send_acpi_ev = false;
                *ignore_acpi_ev = true;
                return true;
 
+       case TP_HKEY_EV_KEY_FN_ESC:
+               /* Get the media key status to force the status LED to update */
+               acpi_evalf(hkey_handle, NULL, "GMKS", "v");
+               *send_acpi_ev = false;
+               *ignore_acpi_ev = true;
+               return true;
+
        case TP_HKEY_EV_TABLET_CHANGED:
                tpacpi_input_send_tabletsw();
                hotkey_tablet_mode_notify_change();
@@ -9845,6 +9851,11 @@ static struct ibm_struct lcdshadow_driver_data = {
  * Thinkpad sensor interfaces
  */
 
+#define DYTC_CMD_QUERY        0 /* To get DYTC status - enable/revision */
+#define DYTC_QUERY_ENABLE_BIT 8  /* Bit        8 - 0 = disabled, 1 = enabled */
+#define DYTC_QUERY_SUBREV_BIT 16 /* Bits 16 - 27 - sub revision */
+#define DYTC_QUERY_REV_BIT    28 /* Bits 28 - 31 - revision */
+
 #define DYTC_CMD_GET          2 /* To get current IC function and mode */
 #define DYTC_GET_LAPMODE_BIT 17 /* Set when in lapmode */
 
@@ -9855,6 +9866,7 @@ static bool has_palmsensor;
 static bool has_lapsensor;
 static bool palm_state;
 static bool lap_state;
+static int dytc_version;
 
 static int dytc_command(int command, int *output)
 {
@@ -9869,6 +9881,33 @@ static int dytc_command(int command, int *output)
        return 0;
 }
 
+static int dytc_get_version(void)
+{
+       int err, output;
+
+       /* Check if we've been called before - and just return cached value */
+       if (dytc_version)
+               return dytc_version;
+
+       /* Otherwise query DYTC and extract version information */
+       err = dytc_command(DYTC_CMD_QUERY, &output);
+       /*
+        * If support isn't available (ENODEV) then don't return an error
+        * and don't create the sysfs group
+        */
+       if (err == -ENODEV)
+               return 0;
+       /* For all other errors we can flag the failure */
+       if (err)
+               return err;
+
+       /* Check DYTC is enabled and supports mode setting */
+       if (output & BIT(DYTC_QUERY_ENABLE_BIT))
+               dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF;
+
+       return 0;
+}
+
 static int lapsensor_get(bool *present, bool *state)
 {
        int output, err;
@@ -9974,7 +10013,18 @@ static int tpacpi_proxsensor_init(struct ibm_init_struct *iibm)
                if (err)
                        return err;
        }
-       if (has_lapsensor) {
+
+       /* Check if we know the DYTC version, if we don't then get it */
+       if (!dytc_version) {
+               err = dytc_get_version();
+               if (err)
+                       return err;
+       }
+       /*
+        * Platforms before DYTC version 5 claim to have a lap sensor, but it doesn't work, so we
+        * ignore them.
+        */
+       if (has_lapsensor && (dytc_version >= 5)) {
                err = sysfs_create_file(&tpacpi_pdev->dev.kobj, &dev_attr_dytc_lapmode.attr);
                if (err)
                        return err;
@@ -9999,14 +10049,9 @@ static struct ibm_struct proxsensor_driver_data = {
  * DYTC Platform Profile interface
  */
 
-#define DYTC_CMD_QUERY        0 /* To get DYTC status - enable/revision */
 #define DYTC_CMD_SET          1 /* To enable/disable IC function mode */
 #define DYTC_CMD_RESET    0x1ff /* To reset back to default */
 
-#define DYTC_QUERY_ENABLE_BIT 8  /* Bit        8 - 0 = disabled, 1 = enabled */
-#define DYTC_QUERY_SUBREV_BIT 16 /* Bits 16 - 27 - sub revision */
-#define DYTC_QUERY_REV_BIT    28 /* Bits 28 - 31 - revision */
-
 #define DYTC_GET_FUNCTION_BIT 8  /* Bits  8-11 - function setting */
 #define DYTC_GET_MODE_BIT     12 /* Bits 12-15 - mode setting */
 
@@ -10142,8 +10187,13 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
                return err;
 
        if (profile == PLATFORM_PROFILE_BALANCED) {
-               /* To get back to balanced mode we just issue a reset command */
-               err = dytc_command(DYTC_CMD_RESET, &output);
+               /*
+                * To get back to balanced mode we need to issue a reset command.
+                * Note we still need to disable CQL mode beforehand and re-enable
+                * it afterwards, otherwise dytc_lapmode gets reset to 0 and stays
+                * stuck at 0 for approx. 30 minutes.
+                */
+               err = dytc_cql_command(DYTC_CMD_RESET, &output);
                if (err)
                        goto unlock;
        } else {
@@ -10211,28 +10261,28 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
        if (err)
                return err;
 
+       /* Check if we know the DYTC version, if we don't then get it */
+       if (!dytc_version) {
+               err = dytc_get_version();
+               if (err)
+                       return err;
+       }
        /* Check DYTC is enabled and supports mode setting */
-       if (output & BIT(DYTC_QUERY_ENABLE_BIT)) {
-               /* Only DYTC v5.0 and later has this feature. */
-               int dytc_version;
-
-               dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF;
-               if (dytc_version >= 5) {
-                       dbg_printk(TPACPI_DBG_INIT,
-                                  "DYTC version %d: thermal mode available\n", dytc_version);
-                       /* Create platform_profile structure and register */
-                       err = platform_profile_register(&dytc_profile);
-                       /*
-                        * If for some reason platform_profiles aren't enabled
-                        * don't quit terminally.
-                        */
-                       if (err)
-                               return 0;
+       if (dytc_version >= 5) {
+               dbg_printk(TPACPI_DBG_INIT,
+                               "DYTC version %d: thermal mode available\n", dytc_version);
+               /* Create platform_profile structure and register */
+               err = platform_profile_register(&dytc_profile);
+               /*
+                * If for some reason platform_profiles aren't enabled
+                * don't quit terminally.
+                */
+               if (err)
+                       return 0;
 
-                       dytc_profile_available = true;
-                       /* Ensure initial values are correct */
-                       dytc_profile_refresh();
-               }
+               dytc_profile_available = true;
+               /* Ensure initial values are correct */
+               dytc_profile_refresh();
        }
        return 0;
 }
index beb5f74..08f4cf0 100644 (file)
@@ -189,15 +189,16 @@ int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
        tmr_add = ptp_qoriq->tmr_add;
        adj = tmr_add;
 
-       /* calculate diff as adj*(scaled_ppm/65536)/1000000
-        * and round() to the nearest integer
+       /*
+        * Calculate diff and round() to the nearest integer
+        *
+        * diff = adj * (ppb / 1000000000)
+        *      = adj * scaled_ppm / 65536000000
         */
-       adj *= scaled_ppm;
-       diff = div_u64(adj, 8000000);
-       diff = (diff >> 13) + ((diff >> 12) & 1);
+       diff = mul_u64_u64_div_u64(adj, scaled_ppm, 32768000000);
+       diff = DIV64_U64_ROUND_UP(diff, 2);
 
        tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
-
        ptp_qoriq->write(&regs->ctrl_regs->tmr_add, tmr_add);
 
        return 0;
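
The new computation is diff = adj * scaled_ppm / 65536000000, rounded to the nearest integer: mul_u64_u64_div_u64() divides the full-width product by half the denominator (32768000000), and DIV64_U64_ROUND_UP(diff, 2) then halves with round-up, which together implement round-half-up. The identity round(v / D) == floor((floor(v / (D/2)) + 1) / 2) for even D, checked with small numbers in a self-contained program:

#include <stdint.h>
#include <stdio.h>

static uint64_t round_div(uint64_t v, uint64_t d)
{
	uint64_t q2 = v / (d / 2);	/* role of mul_u64_u64_div_u64() */

	return (q2 + 1) / 2;		/* role of DIV64_U64_ROUND_UP(.., 2) */
}

int main(void)
{
	/* 7/4 = 1.75 -> 2,  6/4 = 1.5 -> 2 (half rounds up),  5/4 = 1.25 -> 1 */
	printf("%llu %llu %llu\n",
	       (unsigned long long)round_div(7, 4),
	       (unsigned long long)round_div(6, 4),
	       (unsigned long long)round_div(5, 4));
	return 0;
}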
index ddecf25..d7894f1 100644 (file)
@@ -309,11 +309,20 @@ static bool sanity_check(struct ce_array *ca)
        return ret;
 }
 
+/**
+ * cec_add_elem - Add an element to the CEC array.
+ * @pfn:       page frame number to insert
+ *
+ * Return values:
+ * - <0:       on error
+ * -  0:       on success
+ * - >0:       when the inserted pfn was offlined
+ */
 static int cec_add_elem(u64 pfn)
 {
        struct ce_array *ca = &ce_arr;
+       int count, err, ret = 0;
        unsigned int to = 0;
-       int count, ret = 0;
 
        /*
         * We can be called very early on the identify_cpu() path where we are
@@ -330,8 +339,8 @@ static int cec_add_elem(u64 pfn)
        if (ca->n == MAX_ELEMS)
                WARN_ON(!del_lru_elem_unlocked(ca));
 
-       ret = find_elem(ca, pfn, &to);
-       if (ret < 0) {
+       err = find_elem(ca, pfn, &to);
+       if (err < 0) {
                /*
                 * Shift range [to-end] to make room for one more element.
                 */
index 7b0cd08..ba020a4 100644 (file)
@@ -125,7 +125,7 @@ static const struct regulator_ops vid_ops = {
 
 static const struct regulator_desc regulators[] = {
        BD9571MWV_REG("VD09", "vd09", VD09, avs_ops, 0, 0x7f,
-                     0x80, 600000, 10000, 0x3c),
+                     0x6f, 600000, 10000, 0x3c),
        BD9571MWV_REG("VD18", "vd18", VD18, vid_ops, BD9571MWV_VD18_VID, 0xf,
                      16, 1625000, 25000, 0),
        BD9571MWV_REG("VD25", "vd25", VD25, vid_ops, BD9571MWV_VD25_VID, 0xf,
@@ -134,7 +134,7 @@ static const struct regulator_desc regulators[] = {
                      11, 2800000, 100000, 0),
        BD9571MWV_REG("DVFS", "dvfs", DVFS, reg_ops,
                      BD9571MWV_DVFS_MONIVDAC, 0x7f,
-                     0x80, 600000, 10000, 0x3c),
+                     0x6f, 600000, 10000, 0x3c),
 };
 
 #ifdef CONFIG_PM_SLEEP
@@ -174,7 +174,7 @@ static ssize_t backup_mode_show(struct device *dev,
 {
        struct bd9571mwv_reg *bdreg = dev_get_drvdata(dev);
 
-       return sprintf(buf, "%s\n", bdreg->bkup_mode_enabled ? "on" : "off");
+       return sysfs_emit(buf, "%s\n", bdreg->bkup_mode_enabled ? "on" : "off");
 }
 
 static ssize_t backup_mode_store(struct device *dev,
@@ -301,7 +301,7 @@ static int bd9571mwv_regulator_probe(struct platform_device *pdev)
                                               &config);
                if (IS_ERR(rdev)) {
                        dev_err(&pdev->dev, "failed to register %s regulator\n",
-                               pdev->name);
+                               regulators[i].name);
                        return PTR_ERR(rdev);
                }
        }
index 2667919..dcb380e 100644 (file)
@@ -450,6 +450,24 @@ static void *pru_i_da_to_va(struct pru_rproc *pru, u32 da, size_t len)
        if (len == 0)
                return NULL;
 
+       /*
+        * GNU binutils do not support multiple address spaces. The GNU
+        * linker's default linker script places IRAM at an arbitrary high
+        * offset, in order to differentiate it from DRAM. Hence we need to
+        * strip the artificial offset in the IRAM addresses coming from the
+        * ELF file.
+        *
+        * The TI proprietary linker would never set those higher IRAM address
+        * bits anyway. PRU architecture limits the program counter to 16-bit
+        * word-address range. This in turn corresponds to 18-bit IRAM
+        * byte-address range for ELF.
+        *
+        * Two more bits are added just in case to make the final 20-bit mask.
+        * Idea is to have a safeguard in case TI decides to add banking
+        * in future SoCs.
+        */
+       da &= 0xfffff;
+
        if (da >= PRU_IRAM_DA &&
            da + len <= PRU_IRAM_DA + pru->mem_regions[PRU_IOMEM_IRAM].size) {
                offset = da - PRU_IRAM_DA;
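
A numeric illustration of the fixup (the input address is hypothetical; only the 0xfffff mask and its 20-bit rationale come from the comment above):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t da = 0x20000100;	/* hypothetical GNU-linker IRAM address */

	da &= 0xfffff;			/* strip the artificial high bits -> 0x00100 */
	printf("0x%05x\n", (unsigned)da);
	return 0;
}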
@@ -585,7 +603,7 @@ pru_rproc_load_elf_segments(struct rproc *rproc, const struct firmware *fw)
                        break;
                }
 
-               if (pru->data->is_k3 && is_iram) {
+               if (pru->data->is_k3) {
                        ret = pru_rproc_memcpy(ptr, elf_data + phdr->p_offset,
                                               filesz);
                        if (ret) {
index 5521c44..7c007dd 100644 (file)
@@ -56,7 +56,7 @@ static int qcom_pil_info_init(void)
        memset_io(base, 0, resource_size(&imem));
 
        _reloc.base = base;
-       _reloc.num_entries = resource_size(&imem) / PIL_RELOC_ENTRY_SIZE;
+       _reloc.num_entries = (u32)resource_size(&imem) / PIL_RELOC_ENTRY_SIZE;
 
        return 0;
 }
index ba9ce4e..3a945ab 100644 (file)
@@ -63,7 +63,6 @@ void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
 MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
 MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
                   " Copyright IBM Corp. 2000");
-MODULE_SUPPORTED_DEVICE("dasd");
 MODULE_LICENSE("GPL");
 
 /*
index 00e72b9..d93595b 100644 (file)
@@ -50,7 +50,6 @@ MODULE_PARM_DESC(sol_compat,
 MODULE_AUTHOR("Eric Brower <ebrower@usa.net>");
 MODULE_DESCRIPTION("7-Segment Display driver for Sun Microsystems CP1400/1500");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("d7s");
 
 struct d7s {
        void __iomem    *regs;
index 3836976..f135a10 100644 (file)
@@ -80,7 +80,6 @@
 MODULE_AUTHOR("Hewlett-Packard Company");
 MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
        HPSA_DRIVER_VERSION);
-MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
 MODULE_VERSION(HPSA_DRIVER_VERSION);
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("cciss");
index d126bb8..ba6a3aa 100644 (file)
 #ifndef HPSA_CMD_H
 #define HPSA_CMD_H
 
+#include <linux/compiler.h>
+
+#include <linux/build_bug.h> /* static_assert */
+#include <linux/stddef.h> /* offsetof */
+
 /* general boundary definitions */
 #define SENSEINFOBYTES          32 /* may vary between hbas */
 #define SG_ENTRIES_IN_CMD      32 /* Max SG entries excluding chain blocks */
@@ -200,12 +205,10 @@ union u64bit {
        MAX_EXT_TARGETS + 1) /* + 1 is for the controller itself */
 
 /* SCSI-3 Commands */
-#pragma pack(1)
-
 #define HPSA_INQUIRY 0x12
 struct InquiryData {
        u8 data_byte[36];
-};
+} __packed;
 
 #define HPSA_REPORT_LOG 0xc2    /* Report Logical LUNs */
 #define HPSA_REPORT_PHYS 0xc3   /* Report Physical LUNs */
@@ -221,7 +224,7 @@ struct raid_map_disk_data {
        u8    xor_mult[2];            /**< XOR multipliers for this position,
                                        *  valid for data disks only */
        u8    reserved[2];
-};
+} __packed;
 
 struct raid_map_data {
        __le32   structure_size;        /* Size of entire structure in bytes */
@@ -247,14 +250,14 @@ struct raid_map_data {
        __le16   dekindex;              /* Data encryption key index. */
        u8    reserved[16];
        struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES];
-};
+} __packed;
 
 struct ReportLUNdata {
        u8 LUNListLength[4];
        u8 extended_response_flag;
        u8 reserved[3];
        u8 LUN[HPSA_MAX_LUN][8];
-};
+} __packed;
 
 struct ext_report_lun_entry {
        u8 lunid[8];
@@ -269,20 +272,20 @@ struct ext_report_lun_entry {
        u8 lun_count; /* multi-lun device, how many luns */
        u8 redundant_paths;
        u32 ioaccel_handle; /* ioaccel1 only uses lower 16 bits */
-};
+} __packed;
 
 struct ReportExtendedLUNdata {
        u8 LUNListLength[4];
        u8 extended_response_flag;
        u8 reserved[3];
        struct ext_report_lun_entry LUN[HPSA_MAX_PHYS_LUN];
-};
+} __packed;
 
 struct SenseSubsystem_info {
        u8 reserved[36];
        u8 portname[8];
        u8 reserved1[1108];
-};
+} __packed;
 
 /* BMIC commands */
 #define BMIC_READ 0x26
@@ -317,7 +320,7 @@ union SCSI3Addr {
                u8 Targ:6;
                u8 Mode:2;        /* b10 */
        } LogUnit;
-};
+} __packed;
 
 struct PhysDevAddr {
        u32             TargetId:24;
@@ -325,20 +328,20 @@ struct PhysDevAddr {
        u32             Mode:2;
        /* 2 level target device addr */
        union SCSI3Addr  Target[2];
-};
+} __packed;
 
 struct LogDevAddr {
        u32            VolId:30;
        u32            Mode:2;
        u8             reserved[4];
-};
+} __packed;
 
 union LUNAddr {
        u8               LunAddrBytes[8];
        union SCSI3Addr    SCSI3Lun[4];
        struct PhysDevAddr PhysDev;
        struct LogDevAddr  LogDev;
-};
+} __packed;
 
 struct CommandListHeader {
        u8              ReplyQueue;
@@ -346,7 +349,7 @@ struct CommandListHeader {
        __le16          SGTotal;
        __le64          tag;
        union LUNAddr     LUN;
-};
+} __packed;
 
 struct RequestBlock {
        u8   CDBLen;
@@ -365,18 +368,18 @@ struct RequestBlock {
 #define GET_DIR(tad) (((tad) >> 6) & 0x03)
        u16  Timeout;
        u8   CDB[16];
-};
+} __packed;
 
 struct ErrDescriptor {
        __le64 Addr;
        __le32 Len;
-};
+} __packed;
 
 struct SGDescriptor {
        __le64 Addr;
        __le32 Len;
        __le32 Ext;
-};
+} __packed;
 
 union MoreErrInfo {
        struct {
@@ -390,7 +393,8 @@ union MoreErrInfo {
                u8  offense_num;  /* byte # of offense 0-base */
                u32 offense_value;
        } Invalid_Cmd;
-};
+} __packed;
+
 struct ErrorInfo {
        u8               ScsiStatus;
        u8               SenseLen;
@@ -398,7 +402,7 @@ struct ErrorInfo {
        u32              ResidualCnt;
        union MoreErrInfo  MoreErrInfo;
        u8               SenseInfo[SENSEINFOBYTES];
-};
+} __packed;
 /* Command types */
 #define CMD_IOCTL_PEND  0x01
 #define CMD_SCSI       0x03
@@ -453,6 +457,15 @@ struct CommandList {
        atomic_t refcount; /* Must be last to avoid memset in hpsa_cmd_init() */
 } __aligned(COMMANDLIST_ALIGNMENT);
 
+/*
+ * Make sure our embedded atomic variable is aligned. Otherwise we break atomic
+ * operations on architectures that don't support unaligned atomics like IA64.
+ *
+ * The assert guards against reintroduction of an unwanted __packed to
+ * the struct CommandList.
+ */
+static_assert(offsetof(struct CommandList, refcount) % __alignof__(atomic_t) == 0);
+
 /* Max S/G elements in I/O accelerator command */
 #define IOACCEL1_MAXSGENTRIES           24
 #define IOACCEL2_MAXSGENTRIES          28
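
A self-contained illustration of what the assert protects against; struct counter stands in for atomic_t, and adding a packed attribute to the outer struct would move refcount to offset 1 and make the assertion fail at compile time:

#include <assert.h>
#include <stddef.h>

struct counter { int v; };	/* stand-in for atomic_t */

struct cmd {			/* stand-in for struct CommandList */
	char flag;
	struct counter refcount;	/* padded to a 4-byte boundary */
};				/* __attribute__((packed)) here would break it */

static_assert(offsetof(struct cmd, refcount) % _Alignof(struct counter) == 0,
	      "refcount must stay naturally aligned");

int main(void) { return 0; }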
@@ -489,7 +502,7 @@ struct io_accel1_cmd {
        __le64 host_addr;               /* 0x70 - 0x77 */
        u8  CISS_LUN[8];                /* 0x78 - 0x7F */
        struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES];
-} __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT);
+} __packed __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT);
 
 #define IOACCEL1_FUNCTION_SCSIIO        0x00
 #define IOACCEL1_SGLOFFSET              32
@@ -519,7 +532,7 @@ struct ioaccel2_sg_element {
        u8 chain_indicator;
 #define IOACCEL2_CHAIN 0x80
 #define IOACCEL2_LAST_SG 0x40
-};
+} __packed;
 
 /*
  * SCSI Response Format structure for IO Accelerator Mode 2
@@ -559,7 +572,7 @@ struct io_accel2_scsi_response {
        u8 sense_data_len;              /* sense/response data length */
        u8 resid_cnt[4];                /* residual count */
        u8 sense_data_buff[32];         /* sense/response data buffer */
-};
+} __packed;
 
 /*
  * Structure for I/O accelerator (mode 2 or m2) commands.
@@ -592,7 +605,7 @@ struct io_accel2_cmd {
        __le32 tweak_upper;             /* Encryption tweak, upper 4 bytes */
        struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES];
        struct io_accel2_scsi_response error_data;
-} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
+} __packed __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
 
 /*
  * defines for Mode 2 command struct
@@ -618,7 +631,7 @@ struct hpsa_tmf_struct {
        __le64 abort_tag;       /* cciss tag of SCSI cmd or TMF to abort */
        __le64 error_ptr;               /* Error Pointer */
        __le32 error_len;               /* Error Length */
-} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
+} __packed __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
 
 /* Configuration Table Structure */
 struct HostWrite {
@@ -626,7 +639,7 @@ struct HostWrite {
        __le32          command_pool_addr_hi;
        __le32          CoalIntDelay;
        __le32          CoalIntCount;
-};
+} __packed;
 
 #define SIMPLE_MODE     0x02
 #define PERFORMANT_MODE 0x04
@@ -675,7 +688,7 @@ struct CfgTable {
 #define                HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE (1 << 30)
 #define                HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE (1 << 31)
        __le32          clear_event_notify;
-};
+} __packed;
 
 #define NUM_BLOCKFETCH_ENTRIES 8
 struct TransTable_struct {
@@ -686,14 +699,14 @@ struct TransTable_struct {
        __le32          RepQCtrAddrHigh32;
 #define MAX_REPLY_QUEUES 64
        struct vals32  RepQAddr[MAX_REPLY_QUEUES];
-};
+} __packed;
 
 struct hpsa_pci_info {
        unsigned char   bus;
        unsigned char   dev_fn;
        unsigned short  domain;
        u32             board_id;
-};
+} __packed;
 
 struct bmic_identify_controller {
        u8      configured_logical_drive_count; /* offset 0 */
@@ -702,7 +715,7 @@ struct bmic_identify_controller {
        u8      pad2[136];
        u8      controller_mode;        /* offset 292 */
        u8      pad3[32];
-};
+} __packed;
 
 
 struct bmic_identify_physical_device {
@@ -845,7 +858,7 @@ struct bmic_identify_physical_device {
        u8     max_link_rate[256];
        u8     neg_phys_link_rate[256];
        u8     box_conn_name[8];
-} __attribute((aligned(512)));
+} __packed __attribute((aligned(512)));
 
 struct bmic_sense_subsystem_info {
        u8      primary_slot_number;
@@ -858,7 +871,7 @@ struct bmic_sense_subsystem_info {
        u8      secondary_array_serial_number[32];
        u8      secondary_cache_serial_number[32];
        u8      pad[332];
-};
+} __packed;
 
 struct bmic_sense_storage_box_params {
        u8      reserved[36];
@@ -870,7 +883,6 @@ struct bmic_sense_storage_box_params {
        u8      reserver_3[84];
        u8      phys_connector[2];
        u8      reserved_4[296];
-};
+} __packed;
 
-#pragma pack()
 #endif /* HPSA_CMD_H */
index 1b68734..61831f2 100644 (file)
@@ -2371,6 +2371,24 @@ static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
        return 0;
 }
 
+/**
+ * ibmvfc_event_is_free - Check if event is free or not
+ * @evt:       ibmvfc event struct
+ *
+ * Returns:
+ *     true / false
+ **/
+static bool ibmvfc_event_is_free(struct ibmvfc_event *evt)
+{
+       struct ibmvfc_event *loop_evt;
+
+       list_for_each_entry(loop_evt, &evt->queue->free, queue_list)
+               if (loop_evt == evt)
+                       return true;
+
+       return false;
+}
+
 /**
  * ibmvfc_wait_for_ops - Wait for ops to complete
  * @vhost:     ibmvfc host struct
@@ -2385,35 +2403,58 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
 {
        struct ibmvfc_event *evt;
        DECLARE_COMPLETION_ONSTACK(comp);
-       int wait;
+       int wait, i, q_index, q_size;
        unsigned long flags;
        signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
+       struct ibmvfc_queue *queues;
 
        ENTER;
+       if (vhost->mq_enabled && vhost->using_channels) {
+               queues = vhost->scsi_scrqs.scrqs;
+               q_size = vhost->scsi_scrqs.active_queues;
+       } else {
+               queues = &vhost->crq;
+               q_size = 1;
+       }
+
        do {
                wait = 0;
-               spin_lock_irqsave(&vhost->crq.l_lock, flags);
-               list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
-                       if (match(evt, device)) {
-                               evt->eh_comp = &comp;
-                               wait++;
+               spin_lock_irqsave(vhost->host->host_lock, flags);
+               for (q_index = 0; q_index < q_size; q_index++) {
+                       spin_lock(&queues[q_index].l_lock);
+                       for (i = 0; i < queues[q_index].evt_pool.size; i++) {
+                               evt = &queues[q_index].evt_pool.events[i];
+                               if (!ibmvfc_event_is_free(evt)) {
+                                       if (match(evt, device)) {
+                                               evt->eh_comp = &comp;
+                                               wait++;
+                                       }
+                               }
                        }
+                       spin_unlock(&queues[q_index].l_lock);
                }
-               spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
+               spin_unlock_irqrestore(vhost->host->host_lock, flags);
 
                if (wait) {
                        timeout = wait_for_completion_timeout(&comp, timeout);
 
                        if (!timeout) {
                                wait = 0;
-                               spin_lock_irqsave(&vhost->crq.l_lock, flags);
-                               list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
-                                       if (match(evt, device)) {
-                                               evt->eh_comp = NULL;
-                                               wait++;
+                               spin_lock_irqsave(vhost->host->host_lock, flags);
+                               for (q_index = 0; q_index < q_size; q_index++) {
+                                       spin_lock(&queues[q_index].l_lock);
+                                       for (i = 0; i < queues[q_index].evt_pool.size; i++) {
+                                               evt = &queues[q_index].evt_pool.events[i];
+                                               if (!ibmvfc_event_is_free(evt)) {
+                                                       if (match(evt, device)) {
+                                                               evt->eh_comp = NULL;
+                                                               wait++;
+                                                       }
+                                               }
                                        }
+                                       spin_unlock(&queues[q_index].l_lock);
                                }
-                               spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
+                               spin_unlock_irqrestore(vhost->host->host_lock, flags);
                                if (wait)
                                        dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
                                LEAVE;
@@ -5784,6 +5825,8 @@ static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
                          vhost->disc_buf_dma);
        dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
                          vhost->login_buf, vhost->login_buf_dma);
+       dma_free_coherent(vhost->dev, sizeof(*vhost->channel_setup_buf),
+                         vhost->channel_setup_buf, vhost->channel_setup_dma);
        dma_pool_destroy(vhost->sg_pool);
        ibmvfc_free_queue(vhost, async_q);
        LEAVE;
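
The reworked ibmvfc_wait_for_ops() above scans every event-pool entry of every sub-CRQ instead of only the legacy CRQ's sent list. A sketch of the nested-lock scan it uses, with illustrative names and types (not the driver's real API); the adapter-wide host lock is the outer lock, each queue's l_lock nests inside it:

        /* Fragment; assumes <linux/spinlock.h>. Names are placeholders. */
        struct ex_event { bool free; void *device; };
        struct ex_queue {
                spinlock_t l_lock;
                struct ex_event *events;
                int pool_size;
        };

        static int ex_count_matches(spinlock_t *host_lock, struct ex_queue *queues,
                                    int q_size, void *device)
        {
                unsigned long flags;
                int q, i, found = 0;

                spin_lock_irqsave(host_lock, flags);            /* outer, adapter-wide */
                for (q = 0; q < q_size; q++) {
                        spin_lock(&queues[q].l_lock);           /* nested; irqs already off */
                        for (i = 0; i < queues[q].pool_size; i++)
                                if (!queues[q].events[i].free &&
                                    queues[q].events[i].device == device)
                                        found++;
                        spin_unlock(&queues[q].l_lock);
                }
                spin_unlock_irqrestore(host_lock, flags);
                return found;
        }
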
index bc79a01..46a8f2d 100644 (file)
@@ -2421,7 +2421,7 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
        memset(dstbuf, 0, 33);
        size = (nbytes < 32) ? nbytes : 32;
        if (copy_from_user(dstbuf, buf, size))
-               return 0;
+               return -EFAULT;
 
        if (dent == phba->debug_InjErrLBA) {
                if ((dstbuf[0] == 'o') && (dstbuf[1] == 'f') &&
@@ -2430,7 +2430,7 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
        }
 
        if ((tmp == 0) && (kstrtoull(dstbuf, 0, &tmp)))
-               return 0;
+               return -EINVAL;
 
        if (dent == phba->debug_writeGuard)
                phba->lpfc_injerr_wgrd_cnt = (uint32_t)tmp;
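
The lpfc hunk switches the debugfs write handler to the conventional error contract: -EFAULT when the user buffer cannot be copied and -EINVAL for unparsable input, rather than returning 0, which user space treats as a zero-byte write and may retry indefinitely. A hedged sketch of that contract, with placeholder names:

        static ssize_t ex_dbg_write(struct file *file, const char __user *buf,
                                    size_t nbytes, loff_t *ppos)
        {
                char kbuf[33] = { };
                unsigned long long val;
                size_t size = min_t(size_t, nbytes, sizeof(kbuf) - 1);

                if (copy_from_user(kbuf, buf, size))
                        return -EFAULT;         /* fault, not "wrote 0 bytes" */
                if (kstrtoull(kbuf, 0, &val))
                        return -EINVAL;         /* reject unparsable input */
                /* ... apply val to the device here ... */
                return nbytes;                  /* everything consumed */
        }
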
index ac066f8..ac0eef9 100644 (file)
@@ -7806,14 +7806,18 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
                ioc->pend_os_device_add_sz++;
        ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
            GFP_KERNEL);
-       if (!ioc->pend_os_device_add)
+       if (!ioc->pend_os_device_add) {
+               r = -ENOMEM;
                goto out_free_resources;
+       }
 
        ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
        ioc->device_remove_in_progress =
                kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
-       if (!ioc->device_remove_in_progress)
+       if (!ioc->device_remove_in_progress) {
+               r = -ENOMEM;
                goto out_free_resources;
+       }
 
        ioc->fwfault_debug = mpt3sas_fwfault_debug;
 
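
The mpt3sas hunk closes a silent-success path: without r = -ENOMEM, the goto unwinds with whatever value r last held, so a failed allocation could be reported as success. A minimal sketch of the idiom, with placeholder names (ex_free_resources() is a hypothetical cleanup helper):

        static int ex_attach(struct ex_ioc *ioc)
        {
                int r;

                ioc->buf = kzalloc(ioc->buf_sz, GFP_KERNEL);
                if (!ioc->buf) {
                        r = -ENOMEM;            /* set the error before unwinding */
                        goto out_free_resources;
                }
                return 0;

        out_free_resources:
                ex_free_resources(ioc);
                return r;
        }
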
index ffca030..6aa6de7 100644 (file)
@@ -413,7 +413,7 @@ mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
         * And add this object to port_table_list.
         */
        if (!ioc->multipath_on_hba) {
-               port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
+               port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC);
                if (!port)
                        return NULL;
 
index 4adf9de..329fd02 100644 (file)
@@ -2273,12 +2273,12 @@ static void myrs_cleanup(struct myrs_hba *cs)
        if (cs->mmio_base) {
                cs->disable_intr(cs);
                iounmap(cs->mmio_base);
+               cs->mmio_base = NULL;
        }
        if (cs->irq)
                free_irq(cs->irq, cs);
        if (cs->io_addr)
                release_region(cs->io_addr, 0x80);
-       iounmap(cs->mmio_base);
        pci_set_drvdata(pdev, NULL);
        pci_disable_device(pdev);
        scsi_host_put(cs->host);
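
The myrs hunk fixes a double iounmap(): the second, unconditional call could operate on an already-unmapped or never-mapped cs->mmio_base. Clearing the pointer inside the guarded branch is the usual defensive shape, sketched here with placeholder names:

        /* Release once, then poison the pointer so later passes are no-ops. */
        static void ex_unmap(struct ex_dev *dev)
        {
                if (dev->mmio) {
                        iounmap(dev->mmio);
                        dev->mmio = NULL;
                }
        }
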
index 5d5f50d..ac89002 100644 (file)
@@ -55,7 +55,6 @@
 
 MODULE_AUTHOR("YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>");
 MODULE_DESCRIPTION("WorkBit NinjaSCSI-3 / NinjaSCSI-32Bi(16bit) PCMCIA SCSI host adapter module");
-MODULE_SUPPORTED_DEVICE("sd,sr,sg,st");
 MODULE_LICENSE("GPL");
 
 #include "nsp_io.h"
index 49bf2f7..31e5455 100644 (file)
@@ -223,7 +223,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
                PM8001_EVENT_LOG_SIZE;
        pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option         = 0x01;
        pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt          = 0x01;
-       for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
+       for (i = 0; i < pm8001_ha->max_q_num; i++) {
                pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt  =
                        PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30);
                pm8001_ha->inbnd_q_tbl[i].upper_base_addr       =
@@ -249,7 +249,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
                pm8001_ha->inbnd_q_tbl[i].producer_idx          = 0;
                pm8001_ha->inbnd_q_tbl[i].consumer_index        = 0;
        }
-       for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
+       for (i = 0; i < pm8001_ha->max_q_num; i++) {
                pm8001_ha->outbnd_q_tbl[i].element_size_cnt     =
                        PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30);
                pm8001_ha->outbnd_q_tbl[i].upper_base_addr      =
@@ -671,9 +671,9 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
        read_outbnd_queue_table(pm8001_ha);
        /* update main config table ,inbound table and outbound table */
        update_main_config_table(pm8001_ha);
-       for (i = 0; i < PM8001_MAX_INB_NUM; i++)
+       for (i = 0; i < pm8001_ha->max_q_num; i++)
                update_inbnd_queue_table(pm8001_ha, i);
-       for (i = 0; i < PM8001_MAX_OUTB_NUM; i++)
+       for (i = 0; i < pm8001_ha->max_q_num; i++)
                update_outbnd_queue_table(pm8001_ha, i);
        /* 8081 controller does not require these operations */
        if (deviceid != 0x8081 && deviceid != 0x0042) {
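
The pm8001 hunks size the inbound/outbound table setup loops by the runtime max_q_num, which reflects the queues actually in use (for example, as constrained by the available interrupt vectors), rather than the compile-time PM8001_MAX_INB_NUM/PM8001_MAX_OUTB_NUM maxima. The shape, with illustrative helpers:

        /* Illustrative: configure only the queues that actually exist. */
        for (i = 0; i < pm8001_ha->max_q_num; i++) {
                ex_init_inbound(&pm8001_ha->inbnd_q_tbl[i]);    /* hypothetical */
                ex_init_outbound(&pm8001_ha->outbnd_q_tbl[i]);  /* hypothetical */
        }
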
index 47ad64b..69c5b5e 100644 (file)
@@ -1675,6 +1675,7 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
                if (!qedi->global_queues[i]) {
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "Unable to allocation global queue %d.\n", i);
+                       status = -ENOMEM;
                        goto mem_alloc_failure;
                }
 
index c48daf5..480e7d2 100644 (file)
@@ -3222,8 +3222,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
        if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
            (cmd->sess && cmd->sess->deleted)) {
                cmd->state = QLA_TGT_STATE_PROCESSED;
-               res = 0;
-               goto free;
+               return 0;
        }
 
        ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
@@ -3234,8 +3233,9 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
 
        res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
            &full_req_cnt);
-       if (unlikely(res != 0))
-               goto free;
+       if (unlikely(res != 0)) {
+               return res;
+       }
 
        spin_lock_irqsave(qpair->qp_lock_ptr, flags);
 
@@ -3255,8 +3255,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
                        vha->flags.online, qla2x00_reset_active(vha),
                        cmd->reset_count, qpair->chip_reset);
                spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
-               res = 0;
-               goto free;
+               return 0;
        }
 
        /* Does F/W have an IOCBs for this request */
@@ -3359,8 +3358,6 @@ out_unmap_unlock:
        qlt_unmap_sg(vha, cmd);
        spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
 
-free:
-       vha->hw->tgt.tgt_ops->free_cmd(cmd);
        return res;
 }
 EXPORT_SYMBOL(qlt_xmit_response);
index 10e5e6c..01620f3 100644 (file)
        (min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \
                QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0))
 #endif
-#endif
 
 #define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha))                        \
                         ? le16_to_cpu((iocb)->u.isp2x.target.extended) \
@@ -244,6 +243,7 @@ struct ctio_to_2xxx {
 #ifndef CTIO_RET_TYPE
 #define CTIO_RET_TYPE  0x17            /* CTIO return entry */
 #define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */
+#endif
 
 struct fcp_hdr {
        uint8_t  r_ctl;
index b55fc76..8b4890c 100644 (file)
@@ -644,7 +644,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
 {
        struct qla_tgt_cmd *cmd = container_of(se_cmd,
                                struct qla_tgt_cmd, se_cmd);
-       struct scsi_qla_host *vha = cmd->vha;
 
        if (cmd->aborted) {
                /* Cmd can loop during Q-full.  tcm_qla2xxx_aborted_task
@@ -657,7 +656,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
                        cmd->se_cmd.transport_state,
                        cmd->se_cmd.t_state,
                        cmd->se_cmd.se_cmd_flags);
-               vha->hw->tgt.tgt_ops->free_cmd(cmd);
                return 0;
        }
 
@@ -685,7 +683,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
 {
        struct qla_tgt_cmd *cmd = container_of(se_cmd,
                                struct qla_tgt_cmd, se_cmd);
-       struct scsi_qla_host *vha = cmd->vha;
        int xmit_type = QLA_TGT_XMIT_STATUS;
 
        if (cmd->aborted) {
@@ -699,7 +696,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
                    cmd, kref_read(&cmd->se_cmd.cmd_kref),
                    cmd->se_cmd.transport_state, cmd->se_cmd.t_state,
                    cmd->se_cmd.se_cmd_flags);
-               vha->hw->tgt.tgt_ops->free_cmd(cmd);
                return 0;
        }
        cmd->bufflen = se_cmd->data_length;
index 91074fd..f4bf62b 100644 (file)
@@ -2475,6 +2475,7 @@ static void iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag)
         */
        mutex_lock(&conn_mutex);
        conn->transport->stop_conn(conn, flag);
+       conn->state = ISCSI_CONN_DOWN;
        mutex_unlock(&conn_mutex);
 
 }
@@ -2901,6 +2902,13 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
        default:
                err = transport->set_param(conn, ev->u.set_param.param,
                                           data, ev->u.set_param.len);
+               if ((conn->state == ISCSI_CONN_BOUND) ||
+                       (conn->state == ISCSI_CONN_UP)) {
+                       err = transport->set_param(conn, ev->u.set_param.param,
+                                       data, ev->u.set_param.len);
+               } else {
+                       return -ENOTCONN;
+               }
        }
 
        return err;
@@ -2960,6 +2968,7 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
                mutex_lock(&conn->ep_mutex);
                conn->ep = NULL;
                mutex_unlock(&conn->ep_mutex);
+               conn->state = ISCSI_CONN_DOWN;
        }
 
        transport->ep_disconnect(ep);
@@ -3727,6 +3736,8 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
                ev->r.retcode = transport->bind_conn(session, conn,
                                                ev->u.b_conn.transport_eph,
                                                ev->u.b_conn.is_leading);
+               if (!ev->r.retcode)
+                       conn->state = ISCSI_CONN_BOUND;
                mutex_unlock(&conn_mutex);
 
                if (ev->r.retcode || !transport->ep_connect)
@@ -3966,7 +3977,8 @@ iscsi_conn_attr(local_ipaddr, ISCSI_PARAM_LOCAL_IPADDR);
 static const char *const connection_state_names[] = {
        [ISCSI_CONN_UP] = "up",
        [ISCSI_CONN_DOWN] = "down",
-       [ISCSI_CONN_FAILED] = "failed"
+       [ISCSI_CONN_FAILED] = "failed",
+       [ISCSI_CONN_BOUND] = "bound"
 };
 
 static ssize_t show_conn_state(struct device *dev,
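
The scsi_transport_iscsi hunks introduce an ISCSI_CONN_BOUND state: set when bind_conn() succeeds, dropped back to ISCSI_CONN_DOWN on stop or endpoint disconnect, and used to gate per-connection operations. A compact sketch of the gate, with illustrative names:

        enum ex_conn_state { EX_DOWN, EX_BOUND, EX_UP, EX_FAILED };
        struct ex_conn { enum ex_conn_state state; /* ... */ };

        static int ex_conn_op(struct ex_conn *conn)
        {
                if (conn->state != EX_BOUND && conn->state != EX_UP)
                        return -ENOTCONN;       /* never bound, or already torn down */
                /* ... forward to the transport only while bound ... */
                return 0;
        }
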
index 1e939a2..98a34ed 100644 (file)
@@ -541,7 +541,7 @@ int srp_reconnect_rport(struct srp_rport *rport)
        res = mutex_lock_interruptible(&rport->mutex);
        if (res)
                goto out;
-       if (rport->state != SRP_RPORT_FAIL_FAST)
+       if (rport->state != SRP_RPORT_FAIL_FAST && rport->state != SRP_RPORT_LOST)
                /*
                 * sdev state must be SDEV_TRANSPORT_OFFLINE, transition
                 * to SDEV_BLOCK is illegal. Calling scsi_target_unblock()
index ee55867..994f1b8 100644 (file)
@@ -280,27 +280,28 @@ static int sd_zbc_update_wp_offset_cb(struct blk_zone *zone, unsigned int idx,
 static void sd_zbc_update_wp_offset_workfn(struct work_struct *work)
 {
        struct scsi_disk *sdkp;
+       unsigned long flags;
        unsigned int zno;
        int ret;
 
        sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work);
 
-       spin_lock_bh(&sdkp->zones_wp_offset_lock);
+       spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
        for (zno = 0; zno < sdkp->nr_zones; zno++) {
                if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST)
                        continue;
 
-               spin_unlock_bh(&sdkp->zones_wp_offset_lock);
+               spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);
                ret = sd_zbc_do_report_zones(sdkp, sdkp->zone_wp_update_buf,
                                             SD_BUF_SIZE,
                                             zno * sdkp->zone_blocks, true);
-               spin_lock_bh(&sdkp->zones_wp_offset_lock);
+               spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
                if (!ret)
                        sd_zbc_parse_report(sdkp, sdkp->zone_wp_update_buf + 64,
                                            zno, sd_zbc_update_wp_offset_cb,
                                            sdkp);
        }
-       spin_unlock_bh(&sdkp->zones_wp_offset_lock);
+       spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);
 
        scsi_device_put(sdkp->device);
 }
@@ -324,6 +325,7 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
        struct request *rq = cmd->request;
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
        unsigned int wp_offset, zno = blk_rq_zone_no(rq);
+       unsigned long flags;
        blk_status_t ret;
 
        ret = sd_zbc_cmnd_checks(cmd);
@@ -337,7 +339,7 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
        if (!blk_req_zone_write_trylock(rq))
                return BLK_STS_ZONE_RESOURCE;
 
-       spin_lock_bh(&sdkp->zones_wp_offset_lock);
+       spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
        wp_offset = sdkp->zones_wp_offset[zno];
        switch (wp_offset) {
        case SD_ZBC_INVALID_WP_OFST:
@@ -366,7 +368,7 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
 
                *lba += wp_offset;
        }
-       spin_unlock_bh(&sdkp->zones_wp_offset_lock);
+       spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);
        if (ret)
                blk_req_zone_write_unlock(rq);
        return ret;
@@ -445,6 +447,7 @@ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd,
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
        unsigned int zno = blk_rq_zone_no(rq);
        enum req_opf op = req_op(rq);
+       unsigned long flags;
 
        /*
         * If we got an error for a command that needs updating the write
@@ -452,7 +455,7 @@ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd,
         * invalid to force an update from disk the next time a zone append
         * command is issued.
         */
-       spin_lock_bh(&sdkp->zones_wp_offset_lock);
+       spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
 
        if (result && op != REQ_OP_ZONE_RESET_ALL) {
                if (op == REQ_OP_ZONE_APPEND) {
@@ -496,7 +499,7 @@ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd,
        }
 
 unlock_wp_offset:
-       spin_unlock_bh(&sdkp->zones_wp_offset_lock);
+       spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);
 
        return good_bytes;
 }
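
The sd_zbc.c hunks convert zones_wp_offset_lock from the _bh spinlock variants to _irqsave/_irqrestore. spin_lock_bh() only masks softirqs; once the lock can also be taken from completion paths that may run with interrupts disabled or in hard interrupt context, interrupts must be disabled around the critical section or the lock can deadlock against itself. The resulting pattern:

        unsigned long flags;

        spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);  /* hardirq-safe */
        /* ... touch the write-pointer offset cache ... */
        spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);
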
index c53f456..a1dacb6 100644 (file)
@@ -48,7 +48,6 @@
 MODULE_AUTHOR("Microsemi");
 MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
        DRIVER_VERSION);
-MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
 MODULE_VERSION(DRIVER_VERSION);
 MODULE_LICENSE("GPL");
 
index 841ad2f..9ca536a 100644 (file)
@@ -1269,8 +1269,8 @@ static int st_open(struct inode *inode, struct file *filp)
        spin_lock(&st_use_lock);
        if (STp->in_use) {
                spin_unlock(&st_use_lock);
-               scsi_tape_put(STp);
                DEBC_printk(STp, "Device already in use.\n");
+               scsi_tape_put(STp);
                return (-EBUSY);
        }
 
index c55202b..a981f26 100644 (file)
@@ -911,7 +911,7 @@ static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
        if (!hba->vreg_info.vccq2 || !hba->vreg_info.vcc)
                return;
 
-       if (lpm & !hba->vreg_info.vcc->enabled)
+       if (lpm && !hba->vreg_info.vcc->enabled)
                regulator_set_mode(hba->vreg_info.vccq2->reg,
                                   REGULATOR_MODE_IDLE);
        else if (!lpm)
index c867607..d3d05e9 100644 (file)
@@ -6386,37 +6386,34 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request *req;
        unsigned long flags;
-       int free_slot, task_tag, err;
+       int task_tag, err;
 
        /*
-        * Get free slot, sleep if slots are unavailable.
-        * Even though we use wait_event() which sleeps indefinitely,
-        * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
+        * blk_get_request() is used here only to get a free tag.
         */
        req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);
 
        req->end_io_data = &wait;
-       free_slot = req->tag;
-       WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
        ufshcd_hold(hba, false);
 
        spin_lock_irqsave(host->host_lock, flags);
-       task_tag = hba->nutrs + free_slot;
+       blk_mq_start_request(req);
 
+       task_tag = req->tag;
        treq->req_header.dword_0 |= cpu_to_be32(task_tag);
 
-       memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
-       ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
+       memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
+       ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
 
        /* send command to the controller */
-       __set_bit(free_slot, &hba->outstanding_tasks);
+       __set_bit(task_tag, &hba->outstanding_tasks);
 
        /* Make sure descriptors are ready before ringing the task doorbell */
        wmb();
 
-       ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
+       ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
        /* Make sure that doorbell is committed immediately */
        wmb();
 
@@ -6436,24 +6433,24 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
                ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
                dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
                                __func__, tm_function);
-               if (ufshcd_clear_tm_cmd(hba, free_slot))
-                       dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
-                                       __func__, free_slot);
+               if (ufshcd_clear_tm_cmd(hba, task_tag))
+                       dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
+                                       __func__, task_tag);
                err = -ETIMEDOUT;
        } else {
                err = 0;
-               memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
+               memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
 
                ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
        }
 
        spin_lock_irqsave(hba->host->host_lock, flags);
-       __clear_bit(free_slot, &hba->outstanding_tasks);
+       __clear_bit(task_tag, &hba->outstanding_tasks);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
+       ufshcd_release(hba);
        blk_put_request(req);
 
-       ufshcd_release(hba);
        return err;
 }
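
The ufshcd hunk stops deriving the task-management slot from hba->nutrs + req->tag and uses the block-layer tag directly, with blk_mq_start_request() called so the tag is accounted as in flight. A hedged sketch of using a request purely as a tag allocator:

        struct request *req;
        int tag;

        req = blk_get_request(q, REQ_OP_DRV_OUT, 0);    /* only want req->tag */
        if (IS_ERR(req))
                return PTR_ERR(req);
        blk_mq_start_request(req);      /* mark the tag busy for the block layer */
        tag = req->tag;

        /* ... program the controller slot/doorbell using "tag" ... */

        blk_put_request(req);           /* return the tag to the allocator */
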
 
index e5d7fb8..bd0fbcd 100644 (file)
@@ -30,7 +30,6 @@
 MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>");
 MODULE_DESCRIPTION("Maple bus driver for Dreamcast");
 MODULE_LICENSE("GPL v2");
-MODULE_SUPPORTED_DEVICE("{{SEGA, Dreamcast/Maple}}");
 
 static void maple_dma_handler(struct work_struct *work);
 static void maple_vblank_handler(struct work_struct *work);
index a1b9be1..fde4edd 100644 (file)
@@ -186,7 +186,7 @@ struct qm_eqcr_entry {
        __be32 tag;
        struct qm_fd fd;
        u8 __reserved3[32];
-} __packed;
+} __packed __aligned(8);
 #define QM_EQCR_VERB_VBIT              0x80
 #define QM_EQCR_VERB_CMD_MASK          0x61    /* but only one value; */
 #define QM_EQCR_VERB_CMD_ENQUEUE       0x01
index 6268bfa..c3e379a 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/platform_device.h>
 #include <linux/printk.h>
 #include <linux/module.h>
-#include <linux/errno.h>
 #include <linux/io.h>
 #include <linux/reboot.h>
 
index f42954e..1fd29f9 100644 (file)
@@ -3,7 +3,6 @@
 
 #include <linux/acpi.h>
 #include <linux/clk.h>
-#include <linux/console.h>
 #include <linux/slab.h>
 #include <linux/dma-mapping.h>
 #include <linux/io.h>
@@ -92,14 +91,11 @@ struct geni_wrapper {
        struct device *dev;
        void __iomem *base;
        struct clk_bulk_data ahb_clks[NUM_AHB_CLKS];
-       struct geni_icc_path to_core;
 };
 
 static const char * const icc_path_names[] = {"qup-core", "qup-config",
                                                "qup-memory"};
 
-static struct geni_wrapper *earlycon_wrapper;
-
 #define QUP_HW_VER_REG                 0x4
 
 /* Common SE registers */
@@ -843,44 +839,11 @@ int geni_icc_disable(struct geni_se *se)
 }
 EXPORT_SYMBOL(geni_icc_disable);
 
-void geni_remove_earlycon_icc_vote(void)
-{
-       struct platform_device *pdev;
-       struct geni_wrapper *wrapper;
-       struct device_node *parent;
-       struct device_node *child;
-
-       if (!earlycon_wrapper)
-               return;
-
-       wrapper = earlycon_wrapper;
-       parent = of_get_next_parent(wrapper->dev->of_node);
-       for_each_child_of_node(parent, child) {
-               if (!of_device_is_compatible(child, "qcom,geni-se-qup"))
-                       continue;
-
-               pdev = of_find_device_by_node(child);
-               if (!pdev)
-                       continue;
-
-               wrapper = platform_get_drvdata(pdev);
-               icc_put(wrapper->to_core.path);
-               wrapper->to_core.path = NULL;
-
-       }
-       of_node_put(parent);
-
-       earlycon_wrapper = NULL;
-}
-EXPORT_SYMBOL(geni_remove_earlycon_icc_vote);
-
 static int geni_se_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct resource *res;
        struct geni_wrapper *wrapper;
-       struct console __maybe_unused *bcon;
-       bool __maybe_unused has_earlycon = false;
        int ret;
 
        wrapper = devm_kzalloc(dev, sizeof(*wrapper), GFP_KERNEL);
@@ -903,43 +866,6 @@ static int geni_se_probe(struct platform_device *pdev)
                }
        }
 
-#ifdef CONFIG_SERIAL_EARLYCON
-       for_each_console(bcon) {
-               if (!strcmp(bcon->name, "qcom_geni")) {
-                       has_earlycon = true;
-                       break;
-               }
-       }
-       if (!has_earlycon)
-               goto exit;
-
-       wrapper->to_core.path = devm_of_icc_get(dev, "qup-core");
-       if (IS_ERR(wrapper->to_core.path))
-               return PTR_ERR(wrapper->to_core.path);
-       /*
-        * Put minmal BW request on core clocks on behalf of early console.
-        * The vote will be removed earlycon exit function.
-        *
-        * Note: We are putting vote on each QUP wrapper instead only to which
-        * earlycon is connected because QUP core clock of different wrapper
-        * share same voltage domain. If core1 is put to 0, then core2 will
-        * also run at 0, if not voted. Default ICC vote will be removed ASA
-        * we touch any of the core clock.
-        * core1 = core2 = max(core1, core2)
-        */
-       ret = icc_set_bw(wrapper->to_core.path, GENI_DEFAULT_BW,
-                               GENI_DEFAULT_BW);
-       if (ret) {
-               dev_err(&pdev->dev, "%s: ICC BW voting failed for core: %d\n",
-                       __func__, ret);
-               return ret;
-       }
-
-       if (of_get_compatible_child(pdev->dev.of_node, "qcom,geni-debug-uart"))
-               earlycon_wrapper = wrapper;
-       of_node_put(pdev->dev.of_node);
-exit:
-#endif
        dev_set_drvdata(dev, wrapper);
        dev_dbg(dev, "GENI SE Driver probed\n");
        return devm_of_platform_populate(dev);
index bf1468e..51143a6 100644 (file)
@@ -332,7 +332,7 @@ static const struct omap_prm_data dra7_prm_data[] = {
        {
                .name = "l3init", .base = 0x4ae07300,
                .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
-               .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012,
+               .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01,
                .clkdm_name = "pcie"
        },
        {
@@ -830,8 +830,12 @@ static int omap_reset_deassert(struct reset_controller_dev *rcdev,
                       reset->prm->data->name, id);
 
 exit:
-       if (reset->clkdm)
+       if (reset->clkdm) {
+               /* At least dra7 iva needs a delay before clkdm idle */
+               if (has_rstst)
+                       udelay(1);
                pdata->clkdm_allow_idle(reset->clkdm);
+       }
 
        return ret;
 }
index 442cc7c..52ddb32 100644 (file)
@@ -1433,6 +1433,7 @@ static int cqspi_probe(struct platform_device *pdev)
        cqspi = spi_master_get_devdata(master);
 
        cqspi->pdev = pdev;
+       platform_set_drvdata(pdev, cqspi);
 
        /* Obtain configuration from OF. */
        ret = cqspi_of_get_pdata(cqspi);
index d740c47..2f20bd5 100644 (file)
@@ -1281,7 +1281,7 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev,
             devpriv->amcc + AMCC_OP_REG_INTCSR);
 
        ret = request_irq(pcidev->irq, cb_pcidas_interrupt, IRQF_SHARED,
-                         dev->board_name, dev);
+                         "cb_pcidas", dev);
        if (ret) {
                dev_dbg(dev->class_dev, "unable to allocate irq %d\n",
                        pcidev->irq);
index fa987bb..6d3ba39 100644 (file)
@@ -4035,7 +4035,7 @@ static int auto_attach(struct comedi_device *dev,
        init_stc_registers(dev);
 
        retval = request_irq(pcidev->irq, handle_interrupt, IRQF_SHARED,
-                            dev->board_name, dev);
+                            "cb_pcidas64", dev);
        if (retval) {
                dev_dbg(dev->class_dev, "unable to allocate irq %u\n",
                        pcidev->irq);
index 7956abc..9f92081 100644 (file)
@@ -877,5 +877,4 @@ module_comedi_usb_driver(vmk80xx_driver, vmk80xx_usb_driver);
 
 MODULE_AUTHOR("Manuel Gebele <forensixs@gmx.de>");
 MODULE_DESCRIPTION("Velleman USB Board Low-Level Driver");
-MODULE_SUPPORTED_DEVICE("K8055/K8061 aka VM110/VM140");
 MODULE_LICENSE("GPL");
index 7a09061..df5ca35 100644 (file)
@@ -1131,8 +1131,8 @@ static void tegra_channel_host1x_syncpts_free(struct tegra_vi_channel *chan)
        int i;
 
        for (i = 0; i < chan->numgangports; i++) {
-               host1x_syncpt_free(chan->mw_ack_sp[i]);
-               host1x_syncpt_free(chan->frame_start_sp[i]);
+               host1x_syncpt_put(chan->mw_ack_sp[i]);
+               host1x_syncpt_put(chan->frame_start_sp[i]);
        }
 }
 
@@ -1177,7 +1177,7 @@ static int tegra_channel_host1x_syncpt_init(struct tegra_vi_channel *chan)
                mw_sp = host1x_syncpt_request(&vi->client, flags);
                if (!mw_sp) {
                        dev_err(vi->dev, "failed to request memory ack syncpoint\n");
-                       host1x_syncpt_free(fs_sp);
+                       host1x_syncpt_put(fs_sp);
                        ret = -ENOMEM;
                        goto free_syncpts;
                }
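
The tegra video hunks track the host1x rename from host1x_syncpt_free() to host1x_syncpt_put(): syncpoints are now reference counted, so the channel drops its reference rather than freeing outright. The usual get/put lifetime, sketched against the calls visible in the diff:

        sp = host1x_syncpt_request(&vi->client, flags); /* holds one reference */
        if (!sp)
                return -ENOMEM;
        /* ... use the syncpoint ... */
        host1x_syncpt_put(sp);  /* drop the reference; freed when it hits zero */
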
index b84f00b..4cabaf2 100644 (file)
@@ -1105,7 +1105,7 @@ struct rtllib_network {
        bool    bWithAironetIE;
        bool    bCkipSupported;
        bool    bCcxRmEnable;
-       u16     CcxRmState[2];
+       u8      CcxRmState[2];
        bool    bMBssidValid;
        u8      MBssidMask;
        u8      MBssid[ETH_ALEN];
index 66c1353..15bbb63 100644 (file)
@@ -1967,7 +1967,7 @@ static void rtllib_parse_mife_generic(struct rtllib_device *ieee,
            info_element->data[2] == 0x96 &&
            info_element->data[3] == 0x01) {
                if (info_element->len == 6) {
-                       memcpy(network->CcxRmState, &info_element[4], 2);
+                       memcpy(network->CcxRmState, &info_element->data[4], 2);
                        if (network->CcxRmState[0] != 0)
                                network->bCcxRmEnable = true;
                        else
index e7061d3..c3c2c15 100644 (file)
@@ -150,7 +150,7 @@ struct vnt_cts {
        u16 reserved;
        struct ieee80211_cts data;
        u16 reserved2;
-} __packed;
+} __packed __aligned(2);
 
 struct vnt_cts_fb {
        struct vnt_phy_field b;
@@ -160,7 +160,7 @@ struct vnt_cts_fb {
        __le16 cts_duration_ba_f1;
        struct ieee80211_cts data;
        u16 reserved2;
-} __packed;
+} __packed __aligned(2);
 
 struct vnt_tx_fifo_head {
        u8 tx_key[WLAN_KEY_LEN_CCMP];
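
The vt665x hunks add __aligned(2) to __packed descriptor structs. __packed alone lowers the struct's alignment to 1, which both allows the compiler to byte-split accesses and lets the embedded struct ieee80211_cts land at odd addresses; restoring 2-byte alignment keeps the on-wire layout (no padding) while satisfying the natural alignment of the 16-bit fields. In general:

        struct ex_frame {
                __le16 frame_control;   /* 16-bit fields want 2-byte alignment */
                __le16 duration;
        } __packed __aligned(2);        /* no padding, never placed at odd addresses */
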
index d0e7ed8..e5c443b 100644 (file)
@@ -1166,6 +1166,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
        target_get_sess_cmd(&cmd->se_cmd, true);
 
+       cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
        cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, hdr->cdb);
        if (cmd->sense_reason) {
                if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
@@ -1180,8 +1181,6 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        if (cmd->sense_reason)
                goto attach_cmd;
 
-       /* only used for printks or comparing with ->ref_task_tag */
-       cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
        cmd->sense_reason = target_cmd_parse_cdb(&cmd->se_cmd);
        if (cmd->sense_reason)
                goto attach_cmd;
index 3cbc074..9ee797b 100644 (file)
@@ -882,7 +882,6 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                        if (!bio) {
 new_bio:
                                nr_vecs = bio_max_segs(nr_pages);
-                               nr_pages -= nr_vecs;
                                /*
                                 * Calls bio_kmalloc() and sets bio->bi_end_io()
                                 */
@@ -939,6 +938,14 @@ new_bio:
 
        return 0;
 fail:
+       if (bio)
+               bio_put(bio);
+       while (req->bio) {
+               bio = req->bio;
+               req->bio = bio->bi_next;
+               bio_put(bio);
+       }
+       req->biotail = NULL;
        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 }
 
index cf4718c..319a1e7 100644 (file)
@@ -747,7 +747,6 @@ module_platform_driver(optee_driver);
 
 MODULE_AUTHOR("Linaro");
 MODULE_DESCRIPTION("OP-TEE driver");
-MODULE_SUPPORTED_DEVICE("");
 MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("platform:optee");
index 345917a..1c4aac8 100644 (file)
@@ -674,6 +674,9 @@ void thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev,
 {
        struct cooling_dev_stats *stats = cdev->stats;
 
+       if (!stats)
+               return;
+
        spin_lock(&stats->lock);
 
        if (stats->state == new_state)
index 620bcf5..c44fad2 100644 (file)
@@ -347,7 +347,7 @@ static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
        ret = tb_retimer_nvm_add(rt);
        if (ret) {
                dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
-               device_del(&rt->dev);
+               device_unregister(&rt->dev);
                return ret;
        }
 
@@ -406,7 +406,7 @@ static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
  */
 int tb_retimer_scan(struct tb_port *port)
 {
-       u32 status[TB_MAX_RETIMER_INDEX] = {};
+       u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
        int ret, i, last_idx = 0;
 
        if (!port->cap_usb4)
index b63fecc..2a95b4c 100644 (file)
@@ -768,12 +768,6 @@ static int tb_init_port(struct tb_port *port)
 
        tb_dump_port(port->sw->tb, &port->config);
 
-       /* Control port does not need HopID allocation */
-       if (port->port) {
-               ida_init(&port->in_hopids);
-               ida_init(&port->out_hopids);
-       }
-
        INIT_LIST_HEAD(&port->list);
        return 0;
 
@@ -1842,10 +1836,8 @@ static void tb_switch_release(struct device *dev)
        dma_port_free(sw->dma_port);
 
        tb_switch_for_each_port(sw, port) {
-               if (!port->disabled) {
-                       ida_destroy(&port->in_hopids);
-                       ida_destroy(&port->out_hopids);
-               }
+               ida_destroy(&port->in_hopids);
+               ida_destroy(&port->out_hopids);
        }
 
        kfree(sw->uuid);
@@ -2025,6 +2017,12 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
                /* minimum setup for tb_find_cap and tb_drom_read to work */
                sw->ports[i].sw = sw;
                sw->ports[i].port = i;
+
+               /* Control port does not need HopID allocation */
+               if (i) {
+                       ida_init(&sw->ports[i].in_hopids);
+                       ida_init(&sw->ports[i].out_hopids);
+               }
        }
 
        ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
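
The switch.c hunks move ida_init() from tb_init_port() into tb_switch_alloc() and drop the disabled-port checks from tb_switch_release(). Initializing per-port state at allocation time means the release path can call ida_destroy() unconditionally, even when the switch is freed on an error path before any port was set up. The general shape, with placeholder names:

        struct ex_obj { struct ida ids; };

        static struct ex_obj *ex_alloc(void)
        {
                struct ex_obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

                if (!o)
                        return NULL;
                ida_init(&o->ids);      /* always initialized at birth ... */
                return o;
        }

        static void ex_release(struct ex_obj *o)
        {
                ida_destroy(&o->ids);   /* ... so this is always safe */
                kfree(o);
        }
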
index 1f000ac..c348b1f 100644 (file)
@@ -138,6 +138,10 @@ static void tb_discover_tunnels(struct tb_switch *sw)
                                parent->boot = true;
                                parent = tb_switch_parent(parent);
                        }
+               } else if (tb_tunnel_is_dp(tunnel)) {
+                       /* Keep the domain from powering down */
+                       pm_runtime_get_sync(&tunnel->src_port->sw->dev);
+                       pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
                }
 
                list_add_tail(&tunnel->list, &tcm->tunnel_list);
index 9a87275..94af7a5 100644 (file)
@@ -1639,8 +1639,6 @@ module_exit(icom_exit);
 
 MODULE_AUTHOR("Michael Anderson <mjanders@us.ibm.com>");
 MODULE_DESCRIPTION("IBM iSeries Serial IOA driver");
-MODULE_SUPPORTED_DEVICE
-    ("IBM iSeries 2745, 2771, 2772, 2742, 2793 and 2805 Communications adapters");
 MODULE_LICENSE("GPL");
 MODULE_FIRMWARE("icom_call_setup.bin");
 MODULE_FIRMWARE("icom_res_dce.bin");
index cd30da0..0ea799b 100644 (file)
@@ -19,7 +19,6 @@
 MODULE_AUTHOR("Digi International, https://www.digi.com");
 MODULE_DESCRIPTION("Driver for the Digi International Neo and Classic PCI based product line");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("jsm");
 
 #define JSM_DRIVER_NAME "jsm"
 #define NR_PORTS       32
index 291649f..0d85b55 100644 (file)
@@ -1177,12 +1177,6 @@ static inline void qcom_geni_serial_enable_early_read(struct geni_se *se,
                                                      struct console *con) { }
 #endif
 
-static int qcom_geni_serial_earlycon_exit(struct console *con)
-{
-       geni_remove_earlycon_icc_vote();
-       return 0;
-}
-
 static struct qcom_geni_private_data earlycon_private_data;
 
 static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
@@ -1233,7 +1227,6 @@ static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
        writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
 
        dev->con->write = qcom_geni_serial_earlycon_write;
-       dev->con->exit = qcom_geni_serial_earlycon_exit;
        dev->con->setup = NULL;
        qcom_geni_serial_enable_early_read(&se, dev->con);
 
index f2ebbac..d7d4bdd 100644 (file)
@@ -1128,6 +1128,10 @@ static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep,
                return -ESHUTDOWN;
        }
 
+       /* Requests have been dequeued while disabling the endpoint. */
+       if (!(pep->ep_state & EP_ENABLED))
+               return 0;
+
        spin_lock_irqsave(&pdev->lock, flags);
        ret = cdnsp_ep_dequeue(pep, to_cdnsp_request(request));
        spin_unlock_irqrestore(&pdev->lock, flags);
index f9170d1..5f0513c 100644 (file)
@@ -2197,7 +2197,10 @@ static int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev,
         * inverted in the first TDs isoc TRB.
         */
        field = TRB_TYPE(TRB_ISOC) | TRB_TLBPC(last_burst_pkt) |
-               start_cycle ? 0 : 1 | TRB_SIA | TRB_TBC(burst_count);
+               TRB_SIA | TRB_TBC(burst_count);
+
+       if (!start_cycle)
+               field |= TRB_CYCLE;
 
        /* Fill the rest of the TRB fields, and remaining normal TRBs. */
        for (i = 0; i < trbs_per_td; i++) {
index 39ddb55..3fda1ec 100644 (file)
@@ -147,17 +147,29 @@ static inline int acm_set_control(struct acm *acm, int control)
 #define acm_send_break(acm, ms) \
        acm_ctrl_msg(acm, USB_CDC_REQ_SEND_BREAK, ms, NULL, 0)
 
-static void acm_kill_urbs(struct acm *acm)
+static void acm_poison_urbs(struct acm *acm)
 {
        int i;
 
-       usb_kill_urb(acm->ctrlurb);
+       usb_poison_urb(acm->ctrlurb);
        for (i = 0; i < ACM_NW; i++)
-               usb_kill_urb(acm->wb[i].urb);
+               usb_poison_urb(acm->wb[i].urb);
        for (i = 0; i < acm->rx_buflimit; i++)
-               usb_kill_urb(acm->read_urbs[i]);
+               usb_poison_urb(acm->read_urbs[i]);
+}
+
+static void acm_unpoison_urbs(struct acm *acm)
+{
+       int i;
+
+       for (i = 0; i < acm->rx_buflimit; i++)
+               usb_unpoison_urb(acm->read_urbs[i]);
+       for (i = 0; i < ACM_NW; i++)
+               usb_unpoison_urb(acm->wb[i].urb);
+       usb_unpoison_urb(acm->ctrlurb);
 }
 
+
 /*
  * Write buffer management.
  * All of these assume proper locks taken by the caller.
@@ -226,9 +238,10 @@ static int acm_start_wb(struct acm *acm, struct acm_wb *wb)
 
        rc = usb_submit_urb(wb->urb, GFP_ATOMIC);
        if (rc < 0) {
-               dev_err(&acm->data->dev,
-                       "%s - usb_submit_urb(write bulk) failed: %d\n",
-                       __func__, rc);
+               if (rc != -EPERM)
+                       dev_err(&acm->data->dev,
+                               "%s - usb_submit_urb(write bulk) failed: %d\n",
+                               __func__, rc);
                acm_write_done(acm, wb);
        }
        return rc;
@@ -313,8 +326,10 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
                        acm->iocount.dsr++;
                if (difference & ACM_CTRL_DCD)
                        acm->iocount.dcd++;
-               if (newctrl & ACM_CTRL_BRK)
+               if (newctrl & ACM_CTRL_BRK) {
                        acm->iocount.brk++;
+                       tty_insert_flip_char(&acm->port, 0, TTY_BREAK);
+               }
                if (newctrl & ACM_CTRL_RI)
                        acm->iocount.rng++;
                if (newctrl & ACM_CTRL_FRAMING)
@@ -480,11 +495,6 @@ static void acm_read_bulk_callback(struct urb *urb)
        dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n",
                rb->index, urb->actual_length, status);
 
-       if (!acm->dev) {
-               dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__);
-               return;
-       }
-
        switch (status) {
        case 0:
                usb_mark_last_busy(acm->dev);
@@ -649,7 +659,8 @@ static void acm_port_dtr_rts(struct tty_port *port, int raise)
 
        res = acm_set_control(acm, val);
        if (res && (acm->ctrl_caps & USB_CDC_CAP_LINE))
-               dev_err(&acm->control->dev, "failed to set dtr/rts\n");
+               /* This is broken in too many devices to spam the logs */
+               dev_dbg(&acm->control->dev, "failed to set dtr/rts\n");
 }
 
 static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
@@ -731,6 +742,7 @@ static void acm_port_shutdown(struct tty_port *port)
         * Need to grab write_lock to prevent race with resume, but no need to
         * hold it due to the tty-port initialised flag.
         */
+       acm_poison_urbs(acm);
        spin_lock_irq(&acm->write_lock);
        spin_unlock_irq(&acm->write_lock);
 
@@ -747,7 +759,8 @@ static void acm_port_shutdown(struct tty_port *port)
                usb_autopm_put_interface_async(acm->control);
        }
 
-       acm_kill_urbs(acm);
+       acm_unpoison_urbs(acm);
+
 }
 
 static void acm_tty_cleanup(struct tty_struct *tty)
@@ -1296,13 +1309,6 @@ skip_normal_probe:
        if (!combined_interfaces && intf != control_interface)
                return -ENODEV;
 
-       if (!combined_interfaces && usb_interface_claimed(data_interface)) {
-               /* valid in this context */
-               dev_dbg(&intf->dev, "The data interface isn't available\n");
-               return -EBUSY;
-       }
-
-
        if (data_interface->cur_altsetting->desc.bNumEndpoints < 2 ||
            control_interface->cur_altsetting->desc.bNumEndpoints == 0)
                return -EINVAL;
@@ -1323,8 +1329,8 @@ made_compressed_probe:
        dev_dbg(&intf->dev, "interfaces are valid\n");
 
        acm = kzalloc(sizeof(struct acm), GFP_KERNEL);
-       if (acm == NULL)
-               goto alloc_fail;
+       if (!acm)
+               return -ENOMEM;
 
        tty_port_init(&acm->port);
        acm->port.ops = &acm_port_ops;
@@ -1341,7 +1347,7 @@ made_compressed_probe:
 
        minor = acm_alloc_minor(acm);
        if (minor < 0)
-               goto alloc_fail1;
+               goto err_put_port;
 
        acm->minor = minor;
        acm->dev = usb_dev;
@@ -1372,15 +1378,15 @@ made_compressed_probe:
 
        buf = usb_alloc_coherent(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
        if (!buf)
-               goto alloc_fail1;
+               goto err_put_port;
        acm->ctrl_buffer = buf;
 
        if (acm_write_buffers_alloc(acm) < 0)
-               goto alloc_fail2;
+               goto err_free_ctrl_buffer;
 
        acm->ctrlurb = usb_alloc_urb(0, GFP_KERNEL);
        if (!acm->ctrlurb)
-               goto alloc_fail3;
+               goto err_free_write_buffers;
 
        for (i = 0; i < num_rx_buf; i++) {
                struct acm_rb *rb = &(acm->read_buffers[i]);
@@ -1389,13 +1395,13 @@ made_compressed_probe:
                rb->base = usb_alloc_coherent(acm->dev, readsize, GFP_KERNEL,
                                                                &rb->dma);
                if (!rb->base)
-                       goto alloc_fail4;
+                       goto err_free_read_urbs;
                rb->index = i;
                rb->instance = acm;
 
                urb = usb_alloc_urb(0, GFP_KERNEL);
                if (!urb)
-                       goto alloc_fail4;
+                       goto err_free_read_urbs;
 
                urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
                urb->transfer_dma = rb->dma;
@@ -1416,8 +1422,8 @@ made_compressed_probe:
                struct acm_wb *snd = &(acm->wb[i]);
 
                snd->urb = usb_alloc_urb(0, GFP_KERNEL);
-               if (snd->urb == NULL)
-                       goto alloc_fail5;
+               if (!snd->urb)
+                       goto err_free_write_urbs;
 
                if (usb_endpoint_xfer_int(epwrite))
                        usb_fill_int_urb(snd->urb, usb_dev, acm->out,
@@ -1435,7 +1441,7 @@ made_compressed_probe:
 
        i = device_create_file(&intf->dev, &dev_attr_bmCapabilities);
        if (i < 0)
-               goto alloc_fail5;
+               goto err_free_write_urbs;
 
        if (h.usb_cdc_country_functional_desc) { /* export the country data */
                struct usb_cdc_country_functional_desc * cfd =
@@ -1480,20 +1486,21 @@ skip_countries:
        acm->nb_index = 0;
        acm->nb_size = 0;
 
-       dev_info(&intf->dev, "ttyACM%d: USB ACM device\n", minor);
-
        acm->line.dwDTERate = cpu_to_le32(9600);
        acm->line.bDataBits = 8;
        acm_set_line(acm, &acm->line);
 
-       usb_driver_claim_interface(&acm_driver, data_interface, acm);
-       usb_set_intfdata(data_interface, acm);
+       if (!acm->combined_interfaces) {
+               rv = usb_driver_claim_interface(&acm_driver, data_interface, acm);
+               if (rv)
+                       goto err_remove_files;
+       }
 
        tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
                        &control_interface->dev);
        if (IS_ERR(tty_dev)) {
                rv = PTR_ERR(tty_dev);
-               goto alloc_fail6;
+               goto err_release_data_interface;
        }
 
        if (quirks & CLEAR_HALT_CONDITIONS) {
@@ -1501,32 +1508,39 @@ skip_countries:
                usb_clear_halt(usb_dev, acm->out);
        }
 
+       dev_info(&intf->dev, "ttyACM%d: USB ACM device\n", minor);
+
        return 0;
-alloc_fail6:
+
+err_release_data_interface:
+       if (!acm->combined_interfaces) {
+               /* Clear driver data so that disconnect() returns early. */
+               usb_set_intfdata(data_interface, NULL);
+               usb_driver_release_interface(&acm_driver, data_interface);
+       }
+err_remove_files:
        if (acm->country_codes) {
                device_remove_file(&acm->control->dev,
                                &dev_attr_wCountryCodes);
                device_remove_file(&acm->control->dev,
                                &dev_attr_iCountryCodeRelDate);
-               kfree(acm->country_codes);
        }
        device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
-alloc_fail5:
-       usb_set_intfdata(intf, NULL);
+err_free_write_urbs:
        for (i = 0; i < ACM_NW; i++)
                usb_free_urb(acm->wb[i].urb);
-alloc_fail4:
+err_free_read_urbs:
        for (i = 0; i < num_rx_buf; i++)
                usb_free_urb(acm->read_urbs[i]);
        acm_read_buffers_free(acm);
        usb_free_urb(acm->ctrlurb);
-alloc_fail3:
+err_free_write_buffers:
        acm_write_buffers_free(acm);
-alloc_fail2:
+err_free_ctrl_buffer:
        usb_free_coherent(usb_dev, ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
-alloc_fail1:
+err_put_port:
        tty_port_put(&acm->port);
-alloc_fail:
+
        return rv;
 }
 
@@ -1540,8 +1554,14 @@ static void acm_disconnect(struct usb_interface *intf)
        if (!acm)
                return;
 
-       mutex_lock(&acm->mutex);
        acm->disconnected = true;
+       /*
+        * there is a circular dependency. acm_softint() can resubmit
+        * the URBs in error handling so we need to block any
+        * submission right away
+        */
+       acm_poison_urbs(acm);
+       mutex_lock(&acm->mutex);
        if (acm->country_codes) {
                device_remove_file(&acm->control->dev,
                                &dev_attr_wCountryCodes);
@@ -1560,7 +1580,6 @@ static void acm_disconnect(struct usb_interface *intf)
                tty_kref_put(tty);
        }
 
-       acm_kill_urbs(acm);
        cancel_delayed_work_sync(&acm->dwork);
 
        tty_unregister_device(acm_tty_driver, acm->minor);
@@ -1602,7 +1621,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
        if (cnt)
                return 0;
 
-       acm_kill_urbs(acm);
+       acm_poison_urbs(acm);
        cancel_delayed_work_sync(&acm->dwork);
        acm->urbs_in_error_delay = 0;
 
@@ -1615,6 +1634,7 @@ static int acm_resume(struct usb_interface *intf)
        struct urb *urb;
        int rv = 0;
 
+       acm_unpoison_urbs(acm);
        spin_lock_irq(&acm->write_lock);
 
        if (--acm->susp_count)
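
The cdc-acm hunks replace usb_kill_urb() with the poison/unpoison pair. usb_kill_urb() cancels an URB but lets a racing completion handler (acm_softint() here) immediately resubmit it; usb_poison_urb() both cancels and makes further usb_submit_urb() calls fail with -EPERM until usb_unpoison_urb() runs, which is why the write path above now stays quiet on -EPERM. The quiesce window looks like:

        usb_poison_urb(urb);    /* cancel and reject resubmission (-EPERM) */
        /* ... teardown or suspend work; completions cannot rearm the urb ... */
        usb_unpoison_urb(urb);  /* re-allow submission, e.g. on resume */
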
index 6ade3da..76ac5d6 100644 (file)
@@ -498,6 +498,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* DJI CineSSD */
        { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
 
+       /* Fibocom L850-GL LTE Modem */
+       { USB_DEVICE(0x2cb7, 0x0007), .driver_info =
+                       USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+
        /* INTEL VALUE SSD */
        { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
 
index fc3269f..1a9789e 100644 (file)
@@ -4322,7 +4322,8 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
        if (hsotg->op_state == OTG_STATE_B_PERIPHERAL)
                goto unlock;
 
-       if (hsotg->params.power_down > DWC2_POWER_DOWN_PARAM_PARTIAL)
+       if (hsotg->params.power_down != DWC2_POWER_DOWN_PARAM_PARTIAL ||
+           hsotg->flags.b.port_connect_status == 0)
                goto skip_power_saving;
 
        /*
@@ -5398,7 +5399,7 @@ int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg)
        dwc2_writel(hsotg, hprt0, HPRT0);
 
        /* Wait for the HPRT0.PrtSusp register field to be set */
-       if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000))
+       if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 5000))
                dev_warn(hsotg->dev, "Suspend wasn't generated\n");
 
        /*
index 3d3918a..4c5c697 100644 (file)
@@ -120,6 +120,8 @@ static const struct property_entry dwc3_pci_intel_properties[] = {
 static const struct property_entry dwc3_pci_mrfld_properties[] = {
        PROPERTY_ENTRY_STRING("dr_mode", "otg"),
        PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
+       PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
+       PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
        PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
        {}
 };
index fcaf044..3de291a 100644 (file)
@@ -244,6 +244,9 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom)
        struct device *dev = qcom->dev;
        int ret;
 
+       if (has_acpi_companion(dev))
+               return 0;
+
        qcom->icc_path_ddr = of_icc_get(dev, "usb-ddr");
        if (IS_ERR(qcom->icc_path_ddr)) {
                dev_err(dev, "failed to get usb-ddr path: %ld\n",
index aebcf8e..c7ef218 100644 (file)
@@ -783,8 +783,6 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
 
        trace_dwc3_gadget_ep_disable(dep);
 
-       dwc3_remove_requests(dwc, dep);
-
        /* make sure HW endpoint isn't stalled */
        if (dep->flags & DWC3_EP_STALL)
                __dwc3_gadget_ep_set_halt(dep, 0, false);
@@ -793,16 +791,18 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
        reg &= ~DWC3_DALEPENA_EP(dep->number);
        dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
 
-       dep->stream_capable = false;
-       dep->type = 0;
-       dep->flags = 0;
-
        /* Clear out the ep descriptors for non-ep0 */
        if (dep->number > 1) {
                dep->endpoint.comp_desc = NULL;
                dep->endpoint.desc = NULL;
        }
 
+       dwc3_remove_requests(dwc, dep);
+
+       dep->stream_capable = false;
+       dep->type = 0;
+       dep->flags = 0;
+
        return 0;
 }
 
@@ -1617,7 +1617,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
 {
        struct dwc3             *dwc = dep->dwc;
 
-       if (!dep->endpoint.desc || !dwc->pullups_connected) {
+       if (!dep->endpoint.desc || !dwc->pullups_connected || !dwc->connected) {
                dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
                                dep->name);
                return -ESHUTDOWN;
@@ -2083,7 +2083,7 @@ static void __dwc3_gadget_set_speed(struct dwc3 *dwc)
        u32                     reg;
 
        speed = dwc->gadget_max_speed;
-       if (speed > dwc->maximum_speed)
+       if (speed == USB_SPEED_UNKNOWN || speed > dwc->maximum_speed)
                speed = dwc->maximum_speed;
 
        if (speed == USB_SPEED_SUPER_PLUS &&
@@ -2247,6 +2247,7 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
        if (!is_on) {
                u32 count;
 
+               dwc->connected = false;
                /*
                 * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
                 * Section 4.1.8 Table 4-7, it states that for a device-initiated
@@ -2271,7 +2272,6 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
                        dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) %
                                                dwc->ev_buf->length;
                }
-               dwc->connected = false;
        } else {
                __dwc3_gadget_start(dwc);
        }
@@ -2523,6 +2523,7 @@ static void dwc3_gadget_set_ssp_rate(struct usb_gadget *g,
        unsigned long           flags;
 
        spin_lock_irqsave(&dwc->lock, flags);
+       dwc->gadget_max_speed = USB_SPEED_SUPER_PLUS;
        dwc->gadget_ssp_rate = rate;
        spin_unlock_irqrestore(&dwc->lock, flags);
 }
@@ -3321,8 +3322,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
 {
        u32                     reg;
 
-       dwc->connected = true;
-
        /*
         * WORKAROUND: DWC3 revisions <1.88a have an issue which
         * would cause a missing Disconnect Event if there's a
@@ -3362,6 +3361,7 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
         * transfers."
         */
        dwc3_stop_active_transfers(dwc);
+       dwc->connected = true;
 
        reg = dwc3_readl(dwc->regs, DWC3_DCTL);
        reg &= ~DWC3_DCTL_TSTCTRL_MASK;
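
The dwc3 gadget hunks reorder when dwc->connected changes relative to teardown: it is cleared before the soft-disconnect sequence, so __dwc3_gadget_ep_queue(), which now also checks dwc->connected, rejects new requests during the window, and it is set only after dwc3_stop_active_transfers() in the reset path. The ordering principle, using the names from the diff:

        /* Disconnect: close the gate first, then drain. */
        dwc->connected = false;         /* new ep_queue() calls now fail */
        /* ... stop the controller / drain the event buffer ... */

        /* Reset: drain first, then open the gate. */
        dwc3_stop_active_transfers(dwc);
        dwc->connected = true;          /* requests are accepted again */
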
index 0d56f33..15a607c 100644 (file)
@@ -97,6 +97,8 @@ struct gadget_config_name {
        struct list_head list;
 };
 
+#define USB_MAX_STRING_WITH_NULL_LEN   (USB_MAX_STRING_LEN+1)
+
 static int usb_string_copy(const char *s, char **s_copy)
 {
        int ret;
@@ -106,12 +108,16 @@ static int usb_string_copy(const char *s, char **s_copy)
        if (ret > USB_MAX_STRING_LEN)
                return -EOVERFLOW;
 
-       str = kstrdup(s, GFP_KERNEL);
-       if (!str)
-               return -ENOMEM;
+       if (copy) {
+               str = copy;
+       } else {
+               str = kmalloc(USB_MAX_STRING_WITH_NULL_LEN, GFP_KERNEL);
+               if (!str)
+                       return -ENOMEM;
+       }
+       strcpy(str, s);
        if (str[ret - 1] == '\n')
                str[ret - 1] = '\0';
-       kfree(copy);
        *s_copy = str;
        return 0;
 }
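
The configfs hunk replaces kstrdup()-the-new/kfree()-the-old with a single max-sized buffer reused in place, so a concurrent reader still holding the old pointer never sees freed memory. Sketched with placeholder names (126 mirrors USB_MAX_STRING_LEN):

        #define EX_MAX_LEN              126
        #define EX_MAX_WITH_NUL_LEN     (EX_MAX_LEN + 1)

        static int ex_string_set(const char *s, char **slot)
        {
                char *str = *slot;

                if (strlen(s) > EX_MAX_LEN)
                        return -EOVERFLOW;
                if (!str) {
                        str = kmalloc(EX_MAX_WITH_NUL_LEN, GFP_KERNEL);
                        if (!str)
                                return -ENOMEM;
                }
                strcpy(str, s);         /* reuse in place: no kfree() under readers */
                *slot = str;
                return 0;
        }
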
index 8d387e0..c80f9bd 100644 (file)
@@ -153,6 +153,11 @@ static int udc_pci_probe(
        pci_set_master(pdev);
        pci_try_set_mwi(pdev);
 
+       dev->phys_addr = resource;
+       dev->irq = pdev->irq;
+       dev->pdev = pdev;
+       dev->dev = &pdev->dev;
+
        /* init dma pools */
        if (use_dma) {
                retval = init_dma_pools(dev);
@@ -160,11 +165,6 @@ static int udc_pci_probe(
                        goto err_dma;
        }
 
-       dev->phys_addr = resource;
-       dev->irq = pdev->irq;
-       dev->pdev = pdev;
-       dev->dev = &pdev->dev;
-
        /* general probing */
        if (udc_probe(dev)) {
                retval = -ENODEV;
index fe010cc..2f27dc0 100644 (file)
@@ -397,6 +397,13 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci)
        xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
        if (mtk->lpm_support)
                xhci->quirks |= XHCI_LPM_SUPPORT;
+
+       /*
+        * MTK xHCI 0.96: PSA is 1 by default even when streams are not
+        * supported, and 3 when they are.
+        */
+       if (xhci->hci_version < 0x100 && HCC_MAX_PSA(xhci->hcc_params) == 4)
+               xhci->quirks |= XHCI_BROKEN_STREAMS;
 }
 
 /* called during probe() after chip reset completes */
@@ -548,7 +555,8 @@ static int xhci_mtk_probe(struct platform_device *pdev)
        if (ret)
                goto put_usb3_hcd;
 
-       if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
+       if (HCC_MAX_PSA(xhci->hcc_params) >= 4 &&
+           !(xhci->quirks & XHCI_BROKEN_STREAMS))
                xhci->shared_hcd->can_do_streams = 1;
 
        ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
index 670e4d9..dcc88df 100644 (file)
@@ -117,7 +117,6 @@ MODULE_DEVICE_TABLE(usb, ld_usb_table);
 MODULE_AUTHOR("Michael Hund <mhund@ld-didactic.de>");
 MODULE_DESCRIPTION("LD USB Driver");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("LD USB Devices");
 
 /* All interrupt in transfers are collected in a ring buffer to
  * avoid racing conditions and get better performance of the driver.
index 1cd8772..fc0457d 100644 (file)
@@ -2004,10 +2004,14 @@ static void musb_pm_runtime_check_session(struct musb *musb)
                MUSB_DEVCTL_HR;
        switch (devctl & ~s) {
        case MUSB_QUIRK_B_DISCONNECT_99:
-               musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
-               schedule_delayed_work(&musb->irq_work,
-                                     msecs_to_jiffies(1000));
-               break;
+               if (musb->quirk_retries && !musb->flush_irq_work) {
+                       musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
+                       schedule_delayed_work(&musb->irq_work,
+                                             msecs_to_jiffies(1000));
+                       musb->quirk_retries--;
+                       break;
+               }
+               fallthrough;
        case MUSB_QUIRK_B_INVALID_VBUS_91:
                if (musb->quirk_retries && !musb->flush_irq_work) {
                        musb_dbg(musb,
index 5eb895b..f4304ce 100644 (file)
@@ -656,6 +656,13 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
                need_auto_sense = 1;
        }
 
+       /* Some devices (Kindle) require another command after SYNC CACHE */
+       if ((us->fflags & US_FL_SENSE_AFTER_SYNC) &&
+                       srb->cmnd[0] == SYNCHRONIZE_CACHE) {
+               usb_stor_dbg(us, "-- sense after SYNC CACHE\n");
+               need_auto_sense = 1;
+       }
+
        /*
         * If we have a failure, we're going to do a REQUEST_SENSE 
         * automatically.  Note that we differentiate between a command
index 5732e96..efa972b 100644 (file)
@@ -2211,6 +2211,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_READ_DISC_INFO ),
 
+/*
+ * Reported by Matthias Schwarzott <zzam@gentoo.org>
+ * The Amazon Kindle treats SYNCHRONIZE CACHE as an indication that
+ * the host may be finished with it, and automatically ejects its
+ * emulated media unless it receives another command within one second.
+ */
+UNUSUAL_DEV( 0x1949, 0x0004, 0x0000, 0x9999,
+               "Amazon",
+               "Kindle",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_SENSE_AFTER_SYNC ),
+
 /*
  * Reported by Oliver Neukum <oneukum@suse.com>
  * This device morphs spontaneously into another device if the access
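
The US_FL_SENSE_AFTER_SYNC flag set by the new entry is the one consumed by the transport.c hunk above: the forced auto-sense after SYNCHRONIZE CACHE doubles as the "another command within one second" that stops the Kindle from ejecting its emulated media.
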
index be0b646..ce7af39 100644 (file)
@@ -942,6 +942,7 @@ static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
 
        port->supply_voltage = mv;
        port->current_limit = max_ma;
+       power_supply_changed(port->psy);
 
        if (port->tcpc->set_current_limit)
                ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
@@ -2928,6 +2929,7 @@ static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo,
 
        port->pps_data.supported = false;
        port->usb_type = POWER_SUPPLY_USB_TYPE_PD;
+       power_supply_changed(port->psy);
 
        /*
         * Select the source PDO providing the most power which has a
@@ -2952,6 +2954,7 @@ static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo,
                                port->pps_data.supported = true;
                                port->usb_type =
                                        POWER_SUPPLY_USB_TYPE_PD_PPS;
+                               power_supply_changed(port->psy);
                        }
                        continue;
                default:
@@ -3109,6 +3112,7 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
                                                  port->pps_data.out_volt));
                port->pps_data.op_curr = min(port->pps_data.max_curr,
                                             port->pps_data.op_curr);
+               power_supply_changed(port->psy);
        }
 
        return src_pdo;
@@ -3344,6 +3348,7 @@ static int tcpm_set_charge(struct tcpm_port *port, bool charge)
                        return ret;
        }
        port->vbus_charge = charge;
+       power_supply_changed(port->psy);
        return 0;
 }
 
@@ -3523,6 +3528,7 @@ static void tcpm_reset_port(struct tcpm_port *port)
        port->try_src_count = 0;
        port->try_snk_count = 0;
        port->usb_type = POWER_SUPPLY_USB_TYPE_C;
+       power_supply_changed(port->psy);
        port->nr_sink_caps = 0;
        port->sink_cap_done = false;
        if (port->tcpc->enable_frs)
@@ -5167,7 +5173,7 @@ static void tcpm_enable_frs_work(struct kthread_work *work)
                goto unlock;
 
        /* Send when the state machine is idle */
-       if (port->state != SNK_READY || port->vdm_state != VDM_STATE_DONE || port->send_discover)
+       if (port->state != SNK_READY || port->vdm_sm_running || port->send_discover)
                goto resched;
 
        port->upcoming_state = GET_SINK_CAP;
@@ -5905,7 +5911,7 @@ static int tcpm_psy_set_prop(struct power_supply *psy,
                ret = -EINVAL;
                break;
        }
-
+       power_supply_changed(port->psy);
        return ret;
 }
 
@@ -6058,6 +6064,7 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
        err = devm_tcpm_psy_register(port);
        if (err)
                goto out_role_sw_put;
+       power_supply_changed(port->psy);
 
        port->typec_port = typec_register_port(port->dev, &port->typec_caps);
        if (IS_ERR(port->typec_port)) {
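
All of the tcpm.c hunks follow one pattern: every site that updates a cached power-supply property (supply voltage, current limit, usb_type, PPS parameters, charging state) now calls power_supply_changed(), so user space gets a change notification instead of reading stale values until some unrelated event fires.
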
index 6e6ef63..29bd1c5 100644 (file)
@@ -64,7 +64,6 @@ enum {
 struct tps6598x_rx_identity_reg {
        u8 status;
        struct usb_pd_identity identity;
-       u32 vdo[3];
 } __packed;
 
 /* Standard Task return codes */
index 8f1de1f..d8d3892 100644 (file)
@@ -63,6 +63,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
 
                dev_info(dev, "stub up\n");
 
+               mutex_lock(&sdev->ud.sysfs_lock);
                spin_lock_irq(&sdev->ud.lock);
 
                if (sdev->ud.status != SDEV_ST_AVAILABLE) {
@@ -87,13 +88,13 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
                tcp_rx = kthread_create(stub_rx_loop, &sdev->ud, "stub_rx");
                if (IS_ERR(tcp_rx)) {
                        sockfd_put(socket);
-                       return -EINVAL;
+                       goto unlock_mutex;
                }
                tcp_tx = kthread_create(stub_tx_loop, &sdev->ud, "stub_tx");
                if (IS_ERR(tcp_tx)) {
                        kthread_stop(tcp_rx);
                        sockfd_put(socket);
-                       return -EINVAL;
+                       goto unlock_mutex;
                }
 
                /* get task structs now */
@@ -112,6 +113,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
                wake_up_process(sdev->ud.tcp_rx);
                wake_up_process(sdev->ud.tcp_tx);
 
+               mutex_unlock(&sdev->ud.sysfs_lock);
+
        } else {
                dev_info(dev, "stub down\n");
 
@@ -122,6 +125,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
                spin_unlock_irq(&sdev->ud.lock);
 
                usbip_event_add(&sdev->ud, SDEV_EVENT_DOWN);
+               mutex_unlock(&sdev->ud.sysfs_lock);
        }
 
        return count;
@@ -130,6 +134,8 @@ sock_err:
        sockfd_put(socket);
 err:
        spin_unlock_irq(&sdev->ud.lock);
+unlock_mutex:
+       mutex_unlock(&sdev->ud.sysfs_lock);
        return -EINVAL;
 }
 static DEVICE_ATTR_WO(usbip_sockfd);
@@ -270,6 +276,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev)
        sdev->ud.side           = USBIP_STUB;
        sdev->ud.status         = SDEV_ST_AVAILABLE;
        spin_lock_init(&sdev->ud.lock);
+       mutex_init(&sdev->ud.sysfs_lock);
        sdev->ud.tcp_socket     = NULL;
        sdev->ud.sockfd         = -1;
 
index d60ce17..ea2a20e 100644 (file)
@@ -263,6 +263,9 @@ struct usbip_device {
        /* lock for status */
        spinlock_t lock;
 
+       /* mutex for synchronizing sysfs store paths */
+       struct mutex sysfs_lock;
+
        int sockfd;
        struct socket *tcp_socket;
 
index 5d88917..086ca76 100644 (file)
@@ -70,6 +70,7 @@ static void event_handler(struct work_struct *work)
        while ((ud = get_event()) != NULL) {
                usbip_dbg_eh("pending event %lx\n", ud->event);
 
+               mutex_lock(&ud->sysfs_lock);
                /*
                 * NOTE: shutdown must come first.
                 * Shutdown the device.
@@ -90,6 +91,7 @@ static void event_handler(struct work_struct *work)
                        ud->eh_ops.unusable(ud);
                        unset_event(ud, USBIP_EH_UNUSABLE);
                }
+               mutex_unlock(&ud->sysfs_lock);
 
                wake_up(&ud->eh_waitq);
        }
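
Together with the stub, vhci and vudc hunks in this series, the new ud->sysfs_lock serializes every sysfs attach/detach store against this event handler, closing the race where a store could still be wiring up a socket and rx/tx threads while shutdown tore the same usbip_device down. The resulting pattern, sketched:

	/* sysfs store path (stub_dev.c, vhci_sysfs.c, vudc_sysfs.c) */
	mutex_lock(&ud->sysfs_lock);
	spin_lock_irq(&ud->lock);
	/* ... validate status, install socket and rx/tx threads ... */
	spin_unlock_irq(&ud->lock);
	mutex_unlock(&ud->sysfs_lock);

	/* event handler (event.c) */
	mutex_lock(&ud->sysfs_lock);
	/* ... eh_ops.shutdown() / reset() / unusable() ... */
	mutex_unlock(&ud->sysfs_lock);
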
index 3209b5d..4ba6bcd 100644 (file)
@@ -594,6 +594,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                                pr_err("invalid port number %d\n", wIndex);
                                goto error;
                        }
+                       if (wValue >= 32)
+                               goto error;
                        if (hcd->speed == HCD_USB3) {
                                if ((vhci_hcd->port_status[rhport] &
                                     USB_SS_PORT_STAT_POWER) != 0) {
@@ -1099,6 +1101,7 @@ static void vhci_device_init(struct vhci_device *vdev)
        vdev->ud.side   = USBIP_VHCI;
        vdev->ud.status = VDEV_ST_NULL;
        spin_lock_init(&vdev->ud.lock);
+       mutex_init(&vdev->ud.sysfs_lock);
 
        INIT_LIST_HEAD(&vdev->priv_rx);
        INIT_LIST_HEAD(&vdev->priv_tx);
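
The added wValue check matters because the feature selector is later used as a shift count into the 32-bit port_status word; a selector of 32 or more is an undefined shift (the classic UBSAN shift-out-of-bounds report). Roughly, from elsewhere in vhci_hub_control():

	/* sketch of the use the new check protects */
	vhci_hcd->port_status[rhport] &= ~(1 << wValue);	/* undefined for wValue >= 32 */
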
index c4b4256..e2847cd 100644 (file)
@@ -185,6 +185,8 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
 
        usbip_dbg_vhci_sysfs("enter\n");
 
+       mutex_lock(&vdev->ud.sysfs_lock);
+
        /* lock */
        spin_lock_irqsave(&vhci->lock, flags);
        spin_lock(&vdev->ud.lock);
@@ -195,6 +197,7 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
                /* unlock */
                spin_unlock(&vdev->ud.lock);
                spin_unlock_irqrestore(&vhci->lock, flags);
+               mutex_unlock(&vdev->ud.sysfs_lock);
 
                return -EINVAL;
        }
@@ -205,6 +208,8 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
 
        usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN);
 
+       mutex_unlock(&vdev->ud.sysfs_lock);
+
        return 0;
 }
 
@@ -349,30 +354,36 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
        else
                vdev = &vhci->vhci_hcd_hs->vdev[rhport];
 
+       mutex_lock(&vdev->ud.sysfs_lock);
+
        /* Extract socket from fd. */
        socket = sockfd_lookup(sockfd, &err);
        if (!socket) {
                dev_err(dev, "failed to lookup sock");
-               return -EINVAL;
+               err = -EINVAL;
+               goto unlock_mutex;
        }
        if (socket->type != SOCK_STREAM) {
                dev_err(dev, "Expecting SOCK_STREAM - found %d",
                        socket->type);
                sockfd_put(socket);
-               return -EINVAL;
+               err = -EINVAL;
+               goto unlock_mutex;
        }
 
        /* create threads before locking */
        tcp_rx = kthread_create(vhci_rx_loop, &vdev->ud, "vhci_rx");
        if (IS_ERR(tcp_rx)) {
                sockfd_put(socket);
-               return -EINVAL;
+               err = -EINVAL;
+               goto unlock_mutex;
        }
        tcp_tx = kthread_create(vhci_tx_loop, &vdev->ud, "vhci_tx");
        if (IS_ERR(tcp_tx)) {
                kthread_stop(tcp_rx);
                sockfd_put(socket);
-               return -EINVAL;
+               err = -EINVAL;
+               goto unlock_mutex;
        }
 
        /* get task structs now */
@@ -397,7 +408,8 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
                 * Will be retried from userspace
                 * if there's another free port.
                 */
-               return -EBUSY;
+               err = -EBUSY;
+               goto unlock_mutex;
        }
 
        dev_info(dev, "pdev(%u) rhport(%u) sockfd(%d)\n",
@@ -423,7 +435,15 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
 
        rh_port_connect(vdev, speed);
 
+       dev_info(dev, "Device attached\n");
+
+       mutex_unlock(&vdev->ud.sysfs_lock);
+
        return count;
+
+unlock_mutex:
+       mutex_unlock(&vdev->ud.sysfs_lock);
+       return err;
 }
 static DEVICE_ATTR_WO(attach);
 
index c8eeabd..2bc428f 100644 (file)
@@ -572,6 +572,7 @@ static int init_vudc_hw(struct vudc *udc)
        init_waitqueue_head(&udc->tx_waitq);
 
        spin_lock_init(&ud->lock);
+       mutex_init(&ud->sysfs_lock);
        ud->status = SDEV_ST_AVAILABLE;
        ud->side = USBIP_VUDC;
 
index a3ec39f..f7633ee 100644 (file)
@@ -112,6 +112,7 @@ static ssize_t usbip_sockfd_store(struct device *dev,
                dev_err(dev, "no device");
                return -ENODEV;
        }
+       mutex_lock(&udc->ud.sysfs_lock);
        spin_lock_irqsave(&udc->lock, flags);
        /* Don't export what we don't have */
        if (!udc->driver || !udc->pullup) {
@@ -174,7 +175,7 @@ static ssize_t usbip_sockfd_store(struct device *dev,
 
                udc->ud.tcp_socket = socket;
                udc->ud.tcp_rx = tcp_rx;
-               udc->ud.tcp_rx = tcp_tx;
+               udc->ud.tcp_tx = tcp_tx;
                udc->ud.status = SDEV_ST_USED;
 
                spin_unlock_irq(&udc->ud.lock);
@@ -187,6 +188,8 @@ static ssize_t usbip_sockfd_store(struct device *dev,
 
                wake_up_process(udc->ud.tcp_rx);
                wake_up_process(udc->ud.tcp_tx);
+
+               mutex_unlock(&udc->ud.sysfs_lock);
                return count;
 
        } else {
@@ -207,6 +210,7 @@ static ssize_t usbip_sockfd_store(struct device *dev,
        }
 
        spin_unlock_irqrestore(&udc->lock, flags);
+       mutex_unlock(&udc->ud.sysfs_lock);
 
        return count;
 
@@ -216,6 +220,7 @@ unlock_ud:
        spin_unlock_irq(&udc->ud.lock);
 unlock:
        spin_unlock_irqrestore(&udc->lock, flags);
+       mutex_unlock(&udc->ud.sysfs_lock);
 
        return ret;
 }
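
Besides the locking, note the one-character fix above: the old code assigned tcp_tx to ud.tcp_rx, clobbering the rx thread pointer and leaving ud.tcp_tx NULL, so the subsequent wake_up_process(udc->ud.tcp_tx) dereferenced a NULL task pointer.
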
index 7c8bbfc..d555a6a 100644 (file)
@@ -431,8 +431,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        }
 
        adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
-                                   dev, &ifc_vdpa_ops,
-                                   IFCVF_MAX_QUEUE_PAIRS * 2, NULL);
+                                   dev, &ifc_vdpa_ops, NULL);
        if (adapter == NULL) {
                IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
                return -ENOMEM;
@@ -456,7 +455,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
                vf->vring[i].irq = -EINVAL;
 
-       ret = vdpa_register_device(&adapter->vdpa);
+       ret = vdpa_register_device(&adapter->vdpa, IFCVF_MAX_QUEUE_PAIRS * 2);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus");
                goto err;
index 08f742f..b6cc53b 100644 (file)
@@ -4,9 +4,13 @@
 #ifndef __MLX5_VDPA_H__
 #define __MLX5_VDPA_H__
 
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
 #include <linux/vdpa.h>
 #include <linux/mlx5/driver.h>
 
+#define MLX5V_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
+
 struct mlx5_vdpa_direct_mr {
        u64 start;
        u64 end;
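
MLX5V_ETH_HARD_MTU is the fixed per-frame overhead that the NIC's MTU accounts for but virtio-net's does not: 14 bytes of Ethernet header plus 4 bytes of VLAN tag plus 4 bytes of FCS, 22 bytes in total. With the query_mtu() helper added later in this series, a port whose hardware MTU reads 1522 is therefore exposed to the guest as the expected 1500.
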
index d300f79..3908ff2 100644 (file)
@@ -219,6 +219,11 @@ static void destroy_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_m
        mlx5_vdpa_destroy_mkey(mvdev, &mkey->mkey);
 }
 
+static struct device *get_dma_device(struct mlx5_vdpa_dev *mvdev)
+{
+       return &mvdev->mdev->pdev->dev;
+}
+
 static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr,
                         struct vhost_iotlb *iotlb)
 {
@@ -234,7 +239,7 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
        u64 pa;
        u64 paend;
        struct scatterlist *sg;
-       struct device *dma = mvdev->mdev->device;
+       struct device *dma = get_dma_device(mvdev);
 
        for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
             map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) {
@@ -291,7 +296,7 @@ err_map:
 
 static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
 {
-       struct device *dma = mvdev->mdev->device;
+       struct device *dma = get_dma_device(mvdev);
 
        destroy_direct_mr(mvdev, mr);
        dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
index 96e6421..6521cbd 100644 (file)
@@ -246,7 +246,8 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
        if (err)
                goto err_key;
 
-       kick_addr = pci_resource_start(mdev->pdev, 0) + offset;
+       kick_addr = mdev->bar_addr + offset;
+
        res->kick_addr = ioremap(kick_addr, PAGE_SIZE);
        if (!res->kick_addr) {
                err = -ENOMEM;
index 10e9b09..4d2809c 100644 (file)
@@ -820,7 +820,7 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
        MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn);
        MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent);
        MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0,
-                !!(ndev->mvdev.actual_features & VIRTIO_F_VERSION_1));
+                !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1)));
        MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
        MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
        MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
@@ -1169,6 +1169,7 @@ static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *m
                return;
        }
        mvq->avail_idx = attr.available_index;
+       mvq->used_idx = attr.used_index;
 }
 
 static void suspend_vqs(struct mlx5_vdpa_net *ndev)
@@ -1426,6 +1427,7 @@ static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
                return -EINVAL;
        }
 
+       mvq->used_idx = state->avail_index;
        mvq->avail_idx = state->avail_index;
        return 0;
 }
@@ -1443,7 +1445,11 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
         * that cares about emulating the index after vq is stopped.
         */
        if (!mvq->initialized) {
-               state->avail_index = mvq->avail_idx;
+               /* Firmware returns a wrong value for the available index.
+                * Since both values should be identical, we take the value of
+                * used_idx which is reported correctly.
+                */
+               state->avail_index = mvq->used_idx;
                return 0;
        }
 
@@ -1452,7 +1458,7 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
                mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n");
                return err;
        }
-       state->avail_index = attr.available_index;
+       state->avail_index = attr.used_index;
        return 0;
 }
 
@@ -1540,21 +1546,11 @@ static void teardown_virtqueues(struct mlx5_vdpa_net *ndev)
        }
 }
 
-static void clear_virtqueues(struct mlx5_vdpa_net *ndev)
-{
-       int i;
-
-       for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) {
-               ndev->vqs[i].avail_idx = 0;
-               ndev->vqs[i].used_idx = 0;
-       }
-}
-
 /* TODO: cross-endian support */
 static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
 {
        return virtio_legacy_is_little_endian() ||
-               (mvdev->actual_features & (1ULL << VIRTIO_F_VERSION_1));
+               (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1));
 }
 
 static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
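
The create_virtqueue() hunk fixes a real bug: VIRTIO_F_VERSION_1 is a bit number (32), not a mask, so the old test masked actual_features with 0x20, i.e. bit 5, an unrelated feature bit, and could never observe bit 32. The mlx5_vdpa_is_little_endian() change is the same test restyled with BIT_ULL for consistency (1ULL << 32 was already correct). A worked example:

	/* VIRTIO_F_VERSION_1 == 32 */
	features & VIRTIO_F_VERSION_1;			/* wrong: masks with 0x20 (bit 5) */
	features & BIT_ULL(VIRTIO_F_VERSION_1);		/* right: masks with 1ULL << 32   */
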
@@ -1785,7 +1781,6 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
        if (!status) {
                mlx5_vdpa_info(mvdev, "performing device reset\n");
                teardown_driver(ndev);
-               clear_virtqueues(ndev);
                mlx5_vdpa_destroy_mr(&ndev->mvdev);
                ndev->mvdev.status = 0;
                ndev->mvdev.mlx_features = 0;
@@ -1907,6 +1902,19 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
        .free = mlx5_vdpa_free,
 };
 
+static int query_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
+{
+       u16 hw_mtu;
+       int err;
+
+       err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
+       if (err)
+               return err;
+
+       *mtu = hw_mtu - MLX5V_ETH_HARD_MTU;
+       return 0;
+}
+
 static int alloc_resources(struct mlx5_vdpa_net *ndev)
 {
        struct mlx5_vdpa_net_resources *res = &ndev->res;
@@ -1982,7 +1990,7 @@ static int mlx5v_probe(struct auxiliary_device *adev,
        max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
 
        ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
-                                2 * mlx5_vdpa_max_qps(max_vqs), NULL);
+                                NULL);
        if (IS_ERR(ndev))
                return PTR_ERR(ndev);
 
@@ -1992,7 +2000,7 @@ static int mlx5v_probe(struct auxiliary_device *adev,
        init_mvqs(ndev);
        mutex_init(&ndev->reslock);
        config = &ndev->config;
-       err = mlx5_query_nic_vport_mtu(mdev, &ndev->mtu);
+       err = query_mtu(mdev, &ndev->mtu);
        if (err)
                goto err_mtu;
 
@@ -2009,7 +2017,7 @@ static int mlx5v_probe(struct auxiliary_device *adev,
        if (err)
                goto err_res;
 
-       err = vdpa_register_device(&mvdev->vdev);
+       err = vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs));
        if (err)
                goto err_reg;
 
index da67f07..5cffce6 100644 (file)
@@ -69,7 +69,6 @@ static void vdpa_release_dev(struct device *d)
  * initialized but before registered.
  * @parent: the parent device
  * @config: the bus operations that is supported by this device
- * @nvqs: number of virtqueues supported by this device
  * @size: size of the parent structure that contains private data
  * @name: name of the vdpa device; optional.
  *
@@ -81,7 +80,7 @@ static void vdpa_release_dev(struct device *d)
  */
 struct vdpa_device *__vdpa_alloc_device(struct device *parent,
                                        const struct vdpa_config_ops *config,
-                                       int nvqs, size_t size, const char *name)
+                                       size_t size, const char *name)
 {
        struct vdpa_device *vdev;
        int err = -EINVAL;
@@ -107,7 +106,6 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
        vdev->index = err;
        vdev->config = config;
        vdev->features_valid = false;
-       vdev->nvqs = nvqs;
 
        if (name)
                err = dev_set_name(&vdev->dev, "%s", name);
@@ -136,10 +134,12 @@ static int vdpa_name_match(struct device *dev, const void *data)
        return (strcmp(dev_name(&vdev->dev), data) == 0);
 }
 
-static int __vdpa_register_device(struct vdpa_device *vdev)
+static int __vdpa_register_device(struct vdpa_device *vdev, int nvqs)
 {
        struct device *dev;
 
+       vdev->nvqs = nvqs;
+
        lockdep_assert_held(&vdpa_dev_mutex);
        dev = bus_find_device(&vdpa_bus, NULL, dev_name(&vdev->dev), vdpa_name_match);
        if (dev) {
@@ -155,15 +155,16 @@ static int __vdpa_register_device(struct vdpa_device *vdev)
  * Caller must invoke this routine in the management device dev_add()
  * callback after setting up valid mgmtdev for this vdpa device.
  * @vdev: the vdpa device to be registered to vDPA bus
+ * @nvqs: number of virtqueues supported by this device
  *
  * Returns an error when fail to add device to vDPA bus
  */
-int _vdpa_register_device(struct vdpa_device *vdev)
+int _vdpa_register_device(struct vdpa_device *vdev, int nvqs)
 {
        if (!vdev->mdev)
                return -EINVAL;
 
-       return __vdpa_register_device(vdev);
+       return __vdpa_register_device(vdev, nvqs);
 }
 EXPORT_SYMBOL_GPL(_vdpa_register_device);
 
@@ -171,15 +172,16 @@ EXPORT_SYMBOL_GPL(_vdpa_register_device);
  * vdpa_register_device - register a vDPA device
  * Callers must have a succeed call of vdpa_alloc_device() before.
  * @vdev: the vdpa device to be registered to vDPA bus
+ * @nvqs: number of virtqueues supported by this device
  *
  * Returns an error when fail to add to vDPA bus
  */
-int vdpa_register_device(struct vdpa_device *vdev)
+int vdpa_register_device(struct vdpa_device *vdev, int nvqs)
 {
        int err;
 
        mutex_lock(&vdpa_dev_mutex);
-       err = __vdpa_register_device(vdev);
+       err = __vdpa_register_device(vdev, nvqs);
        mutex_unlock(&vdpa_dev_mutex);
        return err;
 }
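
After this change, drivers commit to a queue count at registration rather than allocation time, which is also when vdev->nvqs is now filled in. A minimal sketch of the updated calling convention, mirroring the ifcvf and mlx5 conversions above (struct and variable names illustrative):

	adapter = vdpa_alloc_device(struct my_adapter, vdpa,
				    parent_dev, &my_config_ops, NULL);
	if (adapter == NULL)
		return -ENOMEM;
	/* ... hardware setup, vring init ... */
	err = vdpa_register_device(&adapter->vdpa, nvqs);	/* queue count moves here */
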
index d594284..5b6b2f8 100644 (file)
@@ -235,7 +235,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
                ops = &vdpasim_config_ops;
 
        vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
-                                   dev_attr->nvqs, dev_attr->name);
+                                   dev_attr->name);
        if (!vdpasim)
                goto err_alloc;
 
index d344c5b..a1ab616 100644 (file)
@@ -110,8 +110,7 @@ out:
 
 static void vdpasim_net_get_config(struct vdpasim *vdpasim, void *config)
 {
-       struct virtio_net_config *net_config =
-               (struct virtio_net_config *)config;
+       struct virtio_net_config *net_config = config;
 
        net_config->mtu = cpu_to_vdpasim16(vdpasim, 1500);
        net_config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP);
@@ -147,7 +146,7 @@ static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
        if (IS_ERR(simdev))
                return PTR_ERR(simdev);
 
-       ret = _vdpa_register_device(&simdev->vdpa);
+       ret = _vdpa_register_device(&simdev->vdpa, VDPASIM_NET_VQ_NUM);
        if (ret)
                goto reg_err;
 
index 5533df9..67d0bf4 100644 (file)
@@ -21,8 +21,8 @@ config VFIO_VIRQFD
 
 menuconfig VFIO
        tristate "VFIO Non-Privileged userspace driver framework"
-       depends on IOMMU_API
-       select VFIO_IOMMU_TYPE1 if (X86 || S390 || ARM || ARM64)
+       select IOMMU_API
+       select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64)
        help
          VFIO provides a framework for secure userspace device drivers.
          See Documentation/driver-api/vfio.rst for more details.
index ac3c1dd..4abddbe 100644 (file)
@@ -42,6 +42,6 @@ config VFIO_PCI_IGD
 
 config VFIO_PCI_NVLINK2
        def_bool y
-       depends on VFIO_PCI && PPC_POWERNV
+       depends on VFIO_PCI && PPC_POWERNV && SPAPR_TCE_IOMMU
        help
          VFIO PCI support for P9 Witherspoon machine with NVIDIA V100 GPUs
index dc1a3c4..ab34110 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config VFIO_PLATFORM
        tristate "VFIO support for platform devices"
-       depends on VFIO && EVENTFD && (ARM || ARM64)
+       depends on VFIO && EVENTFD && (ARM || ARM64 || COMPILE_TEST)
        select VFIO_VIRQFD
        help
          Support for platform devices with VFIO. This is required to make
@@ -12,7 +12,7 @@ config VFIO_PLATFORM
 
 config VFIO_AMBA
        tristate "VFIO support for AMBA devices"
-       depends on VFIO_PLATFORM && ARM_AMBA
+       depends on VFIO_PLATFORM && (ARM_AMBA || COMPILE_TEST)
        help
          Support for ARM AMBA devices with VFIO. This is required to make
          use of ARM AMBA devices present on the system using the VFIO
index 4bb162c..45cbfd4 100644 (file)
@@ -189,7 +189,7 @@ static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
 }
 
 static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu,
-                                               dma_addr_t start, size_t size)
+                                               dma_addr_t start, u64 size)
 {
        struct rb_node *res = NULL;
        struct rb_node *node = iommu->dma_list.rb_node;
@@ -739,6 +739,12 @@ out:
        ret = vfio_lock_acct(dma, lock_acct, false);
 
 unpin_out:
+       if (batch->size == 1 && !batch->offset) {
+               /* May be a VM_PFNMAP pfn, which the batch can't remember. */
+               put_pfn(pfn, dma->prot);
+               batch->size = 0;
+       }
+
        if (ret < 0) {
                if (pinned && !rsvd) {
                        for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
@@ -785,7 +791,12 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
                return -ENODEV;
 
        ret = vaddr_get_pfns(mm, vaddr, 1, dma->prot, pfn_base, pages);
-       if (ret == 1 && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
+       if (ret != 1)
+               goto out;
+
+       ret = 0;
+
+       if (do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
                ret = vfio_lock_acct(dma, 1, true);
                if (ret) {
                        put_pfn(*pfn_base, dma->prot);
@@ -797,6 +808,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
                }
        }
 
+out:
        mmput(mm);
        return ret;
 }
@@ -1288,7 +1300,7 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
        int ret = -EINVAL, retries = 0;
        unsigned long pgshift;
        dma_addr_t iova = unmap->iova;
-       unsigned long size = unmap->size;
+       u64 size = unmap->size;
        bool unmap_all = unmap->flags & VFIO_DMA_UNMAP_FLAG_ALL;
        bool invalidate_vaddr = unmap->flags & VFIO_DMA_UNMAP_FLAG_VADDR;
        struct rb_node *n, *first_n;
@@ -1304,14 +1316,12 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
        if (unmap_all) {
                if (iova || size)
                        goto unlock;
-               size = SIZE_MAX;
-       } else if (!size || size & (pgsize - 1)) {
+               size = U64_MAX;
+       } else if (!size || size & (pgsize - 1) ||
+                  iova + size - 1 < iova || size > SIZE_MAX) {
                goto unlock;
        }
 
-       if (iova + size - 1 < iova || size > SIZE_MAX)
-               goto unlock;
-
        /* When dirty tracking is enabled, allow only min supported pgsize */
        if ((unmap->flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) &&
            (!iommu->dirty_page_tracking || (bitmap->pgsize != pgsize))) {
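
The switch from size_t/unsigned long to u64 is an ILP32 correctness fix: unmap->size arrives from user space as a 64-bit value, and the unmap-all path used SIZE_MAX as its range, which on a 32-bit kernel is only 2^32 - 1 and cannot span a 64-bit IOVA space. Carrying the size as u64 and using U64_MAX for unmap-all fixes that, while explicitly supplied sizes above SIZE_MAX are still rejected by the folded-in bounds check.
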
index ef688c8..e0a27e3 100644 (file)
@@ -308,8 +308,10 @@ static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
 
 static void vhost_vdpa_config_put(struct vhost_vdpa *v)
 {
-       if (v->config_ctx)
+       if (v->config_ctx) {
                eventfd_ctx_put(v->config_ctx);
+               v->config_ctx = NULL;
+       }
 }
 
 static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
@@ -329,8 +331,12 @@ static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
        if (!IS_ERR_OR_NULL(ctx))
                eventfd_ctx_put(ctx);
 
-       if (IS_ERR(v->config_ctx))
-               return PTR_ERR(v->config_ctx);
+       if (IS_ERR(v->config_ctx)) {
+               long ret = PTR_ERR(v->config_ctx);
+
+               v->config_ctx = NULL;
+               return ret;
+       }
 
        v->vdpa->config->set_config_cb(v->vdpa, &cb);
 
@@ -900,14 +906,10 @@ err:
 
 static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
 {
-       struct vhost_virtqueue *vq;
        int i;
 
-       for (i = 0; i < v->nvqs; i++) {
-               vq = &v->vqs[i];
-               if (vq->call_ctx.producer.irq)
-                       irq_bypass_unregister_producer(&vq->call_ctx.producer);
-       }
+       for (i = 0; i < v->nvqs; i++)
+               vhost_vdpa_unsetup_vq_irq(v, i);
 }
 
 static int vhost_vdpa_release(struct inode *inode, struct file *filep)
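
Both config_ctx changes apply the usual put-and-clear idiom: drop the reference (or discard the ERR_PTR) and then NULL the pointer, so that a later cleanup path cannot put a stale or invalid context a second time.

	/* the general idiom, sketched */
	if (ctx) {
		eventfd_ctx_put(ctx);
		ctx = NULL;	/* a repeated cleanup now finds no context */
	}
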
index a262e12..5ccb070 100644 (file)
@@ -332,8 +332,8 @@ static void vhost_vq_reset(struct vhost_dev *dev,
        vq->error_ctx = NULL;
        vq->kick = NULL;
        vq->log_ctx = NULL;
-       vhost_reset_is_le(vq);
        vhost_disable_cross_endian(vq);
+       vhost_reset_is_le(vq);
        vq->busyloop_timeout = 0;
        vq->umem = NULL;
        vq->iotlb = NULL;
index 44a5cd2..3406067 100644 (file)
@@ -1333,6 +1333,9 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
 
        ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
 
+       if (!ops->cursor)
+               return;
+
        ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
                    get_color(vc, info, c, 0));
 }
index c8b0ae6..4dc9077 100644 (file)
@@ -1031,7 +1031,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
                        PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
                if (!pdev) {
                        pr_err("Unable to find PCI Hyper-V video\n");
-                       kfree(info->apertures);
                        return -ENODEV;
                }
 
@@ -1129,7 +1128,6 @@ getmem_done:
        } else {
                pci_dev_put(pdev);
        }
-       kfree(info->apertures);
 
        return 0;
 
@@ -1141,7 +1139,6 @@ err2:
 err1:
        if (!gen2vm)
                pci_dev_put(pdev);
-       kfree(info->apertures);
 
        return -ENOMEM;
 }
index 42e09cc..4b15c00 100644 (file)
@@ -141,15 +141,14 @@ void virtio_config_changed(struct virtio_device *dev)
 }
 EXPORT_SYMBOL_GPL(virtio_config_changed);
 
-void virtio_config_disable(struct virtio_device *dev)
+static void virtio_config_disable(struct virtio_device *dev)
 {
        spin_lock_irq(&dev->config_lock);
        dev->config_enabled = false;
        spin_unlock_irq(&dev->config_lock);
 }
-EXPORT_SYMBOL_GPL(virtio_config_disable);
 
-void virtio_config_enable(struct virtio_device *dev)
+static void virtio_config_enable(struct virtio_device *dev)
 {
        spin_lock_irq(&dev->config_lock);
        dev->config_enabled = true;
@@ -158,7 +157,6 @@ void virtio_config_enable(struct virtio_device *dev)
        dev->config_change_pending = false;
        spin_unlock_irq(&dev->config_lock);
 }
-EXPORT_SYMBOL_GPL(virtio_config_enable);
 
 void virtio_add_status(struct virtio_device *dev, unsigned int status)
 {
index a286d22..56128b9 100644 (file)
@@ -548,8 +548,7 @@ static void virtio_mmio_release_dev(struct device *_d)
 {
        struct virtio_device *vdev =
                        container_of(_d, struct virtio_device, dev);
-       struct virtio_mmio_device *vm_dev =
-                       container_of(vdev, struct virtio_mmio_device, vdev);
+       struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
        struct platform_device *pdev = vm_dev->pdev;
 
        devm_kfree(&pdev->dev, vm_dev);
index e5dcb26..1635f42 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Watchdog driver for Marvell Armada 37xx SoCs
  *
- * Author: Marek Behun <marek.behun@nic.cz>
+ * Author: Marek Behún <kabel@kernel.org>
  */
 
 #include <linux/clk.h>
@@ -366,7 +366,7 @@ static struct platform_driver armada_37xx_wdt_driver = {
 
 module_platform_driver(armada_37xx_wdt_driver);
 
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
 MODULE_DESCRIPTION("Armada 37xx CPU Watchdog");
 
 MODULE_LICENSE("GPL v2");
index 9867a3a..688b112 100644 (file)
@@ -273,7 +273,6 @@ module_exit(cpu5wdt_exit_module);
 
 MODULE_AUTHOR("Heiko Ronsdorf <hero@ihg.uni-duisburg.de>");
 MODULE_DESCRIPTION("sma cpu5 watchdog driver");
-MODULE_SUPPORTED_DEVICE("sma cpu5 watchdog");
 MODULE_LICENSE("GPL");
 
 module_param_hw(port, int, ioport, 0);
index 808eeb4..1eafe0b 100644 (file)
@@ -172,7 +172,6 @@ MODULE_PARM_DESC(wd2_timeout, "Default watchdog2 timeout in 1/10secs");
 MODULE_AUTHOR("Eric Brower <ebrower@usa.net>");
 MODULE_DESCRIPTION("Hardware watchdog driver for Sun Microsystems CP1400/1500");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("watchdog");
 
 static void cpwd_writew(u16 val, void __iomem *addr)
 {
index 7008596..747e346 100644 (file)
@@ -46,7 +46,6 @@
 
 MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
 MODULE_DESCRIPTION("Hardware watchdog driver for Sun RIO");
-MODULE_SUPPORTED_DEVICE("watchdog");
 MODULE_LICENSE("GPL");
 
 #define DRIVER_NAME    "riowd"
index 41645fe..ea0efd2 100644 (file)
@@ -50,11 +50,11 @@ config XEN_BALLOON_MEMORY_HOTPLUG
 
          SUBSYSTEM=="memory", ACTION=="add", RUN+="/bin/sh -c '[ -f /sys$devpath/state ] && echo online > /sys$devpath/state'"
 
-config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+config XEN_MEMORY_HOTPLUG_LIMIT
        int "Hotplugged memory limit (in GiB) for a PV guest"
        default 512
        depends on XEN_HAVE_PVMMU
-       depends on XEN_BALLOON_MEMORY_HOTPLUG
+       depends on MEMORY_HOTPLUG
        help
          Maximum amount of memory (in GiB) that a PV guest can be
          expanded to when using memory hotplug.
index 8236e23..7bbfd58 100644 (file)
@@ -110,7 +110,7 @@ struct irq_info {
        unsigned short eoi_cpu; /* EOI must happen on this cpu-1 */
        unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
        u64 eoi_time;           /* Time in jiffies when to EOI. */
-       spinlock_t lock;
+       raw_spinlock_t lock;
 
        union {
                unsigned short virq;
@@ -312,7 +312,7 @@ static int xen_irq_info_common_setup(struct irq_info *info,
        info->evtchn = evtchn;
        info->cpu = cpu;
        info->mask_reason = EVT_MASK_REASON_EXPLICIT;
-       spin_lock_init(&info->lock);
+       raw_spin_lock_init(&info->lock);
 
        ret = set_evtchn_to_irq(evtchn, irq);
        if (ret < 0)
@@ -472,28 +472,28 @@ static void do_mask(struct irq_info *info, u8 reason)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&info->lock, flags);
+       raw_spin_lock_irqsave(&info->lock, flags);
 
        if (!info->mask_reason)
                mask_evtchn(info->evtchn);
 
        info->mask_reason |= reason;
 
-       spin_unlock_irqrestore(&info->lock, flags);
+       raw_spin_unlock_irqrestore(&info->lock, flags);
 }
 
 static void do_unmask(struct irq_info *info, u8 reason)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&info->lock, flags);
+       raw_spin_lock_irqsave(&info->lock, flags);
 
        info->mask_reason &= ~reason;
 
        if (!info->mask_reason)
                unmask_evtchn(info->evtchn);
 
-       spin_unlock_irqrestore(&info->lock, flags);
+       raw_spin_unlock_irqrestore(&info->lock, flags);
 }
 
 #ifdef CONFIG_X86
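
The spinlock_t to raw_spinlock_t conversion is presumably for PREEMPT_RT, where spinlock_t becomes a sleeping lock: do_mask()/do_unmask() run from irq-chip callbacks where sleeping is not allowed, so the irq_info lock must remain a true spinning lock.
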
index 714fcca..17548c1 100644 (file)
@@ -70,7 +70,6 @@ const struct inode_operations afs_dir_inode_operations = {
        .permission     = afs_permission,
        .getattr        = afs_getattr,
        .setattr        = afs_setattr,
-       .listxattr      = afs_listxattr,
 };
 
 const struct address_space_operations afs_dir_aops = {
index 85f5adf..960b642 100644 (file)
@@ -43,7 +43,6 @@ const struct inode_operations afs_file_inode_operations = {
        .getattr        = afs_getattr,
        .setattr        = afs_setattr,
        .permission     = afs_permission,
-       .listxattr      = afs_listxattr,
 };
 
 const struct address_space_operations afs_fs_aops = {
index 97cab12..71c5872 100644 (file)
@@ -181,10 +181,13 @@ void afs_wait_for_operation(struct afs_operation *op)
                if (test_bit(AFS_SERVER_FL_IS_YFS, &op->server->flags) &&
                    op->ops->issue_yfs_rpc)
                        op->ops->issue_yfs_rpc(op);
-               else
+               else if (op->ops->issue_afs_rpc)
                        op->ops->issue_afs_rpc(op);
+               else
+                       op->ac.error = -ENOTSUPP;
 
-               op->error = afs_wait_for_call_to_complete(op->call, &op->ac);
+               if (op->call)
+                       op->error = afs_wait_for_call_to_complete(op->call, &op->ac);
        }
 
        switch (op->error) {
index 1156b2d..12be887 100644 (file)
@@ -27,7 +27,6 @@
 
 static const struct inode_operations afs_symlink_inode_operations = {
        .get_link       = page_get_link,
-       .listxattr      = afs_listxattr,
 };
 
 static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *parent_vnode)
index b626e38..1627b18 100644 (file)
@@ -1509,7 +1509,6 @@ extern int afs_launder_page(struct page *);
  * xattr.c
  */
 extern const struct xattr_handler *afs_xattr_handlers[];
-extern ssize_t afs_listxattr(struct dentry *, char *, size_t);
 
 /*
  * yfsclient.c
index 052dab2..bbb2c21 100644 (file)
@@ -32,7 +32,6 @@ const struct inode_operations afs_mntpt_inode_operations = {
        .lookup         = afs_mntpt_lookup,
        .readlink       = page_readlink,
        .getattr        = afs_getattr,
-       .listxattr      = afs_listxattr,
 };
 
 const struct inode_operations afs_autocell_inode_operations = {
index c9195fc..eb737ed 100644 (file)
@@ -851,8 +851,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
        fscache_wait_on_page_write(vnode->cache, vmf->page);
 #endif
 
-       if (PageWriteback(vmf->page) &&
-           wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
+       if (wait_on_page_writeback_killable(vmf->page))
                return VM_FAULT_RETRY;
 
        if (lock_page_killable(vmf->page) < 0)
index c629caa..7751b0b 100644 (file)
 #include <linux/xattr.h>
 #include "internal.h"
 
-static const char afs_xattr_list[] =
-       "afs.acl\0"
-       "afs.cell\0"
-       "afs.fid\0"
-       "afs.volume\0"
-       "afs.yfs.acl\0"
-       "afs.yfs.acl_inherited\0"
-       "afs.yfs.acl_num_cleaned\0"
-       "afs.yfs.vol_acl";
-
-/*
- * Retrieve a list of the supported xattrs.
- */
-ssize_t afs_listxattr(struct dentry *dentry, char *buffer, size_t size)
-{
-       if (size == 0)
-               return sizeof(afs_xattr_list);
-       if (size < sizeof(afs_xattr_list))
-               return -ERANGE;
-       memcpy(buffer, afs_xattr_list, sizeof(afs_xattr_list));
-       return sizeof(afs_xattr_list);
-}
-
 /*
  * Deal with the result of a successful fetch ACL operation.
  */
@@ -231,6 +208,8 @@ static int afs_xattr_get_yfs(const struct xattr_handler *handler,
                        else
                                ret = -ERANGE;
                }
+       } else if (ret == -ENOTSUPP) {
+               ret = -ENODATA;
        }
 
 error_yacl:
@@ -256,6 +235,7 @@ static int afs_xattr_set_yfs(const struct xattr_handler *handler,
 {
        struct afs_operation *op;
        struct afs_vnode *vnode = AFS_FS_I(inode);
+       int ret;
 
        if (flags == XATTR_CREATE ||
            strcmp(name, "acl") != 0)
@@ -270,7 +250,10 @@ static int afs_xattr_set_yfs(const struct xattr_handler *handler,
                return afs_put_operation(op);
 
        op->ops = &yfs_store_opaque_acl2_operation;
-       return afs_do_sync_operation(op);
+       ret = afs_do_sync_operation(op);
+       if (ret == -ENOTSUPP)
+               ret = -ENODATA;
+       return ret;
 }
 
 static const struct xattr_handler afs_xattr_yfs_handler = {
index 92ed7d5..09d6f72 100644 (file)
@@ -275,6 +275,8 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
                bio.bi_opf = dio_bio_write_op(iocb);
                task_io_account_write(ret);
        }
+       if (iocb->ki_flags & IOCB_NOWAIT)
+               bio.bi_opf |= REQ_NOWAIT;
        if (iocb->ki_flags & IOCB_HIPRI)
                bio_set_polled(&bio, iocb);
 
@@ -428,6 +430,8 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                        bio->bi_opf = dio_bio_write_op(iocb);
                        task_io_account_write(bio->bi_iter.bi_size);
                }
+               if (iocb->ki_flags & IOCB_NOWAIT)
+                       bio->bi_opf |= REQ_NOWAIT;
 
                dio->size += bio->bi_iter.bi_size;
                pos += bio->bi_iter.bi_size;
@@ -1240,13 +1244,13 @@ int bdev_disk_changed(struct block_device *bdev, bool invalidate)
 
        lockdep_assert_held(&bdev->bd_mutex);
 
-       clear_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
-
 rescan:
        ret = blk_drop_partitions(bdev);
        if (ret)
                return ret;
 
+       clear_bit(GD_NEED_PART_SCAN, &disk->state);
+
        /*
         * Historically we only set the capacity to zero for devices that
         * support partitions (independent of actually having partitions created).
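
Propagating IOCB_NOWAIT into REQ_NOWAIT lets the block layer fail bio submission with EAGAIN rather than sleeping, which is what nowait callers are promised. From user space the path is reached via RWF_NOWAIT, e.g. (hypothetical caller):

	/* preadv2(2) with RWF_NOWAIT on an O_DIRECT block device fd */
	ssize_t n = preadv2(fd, &iov, 1, 0, RWF_NOWAIT);
	if (n < 0 && errno == EAGAIN)
		retry_later();	/* would have blocked; illustrative fallback */
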
index b634c42..b4fb997 100644 (file)
@@ -7,10 +7,12 @@ subdir-ccflags-y += -Wmissing-format-attribute
 subdir-ccflags-y += -Wmissing-prototypes
 subdir-ccflags-y += -Wold-style-definition
 subdir-ccflags-y += -Wmissing-include-dirs
-subdir-ccflags-y += $(call cc-option, -Wunused-but-set-variable)
-subdir-ccflags-y += $(call cc-option, -Wunused-const-variable)
-subdir-ccflags-y += $(call cc-option, -Wpacked-not-aligned)
-subdir-ccflags-y += $(call cc-option, -Wstringop-truncation)
+condflags := \
+       $(call cc-option, -Wunused-but-set-variable)            \
+       $(call cc-option, -Wunused-const-variable)              \
+       $(call cc-option, -Wpacked-not-aligned)                 \
+       $(call cc-option, -Wstringop-truncation)
+subdir-ccflags-y += $(condflags)
 # The following turn off the warnings enabled by -Wextra
 subdir-ccflags-y += -Wno-missing-field-initializers
 subdir-ccflags-y += -Wno-sign-compare
index d56730a..34b929b 100644 (file)
@@ -1365,7 +1365,9 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
                                   "failed to read tree block %llu from get_old_root",
                                   logical);
                } else {
+                       btrfs_tree_read_lock(old);
                        eb = btrfs_clone_extent_buffer(old);
+                       btrfs_tree_read_unlock(old);
                        free_extent_buffer(old);
                }
        } else if (old_root) {
index 3a9c1e0..d05f735 100644 (file)
@@ -81,6 +81,9 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
        struct btrfs_dev_replace_item *ptr;
        u64 src_devid;
 
+       if (!dev_root)
+               return 0;
+
        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
index 41b718c..289f1f0 100644 (file)
@@ -2387,8 +2387,9 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
        } else {
                set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
                fs_info->dev_root = root;
-               btrfs_init_devices_late(fs_info);
        }
+       /* Initialize fs_info for all devices in any case */
+       btrfs_init_devices_late(fs_info);
 
        /* If IGNOREDATACSUMS is set don't bother reading the csum root. */
        if (!btrfs_test_opt(fs_info, IGNOREDATACSUMS)) {
@@ -3009,6 +3010,21 @@ int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
                }
        }
 
+       /*
+        * btrfs_find_orphan_roots() is responsible for finding all the dead
+        * roots (with 0 refs), flagging them with BTRFS_ROOT_DEAD_TREE and loading
+        * them into the fs_info->fs_roots_radix tree. This must be done before
+        * calling btrfs_orphan_cleanup() on the tree root. If we don't do it
+        * first, then btrfs_orphan_cleanup() will delete a dead root's orphan
+        * item before the root's tree is deleted - this means that if we unmount
+        * or crash before the deletion completes, on the next mount we will not
+        * delete what remains of the tree because the orphan item does not
+        * exists anymore, which is what tells us we have a pending deletion.
+        * exist anymore, which is what tells us we have a pending deletion.
+       ret = btrfs_find_orphan_roots(fs_info);
+       if (ret)
+               goto out;
+
        ret = btrfs_cleanup_fs_roots(fs_info);
        if (ret)
                goto out;
@@ -3068,7 +3084,6 @@ int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
                }
        }
 
-       ret = btrfs_find_orphan_roots(fs_info);
 out:
        return ret;
 }
index 78ad31a..36a3c97 100644 (file)
@@ -3323,6 +3323,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 
        if (last_ref && btrfs_header_generation(buf) == trans->transid) {
                struct btrfs_block_group *cache;
+               bool must_pin = false;
 
                if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
                        ret = check_ref_cleanup(trans, buf->start);
@@ -3340,7 +3341,27 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
                        goto out;
                }
 
-               if (btrfs_is_zoned(fs_info)) {
+               /*
+                * If this is a leaf and there are tree mod log users, we may
+                * have recorded mod log operations that point to this leaf.
+                * So we must make sure no one reuses this leaf's extent before
+                * mod log operations are applied to a node, otherwise after
+                * rewinding a node using the mod log operations we get an
+                * inconsistent btree, as the leaf's extent may now be used as
+                * a node or leaf for another different btree.
+                * We are safe from races here because at this point no other
+                * node or root points to this extent buffer, so if after this
+                * check a new tree mod log user joins, it will not be able to
+                * find a node pointing to this leaf and record operations that
+                * point to this leaf.
+                */
+               if (btrfs_header_level(buf) == 0) {
+                       read_lock(&fs_info->tree_mod_log_lock);
+                       must_pin = !list_empty(&fs_info->tree_mod_seq_list);
+                       read_unlock(&fs_info->tree_mod_log_lock);
+               }
+
+               if (must_pin || btrfs_is_zoned(fs_info)) {
                        btrfs_redirty_list_add(trans->transaction, buf);
                        pin_down_extent(trans, cache, buf->start, buf->len, 1);
                        btrfs_put_block_group(cache);
index 191e358..910769d 100644 (file)
@@ -2885,6 +2885,35 @@ static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
                btrfs_subpage_end_reader(fs_info, page, start, len);
 }
 
+/*
+ * Find extent buffer for a given bytenr.
+ *
+ * This is for end_bio_extent_readpage(); it must avoid any locking that is
+ * unsafe in endio context.
+ */
+static struct extent_buffer *find_extent_buffer_readpage(
+               struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
+{
+       struct extent_buffer *eb;
+
+       /*
+        * For regular sectorsize, we can use page->private to grab extent
+        * buffer
+        */
+       if (fs_info->sectorsize == PAGE_SIZE) {
+               ASSERT(PagePrivate(page) && page->private);
+               return (struct extent_buffer *)page->private;
+       }
+
+       /* For subpage case, we need to lookup buffer radix tree */
+       rcu_read_lock();
+       eb = radix_tree_lookup(&fs_info->buffer_radix,
+                              bytenr >> fs_info->sectorsize_bits);
+       rcu_read_unlock();
+       ASSERT(eb);
+       return eb;
+}
+
 /*
  * after a readpage IO is done, we need to:
  * clear the uptodate bits on error
@@ -2996,7 +3025,7 @@ static void end_bio_extent_readpage(struct bio *bio)
                } else {
                        struct extent_buffer *eb;
 
-                       eb = (struct extent_buffer *)page->private;
+                       eb = find_extent_buffer_readpage(fs_info, page, start);
                        set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
                        eb->read_mirror = mirror;
                        atomic_dec(&eb->io_pages);
@@ -3020,7 +3049,7 @@ readpage_ok:
                         */
                        if (page->index == end_index && i_size <= end) {
                                u32 zero_start = max(offset_in_page(i_size),
-                                                    offset_in_page(end));
+                                                    offset_in_page(start));
 
                                zero_user_segment(page, zero_start,
                                                  offset_in_page(end) + 1);
index 35bfa05..a520775 100644 (file)
@@ -3099,11 +3099,13 @@ void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
  * @bio_offset:        offset to the beginning of the bio (in bytes)
  * @page:      page where is the data to be verified
  * @pgoff:     offset inside the page
+ * @start:     logical offset in the file
  *
  * The length of such check is always one sector size.
  */
 static int check_data_csum(struct inode *inode, struct btrfs_io_bio *io_bio,
-                          u32 bio_offset, struct page *page, u32 pgoff)
+                          u32 bio_offset, struct page *page, u32 pgoff,
+                          u64 start)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
@@ -3130,8 +3132,8 @@ static int check_data_csum(struct inode *inode, struct btrfs_io_bio *io_bio,
        kunmap_atomic(kaddr);
        return 0;
 zeroit:
-       btrfs_print_data_csum_error(BTRFS_I(inode), page_offset(page) + pgoff,
-                                   csum, csum_expected, io_bio->mirror_num);
+       btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected,
+                                   io_bio->mirror_num);
        if (io_bio->device)
                btrfs_dev_stat_inc_and_print(io_bio->device,
                                             BTRFS_DEV_STAT_CORRUPTION_ERRS);
@@ -3184,7 +3186,8 @@ int btrfs_verify_data_csum(struct btrfs_io_bio *io_bio, u32 bio_offset,
             pg_off += sectorsize, bio_offset += sectorsize) {
                int ret;
 
-               ret = check_data_csum(inode, io_bio, bio_offset, page, pg_off);
+               ret = check_data_csum(inode, io_bio, bio_offset, page, pg_off,
+                                     page_offset(page) + pg_off);
                if (ret < 0)
                        return -EIO;
        }
@@ -7910,7 +7913,8 @@ static blk_status_t btrfs_check_read_dio_bio(struct inode *inode,
                        ASSERT(pgoff < PAGE_SIZE);
                        if (uptodate &&
                            (!csum || !check_data_csum(inode, io_bio,
-                                       bio_offset, bvec.bv_page, pgoff))) {
+                                                      bio_offset, bvec.bv_page,
+                                                      pgoff, start))) {
                                clean_io_failure(fs_info, failure_tree, io_tree,
                                                 start, bvec.bv_page,
                                                 btrfs_ino(BTRFS_I(inode)),
@@ -8169,10 +8173,6 @@ static blk_qc_t btrfs_submit_direct(struct inode *inode, struct iomap *iomap,
                bio->bi_end_io = btrfs_end_dio_bio;
                btrfs_io_bio(bio)->logical = file_offset;
 
-               WARN_ON_ONCE(write && btrfs_is_zoned(fs_info) &&
-                            fs_info->max_zone_append_size &&
-                            bio_op(bio) != REQ_OP_ZONE_APPEND);
-
                if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
                        status = extract_ordered_extent(BTRFS_I(inode), bio,
                                                        file_offset);
@@ -9008,7 +9008,7 @@ int __init btrfs_init_cachep(void)
 
        btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
                                                        PAGE_SIZE, PAGE_SIZE,
-                                                       SLAB_RED_ZONE, NULL);
+                                                       SLAB_MEM_SPREAD, NULL);
        if (!btrfs_free_space_bitmap_cachep)
                goto fail;
 
@@ -9877,6 +9877,7 @@ static struct btrfs_trans_handle *insert_prealloc_file_extent(
        struct btrfs_path *path;
        u64 start = ins->objectid;
        u64 len = ins->offset;
+       int qgroup_released;
        int ret;
 
        memset(&stack_fi, 0, sizeof(stack_fi));
@@ -9889,16 +9890,16 @@ static struct btrfs_trans_handle *insert_prealloc_file_extent(
        btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
        /* Encryption and other encoding is reserved and all 0 */
 
-       ret = btrfs_qgroup_release_data(inode, file_offset, len);
-       if (ret < 0)
-               return ERR_PTR(ret);
+       qgroup_released = btrfs_qgroup_release_data(inode, file_offset, len);
+       if (qgroup_released < 0)
+               return ERR_PTR(qgroup_released);
 
        if (trans) {
                ret = insert_reserved_file_extent(trans, inode,
                                                  file_offset, &stack_fi,
-                                                 true, ret);
+                                                 true, qgroup_released);
                if (ret)
-                       return ERR_PTR(ret);
+                       goto free_qgroup;
                return trans;
        }
 
@@ -9909,21 +9910,35 @@ static struct btrfs_trans_handle *insert_prealloc_file_extent(
        extent_info.file_offset = file_offset;
        extent_info.extent_buf = (char *)&stack_fi;
        extent_info.is_new_extent = true;
-       extent_info.qgroup_reserved = ret;
+       extent_info.qgroup_reserved = qgroup_released;
        extent_info.insertions = 0;
 
        path = btrfs_alloc_path();
-       if (!path)
-               return ERR_PTR(-ENOMEM);
+       if (!path) {
+               ret = -ENOMEM;
+               goto free_qgroup;
+       }
 
        ret = btrfs_replace_file_extents(&inode->vfs_inode, path, file_offset,
                                     file_offset + len - 1, &extent_info,
                                     &trans);
        btrfs_free_path(path);
        if (ret)
-               return ERR_PTR(ret);
-
+               goto free_qgroup;
        return trans;
+
+free_qgroup:
+       /*
+        * We released the qgroup data range at the beginning of the function,
+        * and normally the qgroup_released bytes would be freed when the
+        * transaction commits.
+        * But if we error out early, we have to free what we have released,
+        * or we leak the qgroup data reservation.
+        */
+       btrfs_qgroup_free_refroot(inode->root->fs_info,
+                       inode->root->root_key.objectid, qgroup_released,
+                       BTRFS_QGROUP_RSV_DATA);
+       return ERR_PTR(ret);
 }
 
 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
index 14ff388..f0b9ef1 100644 (file)
@@ -226,7 +226,6 @@ static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
 {
        struct btrfs_qgroup_list *list;
 
-       btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
        list_del(&qgroup->dirty);
        while (!list_empty(&qgroup->groups)) {
                list = list_first_entry(&qgroup->groups,
@@ -243,7 +242,6 @@ static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
                list_del(&list->next_member);
                kfree(list);
        }
-       kfree(qgroup);
 }
 
 /* must be called with qgroup_lock held */
@@ -569,6 +567,8 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
                qgroup = rb_entry(n, struct btrfs_qgroup, node);
                rb_erase(n, &fs_info->qgroup_tree);
                __del_qgroup_rb(fs_info, qgroup);
+               btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
+               kfree(qgroup);
        }
        /*
         * We call btrfs_free_qgroup_config() when unmounting
@@ -1578,6 +1578,14 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
        spin_lock(&fs_info->qgroup_lock);
        del_qgroup_rb(fs_info, qgroupid);
        spin_unlock(&fs_info->qgroup_lock);
+
+       /*
+        * Remove the qgroup from sysfs now without holding the qgroup_lock
+        * spinlock, since the sysfs_remove_group() function needs to take
+        * the kernfs_mutex mutex via kernfs_remove_by_name_ns().
+        */
+       btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
+       kfree(qgroup);
 out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
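
The comment above reflects a general rule: anything that can sleep (kernfs removal takes a mutex) must not run under a spinlock, so the sysfs removal and kfree() now happen after qgroup_lock is dropped. A minimal userspace analogue of that unlink-then-teardown pattern, using a pthread spinlock and mutex as stand-ins (all names illustrative):

        #include <pthread.h>
        #include <stdio.h>
        #include <stdlib.h>

        struct qgroup { int id; };

        static pthread_spinlock_t tree_lock;               /* plays qgroup_lock */
        static pthread_mutex_t sysfs_lock = PTHREAD_MUTEX_INITIALIZER; /* plays kernfs_mutex */

        static void unlink_qgroup(struct qgroup *qg)
        {
                (void)qg;               /* rb_erase()-style unlink: never sleeps */
        }

        static void sysfs_remove_qgroup(struct qgroup *qg)
        {
                pthread_mutex_lock(&sysfs_lock); /* may block: forbidden under tree_lock */
                printf("removed qgroup %d from sysfs\n", qg->id);
                pthread_mutex_unlock(&sysfs_lock);
        }

        int main(void)
        {
                struct qgroup *qg = malloc(sizeof(*qg));

                qg->id = 1;
                pthread_spin_init(&tree_lock, PTHREAD_PROCESS_PRIVATE);

                pthread_spin_lock(&tree_lock);
                unlink_qgroup(qg);      /* only non-sleeping work under the spinlock */
                pthread_spin_unlock(&tree_lock);

                sysfs_remove_qgroup(qg); /* sleeping teardown after the unlock */
                free(qg);
                return 0;
        }
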
index 20fd4aa..06713a8 100644 (file)
@@ -209,7 +209,7 @@ int btree_readahead_hook(struct extent_buffer *eb, int err)
        /* find extent */
        spin_lock(&fs_info->reada_lock);
        re = radix_tree_lookup(&fs_info->reada_tree,
-                              eb->start >> PAGE_SHIFT);
+                              eb->start >> fs_info->sectorsize_bits);
        if (re)
                re->refcnt++;
        spin_unlock(&fs_info->reada_lock);
@@ -240,7 +240,7 @@ static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical,
        zone = NULL;
        spin_lock(&fs_info->reada_lock);
        ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
-                                    logical >> PAGE_SHIFT, 1);
+                                    logical >> fs_info->sectorsize_bits, 1);
        if (ret == 1 && logical >= zone->start && logical <= zone->end) {
                kref_get(&zone->refcnt);
                spin_unlock(&fs_info->reada_lock);
@@ -283,13 +283,13 @@ static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical,
 
        spin_lock(&fs_info->reada_lock);
        ret = radix_tree_insert(&dev->reada_zones,
-                               (unsigned long)(zone->end >> PAGE_SHIFT),
-                               zone);
+                       (unsigned long)(zone->end >> fs_info->sectorsize_bits),
+                       zone);
 
        if (ret == -EEXIST) {
                kfree(zone);
                ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
-                                            logical >> PAGE_SHIFT, 1);
+                                       logical >> fs_info->sectorsize_bits, 1);
                if (ret == 1 && logical >= zone->start && logical <= zone->end)
                        kref_get(&zone->refcnt);
                else
@@ -315,7 +315,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
        u64 length;
        int real_stripes;
        int nzones = 0;
-       unsigned long index = logical >> PAGE_SHIFT;
+       unsigned long index = logical >> fs_info->sectorsize_bits;
        int dev_replace_is_ongoing;
        int have_zone = 0;
 
@@ -497,7 +497,7 @@ static void reada_extent_put(struct btrfs_fs_info *fs_info,
                             struct reada_extent *re)
 {
        int i;
-       unsigned long index = re->logical >> PAGE_SHIFT;
+       unsigned long index = re->logical >> fs_info->sectorsize_bits;
 
        spin_lock(&fs_info->reada_lock);
        if (--re->refcnt) {
@@ -538,11 +538,12 @@ static void reada_extent_put(struct btrfs_fs_info *fs_info,
 static void reada_zone_release(struct kref *kref)
 {
        struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);
+       struct btrfs_fs_info *fs_info = zone->device->fs_info;
 
-       lockdep_assert_held(&zone->device->fs_info->reada_lock);
+       lockdep_assert_held(&fs_info->reada_lock);
 
        radix_tree_delete(&zone->device->reada_zones,
-                         zone->end >> PAGE_SHIFT);
+                         zone->end >> fs_info->sectorsize_bits);
 
        kfree(zone);
 }
@@ -593,7 +594,7 @@ static int reada_add_block(struct reada_control *rc, u64 logical,
 static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
 {
        int i;
-       unsigned long index = zone->end >> PAGE_SHIFT;
+       unsigned long index = zone->end >> zone->device->fs_info->sectorsize_bits;
 
        for (i = 0; i < zone->ndevs; ++i) {
                struct reada_zone *peer;
@@ -628,7 +629,7 @@ static int reada_pick_zone(struct btrfs_device *dev)
                                             (void **)&zone, index, 1);
                if (ret == 0)
                        break;
-               index = (zone->end >> PAGE_SHIFT) + 1;
+               index = (zone->end >> dev->fs_info->sectorsize_bits) + 1;
                if (zone->locked) {
                        if (zone->elems > top_locked_elems) {
                                top_locked_elems = zone->elems;
@@ -709,7 +710,7 @@ static int reada_start_machine_dev(struct btrfs_device *dev)
         * plugging to speed things up
         */
        ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
-                                    dev->reada_next >> PAGE_SHIFT, 1);
+                               dev->reada_next >> fs_info->sectorsize_bits, 1);
        if (ret == 0 || re->logical > dev->reada_curr_zone->end) {
                ret = reada_pick_zone(dev);
                if (!ret) {
@@ -718,7 +719,7 @@ static int reada_start_machine_dev(struct btrfs_device *dev)
                }
                re = NULL;
                ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
-                                       dev->reada_next >> PAGE_SHIFT, 1);
+                               dev->reada_next >> fs_info->sectorsize_bits, 1);
        }
        if (ret == 0) {
                spin_unlock(&fs_info->reada_lock);
@@ -885,7 +886,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
                                pr_cont(" curr off %llu",
                                        device->reada_next - zone->start);
                        pr_cont("\n");
-                       index = (zone->end >> PAGE_SHIFT) + 1;
+                       index = (zone->end >> fs_info->sectorsize_bits) + 1;
                }
                cnt = 0;
                index = 0;
@@ -910,7 +911,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
                                }
                        }
                        pr_cont("\n");
-                       index = (re->logical >> PAGE_SHIFT) + 1;
+                       index = (re->logical >> fs_info->sectorsize_bits) + 1;
                        if (++cnt > 15)
                                break;
                }
@@ -926,7 +927,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
                if (ret == 0)
                        break;
                if (!re->scheduled) {
-                       index = (re->logical >> PAGE_SHIFT) + 1;
+                       index = (re->logical >> fs_info->sectorsize_bits) + 1;
                        continue;
                }
                pr_debug("re: logical %llu size %u list empty %d scheduled %d",
@@ -942,7 +943,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
                        }
                }
                pr_cont("\n");
-               index = (re->logical >> PAGE_SHIFT) + 1;
+               index = (re->logical >> fs_info->sectorsize_bits) + 1;
        }
        spin_unlock(&fs_info->reada_lock);
 }
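
These hunks re-key the readahead radix trees by sector number rather than page number, so lookups no longer depend on PAGE_SHIFT (groundwork for subpage sector sizes). A trivial sketch of the new key computation, assuming a 4KiB sector size:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint64_t logical = 1ULL << 30;     /* some logical byte address */
                unsigned int sectorsize_bits = 12; /* ilog2(4096), fs-defined, not PAGE_SHIFT */

                /* The radix tree index is now the sector number of the extent. */
                printf("index = %llu\n", (unsigned long long)(logical >> sectorsize_bits));
                return 0;
        }
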
index 2f1acc9..92a3686 100644 (file)
@@ -3169,10 +3169,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 
        mutex_lock(&log_root_tree->log_mutex);
 
-       index2 = log_root_tree->log_transid % 2;
-       list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
-       root_log_ctx.log_transid = log_root_tree->log_transid;
-
        if (btrfs_is_zoned(fs_info)) {
                if (!log_root_tree->node) {
                        ret = btrfs_alloc_log_tree_node(trans, log_root_tree);
@@ -3183,6 +3179,10 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
                }
        }
 
+       index2 = log_root_tree->log_transid % 2;
+       list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
+       root_log_ctx.log_transid = log_root_tree->log_transid;
+
        /*
         * Now we are safe to update the log_root_tree because we're under the
         * log_mutex, and we're a current writer so we're holding the commit
index bc3b33e..1c6810b 100644 (file)
@@ -7448,6 +7448,9 @@ static int btrfs_device_init_dev_stats(struct btrfs_device *device,
        int item_size;
        int i, ret, slot;
 
+       if (!device->fs_info->dev_root)
+               return 0;
+
        key.objectid = BTRFS_DEV_STATS_OBJECTID;
        key.type = BTRFS_PERSISTENT_ITEM_KEY;
        key.offset = device->devid;
index 1f972b7..eeb3ebe 100644 (file)
 /* Pseudo write pointer value for conventional zone */
 #define WP_CONVENTIONAL ((u64)-2)
 
+/*
+ * Location of the first zone of superblock logging zone pairs.
+ *
+ * - primary superblock:    0B (zone 0)
+ * - first copy:          512G (zone starting at that offset)
+ * - second copy:           4T (zone starting at that offset)
+ */
+#define BTRFS_SB_LOG_PRIMARY_OFFSET    (0ULL)
+#define BTRFS_SB_LOG_FIRST_OFFSET      (512ULL * SZ_1G)
+#define BTRFS_SB_LOG_SECOND_OFFSET     (4096ULL * SZ_1G)
+
+#define BTRFS_SB_LOG_FIRST_SHIFT       const_ilog2(BTRFS_SB_LOG_FIRST_OFFSET)
+#define BTRFS_SB_LOG_SECOND_SHIFT      const_ilog2(BTRFS_SB_LOG_SECOND_OFFSET)
+
 /* Number of superblock log zones */
 #define BTRFS_NR_SB_LOG_ZONES 2
 
+/*
+ * Maximum supported zone size. Currently, SMR disks have a zone size of
+ * 256MiB, and we are expecting ZNS drives to be in the 1-4GiB range. We do not
+ * expect the zone size to become larger than 8GiB in the near future.
+ */
+#define BTRFS_MAX_ZONE_SIZE            SZ_8G
+
 static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data)
 {
        struct blk_zone *zones = data;
@@ -111,23 +132,22 @@ static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
 }
 
 /*
- * The following zones are reserved as the circular buffer on ZONED btrfs.
- *  - The primary superblock: zones 0 and 1
- *  - The first copy: zones 16 and 17
- *  - The second copy: zones 1024 or zone at 256GB which is minimum, and
- *                     the following one
+ * Get the first zone number of the superblock mirror
  */
 static inline u32 sb_zone_number(int shift, int mirror)
 {
-       ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX);
+       u64 zone;
 
+       ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX);
        switch (mirror) {
-       case 0: return 0;
-       case 1: return 16;
-       case 2: return min_t(u64, btrfs_sb_offset(mirror) >> shift, 1024);
+       case 0: zone = 0; break;
+       case 1: zone = 1ULL << (BTRFS_SB_LOG_FIRST_SHIFT - shift); break;
+       case 2: zone = 1ULL << (BTRFS_SB_LOG_SECOND_SHIFT - shift); break;
        }
 
-       return 0;
+       ASSERT(zone <= U32_MAX);
+
+       return (u32)zone;
 }
 
 /*
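
The new sb_zone_number() derives each mirror's zone number by shifting a fixed byte offset (0, 512GiB, 4TiB) right by the device's zone-size shift. A minimal userspace sketch of that arithmetic, with the shift constants hand-computed as ilog2(512GiB) and ilog2(4TiB) and all names illustrative rather than the kernel's:

        #include <assert.h>
        #include <stdint.h>
        #include <stdio.h>

        #define SB_LOG_FIRST_SHIFT  39  /* const_ilog2(512GiB) */
        #define SB_LOG_SECOND_SHIFT 42  /* const_ilog2(4TiB)   */

        static uint32_t sb_zone_number(int zone_size_shift, int mirror)
        {
                uint64_t zone = 0;

                switch (mirror) {
                case 0: zone = 0; break;
                case 1: zone = 1ULL << (SB_LOG_FIRST_SHIFT - zone_size_shift); break;
                case 2: zone = 1ULL << (SB_LOG_SECOND_SHIFT - zone_size_shift); break;
                }
                assert(zone <= UINT32_MAX);
                return (uint32_t)zone;
        }

        int main(void)
        {
                /* A 256MiB zone has ilog2(zone size) == 28, so mirror 1 lands at
                 * zone 512GiB / 256MiB == 2048 and mirror 2 at 4TiB / 256MiB == 16384. */
                printf("%u %u %u\n", sb_zone_number(28, 0), sb_zone_number(28, 1),
                       sb_zone_number(28, 2));
                return 0;
        }
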
@@ -300,10 +320,21 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
                zone_sectors = bdev_zone_sectors(bdev);
        }
 
-       nr_sectors = bdev_nr_sectors(bdev);
        /* Check if it's power of 2 (see is_power_of_2) */
        ASSERT(zone_sectors != 0 && (zone_sectors & (zone_sectors - 1)) == 0);
        zone_info->zone_size = zone_sectors << SECTOR_SHIFT;
+
+       /* We reject devices with a zone size larger than 8GiB */
+       if (zone_info->zone_size > BTRFS_MAX_ZONE_SIZE) {
+               btrfs_err_in_rcu(fs_info,
+               "zoned: %s: zone size %llu larger than supported maximum %llu",
+                                rcu_str_deref(device->name),
+                                zone_info->zone_size, BTRFS_MAX_ZONE_SIZE);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       nr_sectors = bdev_nr_sectors(bdev);
        zone_info->zone_size_shift = ilog2(zone_info->zone_size);
        zone_info->max_zone_append_size =
                (u64)queue_max_zone_append_sectors(queue) << SECTOR_SHIFT;
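
The ASSERT above open-codes the usual power-of-two test: a power of two has a single set bit, so x & (x - 1) clears it to zero. A standalone version (524288 sectors corresponds to a 256MiB zone):

        #include <stdint.h>
        #include <stdio.h>

        static int is_pow2(uint64_t x)
        {
                /* x & (x - 1) clears the lowest set bit; only powers of two
                 * reach zero, and x != 0 excludes zero itself. */
                return x != 0 && (x & (x - 1)) == 0;
        }

        int main(void)
        {
                printf("%d %d %d\n", is_pow2(524288), is_pow2(524289), is_pow2(0));
                return 0;
        }
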
index dfb14db..38bb776 100644 (file)
@@ -118,6 +118,12 @@ static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache)
        cache->mnt = path.mnt;
        root = path.dentry;
 
+       ret = -EINVAL;
+       if (mnt_user_ns(path.mnt) != &init_user_ns) {
+               pr_warn("File cache on idmapped mounts not supported");
+               goto error_unsupported;
+       }
+
        /* check parameters */
        ret = -EOPNOTSUPP;
        if (d_is_negative(root) ||
index e027c71..8ffc40e 100644 (file)
@@ -24,17 +24,16 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
                container_of(wait, struct cachefiles_one_read, monitor);
        struct cachefiles_object *object;
        struct fscache_retrieval *op = monitor->op;
-       struct wait_bit_key *key = _key;
+       struct wait_page_key *key = _key;
        struct page *page = wait->private;
 
        ASSERT(key);
 
        _enter("{%lu},%u,%d,{%p,%u}",
               monitor->netfs_page->index, mode, sync,
-              key->flags, key->bit_nr);
+              key->page, key->bit_nr);
 
-       if (key->flags != &page->flags ||
-           key->bit_nr != PG_locked)
+       if (key->page != page || key->bit_nr != PG_locked)
                return 0;
 
        _debug("--- monitor %p %lx ---", page, page->flags);
index fe03cbd..bf52e93 100644 (file)
@@ -18,6 +18,7 @@ config CIFS
        select CRYPTO_AES
        select CRYPTO_LIB_DES
        select KEYS
+       select DNS_RESOLVER
        help
          This is the client VFS module for the SMB3 family of NAS protocols
          (including support for the most recent, most secure dialect SMB3.1.1)
@@ -112,7 +113,6 @@ config CIFS_WEAK_PW_HASH
 config CIFS_UPCALL
        bool "Kerberos/SPNEGO advanced session setup"
        depends on CIFS
-       select DNS_RESOLVER
        help
          Enables an upcall mechanism for CIFS which accesses userspace helper
          utilities to provide SPNEGO packaged (RFC 4178) Kerberos tickets
@@ -179,7 +179,6 @@ config CIFS_DEBUG_DUMP_KEYS
 config CIFS_DFS_UPCALL
        bool "DFS feature support"
        depends on CIFS
-       select DNS_RESOLVER
        help
          Distributed File System (DFS) support is used to access shares
          transparently in an enterprise name space, even if the share
index 5213b20..3ee3b7d 100644 (file)
@@ -10,13 +10,14 @@ cifs-y := trace.o cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o \
          cifs_unicode.o nterr.o cifsencrypt.o \
          readdir.o ioctl.o sess.o export.o smb1ops.o unc.o winucase.o \
          smb2ops.o smb2maperror.o smb2transport.o \
-         smb2misc.o smb2pdu.o smb2inode.o smb2file.o cifsacl.o fs_context.o
+         smb2misc.o smb2pdu.o smb2inode.o smb2file.o cifsacl.o fs_context.o \
+         dns_resolve.o
 
 cifs-$(CONFIG_CIFS_XATTR) += xattr.o
 
 cifs-$(CONFIG_CIFS_UPCALL) += cifs_spnego.o
 
-cifs-$(CONFIG_CIFS_DFS_UPCALL) += dns_resolve.o cifs_dfs_ref.o dfs_cache.o
+cifs-$(CONFIG_CIFS_DFS_UPCALL) += cifs_dfs_ref.o dfs_cache.o
 
 cifs-$(CONFIG_CIFS_SWN_UPCALL) += netlink.o cifs_swn.o
 
index f2d730f..d829b8b 100644 (file)
@@ -248,7 +248,7 @@ nlmsg_fail:
 
 /*
  * Try to find a matching registration for the tcon's server name and share name.
- * Calls to this funciton must be protected by cifs_swnreg_idr_mutex.
+ * Calls to this function must be protected by cifs_swnreg_idr_mutex.
  * TODO Try to avoid memory allocations
  */
 static struct cifs_swn_reg *cifs_find_swn_reg(struct cifs_tcon *tcon)
index 9d29eb9..d178cf8 100644 (file)
@@ -1118,7 +1118,6 @@ static int set_chmod_dacl(struct cifs_acl *pdacl, struct cifs_acl *pndacl,
        /* Retain old ACEs which we can retain */
        for (i = 0; i < src_num_aces; ++i) {
                pntace = (struct cifs_ace *) (acl_base + size);
-               pnntace = (struct cifs_ace *) (nacl_base + nsize);
 
                if (!new_aces_set && (pntace->flags & INHERITED_ACE)) {
                        /* Place the new ACEs in between existing explicit and inherited */
@@ -1131,14 +1130,17 @@ static int set_chmod_dacl(struct cifs_acl *pdacl, struct cifs_acl *pndacl,
                }
 
                /* If it's any one of the ACE we're replacing, skip! */
-               if ((compare_sids(&pntace->sid, &sid_unix_NFS_mode) == 0) ||
+               if (((compare_sids(&pntace->sid, &sid_unix_NFS_mode) == 0) ||
                                (compare_sids(&pntace->sid, pownersid) == 0) ||
                                (compare_sids(&pntace->sid, pgrpsid) == 0) ||
                                (compare_sids(&pntace->sid, &sid_everyone) == 0) ||
-                               (compare_sids(&pntace->sid, &sid_authusers) == 0)) {
+                               (compare_sids(&pntace->sid, &sid_authusers) == 0))) {
                        goto next_ace;
                }
 
+               /* update the pointer to the next ACE to populate */
+               pnntace = (struct cifs_ace *) (nacl_base + nsize);
+
                nsize += cifs_copy_ace(pnntace, pntace, NULL);
                num_aces++;
 
index 099ad9f..5ddd20b 100644 (file)
@@ -476,7 +476,8 @@ static int cifs_show_devname(struct seq_file *m, struct dentry *root)
                seq_puts(m, "none");
        else {
                convert_delimiter(devname, '/');
-               seq_puts(m, devname);
+               /* escape all spaces in share names */
+               seq_escape(m, devname, " \t");
                kfree(devname);
        }
        return 0;
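
seq_escape() emits \ooo octal escapes for the listed characters, so a share name containing spaces stays a single field in /proc/mounts. A rough userspace equivalent of that escaping:

        #include <stdio.h>

        static void escape_chars(const char *s, const char *esc)
        {
                for (; *s; s++) {
                        const char *e;

                        for (e = esc; *e && *e != *s; e++)
                                ;
                        if (*e)
                                printf("\\%03o", (unsigned char)*s); /* ' ' -> \040 */
                        else
                                putchar(*s);
                }
                putchar('\n');
        }

        int main(void)
        {
                escape_chars("//server/share name", " \t");
                return 0;
        }
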
index 31fc869..ec824ab 100644 (file)
@@ -919,8 +919,8 @@ struct cifs_ses {
        bool binding:1; /* are we binding the session? */
        __u16 session_flags;
        __u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
-       __u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE];
-       __u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE];
+       __u8 smb3encryptionkey[SMB3_ENC_DEC_KEY_SIZE];
+       __u8 smb3decryptionkey[SMB3_ENC_DEC_KEY_SIZE];
        __u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
 
        __u8 binding_preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
@@ -1283,8 +1283,6 @@ struct cifs_aio_ctx {
        bool                    direct_io;
 };
 
-struct cifs_readdata;
-
 /* asynchronous read support */
 struct cifs_readdata {
        struct kref                     refcount;
index 64fe5a4..9adc74b 100644 (file)
  */
 #define SMB3_SIGN_KEY_SIZE (16)
 
+/*
+ * Size of the smb3 encryption/decryption keys
+ */
+#define SMB3_ENC_DEC_KEY_SIZE (32)
+
 #define CIFS_CLIENT_CHALLENGE_SIZE (8)
 #define CIFS_SERVER_CHALLENGE_SIZE (8)
 #define CIFS_HMAC_MD5_HASH_SIZE (16)
index eec8a20..24668eb 100644 (file)
@@ -87,7 +87,6 @@ static void cifs_prune_tlinks(struct work_struct *work);
  *
  * This should be called with server->srv_mutex held.
  */
-#ifdef CONFIG_CIFS_DFS_UPCALL
 static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
 {
        int rc;
@@ -124,6 +123,7 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
        return !rc ? -1 : 0;
 }
 
+#ifdef CONFIG_CIFS_DFS_UPCALL
 /* These functions must be called with server->srv_mutex held */
 static void reconn_set_next_dfs_target(struct TCP_Server_Info *server,
                                       struct cifs_sb_info *cifs_sb,
@@ -321,14 +321,29 @@ cifs_reconnect(struct TCP_Server_Info *server)
 #endif
 
 #ifdef CONFIG_CIFS_DFS_UPCALL
+               if (cifs_sb && cifs_sb->origin_fullpath)
                        /*
                         * Set up next DFS target server (if any) for reconnect. If DFS
                         * feature is disabled, then we will retry last server we
                         * connected to before.
                         */
                        reconn_set_next_dfs_target(server, cifs_sb, &tgt_list, &tgt_it);
+               else {
+#endif
+                       /*
+                        * Resolve the hostname again to make sure that the IP address is up-to-date.
+                        */
+                       rc = reconn_set_ipaddr_from_hostname(server);
+                       if (rc) {
+                               cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
+                                               __func__, rc);
+                       }
+
+#ifdef CONFIG_CIFS_DFS_UPCALL
+               }
 #endif
 
+
 #ifdef CONFIG_CIFS_SWN_UPCALL
                }
 #endif
index 26de432..042e24a 100644 (file)
@@ -165,6 +165,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
                        goto posix_open_ret;
                }
        } else {
+               cifs_revalidate_mapping(*pinode);
                cifs_fattr_to_inode(*pinode, &fattr);
        }
 
index 892f51a..7888902 100644 (file)
@@ -1196,9 +1196,11 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
                pr_warn_once("Witness protocol support is experimental\n");
                break;
        case Opt_rootfs:
-#ifdef CONFIG_CIFS_ROOT
-               ctx->rootfs = true;
+#ifndef CONFIG_CIFS_ROOT
+               cifs_dbg(VFS, "rootfs support requires CONFIG_CIFS_ROOT config option\n");
+               goto cifs_parse_mount_err;
 #endif
+               ctx->rootfs = true;
                break;
        case Opt_posixpaths:
                if (result.negated)
index 7c61bc9..f2df442 100644 (file)
@@ -2395,7 +2395,7 @@ int cifs_getattr(struct user_namespace *mnt_userns, const struct path *path,
         * We need to be sure that all dirty pages are written and the server
         * has actual ctime, mtime and file length.
         */
-       if ((request_mask & (STATX_CTIME | STATX_MTIME | STATX_SIZE)) &&
+       if ((request_mask & (STATX_CTIME | STATX_MTIME | STATX_SIZE | STATX_BLOCKS)) &&
            !CIFS_CACHE_READ(CIFS_I(inode)) &&
            inode->i_mapping && inode->i_mapping->nrpages != 0) {
                rc = filemap_fdatawait(inode->i_mapping);
@@ -2585,6 +2585,14 @@ set_size_out:
        if (rc == 0) {
                cifsInode->server_eof = attrs->ia_size;
                cifs_setsize(inode, attrs->ia_size);
+               /*
+                * i_blocks is not related to (i_size / i_blksize); instead a
+                * 512-byte (2**9) unit is required for calculating the number
+                * of blocks. Until we can query the server for the actual
+                * allocation size, this is the best estimate we have for the
+                * blocks allocated to a file. The number of blocks must be
+                * rounded up so a size of 1 is not 0 blocks.
+                */
+               inode->i_blocks = (512 - 1 + attrs->ia_size) >> 9;
 
                /*
                 * The man page of truncate says if the size changed,
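
The added comment describes i_blocks as a count of 512-byte units, rounded up so a one-byte file is not reported as zero blocks. A quick standalone check of the (512 - 1 + size) >> 9 expression:

        #include <stdint.h>
        #include <stdio.h>

        /* Round a byte size up to the number of 512-byte blocks, as in the
         * i_blocks estimate above. */
        static uint64_t bytes_to_blocks(uint64_t size)
        {
                return (512 - 1 + size) >> 9;
        }

        int main(void)
        {
                /* 0 -> 0 blocks, 1 -> 1, 512 -> 1, 513 -> 2: rounding up keeps
                 * a one-byte file nonzero. */
                printf("%llu %llu %llu %llu\n",
                       (unsigned long long)bytes_to_blocks(0),
                       (unsigned long long)bytes_to_blocks(1),
                       (unsigned long long)bytes_to_blocks(512),
                       (unsigned long long)bytes_to_blocks(513));
                return 0;
        }
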
index 99a1951..d9a990c 100644 (file)
@@ -58,6 +58,7 @@
 #define SMB2_HMACSHA256_SIZE (32)
 #define SMB2_CMACAES_SIZE (16)
 #define SMB3_SIGNKEY_SIZE (16)
+#define SMB3_GCM128_CRYPTKEY_SIZE (16)
 #define SMB3_GCM256_CRYPTKEY_SIZE (32)
 
 /* Maximum buffer size value we can send with 1 credit */
index b50164e..aac384f 100644 (file)
@@ -754,8 +754,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
                }
        }
        spin_unlock(&cifs_tcp_ses_lock);
-       cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
-       return false;
+       cifs_dbg(FYI, "No file id matched, oplock break ignored\n");
+       return true;
 }
 
 void
index 9bae7e8..f703204 100644 (file)
@@ -2038,6 +2038,7 @@ smb2_duplicate_extents(const unsigned int xid,
 {
        int rc;
        unsigned int ret_data_len;
+       struct inode *inode;
        struct duplicate_extents_to_file dup_ext_buf;
        struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);
 
@@ -2054,10 +2055,21 @@ smb2_duplicate_extents(const unsigned int xid,
        cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n",
                src_off, dest_off, len);
 
-       rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
-       if (rc)
-               goto duplicate_extents_out;
+       inode = d_inode(trgtfile->dentry);
+       if (inode->i_size < dest_off + len) {
+               rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
+               if (rc)
+                       goto duplicate_extents_out;
 
+               /*
+                * Although we could also set a plausible allocation size
+                * (i_blocks) here in addition to setting the file size, in
+                * reflink it is likely that the target file is sparse. Its
+                * allocation size will be queried on the next revalidate, but
+                * it is important to make sure the file's cached size is
+                * updated immediately.
+                */
+               cifs_setsize(inode, dest_off + len);
+       }
        rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
                        trgtfile->fid.volatile_fid,
                        FSCTL_DUPLICATE_EXTENTS_TO_FILE,
@@ -4158,7 +4170,7 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
                        if (ses->Suid == ses_id) {
                                ses_enc_key = enc ? ses->smb3encryptionkey :
                                        ses->smb3decryptionkey;
-                               memcpy(key, ses_enc_key, SMB3_SIGN_KEY_SIZE);
+                               memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE);
                                spin_unlock(&cifs_tcp_ses_lock);
                                return 0;
                        }
@@ -4185,7 +4197,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
        int rc = 0;
        struct scatterlist *sg;
        u8 sign[SMB2_SIGNATURE_SIZE] = {};
-       u8 key[SMB3_SIGN_KEY_SIZE];
+       u8 key[SMB3_ENC_DEC_KEY_SIZE];
        struct aead_request *req;
        char *iv;
        unsigned int iv_len;
@@ -4209,10 +4221,11 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
        tfm = enc ? server->secmech.ccmaesencrypt :
                                                server->secmech.ccmaesdecrypt;
 
-       if (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)
+       if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
+               (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
                rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE);
        else
-               rc = crypto_aead_setkey(tfm, key, SMB3_SIGN_KEY_SIZE);
+               rc = crypto_aead_setkey(tfm, key, SMB3_GCM128_CRYPTKEY_SIZE);
 
        if (rc) {
                cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
index ebccd71..e6fa76a 100644 (file)
@@ -298,7 +298,8 @@ static int generate_key(struct cifs_ses *ses, struct kvec label,
 {
        unsigned char zero = 0x0;
        __u8 i[4] = {0, 0, 0, 1};
-       __u8 L[4] = {0, 0, 0, 128};
+       __u8 L128[4] = {0, 0, 0, 128};
+       __u8 L256[4] = {0, 0, 1, 0};
        int rc = 0;
        unsigned char prfhash[SMB2_HMACSHA256_SIZE];
        unsigned char *hashptr = prfhash;
@@ -354,8 +355,14 @@ static int generate_key(struct cifs_ses *ses, struct kvec label,
                goto smb3signkey_ret;
        }
 
-       rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash,
-                               L, 4);
+       if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
+               (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) {
+               rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash,
+                               L256, 4);
+       } else {
+               rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash,
+                               L128, 4);
+       }
        if (rc) {
                cifs_server_dbg(VFS, "%s: Could not update with L\n", __func__);
                goto smb3signkey_ret;
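
In the counter-mode KDF used here (SP 800-108 style), the trailing L field is the desired output length in bits, big-endian; encoding 128 and 256 reproduces the L128/L256 arrays above. A standalone check of that encoding:

        #include <stdint.h>
        #include <stdio.h>

        static void encode_be32(uint32_t v, uint8_t out[4])
        {
                out[0] = v >> 24;
                out[1] = v >> 16;
                out[2] = v >> 8;
                out[3] = v;
        }

        int main(void)
        {
                uint8_t l128[4], l256[4];

                encode_be32(128, l128); /* -> {0, 0, 0, 128} */
                encode_be32(256, l256); /* -> {0, 0, 1, 0}   */
                printf("{%u,%u,%u,%u} {%u,%u,%u,%u}\n",
                       l128[0], l128[1], l128[2], l128[3],
                       l256[0], l256[1], l256[2], l256[3]);
                return 0;
        }
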
@@ -390,6 +397,9 @@ generate_smb3signingkey(struct cifs_ses *ses,
                        const struct derivation_triplet *ptriplet)
 {
        int rc;
+#ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS
+       struct TCP_Server_Info *server = ses->server;
+#endif
 
        /*
         * All channels use the same encryption/decryption keys but
@@ -422,11 +432,11 @@ generate_smb3signingkey(struct cifs_ses *ses,
                rc = generate_key(ses, ptriplet->encryption.label,
                                  ptriplet->encryption.context,
                                  ses->smb3encryptionkey,
-                                 SMB3_SIGN_KEY_SIZE);
+                                 SMB3_ENC_DEC_KEY_SIZE);
                rc = generate_key(ses, ptriplet->decryption.label,
                                  ptriplet->decryption.context,
                                  ses->smb3decryptionkey,
-                                 SMB3_SIGN_KEY_SIZE);
+                                 SMB3_ENC_DEC_KEY_SIZE);
                if (rc)
                        return rc;
        }
@@ -442,14 +452,23 @@ generate_smb3signingkey(struct cifs_ses *ses,
         */
        cifs_dbg(VFS, "Session Id    %*ph\n", (int)sizeof(ses->Suid),
                        &ses->Suid);
+       cifs_dbg(VFS, "Cipher type   %d\n", server->cipher_type);
        cifs_dbg(VFS, "Session Key   %*ph\n",
                 SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response);
        cifs_dbg(VFS, "Signing Key   %*ph\n",
                 SMB3_SIGN_KEY_SIZE, ses->smb3signingkey);
-       cifs_dbg(VFS, "ServerIn Key  %*ph\n",
-                SMB3_SIGN_KEY_SIZE, ses->smb3encryptionkey);
-       cifs_dbg(VFS, "ServerOut Key %*ph\n",
-                SMB3_SIGN_KEY_SIZE, ses->smb3decryptionkey);
+       if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
+               (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) {
+               cifs_dbg(VFS, "ServerIn Key  %*ph\n",
+                               SMB3_GCM256_CRYPTKEY_SIZE, ses->smb3encryptionkey);
+               cifs_dbg(VFS, "ServerOut Key %*ph\n",
+                               SMB3_GCM256_CRYPTKEY_SIZE, ses->smb3decryptionkey);
+       } else {
+               cifs_dbg(VFS, "ServerIn Key  %*ph\n",
+                               SMB3_GCM128_CRYPTKEY_SIZE, ses->smb3encryptionkey);
+               cifs_dbg(VFS, "ServerOut Key %*ph\n",
+                               SMB3_GCM128_CRYPTKEY_SIZE, ses->smb3decryptionkey);
+       }
 #endif
        return rc;
 }
index 007d994..c1725b5 100644 (file)
@@ -1196,9 +1196,12 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
        /*
         * Compounding is never used during session establish.
         */
-       if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP))
+       if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
+               mutex_lock(&server->srv_mutex);
                smb311_update_preauth_hash(ses, rqst[0].rq_iov,
                                           rqst[0].rq_nvec);
+               mutex_unlock(&server->srv_mutex);
+       }
 
        for (i = 0; i < num_rqst; i++) {
                rc = wait_for_response(server, midQ[i]);
@@ -1266,7 +1269,9 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                        .iov_base = resp_iov[0].iov_base,
                        .iov_len = resp_iov[0].iov_len
                };
+               mutex_lock(&server->srv_mutex);
                smb311_update_preauth_hash(ses, &iov, 1);
+               mutex_unlock(&server->srv_mutex);
        }
 
 out:
index b61491b..b2e86e7 100644 (file)
@@ -812,6 +812,7 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
                    struct buffer_head *map_bh)
 {
        int ret = 0;
+       int boundary = sdio->boundary;  /* dio_send_cur_page may clear it */
 
        if (dio->op == REQ_OP_WRITE) {
                /*
@@ -850,10 +851,10 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
        sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits;
 out:
        /*
-        * If sdio->boundary then we want to schedule the IO now to
+        * If boundary then we want to schedule the IO now to
         * avoid metadata seeks.
         */
-       if (sdio->boundary) {
+       if (boundary) {
                ret = dio_send_cur_page(dio, sdio, map_bh);
                if (sdio->bio)
                        dio_bio_submit(dio, sdio);
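
The fix snapshots sdio->boundary on entry because dio_send_cur_page() can clear it as a side effect, leaving the old test at out: unable to fire. A toy reproduction of the snapshot-before-call pattern (all names illustrative):

        #include <stdio.h>

        struct sdio { int boundary; };

        static void send_cur_page(struct sdio *s)
        {
                s->boundary = 0;        /* side effect that broke the old code */
        }

        static void submit_page(struct sdio *s)
        {
                int boundary = s->boundary;     /* snapshot before the call */

                send_cur_page(s);
                if (boundary)                   /* testing s->boundary here would always fail */
                        printf("scheduling IO now to avoid metadata seeks\n");
        }

        int main(void)
        {
                struct sdio s = { .boundary = 1 };

                submit_page(&s);
                return 0;
        }
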
index f45f9fe..74a5172 100644 (file)
@@ -626,27 +626,41 @@ int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
 
 /**
  * ext4_should_retry_alloc() - check if a block allocation should be retried
- * @sb:                        super block
- * @retries:           number of attemps has been made
+ * @sb:                        superblock
+ * @retries:           number of retry attempts made so far
  *
- * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
- * it is profitable to retry the operation, this function will wait
- * for the current or committing transaction to complete, and then
- * return TRUE.  We will only retry once.
+ * ext4_should_retry_alloc() is called when ENOSPC is returned while
+ * attempting to allocate blocks.  If there's an indication that a pending
+ * journal transaction might free some space and allow another attempt to
+ * succeed, this function will wait for the current or committing transaction
+ * to complete and then return TRUE.
  */
 int ext4_should_retry_alloc(struct super_block *sb, int *retries)
 {
-       if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
-           (*retries)++ > 1 ||
-           !EXT4_SB(sb)->s_journal)
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+       if (!sbi->s_journal)
                return 0;
 
-       smp_mb();
-       if (EXT4_SB(sb)->s_mb_free_pending == 0)
+       if (++(*retries) > 3) {
+               percpu_counter_inc(&sbi->s_sra_exceeded_retry_limit);
                return 0;
+       }
 
+       /*
+        * if there's no indication that blocks are about to be freed it's
+        * possible we just missed a transaction commit that did so
+        */
+       smp_mb();
+       if (sbi->s_mb_free_pending == 0)
+               return ext4_has_free_clusters(sbi, 1, 0);
+
+       /*
+        * it's possible we've just missed a transaction commit here,
+        * so ignore the returned status
+        */
        jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
-       jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
+       (void) jbd2_journal_force_commit_nested(sbi->s_journal);
        return 1;
 }
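
Callers of ext4_should_retry_alloc() are expected to loop on ENOSPC for as long as it returns true. A hedged userspace sketch of that caller-side loop, with fake_alloc() standing in for a block allocation and the same three-try cap as the new code (all names illustrative):

        #include <errno.h>
        #include <stdio.h>

        static int attempts_left = 2;

        static int fake_alloc(void)
        {
                return attempts_left-- > 0 ? -ENOSPC : 0;
        }

        static int fake_should_retry(int *retries)
        {
                return ++(*retries) <= 3;       /* same 3-try cap as above */
        }

        int main(void)
        {
                int retries = 0, err;

                do {
                        err = fake_alloc();
                } while (err == -ENOSPC && fake_should_retry(&retries));

                printf("err=%d after %d retries\n", err, retries);
                return 0;
        }
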
 
index 644fd69..826a56e 100644 (file)
@@ -1484,6 +1484,7 @@ struct ext4_sb_info {
        struct percpu_counter s_freeinodes_counter;
        struct percpu_counter s_dirs_counter;
        struct percpu_counter s_dirtyclusters_counter;
+       struct percpu_counter s_sra_exceeded_retry_limit;
        struct blockgroup_lock *s_blockgroup_lock;
        struct proc_dir_entry *s_proc;
        struct kobject s_kobj;
@@ -2793,6 +2794,8 @@ void __ext4_fc_track_link(handle_t *handle, struct inode *inode,
        struct dentry *dentry);
 void ext4_fc_track_unlink(handle_t *handle, struct dentry *dentry);
 void ext4_fc_track_link(handle_t *handle, struct dentry *dentry);
+void __ext4_fc_track_create(handle_t *handle, struct inode *inode,
+                           struct dentry *dentry);
 void ext4_fc_track_create(handle_t *handle, struct dentry *dentry);
 void ext4_fc_track_inode(handle_t *handle, struct inode *inode);
 void ext4_fc_mark_ineligible(struct super_block *sb, int reason);
index 77c7c8a..77c84d6 100644 (file)
@@ -4382,7 +4382,7 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
 {
        struct inode *inode = file_inode(file);
        handle_t *handle;
-       int ret, ret2 = 0, ret3 = 0;
+       int ret = 0, ret2 = 0, ret3 = 0;
        int retries = 0;
        int depth = 0;
        struct ext4_map_blocks map;
index 6c4f19b..7541d0b 100644 (file)
@@ -513,10 +513,10 @@ void ext4_fc_track_link(handle_t *handle, struct dentry *dentry)
        __ext4_fc_track_link(handle, d_inode(dentry), dentry);
 }
 
-void ext4_fc_track_create(handle_t *handle, struct dentry *dentry)
+void __ext4_fc_track_create(handle_t *handle, struct inode *inode,
+                         struct dentry *dentry)
 {
        struct __track_dentry_update_args args;
-       struct inode *inode = d_inode(dentry);
        int ret;
 
        args.dentry = dentry;
@@ -527,6 +527,11 @@ void ext4_fc_track_create(handle_t *handle, struct dentry *dentry)
        trace_ext4_fc_track_create(inode, dentry, ret);
 }
 
+void ext4_fc_track_create(handle_t *handle, struct dentry *dentry)
+{
+       __ext4_fc_track_create(handle, d_inode(dentry), dentry);
+}
+
 /* __track_fn for inode tracking */
 static int __track_inode(struct inode *inode, void *arg, bool update)
 {
index 650c5ac..0948a43 100644 (file)
@@ -1938,13 +1938,13 @@ static int __ext4_journalled_writepage(struct page *page,
        if (!ret)
                ret = err;
 
-       if (!ext4_has_inline_data(inode))
-               ext4_walk_page_buffers(NULL, page_bufs, 0, len,
-                                      NULL, bput_one);
        ext4_set_inode_state(inode, EXT4_STATE_JDATA);
 out:
        unlock_page(page);
 out_no_pagelock:
+       if (!inline_data && page_bufs)
+               ext4_walk_page_buffers(NULL, page_bufs, 0, len,
+                                      NULL, bput_one);
        brelse(inode_bh);
        return ret;
 }
@@ -5026,7 +5026,7 @@ static int ext4_do_update_inode(handle_t *handle,
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct buffer_head *bh = iloc->bh;
        struct super_block *sb = inode->i_sb;
-       int err = 0, rc, block;
+       int err = 0, block;
        int need_datasync = 0, set_large_file = 0;
        uid_t i_uid;
        gid_t i_gid;
@@ -5138,9 +5138,9 @@ static int ext4_do_update_inode(handle_t *handle,
                                              bh->b_data);
 
        BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
-       rc = ext4_handle_dirty_metadata(handle, NULL, bh);
-       if (!err)
-               err = rc;
+       err = ext4_handle_dirty_metadata(handle, NULL, bh);
+       if (err)
+               goto out_brelse;
        ext4_clear_inode_state(inode, EXT4_STATE_NEW);
        if (set_large_file) {
                BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access");
@@ -5387,8 +5387,10 @@ int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                        inode->i_gid = attr->ia_gid;
                error = ext4_mark_inode_dirty(handle, inode);
                ext4_journal_stop(handle);
-               if (unlikely(error))
+               if (unlikely(error)) {
+                       ext4_fc_stop_update(inode);
                        return error;
+               }
        }
 
        if (attr->ia_valid & ATTR_SIZE) {
index 99bf091..a02fadf 100644 (file)
@@ -2709,8 +2709,15 @@ static int ext4_mb_init_backend(struct super_block *sb)
        }
 
        if (ext4_has_feature_flex_bg(sb)) {
-               /* a single flex group is supposed to be read by a single IO */
-               sbi->s_mb_prefetch = min(1 << sbi->s_es->s_log_groups_per_flex,
+               /*
+                * A single flex group is supposed to be read by a single IO.
+                * 2 ^ s_log_groups_per_flex must fit in s_mb_prefetch, which
+                * is an unsigned integer, so the shift must be less than 32.
+                */
+               if (sbi->s_es->s_log_groups_per_flex >= 32) {
+                       ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
+                       goto err_freesgi;
+               }
+               sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
                        BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
                sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
        } else {
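
Shifting a 32-bit value by 32 or more positions is undefined in C, which is why the new check bounds the on-disk exponent before computing 1 << s_log_groups_per_flex. A standalone illustration:

        #include <stdio.h>

        int main(void)
        {
                unsigned int log_groups_per_flex = 36;  /* hostile on-disk value */

                if (log_groups_per_flex >= 32) {
                        fprintf(stderr, "too many log groups per flexible block group\n");
                        return 1;
                }
                printf("%u\n", 1U << log_groups_per_flex);
                return 0;
        }
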
index 686bf98..883e2a7 100644 (file)
@@ -3613,6 +3613,31 @@ static int ext4_setent(handle_t *handle, struct ext4_renament *ent,
        return retval;
 }
 
+static void ext4_resetent(handle_t *handle, struct ext4_renament *ent,
+                         unsigned ino, unsigned file_type)
+{
+       struct ext4_renament old = *ent;
+       int retval = 0;
+
+       /*
+        * old->de could have moved from under us during the conversion to an
+        * indexed dir, so old->de may no longer be valid and we need to find
+        * it again before resetting the old inode info.
+        */
+       old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
+       if (IS_ERR(old.bh))
+               retval = PTR_ERR(old.bh);
+       if (!old.bh)
+               retval = -ENOENT;
+       if (retval) {
+               ext4_std_error(old.dir->i_sb, retval);
+               return;
+       }
+
+       ext4_setent(handle, &old, ino, file_type);
+       brelse(old.bh);
+}
+
 static int ext4_find_delete_entry(handle_t *handle, struct inode *dir,
                                  const struct qstr *d_name)
 {
@@ -3774,14 +3799,14 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
         */
        retval = -ENOENT;
        if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino)
-               goto end_rename;
+               goto release_bh;
 
        new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
                                 &new.de, &new.inlined);
        if (IS_ERR(new.bh)) {
                retval = PTR_ERR(new.bh);
                new.bh = NULL;
-               goto end_rename;
+               goto release_bh;
        }
        if (new.bh) {
                if (!new.inode) {
@@ -3798,15 +3823,13 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
                handle = ext4_journal_start(old.dir, EXT4_HT_DIR, credits);
                if (IS_ERR(handle)) {
                        retval = PTR_ERR(handle);
-                       handle = NULL;
-                       goto end_rename;
+                       goto release_bh;
                }
        } else {
                whiteout = ext4_whiteout_for_rename(mnt_userns, &old, credits, &handle);
                if (IS_ERR(whiteout)) {
                        retval = PTR_ERR(whiteout);
-                       whiteout = NULL;
-                       goto end_rename;
+                       goto release_bh;
                }
        }
 
@@ -3850,6 +3873,7 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
                retval = ext4_mark_inode_dirty(handle, whiteout);
                if (unlikely(retval))
                        goto end_rename;
+
        }
        if (!new.bh) {
                retval = ext4_add_entry(handle, new.dentry, old.inode);
@@ -3923,6 +3947,8 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
                        ext4_fc_track_unlink(handle, new.dentry);
                __ext4_fc_track_link(handle, old.inode, new.dentry);
                __ext4_fc_track_unlink(handle, old.inode, old.dentry);
+               if (whiteout)
+                       __ext4_fc_track_create(handle, whiteout, old.dentry);
        }
 
        if (new.inode) {
@@ -3937,19 +3963,21 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
 end_rename:
        if (whiteout) {
                if (retval) {
-                       ext4_setent(handle, &old,
-                               old.inode->i_ino, old_file_type);
+                       ext4_resetent(handle, &old,
+                                     old.inode->i_ino, old_file_type);
                        drop_nlink(whiteout);
+                       ext4_orphan_add(handle, whiteout);
                }
                unlock_new_inode(whiteout);
+               ext4_journal_stop(handle);
                iput(whiteout);
-
+       } else {
+               ext4_journal_stop(handle);
        }
+release_bh:
        brelse(old.dir_bh);
        brelse(old.bh);
        brelse(new.bh);
-       if (handle)
-               ext4_journal_stop(handle);
        return retval;
 }
 
index ad34a37..b969368 100644 (file)
@@ -1210,6 +1210,7 @@ static void ext4_put_super(struct super_block *sb)
        percpu_counter_destroy(&sbi->s_freeinodes_counter);
        percpu_counter_destroy(&sbi->s_dirs_counter);
        percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+       percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
        percpu_free_rwsem(&sbi->s_writepages_rwsem);
 #ifdef CONFIG_QUOTA
        for (i = 0; i < EXT4_MAXQUOTAS; i++)
@@ -5011,6 +5012,9 @@ no_journal:
        if (!err)
                err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
                                          GFP_KERNEL);
+       if (!err)
+               err = percpu_counter_init(&sbi->s_sra_exceeded_retry_limit, 0,
+                                         GFP_KERNEL);
        if (!err)
                err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
 
@@ -5124,6 +5128,7 @@ failed_mount6:
        percpu_counter_destroy(&sbi->s_freeinodes_counter);
        percpu_counter_destroy(&sbi->s_dirs_counter);
        percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+       percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
        percpu_free_rwsem(&sbi->s_writepages_rwsem);
 failed_mount5:
        ext4_ext_release(sb);
@@ -5149,8 +5154,8 @@ failed_mount_wq:
 failed_mount3a:
        ext4_es_unregister_shrinker(sbi);
 failed_mount3:
-       del_timer_sync(&sbi->s_err_report);
        flush_work(&sbi->s_error_work);
+       del_timer_sync(&sbi->s_err_report);
        if (sbi->s_mmp_tsk)
                kthread_stop(sbi->s_mmp_tsk);
 failed_mount2:
index 075aa3a..a3d0827 100644 (file)
@@ -24,6 +24,7 @@ typedef enum {
        attr_session_write_kbytes,
        attr_lifetime_write_kbytes,
        attr_reserved_clusters,
+       attr_sra_exceeded_retry_limit,
        attr_inode_readahead,
        attr_trigger_test_error,
        attr_first_error_time,
@@ -202,6 +203,7 @@ EXT4_ATTR_FUNC(delayed_allocation_blocks, 0444);
 EXT4_ATTR_FUNC(session_write_kbytes, 0444);
 EXT4_ATTR_FUNC(lifetime_write_kbytes, 0444);
 EXT4_ATTR_FUNC(reserved_clusters, 0644);
+EXT4_ATTR_FUNC(sra_exceeded_retry_limit, 0444);
 
 EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, inode_readahead,
                 ext4_sb_info, s_inode_readahead_blks);
@@ -251,6 +253,7 @@ static struct attribute *ext4_attrs[] = {
        ATTR_LIST(session_write_kbytes),
        ATTR_LIST(lifetime_write_kbytes),
        ATTR_LIST(reserved_clusters),
+       ATTR_LIST(sra_exceeded_retry_limit),
        ATTR_LIST(inode_readahead_blks),
        ATTR_LIST(inode_goal),
        ATTR_LIST(mb_stats),
@@ -374,6 +377,10 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
                return snprintf(buf, PAGE_SIZE, "%llu\n",
                                (unsigned long long)
                                atomic64_read(&sbi->s_resv_clusters));
+       case attr_sra_exceeded_retry_limit:
+               return snprintf(buf, PAGE_SIZE, "%llu\n",
+                               (unsigned long long)
+                       percpu_counter_sum(&sbi->s_sra_exceeded_retry_limit));
        case attr_inode_readahead:
        case attr_pointer_ui:
                if (!ptr)
index 5b7ba8f..00e3cbd 100644 (file)
@@ -201,55 +201,76 @@ static int ext4_end_enable_verity(struct file *filp, const void *desc,
        struct inode *inode = file_inode(filp);
        const int credits = 2; /* superblock and inode for ext4_orphan_del() */
        handle_t *handle;
+       struct ext4_iloc iloc;
        int err = 0;
-       int err2;
 
-       if (desc != NULL) {
-               /* Succeeded; write the verity descriptor. */
-               err = ext4_write_verity_descriptor(inode, desc, desc_size,
-                                                  merkle_tree_size);
-
-               /* Write all pages before clearing VERITY_IN_PROGRESS. */
-               if (!err)
-                       err = filemap_write_and_wait(inode->i_mapping);
-       }
+       /*
+        * If an error already occurred (which fs/verity/ signals by passing
+        * desc == NULL), then only clean-up is needed.
+        */
+       if (desc == NULL)
+               goto cleanup;
 
-       /* If we failed, truncate anything we wrote past i_size. */
-       if (desc == NULL || err)
-               ext4_truncate(inode);
+       /* Append the verity descriptor. */
+       err = ext4_write_verity_descriptor(inode, desc, desc_size,
+                                          merkle_tree_size);
+       if (err)
+               goto cleanup;
 
        /*
-        * We must always clean up by clearing EXT4_STATE_VERITY_IN_PROGRESS and
-        * deleting the inode from the orphan list, even if something failed.
-        * If everything succeeded, we'll also set the verity bit in the same
-        * transaction.
+        * Write all pages (both data and verity metadata).  Note that this must
+        * happen before clearing EXT4_STATE_VERITY_IN_PROGRESS; otherwise pages
+        * beyond i_size won't be written properly.  For crash consistency, this
+        * also must happen before the verity inode flag gets persisted.
         */
+       err = filemap_write_and_wait(inode->i_mapping);
+       if (err)
+               goto cleanup;
 
-       ext4_clear_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS);
+       /*
+        * Finally, set the verity inode flag and remove the inode from the
+        * orphan list (in a single transaction).
+        */
 
        handle = ext4_journal_start(inode, EXT4_HT_INODE, credits);
        if (IS_ERR(handle)) {
-               ext4_orphan_del(NULL, inode);
-               return PTR_ERR(handle);
+               err = PTR_ERR(handle);
+               goto cleanup;
        }
 
-       err2 = ext4_orphan_del(handle, inode);
-       if (err2)
-               goto out_stop;
+       err = ext4_orphan_del(handle, inode);
+       if (err)
+               goto stop_and_cleanup;
 
-       if (desc != NULL && !err) {
-               struct ext4_iloc iloc;
+       err = ext4_reserve_inode_write(handle, inode, &iloc);
+       if (err)
+               goto stop_and_cleanup;
 
-               err = ext4_reserve_inode_write(handle, inode, &iloc);
-               if (err)
-                       goto out_stop;
-               ext4_set_inode_flag(inode, EXT4_INODE_VERITY);
-               ext4_set_inode_flags(inode, false);
-               err = ext4_mark_iloc_dirty(handle, inode, &iloc);
-       }
-out_stop:
+       ext4_set_inode_flag(inode, EXT4_INODE_VERITY);
+       ext4_set_inode_flags(inode, false);
+       err = ext4_mark_iloc_dirty(handle, inode, &iloc);
+       if (err)
+               goto stop_and_cleanup;
+
+       ext4_journal_stop(handle);
+
+       ext4_clear_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS);
+       return 0;
+
+stop_and_cleanup:
        ext4_journal_stop(handle);
-       return err ?: err2;
+cleanup:
+       /*
+        * Verity failed to be enabled, so clean up by truncating any verity
+        * metadata that was written beyond i_size (both from cache and from
+        * disk), removing the inode from the orphan list (if it wasn't done
+        * already), and clearing EXT4_STATE_VERITY_IN_PROGRESS.
+        */
+       truncate_inode_pages(inode->i_mapping, inode->i_size);
+       ext4_truncate(inode);
+       ext4_orphan_del(NULL, inode);
+       ext4_clear_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS);
+       return err;
 }
 
 static int ext4_get_verity_descriptor_location(struct inode *inode,
index 3722085..6c10182 100644 (file)
@@ -1462,6 +1462,9 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
        if (!ce)
                return NULL;
 
+       WARN_ON_ONCE(ext4_handle_valid(journal_current_handle()) &&
+                    !(current->flags & PF_MEMALLOC_NOFS));
+
        ea_data = kvmalloc(value_len, GFP_KERNEL);
        if (!ea_data) {
                mb_cache_entry_put(ea_inode_cache, ce);
@@ -2327,6 +2330,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
                        error = -ENOSPC;
                        goto cleanup;
                }
+               WARN_ON_ONCE(!(current->flags & PF_MEMALLOC_NOFS));
        }
 
        error = ext4_reserve_inode_write(handle, inode, &is.iloc);
@@ -2400,7 +2404,7 @@ retry_inode:
                                 * external inode if possible.
                                 */
                                if (ext4_has_feature_ea_inode(inode->i_sb) &&
-                                   !i.in_inode) {
+                                   i.value_len && !i.in_inode) {
                                        i.in_inode = 1;
                                        goto retry_inode;
                                }
index f3a4bac..f633348 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -629,17 +629,30 @@ int close_fd(unsigned fd)
 }
 EXPORT_SYMBOL(close_fd); /* for ksys_close() */
 
+/**
+ * last_fd - return last valid index into fd table
+ * @fdt: file descriptor table
+ *
+ * Context: Either rcu read lock or files_lock must be held.
+ *
+ * Returns: Last valid index into fdtable.
+ */
+static inline unsigned last_fd(struct fdtable *fdt)
+{
+       return fdt->max_fds - 1;
+}
+
 static inline void __range_cloexec(struct files_struct *cur_fds,
                                   unsigned int fd, unsigned int max_fd)
 {
        struct fdtable *fdt;
 
-       if (fd > max_fd)
-               return;
-
+       /* make sure we're using the correct maximum value */
        spin_lock(&cur_fds->file_lock);
        fdt = files_fdtable(cur_fds);
-       bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
+       max_fd = min(last_fd(fdt), max_fd);
+       if (fd <= max_fd)
+               bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
        spin_unlock(&cur_fds->file_lock);
 }
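
The fix clamps the caller-supplied max_fd to the table's last valid index before setting the bitmap range, so a huge max_fd can no longer reach past the fdtable. A small userspace analogue, with a plain char array standing in for the close_on_exec bitmap (all names illustrative):

        #include <stdio.h>

        #define MAX_FDS 64

        static unsigned char close_on_exec[MAX_FDS];

        static unsigned int last_fd(void)
        {
                return MAX_FDS - 1;
        }

        static void range_cloexec(unsigned int fd, unsigned int max_fd)
        {
                unsigned int i;

                max_fd = max_fd < last_fd() ? max_fd : last_fd();
                if (fd > max_fd)
                        return;                 /* nothing valid to mark */
                for (i = fd; i <= max_fd; i++)
                        close_on_exec[i] = 1;
        }

        int main(void)
        {
                range_cloexec(60, ~0U);         /* huge max_fd is clamped to 63 */
                printf("fd 63 marked: %d\n", close_on_exec[63]);
                return 0;
        }
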
 
index c6636b4..c0fee83 100644 (file)
@@ -2229,19 +2229,21 @@ static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
 static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
                           unsigned long arg)
 {
-       int err = -ENOTTY;
+       int res;
+       int oldfd;
+       struct fuse_dev *fud = NULL;
 
-       if (cmd == FUSE_DEV_IOC_CLONE) {
-               int oldfd;
+       if (_IOC_TYPE(cmd) != FUSE_DEV_IOC_MAGIC)
+               return -ENOTTY;
 
-               err = -EFAULT;
-               if (!get_user(oldfd, (__u32 __user *) arg)) {
+       switch (_IOC_NR(cmd)) {
+       case _IOC_NR(FUSE_DEV_IOC_CLONE):
+               res = -EFAULT;
+               if (!get_user(oldfd, (__u32 __user *)arg)) {
                        struct file *old = fget(oldfd);
 
-                       err = -EINVAL;
+                       res = -EINVAL;
                        if (old) {
-                               struct fuse_dev *fud = NULL;
-
                                /*
                                 * Check against file->f_op because CUSE
                                 * uses the same ioctl handler.
@@ -2252,14 +2254,18 @@ static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
 
                                if (fud) {
                                        mutex_lock(&fuse_mutex);
-                                       err = fuse_device_clone(fud->fc, file);
+                                       res = fuse_device_clone(fud->fc, file);
                                        mutex_unlock(&fuse_mutex);
                                }
                                fput(old);
                        }
                }
+               break;
+       default:
+               res = -ENOTTY;
+               break;
        }
-       return err;
+       return res;
 }
 
 const struct file_operations fuse_dev_operations = {
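
The handler above now validates the ioctl magic with _IOC_TYPE() and dispatches on _IOC_NR(), so further FUSE_DEV_IOC_* commands can slot into the switch. A userspace sketch of that dispatch shape; the 0xE5 magic and the EX_* names are placeholders, not FUSE's:

#include <errno.h>
#include <linux/ioctl.h>
#include <stdio.h>

#define EX_IOC_MAGIC	0xE5
#define EX_IOC_CLONE	_IOR(EX_IOC_MAGIC, 0, unsigned int)

static long ex_ioctl(unsigned int cmd)
{
	if (_IOC_TYPE(cmd) != EX_IOC_MAGIC)	/* foreign magic: not ours */
		return -ENOTTY;

	switch (_IOC_NR(cmd)) {
	case _IOC_NR(EX_IOC_CLONE):
		return 0;			/* would clone the device here */
	default:
		return -ENOTTY;			/* our magic, unknown number */
	}
}

int main(void)
{
	printf("%ld %ld\n", ex_ioctl(EX_IOC_CLONE), ex_ioctl(0));
	return 0;
}
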
index 68cca8d..63d97a1 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -863,6 +863,7 @@ static inline u64 fuse_get_attr_version(struct fuse_conn *fc)
 
 static inline void fuse_make_bad(struct inode *inode)
 {
+       remove_inode_hash(inode);
        set_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state);
 }
 
index 8868ac3..4ee6f73 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -1324,8 +1324,15 @@ static int virtio_fs_fill_super(struct super_block *sb, struct fs_context *fsc)
 
        /* virtiofs allocates and installs its own fuse devices */
        ctx->fudptr = NULL;
-       if (ctx->dax)
+       if (ctx->dax) {
+               if (!fs->dax_dev) {
+                       err = -EINVAL;
+                       pr_err("virtio-fs: dax can't be enabled as the filesystem"
+                              " device does not support it.\n");
+                       goto err_free_fuse_devs;
+               }
                ctx->dax_dev = fs->dax_dev;
+       }
        err = fuse_fill_super_common(sb, ctx);
        if (err < 0)
                goto err_free_fuse_devs;
index 97076d3..8fb9602 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -162,8 +162,10 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
        int error;
 
        error = init_threads(sdp);
-       if (error)
+       if (error) {
+               gfs2_withdraw_delayed(sdp);
                return error;
+       }
 
        j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
        if (gfs2_withdrawn(sdp)) {
@@ -750,11 +752,13 @@ void gfs2_freeze_func(struct work_struct *work)
 static int gfs2_freeze(struct super_block *sb)
 {
        struct gfs2_sbd *sdp = sb->s_fs_info;
-       int error = 0;
+       int error;
 
        mutex_lock(&sdp->sd_freeze_mutex);
-       if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN)
+       if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN) {
+               error = -EBUSY;
                goto out;
+       }
 
        for (;;) {
                if (gfs2_withdrawn(sdp)) {
@@ -795,10 +799,10 @@ static int gfs2_unfreeze(struct super_block *sb)
        struct gfs2_sbd *sdp = sb->s_fs_info;
 
        mutex_lock(&sdp->sd_freeze_mutex);
-        if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
+       if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
            !gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
                mutex_unlock(&sdp->sd_freeze_mutex);
-                return 0;
+               return -EINVAL;
        }
 
        gfs2_freeze_unlock(&sdp->sd_freeze_gh);
index 29e4077..743a005 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -144,7 +144,7 @@ static char *follow_link(char *link)
        char *name, *resolved, *end;
        int n;
 
-       name = __getname();
+       name = kmalloc(PATH_MAX, GFP_KERNEL);
        if (!name) {
                n = -ENOMEM;
                goto out_free;
@@ -173,12 +173,11 @@ static char *follow_link(char *link)
                goto out_free;
        }
 
-       __putname(name);
-       kfree(link);
+       kfree(name);
        return resolved;
 
  out_free:
-       __putname(name);
+       kfree(name);
        return ERR_PTR(n);
 }
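
The fix above drops the stray kfree(link) (the caller owns that buffer) and allocates the scratch path with kmalloc() so that every exit releases it with the matching kfree(). A userspace sketch of the invariant; resolve() is illustrative:

#include <limits.h>
#include <stdlib.h>
#include <string.h>

static char *resolve(const char *link)
{
	char *name = malloc(PATH_MAX);
	char *resolved;

	if (!name)
		return NULL;
	/* ... fill "name" via readlink() and friends ... */
	resolved = strdup(link);
	free(name);		/* same allocator on every exit path */
	return resolved;
}

int main(void)
{
	free(resolve("/tmp/x"));
	return 0;
}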
 
index 0ae9eca..4eba531 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -16,7 +16,6 @@
 #include <linux/rculist_nulls.h>
 #include <linux/cpu.h>
 #include <linux/tracehook.h>
-#include <linux/freezer.h>
 
 #include "../kernel/sched/sched.h"
 #include "io-wq.h"
@@ -386,13 +385,14 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
        return NULL;
 }
 
-static void io_flush_signals(void)
+static bool io_flush_signals(void)
 {
-       if (unlikely(test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))) {
-               if (current->task_works)
-                       task_work_run();
-               clear_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL);
+       if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL))) {
+               __set_current_state(TASK_RUNNING);
+               tracehook_notify_signal();
+               return true;
        }
+       return false;
 }
 
 static void io_assign_current_work(struct io_worker *worker,
@@ -415,6 +415,7 @@ static void io_worker_handle_work(struct io_worker *worker)
 {
        struct io_wqe *wqe = worker->wqe;
        struct io_wq *wq = wqe->wq;
+       bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);
 
        do {
                struct io_wq_work *work;
@@ -444,6 +445,9 @@ get_next:
                        unsigned int hash = io_get_work_hash(work);
 
                        next_hashed = wq_next_work(work);
+
+                       if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
+                               work->flags |= IO_WQ_WORK_CANCEL;
                        wq->do_work(work);
                        io_assign_current_work(worker, NULL);
 
@@ -484,10 +488,12 @@ static int io_wqe_worker(void *data)
        worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
        io_wqe_inc_running(worker);
 
-       sprintf(buf, "iou-wrk-%d", wq->task_pid);
+       snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task_pid);
        set_task_comm(current, buf);
 
        while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
+               long ret;
+
                set_current_state(TASK_INTERRUPTIBLE);
 loop:
                raw_spin_lock_irq(&wqe->lock);
@@ -497,11 +503,18 @@ loop:
                }
                __io_worker_idle(wqe, worker);
                raw_spin_unlock_irq(&wqe->lock);
-               io_flush_signals();
-               if (schedule_timeout(WORKER_IDLE_TIMEOUT))
+               if (io_flush_signals())
                        continue;
-               if (fatal_signal_pending(current))
+               ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
+               if (signal_pending(current)) {
+                       struct ksignal ksig;
+
+                       if (!get_signal(&ksig))
+                               continue;
                        break;
+               }
+               if (ret)
+                       continue;
                /* timed out, exit unless we're the fixed worker */
                if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
                    !(worker->flags & IO_WORKER_F_FIXED))
@@ -702,15 +715,20 @@ static int io_wq_manager(void *data)
        char buf[TASK_COMM_LEN];
        int node;
 
-       sprintf(buf, "iou-mgr-%d", wq->task_pid);
+       snprintf(buf, sizeof(buf), "iou-mgr-%d", wq->task_pid);
        set_task_comm(current, buf);
 
        do {
                set_current_state(TASK_INTERRUPTIBLE);
                io_wq_check_workers(wq);
                schedule_timeout(HZ);
-               if (fatal_signal_pending(current))
+               if (signal_pending(current)) {
+                       struct ksignal ksig;
+
+                       if (!get_signal(&ksig))
+                               continue;
                        set_bit(IO_WQ_BIT_EXIT, &wq->state);
+               }
        } while (!test_bit(IO_WQ_BIT_EXIT, &wq->state));
 
        io_wq_check_workers(wq);
@@ -1057,7 +1075,11 @@ static void io_wq_destroy(struct io_wq *wq)
 
        for_each_node(node) {
                struct io_wqe *wqe = wq->wqes[node];
-               WARN_ON_ONCE(!wq_list_empty(&wqe->work_list));
+               struct io_cb_cancel_data match = {
+                       .fn             = io_wq_work_match_all,
+                       .cancel_all     = true,
+               };
+               io_wqe_cancel_pending_work(wqe, &match);
                kfree(wqe);
        }
        io_wq_put_hash(wq->hash);
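
Two hunks above replace sprintf() with snprintf() when formatting thread names into a TASK_COMM_LEN buffer. A userspace sketch of why the bounded form is the defensive choice here:

#include <stdio.h>

#define TASK_COMM_LEN 16	/* kernel task names: 16 bytes incl. NUL */

int main(void)
{
	char buf[TASK_COMM_LEN];

	/* snprintf() can never overrun buf and always NUL-terminates,
	 * however wide the pid happens to be. */
	snprintf(buf, sizeof(buf), "iou-wrk-%d", 4194304);
	puts(buf);
	return 0;
}
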
index 1ac2f32..80d5905 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -2,7 +2,6 @@
 #define INTERNAL_IO_WQ_H
 
 #include <linux/refcount.h>
-#include <linux/io_uring.h>
 
 struct io_wq;
 
@@ -21,6 +20,15 @@ enum io_wq_cancel {
        IO_WQ_CANCEL_NOTFOUND,  /* work not found */
 };
 
+struct io_wq_work_node {
+       struct io_wq_work_node *next;
+};
+
+struct io_wq_work_list {
+       struct io_wq_work_node *first;
+       struct io_wq_work_node *last;
+};
+
 static inline void wq_list_add_after(struct io_wq_work_node *node,
                                     struct io_wq_work_node *pos,
                                     struct io_wq_work_list *list)
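
io_wq_work_node and io_wq_work_list above form an intrusive singly-linked list: the node is embedded in the work item and the list tracks first/last for O(1) append. A userspace sketch of the same shape; work and list_append are illustrative names:

#include <stddef.h>
#include <stdio.h>

struct work_node { struct work_node *next; };
struct work_list { struct work_node *first, *last; };

struct work {
	int id;
	struct work_node node;		/* embedded: no extra allocation */
};

static void list_append(struct work_list *l, struct work_node *n)
{
	n->next = NULL;
	if (l->last)
		l->last->next = n;
	else
		l->first = n;
	l->last = n;
}

int main(void)
{
	struct work a = { .id = 1 }, b = { .id = 2 };
	struct work_list wl = { NULL, NULL };
	struct work_node *n;

	list_append(&wl, &a.node);
	list_append(&wl, &b.node);
	for (n = wl.first; n; n = n->next) {
		struct work *w = (struct work *)((char *)n -
				 offsetof(struct work, node));
		printf("%d\n", w->id);
	}
	return 0;
}
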
index a4bce17..bd14327 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -78,7 +78,6 @@
 #include <linux/task_work.h>
 #include <linux/pagemap.h>
 #include <linux/io_uring.h>
-#include <linux/freezer.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/io_uring.h>
@@ -258,7 +257,8 @@ enum {
 
 struct io_sq_data {
        refcount_t              refs;
-       struct rw_semaphore     rw_lock;
+       atomic_t                park_pending;
+       struct mutex            lock;
 
        /* ctx's that are using this sqd */
        struct list_head        ctx_list;
@@ -273,6 +273,7 @@ struct io_sq_data {
 
        unsigned long           state;
        struct completion       exited;
+       struct callback_head    *park_task_work;
 };
 
 #define IO_IOPOLL_BATCH                        8
@@ -402,7 +403,7 @@ struct io_ring_ctx {
        struct socket           *ring_sock;
 #endif
 
-       struct idr              io_buffer_idr;
+       struct xarray           io_buffers;
 
        struct xarray           personalities;
        u32                     pers_next;
@@ -454,6 +455,22 @@ struct io_ring_ctx {
        struct list_head                tctx_list;
 };
 
+struct io_uring_task {
+       /* submission side */
+       struct xarray           xa;
+       struct wait_queue_head  wait;
+       const struct io_ring_ctx *last;
+       struct io_wq            *io_wq;
+       struct percpu_counter   inflight;
+       atomic_t                in_idle;
+       bool                    sqpoll;
+
+       spinlock_t              task_lock;
+       struct io_wq_work_list  task_list;
+       unsigned long           task_state;
+       struct callback_head    task_work;
+};
+
 /*
  * First field must be the file pointer in all the
  * iocb unions! See also 'struct kiocb' in <linux/fs.h>
@@ -680,6 +697,7 @@ enum {
        REQ_F_NO_FILE_TABLE_BIT,
        REQ_F_LTIMEOUT_ACTIVE_BIT,
        REQ_F_COMPLETE_INLINE_BIT,
+       REQ_F_REISSUE_BIT,
 
        /* not a real bit, just to check we're not overflowing the space */
        __REQ_F_LAST_BIT,
@@ -723,6 +741,8 @@ enum {
        REQ_F_LTIMEOUT_ACTIVE   = BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
        /* completion is deferred through io_comp_state */
        REQ_F_COMPLETE_INLINE   = BIT(REQ_F_COMPLETE_INLINE_BIT),
+       /* caller should reissue async */
+       REQ_F_REISSUE           = BIT(REQ_F_REISSUE_BIT),
 };
 
 struct async_poll {
@@ -1077,8 +1097,6 @@ static bool io_match_task(struct io_kiocb *head,
        io_for_each_link(req, head) {
                if (req->flags & REQ_F_INFLIGHT)
                        return true;
-               if (req->task->files == files)
-                       return true;
        }
        return false;
 }
@@ -1135,7 +1153,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
        init_waitqueue_head(&ctx->cq_wait);
        INIT_LIST_HEAD(&ctx->cq_overflow_list);
        init_completion(&ctx->ref_comp);
-       idr_init(&ctx->io_buffer_idr);
+       xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
        xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
        mutex_init(&ctx->uring_lock);
        init_waitqueue_head(&ctx->wait);
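
The hunk above starts converting the buffer-group registry from an IDR to an xarray keyed by bgid; later hunks swap idr_find/idr_remove/idr_alloc for the xarray calls. A kernel-context sketch of those calls (not a standalone program; demo() is made up):

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC1(buffers);	/* allocating xarray, IDs from 1 */

static int demo(void *head, unsigned long bgid)
{
	int err = xa_insert(&buffers, bgid, head, GFP_KERNEL);
	void *found = xa_load(&buffers, bgid);	/* NULL if absent */

	if (found)
		xa_erase(&buffers, bgid);	/* unlink and return entry */
	return err;				/* -EBUSY if bgid was taken */
}
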
@@ -1198,7 +1216,7 @@ static void io_prep_async_work(struct io_kiocb *req)
        if (req->flags & REQ_F_ISREG) {
                if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
                        io_wq_hash_work(&req->work, file_inode(req->file));
-       } else {
+       } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
                if (def->unbound_nonreg_file)
                        req->work.flags |= IO_WQ_WORK_UNBOUND;
        }
@@ -1221,16 +1239,16 @@ static void io_queue_async_work(struct io_kiocb *req)
        BUG_ON(!tctx);
        BUG_ON(!tctx->io_wq);
 
-       trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
-                                       &req->work, req->flags);
        /* init ->work of the whole link before punting */
        io_prep_async_link(req);
+       trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
+                                       &req->work, req->flags);
        io_wq_enqueue(tctx->io_wq, &req->work);
        if (link)
                io_queue_linked_timeout(link);
 }
 
-static void io_kill_timeout(struct io_kiocb *req)
+static void io_kill_timeout(struct io_kiocb *req, int status)
 {
        struct io_timeout_data *io = req->async_data;
        int ret;
@@ -1240,31 +1258,11 @@ static void io_kill_timeout(struct io_kiocb *req)
                atomic_set(&req->ctx->cq_timeouts,
                        atomic_read(&req->ctx->cq_timeouts) + 1);
                list_del_init(&req->timeout.list);
-               io_cqring_fill_event(req, 0);
+               io_cqring_fill_event(req, status);
                io_put_req_deferred(req, 1);
        }
 }
 
-/*
- * Returns true if we found and killed one or more timeouts
- */
-static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
-                            struct files_struct *files)
-{
-       struct io_kiocb *req, *tmp;
-       int canceled = 0;
-
-       spin_lock_irq(&ctx->completion_lock);
-       list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
-               if (io_match_task(req, tsk, files)) {
-                       io_kill_timeout(req);
-                       canceled++;
-               }
-       }
-       spin_unlock_irq(&ctx->completion_lock);
-       return canceled != 0;
-}
-
 static void __io_queue_deferred(struct io_ring_ctx *ctx)
 {
        do {
@@ -1309,7 +1307,7 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
                        break;
 
                list_del_init(&req->timeout.list);
-               io_kill_timeout(req);
+               io_kill_timeout(req, 0);
        } while (!list_empty(&ctx->timeout_list));
 
        ctx->cq_last_tm_flush = seq;
@@ -1550,14 +1548,17 @@ static void io_req_complete_post(struct io_kiocb *req, long res,
                io_put_task(req->task, 1);
                list_add(&req->compl.list, &cs->locked_free_list);
                cs->locked_free_nr++;
-       } else
-               req = NULL;
+       } else {
+               if (!percpu_ref_tryget(&ctx->refs))
+                       req = NULL;
+       }
        io_commit_cqring(ctx);
        spin_unlock_irqrestore(&ctx->completion_lock, flags);
-       io_cqring_ev_posted(ctx);
 
-       if (req)
+       if (req) {
+               io_cqring_ev_posted(ctx);
                percpu_ref_put(&ctx->refs);
+       }
 }
 
 static void io_req_complete_state(struct io_kiocb *req, long res,
@@ -1925,17 +1926,44 @@ static int io_req_task_work_add(struct io_kiocb *req)
        return ret;
 }
 
-static void io_req_task_work_add_fallback(struct io_kiocb *req,
-                                         task_work_func_t cb)
+static bool io_run_task_work_head(struct callback_head **work_head)
+{
+       struct callback_head *work, *next;
+       bool executed = false;
+
+       do {
+               work = xchg(work_head, NULL);
+               if (!work)
+                       break;
+
+               do {
+                       next = work->next;
+                       work->func(work);
+                       work = next;
+                       cond_resched();
+               } while (work);
+               executed = true;
+       } while (1);
+
+       return executed;
+}
+
+static void io_task_work_add_head(struct callback_head **work_head,
+                                 struct callback_head *task_work)
 {
-       struct io_ring_ctx *ctx = req->ctx;
        struct callback_head *head;
 
-       init_task_work(&req->task_work, cb);
        do {
-               head = READ_ONCE(ctx->exit_task_work);
-               req->task_work.next = head;
-       } while (cmpxchg(&ctx->exit_task_work, head, &req->task_work) != head);
+               head = READ_ONCE(*work_head);
+               task_work->next = head;
+       } while (cmpxchg(work_head, head, task_work) != head);
+}
+
+static void io_req_task_work_add_fallback(struct io_kiocb *req,
+                                         task_work_func_t cb)
+{
+       init_task_work(&req->task_work, cb);
+       io_task_work_add_head(&req->ctx->exit_task_work, &req->task_work);
 }
 
 static void __io_req_task_cancel(struct io_kiocb *req, int error)
@@ -2451,6 +2479,11 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
                return false;
        return true;
 }
+#else
+static bool io_rw_should_reissue(struct io_kiocb *req)
+{
+       return false;
+}
 #endif
 
 static bool io_rw_reissue(struct io_kiocb *req)
@@ -2476,13 +2509,14 @@ static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
 {
        int cflags = 0;
 
-       if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_reissue(req))
+       if (req->rw.kiocb.ki_flags & IOCB_WRITE)
+               kiocb_end_write(req);
+       if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_should_reissue(req)) {
+               req->flags |= REQ_F_REISSUE;
                return;
+       }
        if (res != req->result)
                req_set_fail_links(req);
-
-       if (req->rw.kiocb.ki_flags & IOCB_WRITE)
-               kiocb_end_write(req);
        if (req->flags & REQ_F_BUFFER_SELECTED)
                cflags = io_put_rw_kbuf(req);
        __io_req_complete(req, issue_flags, res, cflags);
@@ -2728,6 +2762,7 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 {
        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
        struct io_async_rw *io = req->async_data;
+       bool check_reissue = kiocb->ki_complete == io_complete_rw;
 
        /* add previously done IO, if any */
        if (io && io->bytes_done > 0) {
@@ -2743,6 +2778,18 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
                __io_complete_rw(req, ret, 0, issue_flags);
        else
                io_rw_done(kiocb, ret);
+
+       if (check_reissue && req->flags & REQ_F_REISSUE) {
+               req->flags &= ~REQ_F_REISSUE;
+               if (!io_rw_reissue(req)) {
+                       int cflags = 0;
+
+                       req_set_fail_links(req);
+                       if (req->flags & REQ_F_BUFFER_SELECTED)
+                               cflags = io_put_rw_kbuf(req);
+                       __io_req_complete(req, issue_flags, ret, cflags);
+               }
+       }
 }
 
 static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
@@ -2843,7 +2890,7 @@ static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
 
        lockdep_assert_held(&req->ctx->uring_lock);
 
-       head = idr_find(&req->ctx->io_buffer_idr, bgid);
+       head = xa_load(&req->ctx->io_buffers, bgid);
        if (head) {
                if (!list_empty(&head->list)) {
                        kbuf = list_last_entry(&head->list, struct io_buffer,
@@ -2851,7 +2898,7 @@ static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
                        list_del(&kbuf->list);
                } else {
                        kbuf = head;
-                       idr_remove(&req->ctx->io_buffer_idr, bgid);
+                       xa_erase(&req->ctx->io_buffers, bgid);
                }
                if (*len > kbuf->len)
                        *len = kbuf->len;
@@ -3259,11 +3306,8 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
 
        ret = io_iter_do_read(req, iter);
 
-       if (ret == -EIOCBQUEUED) {
-               if (req->async_data)
-                       iov_iter_revert(iter, io_size - iov_iter_count(iter));
-               goto out_free;
-       } else if (ret == -EAGAIN) {
+       if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
+               req->flags &= ~REQ_F_REISSUE;
                /* IOPOLL retry should happen for io-wq threads */
                if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
                        goto done;
@@ -3273,6 +3317,8 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
                /* some cases will consume bytes even on error returns */
                iov_iter_revert(iter, io_size - iov_iter_count(iter));
                ret = 0;
+       } else if (ret == -EIOCBQUEUED) {
+               goto out_free;
        } else if (ret <= 0 || ret == io_size || !force_nonblock ||
                   (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
                /* read all, failed, already did sync or don't want to retry */
@@ -3385,6 +3431,11 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
        else
                ret2 = -EINVAL;
 
+       if (req->flags & REQ_F_REISSUE) {
+               req->flags &= ~REQ_F_REISSUE;
+               ret2 = -EAGAIN;
+       }
+
        /*
         * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
         * retry them without IOCB_NOWAIT.
@@ -3394,8 +3445,6 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
        /* no retry on NONBLOCK nor RWF_NOWAIT */
        if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
                goto done;
-       if (ret2 == -EIOCBQUEUED && req->async_data)
-               iov_iter_revert(iter, io_size - iov_iter_count(iter));
        if (!force_nonblock || ret2 != -EAGAIN) {
                /* IOPOLL retry should happen for io-wq threads */
                if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
@@ -3892,7 +3941,7 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
        }
        i++;
        kfree(buf);
-       idr_remove(&ctx->io_buffer_idr, bgid);
+       xa_erase(&ctx->io_buffers, bgid);
 
        return i;
 }
@@ -3910,7 +3959,7 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
        lockdep_assert_held(&ctx->uring_lock);
 
        ret = -ENOENT;
-       head = idr_find(&ctx->io_buffer_idr, p->bgid);
+       head = xa_load(&ctx->io_buffers, p->bgid);
        if (head)
                ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
        if (ret < 0)
@@ -3930,6 +3979,7 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
 static int io_provide_buffers_prep(struct io_kiocb *req,
                                   const struct io_uring_sqe *sqe)
 {
+       unsigned long size;
        struct io_provide_buf *p = &req->pbuf;
        u64 tmp;
 
@@ -3943,7 +3993,8 @@ static int io_provide_buffers_prep(struct io_kiocb *req,
        p->addr = READ_ONCE(sqe->addr);
        p->len = READ_ONCE(sqe->len);
 
-       if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs)))
+       size = (unsigned long)p->len * p->nbufs;
+       if (!access_ok(u64_to_user_ptr(p->addr), size))
                return -EFAULT;
 
        p->bgid = READ_ONCE(sqe->buf_group);
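
The access_ok() fix above widens before multiplying: p->len * p->nbufs was a 32-bit product, so a huge request could wrap to a small, seemingly valid size. A userspace sketch of the wraparound; the printed values assume an LP64 target:

#include <stdio.h>

int main(void)
{
	unsigned int len = 0x10000, nbufs = 0x10000;	/* product is 2^32 */
	unsigned int narrow = len * nbufs;		/* wraps to 0 */
	unsigned long wide = (unsigned long)len * nbufs;

	printf("narrow=%u wide=%lu\n", narrow, wide);	/* narrow=0 wide=4294967296 */
	return 0;
}
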
@@ -3993,21 +4044,14 @@ static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
 
        lockdep_assert_held(&ctx->uring_lock);
 
-       list = head = idr_find(&ctx->io_buffer_idr, p->bgid);
+       list = head = xa_load(&ctx->io_buffers, p->bgid);
 
        ret = io_add_buffers(p, &head);
-       if (ret < 0)
-               goto out;
-
-       if (!list) {
-               ret = idr_alloc(&ctx->io_buffer_idr, head, p->bgid, p->bgid + 1,
-                                       GFP_KERNEL);
-               if (ret < 0) {
+       if (ret >= 0 && !list) {
+               ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);
+               if (ret < 0)
                        __io_remove_buffers(ctx, head, p->bgid, -1U);
-                       goto out;
-               }
        }
-out:
        if (ret < 0)
                req_set_fail_links(req);
 
@@ -4345,6 +4389,7 @@ static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
        struct io_async_msghdr iomsg, *kmsg;
        struct socket *sock;
        unsigned flags;
+       int min_ret = 0;
        int ret;
 
        sock = sock_from_file(req->file);
@@ -4359,12 +4404,15 @@ static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
                kmsg = &iomsg;
        }
 
-       flags = req->sr_msg.msg_flags;
+       flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
        if (flags & MSG_DONTWAIT)
                req->flags |= REQ_F_NOWAIT;
        else if (issue_flags & IO_URING_F_NONBLOCK)
                flags |= MSG_DONTWAIT;
 
+       if (flags & MSG_WAITALL)
+               min_ret = iov_iter_count(&kmsg->msg.msg_iter);
+
        ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
        if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
                return io_setup_async_msg(req, kmsg);
@@ -4375,7 +4423,7 @@ static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
        if (kmsg->free_iov)
                kfree(kmsg->free_iov);
        req->flags &= ~REQ_F_NEED_CLEANUP;
-       if (ret < 0)
+       if (ret < min_ret)
                req_set_fail_links(req);
        __io_req_complete(req, issue_flags, ret, 0);
        return 0;
@@ -4388,6 +4436,7 @@ static int io_send(struct io_kiocb *req, unsigned int issue_flags)
        struct iovec iov;
        struct socket *sock;
        unsigned flags;
+       int min_ret = 0;
        int ret;
 
        sock = sock_from_file(req->file);
@@ -4403,12 +4452,15 @@ static int io_send(struct io_kiocb *req, unsigned int issue_flags)
        msg.msg_controllen = 0;
        msg.msg_namelen = 0;
 
-       flags = req->sr_msg.msg_flags;
+       flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
        if (flags & MSG_DONTWAIT)
                req->flags |= REQ_F_NOWAIT;
        else if (issue_flags & IO_URING_F_NONBLOCK)
                flags |= MSG_DONTWAIT;
 
+       if (flags & MSG_WAITALL)
+               min_ret = iov_iter_count(&msg.msg_iter);
+
        msg.msg_flags = flags;
        ret = sock_sendmsg(sock, &msg);
        if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
@@ -4416,7 +4468,7 @@ static int io_send(struct io_kiocb *req, unsigned int issue_flags)
        if (ret == -ERESTARTSYS)
                ret = -EINTR;
 
-       if (ret < 0)
+       if (ret < min_ret)
                req_set_fail_links(req);
        __io_req_complete(req, issue_flags, ret, 0);
        return 0;
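
The min_ret logic above, repeated across the four send/recv handlers, treats a short transfer as failure when MSG_WAITALL was requested instead of accepting any non-negative return. A userspace sketch of that success test; failed() is illustrative:

#include <stdio.h>
#include <sys/socket.h>

static int failed(long ret, int flags, unsigned long want)
{
	unsigned long min_ret = (flags & MSG_WAITALL) ? want : 0;

	return ret < 0 || (unsigned long)ret < min_ret;
}

int main(void)
{
	/* 5 bytes moved out of a 10-byte request: only WAITALL fails. */
	printf("%d %d\n", failed(5, MSG_WAITALL, 10), failed(5, 0, 10));
	return 0;
}
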
@@ -4568,6 +4620,7 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
        struct socket *sock;
        struct io_buffer *kbuf;
        unsigned flags;
+       int min_ret = 0;
        int ret, cflags = 0;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 
@@ -4593,12 +4646,15 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
                                1, req->sr_msg.len);
        }
 
-       flags = req->sr_msg.msg_flags;
+       flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
        if (flags & MSG_DONTWAIT)
                req->flags |= REQ_F_NOWAIT;
        else if (force_nonblock)
                flags |= MSG_DONTWAIT;
 
+       if (flags & MSG_WAITALL)
+               min_ret = iov_iter_count(&kmsg->msg.msg_iter);
+
        ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
                                        kmsg->uaddr, flags);
        if (force_nonblock && ret == -EAGAIN)
@@ -4612,7 +4668,7 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
        if (kmsg->free_iov)
                kfree(kmsg->free_iov);
        req->flags &= ~REQ_F_NEED_CLEANUP;
-       if (ret < 0)
+       if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
                req_set_fail_links(req);
        __io_req_complete(req, issue_flags, ret, cflags);
        return 0;
@@ -4627,6 +4683,7 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
        struct socket *sock;
        struct iovec iov;
        unsigned flags;
+       int min_ret = 0;
        int ret, cflags = 0;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 
@@ -4652,12 +4709,15 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
        msg.msg_iocb = NULL;
        msg.msg_flags = 0;
 
-       flags = req->sr_msg.msg_flags;
+       flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
        if (flags & MSG_DONTWAIT)
                req->flags |= REQ_F_NOWAIT;
        else if (force_nonblock)
                flags |= MSG_DONTWAIT;
 
+       if (flags & MSG_WAITALL)
+               min_ret = iov_iter_count(&msg.msg_iter);
+
        ret = sock_recvmsg(sock, &msg, flags);
        if (force_nonblock && ret == -EAGAIN)
                return -EAGAIN;
@@ -4666,7 +4726,7 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
 out_free:
        if (req->flags & REQ_F_BUFFER_SELECTED)
                cflags = io_put_recv_kbuf(req);
-       if (ret < 0)
+       if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
                req_set_fail_links(req);
        __io_req_complete(req, issue_flags, ret, cflags);
        return 0;
@@ -4763,7 +4823,6 @@ static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
                        ret = -ENOMEM;
                        goto out;
                }
-               io = req->async_data;
                memcpy(req->async_data, &__io, sizeof(__io));
                return -EAGAIN;
        }
@@ -5526,7 +5585,8 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
        data->mode = io_translate_timeout_mode(flags);
        hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
-       io_req_track_inflight(req);
+       if (is_timeout_link)
+               io_req_track_inflight(req);
        return 0;
 }
 
@@ -6204,7 +6264,6 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
        spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
        if (prev) {
-               req_set_fail_links(prev);
                io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
                io_put_req_deferred(prev, 1);
        } else {
@@ -6423,8 +6482,6 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
        ret = io_init_req(ctx, req, sqe);
        if (unlikely(ret)) {
 fail_req:
-               io_put_req(req);
-               io_req_complete(req, ret);
                if (link->head) {
                        /* fail even hard links since we don't submit */
                        link->head->flags |= REQ_F_FAIL_LINK;
@@ -6432,6 +6489,8 @@ fail_req:
                        io_req_complete(link->head, -ECANCELED);
                        link->head = NULL;
                }
+               io_put_req(req);
+               io_req_complete(req, ret);
                return ret;
        }
        ret = io_req_prep(req, sqe);
@@ -6684,7 +6743,7 @@ static int io_sq_thread(void *data)
        char buf[TASK_COMM_LEN];
        DEFINE_WAIT(wait);
 
-       sprintf(buf, "iou-sqp-%d", sqd->task_pid);
+       snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
        set_task_comm(current, buf);
        current->pf_io_worker = NULL;
 
@@ -6694,22 +6753,30 @@ static int io_sq_thread(void *data)
                set_cpus_allowed_ptr(current, cpu_online_mask);
        current->flags |= PF_NO_SETAFFINITY;
 
-       down_read(&sqd->rw_lock);
-
+       mutex_lock(&sqd->lock);
        while (!test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state)) {
                int ret;
                bool cap_entries, sqt_spin, needs_sched;
 
-               if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state)) {
-                       up_read(&sqd->rw_lock);
+               if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
+                   signal_pending(current)) {
+                       bool did_sig = false;
+
+                       mutex_unlock(&sqd->lock);
+                       if (signal_pending(current)) {
+                               struct ksignal ksig;
+
+                               did_sig = get_signal(&ksig);
+                       }
                        cond_resched();
-                       down_read(&sqd->rw_lock);
+                       mutex_lock(&sqd->lock);
+                       if (did_sig)
+                               break;
                        io_run_task_work();
+                       io_run_task_work_head(&sqd->park_task_work);
                        timeout = jiffies + sqd->sq_thread_idle;
                        continue;
                }
-               if (fatal_signal_pending(current))
-                       break;
                sqt_spin = false;
                cap_entries = !list_is_singular(&sqd->ctx_list);
                list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
@@ -6750,32 +6817,27 @@ static int io_sq_thread(void *data)
                        list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
                                io_ring_set_wakeup_flag(ctx);
 
-                       up_read(&sqd->rw_lock);
+                       mutex_unlock(&sqd->lock);
                        schedule();
-                       down_read(&sqd->rw_lock);
+                       mutex_lock(&sqd->lock);
                        list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
                                io_ring_clear_wakeup_flag(ctx);
                }
 
                finish_wait(&sqd->wait, &wait);
+               io_run_task_work_head(&sqd->park_task_work);
                timeout = jiffies + sqd->sq_thread_idle;
        }
-       up_read(&sqd->rw_lock);
-       down_write(&sqd->rw_lock);
-       /*
-        * someone may have parked and added a cancellation task_work, run
-        * it first because we don't want it in io_uring_cancel_sqpoll()
-        */
-       io_run_task_work();
 
        list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
                io_uring_cancel_sqpoll(ctx);
        sqd->thread = NULL;
        list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
                io_ring_set_wakeup_flag(ctx);
-       up_write(&sqd->rw_lock);
+       mutex_unlock(&sqd->lock);
 
        io_run_task_work();
+       io_run_task_work_head(&sqd->park_task_work);
        complete(&sqd->exited);
        do_exit(0);
 }
@@ -6821,7 +6883,7 @@ static int io_run_task_work_sig(void)
                return 1;
        if (!signal_pending(current))
                return 0;
-       if (test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))
+       if (test_thread_flag(TIF_NOTIFY_SIGNAL))
                return -ERESTARTSYS;
        return -EINTR;
 }
@@ -7075,23 +7137,28 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
 }
 
 static void io_sq_thread_unpark(struct io_sq_data *sqd)
-       __releases(&sqd->rw_lock)
+       __releases(&sqd->lock)
 {
        WARN_ON_ONCE(sqd->thread == current);
 
+       /*
+        * Clear the bit unconditionally: a conditional clear_bit() would race
+        * with other threads incrementing park_pending and re-setting the bit.
+        */
        clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
-       up_write(&sqd->rw_lock);
+       if (atomic_dec_return(&sqd->park_pending))
+               set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
+       mutex_unlock(&sqd->lock);
 }
 
 static void io_sq_thread_park(struct io_sq_data *sqd)
-       __acquires(&sqd->rw_lock)
+       __acquires(&sqd->lock)
 {
        WARN_ON_ONCE(sqd->thread == current);
 
+       atomic_inc(&sqd->park_pending);
        set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
-       down_write(&sqd->rw_lock);
-       /* set again for consistency, in case concurrent parks are happening */
-       set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
+       mutex_lock(&sqd->lock);
        if (sqd->thread)
                wake_up_process(sqd->thread);
 }
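
park_pending above lets parks nest: unpark clears SHOULD_PARK and re-sets it when other parkers remain, because a conditional clear would race. A userspace sketch of the counting scheme using C11 atomics; should_park stands in for the state bit:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int park_pending;
static atomic_int should_park;

static void park(void)
{
	atomic_fetch_add(&park_pending, 1);
	atomic_store(&should_park, 1);
}

static void unpark(void)
{
	atomic_store(&should_park, 0);
	if (atomic_fetch_sub(&park_pending, 1) - 1 > 0)
		atomic_store(&should_park, 1);	/* others still parked */
}

int main(void)
{
	park();
	park();
	unpark();
	printf("%d\n", atomic_load(&should_park));	/* 1: one parker left */
	unpark();
	printf("%d\n", atomic_load(&should_park));	/* 0: fully unparked */
	return 0;
}
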
@@ -7100,17 +7167,19 @@ static void io_sq_thread_stop(struct io_sq_data *sqd)
 {
        WARN_ON_ONCE(sqd->thread == current);
 
-       down_write(&sqd->rw_lock);
+       mutex_lock(&sqd->lock);
        set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
        if (sqd->thread)
                wake_up_process(sqd->thread);
-       up_write(&sqd->rw_lock);
+       mutex_unlock(&sqd->lock);
        wait_for_completion(&sqd->exited);
 }
 
 static void io_put_sq_data(struct io_sq_data *sqd)
 {
        if (refcount_dec_and_test(&sqd->refs)) {
+               WARN_ON_ONCE(atomic_read(&sqd->park_pending));
+
                io_sq_thread_stop(sqd);
                kfree(sqd);
        }
@@ -7184,9 +7253,10 @@ static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
        if (!sqd)
                return ERR_PTR(-ENOMEM);
 
+       atomic_set(&sqd->park_pending, 0);
        refcount_set(&sqd->refs, 1);
        INIT_LIST_HEAD(&sqd->ctx_list);
-       init_rwsem(&sqd->rw_lock);
+       mutex_init(&sqd->lock);
        init_waitqueue_head(&sqd->wait);
        init_completion(&sqd->exited);
        return sqd;
@@ -7866,22 +7936,17 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx,
 
                ret = 0;
                io_sq_thread_park(sqd);
+               list_add(&ctx->sqd_list, &sqd->ctx_list);
+               io_sqd_update_thread_idle(sqd);
                /* don't attach to a dying SQPOLL thread, would be racy */
-               if (attached && !sqd->thread) {
+               if (attached && !sqd->thread)
                        ret = -ENXIO;
-               } else {
-                       list_add(&ctx->sqd_list, &sqd->ctx_list);
-                       io_sqd_update_thread_idle(sqd);
-               }
                io_sq_thread_unpark(sqd);
 
-               if (ret < 0) {
-                       io_put_sq_data(sqd);
-                       ctx->sq_data = NULL;
-                       return ret;
-               } else if (attached) {
+               if (ret < 0)
+                       goto err;
+               if (attached)
                        return 0;
-               }
 
                if (p->flags & IORING_SETUP_SQ_AFF) {
                        int cpu = p->sq_thread_cpu;
@@ -8332,19 +8397,13 @@ static int io_eventfd_unregister(struct io_ring_ctx *ctx)
        return -ENXIO;
 }
 
-static int __io_destroy_buffers(int id, void *p, void *data)
-{
-       struct io_ring_ctx *ctx = data;
-       struct io_buffer *buf = p;
-
-       __io_remove_buffers(ctx, buf, id, -1U);
-       return 0;
-}
-
 static void io_destroy_buffers(struct io_ring_ctx *ctx)
 {
-       idr_for_each(&ctx->io_buffer_idr, __io_destroy_buffers, ctx);
-       idr_destroy(&ctx->io_buffer_idr);
+       struct io_buffer *buf;
+       unsigned long index;
+
+       xa_for_each(&ctx->io_buffers, index, buf)
+               __io_remove_buffers(ctx, buf, index, -1U);
 }
 
 static void io_req_cache_free(struct list_head *list, struct task_struct *tsk)
@@ -8386,11 +8445,13 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
 {
        /*
         * Some may use context even when all refs and requests have been put,
-        * and they are free to do so while still holding uring_lock, see
-        * __io_req_task_submit(). Wait for them to finish.
+        * and they are free to do so while still holding uring_lock or
+        * completion_lock, see __io_req_task_submit(). Wait for them to finish.
         */
        mutex_lock(&ctx->uring_lock);
        mutex_unlock(&ctx->uring_lock);
+       spin_lock_irq(&ctx->completion_lock);
+       spin_unlock_irq(&ctx->completion_lock);
 
        io_sq_thread_finish(ctx);
        io_sqe_buffers_unregister(ctx);
@@ -8478,26 +8539,9 @@ static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
        return -EINVAL;
 }
 
-static bool io_run_ctx_fallback(struct io_ring_ctx *ctx)
+static inline bool io_run_ctx_fallback(struct io_ring_ctx *ctx)
 {
-       struct callback_head *work, *next;
-       bool executed = false;
-
-       do {
-               work = xchg(&ctx->exit_task_work, NULL);
-               if (!work)
-                       break;
-
-               do {
-                       next = work->next;
-                       work->func(work);
-                       work = next;
-                       cond_resched();
-               } while (work);
-               executed = true;
-       } while (1);
-
-       return executed;
+       return io_run_task_work_head(&ctx->exit_task_work);
 }
 
 struct io_tctx_exit {
@@ -8529,6 +8573,14 @@ static void io_ring_exit_work(struct work_struct *work)
        struct io_tctx_node *node;
        int ret;
 
+       /* prevent SQPOLL from submitting new requests */
+       if (ctx->sq_data) {
+               io_sq_thread_park(ctx->sq_data);
+               list_del_init(&ctx->sqd_list);
+               io_sqd_update_thread_idle(ctx->sq_data);
+               io_sq_thread_unpark(ctx->sq_data);
+       }
+
        /*
         * If we're doing polled IO and end up having requests being
         * submitted async (out-of-line), then completions can come in while
@@ -8565,6 +8617,28 @@ static void io_ring_exit_work(struct work_struct *work)
        io_ring_ctx_free(ctx);
 }
 
+/* Returns true if we found and killed one or more timeouts */
+static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
+                            struct files_struct *files)
+{
+       struct io_kiocb *req, *tmp;
+       int canceled = 0;
+
+       spin_lock_irq(&ctx->completion_lock);
+       list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
+               if (io_match_task(req, tsk, files)) {
+                       io_kill_timeout(req, -ECANCELED);
+                       canceled++;
+               }
+       }
+       if (canceled != 0)
+               io_commit_cqring(ctx);
+       spin_unlock_irq(&ctx->completion_lock);
+       if (canceled != 0)
+               io_cqring_ev_posted(ctx);
+       return canceled != 0;
+}
+
 static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 {
        unsigned long index;
@@ -8879,7 +8953,7 @@ static void io_sqpoll_cancel_sync(struct io_ring_ctx *ctx)
        if (task) {
                init_completion(&work.completion);
                init_task_work(&work.task_work, io_sqpoll_cancel_cb);
-               WARN_ON_ONCE(task_work_add(task, &work.task_work, TWA_SIGNAL));
+               io_task_work_add_head(&sqd->park_task_work, &work.task_work);
                wake_up_process(task);
        }
        io_sq_thread_unpark(sqd);
@@ -8956,6 +9030,8 @@ void __io_uring_task_cancel(void)
 
        /* make sure overflow events are dropped */
        atomic_inc(&tctx->in_idle);
+       __io_uring_files_cancel(NULL);
+
        do {
                /* read completions before cancelations */
                inflight = tctx_inflight(tctx);
index a648dbf..a5e478d 100644
--- a/fs/iomap/swapfile.c
+++ b/fs/iomap/swapfile.c
@@ -170,6 +170,16 @@ int iomap_swapfile_activate(struct swap_info_struct *sis,
                        return ret;
        }
 
+       /*
+        * If this swapfile doesn't contain even a single page-aligned
+        * contiguous range of blocks, reject this useless swapfile to
+        * prevent confusion later on.
+        */
+       if (isi.nr_pages == 0) {
+               pr_warn("swapon: Cannot find a single usable page in file.\n");
+               return -EINVAL;
+       }
+
        *pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
        sis->max = isi.nr_pages;
        sis->pages = isi.nr_pages - 1;
index 99ca97e..6125d2d 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1808,9 +1808,6 @@ check_conflicting_open(struct file *filp, const long arg, int flags)
 
        if (flags & FL_LAYOUT)
                return 0;
-       if (flags & FL_DELEG)
-               /* We leave these checks to the caller. */
-               return 0;
 
        if (arg == F_RDLCK)
                return inode_is_open_for_write(inode) ? -EAGAIN : 0;
index 216f16e..48a2f28 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -579,6 +579,8 @@ static void set_nameidata(struct nameidata *p, int dfd, struct filename *name)
        p->stack = p->internal;
        p->dfd = dfd;
        p->name = name;
+       p->path.mnt = NULL;
+       p->path.dentry = NULL;
        p->total_link_count = old ? old->total_link_count : 0;
        p->saved = old;
        current->nameidata = p;
@@ -652,6 +654,8 @@ static void terminate_walk(struct nameidata *nd)
                rcu_read_unlock();
        }
        nd->depth = 0;
+       nd->path.mnt = NULL;
+       nd->path.dentry = NULL;
 }
 
 /* path_put is needed afterwards regardless of success or failure */
@@ -2322,8 +2326,6 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
        }
 
        nd->root.mnt = NULL;
-       nd->path.mnt = NULL;
-       nd->path.dentry = NULL;
 
        /* Absolute pathname -- fetch the root (LOOKUP_IN_ROOT uses nd->dfd). */
        if (*s == '/' && !(flags & LOOKUP_IN_ROOT)) {
@@ -2419,16 +2421,16 @@ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path
        while (!(err = link_path_walk(s, nd)) &&
               (s = lookup_last(nd)) != NULL)
                ;
+       if (!err && unlikely(nd->flags & LOOKUP_MOUNTPOINT)) {
+               err = handle_lookup_down(nd);
+               nd->flags &= ~LOOKUP_JUMPED; // no d_weak_revalidate(), please...
+       }
        if (!err)
                err = complete_walk(nd);
 
        if (!err && nd->flags & LOOKUP_DIRECTORY)
                if (!d_can_lookup(nd->path.dentry))
                        err = -ENOTDIR;
-       if (!err && unlikely(nd->flags & LOOKUP_MOUNTPOINT)) {
-               err = handle_lookup_down(nd);
-               nd->flags &= ~LOOKUP_JUMPED; // no d_weak_revalidate(), please...
-       }
        if (!err) {
                *path = nd->path;
                nd->path.mnt = NULL;
index 821e591..d6cff5f 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -73,6 +73,7 @@ config NFSD_V4
        select NFSD_V3
        select FS_POSIX_ACL
        select SUNRPC_GSS
+       select CRYPTO
        select CRYPTO_MD5
        select CRYPTO_SHA256
        select GRACE_PERIOD
index 53fcbf7..7629248 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -898,6 +898,8 @@ nfsd_file_find_locked(struct inode *inode, unsigned int may_flags,
                        continue;
                if (!nfsd_match_cred(nf->nf_cred, current_cred()))
                        continue;
+               if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags))
+                       continue;
                if (nfsd_file_get(nf) != NULL)
                        return nf;
        }
index 052be5b..7325592 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -1189,6 +1189,7 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
                switch (task->tk_status) {
                case -EIO:
                case -ETIMEDOUT:
+               case -EACCES:
                        nfsd4_mark_cb_down(clp, task->tk_status);
                }
                break;
index acdb3cd..dd9f38d 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1302,7 +1302,7 @@ nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct nfsd_file *src,
                        struct nfsd_file *dst)
 {
        nfs42_ssc_close(src->nf_file);
-       /* 'src' is freed by nfsd4_do_async_copy */
+       fput(src->nf_file);
        nfsd_file_put(dst);
        mntput(ss_mnt);
 }
index 423fd66..97447a6 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -4940,31 +4940,6 @@ static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
        return fl;
 }
 
-static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
-                                               struct nfs4_file *fp)
-{
-       struct nfs4_clnt_odstate *co;
-       struct file *f = fp->fi_deleg_file->nf_file;
-       struct inode *ino = locks_inode(f);
-       int writes = atomic_read(&ino->i_writecount);
-
-       if (fp->fi_fds[O_WRONLY])
-               writes--;
-       if (fp->fi_fds[O_RDWR])
-               writes--;
-       if (writes > 0)
-               return -EAGAIN;
-       spin_lock(&fp->fi_lock);
-       list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
-               if (co->co_client != clp) {
-                       spin_unlock(&fp->fi_lock);
-                       return -EAGAIN;
-               }
-       }
-       spin_unlock(&fp->fi_lock);
-       return 0;
-}
-
 static struct nfs4_delegation *
 nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
                    struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
@@ -4984,12 +4959,9 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
 
        nf = find_readable_file(fp);
        if (!nf) {
-               /*
-                * We probably could attempt another open and get a read
-                * delegation, but for now, don't bother until the
-                * client actually sends us one.
-                */
-               return ERR_PTR(-EAGAIN);
+               /* We should always have a readable file here */
+               WARN_ON_ONCE(1);
+               return ERR_PTR(-EBADF);
        }
        spin_lock(&state_lock);
        spin_lock(&fp->fi_lock);
@@ -5019,19 +4991,11 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
        if (!fl)
                goto out_clnt_odstate;
 
-       status = nfsd4_check_conflicting_opens(clp, fp);
-       if (status) {
-               locks_free_lock(fl);
-               goto out_clnt_odstate;
-       }
        status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
        if (fl)
                locks_free_lock(fl);
        if (status)
                goto out_clnt_odstate;
-       status = nfsd4_check_conflicting_opens(clp, fp);
-       if (status)
-               goto out_clnt_odstate;
 
        spin_lock(&state_lock);
        spin_lock(&fp->fi_lock);
@@ -5113,6 +5077,17 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
                                goto out_no_deleg;
                        if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
                                goto out_no_deleg;
+                       /*
+                        * Also, if the file was opened for write or
+                        * create, there's a good chance the client's
+                        * about to write to it, resulting in an
+                        * immediate recall (since we don't support
+                        * write delegations):
+                        */
+                       if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
+                               goto out_no_deleg;
+                       if (open->op_create == NFS4_OPEN_CREATE)
+                               goto out_no_deleg;
                        break;
                default:
                        goto out_no_deleg;
@@ -5389,7 +5364,7 @@ nfs4_laundromat(struct nfsd_net *nn)
        idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
                cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
                if (cps->cp_stateid.sc_type == NFS4_COPYNOTIFY_STID &&
-                               cps->cpntf_time > cutoff)
+                               cps->cpntf_time < cutoff)
                        _free_cpntf_state_locked(nn, cps);
        }
        spin_unlock(&nn->s2s_cp_lock);
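
The laundromat fix above flips an inverted expiry test: a copy-notify stateid is stale when its timestamp is older than the cutoff, not newer. A sketch of the corrected comparison with illustrative values:

#include <stdio.h>

int main(void)
{
	long cutoff = 100;	/* "now" minus the lease period */
	long cpntf_time = 40;	/* stamped when the state was created */

	if (cpntf_time < cutoff)	/* older than cutoff: expired */
		puts("free the copy-notify state");
	return 0;
}
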
index 3bfb414..ad20403 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -2295,7 +2295,7 @@ static int ocfs2_dio_end_io_write(struct inode *inode,
        struct ocfs2_alloc_context *meta_ac = NULL;
        handle_t *handle = NULL;
        loff_t end = offset + bytes;
-       int ret = 0, credits = 0, locked = 0;
+       int ret = 0, credits = 0;
 
        ocfs2_init_dealloc_ctxt(&dealloc);
 
@@ -2306,13 +2306,6 @@ static int ocfs2_dio_end_io_write(struct inode *inode,
            !dwc->dw_orphaned)
                goto out;
 
-       /* ocfs2_file_write_iter will get i_mutex, so we need not lock if we
-        * are in that context. */
-       if (dwc->dw_writer_pid != task_pid_nr(current)) {
-               inode_lock(inode);
-               locked = 1;
-       }
-
        ret = ocfs2_inode_lock(inode, &di_bh, 1);
        if (ret < 0) {
                mlog_errno(ret);
@@ -2393,8 +2386,6 @@ out:
        if (meta_ac)
                ocfs2_free_alloc_context(meta_ac);
        ocfs2_run_deallocs(osb, &dealloc);
-       if (locked)
-               inode_unlock(inode);
        ocfs2_dio_free_write_ctx(inode, dwc);
 
        return ret;
index 6611c64..5edc1d0 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1245,22 +1245,24 @@ int ocfs2_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                                goto bail_unlock;
                        }
                }
+               down_write(&OCFS2_I(inode)->ip_alloc_sem);
                handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
                                           2 * ocfs2_quota_trans_credits(sb));
                if (IS_ERR(handle)) {
                        status = PTR_ERR(handle);
                        mlog_errno(status);
-                       goto bail_unlock;
+                       goto bail_unlock_alloc;
                }
                status = __dquot_transfer(inode, transfer_to);
                if (status < 0)
                        goto bail_commit;
        } else {
+               down_write(&OCFS2_I(inode)->ip_alloc_sem);
                handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
                if (IS_ERR(handle)) {
                        status = PTR_ERR(handle);
                        mlog_errno(status);
-                       goto bail_unlock;
+                       goto bail_unlock_alloc;
                }
        }
 
@@ -1273,6 +1275,8 @@ int ocfs2_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
 
 bail_commit:
        ocfs2_commit_trans(osb, handle);
+bail_unlock_alloc:
+       up_write(&OCFS2_I(inode)->ip_alloc_sem);
 bail_unlock:
        if (status && inode_locked) {
                ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
index 9b3b06d..e47fde1 100644
--- a/fs/reiserfs/xattr.h
+++ b/fs/reiserfs/xattr.h
@@ -44,7 +44,7 @@ void reiserfs_security_free(struct reiserfs_security_handle *sec);
 
 static inline int reiserfs_xattrs_initialized(struct super_block *sb)
 {
-       return REISERFS_SB(sb)->priv_root != NULL;
+       return REISERFS_SB(sb)->priv_root && REISERFS_SB(sb)->xattr_root;
 }
 
 #define xattr_size(size) ((size) + sizeof(struct reiserfs_xattr_header))
index 37aaa83..945896d 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -1055,10 +1055,9 @@ static long do_restart_poll(struct restart_block *restart_block)
 
        ret = do_sys_poll(ufds, nfds, to);
 
-       if (ret == -ERESTARTNOHAND) {
-               restart_block->fn = do_restart_poll;
-               ret = -ERESTART_RESTARTBLOCK;
-       }
+       if (ret == -ERESTARTNOHAND)
+               ret = set_restart_fn(restart_block, do_restart_poll);
+
        return ret;
 }
 
@@ -1080,7 +1079,6 @@ SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
                struct restart_block *restart_block;
 
                restart_block = &current->restart_block;
-               restart_block->fn = do_restart_poll;
                restart_block->poll.ufds = ufds;
                restart_block->poll.nfds = nfds;
 
@@ -1091,7 +1089,7 @@ SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
                } else
                        restart_block->poll.has_timeout = 0;
 
-               ret = -ERESTART_RESTARTBLOCK;
+               ret = set_restart_fn(restart_block, do_restart_poll);
        }
        return ret;
 }
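
set_restart_fn() used above couples the two things a restartable syscall must do: record the callback and return -ERESTART_RESTARTBLOCK, so a caller can no longer set one without the other. A userspace sketch of such a helper; the real one lives in <linux/thread_info.h> and also handles arch-specific restart data, and 516 is the kernel-internal errno value:

#include <stdio.h>

#define ERESTART_RESTARTBLOCK 516

struct restart_block {
	long (*fn)(struct restart_block *);
};

static long set_restart_fn(struct restart_block *rb,
			   long (*fn)(struct restart_block *))
{
	rb->fn = fn;
	return -ERESTART_RESTARTBLOCK;
}

static long do_restart_poll(struct restart_block *rb)
{
	(void)rb;
	return 0;
}

int main(void)
{
	struct restart_block rb;

	printf("%ld\n", set_restart_fn(&rb, do_restart_poll));
	return rb.fn ? 0 : 1;
}
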
index eb02072..7237637 100644
--- a/fs/squashfs/export.c
+++ b/fs/squashfs/export.c
@@ -152,14 +152,18 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
                start = le64_to_cpu(table[n]);
                end = le64_to_cpu(table[n + 1]);
 
-               if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
+               if (start >= end
+                   || (end - start) >
+                   (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
                        kfree(table);
                        return ERR_PTR(-EINVAL);
                }
        }
 
        start = le64_to_cpu(table[indexes - 1]);
-       if (start >= lookup_table_start || (lookup_table_start - start) > SQUASHFS_METADATA_SIZE) {
+       if (start >= lookup_table_start ||
+           (lookup_table_start - start) >
+           (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
                kfree(table);
                return ERR_PTR(-EINVAL);
        }
index 11581bf..ea53876 100644
@@ -97,14 +97,16 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
                start = le64_to_cpu(table[n]);
                end = le64_to_cpu(table[n + 1]);
 
-               if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
+               if (start >= end || (end - start) >
+                               (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
                        kfree(table);
                        return ERR_PTR(-EINVAL);
                }
        }
 
        start = le64_to_cpu(table[indexes - 1]);
-       if (start >= id_table_start || (id_table_start - start) > SQUASHFS_METADATA_SIZE) {
+       if (start >= id_table_start || (id_table_start - start) >
+                               (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
                kfree(table);
                return ERR_PTR(-EINVAL);
        }
index 8d64edb..b3fdc82 100644
@@ -17,6 +17,7 @@
 
 /* size of metadata (inode and directory) blocks */
 #define SQUASHFS_METADATA_SIZE         8192
+#define SQUASHFS_BLOCK_OFFSET          2
 
 /* default size of block device I/O */
 #ifdef CONFIG_SQUASHFS_4K_DEVBLK_SIZE
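
Editor's note: the looser bound used in the three squashfs hunks accounts for the two-byte length/compression header that precedes every metadata block on disk, so the distance between consecutive index entries may legitimately reach SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET. A standalone sketch of the tightened validation:

#include <stdint.h>
#include <stdio.h>

#define SQUASHFS_METADATA_SIZE  8192
#define SQUASHFS_BLOCK_OFFSET   2       /* 2-byte on-disk block header */

/* Return 0 if the [start, end) span can be a valid metadata block. */
static int metadata_span_ok(uint64_t start, uint64_t end)
{
        if (start >= end)
                return -1;
        if (end - start > SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)
                return -1;
        return 0;
}

int main(void)
{
        /* 8192 bytes of payload plus the 2-byte header is still valid... */
        printf("%d\n", metadata_span_ok(0, 8194));      /* 0  */
        /* ...one byte more is not. */
        printf("%d\n", metadata_span_ok(0, 8195));      /* -1 */
        return 0;
}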
index ead6667..087cab8 100644
@@ -109,14 +109,16 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
                start = le64_to_cpu(table[n]);
                end = le64_to_cpu(table[n + 1]);
 
-               if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
+               if (start >= end || (end - start) >
+                               (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
                        kfree(table);
                        return ERR_PTR(-EINVAL);
                }
        }
 
        start = le64_to_cpu(table[indexes - 1]);
-       if (start >= table_start || (table_start - start) > SQUASHFS_METADATA_SIZE) {
+       if (start >= table_start || (table_start - start) >
+                               (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
                kfree(table);
                return ERR_PTR(-EINVAL);
        }
index 46a861d..f93370b 100644
@@ -1007,9 +1007,10 @@ xfs_create(
        /*
         * Make sure that we have allocated dquot(s) on disk.
         */
-       error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
-                                       XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
-                                       &udqp, &gdqp, &pdqp);
+       error = xfs_qm_vop_dqalloc(dp, fsuid_into_mnt(mnt_userns),
+                       fsgid_into_mnt(mnt_userns), prid,
+                       XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
+                       &udqp, &gdqp, &pdqp);
        if (error)
                return error;
 
@@ -1157,9 +1158,10 @@ xfs_create_tmpfile(
        /*
         * Make sure that we have allocated dquot(s) on disk.
         */
-       error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
-                               XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
-                               &udqp, &gdqp, &pdqp);
+       error = xfs_qm_vop_dqalloc(dp, fsuid_into_mnt(mnt_userns),
+                       fsgid_into_mnt(mnt_userns), prid,
+                       XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
+                       &udqp, &gdqp, &pdqp);
        if (error)
                return error;
 
index ca310a1..3498b97 100644
@@ -168,6 +168,12 @@ xfs_bulkstat_one(
        };
        int                     error;
 
+       if (breq->mnt_userns != &init_user_ns) {
+               xfs_warn_ratelimited(breq->mp,
+                       "bulkstat not supported inside of idmapped mounts.");
+               return -EINVAL;
+       }
+
        ASSERT(breq->icount == 1);
 
        bc.buf = kmem_zalloc(sizeof(struct xfs_bulkstat),
index 52370d0..1c97b15 100644
@@ -634,6 +634,47 @@ xfs_check_summary_counts(
        return xfs_initialize_perag_data(mp, mp->m_sb.sb_agcount);
 }
 
+/*
+ * Flush and reclaim dirty inodes in preparation for unmount. Inodes and
+ * internal inode structures can be sitting in the CIL and AIL at this point,
+ * so we need to unpin them, write them back and/or reclaim them before unmount
+ * can proceed.
+ *
+ * An inode cluster that has been freed can have its buffer still pinned in
+ * memory because the transaction is still sitting in an iclog. The stale inodes
+ * on that buffer will be pinned to the buffer until the transaction hits the
+ * disk and the callbacks run. Pushing the AIL will skip the stale inodes and
+ * may never see the pinned buffer, so nothing will push out the iclog and
+ * unpin the buffer.
+ *
+ * Hence we need to force the log to unpin everything first. However, log
+ * forces don't wait for the discards they issue to complete, so we have to
+ * explicitly wait for them to complete here as well.
+ *
+ * Then we can tell the world we are unmounting so that error handling knows
+ * that the filesystem is going away and we should error out anything that we
+ * have been retrying in the background.  This will prevent never-ending
+ * retries in AIL pushing from hanging the unmount.
+ *
+ * Finally, we can push the AIL to clean all the remaining dirty objects, then
+ * reclaim the remaining inodes that are still in memory at this point in time.
+ */
+static void
+xfs_unmount_flush_inodes(
+       struct xfs_mount        *mp)
+{
+       xfs_log_force(mp, XFS_LOG_SYNC);
+       xfs_extent_busy_wait_all(mp);
+       flush_workqueue(xfs_discard_wq);
+
+       mp->m_flags |= XFS_MOUNT_UNMOUNTING;
+
+       xfs_ail_push_all_sync(mp->m_ail);
+       cancel_delayed_work_sync(&mp->m_reclaim_work);
+       xfs_reclaim_inodes(mp);
+       xfs_health_unmount(mp);
+}
+
 /*
  * This function does the following on an initial mount of a file system:
  *     - reads the superblock from disk and init the mount struct
@@ -1008,7 +1049,7 @@ xfs_mountfs(
        /* Clean out dquots that might be in memory after quotacheck. */
        xfs_qm_unmount(mp);
        /*
-        * Cancel all delayed reclaim work and reclaim the inodes directly.
+        * Flush all inode reclamation work and flush the log.
         * We have to do this /after/ rtunmount and qm_unmount because those
         * two will have scheduled delayed reclaim for the rt/quota inodes.
         *
@@ -1018,11 +1059,8 @@ xfs_mountfs(
         * qm_unmount_quotas and therefore rely on qm_unmount to release the
         * quota inodes.
         */
-       cancel_delayed_work_sync(&mp->m_reclaim_work);
-       xfs_reclaim_inodes(mp);
-       xfs_health_unmount(mp);
+       xfs_unmount_flush_inodes(mp);
  out_log_dealloc:
-       mp->m_flags |= XFS_MOUNT_UNMOUNTING;
        xfs_log_mount_cancel(mp);
  out_fail_wait:
        if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
@@ -1063,47 +1101,7 @@ xfs_unmountfs(
        xfs_rtunmount_inodes(mp);
        xfs_irele(mp->m_rootip);
 
-       /*
-        * We can potentially deadlock here if we have an inode cluster
-        * that has been freed has its buffer still pinned in memory because
-        * the transaction is still sitting in a iclog. The stale inodes
-        * on that buffer will be pinned to the buffer until the
-        * transaction hits the disk and the callbacks run. Pushing the AIL will
-        * skip the stale inodes and may never see the pinned buffer, so
-        * nothing will push out the iclog and unpin the buffer. Hence we
-        * need to force the log here to ensure all items are flushed into the
-        * AIL before we go any further.
-        */
-       xfs_log_force(mp, XFS_LOG_SYNC);
-
-       /*
-        * Wait for all busy extents to be freed, including completion of
-        * any discard operation.
-        */
-       xfs_extent_busy_wait_all(mp);
-       flush_workqueue(xfs_discard_wq);
-
-       /*
-        * We now need to tell the world we are unmounting. This will allow
-        * us to detect that the filesystem is going away and we should error
-        * out anything that we have been retrying in the background. This will
-        * prevent neverending retries in AIL pushing from hanging the unmount.
-        */
-       mp->m_flags |= XFS_MOUNT_UNMOUNTING;
-
-       /*
-        * Flush all pending changes from the AIL.
-        */
-       xfs_ail_push_all_sync(mp->m_ail);
-
-       /*
-        * Reclaim all inodes. At this point there should be no dirty inodes and
-        * none should be pinned or locked. Stop background inode reclaim here
-        * if it is still running.
-        */
-       cancel_delayed_work_sync(&mp->m_reclaim_work);
-       xfs_reclaim_inodes(mp);
-       xfs_health_unmount(mp);
+       xfs_unmount_flush_inodes(mp);
 
        xfs_qm_unmount(mp);
 
index 1379013..7f368b1 100644
@@ -182,7 +182,8 @@ xfs_symlink(
        /*
         * Make sure that we have allocated dquot(s) on disk.
         */
-       error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
+       error = xfs_qm_vop_dqalloc(dp, fsuid_into_mnt(mnt_userns),
+                       fsgid_into_mnt(mnt_userns), prid,
                        XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
                        &udqp, &gdqp, &pdqp);
        if (error)
index 0fe76f3..049e36c 100644
@@ -165,6 +165,21 @@ static int zonefs_writepages(struct address_space *mapping,
        return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops);
 }
 
+static int zonefs_swap_activate(struct swap_info_struct *sis,
+                               struct file *swap_file, sector_t *span)
+{
+       struct inode *inode = file_inode(swap_file);
+       struct zonefs_inode_info *zi = ZONEFS_I(inode);
+
+       if (zi->i_ztype != ZONEFS_ZTYPE_CNV) {
+               zonefs_err(inode->i_sb,
+                          "swap file: not a conventional zone file\n");
+               return -EINVAL;
+       }
+
+       return iomap_swapfile_activate(sis, swap_file, span, &zonefs_iomap_ops);
+}
+
 static const struct address_space_operations zonefs_file_aops = {
        .readpage               = zonefs_readpage,
        .readahead              = zonefs_readahead,
@@ -177,6 +192,7 @@ static const struct address_space_operations zonefs_file_aops = {
        .is_partially_uptodate  = iomap_is_partially_uptodate,
        .error_remove_page      = generic_error_remove_page,
        .direct_IO              = noop_direct_IO,
+       .swap_activate          = zonefs_swap_activate,
 };
 
 static void zonefs_update_stats(struct inode *inode, loff_t new_isize)
@@ -727,6 +743,68 @@ out_release:
        return ret;
 }
 
+/*
+ * Do not exceed the LFS limits nor the file zone size. If pos is under a
+ * limit but pos + count crosses it, the access is shortened. If pos itself
+ * exceeds a limit, return -EFBIG.
+ */
+static loff_t zonefs_write_check_limits(struct file *file, loff_t pos,
+                                       loff_t count)
+{
+       struct inode *inode = file_inode(file);
+       struct zonefs_inode_info *zi = ZONEFS_I(inode);
+       loff_t limit = rlimit(RLIMIT_FSIZE);
+       loff_t max_size = zi->i_max_size;
+
+       if (limit != RLIM_INFINITY) {
+               if (pos >= limit) {
+                       send_sig(SIGXFSZ, current, 0);
+                       return -EFBIG;
+               }
+               count = min(count, limit - pos);
+       }
+
+       if (!(file->f_flags & O_LARGEFILE))
+               max_size = min_t(loff_t, MAX_NON_LFS, max_size);
+
+       if (unlikely(pos >= max_size))
+               return -EFBIG;
+
+       return min(count, max_size - pos);
+}
+
+static ssize_t zonefs_write_checks(struct kiocb *iocb, struct iov_iter *from)
+{
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file_inode(file);
+       struct zonefs_inode_info *zi = ZONEFS_I(inode);
+       loff_t count;
+
+       if (IS_SWAPFILE(inode))
+               return -ETXTBSY;
+
+       if (!iov_iter_count(from))
+               return 0;
+
+       if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
+               return -EINVAL;
+
+       if (iocb->ki_flags & IOCB_APPEND) {
+               if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
+                       return -EINVAL;
+               mutex_lock(&zi->i_truncate_mutex);
+               iocb->ki_pos = zi->i_wpoffset;
+               mutex_unlock(&zi->i_truncate_mutex);
+       }
+
+       count = zonefs_write_check_limits(file, iocb->ki_pos,
+                                         iov_iter_count(from));
+       if (count < 0)
+               return count;
+
+       iov_iter_truncate(from, count);
+       return iov_iter_count(from);
+}
+
 /*
  * Handle direct writes. For sequential zone files, this is the only possible
  * write path. For these files, check that the user is issuing writes
@@ -744,8 +822,7 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
        struct super_block *sb = inode->i_sb;
        bool sync = is_sync_kiocb(iocb);
        bool append = false;
-       size_t count;
-       ssize_t ret;
+       ssize_t ret, count;
 
        /*
         * For async direct IOs to sequential zone files, refuse IOCB_NOWAIT
@@ -763,12 +840,11 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
                inode_lock(inode);
        }
 
-       ret = generic_write_checks(iocb, from);
-       if (ret <= 0)
+       count = zonefs_write_checks(iocb, from);
+       if (count <= 0) {
+               ret = count;
                goto inode_unlock;
-
-       iov_iter_truncate(from, zi->i_max_size - iocb->ki_pos);
-       count = iov_iter_count(from);
+       }
 
        if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
                ret = -EINVAL;
@@ -828,12 +904,10 @@ static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
                inode_lock(inode);
        }
 
-       ret = generic_write_checks(iocb, from);
+       ret = zonefs_write_checks(iocb, from);
        if (ret <= 0)
                goto inode_unlock;
 
-       iov_iter_truncate(from, zi->i_max_size - iocb->ki_pos);
-
        ret = iomap_file_buffered_write(iocb, from, &zonefs_iomap_ops);
        if (ret > 0)
                iocb->ki_pos += ret;
@@ -966,9 +1040,7 @@ static int zonefs_open_zone(struct inode *inode)
 
        mutex_lock(&zi->i_truncate_mutex);
 
-       zi->i_wr_refcnt++;
-       if (zi->i_wr_refcnt == 1) {
-
+       if (!zi->i_wr_refcnt) {
                if (atomic_inc_return(&sbi->s_open_zones) > sbi->s_max_open_zones) {
                        atomic_dec(&sbi->s_open_zones);
                        ret = -EBUSY;
@@ -978,7 +1050,6 @@ static int zonefs_open_zone(struct inode *inode)
                if (i_size_read(inode) < zi->i_max_size) {
                        ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_OPEN);
                        if (ret) {
-                               zi->i_wr_refcnt--;
                                atomic_dec(&sbi->s_open_zones);
                                goto unlock;
                        }
@@ -986,6 +1057,8 @@ static int zonefs_open_zone(struct inode *inode)
                }
        }
 
+       zi->i_wr_refcnt++;
+
 unlock:
        mutex_unlock(&zi->i_truncate_mutex);
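
Editor's note: zonefs_write_check_limits() above mirrors what generic_write_checks() does for regular files, but clamps against the zone size (zi->i_max_size) rather than sb->s_maxbytes. A standalone sketch of the clamping arithmetic with plain integer types (names are illustrative, not the zonefs API):

#include <stdio.h>

#define EFBIG           27
#define RLIM_INFINITY   (~0ULL)

/* Clamp a write of 'count' bytes at 'pos'; negative means refuse (-EFBIG). */
static long long write_check_limits(unsigned long long pos,
                                    unsigned long long count,
                                    unsigned long long rlimit_fsize,
                                    unsigned long long max_size)
{
        if (rlimit_fsize != RLIM_INFINITY) {
                if (pos >= rlimit_fsize)
                        return -EFBIG;  /* the kernel also raises SIGXFSZ */
                if (count > rlimit_fsize - pos)
                        count = rlimit_fsize - pos;
        }
        if (pos >= max_size)
                return -EFBIG;
        return count < max_size - pos ? count : max_size - pos;
}

int main(void)
{
        /* A 100-byte write at offset 4090 into a 4096-byte zone is shortened. */
        printf("%lld\n", write_check_limits(4090, 100, RLIM_INFINITY, 4096)); /* 6 */
        printf("%lld\n", write_check_limits(5000, 100, RLIM_INFINITY, 4096)); /* -27 */
        return 0;
}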
 
index 02a716a..f28b097 100644
@@ -233,6 +233,7 @@ struct acpi_pnp_type {
 
 struct acpi_device_pnp {
        acpi_bus_id bus_id;             /* Object name */
+       int instance_no;                /* Instance number of this object */
        struct acpi_pnp_type type;      /* ID type */
        acpi_bus_address bus_address;   /* _ADR */
        char *unique_id;                /* _UID */
index 85d728f..1e85c20 100644
@@ -1846,34 +1846,34 @@ struct drm_dp_aux_cec {
  * @crc_count: counter of captured frame CRCs
  * @transfer: transfers a message representing a single AUX transaction
  *
- * The .dev field should be set to a pointer to the device that implements
- * the AUX channel.
+ * The @dev field should be set to a pointer to the device that implements the
+ * AUX channel.
  *
- * The .name field may be used to specify the name of the I2C adapter. If set to
- * NULL, dev_name() of .dev will be used.
+ * The @name field may be used to specify the name of the I2C adapter. If set to
+ * %NULL, dev_name() of @dev will be used.
  *
- * Drivers provide a hardware-specific implementation of how transactions
- * are executed via the .transfer() function. A pointer to a drm_dp_aux_msg
+ * Drivers provide a hardware-specific implementation of how transactions are
+ * executed via the @transfer() function. A pointer to a &drm_dp_aux_msg
  * structure describing the transaction is passed into this function. Upon
- * success, the implementation should return the number of payload bytes
- * that were transferred, or a negative error-code on failure. Helpers
- * propagate errors from the .transfer() function, with the exception of
- * the -EBUSY error, which causes a transaction to be retried. On a short,
- * helpers will return -EPROTO to make it simpler to check for failure.
+ * success, the implementation should return the number of payload bytes that
+ * were transferred, or a negative error-code on failure. Helpers propagate
+ * errors from the @transfer() function, with the exception of the %-EBUSY
+ * error, which causes a transaction to be retried. On a short transfer,
+ * helpers will return %-EPROTO to make it simpler to check for failure.
  *
  * An AUX channel can also be used to transport I2C messages to a sink. A
- * typical application of that is to access an EDID that's present in the
- * sink device. The .transfer() function can also be used to execute such
- * transactions. The drm_dp_aux_register() function registers an I2C
- * adapter that can be passed to drm_probe_ddc(). Upon removal, drivers
- * should call drm_dp_aux_unregister() to remove the I2C adapter.
- * The I2C adapter uses long transfers by default; if a partial response is
- * received, the adapter will drop down to the size given by the partial
- * response for this transaction only.
+ * typical application of that is to access an EDID that's present in the sink
+ * device. The @transfer() function can also be used to execute such
+ * transactions. The drm_dp_aux_register() function registers an I2C adapter
+ * that can be passed to drm_probe_ddc(). Upon removal, drivers should call
+ * drm_dp_aux_unregister() to remove the I2C adapter. The I2C adapter uses long
+ * transfers by default; if a partial response is received, the adapter will
+ * drop down to the size given by the partial response for this transaction
+ * only.
  *
- * Note that the aux helper code assumes that the .transfer() function
- * only modifies the reply field of the drm_dp_aux_msg structure.  The
- * retry logic and i2c helpers assume this is the case.
+ * Note that the aux helper code assumes that the @transfer() function only
+ * modifies the reply field of the &drm_dp_aux_msg structure. The retry logic
+ * and i2c helpers assume this is the case.
  */
 struct drm_dp_aux {
        const char *name;
index f32d179..a3c58c9 100644
@@ -524,16 +524,20 @@ void __drm_err(const char *format, ...);
 #define DRM_DEBUG_DP(fmt, ...)                                         \
        __drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__)
 
-
-#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...)                            \
-({                                                                     \
-       static DEFINE_RATELIMIT_STATE(_rs,                              \
-                                     DEFAULT_RATELIMIT_INTERVAL,       \
-                                     DEFAULT_RATELIMIT_BURST);         \
-       if (__ratelimit(&_rs))                                          \
-               drm_dev_dbg(NULL, DRM_UT_KMS, fmt, ##__VA_ARGS__);      \
+#define __DRM_DEFINE_DBG_RATELIMITED(category, drm, fmt, ...)                                  \
+({                                                                                             \
+       static DEFINE_RATELIMIT_STATE(rs_, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);\
+       const struct drm_device *drm_ = (drm);                                                  \
+                                                                                               \
+       if (drm_debug_enabled(DRM_UT_ ## category) && __ratelimit(&rs_))                        \
+               drm_dev_printk(drm_ ? drm_->dev : NULL, KERN_DEBUG, fmt, ## __VA_ARGS__);       \
 })
 
+#define drm_dbg_kms_ratelimited(drm, fmt, ...) \
+       __DRM_DEFINE_DBG_RATELIMITED(KMS, drm, fmt, ## __VA_ARGS__)
+
+#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) drm_dbg_kms_ratelimited(NULL, fmt, ## __VA_ARGS__)
+
 /*
  * struct drm_device based WARNs
  *
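
Editor's note: the new drm_dbg_kms_ratelimited() takes a struct drm_device pointer so rate-limited debug output is attributed to the right device; the old DRM_DEBUG_KMS_RATELIMITED() is kept as a wrapper that passes NULL. A sketch of a hypothetical call site (driver and function names are invented):

/* Hypothetical driver code; 'ddev' is the driver's struct drm_device. */
static void foo_handle_link_status(struct drm_device *ddev, int status)
{
        if (status < 0)
                drm_dbg_kms_ratelimited(ddev,
                                        "link training failed: %d\n", status);
}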
index 1c815e0..10225a0 100644
@@ -277,6 +277,7 @@ struct drm_sched_backend_ops {
  * @hang_limit: once the number of hangs by a job crosses this limit, it is
  *              marked guilty and will no longer be considered for scheduling.
  * @score: score to help the load balancer pick an idle sched
+ * @_score: score used when the driver doesn't provide one
  * @ready: marks if the underlying HW is ready to work
  * @free_guilty: A hint to the timeout handler to free the guilty job.
  *
@@ -321,7 +322,10 @@ void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
+void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max);
 void drm_sched_increase_karma(struct drm_sched_job *bad);
+void drm_sched_reset_karma(struct drm_sched_job *bad);
+void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
 bool drm_sched_dependency_optimized(struct dma_fence* fence,
                                    struct drm_sched_entity *entity);
 void drm_sched_fault(struct drm_gpu_scheduler *sched);
index 3587f66..2155e2e 100644
@@ -603,9 +603,11 @@ static inline void ttm_bo_pin(struct ttm_buffer_object *bo)
 static inline void ttm_bo_unpin(struct ttm_buffer_object *bo)
 {
        dma_resv_assert_held(bo->base.resv);
-       WARN_ON_ONCE(!bo->pin_count);
        WARN_ON_ONCE(!kref_read(&bo->kref));
-       --bo->pin_count;
+       if (bo->pin_count)
+               --bo->pin_count;
+       else
+               WARN_ON_ONCE(true);
 }
 
 int ttm_mem_evict_first(struct ttm_device *bdev,
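
Editor's note: the rewritten ttm_bo_unpin() still warns on an unbalanced unpin but no longer lets pin_count wrap below zero, which would have left the BO effectively pinned forever and unevictable. The saturating decrement, reduced to a standalone demonstration (WARN_ON_ONCE is simplified to a stderr print that fires every time):

#include <stdio.h>

#define WARN_ON_ONCE(cond) \
        do { if (cond) fprintf(stderr, "warning: %s\n", #cond); } while (0)

static unsigned int pin_count;

static void bo_unpin(void)
{
        if (pin_count)
                --pin_count;
        else
                WARN_ON_ONCE(1);        /* unbalanced, but no underflow */
}

int main(void)
{
        pin_count = 1;
        bo_unpin();                     /* 1 -> 0 */
        bo_unpin();                     /* warns; stays 0 instead of wrapping */
        printf("pin_count=%u\n", pin_count);
        return 0;
}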
index dc93454..10528de 100644
@@ -2,7 +2,7 @@
 /*
  * Constant for device tree bindings for Turris Mox module configuration bus
  *
- * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
+ * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
  */
 
 #ifndef _DT_BINDINGS_BUS_MOXTET_H
index fcdaab7..3bdcfc4 100644
@@ -222,10 +222,14 @@ void __iomem *__acpi_map_table(unsigned long phys, unsigned long size);
 void __acpi_unmap_table(void __iomem *map, unsigned long size);
 int early_acpi_boot_init(void);
 int acpi_boot_init (void);
+void acpi_boot_table_prepare (void);
 void acpi_boot_table_init (void);
 int acpi_mps_check (void);
 int acpi_numa_init (void);
 
+int acpi_locate_initial_tables (void);
+void acpi_reserve_initial_tables (void);
+void acpi_table_init_complete (void);
 int acpi_table_init (void);
 int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
 int __init acpi_table_parse_entries(char *id, unsigned long table_size,
@@ -814,9 +818,12 @@ static inline int acpi_boot_init(void)
        return 0;
 }
 
+static inline void acpi_boot_table_prepare(void)
+{
+}
+
 static inline void acpi_boot_table_init(void)
 {
-       return;
 }
 
 static inline int acpi_mps_check(void)
index 6cc93ab..c68d87b 100644
@@ -105,8 +105,19 @@ extern struct bus_type amba_bustype;
 #define amba_get_drvdata(d)    dev_get_drvdata(&d->dev)
 #define amba_set_drvdata(d,p)  dev_set_drvdata(&d->dev, p)
 
+#ifdef CONFIG_ARM_AMBA
 int amba_driver_register(struct amba_driver *);
 void amba_driver_unregister(struct amba_driver *);
+#else
+static inline int amba_driver_register(struct amba_driver *drv)
+{
+       return -EINVAL;
+}
+static inline void amba_driver_unregister(struct amba_driver *drv)
+{
+}
+#endif
+
 struct amba_device *amba_device_alloc(const char *, resource_size_t, size_t);
 void amba_device_put(struct amba_device *);
 int amba_device_add(struct amba_device *, struct resource *);
index 57bb54f..ef4bd70 100644
@@ -2,7 +2,7 @@
 /*
  * rWTM BIU Mailbox driver for Armada 37xx
  *
- * Author: Marek Behun <marek.behun@nic.cz>
+ * Author: Marek Behún <kabel@kernel.org>
  */
 
 #ifndef _LINUX_ARMADA_37XX_RWTM_MAILBOX_H_
index 40bad71..532bcbf 100644
@@ -476,7 +476,6 @@ struct virtchnl_rss_key {
        u16 vsi_id;
        u16 key_len;
        u8 key[1];         /* RSS hash key, packed bytes */
-       u8 pad[1];
 };
 
 VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
@@ -485,7 +484,6 @@ struct virtchnl_rss_lut {
        u16 vsi_id;
        u16 lut_entries;
        u8 lut[1];        /* RSS lookup table */
-       u8 pad[1];
 };
 
 VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
index bc6bc83..158aefa 100644
@@ -85,8 +85,6 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_ELVPRIV            ((__force req_flags_t)(1 << 12))
 /* account into disk and partition IO statistics */
 #define RQF_IO_STAT            ((__force req_flags_t)(1 << 13))
-/* request came from our alloc pool */
-#define RQF_ALLOCED            ((__force req_flags_t)(1 << 14))
 /* runtime pm request */
 #define RQF_PM                 ((__force req_flags_t)(1 << 15))
 /* on IO scheduler merge hash */
index cccaef1..fdac053 100644
@@ -21,6 +21,7 @@
 #include <linux/capability.h>
 #include <linux/sched/mm.h>
 #include <linux/slab.h>
+#include <linux/percpu-refcount.h>
 
 struct bpf_verifier_env;
 struct bpf_verifier_log;
@@ -39,6 +40,7 @@ struct bpf_local_storage;
 struct bpf_local_storage_map;
 struct kobject;
 struct mem_cgroup;
+struct module;
 
 extern struct idr btf_idr;
 extern spinlock_t btf_idr_lock;
@@ -556,7 +558,8 @@ struct bpf_tramp_progs {
  *      fentry = a set of program to run before calling original function
  *      fexit = a set of program to run after original function
  */
-int arch_prepare_bpf_trampoline(void *image, void *image_end,
+struct bpf_tramp_image;
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
                                const struct btf_func_model *m, u32 flags,
                                struct bpf_tramp_progs *tprogs,
                                void *orig_call);
@@ -565,6 +568,8 @@ u64 notrace __bpf_prog_enter(struct bpf_prog *prog);
 void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
 u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog);
 void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start);
+void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
+void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
 
 struct bpf_ksym {
        unsigned long            start;
@@ -583,6 +588,18 @@ enum bpf_tramp_prog_type {
        BPF_TRAMP_REPLACE, /* more than MAX */
 };
 
+struct bpf_tramp_image {
+       void *image;
+       struct bpf_ksym ksym;
+       struct percpu_ref pcref;
+       void *ip_after_call;
+       void *ip_epilogue;
+       union {
+               struct rcu_head rcu;
+               struct work_struct work;
+       };
+};
+
 struct bpf_trampoline {
        /* hlist for trampoline_table */
        struct hlist_node hlist;
@@ -605,9 +622,9 @@ struct bpf_trampoline {
        /* Number of attached programs. A counter per kind. */
        int progs_cnt[BPF_TRAMP_MAX];
        /* Executable image of trampoline */
-       void *image;
+       struct bpf_tramp_image *cur_image;
        u64 selector;
-       struct bpf_ksym ksym;
+       struct module *mod;
 };
 
 struct bpf_attach_target_info {
@@ -691,6 +708,8 @@ void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
 void bpf_image_ksym_del(struct bpf_ksym *ksym);
 void bpf_ksym_add(struct bpf_ksym *ksym);
 void bpf_ksym_del(struct bpf_ksym *ksym);
+int bpf_jit_charge_modmem(u32 pages);
+void bpf_jit_uncharge_modmem(u32 pages);
 #else
 static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,
                                           struct bpf_trampoline *tr)
@@ -787,7 +806,6 @@ struct bpf_prog_aux {
        bool func_proto_unreliable;
        bool sleepable;
        bool tail_call_reachable;
-       enum bpf_tramp_prog_type trampoline_prog_type;
        struct hlist_node tramp_hlist;
        /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
        const struct btf_type *attach_func_proto;
@@ -1093,7 +1111,7 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
                _ret;                                                   \
         })
 
-#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \
+#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null, set_cg_storage) \
        ({                                              \
                struct bpf_prog_array_item *_item;      \
                struct bpf_prog *_prog;                 \
@@ -1106,7 +1124,8 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
                        goto _out;                      \
                _item = &_array->items[0];              \
                while ((_prog = READ_ONCE(_item->prog))) {              \
-                       bpf_cgroup_storage_set(_item->cgroup_storage);  \
+                       if (set_cg_storage)             \
+                               bpf_cgroup_storage_set(_item->cgroup_storage);  \
                        _ret &= func(_prog, ctx);       \
                        _item++;                        \
                }                                       \
@@ -1153,10 +1172,10 @@ _out:                                                   \
        })
 
 #define BPF_PROG_RUN_ARRAY(array, ctx, func)           \
-       __BPF_PROG_RUN_ARRAY(array, ctx, func, false)
+       __BPF_PROG_RUN_ARRAY(array, ctx, func, false, true)
 
 #define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)     \
-       __BPF_PROG_RUN_ARRAY(array, ctx, func, true)
+       __BPF_PROG_RUN_ARRAY(array, ctx, func, true, false)
 
 #ifdef CONFIG_BPF_SYSCALL
 DECLARE_PER_CPU(int, bpf_prog_active);
index 58f6fe8..162a2e5 100644
@@ -785,6 +785,23 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
                                  (parent_data), (flags), (reg), (shift),     \
                                  (width), (clk_divider_flags), (table),      \
                                  (lock))
+/**
+ * devm_clk_hw_register_divider - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_divider(dev, name, parent_name, flags, reg, shift,    \
+                               width, clk_divider_flags, lock)               \
+       __devm_clk_hw_register_divider((dev), NULL, (name), (parent_name), NULL,   \
+                                 NULL, (flags), (reg), (shift), (width),     \
+                                 (clk_divider_flags), NULL, (lock))
 /**
  * devm_clk_hw_register_divider_table - register a table based divider clock
  * with the clock framework (devres variant)
@@ -868,6 +885,13 @@ struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np,
                const struct clk_parent_data *parent_data,
                unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
                u8 clk_mux_flags, u32 *table, spinlock_t *lock);
+struct clk_hw *__devm_clk_hw_register_mux(struct device *dev, struct device_node *np,
+               const char *name, u8 num_parents,
+               const char * const *parent_names,
+               const struct clk_hw **parent_hws,
+               const struct clk_parent_data *parent_data,
+               unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
+               u8 clk_mux_flags, u32 *table, spinlock_t *lock);
 struct clk *clk_register_mux_table(struct device *dev, const char *name,
                const char * const *parent_names, u8 num_parents,
                unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
@@ -902,6 +926,12 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
        __clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, NULL, \
                              (parent_data), (flags), (reg), (shift),         \
                              BIT((width)) - 1, (clk_mux_flags), NULL, (lock))
+#define devm_clk_hw_register_mux(dev, name, parent_names, num_parents, flags, reg, \
+                           shift, width, clk_mux_flags, lock)                \
+       __devm_clk_hw_register_mux((dev), NULL, (name), (num_parents),        \
+                             (parent_names), NULL, NULL, (flags), (reg),     \
+                             (shift), BIT((width)) - 1, (clk_mux_flags),     \
+                             NULL, (lock))
 
 int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags,
                         unsigned int val);
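
Editor's note: the devm_ variants let a clock provider drop the explicit clk_hw_unregister_*() calls from its error and remove paths. A sketch of a hypothetical probe using the new mux helper (register layout and clock names are invented for illustration):

/* Hypothetical provider; 'base' is an ioremapped register block. */
static const char * const foo_sel_parents[] = { "osc24m", "pll-periph" };

static int foo_clk_probe(struct device *dev, void __iomem *base)
{
        struct clk_hw *hw;

        hw = devm_clk_hw_register_mux(dev, "foo-sel", foo_sel_parents,
                                      ARRAY_SIZE(foo_sel_parents),
                                      CLK_SET_RATE_PARENT,
                                      base + 0x10,      /* mux register */
                                      0, 1,             /* shift, width */
                                      0, NULL);         /* mux flags, lock */
        return PTR_ERR_OR_ZERO(hw);
}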
index 7f4ac87..5c641f9 100644
@@ -253,7 +253,11 @@ struct target_type {
 #define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)
 
 /*
- * Indicates that a target supports host-managed zoned block devices.
+ * Indicates support for zoned block devices:
+ * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
+ *   block devices but does not support combining different zoned models.
+ * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
+ *   devices with different zoned models.
  */
 #ifdef CONFIG_BLK_DEV_ZONED
 #define DM_TARGET_ZONED_HM             0x00000040
@@ -275,6 +279,15 @@ struct target_type {
 #define DM_TARGET_PASSES_CRYPTO                0x00000100
 #define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO)
 
+#ifdef CONFIG_BLK_DEV_ZONED
+#define DM_TARGET_MIXED_ZONED_MODEL    0x00000200
+#define dm_target_supports_mixed_zoned_model(type) \
+       ((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
+#else
+#define DM_TARGET_MIXED_ZONED_MODEL    0x00000000
+#define dm_target_supports_mixed_zoned_model(type) (false)
+#endif
+
 struct dm_target {
        struct dm_table *table;
        struct target_type *type;
index 9f12efa..6ffb4b2 100644
@@ -587,6 +587,7 @@ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
 }
 
 struct dma_fence *dma_fence_get_stub(void);
+struct dma_fence *dma_fence_allocate_private_stub(void);
 u64 dma_fence_context_alloc(unsigned num);
 
 #define DMA_FENCE_TRACE(f, fmt, args...) \
index 8710f57..6b5d36b 100644
@@ -72,8 +72,10 @@ typedef void *efi_handle_t;
  */
 typedef guid_t efi_guid_t __aligned(__alignof__(u32));
 
-#define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \
-       GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)
+#define EFI_GUID(a, b, c, d...) (efi_guid_t){ {                                        \
+       (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff,  \
+       (b) & 0xff, ((b) >> 8) & 0xff,                                          \
+       (c) & 0xff, ((c) >> 8) & 0xff, d } }
 
 /*
  * Generic EFI table header
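
Editor's note: the reworked EFI_GUID() expands straight to the mixed-endian byte layout UEFI mandates: the 32-bit field and both 16-bit fields are stored least-significant byte first, the trailing eight bytes verbatim. A standalone demonstration (a local 16-byte struct stands in for efi_guid_t; compile with GCC or Clang, since the named variadic macro argument is a GNU extension):

#include <stdio.h>

typedef struct { unsigned char b[16]; } guid_t;

#define EFI_GUID(a, b, c, d...) (guid_t){ {                                    \
        (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
        (b) & 0xff, ((b) >> 8) & 0xff,                                         \
        (c) & 0xff, ((c) >> 8) & 0xff, d } }

int main(void)
{
        /* EFI_GLOBAL_VARIABLE_GUID: 8be4df61-93ca-11d2-aa0d-00e098032b8c */
        guid_t g = EFI_GUID(0x8be4df61, 0x93ca, 0x11d2,
                            0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c);

        for (int i = 0; i < 16; i++)
                printf("%02x ", g.b[i]);
        printf("\n");   /* 61 df e4 8b ca 93 d2 11 aa 0d 00 e0 98 03 2b 8c */
        return 0;
}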
index ec4cd39..cdca84e 100644
@@ -87,9 +87,7 @@ u32 ethtool_op_get_link(struct net_device *dev);
 int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti);
 
 
-/**
- * struct ethtool_link_ext_state_info - link extended state and substate.
- */
+/* Link extended state and substate. */
 struct ethtool_link_ext_state_info {
        enum ethtool_link_ext_state link_ext_state;
        union {
@@ -129,7 +127,6 @@ struct ethtool_link_ksettings {
                __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
        } link_modes;
        u32     lanes;
-       enum ethtool_link_mode_bit_indices link_mode;
 };
 
 /**
@@ -292,6 +289,9 @@ struct ethtool_pause_stats {
  *     do not attach ext_substate attribute to netlink message). If link_ext_state
  *     and link_ext_substate are unknown, return -ENODATA. If not implemented,
  *     link_ext_state and link_ext_substate will not be sent to userspace.
+ * @get_eeprom_len: Read range of EEPROM addresses for validation of
+ *     @get_eeprom and @set_eeprom requests.
+ *     Returns 0 if device does not support EEPROM access.
  * @get_eeprom: Read data from the device EEPROM.
  *     Should fill in the magic field.  Don't need to check len for zero
  *     or wraparound.  Fill in the data argument with the eeprom values
@@ -384,6 +384,8 @@ struct ethtool_pause_stats {
  * @get_module_eeprom: Get the eeprom information from the plug-in module
  * @get_eee: Get Energy-Efficient (EEE) supported and status.
  * @set_eee: Set EEE status (enable/disable) as well as LPI timers.
+ * @get_tunable: Read the value of a driver / device tunable.
+ * @set_tunable: Set the value of a driver / device tunable.
  * @get_per_queue_coalesce: Get interrupt coalescing parameters per queue.
  *     It must check that the given queue number is valid. If neither a RX nor
  *     a TX queue has this number, return -EINVAL. If only a RX queue or a TX
@@ -547,8 +549,8 @@ struct phy_tdr_config;
  * @get_sset_count: Get number of strings that @get_strings will write.
  * @get_strings: Return a set of strings that describe the requested objects
  * @get_stats: Return extended statistics about the PHY device.
- * @start_cable_test - Start a cable test
- * @start_cable_test_tdr - Start a Time Domain Reflectometry cable test
+ * @start_cable_test: Start a cable test
+ * @start_cable_test_tdr: Start a Time Domain Reflectometry cable test
  *
  * All operations are optional (i.e. the function pointer may be set to %NULL)
  * and callers must take this into account. Callers must hold the RTNL lock.
@@ -571,4 +573,12 @@ struct ethtool_phy_ops {
  */
 void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops);
 
+/*
+ * ethtool_params_from_link_mode - Derive link parameters from a given link mode
+ * @link_ksettings: Link parameters to be derived from the link mode
+ * @link_mode: Link mode
+ */
+void
+ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings,
+                             enum ethtool_link_mode_bit_indices link_mode);
 #endif /* _LINUX_ETHTOOL_H */
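
Editor's note: ethtool_params_from_link_mode() lets a driver derive speed, duplex and lane count from a single link-mode bit instead of open-coding the mapping. A sketch of a hypothetical get_link_ksettings callback using it (the fixed 10G link is invented for illustration):

/* Hypothetical driver callback for a device with a fixed 10G link. */
static int foo_get_link_ksettings(struct net_device *dev,
                                  struct ethtool_link_ksettings *ks)
{
        ethtool_params_from_link_mode(ks,
                        ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
        return 0;
}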
index fd183fb..0c19010 100644
@@ -271,6 +271,29 @@ static inline  void devm_extcon_unregister_notifier(struct device *dev,
                                struct extcon_dev *edev, unsigned int id,
                                struct notifier_block *nb) { }
 
+static inline int extcon_register_notifier_all(struct extcon_dev *edev,
+                                              struct notifier_block *nb)
+{
+       return 0;
+}
+
+static inline int extcon_unregister_notifier_all(struct extcon_dev *edev,
+                                                struct notifier_block *nb)
+{
+       return 0;
+}
+
+static inline int devm_extcon_register_notifier_all(struct device *dev,
+                                                   struct extcon_dev *edev,
+                                                   struct notifier_block *nb)
+{
+       return 0;
+}
+
+static inline void devm_extcon_unregister_notifier_all(struct device *dev,
+                                                      struct extcon_dev *edev,
+                                                      struct notifier_block *nb) { }
+
 static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
 {
        return ERR_PTR(-ENODEV);
index ebc2956..19781b0 100644
@@ -56,7 +56,7 @@
  * COMMAND_RECONFIG_FLAG_PARTIAL:
  * Set to FPGA configuration type (full or partial).
  */
-#define COMMAND_RECONFIG_FLAG_PARTIAL  1
+#define COMMAND_RECONFIG_FLAG_PARTIAL  0
 
 /*
  * Timeout settings for service clients:
index ce59a6a..232e1bd 100644
@@ -25,14 +25,18 @@ u64 host1x_get_dma_mask(struct host1x *host1x);
 
 /**
  * struct host1x_client_ops - host1x client operations
+ * @early_init: host1x client early initialization code
  * @init: host1x client initialization code
  * @exit: host1x client tear down code
+ * @late_exit: host1x client late tear down code
  * @suspend: host1x client suspend code
  * @resume: host1x client resume code
  */
 struct host1x_client_ops {
+       int (*early_init)(struct host1x_client *client);
        int (*init)(struct host1x_client *client);
        int (*exit)(struct host1x_client *client);
+       int (*late_exit)(struct host1x_client *client);
        int (*suspend)(struct host1x_client *client);
        int (*resume)(struct host1x_client *client);
 };
@@ -142,7 +146,9 @@ struct host1x_syncpt_base;
 struct host1x_syncpt;
 struct host1x;
 
-struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
+struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host, u32 id);
+struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host, u32 id);
+struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp);
 u32 host1x_syncpt_id(struct host1x_syncpt *sp);
 u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
 u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
@@ -153,11 +159,17 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
                       u32 *value);
 struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
                                            unsigned long flags);
-void host1x_syncpt_free(struct host1x_syncpt *sp);
+void host1x_syncpt_put(struct host1x_syncpt *sp);
+struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
+                                         unsigned long flags,
+                                         const char *name);
 
 struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp);
 u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
 
+void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
+                                             u32 syncpt_id);
+
 /*
  * host1x channel
  */
@@ -218,7 +230,7 @@ struct host1x_job {
        dma_addr_t *reloc_addr_phys;
 
        /* Sync point id, number of increments and end related to the submit */
-       u32 syncpt_id;
+       struct host1x_syncpt *syncpt;
        u32 syncpt_incrs;
        u32 syncpt_end;
 
@@ -320,7 +332,14 @@ static inline struct host1x_device *to_host1x_device(struct device *dev)
 int host1x_device_init(struct host1x_device *device);
 int host1x_device_exit(struct host1x_device *device);
 
-int host1x_client_register(struct host1x_client *client);
+int __host1x_client_register(struct host1x_client *client,
+                            struct lock_class_key *key);
+#define host1x_client_register(class) \
+       ({ \
+               static struct lock_class_key __key; \
+               __host1x_client_register(class, &__key); \
+       })
+
 int host1x_client_unregister(struct host1x_client *client);
 
 int host1x_client_suspend(struct host1x_client *client);
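
Editor's note: turning host1x_client_register() into a macro gives every call site its own static struct lock_class_key, so lockdep treats each registration path as a distinct lock class instead of lumping them together and reporting false recursion. The same idiom, reduced to a standalone sketch (uses GNU statement expressions, like the kernel macro):

#include <stdio.h>

struct lock_class_key { int dummy; };

static int __register(const char *name, struct lock_class_key *key)
{
        /* A real implementation would hand 'key' to lockdep_set_class(). */
        printf("%s registered with key %p\n", name, (void *)key);
        return 0;
}

/* Each expansion gets a distinct static key, i.e. a distinct lock class. */
#define register_client(name)                          \
        ({                                             \
                static struct lock_class_key __key;    \
                __register((name), &__key);            \
        })

int main(void)
{
        register_client("vic");         /* key A */
        register_client("nvdec");       /* key B: a different address */
        return 0;
}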
index 2ad6e92..0bff345 100644
@@ -113,6 +113,11 @@ static inline bool hugetlb_cgroup_disabled(void)
        return !cgroup_subsys_enabled(hugetlb_cgrp_subsys);
 }
 
+static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
+{
+       css_put(&h_cg->css);
+}
+
 extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
                                        struct hugetlb_cgroup **ptr);
 extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
@@ -138,7 +143,8 @@ extern void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
 
 extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
                                                struct file_region *rg,
-                                               unsigned long nr_pages);
+                                               unsigned long nr_pages,
+                                               bool region_del);
 
 extern void hugetlb_cgroup_file_init(void) __init;
 extern void hugetlb_cgroup_migrate(struct page *oldhpage,
@@ -147,7 +153,8 @@ extern void hugetlb_cgroup_migrate(struct page *oldhpage,
 #else
 static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
                                                       struct file_region *rg,
-                                                      unsigned long nr_pages)
+                                                      unsigned long nr_pages,
+                                                      bool region_del)
 {
 }
 
@@ -185,6 +192,10 @@ static inline bool hugetlb_cgroup_disabled(void)
        return true;
 }
 
+static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
+{
+}
+
 static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
                                               struct hugetlb_cgroup **ptr)
 {
index 96556c6..10c94a3 100644
@@ -43,13 +43,14 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
        if (likely(success)) {
                struct vlan_pcpu_stats *pcpu_stats;
 
-               pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
+               pcpu_stats = get_cpu_ptr(vlan->pcpu_stats);
                u64_stats_update_begin(&pcpu_stats->syncp);
                pcpu_stats->rx_packets++;
                pcpu_stats->rx_bytes += len;
                if (multicast)
                        pcpu_stats->rx_multicast++;
                u64_stats_update_end(&pcpu_stats->syncp);
+               put_cpu_ptr(vlan->pcpu_stats);
        } else {
                this_cpu_inc(vlan->pcpu_stats->rx_errors);
        }
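
Editor's note: this_cpu_ptr() assumes preemption is already disabled; macvlan_count_rx() can run in preemptible context, so the task could migrate CPUs mid-update and corrupt another CPU's u64_stats sequence. get_cpu_ptr()/put_cpu_ptr() bracket the update with preempt_disable()/preempt_enable(). The pattern in isolation, as a kernel-style sketch:

/* Sketch: per-CPU stats update that is safe in preemptible context. */
static void count_rx_sketch(struct vlan_pcpu_stats __percpu *stats,
                            unsigned int len)
{
        struct vlan_pcpu_stats *p;

        p = get_cpu_ptr(stats);         /* disables preemption */
        u64_stats_update_begin(&p->syncp);
        p->rx_packets++;
        p->rx_bytes += len;
        u64_stats_update_end(&p->syncp);
        put_cpu_ptr(stats);             /* re-enables preemption */
}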
index 9761a0e..79cde99 100644
@@ -5,31 +5,6 @@
 #include <linux/sched.h>
 #include <linux/xarray.h>
 
-struct io_wq_work_node {
-       struct io_wq_work_node *next;
-};
-
-struct io_wq_work_list {
-       struct io_wq_work_node *first;
-       struct io_wq_work_node *last;
-};
-
-struct io_uring_task {
-       /* submission side */
-       struct xarray           xa;
-       struct wait_queue_head  wait;
-       void                    *last;
-       void                    *io_wq;
-       struct percpu_counter   inflight;
-       atomic_t                in_idle;
-       bool                    sqpoll;
-
-       spinlock_t              task_lock;
-       struct io_wq_work_list  task_list;
-       unsigned long           task_state;
-       struct callback_head    task_work;
-};
-
 #if defined(CONFIG_IO_URING)
 struct sock *io_uring_get_socket(struct file *file);
 void __io_uring_task_cancel(void);
index d13e3cd..5984fff 100644
@@ -460,7 +460,7 @@ static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
 /*
  * Set the allocation direction to bottom-up or top-down.
  */
-static inline __init void memblock_set_bottom_up(bool enable)
+static inline __init_memblock void memblock_set_bottom_up(bool enable)
 {
        memblock.bottom_up = enable;
 }
@@ -470,7 +470,7 @@ static inline __init void memblock_set_bottom_up(bool enable)
  * if this is true, that said, memblock will allocate memory
  * in bottom-up direction.
  */
-static inline __init bool memblock_bottom_up(void)
+static inline __init_memblock bool memblock_bottom_up(void)
 {
        return memblock.bottom_up;
 }
index df5d91c..9c68b2d 100644
@@ -437,11 +437,11 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
        u8         reserved_at_60[0x18];
        u8         log_max_ft_num[0x8];
 
-       u8         reserved_at_80[0x18];
+       u8         reserved_at_80[0x10];
+       u8         log_max_flow_counter[0x8];
        u8         log_max_destination[0x8];
 
-       u8         log_max_flow_counter[0x8];
-       u8         reserved_at_a8[0x10];
+       u8         reserved_at_a0[0x18];
        u8         log_max_flow[0x8];
 
        u8         reserved_at_c0[0x40];
@@ -8835,6 +8835,8 @@ struct mlx5_ifc_pplm_reg_bits {
 
        u8         fec_override_admin_100g_2x[0x10];
        u8         fec_override_admin_50g_1x[0x10];
+
+       u8         reserved_at_140[0x140];
 };
 
 struct mlx5_ifc_ppcnt_reg_bits {
@@ -10198,7 +10200,7 @@ struct mlx5_ifc_pbmc_reg_bits {
 
        struct mlx5_ifc_bufferx_reg_bits buffer[10];
 
-       u8         reserved_at_2e0[0x40];
+       u8         reserved_at_2e0[0x80];
 };
 
 struct mlx5_ifc_qtct_reg_bits {
index d75ef8a..b7deb79 100644
@@ -547,4 +547,11 @@ static inline const char *mlx5_qp_state_str(int state)
        }
 }
 
+static inline int mlx5_get_qp_default_ts(struct mlx5_core_dev *dev)
+{
+       return !MLX5_CAP_ROCE(dev, qp_ts_format) ?
+                      MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING :
+                      MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT;
+}
+
 #endif /* MLX5_QP_H */
index 64a71bf..8ba4342 100644
@@ -1461,16 +1461,28 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
 
 #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
 
+/*
+ * KASAN per-page tags are stored xor'ed with 0xff. This avoids having to set
+ * tags for all pages to the native kernel tag value 0xff, as the default
+ * value 0x00 maps to 0xff.
+ */
+
 static inline u8 page_kasan_tag(const struct page *page)
 {
-       if (kasan_enabled())
-               return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
-       return 0xff;
+       u8 tag = 0xff;
+
+       if (kasan_enabled()) {
+               tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
+               tag ^= 0xff;
+       }
+
+       return tag;
 }
 
 static inline void page_kasan_tag_set(struct page *page, u8 tag)
 {
        if (kasan_enabled()) {
+               tag ^= 0xff;
                page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
                page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
        }
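
Editor's note: storing tags xor'ed with 0xff means a page whose tag bits were never touched (all zero) reads back as the native kernel tag 0xff, which is exactly what non-page_alloc pages need. A standalone demonstration of the round trip (the shift is illustrative, not the real page-flags layout):

#include <stdio.h>

#define KASAN_TAG_MASK    0xffu
#define KASAN_TAG_PGSHIFT 8             /* illustrative, not the real layout */

static unsigned long flags;             /* stands in for page->flags */

static unsigned char tag_get(void)
{
        return ((flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK) ^ 0xff;
}

static void tag_set(unsigned char tag)
{
        tag ^= 0xff;
        flags &= ~((unsigned long)KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
        flags |= ((unsigned long)(tag & KASAN_TAG_MASK)) << KASAN_TAG_PGSHIFT;
}

int main(void)
{
        printf("default: 0x%02x\n", tag_get());  /* 0xff from zeroed flags */
        tag_set(0x2a);
        printf("stored:  0x%02x\n", tag_get());  /* 0x2a round-trips */
        return 0;
}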
index b820078..1a6a9eb 100644
@@ -169,11 +169,11 @@ struct mmu_notifier_ops {
         * the last refcount is dropped.
         *
         * If blockable argument is set to false then the callback cannot
-        * sleep and has to return with -EAGAIN. 0 should be returned
-        * otherwise. Please note that if invalidate_range_start approves
-        * a non-blocking behavior then the same applies to
-        * invalidate_range_end.
-        *
+        * sleep and has to return with -EAGAIN if sleeping would be required.
+        * 0 should be returned otherwise. Please note that notifiers that can
+        * fail invalidate_range_start are not allowed to implement
+        * invalidate_range_end, as there is no mechanism for informing the
+        * notifier that its start failed.
         */
        int (*invalidate_range_start)(struct mmu_notifier *subscription,
                                      const struct mmu_notifier_range *range);
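
Editor's note: the reworded comment pins down the contract: a notifier that can fail invalidate_range_start() with -EAGAIN must not implement invalidate_range_end(), because a failed start has no matching end. A sketch of a hypothetical notifier honoring the non-blocking case (foo_lock and the mirror state are invented):

/* Hypothetical notifier; foo_lock protects the driver's mirror state. */
static DEFINE_MUTEX(foo_lock);

static int foo_invalidate_range_start(struct mmu_notifier *subscription,
                                      const struct mmu_notifier_range *range)
{
        if (mmu_notifier_range_blockable(range))
                mutex_lock(&foo_lock);
        else if (!mutex_trylock(&foo_lock))
                return -EAGAIN;         /* caller retries; no _end follows */

        /* ... invalidate the mirrored range ... */

        mutex_unlock(&foo_lock);
        return 0;
}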
index 59f094f..da4b6fb 100644
@@ -30,9 +30,6 @@
 #include <linux/percpu.h>
 #include <asm/module.h>
 
-/* Not Yet Implemented */
-#define MODULE_SUPPORTED_DEVICE(name)
-
 #define MODULE_NAME_LEN MAX_PARAM_PREFIX_LEN
 
 struct modversion_info {
index 490db68..7918494 100644
@@ -2,7 +2,7 @@
 /*
  * Turris Mox module configuration bus driver
  *
- * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
+ * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
  */
 
 #ifndef __LINUX_MOXTET_H
index 0cd631a..515cff7 100644
@@ -185,7 +185,7 @@ extern void mutex_lock_io(struct mutex *lock);
 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
 # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
 # define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
-# define mutex_lock_io_nested(lock, subclass) mutex_lock(lock)
+# define mutex_lock_io_nested(lock, subclass) mutex_lock_io(lock)
 #endif
 
 /*
index 5b67ea8..87a5d18 100644
@@ -360,6 +360,7 @@ enum {
        NAPI_STATE_IN_BUSY_POLL,        /* sk_busy_loop() owns this NAPI */
        NAPI_STATE_PREFER_BUSY_POLL,    /* prefer busy-polling over softirq processing */
        NAPI_STATE_THREADED,            /* The poll is performed inside its own thread */
+       NAPI_STATE_SCHED_THREADED,      /* NAPI is currently scheduled in threaded mode */
 };
 
 enum {
@@ -372,6 +373,7 @@ enum {
        NAPIF_STATE_IN_BUSY_POLL        = BIT(NAPI_STATE_IN_BUSY_POLL),
        NAPIF_STATE_PREFER_BUSY_POLL    = BIT(NAPI_STATE_PREFER_BUSY_POLL),
        NAPIF_STATE_THREADED            = BIT(NAPI_STATE_THREADED),
+       NAPIF_STATE_SCHED_THREADED      = BIT(NAPI_STATE_SCHED_THREADED),
 };
 
 enum gro_result {
index 8ebb641..8ec4846 100644
@@ -227,7 +227,7 @@ struct xt_table {
        unsigned int valid_hooks;
 
        /* Man behind the curtain... */
-       struct xt_table_info __rcu *private;
+       struct xt_table_info *private;
 
        /* Set this to THIS_MODULE if you are a module, otherwise NULL */
        struct module *me;
@@ -376,7 +376,7 @@ static inline unsigned int xt_write_recseq_begin(void)
         * since addend is most likely 1
         */
        __this_cpu_add(xt_recseq.sequence, addend);
-       smp_wmb();
+       smp_mb();
 
        return addend;
 }
@@ -448,9 +448,6 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
 
 struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);
 
-struct xt_table_info
-*xt_table_get_private_protected(const struct xt_table *table);
-
 #ifdef CONFIG_COMPAT
 #include <net/compat.h>
 
index 20225b0..8c9947f 100644
@@ -559,7 +559,6 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
        return pgoff;
 }
 
-/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */
 struct wait_page_key {
        struct page *page;
        int bit_nr;
@@ -683,6 +682,7 @@ static inline int wait_on_page_locked_killable(struct page *page)
 
 int put_and_wait_on_page_locked(struct page *page, int state);
 void wait_on_page_writeback(struct page *page);
+int wait_on_page_writeback_killable(struct page *page);
 extern void end_page_writeback(struct page *page);
 void wait_for_stable_page(struct page *page);
 
index ec2ad4b..c4fdb44 100644
@@ -460,7 +460,5 @@ void geni_icc_set_tag(struct geni_se *se, u32 tag);
 int geni_icc_enable(struct geni_se *se);
 
 int geni_icc_disable(struct geni_se *se);
-
-void geni_remove_earlycon_icc_vote(void);
 #endif
 #endif
index bba2920..980a655 100644
@@ -23,6 +23,7 @@ enum timespec_type {
  * System call restart block.
  */
 struct restart_block {
+       unsigned long arch_data;
        long (*fn)(struct restart_block *);
        union {
                /* For futex_wait and futex_wait_requeue_pi */
index 6d0a33d..f2c9ee7 100644
@@ -285,6 +285,7 @@ struct nf_bridge_info {
 struct tc_skb_ext {
        __u32 chain;
        __u16 mru;
+       bool post_ct;
 };
 #endif
 
index 8edbbf5..822c048 100644
@@ -349,8 +349,13 @@ static inline void sk_psock_update_proto(struct sock *sk,
 static inline void sk_psock_restore_proto(struct sock *sk,
                                          struct sk_psock *psock)
 {
-       sk->sk_prot->unhash = psock->saved_unhash;
        if (inet_csk_has_ulp(sk)) {
+               /* TLS does not have an unhash proto in SW cases, but we need
+                * to ensure we stop using the sock_map unhash routine because
+                * the associated psock is being removed. So use the original
+                * unhash handler.
+                */
+               WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash);
                tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space);
        } else {
                sk->sk_write_space = psock->saved_write_space;
index 7c693b3..1e76ed6 100644 (file)
@@ -104,7 +104,6 @@ struct svcxprt_rdma {
 
        wait_queue_head_t    sc_send_wait;      /* SQ exhaustion waitlist */
        unsigned long        sc_flags;
-       u32                  sc_pending_recvs;
        struct list_head     sc_read_complete_q;
        struct work_struct   sc_work;
 
index 9b2158c..157762d 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/types.h>
 #include <linux/bug.h>
 #include <linux/restart_block.h>
+#include <linux/errno.h>
 
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 /*
@@ -59,6 +60,18 @@ enum syscall_work_bit {
 
 #ifdef __KERNEL__
 
+#ifndef arch_set_restart_data
+#define arch_set_restart_data(restart) do { } while (0)
+#endif
+
+static inline long set_restart_fn(struct restart_block *restart,
+                                       long (*fn)(struct restart_block *))
+{
+       restart->fn = fn;
+       arch_set_restart_data(restart);
+       return -ERESTART_RESTARTBLOCK;
+}
+
 #ifndef THREAD_ALIGN
 #define THREAD_ALIGN   THREAD_SIZE
 #endif
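
The set_restart_fn() helper added above couples storing the restart callback with the new arch_set_restart_data() hook and standardizes the -ERESTART_RESTARTBLOCK return. A hedged sketch of a caller; the syscall and its my_sleep_until() helper are purely illustrative, not from this patch:

static long my_sleep_restart(struct restart_block *restart)
{
	/* resume from the state stashed in the restart block */
	return my_sleep_until(restart->nanosleep.expires);
}

static long my_sleep(u64 expires)
{
	long ret = my_sleep_until(expires);

	if (ret == -ERESTART_RESTARTBLOCK) {
		struct restart_block *restart = &current->restart_block;

		restart->nanosleep.expires = expires;
		/* stores fn, tags arch_data, returns -ERESTART_RESTARTBLOCK */
		return set_restart_fn(restart, my_sleep_restart);
	}
	return ret;
}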
index 6b03fdd..712363c 100644 (file)
@@ -86,6 +86,8 @@
                /* lies about caching, so always sync */        \
        US_FLAG(NO_SAME, 0x40000000)                            \
                /* Cannot handle WRITE_SAME */                  \
+       US_FLAG(SENSE_AFTER_SYNC, 0x80000000)                   \
+               /* Do REQUEST_SENSE after SYNCHRONIZE_CACHE */  \
 
 #define US_FLAG(name, value)   US_FL_##name = value ,
 enum { US_DO_ALL_FLAGS };
index 073a9e0..ad97041 100644 (file)
@@ -14,5 +14,6 @@ struct umd_info {
 int umd_load_blob(struct umd_info *info, const void *data, size_t len);
 int umd_unload_blob(struct umd_info *info);
 int fork_usermode_driver(struct umd_info *info);
+void umd_cleanup_helper(struct umd_info *info);
 
 #endif /* __LINUX_USERMODE_DRIVER_H__ */
index 4ab5494..15fa085 100644 (file)
@@ -250,20 +250,20 @@ struct vdpa_config_ops {
 
 struct vdpa_device *__vdpa_alloc_device(struct device *parent,
                                        const struct vdpa_config_ops *config,
-                                       int nvqs, size_t size, const char *name);
+                                       size_t size, const char *name);
 
-#define vdpa_alloc_device(dev_struct, member, parent, config, nvqs, name)   \
+#define vdpa_alloc_device(dev_struct, member, parent, config, name)   \
                          container_of(__vdpa_alloc_device( \
-                                      parent, config, nvqs, \
+                                      parent, config, \
                                       sizeof(dev_struct) + \
                                       BUILD_BUG_ON_ZERO(offsetof( \
                                       dev_struct, member)), name), \
                                       dev_struct, member)
 
-int vdpa_register_device(struct vdpa_device *vdev);
+int vdpa_register_device(struct vdpa_device *vdev, int nvqs);
 void vdpa_unregister_device(struct vdpa_device *vdev);
 
-int _vdpa_register_device(struct vdpa_device *vdev);
+int _vdpa_register_device(struct vdpa_device *vdev, int nvqs);
 void _vdpa_unregister_device(struct vdpa_device *vdev);
 
 /**
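
The vDPA change above moves the queue count from allocation time to registration time. A hedged sketch of the new calling convention; my_vdpa, my_config_ops, and MY_NUM_VQS are illustrative driver names:

struct my_vdpa {
	struct vdpa_device vdpa;	/* must be the first member */
	/* driver-private state */
};

static int my_probe(struct device *parent)
{
	struct my_vdpa *my;

	/* nvqs is no longer passed at allocation... */
	my = vdpa_alloc_device(struct my_vdpa, vdpa, parent,
			       &my_config_ops, NULL);
	if (IS_ERR(my))
		return PTR_ERR(my);

	/* ...it is supplied when the device is registered instead */
	return vdpa_register_device(&my->vdpa, MY_NUM_VQS);
}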
index 55ea329..b1894e0 100644 (file)
@@ -132,8 +132,6 @@ bool is_virtio_device(struct device *dev);
 void virtio_break_device(struct virtio_device *dev);
 
 void virtio_config_changed(struct virtio_device *dev);
-void virtio_config_disable(struct virtio_device *dev);
-void virtio_config_enable(struct virtio_device *dev);
 int virtio_finalize_features(struct virtio_device *dev);
 #ifdef CONFIG_PM_SLEEP
 int virtio_device_freeze(struct virtio_device *dev);
index 6b5fcfa..b465f8f 100644 (file)
@@ -62,15 +62,21 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
                        return -EINVAL;
        }
 
+       skb_reset_mac_header(skb);
+
        if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
-               u16 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
-               u16 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
+               u32 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
+               u32 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
+               u32 needed = start + max_t(u32, thlen, off + sizeof(__sum16));
+
+               if (!pskb_may_pull(skb, needed))
+                       return -EINVAL;
 
                if (!skb_partial_csum_set(skb, start, off))
                        return -EINVAL;
 
                p_off = skb_transport_offset(skb) + thlen;
-               if (p_off > skb_headlen(skb))
+               if (!pskb_may_pull(skb, p_off))
                        return -EINVAL;
        } else {
                /* gso packets without NEEDS_CSUM do not set transport_offset.
@@ -100,14 +106,14 @@ retry:
                        }
 
                        p_off = keys.control.thoff + thlen;
-                       if (p_off > skb_headlen(skb) ||
+                       if (!pskb_may_pull(skb, p_off) ||
                            keys.basic.ip_proto != ip_proto)
                                return -EINVAL;
 
                        skb_set_transport_header(skb, keys.control.thoff);
                } else if (gso_type) {
                        p_off = thlen;
-                       if (p_off > skb_headlen(skb))
+                       if (!pskb_may_pull(skb, p_off))
                                return -EINVAL;
                }
        }
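
The fix above replaces skb_headlen() comparisons with pskb_may_pull(): the header bytes of a non-linear skb may sit in page fragments, and pskb_may_pull() copies them into the linear area before anything dereferences them, while skb_headlen() merely reports how much is already linear. A sketch of the pattern; the wrapper function is illustrative:

#include <linux/skbuff.h>

static int ensure_l4_header(struct sk_buff *skb, unsigned int thlen)
{
	unsigned int needed = skb_transport_offset(skb) + thlen;

	/* pulls bytes out of frags if the linear area is too short */
	if (!pskb_may_pull(skb, needed))
		return -EINVAL;

	/* the first 'needed' bytes are now safely addressable */
	return 0;
}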
index 850424e..6ecf2a0 100644 (file)
@@ -173,9 +173,10 @@ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
  */
 static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
 {
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
        mutex_release(&ctx->dep_map, _THIS_IP_);
-
+#endif
+#ifdef CONFIG_DEBUG_MUTEXES
        DEBUG_LOCKS_WARN_ON(ctx->acquired);
        if (!IS_ENABLED(CONFIG_PROVE_LOCKING))
                /*
index 92c0160..a91e3d9 100644 (file)
@@ -229,9 +229,10 @@ static inline int xa_err(void *entry)
  *
  * This structure is used either directly or via the XA_LIMIT() macro
  * to communicate the range of IDs that are valid for allocation.
- * Two common ranges are predefined for you:
+ * Three common ranges are predefined for you:
  * * xa_limit_32b      - [0 - UINT_MAX]
  * * xa_limit_31b      - [0 - INT_MAX]
+ * * xa_limit_16b      - [0 - USHRT_MAX]
  */
 struct xa_limit {
        u32 max;
@@ -242,6 +243,7 @@ struct xa_limit {
 
 #define xa_limit_32b   XA_LIMIT(0, UINT_MAX)
 #define xa_limit_31b   XA_LIMIT(0, INT_MAX)
+#define xa_limit_16b   XA_LIMIT(0, USHRT_MAX)
 
 typedef unsigned __bitwise xa_mark_t;
 #define XA_MARK_0              ((__force xa_mark_t)0U)
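
Allocating from the new 16-bit limit works like the existing ones; a brief sketch, with the xarray and caller names illustrative:

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(example_ids);

static int example_track(void *item)
{
	u32 id;
	int err;

	/* allocate an ID in [0, USHRT_MAX] */
	err = xa_alloc(&example_ids, &id, item, xa_limit_16b, GFP_KERNEL);
	if (err)
		return err;
	return id;
}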
index 2bf3092..086b291 100644 (file)
@@ -170,12 +170,7 @@ void tcf_idr_insert_many(struct tc_action *actions[]);
 void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
 int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
                        struct tc_action **a, int bind);
-int __tcf_idr_release(struct tc_action *a, bool bind, bool strict);
-
-static inline int tcf_idr_release(struct tc_action *a, bool bind)
-{
-       return __tcf_idr_release(a, bind, false);
-}
+int tcf_idr_release(struct tc_action *a, bool bind);
 
 int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
 int tcf_unregister_action(struct tc_action_ops *a,
@@ -185,7 +180,7 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
                    int nr_actions, struct tcf_result *res);
 int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
                    struct nlattr *est, char *name, int ovr, int bind,
-                   struct tc_action *actions[], size_t *attr_size,
+                   struct tc_action *actions[], int init_res[], size_t *attr_size,
                    bool rtnl_held, struct netlink_ext_ack *extack);
 struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla,
                                         bool rtnl_held,
@@ -193,7 +188,8 @@ struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla,
 struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
                                    struct nlattr *nla, struct nlattr *est,
                                    char *name, int ovr, int bind,
-                                   struct tc_action_ops *ops, bool rtnl_held,
+                                   struct tc_action_ops *a_o, int *init_res,
+                                   bool rtnl_held,
                                    struct netlink_ext_ack *extack);
 int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind,
                    int ref, bool terse);
index 26f134a..75b1e73 100644 (file)
@@ -550,4 +550,15 @@ static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
                dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
 }
 
+struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie);
+void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
+                              struct sk_buff *skb, u32 mtu, bool confirm_neigh);
+void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
+                           struct sk_buff *skb);
+u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old);
+struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
+                                            struct sk_buff *skb,
+                                            const void *daddr);
+unsigned int dst_blackhole_mtu(const struct dst_entry *dst);
+
 #endif /* _NET_DST_H */
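
Exporting the blackhole dst helpers lets each protocol share one set of stubs instead of keeping private copies. A hedged sketch of such an ops table; the struct name is illustrative:

#include <net/dst.h>

static struct dst_ops example_blackhole_dst_ops = {
	.family		= AF_INET,
	.check		= dst_blackhole_check,
	.mtu		= dst_blackhole_mtu,
	.cow_metrics	= dst_blackhole_cow_metrics,
	.update_pmtu	= dst_blackhole_update_pmtu,
	.redirect	= dst_blackhole_redirect,
	.neigh_lookup	= dst_blackhole_neigh_lookup,
};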
index 10a6257..3c8c594 100644 (file)
@@ -282,7 +282,7 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
        return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
 }
 
-void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
+bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
 void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);
 
 static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
index fdec57d..5aaced6 100644 (file)
@@ -1536,6 +1536,7 @@ struct nft_trans_flowtable {
        struct nft_flowtable            *flowtable;
        bool                            update;
        struct list_head                hook_list;
+       u32                             flags;
 };
 
 #define nft_trans_flowtable(trans)     \
@@ -1544,6 +1545,8 @@ struct nft_trans_flowtable {
        (((struct nft_trans_flowtable *)trans->data)->update)
 #define nft_trans_flowtable_hooks(trans)       \
        (((struct nft_trans_flowtable *)trans->data)->hook_list)
+#define nft_trans_flowtable_flags(trans)       \
+       (((struct nft_trans_flowtable *)trans->data)->flags)
 
 int __init nft_chain_filter_init(void);
 void nft_chain_filter_fini(void);
index 59f45b1..e816b6a 100644 (file)
@@ -72,7 +72,9 @@ struct netns_xfrm {
 #if IS_ENABLED(CONFIG_IPV6)
        struct dst_ops          xfrm6_dst_ops;
 #endif
-       spinlock_t xfrm_state_lock;
+       spinlock_t              xfrm_state_lock;
+       seqcount_spinlock_t     xfrm_state_hash_generation;
+
        spinlock_t xfrm_policy_lock;
        struct mutex xfrm_cfg_mutex;
 };
index 7bc057a..a10a319 100644 (file)
@@ -410,6 +410,7 @@ static inline struct fib_nh *fib_info_nh(struct fib_info *fi, int nhsel)
 int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
                       struct netlink_ext_ack *extack);
 
+/* Caller should either hold rcu_read_lock(), or RTNL. */
 static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh)
 {
        struct nh_info *nhi;
@@ -430,6 +431,29 @@ static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh)
        return NULL;
 }
 
+/* Variant of nexthop_fib6_nh().
+ * Caller should either hold rcu_read_lock_bh(), or RTNL.
+ */
+static inline struct fib6_nh *nexthop_fib6_nh_bh(struct nexthop *nh)
+{
+       struct nh_info *nhi;
+
+       if (nh->is_group) {
+               struct nh_group *nh_grp;
+
+               nh_grp = rcu_dereference_bh_rtnl(nh->nh_grp);
+               nh = nexthop_mpath_select(nh_grp, 0);
+               if (!nh)
+                       return NULL;
+       }
+
+       nhi = rcu_dereference_bh_rtnl(nh->nh_info);
+       if (nhi->family == AF_INET6)
+               return &nhi->fib6_nh;
+
+       return NULL;
+}
+
 static inline struct net_device *fib6_info_nh_dev(struct fib6_info *f6i)
 {
        struct fib6_nh *fib6_nh;
index 932f0d7..be11dbd 100644 (file)
@@ -168,16 +168,24 @@ static inline void red_set_vars(struct red_vars *v)
        v->qcount       = -1;
 }
 
-static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, u8 Scell_log)
+static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog,
+                                   u8 Scell_log, u8 *stab)
 {
-       if (fls(qth_min) + Wlog > 32)
+       if (fls(qth_min) + Wlog >= 32)
                return false;
-       if (fls(qth_max) + Wlog > 32)
+       if (fls(qth_max) + Wlog >= 32)
                return false;
        if (Scell_log >= 32)
                return false;
        if (qth_max < qth_min)
                return false;
+       if (stab) {
+               int i;
+
+               for (i = 0; i < RED_STAB_SIZE; i++)
+                       if (stab[i] >= 32)
+                               return false;
+       }
        return true;
 }
 
@@ -287,7 +295,7 @@ static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms
        int  shift;
 
        /*
-        * The problem: ideally, average length queue recalcultion should
+        * The problem: ideally, average length queue recalculation should
         * be done over constant clock intervals. This is too expensive, so
         * that the calculation is driven by outgoing packets.
         * When the queue is idle we have to model this clock by hand.
index e2091bb..479f60e 100644 (file)
@@ -33,6 +33,7 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
  *
  *     @list: Used internally
  *     @kind: Identifier
+ *     @netns_refund: Physical device, move to init_net on netns exit
  *     @maxtype: Highest device specific netlink attribute number
  *     @policy: Netlink policy for device specific attribute validation
  *     @validate: Optional validation function for netlink/changelink parameters
@@ -64,6 +65,7 @@ struct rtnl_link_ops {
        size_t                  priv_size;
        void                    (*setup)(struct net_device *dev);
 
+       bool                    netns_refund;
        unsigned int            maxtype;
        const struct nla_policy *policy;
        int                     (*validate)(struct nlattr *tb[],
@@ -145,8 +147,8 @@ struct rtnl_af_ops {
        int                     (*validate_link_af)(const struct net_device *dev,
                                                    const struct nlattr *attr);
        int                     (*set_link_af)(struct net_device *dev,
-                                              const struct nlattr *attr);
-
+                                              const struct nlattr *attr,
+                                              struct netlink_ext_ack *extack);
        int                     (*fill_stats_af)(struct sk_buff *skb,
                                                 const struct net_device *dev);
        size_t                  (*get_stats_af_size)(const struct net_device *dev);
index 636810d..8487f58 100644 (file)
@@ -934,6 +934,10 @@ static inline void sk_acceptq_added(struct sock *sk)
        WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
 }
 
+/* Note: If you think the test should be:
+ *     return READ_ONCE(sk->sk_ack_backlog) >= READ_ONCE(sk->sk_max_ack_backlog);
+ * Then please take a look at commit 64a146513f8f ("[NET]: Revert incorrect accept queue backlog changes.")
+ */
 static inline bool sk_acceptq_is_full(const struct sock *sk)
 {
        return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog);
@@ -2221,6 +2225,15 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
        sk_mem_charge(sk, skb->truesize);
 }
 
+static inline void skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
+{
+       if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
+               skb_orphan(skb);
+               skb->destructor = sock_efree;
+               skb->sk = sk;
+       }
+}
+
 void sk_reset_timer(struct sock *sk, struct timer_list *timer,
                    unsigned long expires);
 
index b2a06f1..c58a6d4 100644 (file)
@@ -1097,7 +1097,7 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
                return __xfrm_policy_check(sk, ndir, skb, family);
 
        return  (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) ||
-               (skb_dst(skb)->flags & DST_NOPOLICY) ||
+               (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) ||
                __xfrm_policy_check(sk, ndir, skb, family);
 }
 
@@ -1557,7 +1557,7 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
 int xfrm_trans_queue(struct sk_buff *skb,
                     int (*finish)(struct net *, struct sock *,
                                   struct sk_buff *));
-int xfrm_output_resume(struct sk_buff *skb, int err);
+int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err);
 int xfrm_output(struct sock *sk, struct sk_buff *skb);
 
 #if IS_ENABLED(CONFIG_NET_PKTGEN)
index 8a26a2f..fc5a398 100644 (file)
@@ -193,6 +193,7 @@ enum iscsi_connection_state {
        ISCSI_CONN_UP = 0,
        ISCSI_CONN_DOWN,
        ISCSI_CONN_FAILED,
+       ISCSI_CONN_BOUND,
 };
 
 struct iscsi_cls_conn {
index 970cc2e..6154a2e 100644 (file)
@@ -30,7 +30,7 @@ TRACE_EVENT(workqueue_queue_work,
        TP_STRUCT__entry(
                __field( void *,        work    )
                __field( void *,        function)
-               __field( const char *,  workqueue)
+               __string( workqueue,    pwq->wq->name)
                __field( unsigned int,  req_cpu )
                __field( unsigned int,  cpu     )
        ),
@@ -38,13 +38,13 @@ TRACE_EVENT(workqueue_queue_work,
        TP_fast_assign(
                __entry->work           = work;
                __entry->function       = work->func;
-               __entry->workqueue      = pwq->wq->name;
+               __assign_str(workqueue, pwq->wq->name);
                __entry->req_cpu        = req_cpu;
                __entry->cpu            = pwq->pool->cpu;
        ),
 
        TP_printk("work struct=%p function=%ps workqueue=%s req_cpu=%u cpu=%u",
-                 __entry->work, __entry->function, __entry->workqueue,
+                 __entry->work, __entry->function, __get_str(workqueue),
                  __entry->req_cpu, __entry->cpu)
 );
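
The tracepoint now copies the workqueue name by value because the pwq, and with it the old const char * pointer, can be freed before the event is read. The same __string()/__assign_str()/__get_str() pattern applies to any tracepoint that records a variable string; a minimal sketch, with the event and argument names illustrative (the usual trace-header scaffolding is assumed):

TRACE_EVENT(example_named_event,
	TP_PROTO(const char *name),
	TP_ARGS(name),
	TP_STRUCT__entry(
		__string(name, name)		/* reserves space in the entry */
	),
	TP_fast_assign(
		__assign_str(name, name);	/* copies, no dangling pointer */
	),
	TP_printk("name=%s", __get_str(name))
);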
 
index a6c1f3e..5596d7c 100644 (file)
@@ -76,6 +76,7 @@ struct drm_msm_timespec {
 #define MSM_PARAM_NR_RINGS   0x07
 #define MSM_PARAM_PP_PGTABLE 0x08  /* => 1 for per-process pagetables, else 0 */
 #define MSM_PARAM_FAULTS     0x09
+#define MSM_PARAM_SUSPENDS   0x0a
 
 struct drm_msm_param {
        __u32 pipe;           /* in, MSM_PIPE_x */
index ac6474e..d0a64ee 100644 (file)
@@ -2,29 +2,6 @@
 #ifndef _UAPI__LINUX_BLKPG_H
 #define _UAPI__LINUX_BLKPG_H
 
-/*
- * Partition table and disk geometry handling
- *
- * A single ioctl with lots of subfunctions:
- *
- * Device number stuff:
- *    get_whole_disk()         (given the device number of a partition,
- *                               find the device number of the encompassing disk)
- *    get_all_partitions()     (given the device number of a disk, return the
- *                              device numbers of all its known partitions)
- *
- * Partition stuff:
- *    add_partition()
- *    delete_partition()
- *    test_partition_in_use()  (also for test_disk_in_use)
- *
- * Geometry stuff:
- *    get_geometry()
- *    set_geometry()
- *    get_bios_drivedata()
- *
- * For today, only the partition stuff - aeb, 990515
- */
 #include <linux/compiler.h>
 #include <linux/ioctl.h>
 
@@ -52,9 +29,8 @@ struct blkpg_partition {
        long long start;                /* starting offset in bytes */
        long long length;               /* length in bytes */
        int pno;                        /* partition number */
-       char devname[BLKPG_DEVNAMELTH]; /* partition name, like sda5 or c0d1p2,
-                                          to be used in kernel messages */
-       char volname[BLKPG_VOLNAMELTH]; /* volume label */
+       char devname[BLKPG_DEVNAMELTH]; /* unused / ignored */
+       char volname[BLKPG_VOLNAMELTH]; /* unused / ignored */
 };
 
 #endif /* _UAPI__LINUX_BLKPG_H */
index 79c8933..4ba4ef0 100644 (file)
@@ -3850,7 +3850,7 @@ union bpf_attr {
  *
  * long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags)
  *     Description
- *             Check ctx packet size against exceeding MTU of net device (based
+ *             Check the packet size against the MTU of the net device (based
  *             on *ifindex*).  This helper will likely be used in combination
  *             with helpers that adjust/change the packet size.
  *
@@ -3867,6 +3867,14 @@ union bpf_attr {
  *             against the current net device.  This is practical if this isn't
  *             used prior to redirect.
  *
+ *             On input *mtu_len* must be a valid pointer, else the verifier
+ *             will reject the BPF program.  If the value *mtu_len* is
+ *             initialized to zero then the ctx packet size is used.  When
+ *             the value *mtu_len* is provided as input, it specifies the L3
+ *             length that the MTU check is done against.  Remember that XDP
+ *             and TC lengths operate at L2, but this value is L3, as it
+ *             correlates to the MTU and IP-header tot_len values, which are
+ *             L3 (similar behavior to bpf_fib_lookup).
+ *
  *             The Linux kernel route table can configure MTUs on a more
  *             specific per route level, which is not provided by this helper.
  *             For route level MTU checks use the **bpf_fib_lookup**\ ()
@@ -3891,11 +3899,9 @@ union bpf_attr {
  *
  *             On return *mtu_len* pointer contains the MTU value of the net
  *             device.  Remember the net device configured MTU is the L3 size,
- *             which is returned here and XDP and TX length operate at L2.
+ *             which is returned here and XDP and TC length operate at L2.
 *             The helper takes this into account for you, but remember this
- *             MTU value in your BPF-code.  On input *mtu_len* must be a valid
- *             pointer and be initialized (to zero), else verifier will reject
- *             BPF program.
+ *             when using the MTU value in your BPF code.
  *
  *     Return
  *             * 0 on success, and populate MTU value in *mtu_len* pointer.
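
A hedged sketch of a TC classifier using the helper as documented above, with *mtu_len* zeroed on input so the ctx packet size is checked; the program and section names are illustrative and libbpf-style headers are assumed:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int mtu_guard(struct __sk_buff *ctx)
{
	__u32 mtu_len = 0;	/* zero on input: check ctx packet size */

	/* ifindex 0: check against the current net device */
	if (bpf_check_mtu(ctx, 0, &mtu_len, 0, 0))
		return TC_ACT_SHOT;
	return TC_ACT_OK;
}

char LICENSE[] SEC("license") = "GPL";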
index f75238a..c753535 100644 (file)
@@ -113,7 +113,7 @@ struct can_frame {
                 */
                __u8 len;
                __u8 can_dlc; /* deprecated */
-       };
+       } __attribute__((packed)); /* disable padding added in some ABIs */
        __u8 __pad; /* padding */
        __u8 __res0; /* reserved / padding */
        __u8 len8_dlc; /* optional DLC for 8 byte payload length (9 .. 15) */
index cde753b..5afea69 100644 (file)
  * have the same layout for 32-bit and 64-bit userland.
  */
 
+/* Note on reserved space.
+ * Reserved fields must not be accessed directly by user space because
+ * they may be replaced by a different field in the future. They must
+ * be initialized to zero before making the request, e.g. via memset
+ * of the entire structure or implicitly by not being set in a structure
+ * initializer.
+ */
+
 /**
  * struct ethtool_cmd - DEPRECATED, link control and status
  * This structure is DEPRECATED, please use struct ethtool_link_settings.
@@ -67,6 +75,7 @@
  *     and other link features that the link partner advertised
  *     through autonegotiation; 0 if unknown or not applicable.
  *     Read-only.
+ * @reserved: Reserved for future use; see the note on reserved space.
  *
  * The link speed in Mbps is split between @speed and @speed_hi.  Use
  * the ethtool_cmd_speed() and ethtool_cmd_speed_set() functions to
@@ -155,6 +164,7 @@ static inline __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep)
  * @bus_info: Device bus address.  This should match the dev_name()
  *     string for the underlying bus device, if there is one.  May be
  *     an empty string.
+ * @reserved2: Reserved for future use; see the note on reserved space.
  * @n_priv_flags: Number of flags valid for %ETHTOOL_GPFLAGS and
  *     %ETHTOOL_SPFLAGS commands; also the number of strings in the
  *     %ETH_SS_PRIV_FLAGS set
@@ -356,6 +366,7 @@ struct ethtool_eeprom {
  * @tx_lpi_timer: Time in microseconds the interface delays prior to asserting
  *     its tx lpi (after reaching 'idle' state). Effective only when eee
  *     was negotiated and tx_lpi_enabled was set.
+ * @reserved: Reserved for future use; see the note on reserved space.
  */
 struct ethtool_eee {
        __u32   cmd;
@@ -374,6 +385,7 @@ struct ethtool_eee {
  * @cmd: %ETHTOOL_GMODULEINFO
  * @type: Standard the module information conforms to %ETH_MODULE_SFF_xxxx
  * @eeprom_len: Length of the eeprom
+ * @reserved: Reserved for future use; see the note on reserved space.
  *
  * This structure is used to return the information to
  * properly size memory for a subsequent call to %ETHTOOL_GMODULEEEPROM.
@@ -579,9 +591,7 @@ struct ethtool_pauseparam {
        __u32   tx_pause;
 };
 
-/**
- * enum ethtool_link_ext_state - link extended state
- */
+/* Link extended state */
 enum ethtool_link_ext_state {
        ETHTOOL_LINK_EXT_STATE_AUTONEG,
        ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
@@ -595,10 +605,7 @@ enum ethtool_link_ext_state {
        ETHTOOL_LINK_EXT_STATE_OVERHEAT,
 };
 
-/**
- * enum ethtool_link_ext_substate_autoneg - more information in addition to
- * ETHTOOL_LINK_EXT_STATE_AUTONEG.
- */
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_AUTONEG. */
 enum ethtool_link_ext_substate_autoneg {
        ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED = 1,
        ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED,
@@ -608,9 +615,7 @@ enum ethtool_link_ext_substate_autoneg {
        ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD,
 };
 
-/**
- * enum ethtool_link_ext_substate_link_training - more information in addition to
- * ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE.
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE.
  */
 enum ethtool_link_ext_substate_link_training {
        ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED = 1,
@@ -619,9 +624,7 @@ enum ethtool_link_ext_substate_link_training {
        ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT,
 };
 
-/**
- * enum ethtool_link_ext_substate_logical_mismatch - more information in addition
- * to ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH.
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH.
  */
 enum ethtool_link_ext_substate_link_logical_mismatch {
        ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK = 1,
@@ -631,19 +634,14 @@ enum ethtool_link_ext_substate_link_logical_mismatch {
        ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED,
 };
 
-/**
- * enum ethtool_link_ext_substate_bad_signal_integrity - more information in
- * addition to ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY.
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY.
  */
 enum ethtool_link_ext_substate_bad_signal_integrity {
        ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS = 1,
        ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE,
 };
 
-/**
- * enum ethtool_link_ext_substate_cable_issue - more information in
- * addition to ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE.
- */
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE. */
 enum ethtool_link_ext_substate_cable_issue {
        ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE = 1,
        ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE,
@@ -661,6 +659,7 @@ enum ethtool_link_ext_substate_cable_issue {
  *     now deprecated
  * @ETH_SS_FEATURES: Device feature names
 * @ETH_SS_RSS_HASH_FUNCS: RSS hash function names
+ * @ETH_SS_TUNABLES: tunable names
  * @ETH_SS_PHY_STATS: Statistic names, for use with %ETHTOOL_GPHYSTATS
  * @ETH_SS_PHY_TUNABLES: PHY tunable names
  * @ETH_SS_LINK_MODES: link mode names
@@ -670,6 +669,8 @@ enum ethtool_link_ext_substate_cable_issue {
  * @ETH_SS_TS_TX_TYPES: timestamping Tx types
  * @ETH_SS_TS_RX_FILTERS: timestamping Rx filters
  * @ETH_SS_UDP_TUNNEL_TYPES: UDP tunnel types
+ *
+ * @ETH_SS_COUNT: number of defined string sets
  */
 enum ethtool_stringset {
        ETH_SS_TEST             = 0,
@@ -715,6 +716,7 @@ struct ethtool_gstrings {
 /**
  * struct ethtool_sset_info - string set information
  * @cmd: Command number = %ETHTOOL_GSSET_INFO
+ * @reserved: Reserved for future use; see the note on reserved space.
  * @sset_mask: On entry, a bitmask of string sets to query, with bits
  *     numbered according to &enum ethtool_stringset.  On return, a
  *     bitmask of those string sets queried that are supported.
@@ -759,6 +761,7 @@ enum ethtool_test_flags {
  * @flags: A bitmask of flags from &enum ethtool_test_flags.  Some
  *     flags may be set by the user on entry; others may be set by
  *     the driver on return.
+ * @reserved: Reserved for future use; see the note on reserved space.
  * @len: On return, the number of test results
  * @data: Array of test results
  *
@@ -959,6 +962,7 @@ union ethtool_flow_union {
  * @vlan_etype: VLAN EtherType
  * @vlan_tci: VLAN tag control information
  * @data: user defined data
+ * @padding: Reserved for future use; see the note on reserved space.
  *
  * Note, @vlan_etype, @vlan_tci, and @data are only valid if %FLOW_EXT
  * is set in &struct ethtool_rx_flow_spec @flow_type.
@@ -1134,7 +1138,8 @@ struct ethtool_rxfh_indir {
  *     hardware hash key.
  * @hfunc: Defines the current RSS hash function used by HW (or to be set to).
  *     Valid values are one of the %ETH_RSS_HASH_*.
- * @rsvd:      Reserved for future extensions.
+ * @rsvd8: Reserved for future use; see the note on reserved space.
+ * @rsvd32: Reserved for future use; see the note on reserved space.
  * @rss_config: RX ring/queue index for each hash value i.e., indirection table
  *     of @indir_size __u32 elements, followed by hash key of @key_size
  *     bytes.
@@ -1302,7 +1307,9 @@ struct ethtool_sfeatures {
  * @so_timestamping: bit mask of the sum of the supported SO_TIMESTAMPING flags
  * @phc_index: device index of the associated PHC, or -1 if there is none
  * @tx_types: bit mask of the supported hwtstamp_tx_types enumeration values
+ * @tx_reserved: Reserved for future use; see the note on reserved space.
  * @rx_filters: bit mask of the supported hwtstamp_rx_filters enumeration values
+ * @rx_reserved: Reserved for future use; see the note on reserved space.
  *
  * The bits in the 'tx_types' and 'rx_filters' fields correspond to
  * the 'hwtstamp_tx_types' and 'hwtstamp_rx_filters' enumeration values,
@@ -1958,6 +1965,11 @@ enum ethtool_reset_flags {
  *     autonegotiation; 0 if unknown or not applicable.  Read-only.
  * @transceiver: Used to distinguish different possible PHY types,
  *     reported consistently by PHYLIB.  Read-only.
+ * @master_slave_cfg: Master/slave port mode.
+ * @master_slave_state: Master/slave port state.
+ * @reserved: Reserved for future use; see the note on reserved space.
+ * @reserved1: Reserved for future use; see the note on reserved space.
+ * @link_mode_masks: Variable length bitmaps.
  *
  * If autonegotiation is disabled, the speed and @duplex represent the
  * fixed link mode and are writable if the driver supports multiple
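
Userspace that honors the reserved-space note above zeroes each request before filling it in. A hedged sketch; the interface name and the assumption that fd is an AF_INET socket are illustrative:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int example_get_eee(int fd)
{
	struct ethtool_eee eee;
	struct ifreq ifr;

	memset(&eee, 0, sizeof(eee));	/* zeroes @reserved as required */
	memset(&ifr, 0, sizeof(ifr));
	eee.cmd = ETHTOOL_GEEE;
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&eee;
	return ioctl(fd, SIOCETHTOOL, &ifr);
}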
index 98ca64d..5444261 100644 (file)
@@ -903,7 +903,8 @@ struct fuse_notify_retrieve_in {
 };
 
 /* Device ioctls: */
-#define FUSE_DEV_IOC_CLONE     _IOR(229, 0, uint32_t)
+#define FUSE_DEV_IOC_MAGIC             229
+#define FUSE_DEV_IOC_CLONE             _IOR(FUSE_DEV_IOC_MAGIC, 0, uint32_t)
 
 struct fuse_lseek_in {
        uint64_t        fh;
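
Only the macro's spelling changes above; the ioctl is the same. A hedged userspace sketch of cloning a /dev/fuse session, where session_fd is assumed to be an already-mounted descriptor:

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fuse.h>

static int fuse_clone_session(int session_fd)
{
	uint32_t src = session_fd;
	int clone_fd = open("/dev/fuse", O_RDWR | O_CLOEXEC);

	if (clone_fd < 0)
		return -1;
	if (ioctl(clone_fd, FUSE_DEV_IOC_CLONE, &src) < 0) {
		close(clone_fd);
		return -1;
	}
	return clone_fd;
}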
index aea26ab..bff5032 100644 (file)
@@ -3,7 +3,6 @@
 #define __UAPI_PSAMPLE_H
 
 enum {
-       /* sampled packet metadata */
        PSAMPLE_ATTR_IIFINDEX,
        PSAMPLE_ATTR_OIFINDEX,
        PSAMPLE_ATTR_ORIGSIZE,
@@ -11,10 +10,8 @@ enum {
        PSAMPLE_ATTR_GROUP_SEQ,
        PSAMPLE_ATTR_SAMPLE_RATE,
        PSAMPLE_ATTR_DATA,
-       PSAMPLE_ATTR_TUNNEL,
-
-       /* commands attributes */
        PSAMPLE_ATTR_GROUP_REFCOUNT,
+       PSAMPLE_ATTR_TUNNEL,
 
        __PSAMPLE_ATTR_MAX
 };
index 03e8af8..9b77cfc 100644 (file)
@@ -86,34 +86,90 @@ enum rfkill_hard_block_reasons {
  * @op: operation code
  * @hard: hard state (0/1)
  * @soft: soft state (0/1)
+ *
+ * Structure used for userspace communication on /dev/rfkill,
+ * used for events from the kernel and control to the kernel.
+ */
+struct rfkill_event {
+       __u32 idx;
+       __u8  type;
+       __u8  op;
+       __u8  soft;
+       __u8  hard;
+} __attribute__((packed));
+
+/**
+ * struct rfkill_event_ext - events for userspace on /dev/rfkill
+ * @idx: index of dev rfkill
+ * @type: type of the rfkill struct
+ * @op: operation code
+ * @hard: hard state (0/1)
+ * @soft: soft state (0/1)
  * @hard_block_reasons: valid if hard is set. One or several reasons from
  *     &enum rfkill_hard_block_reasons.
  *
  * Structure used for userspace communication on /dev/rfkill,
  * used for events from the kernel and control to the kernel.
+ *
+ * See the extensibility docs below.
  */
-struct rfkill_event {
+struct rfkill_event_ext {
        __u32 idx;
        __u8  type;
        __u8  op;
        __u8  soft;
        __u8  hard;
+
+       /*
+        * older kernels will accept/send only up to this point,
+        * and if extended further up to any chunk marked below
+        */
+
        __u8  hard_block_reasons;
 } __attribute__((packed));
 
-/*
- * We are planning to be backward and forward compatible with changes
- * to the event struct, by adding new, optional, members at the end.
- * When reading an event (whether the kernel from userspace or vice
- * versa) we need to accept anything that's at least as large as the
- * version 1 event size, but might be able to accept other sizes in
- * the future.
+/**
+ * DOC: Extensibility
+ *
+ * Originally, we had planned to allow backward and forward compatible
+ * changes by just adding fields at the end of the structure that are
+ * then not reported on older kernels on read(), and not written to by
+ * older kernels on write(), with the kernel reporting the size it did
+ * accept as the result.
+ *
+ * This would have allowed userspace to detect on read() and write()
+ * which kernel structure version it was dealing with, and if it was
+ * just recompiled it would have gotten the new fields, though
+ * obviously not accessed them, and things should've continued to work.
+ *
+ * Unfortunately, while actually exercising this mechanism to add the
+ * hard block reasons field, we found that userspace (notably systemd)
+ * did all kinds of fun things not in line with this scheme:
+ *
+ * 1. treat the (expected) short writes as an error;
+ * 2. ask to read sizeof(struct rfkill_event) but then compare the
+ *    actual return value to RFKILL_EVENT_SIZE_V1 and treat any
+ *    mismatch as an error.
+ *
+ * As a consequence, just recompiling with a new struct version caused
+ * things to no longer work correctly on old and new kernels.
+ *
+ * Hence, we've rolled back &struct rfkill_event to the original version
+ * and added &struct rfkill_event_ext. This effectively reverts to the
+ * old behaviour for all userspace, unless it explicitly opts in to the
+ * rules outlined here by using the new &struct rfkill_event_ext.
+ *
+ * Userspace using &struct rfkill_event_ext must adhere to the following
+ * rules:
  *
- * One exception is the kernel -- we already have two event sizes in
- * that we've made the 'hard' member optional since our only option
- * is to ignore it anyway.
+ * 1. accept short writes, optionally using them to detect that it's
+ *    running on an older kernel;
+ * 2. accept short reads, knowing that this means it's running on an
+ *    older kernel;
+ * 3. treat reads that are as long as requested as acceptable, not
+ *    checking against RFKILL_EVENT_SIZE_V1 or such.
  */
-#define RFKILL_EVENT_SIZE_V1   8
+#define RFKILL_EVENT_SIZE_V1   sizeof(struct rfkill_event)
 
 /* ioctl for turning off rfkill-input (if present) */
 #define RFKILL_IOC_MAGIC       'R'
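
A hedged sketch of a reader following the three rules above: ask for the extended struct, treat a short read as an older kernel, and use RFKILL_EVENT_SIZE_V1 only as a lower bound:

#include <unistd.h>
#include <linux/rfkill.h>

static ssize_t read_rfkill_event(int fd, struct rfkill_event_ext *ev)
{
	ssize_t n = read(fd, ev, sizeof(*ev));

	if (n < 0)
		return -1;
	if ((size_t)n < RFKILL_EVENT_SIZE_V1)	/* V1 is the minimum */
		return -1;
	/* n < sizeof(*ev): older kernel, only the base fields are valid */
	return n;
}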
index 6639640..b58b2ef 100644 (file)
@@ -109,7 +109,7 @@ static void *bpf_fd_inode_storage_lookup_elem(struct bpf_map *map, void *key)
        fd = *(int *)key;
        f = fget_raw(fd);
        if (!f)
-               return NULL;
+               return ERR_PTR(-EBADF);
 
        sdata = inode_storage_lookup(f->f_inode, map, true);
        fput(f);
index 1a666a9..70f6fd4 100644 (file)
@@ -430,7 +430,7 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
 
                tprogs[BPF_TRAMP_FENTRY].progs[0] = prog;
                tprogs[BPF_TRAMP_FENTRY].nr_progs = 1;
-               err = arch_prepare_bpf_trampoline(image,
+               err = arch_prepare_bpf_trampoline(NULL, image,
                                                  st_map->image + PAGE_SIZE,
                                                  &st_ops->func_models[i], 0,
                                                  tprogs, NULL);
index 3a283bf..75244ec 100644 (file)
@@ -827,7 +827,7 @@ static int __init bpf_jit_charge_init(void)
 }
 pure_initcall(bpf_jit_charge_init);
 
-static int bpf_jit_charge_modmem(u32 pages)
+int bpf_jit_charge_modmem(u32 pages)
 {
        if (atomic_long_add_return(pages, &bpf_jit_current) >
            (bpf_jit_limit >> PAGE_SHIFT)) {
@@ -840,7 +840,7 @@ static int bpf_jit_charge_modmem(u32 pages)
        return 0;
 }
 
-static void bpf_jit_uncharge_modmem(u32 pages)
+void bpf_jit_uncharge_modmem(u32 pages)
 {
        atomic_long_sub(pages, &bpf_jit_current);
 }
index 3acc7e0..faa54d5 100644 (file)
@@ -84,7 +84,7 @@ static const char *const bpf_atomic_alu_string[16] = {
        [BPF_ADD >> 4]  = "add",
        [BPF_AND >> 4]  = "and",
        [BPF_OR >> 4]  = "or",
-       [BPF_XOR >> 4]  = "or",
+       [BPF_XOR >> 4]  = "xor",
 };
 
 static const char *const bpf_ldst_string[] = {
index 1576ff3..d2de2ab 100644 (file)
@@ -543,11 +543,11 @@ int bpf_obj_get_user(const char __user *pathname, int flags)
                return PTR_ERR(raw);
 
        if (type == BPF_TYPE_PROG)
-               ret = bpf_prog_new_fd(raw);
+               ret = (f_flags != O_RDWR) ? -EINVAL : bpf_prog_new_fd(raw);
        else if (type == BPF_TYPE_MAP)
                ret = bpf_map_new_fd(raw, f_flags);
        else if (type == BPF_TYPE_LINK)
-               ret = bpf_link_new_fd(raw);
+               ret = (f_flags != O_RDWR) ? -EINVAL : bpf_link_new_fd(raw);
        else
                return -ENOENT;
 
index 79c5772..53736e5 100644 (file)
@@ -60,9 +60,12 @@ static int finish(void)
                         &magic, sizeof(magic), &pos);
        if (n != sizeof(magic))
                return -EPIPE;
+
        tgid = umd_ops.info.tgid;
-       wait_event(tgid->wait_pidfd, thread_group_exited(tgid));
-       umd_ops.info.tgid = NULL;
+       if (tgid) {
+               wait_event(tgid->wait_pidfd, thread_group_exited(tgid));
+               umd_cleanup_helper(&umd_ops.info);
+       }
        return 0;
 }
 
@@ -80,10 +83,18 @@ static int __init load_umd(void)
 
 static void __exit fini_umd(void)
 {
+       struct pid *tgid;
+
        bpf_preload_ops = NULL;
+
        /* kill UMD in case it's still there due to earlier error */
-       kill_pid(umd_ops.info.tgid, SIGKILL, 1);
-       umd_ops.info.tgid = NULL;
+       tgid = umd_ops.info.tgid;
+       if (tgid) {
+               kill_pid(tgid, SIGKILL, 1);
+
+               wait_event(tgid->wait_pidfd, thread_group_exited(tgid));
+               umd_cleanup_helper(&umd_ops.info);
+       }
        umd_unload_blob(&umd_ops.info);
 }
 late_initcall(load_umd);
index be35bfb..6fbc2ab 100644 (file)
@@ -517,9 +517,17 @@ const struct bpf_func_proto bpf_get_stack_proto = {
 BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
           u32, size, u64, flags)
 {
-       struct pt_regs *regs = task_pt_regs(task);
+       struct pt_regs *regs;
+       long res;
 
-       return __bpf_get_stack(regs, task, NULL, buf, size, flags);
+       if (!try_get_task_stack(task))
+               return -EFAULT;
+
+       regs = task_pt_regs(task);
+       res = __bpf_get_stack(regs, task, NULL, buf, size, flags);
+       put_task_stack(task);
+
+       return res;
 }
 
 BTF_ID_LIST_SINGLE(bpf_get_task_stack_btf_ids, struct, task_struct)
index c859bc4..2505034 100644 (file)
@@ -854,6 +854,11 @@ static int map_create(union bpf_attr *attr)
                        err = PTR_ERR(btf);
                        goto free_map;
                }
+               if (btf_is_kernel(btf)) {
+                       btf_put(btf);
+                       err = -EACCES;
+                       goto free_map;
+               }
                map->btf = btf;
 
                if (attr->btf_value_type_id) {
index 7bc3b32..4aa8b52 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/btf.h>
 #include <linux/rcupdate_trace.h>
 #include <linux/rcupdate_wait.h>
+#include <linux/module.h>
 
 /* dummy _ops. The verifier will operate on target program's ops. */
 const struct bpf_verifier_ops bpf_extension_verifier_ops = {
@@ -57,19 +58,10 @@ void bpf_image_ksym_del(struct bpf_ksym *ksym)
                           PAGE_SIZE, true, ksym->name);
 }
 
-static void bpf_trampoline_ksym_add(struct bpf_trampoline *tr)
-{
-       struct bpf_ksym *ksym = &tr->ksym;
-
-       snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu", tr->key);
-       bpf_image_ksym_add(tr->image, ksym);
-}
-
 static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
 {
        struct bpf_trampoline *tr;
        struct hlist_head *head;
-       void *image;
        int i;
 
        mutex_lock(&trampoline_mutex);
@@ -84,14 +76,6 @@ static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
        if (!tr)
                goto out;
 
-       /* is_root was checked earlier. No need for bpf_jit_charge_modmem() */
-       image = bpf_jit_alloc_exec_page();
-       if (!image) {
-               kfree(tr);
-               tr = NULL;
-               goto out;
-       }
-
        tr->key = key;
        INIT_HLIST_NODE(&tr->hlist);
        hlist_add_head(&tr->hlist, head);
@@ -99,14 +83,31 @@ static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
        mutex_init(&tr->mutex);
        for (i = 0; i < BPF_TRAMP_MAX; i++)
                INIT_HLIST_HEAD(&tr->progs_hlist[i]);
-       tr->image = image;
-       INIT_LIST_HEAD_RCU(&tr->ksym.lnode);
-       bpf_trampoline_ksym_add(tr);
 out:
        mutex_unlock(&trampoline_mutex);
        return tr;
 }
 
+static int bpf_trampoline_module_get(struct bpf_trampoline *tr)
+{
+       struct module *mod;
+       int err = 0;
+
+       preempt_disable();
+       mod = __module_text_address((unsigned long) tr->func.addr);
+       if (mod && !try_module_get(mod))
+               err = -ENOENT;
+       preempt_enable();
+       tr->mod = mod;
+       return err;
+}
+
+static void bpf_trampoline_module_put(struct bpf_trampoline *tr)
+{
+       module_put(tr->mod);
+       tr->mod = NULL;
+}
+
 static int is_ftrace_location(void *ip)
 {
        long addr;
@@ -128,6 +129,9 @@ static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
                ret = unregister_ftrace_direct((long)ip, (long)old_addr);
        else
                ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);
+
+       if (!ret)
+               bpf_trampoline_module_put(tr);
        return ret;
 }
 
@@ -154,10 +158,16 @@ static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
                return ret;
        tr->func.ftrace_managed = ret;
 
+       if (bpf_trampoline_module_get(tr))
+               return -ENOENT;
+
        if (tr->func.ftrace_managed)
                ret = register_ftrace_direct((long)ip, (long)new_addr);
        else
                ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
+
+       if (ret)
+               bpf_trampoline_module_put(tr);
        return ret;
 }
 
@@ -185,10 +195,142 @@ bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total)
        return tprogs;
 }
 
+static void __bpf_tramp_image_put_deferred(struct work_struct *work)
+{
+       struct bpf_tramp_image *im;
+
+       im = container_of(work, struct bpf_tramp_image, work);
+       bpf_image_ksym_del(&im->ksym);
+       bpf_jit_free_exec(im->image);
+       bpf_jit_uncharge_modmem(1);
+       percpu_ref_exit(&im->pcref);
+       kfree_rcu(im, rcu);
+}
+
+/* callback, fexit step 3 or fentry step 2 */
+static void __bpf_tramp_image_put_rcu(struct rcu_head *rcu)
+{
+       struct bpf_tramp_image *im;
+
+       im = container_of(rcu, struct bpf_tramp_image, rcu);
+       INIT_WORK(&im->work, __bpf_tramp_image_put_deferred);
+       schedule_work(&im->work);
+}
+
+/* callback, fexit step 2. Called after percpu_ref_kill confirms. */
+static void __bpf_tramp_image_release(struct percpu_ref *pcref)
+{
+       struct bpf_tramp_image *im;
+
+       im = container_of(pcref, struct bpf_tramp_image, pcref);
+       call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
+}
+
+/* callback, fexit or fentry step 1 */
+static void __bpf_tramp_image_put_rcu_tasks(struct rcu_head *rcu)
+{
+       struct bpf_tramp_image *im;
+
+       im = container_of(rcu, struct bpf_tramp_image, rcu);
+       if (im->ip_after_call)
+               /* the case of fmod_ret/fexit trampoline and CONFIG_PREEMPTION=y */
+               percpu_ref_kill(&im->pcref);
+       else
+               /* the case of fentry trampoline */
+               call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
+}
+
+static void bpf_tramp_image_put(struct bpf_tramp_image *im)
+{
+       /* The trampoline image that calls original function is using:
+        * rcu_read_lock_trace to protect sleepable bpf progs
+        * rcu_read_lock to protect normal bpf progs
+        * percpu_ref to protect trampoline itself
+        * rcu tasks to protect trampoline asm not covered by percpu_ref
+        * (which are few asm insns before __bpf_tramp_enter and
+        *  after __bpf_tramp_exit)
+        *
+        * The trampoline is unreachable before bpf_tramp_image_put().
+        *
+        * First, patch the trampoline to avoid calling into fexit progs.
+        * The progs will be freed even if the original function is still
+        * executing or sleeping.
+        * In case of CONFIG_PREEMPT=y use call_rcu_tasks() to wait on
+        * first few asm instructions to execute and call into
+        * __bpf_tramp_enter->percpu_ref_get.
+        * Then use percpu_ref_kill to wait for the trampoline and the original
+        * function to finish.
+        * Then use call_rcu_tasks() to make sure few asm insns in
+        * the trampoline epilogue are done as well.
+        *
+        * In !PREEMPT case the task that got interrupted in the first asm
+        * insns won't go through an RCU quiescent state which the
+        * percpu_ref_kill will be waiting for. Hence the first
+        * call_rcu_tasks() is not necessary.
+        */
+       if (im->ip_after_call) {
+               int err = bpf_arch_text_poke(im->ip_after_call, BPF_MOD_JUMP,
+                                            NULL, im->ip_epilogue);
+               WARN_ON(err);
+               if (IS_ENABLED(CONFIG_PREEMPTION))
+                       call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
+               else
+                       percpu_ref_kill(&im->pcref);
+               return;
+       }
+
+       /* The trampoline without fexit and fmod_ret progs doesn't call original
+        * function and doesn't use percpu_ref.
+        * Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
+        * Then use call_rcu_tasks() to wait for the rest of trampoline asm
+        * and normal progs.
+        */
+       call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
+}
+
+static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
+{
+       struct bpf_tramp_image *im;
+       struct bpf_ksym *ksym;
+       void *image;
+       int err = -ENOMEM;
+
+       im = kzalloc(sizeof(*im), GFP_KERNEL);
+       if (!im)
+               goto out;
+
+       err = bpf_jit_charge_modmem(1);
+       if (err)
+               goto out_free_im;
+
+       err = -ENOMEM;
+       im->image = image = bpf_jit_alloc_exec_page();
+       if (!image)
+               goto out_uncharge;
+
+       err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
+       if (err)
+               goto out_free_image;
+
+       ksym = &im->ksym;
+       INIT_LIST_HEAD_RCU(&ksym->lnode);
+       snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu_%u", key, idx);
+       bpf_image_ksym_add(image, ksym);
+       return im;
+
+out_free_image:
+       bpf_jit_free_exec(im->image);
+out_uncharge:
+       bpf_jit_uncharge_modmem(1);
+out_free_im:
+       kfree(im);
+out:
+       return ERR_PTR(err);
+}
+
 static int bpf_trampoline_update(struct bpf_trampoline *tr)
 {
-       void *old_image = tr->image + ((tr->selector + 1) & 1) * PAGE_SIZE/2;
-       void *new_image = tr->image + (tr->selector & 1) * PAGE_SIZE/2;
+       struct bpf_tramp_image *im;
        struct bpf_tramp_progs *tprogs;
        u32 flags = BPF_TRAMP_F_RESTORE_REGS;
        int err, total;
@@ -198,41 +340,42 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
                return PTR_ERR(tprogs);
 
        if (total == 0) {
-               err = unregister_fentry(tr, old_image);
+               err = unregister_fentry(tr, tr->cur_image->image);
+               bpf_tramp_image_put(tr->cur_image);
+               tr->cur_image = NULL;
                tr->selector = 0;
                goto out;
        }
 
+       im = bpf_tramp_image_alloc(tr->key, tr->selector);
+       if (IS_ERR(im)) {
+               err = PTR_ERR(im);
+               goto out;
+       }
+
        if (tprogs[BPF_TRAMP_FEXIT].nr_progs ||
            tprogs[BPF_TRAMP_MODIFY_RETURN].nr_progs)
                flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;
 
-       /* Though the second half of trampoline page is unused a task could be
-        * preempted in the middle of the first half of trampoline and two
-        * updates to trampoline would change the code from underneath the
-        * preempted task. Hence wait for tasks to voluntarily schedule or go
-        * to userspace.
-        * The same trampoline can hold both sleepable and non-sleepable progs.
-        * synchronize_rcu_tasks_trace() is needed to make sure all sleepable
-        * programs finish executing.
-        * Wait for these two grace periods together.
-        */
-       synchronize_rcu_mult(call_rcu_tasks, call_rcu_tasks_trace);
-
-       err = arch_prepare_bpf_trampoline(new_image, new_image + PAGE_SIZE / 2,
+       err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
                                          &tr->func.model, flags, tprogs,
                                          tr->func.addr);
        if (err < 0)
                goto out;
 
-       if (tr->selector)
+       WARN_ON(tr->cur_image && tr->selector == 0);
+       WARN_ON(!tr->cur_image && tr->selector);
+       if (tr->cur_image)
                /* progs already running at this address */
-               err = modify_fentry(tr, old_image, new_image);
+               err = modify_fentry(tr, tr->cur_image->image, im->image);
        else
                /* first time registering */
-               err = register_fentry(tr, new_image);
+               err = register_fentry(tr, im->image);
        if (err)
                goto out;
+       if (tr->cur_image)
+               bpf_tramp_image_put(tr->cur_image);
+       tr->cur_image = im;
        tr->selector++;
 out:
        kfree(tprogs);
@@ -364,17 +507,12 @@ void bpf_trampoline_put(struct bpf_trampoline *tr)
                goto out;
        if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
                goto out;
-       bpf_image_ksym_del(&tr->ksym);
-       /* This code will be executed when all bpf progs (both sleepable and
-        * non-sleepable) went through
-        * bpf_prog_put()->call_rcu[_tasks_trace]()->bpf_prog_free_deferred().
-        * Hence no need for another synchronize_rcu_tasks_trace() here,
-        * but synchronize_rcu_tasks() is still needed, since trampoline
-        * may not have had any sleepable programs and we need to wait
-        * for tasks to get out of trampoline code before freeing it.
+       /* This code will be executed even when the last bpf_tramp_image
+        * is alive. All progs are detached from the trampoline and the
+        * trampoline image is patched with jmp into epilogue to skip
+        * fexit progs. The fentry-only trampoline will be freed via
+        * multiple rcu callbacks.
         */
-       synchronize_rcu_tasks();
-       bpf_jit_free_exec(tr->image);
        hlist_del(&tr->hlist);
        kfree(tr);
 out:
@@ -478,8 +616,18 @@ void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start)
        rcu_read_unlock_trace();
 }
 
+void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)
+{
+       percpu_ref_get(&tr->pcref);
+}
+
+void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
+{
+       percpu_ref_put(&tr->pcref);
+}
+
 int __weak
-arch_prepare_bpf_trampoline(void *image, void *image_end,
+arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
                            const struct btf_func_model *m, u32 flags,
                            struct bpf_tramp_progs *tprogs,
                            void *orig_call)
index c56e3fc..3a73872 100644 (file)
@@ -5861,10 +5861,14 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
 {
        bool mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
                            (opcode == BPF_SUB && !off_is_neg);
-       u32 off;
+       u32 off, max;
 
        switch (ptr_reg->type) {
        case PTR_TO_STACK:
+               /* Offset 0 is out-of-bounds, but acceptable start for the
+                * left direction, see BPF_REG_FP.
+                */
+               max = MAX_BPF_STACK + mask_to_left;
                /* Indirect variable offset stack access is prohibited in
                 * unprivileged mode so it's not handled here.
                 */
@@ -5872,16 +5876,17 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
                if (mask_to_left)
                        *ptr_limit = MAX_BPF_STACK + off;
                else
-                       *ptr_limit = -off;
-               return 0;
+                       *ptr_limit = -off - 1;
+               return *ptr_limit >= max ? -ERANGE : 0;
        case PTR_TO_MAP_VALUE:
+               max = ptr_reg->map_ptr->value_size;
                if (mask_to_left) {
                        *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
                } else {
                        off = ptr_reg->smin_value + ptr_reg->off;
-                       *ptr_limit = ptr_reg->map_ptr->value_size - off;
+                       *ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
                }
-               return 0;
+               return *ptr_limit >= max ? -ERANGE : 0;
        default:
                return -EINVAL;
        }
@@ -5934,6 +5939,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
        u32 alu_state, alu_limit;
        struct bpf_reg_state tmp;
        bool ret;
+       int err;
 
        if (can_skip_alu_sanitation(env, insn))
                return 0;
@@ -5949,10 +5955,13 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
        alu_state |= ptr_is_dst_reg ?
                     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
 
-       if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
-               return 0;
-       if (update_alu_sanitation_state(aux, alu_state, alu_limit))
-               return -EACCES;
+       err = retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg);
+       if (err < 0)
+               return err;
+
+       err = update_alu_sanitation_state(aux, alu_state, alu_limit);
+       if (err < 0)
+               return err;
 do_sim:
        /* Simulate and find potential out-of-bounds access under
         * speculative execution from truncation as a result of
@@ -6103,7 +6112,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
        case BPF_ADD:
                ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
                if (ret < 0) {
-                       verbose(env, "R%d tried to add from different maps or paths\n", dst);
+                       verbose(env, "R%d tried to add from different maps, paths, or prohibited types\n", dst);
                        return ret;
                }
                /* We can take a fixed offset as long as it doesn't overflow
@@ -6158,7 +6167,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
        case BPF_SUB:
                ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
                if (ret < 0) {
-                       verbose(env, "R%d tried to sub from different maps or paths\n", dst);
+                       verbose(env, "R%d tried to sub from different maps, paths, or prohibited types\n", dst);
                        return ret;
                }
                if (dst_reg == off_reg) {
@@ -9056,6 +9065,10 @@ static int check_btf_info(struct bpf_verifier_env *env,
        btf = btf_get_by_fd(attr->prog_btf_fd);
        if (IS_ERR(btf))
                return PTR_ERR(btf);
+       if (btf_is_kernel(btf)) {
+               btf_put(btf);
+               return -EACCES;
+       }
        env->prog->aux->btf = btf;
 
        err = check_btf_func(env, attr, uattr);
@@ -11660,7 +11673,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
                        off_reg = issrc ? insn->src_reg : insn->dst_reg;
                        if (isneg)
                                *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
-                       *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
+                       *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
                        *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
                        *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
                        *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
@@ -12145,6 +12158,11 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
        u32 btf_id, member_idx;
        const char *mname;
 
+       if (!prog->gpl_compatible) {
+               verbose(env, "struct ops programs must have a GPL compatible license\n");
+               return -EINVAL;
+       }
+
        btf_id = prog->aux->attach_btf_id;
        st_ops = bpf_struct_ops_find(btf_id);
        if (!st_ops) {
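
With this change retrieve_ptr_limit() hands back an inclusive bound (size - off - 1) and rejects out-of-range offsets with -ERANGE, which is why the patched instruction sequence in fixup_bpf_calls() now loads alu_limit without the former "- 1". A userspace sketch of what the emitted masking sequence computes (sanitize_off is a hypothetical name; the real sequence operates on BPF_REG_AX):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the patched sequence on BPF_REG_AX: AX = limit; AX -= off;
     * AX |= off; AX = -AX; AX s>>= 63; off &= AX.  An offset past the
     * limit (or with the sign bit set) yields an all-zero mask.
     */
    static uint64_t sanitize_off(uint64_t limit, uint64_t off)
    {
        uint64_t ax = limit;

        ax -= off;
        ax |= off;
        ax = -ax;
        ax = (uint64_t)((int64_t)ax >> 63);    /* arithmetic shift */
        return off & ax;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)sanitize_off(7, 4));    /* 4: in range */
        printf("%llu\n", (unsigned long long)sanitize_off(7, 9));    /* 0: clamped */
        return 0;
    }

In-range offsets pass through unchanged; anything past the limit collapses to zero, which keeps speculative loads inside the object.
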
index 0acc8ed..426cd0c 100644 (file)
@@ -1948,8 +1948,14 @@ static __latent_entropy struct task_struct *copy_process(
        p = dup_task_struct(current, node);
        if (!p)
                goto fork_out;
-       if (args->io_thread)
+       if (args->io_thread) {
+               /*
+                * Mark us as an IO worker, and block any signal that
+                * isn't fatal or STOP.
+                */
                p->flags |= PF_IO_WORKER;
+               siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP));
+       }
 
        /*
         * This _must_ happen before we call free_task(), i.e. before we jump
@@ -2438,15 +2444,8 @@ struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node)
                .stack_size     = (unsigned long)arg,
                .io_thread      = 1,
        };
-       struct task_struct *tsk;
 
-       tsk = copy_process(NULL, 0, node, &args);
-       if (!IS_ERR(tsk)) {
-               sigfillset(&tsk->blocked);
-               sigdelsetmask(&tsk->blocked, sigmask(SIGKILL));
-               tsk->flags |= PF_NOFREEZE;
-       }
-       return tsk;
+       return copy_process(NULL, 0, node, &args);
 }
 
 /*
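
siginitsetinv() initializes a signal set to the complement of a mask, which is why the sigfillset()/sigdelsetmask() pair removed from create_io_thread() collapses into a single call inside copy_process(). A sketch of the equivalence, assuming kernel sigset helpers (block_all_but_fatal is a hypothetical wrapper):

    #include <linux/sched.h>
    #include <linux/signal.h>

    static void block_all_but_fatal(struct task_struct *p)
    {
        sigset_t manual;

        /* old create_io_thread() approach: fill the set, then punch holes */
        sigfillset(&manual);
        sigdelsetmask(&manual, sigmask(SIGKILL) | sigmask(SIGSTOP));

        /* new copy_process() approach: one inverted-mask initialization */
        siginitsetinv(&p->blocked, sigmask(SIGKILL) | sigmask(SIGSTOP));
    }
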
index e68db77..00febd6 100644 (file)
@@ -2728,14 +2728,13 @@ retry:
                goto out;
 
        restart = &current->restart_block;
-       restart->fn = futex_wait_restart;
        restart->futex.uaddr = uaddr;
        restart->futex.val = val;
        restart->futex.time = *abs_time;
        restart->futex.bitset = bitset;
        restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
 
-       ret = -ERESTART_RESTARTBLOCK;
+       ret = set_restart_fn(restart, futex_wait_restart);
 
 out:
        if (to) {
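
set_restart_fn() is the helper these restart-block call sites are converted to; it stores the callback and returns -ERESTART_RESTARTBLOCK in one step, which is why the explicit ret assignment disappears here and in the timer call sites further down. A sketch of the shape such a helper takes (set_restart_fn_sketch is a hypothetical stand-in):

    #include <linux/errno.h>
    #include <linux/restart_block.h>

    #ifndef arch_set_restart_data
    #define arch_set_restart_data(restart) do { } while (0)    /* default no-op */
    #endif

    /* store the callback and hand back the canonical restart error code in
     * one step, so a call site can no longer set ->fn yet forget the value
     */
    static inline long set_restart_fn_sketch(struct restart_block *restart,
                                             long (*fn)(struct restart_block *))
    {
        restart->fn = fn;
        arch_set_restart_data(restart);
        return -ERESTART_RESTARTBLOCK;
    }
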
index c94b820..c466c7f 100644 (file)
@@ -70,12 +70,16 @@ struct gcov_fn_info {
 
        u32 ident;
        u32 checksum;
+#if CONFIG_CLANG_VERSION < 110000
        u8 use_extra_checksum;
+#endif
        u32 cfg_checksum;
 
        u32 num_counters;
        u64 *counters;
+#if CONFIG_CLANG_VERSION < 110000
        const char *function_name;
+#endif
 };
 
 static struct gcov_info *current_info;
@@ -105,6 +109,7 @@ void llvm_gcov_init(llvm_gcov_callback writeout, llvm_gcov_callback flush)
 }
 EXPORT_SYMBOL(llvm_gcov_init);
 
+#if CONFIG_CLANG_VERSION < 110000
 void llvm_gcda_start_file(const char *orig_filename, const char version[4],
                u32 checksum)
 {
@@ -113,7 +118,17 @@ void llvm_gcda_start_file(const char *orig_filename, const char version[4],
        current_info->checksum = checksum;
 }
 EXPORT_SYMBOL(llvm_gcda_start_file);
+#else
+void llvm_gcda_start_file(const char *orig_filename, u32 version, u32 checksum)
+{
+       current_info->filename = orig_filename;
+       current_info->version = version;
+       current_info->checksum = checksum;
+}
+EXPORT_SYMBOL(llvm_gcda_start_file);
+#endif
 
+#if CONFIG_CLANG_VERSION < 110000
 void llvm_gcda_emit_function(u32 ident, const char *function_name,
                u32 func_checksum, u8 use_extra_checksum, u32 cfg_checksum)
 {
@@ -132,6 +147,21 @@ void llvm_gcda_emit_function(u32 ident, const char *function_name,
 
        list_add_tail(&info->head, &current_info->functions);
 }
+#else
+void llvm_gcda_emit_function(u32 ident, u32 func_checksum, u32 cfg_checksum)
+{
+       struct gcov_fn_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
+
+       if (!info)
+               return;
+
+       INIT_LIST_HEAD(&info->head);
+       info->ident = ident;
+       info->checksum = func_checksum;
+       info->cfg_checksum = cfg_checksum;
+       list_add_tail(&info->head, &current_info->functions);
+}
+#endif
 EXPORT_SYMBOL(llvm_gcda_emit_function);
 
 void llvm_gcda_emit_arcs(u32 num_counters, u64 *counters)
@@ -262,11 +292,16 @@ int gcov_info_is_compatible(struct gcov_info *info1, struct gcov_info *info2)
                !list_is_last(&fn_ptr2->head, &info2->functions)) {
                if (fn_ptr1->checksum != fn_ptr2->checksum)
                        return false;
+#if CONFIG_CLANG_VERSION < 110000
                if (fn_ptr1->use_extra_checksum != fn_ptr2->use_extra_checksum)
                        return false;
                if (fn_ptr1->use_extra_checksum &&
                        fn_ptr1->cfg_checksum != fn_ptr2->cfg_checksum)
                        return false;
+#else
+               if (fn_ptr1->cfg_checksum != fn_ptr2->cfg_checksum)
+                       return false;
+#endif
                fn_ptr1 = list_next_entry(fn_ptr1, head);
                fn_ptr2 = list_next_entry(fn_ptr2, head);
        }
@@ -295,6 +330,7 @@ void gcov_info_add(struct gcov_info *dst, struct gcov_info *src)
        }
 }
 
+#if CONFIG_CLANG_VERSION < 110000
 static struct gcov_fn_info *gcov_fn_info_dup(struct gcov_fn_info *fn)
 {
        size_t cv_size; /* counter values size */
@@ -322,6 +358,28 @@ err_name:
        kfree(fn_dup);
        return NULL;
 }
+#else
+static struct gcov_fn_info *gcov_fn_info_dup(struct gcov_fn_info *fn)
+{
+       size_t cv_size; /* counter values size */
+       struct gcov_fn_info *fn_dup = kmemdup(fn, sizeof(*fn),
+                       GFP_KERNEL);
+       if (!fn_dup)
+               return NULL;
+       INIT_LIST_HEAD(&fn_dup->head);
+
+       cv_size = fn->num_counters * sizeof(fn->counters[0]);
+       fn_dup->counters = vmalloc(cv_size);
+       if (!fn_dup->counters) {
+               kfree(fn_dup);
+               return NULL;
+       }
+
+       memcpy(fn_dup->counters, fn->counters, cv_size);
+
+       return fn_dup;
+}
+#endif
 
 /**
  * gcov_info_dup - duplicate profiling data set
@@ -362,6 +420,7 @@ err:
  * gcov_info_free - release memory for profiling data set duplicate
  * @info: profiling data set duplicate to free
  */
+#if CONFIG_CLANG_VERSION < 110000
 void gcov_info_free(struct gcov_info *info)
 {
        struct gcov_fn_info *fn, *tmp;
@@ -375,6 +434,20 @@ void gcov_info_free(struct gcov_info *info)
        kfree(info->filename);
        kfree(info);
 }
+#else
+void gcov_info_free(struct gcov_info *info)
+{
+       struct gcov_fn_info *fn, *tmp;
+
+       list_for_each_entry_safe(fn, tmp, &info->functions, head) {
+               vfree(fn->counters);
+               list_del(&fn->head);
+               kfree(fn);
+       }
+       kfree(info->filename);
+       kfree(info);
+}
+#endif
 
 #define ITER_STRIDE    PAGE_SIZE
 
@@ -460,17 +533,22 @@ static size_t convert_to_gcda(char *buffer, struct gcov_info *info)
 
        list_for_each_entry(fi_ptr, &info->functions, head) {
                u32 i;
-               u32 len = 2;
-
-               if (fi_ptr->use_extra_checksum)
-                       len++;
 
                pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION);
-               pos += store_gcov_u32(buffer, pos, len);
+#if CONFIG_CLANG_VERSION < 110000
+               pos += store_gcov_u32(buffer, pos,
+                       fi_ptr->use_extra_checksum ? 3 : 2);
+#else
+               pos += store_gcov_u32(buffer, pos, 3);
+#endif
                pos += store_gcov_u32(buffer, pos, fi_ptr->ident);
                pos += store_gcov_u32(buffer, pos, fi_ptr->checksum);
+#if CONFIG_CLANG_VERSION < 110000
                if (fi_ptr->use_extra_checksum)
                        pos += store_gcov_u32(buffer, pos, fi_ptr->cfg_checksum);
+#else
+               pos += store_gcov_u32(buffer, pos, fi_ptr->cfg_checksum);
+#endif
 
                pos += store_gcov_u32(buffer, pos, GCOV_TAG_COUNTER_BASE);
                pos += store_gcov_u32(buffer, pos, fi_ptr->num_counters * 2);
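
The #if CONFIG_CLANG_VERSION < 110000 split exists because Clang 11 dropped use_extra_checksum and the function name from its gcov records: every record now carries cfg_checksum, so the record is always three words long. A userspace sketch of the two .gcda layouts (store_u32 and emit_fn_record are hypothetical helpers):

    #include <stdint.h>
    #include <string.h>

    #define GCOV_TAG_FUNCTION 0x01000000u

    /* append one u32 to the .gcda buffer, returning bytes written */
    static size_t store_u32(char *buf, size_t pos, uint32_t v)
    {
        if (buf)
            memcpy(buf + pos, &v, sizeof(v));
        return sizeof(v);
    }

    static size_t emit_fn_record(char *buf, size_t pos, uint32_t ident,
                                 uint32_t checksum, uint32_t cfg_checksum,
                                 int have_cfg_checksum)  /* always 1 on clang >= 11 */
    {
        size_t start = pos;

        pos += store_u32(buf, pos, GCOV_TAG_FUNCTION);
        pos += store_u32(buf, pos, have_cfg_checksum ? 3 : 2);    /* word count */
        pos += store_u32(buf, pos, ident);
        pos += store_u32(buf, pos, checksum);
        if (have_cfg_checksum)
            pos += store_u32(buf, pos, cfg_checksum);
        return pos - start;
    }
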
index 4800660..40880c3 100644 (file)
@@ -159,7 +159,7 @@ static const struct irq_domain_ops irq_sim_domain_ops = {
  * irq_domain_create_sim - Create a new interrupt simulator irq_domain and
  *                         allocate a range of dummy interrupts.
  *
- * @fnode:      struct fwnode_handle to be associated with this domain.
+ * @fwnode:     struct fwnode_handle to be associated with this domain.
  * @num_irqs:   Number of interrupts to allocate.
  *
  * On success: return a new irq_domain object.
@@ -228,7 +228,7 @@ static void devm_irq_domain_release_sim(struct device *dev, void *res)
  *                              a managed device.
  *
  * @dev:        Device to initialize the simulator object for.
- * @fnode:      struct fwnode_handle to be associated with this domain.
+ * @fwnode:     struct fwnode_handle to be associated with this domain.
  * @num_irqs:   Number of interrupts to allocate
  *
  * On success: return a new irq_domain object.
index 97c231a..49288e9 100644 (file)
@@ -1142,11 +1142,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
        irqreturn_t ret;
 
        local_bh_disable();
+       if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+               local_irq_disable();
        ret = action->thread_fn(action->irq, action->dev_id);
        if (ret == IRQ_HANDLED)
                atomic_inc(&desc->threads_handled);
 
        irq_finalize_oneshot(desc, action);
+       if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+               local_irq_enable();
        local_bh_enable();
        return ret;
 }
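
The added local_irq_disable()/local_irq_enable() pair restores the historical expectation that a forced-threaded handler runs with hard interrupts off on non-RT kernels, while PREEMPT_RT deliberately keeps them on. A reduced sketch of the guard pattern, with a hypothetical wrapper:

    #include <linux/irqflags.h>
    #include <linux/kconfig.h>

    static void run_like_hardirq(void (*fn)(void *), void *arg)
    {
        /* non-RT: emulate hardirq context for the forced-threaded handler;
         * PREEMPT_RT: keeping interrupts enabled is the point of threading
         */
        if (!IS_ENABLED(CONFIG_PREEMPT_RT))
            local_irq_disable();
        fn(arg);
        if (!IS_ENABLED(CONFIG_PREEMPT_RT))
            local_irq_enable();
    }
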
index c6a39d6..ba39fbb 100644 (file)
@@ -407,6 +407,14 @@ static bool jump_label_can_update(struct jump_entry *entry, bool init)
                return false;
 
        if (!kernel_text_address(jump_entry_code(entry))) {
+               /*
+                * This skips patching built-in __exit, which
+                * is part of init_section_contains() but is
+                * not part of kernel_text_address().
+                *
+                * Skipping built-in __exit is fine since it
+                * will never be executed.
+                */
                WARN_ONCE(!jump_entry_is_init(entry),
                          "can't patch jump_label at %pS",
                          (void *)jump_entry_code(entry));
index c6d0c1d..f160f1c 100644 (file)
@@ -705,7 +705,7 @@ static void print_lock_name(struct lock_class *class)
 
        printk(KERN_CONT " (");
        __print_lock_name(class);
-       printk(KERN_CONT "){%s}-{%hd:%hd}", usage,
+       printk(KERN_CONT "){%s}-{%d:%d}", usage,
                        class->wait_type_outer ?: class->wait_type_inner,
                        class->wait_type_inner);
 }
@@ -930,7 +930,8 @@ static bool assign_lock_key(struct lockdep_map *lock)
                /* Debug-check: all keys must be persistent! */
                debug_locks_off();
                pr_err("INFO: trying to register non-static key.\n");
-               pr_err("the code is fine but needs lockdep annotation.\n");
+               pr_err("The code is fine but needs lockdep annotation, or maybe\n");
+               pr_err("you didn't initialize this object before use?\n");
                pr_err("turning off the locking correctness validator.\n");
                dump_stack();
                return false;
index adb9350..622ebdf 100644 (file)
@@ -626,7 +626,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
  */
 static __always_inline bool
 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
-                     const bool use_ww_ctx, struct mutex_waiter *waiter)
+                     struct mutex_waiter *waiter)
 {
        if (!waiter) {
                /*
@@ -702,7 +702,7 @@ fail:
 #else
 static __always_inline bool
 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
-                     const bool use_ww_ctx, struct mutex_waiter *waiter)
+                     struct mutex_waiter *waiter)
 {
        return false;
 }
@@ -922,6 +922,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        struct ww_mutex *ww;
        int ret;
 
+       if (!use_ww_ctx)
+               ww_ctx = NULL;
+
        might_sleep();
 
 #ifdef CONFIG_DEBUG_MUTEXES
@@ -929,7 +932,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 #endif
 
        ww = container_of(lock, struct ww_mutex, base);
-       if (use_ww_ctx && ww_ctx) {
+       if (ww_ctx) {
                if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
                        return -EALREADY;
 
@@ -946,10 +949,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
 
        if (__mutex_trylock(lock) ||
-           mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
+           mutex_optimistic_spin(lock, ww_ctx, NULL)) {
                /* got the lock, yay! */
                lock_acquired(&lock->dep_map, ip);
-               if (use_ww_ctx && ww_ctx)
+               if (ww_ctx)
                        ww_mutex_set_context_fastpath(ww, ww_ctx);
                preempt_enable();
                return 0;
@@ -960,7 +963,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
         * After waiting to acquire the wait_lock, try again.
         */
        if (__mutex_trylock(lock)) {
-               if (use_ww_ctx && ww_ctx)
+               if (ww_ctx)
                        __ww_mutex_check_waiters(lock, ww_ctx);
 
                goto skip_wait;
@@ -1013,7 +1016,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                        goto err;
                }
 
-               if (use_ww_ctx && ww_ctx) {
+               if (ww_ctx) {
                        ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
                        if (ret)
                                goto err;
@@ -1026,7 +1029,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                 * ww_mutex needs to always recheck its position since its waiter
                 * list is not FIFO ordered.
                 */
-               if ((use_ww_ctx && ww_ctx) || !first) {
+               if (ww_ctx || !first) {
                        first = __mutex_waiter_is_first(lock, &waiter);
                        if (first)
                                __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
@@ -1039,7 +1042,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                 * or we must see its unlock and acquire.
                 */
                if (__mutex_trylock(lock) ||
-                   (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
+                   (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
                        break;
 
                spin_lock(&lock->wait_lock);
@@ -1048,7 +1051,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 acquired:
        __set_current_state(TASK_RUNNING);
 
-       if (use_ww_ctx && ww_ctx) {
+       if (ww_ctx) {
                /*
                 * Wound-Wait; we stole the lock (!first_waiter), check the
                 * waiters as anyone might want to wound us.
@@ -1068,7 +1071,7 @@ skip_wait:
        /* got the lock - cleanup and rejoice! */
        lock_acquired(&lock->dep_map, ip);
 
-       if (use_ww_ctx && ww_ctx)
+       if (ww_ctx)
                ww_mutex_lock_acquired(ww, ww_ctx);
 
        spin_unlock(&lock->wait_lock);
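
Clearing ww_ctx to NULL up front when use_ww_ctx is false lets every later "use_ww_ctx && ww_ctx" test shrink to a plain pointer check, and since use_ww_ctx is a compile-time constant at each inlined call site the compiler still drops the whole branch for ordinary mutexes. A reduced sketch of the normalization, with hypothetical names:

    #include <stddef.h>

    struct ww_ctx;

    /* use_ww is a compile-time constant at every inlined call site, so the
     * normalization folds away and each later check is a plain NULL test
     * instead of the old (use_ww && ctx) pair.
     */
    static inline int lock_common(int use_ww, struct ww_ctx *ctx)
    {
        if (!use_ww)
            ctx = NULL;

        if (ctx) {
            /* wound/wait bookkeeping would run here */
        }
        return 0;
    }
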
index 1358fa4..0f4530b 100644 (file)
@@ -98,7 +98,7 @@ static int __init em_debug_init(void)
 
        return 0;
 }
-core_initcall(em_debug_init);
+fs_initcall(em_debug_init);
 #else /* CONFIG_DEBUG_FS */
 static void em_debug_create_pd(struct device *dev) {}
 static void em_debug_remove_pd(struct device *dev) {}
index 821cf17..61db50f 100644 (file)
@@ -375,7 +375,7 @@ static int ptrace_attach(struct task_struct *task, long request,
        audit_ptrace(task);
 
        retval = -EPERM;
-       if (unlikely(task->flags & (PF_KTHREAD | PF_IO_WORKER)))
+       if (unlikely(task->flags & PF_KTHREAD))
                goto out;
        if (same_thread_group(task, current))
                goto out;
index eb1b158..a6ad5eb 100644 (file)
@@ -244,8 +244,6 @@ void migrate_to_reboot_cpu(void)
 void kernel_restart(char *cmd)
 {
        kernel_restart_prepare(cmd);
-       if (pm_power_off_prepare)
-               pm_power_off_prepare();
        migrate_to_reboot_cpu();
        syscore_shutdown();
        if (!cmd)
index ba4d1ef..f271835 100644 (file)
@@ -91,7 +91,7 @@ static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
                return true;
 
        /* Only allow kernel generated signals to this kthread */
-       if (unlikely((t->flags & (PF_KTHREAD | PF_IO_WORKER)) &&
+       if (unlikely((t->flags & PF_KTHREAD) &&
                     (handler == SIG_KTHREAD_KERNEL) && !force))
                return true;
 
@@ -1096,7 +1096,7 @@ static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struc
        /*
         * Skip useless siginfo allocation for SIGKILL and kernel threads.
         */
-       if ((sig == SIGKILL) || (t->flags & (PF_KTHREAD | PF_IO_WORKER)))
+       if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
                goto out_set;
 
        /*
@@ -2767,6 +2767,14 @@ relock:
                        do_coredump(&ksig->info);
                }
 
+               /*
+                * PF_IO_WORKER threads will catch and exit on fatal signals
+                * themselves. They have cleanup that must be performed, so
+                * we cannot call do_exit() on their behalf.
+                */
+               if (current->flags & PF_IO_WORKER)
+                       goto out;
+
                /*
                 * Death signals, no core dump.
                 */
@@ -2774,7 +2782,7 @@ relock:
                /* NOTREACHED */
        }
        spin_unlock_irq(&sighand->siglock);
-
+out:
        ksig->sig = signr;
 
        if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
index ae82529..2c5950b 100644 (file)
@@ -35,27 +35,30 @@ static inline void *static_call_addr(struct static_call_site *site)
        return (void *)((long)site->addr + (long)&site->addr);
 }
 
+static inline unsigned long __static_call_key(const struct static_call_site *site)
+{
+       return (long)site->key + (long)&site->key;
+}
 
 static inline struct static_call_key *static_call_key(const struct static_call_site *site)
 {
-       return (struct static_call_key *)
-               (((long)site->key + (long)&site->key) & ~STATIC_CALL_SITE_FLAGS);
+       return (void *)(__static_call_key(site) & ~STATIC_CALL_SITE_FLAGS);
 }
 
 /* These assume the key is word-aligned. */
 static inline bool static_call_is_init(struct static_call_site *site)
 {
-       return ((long)site->key + (long)&site->key) & STATIC_CALL_SITE_INIT;
+       return __static_call_key(site) & STATIC_CALL_SITE_INIT;
 }
 
 static inline bool static_call_is_tail(struct static_call_site *site)
 {
-       return ((long)site->key + (long)&site->key) & STATIC_CALL_SITE_TAIL;
+       return __static_call_key(site) & STATIC_CALL_SITE_TAIL;
 }
 
 static inline void static_call_set_init(struct static_call_site *site)
 {
-       site->key = ((long)static_call_key(site) | STATIC_CALL_SITE_INIT) -
+       site->key = (__static_call_key(site) | STATIC_CALL_SITE_INIT) -
                    (long)&site->key;
 }
 
@@ -146,6 +149,7 @@ void __static_call_update(struct static_call_key *key, void *tramp, void *func)
        };
 
        for (site_mod = &first; site_mod; site_mod = site_mod->next) {
+               bool init = system_state < SYSTEM_RUNNING;
                struct module *mod = site_mod->mod;
 
                if (!site_mod->sites) {
@@ -165,6 +169,7 @@ void __static_call_update(struct static_call_key *key, void *tramp, void *func)
                if (mod) {
                        stop = mod->static_call_sites +
                               mod->num_static_call_sites;
+                       init = mod->state == MODULE_STATE_COMING;
                }
 #endif
 
@@ -172,25 +177,26 @@ void __static_call_update(struct static_call_key *key, void *tramp, void *func)
                     site < stop && static_call_key(site) == key; site++) {
                        void *site_addr = static_call_addr(site);
 
-                       if (static_call_is_init(site)) {
-                               /*
-                                * Don't write to call sites which were in
-                                * initmem and have since been freed.
-                                */
-                               if (!mod && system_state >= SYSTEM_RUNNING)
-                                       continue;
-                               if (mod && !within_module_init((unsigned long)site_addr, mod))
-                                       continue;
-                       }
+                       if (!init && static_call_is_init(site))
+                               continue;
 
                        if (!kernel_text_address((unsigned long)site_addr)) {
-                               WARN_ONCE(1, "can't patch static call site at %pS",
+                               /*
+                                * This skips patching built-in __exit, which
+                                * is part of init_section_contains() but is
+                                * not part of kernel_text_address().
+                                *
+                                * Skipping built-in __exit is fine since it
+                                * will never be executed.
+                                */
+                               WARN_ONCE(!static_call_is_init(site),
+                                         "can't patch static call site at %pS",
                                          site_addr);
                                continue;
                        }
 
                        arch_static_call_transform(site_addr, NULL, func,
-                               static_call_is_tail(site));
+                                                  static_call_is_tail(site));
                }
        }
 
@@ -349,7 +355,7 @@ static int static_call_add_module(struct module *mod)
        struct static_call_site *site;
 
        for (site = start; site != stop; site++) {
-               unsigned long s_key = (long)site->key + (long)&site->key;
+               unsigned long s_key = __static_call_key(site);
                unsigned long addr = s_key & ~STATIC_CALL_SITE_FLAGS;
                unsigned long key;
 
index 98d7a15..4d94e2b 100644 (file)
@@ -854,9 +854,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
        if (flags == TIMER_ABSTIME)
                return -ERESTARTNOHAND;
 
-       restart->fn = alarm_timer_nsleep_restart;
        restart->nanosleep.clockid = type;
        restart->nanosleep.expires = exp;
+       set_restart_fn(restart, alarm_timer_nsleep_restart);
        return ret;
 }
 
index 788b9d1..5c9d968 100644 (file)
@@ -1957,9 +1957,9 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
        }
 
        restart = &current->restart_block;
-       restart->fn = hrtimer_nanosleep_restart;
        restart->nanosleep.clockid = t.timer.base->clockid;
        restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
+       set_restart_fn(restart, hrtimer_nanosleep_restart);
 out:
        destroy_hrtimer_on_stack(&t.timer);
        return ret;
index a71758e..9abe152 100644 (file)
@@ -1480,8 +1480,8 @@ static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
                if (flags & TIMER_ABSTIME)
                        return -ERESTARTNOHAND;
 
-               restart_block->fn = posix_cpu_nsleep_restart;
                restart_block->nanosleep.clockid = which_clock;
+               set_restart_fn(restart_block, posix_cpu_nsleep_restart);
        }
        return error;
 }
index 4d8e355..3ba52d4 100644 (file)
@@ -3231,7 +3231,8 @@ ftrace_allocate_pages(unsigned long num_to_init)
        pg = start_pg;
        while (pg) {
                order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-               free_pages((unsigned long)pg->records, order);
+               if (order >= 0)
+                       free_pages((unsigned long)pg->records, order);
                start_pg = pg->next;
                kfree(pg);
                pg = start_pg;
@@ -5045,6 +5046,20 @@ struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
        return NULL;
 }
 
+static struct ftrace_direct_func *ftrace_alloc_direct_func(unsigned long addr)
+{
+       struct ftrace_direct_func *direct;
+
+       direct = kmalloc(sizeof(*direct), GFP_KERNEL);
+       if (!direct)
+               return NULL;
+       direct->addr = addr;
+       direct->count = 0;
+       list_add_rcu(&direct->next, &ftrace_direct_funcs);
+       ftrace_direct_func_count++;
+       return direct;
+}
+
 /**
  * register_ftrace_direct - Call a custom trampoline directly
  * @ip: The address of the nop at the beginning of a function
@@ -5120,15 +5135,11 @@ int register_ftrace_direct(unsigned long ip, unsigned long addr)
 
        direct = ftrace_find_direct_func(addr);
        if (!direct) {
-               direct = kmalloc(sizeof(*direct), GFP_KERNEL);
+               direct = ftrace_alloc_direct_func(addr);
                if (!direct) {
                        kfree(entry);
                        goto out_unlock;
                }
-               direct->addr = addr;
-               direct->count = 0;
-               list_add_rcu(&direct->next, &ftrace_direct_funcs);
-               ftrace_direct_func_count++;
        }
 
        entry->ip = ip;
@@ -5329,6 +5340,7 @@ int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
 int modify_ftrace_direct(unsigned long ip,
                         unsigned long old_addr, unsigned long new_addr)
 {
+       struct ftrace_direct_func *direct, *new_direct = NULL;
        struct ftrace_func_entry *entry;
        struct dyn_ftrace *rec;
        int ret = -ENODEV;
@@ -5344,6 +5356,20 @@ int modify_ftrace_direct(unsigned long ip,
        if (entry->direct != old_addr)
                goto out_unlock;
 
+       direct = ftrace_find_direct_func(old_addr);
+       if (WARN_ON(!direct))
+               goto out_unlock;
+       if (direct->count > 1) {
+               ret = -ENOMEM;
+               new_direct = ftrace_alloc_direct_func(new_addr);
+               if (!new_direct)
+                       goto out_unlock;
+               direct->count--;
+               new_direct->count++;
+       } else {
+               direct->addr = new_addr;
+       }
+
        /*
         * If there's no other ftrace callback on the rec->ip location,
         * then it can be changed directly by the architecture.
@@ -5357,6 +5383,14 @@ int modify_ftrace_direct(unsigned long ip,
                ret = 0;
        }
 
+       if (unlikely(ret && new_direct)) {
+               direct->count++;
+               list_del_rcu(&new_direct->next);
+               synchronize_rcu_tasks();
+               kfree(new_direct);
+               ftrace_direct_func_count--;
+       }
+
  out_unlock:
        mutex_unlock(&ftrace_lock);
        mutex_unlock(&direct_mutex);
@@ -6418,7 +6452,8 @@ void ftrace_release_mod(struct module *mod)
                clear_mod_from_hashes(pg);
 
                order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-               free_pages((unsigned long)pg->records, order);
+               if (order >= 0)
+                       free_pages((unsigned long)pg->records, order);
                tmp_page = pg->next;
                kfree(pg);
                ftrace_number_of_pages -= 1 << order;
@@ -6778,7 +6813,8 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
                if (!pg->index) {
                        *last_pg = pg->next;
                        order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-                       free_pages((unsigned long)pg->records, order);
+                       if (order >= 0)
+                               free_pages((unsigned long)pg->records, order);
                        ftrace_number_of_pages -= 1 << order;
                        ftrace_number_of_groups--;
                        kfree(pg);
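
The repeated "if (order >= 0)" guards exist because get_count_order() yields -1 when a page group holds zero entries, and passing a negative order to free_pages() is bogus. A userspace sketch of the hazard (count_order is a hypothetical mirror of the kernel helper):

    #include <stdio.h>

    /* hypothetical mirror of the kernel's get_count_order(): the smallest
     * order with (1 << order) >= count, or -1 when count is zero
     */
    static int count_order(unsigned int count)
    {
        unsigned int c = count;
        int order = -1;

        while (c) {             /* floor(log2(count)) */
            c >>= 1;
            order++;
        }
        if (count & (count - 1))
            order++;            /* round up for non-powers-of-two */
        return order;
    }

    int main(void)
    {
        int order = count_order(0);    /* an empty page group */

        if (order >= 0)
            printf("free_pages(..., %d)\n", order);
        else
            printf("skip free: nothing was allocated\n");
        return 0;
    }
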
index eccb4e1..5c77762 100644 (file)
@@ -2984,7 +2984,8 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
 
        size = nr_entries * sizeof(unsigned long);
        event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
-                                           sizeof(*entry) + size, trace_ctx);
+                                   (sizeof(*entry) - sizeof(entry->caller)) + size,
+                                   trace_ctx);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
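
struct stack_entry keeps a legacy fixed caller[8] array that is really used as a flexible array, so reserving sizeof(*entry) + size set those eight words aside twice; subtracting sizeof(entry->caller) reserves exactly the header plus the actual dump. A userspace sketch of the arithmetic, with a hypothetical entry layout:

    #include <stdio.h>

    /* hypothetical layout: a header followed by a legacy fixed array that
     * is really used as a flexible array for the captured stack
     */
    struct stack_entry {
        int size;
        unsigned long caller[8];
    };

    int main(void)
    {
        size_t nr_entries = 20;
        size_t dump = nr_entries * sizeof(unsigned long);
        size_t old = sizeof(struct stack_entry) + dump;
        size_t new = sizeof(struct stack_entry)
                     - sizeof(((struct stack_entry *)0)->caller) + dump;

        printf("old=%zu new=%zu (saves %zu bytes per event)\n",
               old, new, old - new);
        return 0;
    }
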
index 0b35212..bb7bb3b 100644 (file)
@@ -139,13 +139,22 @@ static void umd_cleanup(struct subprocess_info *info)
        struct umd_info *umd_info = info->data;
 
        /* cleanup if umh_setup() was successful but exec failed */
-       if (info->retval) {
-               fput(umd_info->pipe_to_umh);
-               fput(umd_info->pipe_from_umh);
-               put_pid(umd_info->tgid);
-               umd_info->tgid = NULL;
-       }
+       if (info->retval)
+               umd_cleanup_helper(umd_info);
+}
+
+/**
+ * umd_cleanup_helper - release the resources which were allocated in umd_setup
+ * @info: information about usermode driver
+ */
+void umd_cleanup_helper(struct umd_info *info)
+{
+       fput(info->pipe_to_umh);
+       fput(info->pipe_from_umh);
+       put_pid(info->tgid);
+       info->tgid = NULL;
 }
+EXPORT_SYMBOL_GPL(umd_cleanup_helper);
 
 /**
  * fork_usermode_driver - fork a usermode driver
index 7110906..107bc38 100644 (file)
@@ -278,9 +278,10 @@ void touch_all_softlockup_watchdogs(void)
         * update as well, the only side effect might be a cycle delay for
         * the softlockup check.
         */
-       for_each_cpu(cpu, &watchdog_allowed_mask)
+       for_each_cpu(cpu, &watchdog_allowed_mask) {
                per_cpu(watchdog_touch_ts, cpu) = SOFTLOCKUP_RESET;
-       wq_watchdog_touch(-1);
+               wq_watchdog_touch(cpu);
+       }
 }
 
 void touch_softlockup_watchdog_sync(void)
index 0d150da..79f2319 100644 (file)
@@ -1412,7 +1412,6 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
         */
        lockdep_assert_irqs_disabled();
 
-       debug_work_activate(work);
 
        /* if draining, only works from the same workqueue are allowed */
        if (unlikely(wq->flags & __WQ_DRAINING) &&
@@ -1494,6 +1493,7 @@ retry:
                worklist = &pwq->delayed_works;
        }
 
+       debug_work_activate(work);
        insert_work(pwq, work, worklist, work_flags);
 
 out:
@@ -5787,22 +5787,17 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
                        continue;
 
                /* get the latest of pool and touched timestamps */
+               if (pool->cpu >= 0)
+                       touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
+               else
+                       touched = READ_ONCE(wq_watchdog_touched);
                pool_ts = READ_ONCE(pool->watchdog_ts);
-               touched = READ_ONCE(wq_watchdog_touched);
 
                if (time_after(pool_ts, touched))
                        ts = pool_ts;
                else
                        ts = touched;
 
-               if (pool->cpu >= 0) {
-                       unsigned long cpu_touched =
-                               READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
-                                                 pool->cpu));
-                       if (time_after(cpu_touched, ts))
-                               ts = cpu_touched;
-               }
-
                /* did we stall? */
                if (time_after(jiffies, ts + thresh)) {
                        lockup_detected = true;
@@ -5826,8 +5821,8 @@ notrace void wq_watchdog_touch(int cpu)
 {
        if (cpu >= 0)
                per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
-       else
-               wq_watchdog_touched = jiffies;
+
+       wq_watchdog_touched = jiffies;
 }
 
 static void wq_watchdog_set_thresh(unsigned long thresh)
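
After this rework a per-cpu pool trusts only its own CPU's touch timestamp and an unbound pool only the global one, and touch_all_softlockup_watchdogs() correspondingly touches every CPU rather than just the global stamp. A reduced sketch of the stall decision, assuming jiffies-style wrap-safe timestamps:

    #include <stdbool.h>

    /* wrap-safe "a is later than b", like the kernel's time_after() */
    static bool time_after_ul(unsigned long a, unsigned long b)
    {
        return (long)(b - a) < 0;
    }

    static bool pool_stalled(int pool_cpu, unsigned long pool_ts,
                             unsigned long global_touch,
                             const unsigned long *percpu_touch,
                             unsigned long now, unsigned long thresh)
    {
        /* per-cpu pools trust only their own CPU's touch; unbound pools
         * (pool_cpu < 0) trust only the global timestamp
         */
        unsigned long touched = pool_cpu >= 0 ? percpu_touch[pool_cpu]
                                              : global_touch;
        unsigned long ts = time_after_ul(pool_ts, touched) ? pool_ts : touched;

        return time_after_ul(now, ts + thresh);
    }
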
index 2779c29..417c3d3 100644 (file)
@@ -1363,7 +1363,7 @@ config LOCKDEP
        bool
        depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
        select STACKTRACE
-       select FRAME_POINTER if !MIPS && !PPC && !ARM && !S390 && !MICROBLAZE && !ARC && !X86
+       depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
        select KALLSYMS
        select KALLSYMS_ALL
 
@@ -1665,7 +1665,7 @@ config LATENCYTOP
        depends on DEBUG_KERNEL
        depends on STACKTRACE_SUPPORT
        depends on PROC_FS
-       select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
+       depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
        select KALLSYMS
        select KALLSYMS_ALL
        select STACKTRACE
@@ -1918,7 +1918,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
        depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
        depends on !X86_64
        select STACKTRACE
-       select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
+       depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
        help
          Provide stacktrace filter for fault-injection capabilities
 
index 064d68a..4686639 100644 (file)
@@ -232,4 +232,5 @@ u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
 
        return res + div64_u64(a * b, c);
 }
+EXPORT_SYMBOL(mul_u64_u64_div_u64);
 #endif
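
Exporting mul_u64_u64_div_u64() lets modules compute a*b/c with a wide intermediate instead of open-coding overflow-prone 64-bit math. A minimal usage sketch, assuming a module context (scale_bytes is a hypothetical caller):

    #include <linux/math64.h>

    /* scale a byte count by a num/den ratio: bytes * num may exceed
     * 64 bits, and the helper keeps a wide intermediate (the generic C
     * fallback patched here, or an arch-specific 128-bit version)
     */
    static u64 scale_bytes(u64 bytes, u64 num, u64 den)
    {
        return mul_u64_u64_div_u64(bytes, num, den);
    }
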
index eee017f..f1017f3 100644 (file)
@@ -22,7 +22,7 @@ static noinline void __init copy_user_test(void)
        char *kmem;
        char __user *usermem;
        size_t size = 10;
-       int unused;
+       int __maybe_unused unused;
 
        kmem = kmalloc(size, GFP_KERNEL);
        if (!kmem)
index 8294f43..8b1c318 100644 (file)
@@ -1530,24 +1530,24 @@ static noinline void check_store_range(struct xarray *xa)
 
 #ifdef CONFIG_XARRAY_MULTI
 static void check_split_1(struct xarray *xa, unsigned long index,
-                                                       unsigned int order)
+                               unsigned int order, unsigned int new_order)
 {
-       XA_STATE(xas, xa, index);
-       void *entry;
-       unsigned int i = 0;
+       XA_STATE_ORDER(xas, xa, index, new_order);
+       unsigned int i;
 
        xa_store_order(xa, index, order, xa, GFP_KERNEL);
 
        xas_split_alloc(&xas, xa, order, GFP_KERNEL);
        xas_lock(&xas);
        xas_split(&xas, xa, order);
+       for (i = 0; i < (1 << order); i += (1 << new_order))
+               __xa_store(xa, index + i, xa_mk_index(index + i), 0);
        xas_unlock(&xas);
 
-       xa_for_each(xa, index, entry) {
-               XA_BUG_ON(xa, entry != xa);
-               i++;
+       for (i = 0; i < (1 << order); i++) {
+               unsigned int val = index + (i & ~((1 << new_order) - 1));
+               XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val));
        }
-       XA_BUG_ON(xa, i != 1 << order);
 
        xa_set_mark(xa, index, XA_MARK_0);
        XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
@@ -1557,14 +1557,16 @@ static void check_split_1(struct xarray *xa, unsigned long index,
 
 static noinline void check_split(struct xarray *xa)
 {
-       unsigned int order;
+       unsigned int order, new_order;
 
        XA_BUG_ON(xa, !xa_empty(xa));
 
        for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
-               check_split_1(xa, 0, order);
-               check_split_1(xa, 1UL << order, order);
-               check_split_1(xa, 3UL << order, order);
+               for (new_order = 0; new_order < order; new_order++) {
+                       check_split_1(xa, 0, order, new_order);
+                       check_split_1(xa, 1UL << order, order, new_order);
+                       check_split_1(xa, 3UL << order, order, new_order);
+               }
        }
 }
 #else
index 5fa5161..f5d8f54 100644 (file)
@@ -987,7 +987,7 @@ static void node_set_marks(struct xa_node *node, unsigned int offset,
  * xas_split_alloc() - Allocate memory for splitting an entry.
  * @xas: XArray operation state.
  * @entry: New entry which will be stored in the array.
- * @order: New entry order.
+ * @order: Current entry order.
  * @gfp: Memory allocation flags.
  *
  * This function should be called before calling xas_split().
@@ -1011,7 +1011,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
 
        do {
                unsigned int i;
-               void *sibling;
+               void *sibling = NULL;
                struct xa_node *node;
 
                node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
@@ -1021,7 +1021,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
                for (i = 0; i < XA_CHUNK_SIZE; i++) {
                        if ((i & mask) == 0) {
                                RCU_INIT_POINTER(node->slots[i], entry);
-                               sibling = xa_mk_sibling(0);
+                               sibling = xa_mk_sibling(i);
                        } else {
                                RCU_INIT_POINTER(node->slots[i], sibling);
                        }
@@ -1041,9 +1041,10 @@ EXPORT_SYMBOL_GPL(xas_split_alloc);
  * xas_split() - Split a multi-index entry into smaller entries.
  * @xas: XArray operation state.
  * @entry: New entry to store in the array.
- * @order: New entry order.
+ * @order: Current entry order.
  *
- * The value in the entry is copied to all the replacement entries.
+ * The size of the new entries is set in @xas.  The value in @entry is
+ * copied to all the replacement entries.
  *
  * Context: Any context.  The caller should hold the xa_lock.
  */
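
The xa_mk_sibling(i) fix matters because inside a split node each canonical entry sits at a multiple of (1 << order) and the sibling slots after it must point back at that slot, not at offset 0. A userspace sketch of the slot layout the loop above builds (names are hypothetical):

    #include <stdio.h>

    #define CHUNK 64    /* XA_CHUNK_SIZE on typical builds */

    int main(void)
    {
        unsigned int order = 4;            /* each entry spans 16 slots */
        unsigned int mask = (1u << order) - 1;
        unsigned int canonical[CHUNK];
        unsigned int i;

        /* the buggy code pointed every sibling at slot 0; the fix points
         * each sibling at the canonical slot that starts its own run
         */
        for (i = 0; i < CHUNK; i++)
            canonical[i] = i & ~mask;

        printf("slot 17 -> canonical %u, slot 3 -> canonical %u\n",
               canonical[17], canonical[3]);    /* 16 and 0 */
        return 0;
    }
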
index e405796..ef7d2da 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1535,6 +1535,10 @@ struct page *get_dump_page(unsigned long addr)
                                      FOLL_FORCE | FOLL_DUMP | FOLL_GET);
        if (locked)
                mmap_read_unlock(mm);
+
+       if (ret == 1 && is_page_poisoned(page))
+               return NULL;
+
        return (ret == 1) ? page : NULL;
 }
 #endif /* CONFIG_ELF_CORE */
index 86f2b94..6ef8f5e 100644 (file)
@@ -618,7 +618,7 @@ void __kmap_local_sched_out(void)
                int idx;
 
                /* With debug all even slots are unmapped and act as guard */
-               if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) {
+               if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
                        WARN_ON_ONCE(!pte_none(pteval));
                        continue;
                }
@@ -654,7 +654,7 @@ void __kmap_local_sched_in(void)
                int idx;
 
                /* With debug all even slots are unmapped and act as guard */
-               if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) {
+               if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
                        WARN_ON_ONCE(!pte_none(pteval));
                        continue;
                }
index 5b1ab1f..a86a58e 100644 (file)
@@ -280,6 +280,17 @@ static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
                nrg->reservation_counter =
                        &h_cg->rsvd_hugepage[hstate_index(h)];
                nrg->css = &h_cg->css;
+               /*
+                * The caller holds exactly one h_cg->css reference for the
+                * whole contiguous reservation region. But this area might
+                * be scattered when some file_regions already reside in it.
+                * As a result, many file_regions may share only one css
+                * reference. To ensure that each file_region holds exactly
+                * one h_cg->css reference, we do a css_get for each
+                * file_region and leave the reference held by the caller
+                * untouched.
+                */
+               css_get(&h_cg->css);
                if (!resv->pages_per_hpage)
                        resv->pages_per_hpage = pages_per_huge_page(h);
                /* pages_per_hpage should be the same for all entries in
@@ -293,6 +304,14 @@ static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
 #endif
 }
 
+static void put_uncharge_info(struct file_region *rg)
+{
+#ifdef CONFIG_CGROUP_HUGETLB
+       if (rg->css)
+               css_put(rg->css);
+#endif
+}
+
 static bool has_same_uncharge_info(struct file_region *rg,
                                   struct file_region *org)
 {
@@ -316,6 +335,7 @@ static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
                prg->to = rg->to;
 
                list_del(&rg->link);
+               put_uncharge_info(rg);
                kfree(rg);
 
                rg = prg;
@@ -327,6 +347,7 @@ static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
                nrg->from = rg->from;
 
                list_del(&rg->link);
+               put_uncharge_info(rg);
                kfree(rg);
        }
 }
@@ -662,7 +683,7 @@ retry:
 
                        del += t - f;
                        hugetlb_cgroup_uncharge_file_region(
-                               resv, rg, t - f);
+                               resv, rg, t - f, false);
 
                        /* New entry for end of split region */
                        nrg->from = t;
@@ -683,7 +704,7 @@ retry:
                if (f <= rg->from && t >= rg->to) { /* Remove entire region */
                        del += rg->to - rg->from;
                        hugetlb_cgroup_uncharge_file_region(resv, rg,
-                                                           rg->to - rg->from);
+                                                           rg->to - rg->from, true);
                        list_del(&rg->link);
                        kfree(rg);
                        continue;
@@ -691,13 +712,13 @@ retry:
 
                if (f <= rg->from) {    /* Trim beginning of region */
                        hugetlb_cgroup_uncharge_file_region(resv, rg,
-                                                           t - rg->from);
+                                                           t - rg->from, false);
 
                        del += t - rg->from;
                        rg->from = t;
                } else {                /* Trim end of region */
                        hugetlb_cgroup_uncharge_file_region(resv, rg,
-                                                           rg->to - f);
+                                                           rg->to - f, false);
 
                        del += rg->to - f;
                        rg->to = f;
@@ -5187,6 +5208,10 @@ bool hugetlb_reserve_pages(struct inode *inode,
                         */
                        long rsv_adjust;
 
+                       /*
+                        * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
+                        * reference to h_cg->css. See comment below for detail.
+                        */
                        hugetlb_cgroup_uncharge_cgroup_rsvd(
                                hstate_index(h),
                                (chg - add) * pages_per_huge_page(h), h_cg);
@@ -5194,6 +5219,14 @@ bool hugetlb_reserve_pages(struct inode *inode,
                        rsv_adjust = hugepage_subpool_put_pages(spool,
                                                                chg - add);
                        hugetlb_acct_memory(h, -rsv_adjust);
+               } else if (h_cg) {
+                       /*
+                        * The file_regions will hold their own reference to
+                        * h_cg->css. So we should release the reference held
+                        * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
+                        * done.
+                        */
+                       hugetlb_cgroup_put_rsvd_cgroup(h_cg);
                }
        }
        return true;
index f68b51f..603a131 100644 (file)
@@ -391,7 +391,8 @@ void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, unsigned long start,
 
 void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
                                         struct file_region *rg,
-                                        unsigned long nr_pages)
+                                        unsigned long nr_pages,
+                                        bool region_del)
 {
        if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages)
                return;
@@ -400,7 +401,12 @@ void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
            !resv->reservation_counter) {
                page_counter_uncharge(rg->reservation_counter,
                                      nr_pages * resv->pages_per_hpage);
-               css_put(rg->css);
+               /*
+                * Only do css_put(rg->css) when we delete the entire region
+                * because one file_region must hold exactly one css reference.
+                */
+               if (region_del)
+                       css_put(rg->css);
        }
 }
 
index 1432fee..cb3c5e0 100644 (file)
@@ -97,6 +97,26 @@ static inline void set_page_refcounted(struct page *page)
        set_page_count(page, 1);
 }
 
+/*
+ * When the kernel touches a user page, that page may already have been
+ * marked hwpoisoned while remaining mapped in user space. If the kernel
+ * can guarantee data integrity and still complete the operation without
+ * the page, it should check the poison status first and avoid touching
+ * the page rather than panic; dumping core for a process killed by a
+ * fatal signal is one case matching this scenario. If the kernel cannot
+ * guarantee data integrity, it is better not to call this function and
+ * instead let the access to the poisoned page panic as usual.
+ */
+static inline bool is_page_poisoned(struct page *page)
+{
+       if (PageHWPoison(page))
+               return true;
+       else if (PageHuge(page) && PageHWPoison(compound_head(page)))
+               return true;
+
+       return false;
+}
+
 extern unsigned long highest_memmap_pfn;
 
 /*
index 3b8ec93..d53c91f 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/debugfs.h>
 #include <linux/kcsan-checks.h>
 #include <linux/kfence.h>
+#include <linux/kmemleak.h>
 #include <linux/list.h>
 #include <linux/lockdep.h>
 #include <linux/memblock.h>
@@ -480,6 +481,14 @@ static bool __init kfence_init_pool(void)
                addr += 2 * PAGE_SIZE;
        }
 
+       /*
+        * The pool is live and will never be deallocated from this point on.
+        * Remove the pool object from the kmemleak object tree, as it would
+        * otherwise overlap with allocations returned by kfence_alloc(), which
+        * are registered with kmemleak through the slab post-alloc hook.
+        */
+       kmemleak_free(__kfence_pool);
+
        return true;
 
 err:
index c0014d3..fe6e3ae 100644 (file)
@@ -97,6 +97,7 @@
 #include <linux/atomic.h>
 
 #include <linux/kasan.h>
+#include <linux/kfence.h>
 #include <linux/kmemleak.h>
 #include <linux/memory_hotplug.h>
 
@@ -589,7 +590,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
        atomic_set(&object->use_count, 1);
        object->flags = OBJECT_ALLOCATED;
        object->pointer = ptr;
-       object->size = size;
+       object->size = kfence_ksize((void *)ptr) ?: size;
        object->excess_ref = 0;
        object->min_count = min_count;
        object->count = 0;                      /* white color initially */
index 5efa07f..550405f 100644 (file)
@@ -166,7 +166,7 @@ static int __init init_zero_pfn(void)
        zero_pfn = page_to_pfn(ZERO_PAGE(0));
        return 0;
 }
-core_initcall(init_zero_pfn);
+early_initcall(init_zero_pfn);
 
 void mm_trace_rss_stat(struct mm_struct *mm, int member, long count)
 {
index 61ee40e..459d195 100644 (file)
@@ -501,10 +501,33 @@ static int mn_hlist_invalidate_range_start(
                                                "");
                                WARN_ON(mmu_notifier_range_blockable(range) ||
                                        _ret != -EAGAIN);
+                               /*
+                                * We call all the notifiers on any EAGAIN;
+                                * a notifier has no way to know whether its
+                                * own start method failed, so a start that
+                                * returns EAGAIN must not also implement end.
+                                */
+                               WARN_ON(ops->invalidate_range_end);
                                ret = _ret;
                        }
                }
        }
+
+       if (ret) {
+               /*
+                * Must be non-blocking to get here.  If there are multiple
+                * notifiers and one or more failed to start, any whose start
+                * succeeded are expecting their end to be called.  Do so now.
+                */
+               hlist_for_each_entry_rcu(subscription, &subscriptions->list,
+                                        hlist, srcu_read_lock_held(&srcu)) {
+                       if (!subscription->ops->invalidate_range_end)
+                               continue;
+
+                       subscription->ops->invalidate_range_end(subscription,
+                                                               range);
+               }
+       }
        srcu_read_unlock(&srcu, id);
 
        return ret;
index eb34d20..9e35b63 100644 (file)
@@ -2833,6 +2833,22 @@ void wait_on_page_writeback(struct page *page)
 }
 EXPORT_SYMBOL_GPL(wait_on_page_writeback);
 
+/*
+ * Wait for a page to complete writeback.  Returns -EINTR if we get a
+ * fatal signal while waiting.
+ */
+int wait_on_page_writeback_killable(struct page *page)
+{
+       while (PageWriteback(page)) {
+               trace_wait_on_page_writeback(page, page_mapping(page));
+               if (wait_on_page_bit_killable(page, PG_writeback))
+                       return -EINTR;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(wait_on_page_writeback_killable);
+
 /**
  * wait_for_stable_page() - wait for writeback to finish, if necessary.
  * @page:      The page to wait on.
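
wait_on_page_writeback_killable() exists for callers that should abort on a fatal signal instead of sleeping uninterruptibly in wait_on_page_writeback(). A usage sketch, assuming a page-fault-style context where VM_FAULT_RETRY is the appropriate bail-out (wait_or_retry is a hypothetical wrapper):

    #include <linux/mm.h>
    #include <linux/pagemap.h>

    /* in a fault path that must not sleep uninterruptibly on writeback */
    static vm_fault_t wait_or_retry(struct page *page)
    {
        if (wait_on_page_writeback_killable(page) < 0)
            return VM_FAULT_RETRY;    /* fatal signal pending */
        return 0;
    }
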
index 65cdf84..655dc58 100644 (file)
@@ -77,12 +77,14 @@ static void unpoison_page(struct page *page)
        void *addr;
 
        addr = kmap_atomic(page);
+       kasan_disable_current();
        /*
         * Page poisoning when enabled poisons each and every page
         * that is freed to buddy. Thus no extra check is done to
         * see if a page was poisoned.
         */
-       check_poison_mem(addr, PAGE_SIZE);
+       check_poison_mem(kasan_reset_tag(addr), PAGE_SIZE);
+       kasan_enable_current();
        kunmap_atomic(addr);
 }
 
index 18b768a..095d7ea 100644 (file)
@@ -87,7 +87,7 @@ extern spinlock_t pcpu_lock;
 
 extern struct list_head *pcpu_chunk_lists;
 extern int pcpu_nr_slots;
-extern int pcpu_nr_empty_pop_pages;
+extern int pcpu_nr_empty_pop_pages[];
 
 extern struct pcpu_chunk *pcpu_first_chunk;
 extern struct pcpu_chunk *pcpu_reserved_chunk;
index c8400a2..f6026db 100644 (file)
@@ -145,6 +145,7 @@ static int percpu_stats_show(struct seq_file *m, void *v)
        int slot, max_nr_alloc;
        int *buffer;
        enum pcpu_chunk_type type;
+       int nr_empty_pop_pages;
 
 alloc_buffer:
        spin_lock_irq(&pcpu_lock);
@@ -165,7 +166,11 @@ alloc_buffer:
                goto alloc_buffer;
        }
 
-#define PL(X) \
+       nr_empty_pop_pages = 0;
+       for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++)
+               nr_empty_pop_pages += pcpu_nr_empty_pop_pages[type];
+
+#define PL(X)                                                          \
        seq_printf(m, "  %-20s: %12lld\n", #X, (long long int)pcpu_stats_ai.X)
 
        seq_printf(m,
@@ -196,7 +201,7 @@ alloc_buffer:
        PU(nr_max_chunks);
        PU(min_alloc_size);
        PU(max_alloc_size);
-       P("empty_pop_pages", pcpu_nr_empty_pop_pages);
+       P("empty_pop_pages", nr_empty_pop_pages);
        seq_putc(m, '\n');
 
 #undef PU
index 6596a0a..2330811 100644 (file)
@@ -173,10 +173,10 @@ struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */
 static LIST_HEAD(pcpu_map_extend_chunks);
 
 /*
- * The number of empty populated pages, protected by pcpu_lock.  The
- * reserved chunk doesn't contribute to the count.
+ * The number of empty populated pages by chunk type, protected by pcpu_lock.
+ * The reserved chunk doesn't contribute to the count.
  */
-int pcpu_nr_empty_pop_pages;
+int pcpu_nr_empty_pop_pages[PCPU_NR_CHUNK_TYPES];
 
 /*
  * The number of populated pages in use by the allocator, protected by
@@ -556,7 +556,7 @@ static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
 {
        chunk->nr_empty_pop_pages += nr;
        if (chunk != pcpu_reserved_chunk)
-               pcpu_nr_empty_pop_pages += nr;
+               pcpu_nr_empty_pop_pages[pcpu_chunk_type(chunk)] += nr;
 }
 
 /*
@@ -1832,7 +1832,7 @@ area_found:
                mutex_unlock(&pcpu_alloc_mutex);
        }
 
-       if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
+       if (pcpu_nr_empty_pop_pages[type] < PCPU_EMPTY_POP_PAGES_LOW)
                pcpu_schedule_balance_work();
 
        /* clear the areas and return address relative to base address */
@@ -2000,7 +2000,7 @@ retry_pop:
                pcpu_atomic_alloc_failed = false;
        } else {
                nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
-                                 pcpu_nr_empty_pop_pages,
+                                 pcpu_nr_empty_pop_pages[type],
                                  0, PCPU_EMPTY_POP_PAGES_HIGH);
        }
 
@@ -2580,7 +2580,7 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 
        /* link the first chunk in */
        pcpu_first_chunk = chunk;
-       pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
+       pcpu_nr_empty_pop_pages[PCPU_CHUNK_ROOT] = pcpu_first_chunk->nr_empty_pop_pages;
        pcpu_chunk_relocate(pcpu_first_chunk, -1);
 
        /* include all regions of the first chunk */
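
Turning pcpu_nr_empty_pop_pages into a per-chunk-type array keeps memcg-aware chunks from masking (or spuriously triggering) the low-watermark check for the root type, since each allocation now consults only its own type's count. A reduced sketch of the bookkeeping, with hypothetical constants:

    enum chunk_type { CHUNK_ROOT, CHUNK_MEMCG, NR_CHUNK_TYPES };

    static int nr_empty_pop_pages[NR_CHUNK_TYPES];

    static void update_empty_pages(enum chunk_type type, int nr)
    {
        nr_empty_pop_pages[type] += nr;
    }

    /* the low-watermark check now consults only the allocating type, so a
     * surplus of empty memcg pages no longer hides a starved root chunk
     */
    static int needs_balance(enum chunk_type type, int low_watermark)
    {
        return nr_empty_pop_pages[type] < low_watermark;
    }
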
index b5dafa7..9d889ad 100644 (file)
@@ -1346,8 +1346,22 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
                        page = list_entry(pos, struct page, lru);
 
                        zhdr = page_address(page);
-                       if (test_bit(PAGE_HEADLESS, &page->private))
+                       if (test_bit(PAGE_HEADLESS, &page->private)) {
+                               /*
+                                * For non-headless pages, we wait to do this
+                                * until we have the page lock to avoid racing
+                                * with __z3fold_alloc(). Headless pages don't
+                                * have a lock (and __z3fold_alloc() will never
+                                * see them), but we still need to test and set
+                                * PAGE_CLAIMED to avoid racing with
+                                * z3fold_free(), so just do it now before
+                                * leaving the loop.
+                                */
+                               if (test_and_set_bit(PAGE_CLAIMED, &page->private))
+                                       continue;
+
                                break;
+                       }
 
                        if (kref_get_unless_zero(&zhdr->refcount) == 0) {
                                zhdr = NULL;
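
The z3fold hunk closes a race on headless pages by making reclaim claim the
page before leaving the loop: test_and_set_bit() is atomic, so exactly one
of z3fold_reclaim_page() and z3fold_free() can win PAGE_CLAIMED even when
they run concurrently. The claim-or-skip idiom, condensed into a sketch
(hypothetical helper, same bit and field as in the hunk above):

static bool z3fold_try_claim(struct page *page)
{
        /* True only for the single caller that set the bit first;
         * every later caller sees it already set and must back off.
         */
        return !test_and_set_bit(PAGE_CLAIMED, &page->private);
}
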
index e48f7ac..3ddd66e 100644
@@ -702,7 +702,6 @@ MODULE_LICENSE("GPL");
 
 MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
 MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
-MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
 MODULE_VERSION(BATADV_SOURCE_VERSION);
 MODULE_ALIAS_RTNL_LINK("batadv");
 MODULE_ALIAS_GENL_FAMILY(BATADV_NL_NAME);
index f876128..434b4f0 100644
@@ -890,6 +890,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
        hlist_for_each_entry(vlan, &orig_node->vlan_list, list) {
                tt_vlan->vid = htons(vlan->vid);
                tt_vlan->crc = htonl(vlan->tt.crc);
+               tt_vlan->reserved = 0;
 
                tt_vlan++;
        }
@@ -973,6 +974,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
 
                tt_vlan->vid = htons(vlan->vid);
                tt_vlan->crc = htonl(vlan->tt.crc);
+               tt_vlan->reserved = 0;
 
                tt_vlan++;
        }
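
Both translation-table hunks plug the same infoleak: the TVLV VLAN entry has
a reserved member, and the buffer these entries are written into is not
zeroed at allocation, so a skipped field would put stale kernel heap bytes
on the wire. Setting reserved explicitly is the narrow fix; the broader
alternative, sketched here with hypothetical names rather than the actual
batman-adv allocation site, is zeroing the whole buffer up front:

        /* kzalloc() clears the entire TVLV area, so reserved and padding
         * bytes can never carry stale heap contents, at the cost of
         * touching the whole buffer even when every field is then set.
         */
        void *tvlv_buf = kzalloc(tvlv_len, GFP_ATOMIC);
        if (!tvlv_buf)
                return -ENOMEM;
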
index b895038..1e24d9a 100644
@@ -128,6 +128,8 @@ br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
 {
        if (!fdb->dst)
                return;
+       if (test_bit(BR_FDB_LOCAL, &fdb->flags))
+               return;
 
        switch (type) {
        case RTM_DELNEIGH:
index 0e5c37b..909b9e6 100644
@@ -86,6 +86,8 @@ MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
 MODULE_ALIAS("can-proto-2");
 
+#define BCM_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
+
 /*
  * easy access to the first 64 bit of can(fd)_frame payload. cp->data is
  * 64 bit aligned so the offset has to be multiples of 8 which is ensured
@@ -1292,7 +1294,7 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
                /* no bound device as default => check msg_name */
                DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
 
-               if (msg->msg_namelen < CAN_REQUIRED_SIZE(*addr, can_ifindex))
+               if (msg->msg_namelen < BCM_MIN_NAMELEN)
                        return -EINVAL;
 
                if (addr->can_family != AF_CAN)
@@ -1534,7 +1536,7 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
        struct net *net = sock_net(sk);
        int ret = 0;
 
-       if (len < CAN_REQUIRED_SIZE(*addr, can_ifindex))
+       if (len < BCM_MIN_NAMELEN)
                return -EINVAL;
 
        lock_sock(sk);
@@ -1616,8 +1618,8 @@ static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
        sock_recv_ts_and_drops(msg, sk, skb);
 
        if (msg->msg_name) {
-               __sockaddr_check_size(sizeof(struct sockaddr_can));
-               msg->msg_namelen = sizeof(struct sockaddr_can);
+               __sockaddr_check_size(BCM_MIN_NAMELEN);
+               msg->msg_namelen = BCM_MIN_NAMELEN;
                memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
        }
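
BCM_MIN_NAMELEN (like the ISOTP_ and RAW_ equivalents further down) ties the
address-length checks and the reported msg_namelen to the part of struct
sockaddr_can that bcm actually uses, instead of the full structure size.
CAN_REQUIRED_SIZE() is essentially "offset of the member plus its size"; an
approximation of what the macro expands to (a sketch, not the uapi text):

#define BCM_MIN_NAMELEN_SKETCH                                  \
        (offsetof(struct sockaddr_can, can_ifindex) +           \
         sizeof(((struct sockaddr_can *)0)->can_ifindex))
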
 
index 3ef7f78..9f94ad3 100644
@@ -77,6 +77,8 @@ MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
 MODULE_ALIAS("can-proto-6");
 
+#define ISOTP_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.tp)
+
 #define SINGLE_MASK(id) (((id) & CAN_EFF_FLAG) ? \
                         (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
                         (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
@@ -196,7 +198,7 @@ static int isotp_send_fc(struct sock *sk, int ae, u8 flowstatus)
        nskb->dev = dev;
        can_skb_set_owner(nskb, sk);
        ncf = (struct canfd_frame *)nskb->data;
-       skb_put(nskb, so->ll.mtu);
+       skb_put_zero(nskb, so->ll.mtu);
 
        /* create & send flow control reply */
        ncf->can_id = so->txid;
@@ -215,8 +217,7 @@ static int isotp_send_fc(struct sock *sk, int ae, u8 flowstatus)
        if (ae)
                ncf->data[0] = so->opt.ext_address;
 
-       if (so->ll.mtu == CANFD_MTU)
-               ncf->flags = so->ll.tx_flags;
+       ncf->flags = so->ll.tx_flags;
 
        can_send_ret = can_send(nskb, 1);
        if (can_send_ret)
@@ -780,7 +781,7 @@ isotp_tx_burst:
                can_skb_prv(skb)->skbcnt = 0;
 
                cf = (struct canfd_frame *)skb->data;
-               skb_put(skb, so->ll.mtu);
+               skb_put_zero(skb, so->ll.mtu);
 
                /* create consecutive frame */
                isotp_fill_dataframe(cf, so, ae, 0);
@@ -790,8 +791,7 @@ isotp_tx_burst:
                so->tx.sn %= 16;
                so->tx.bs++;
 
-               if (so->ll.mtu == CANFD_MTU)
-                       cf->flags = so->ll.tx_flags;
+               cf->flags = so->ll.tx_flags;
 
                skb->dev = dev;
                can_skb_set_owner(skb, sk);
@@ -897,7 +897,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
        so->tx.idx = 0;
 
        cf = (struct canfd_frame *)skb->data;
-       skb_put(skb, so->ll.mtu);
+       skb_put_zero(skb, so->ll.mtu);
 
        /* check for single frame transmission depending on TX_DL */
        if (size <= so->tx.ll_dl - SF_PCI_SZ4 - ae - off) {
@@ -939,8 +939,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
        }
 
        /* send the first or only CAN frame */
-       if (so->ll.mtu == CANFD_MTU)
-               cf->flags = so->ll.tx_flags;
+       cf->flags = so->ll.tx_flags;
 
        skb->dev = dev;
        skb->sk = sk;
@@ -989,7 +988,8 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
        sock_recv_timestamp(msg, sk, skb);
 
        if (msg->msg_name) {
-               msg->msg_namelen = sizeof(struct sockaddr_can);
+               __sockaddr_check_size(ISOTP_MIN_NAMELEN);
+               msg->msg_namelen = ISOTP_MIN_NAMELEN;
                memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
        }
 
@@ -1059,7 +1059,7 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
        int notify_enetdown = 0;
        int do_rx_reg = 1;
 
-       if (len < CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.tp))
+       if (len < ISOTP_MIN_NAMELEN)
                return -EINVAL;
 
        /* do not register frame reception for functional addressing */
@@ -1155,13 +1155,13 @@ static int isotp_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
        if (peer)
                return -EOPNOTSUPP;
 
-       memset(addr, 0, sizeof(*addr));
+       memset(addr, 0, ISOTP_MIN_NAMELEN);
        addr->can_family = AF_CAN;
        addr->can_ifindex = so->ifindex;
        addr->can_addr.tp.rx_id = so->rxid;
        addr->can_addr.tp.tx_id = so->txid;
 
-       return sizeof(*addr);
+       return ISOTP_MIN_NAMELEN;
 }
 
 static int isotp_setsockopt(struct socket *sock, int level, int optname,
@@ -1228,7 +1228,8 @@ static int isotp_setsockopt(struct socket *sock, int level, int optname,
                        if (ll.mtu != CAN_MTU && ll.mtu != CANFD_MTU)
                                return -EINVAL;
 
-                       if (ll.mtu == CAN_MTU && ll.tx_dl > CAN_MAX_DLEN)
+                       if (ll.mtu == CAN_MTU &&
+                           (ll.tx_dl > CAN_MAX_DLEN || ll.tx_flags != 0))
                                return -EINVAL;
 
                        memcpy(&so->ll, &ll, sizeof(ll));
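
The repeated skb_put() -> skb_put_zero() conversions in this file matter
because skb data is not zeroed at allocation: skb_put() only advances the
tail pointer, so any of the so->ll.mtu bytes the frame logic does not
overwrite would leak stale heap contents onto the CAN bus. skb_put_zero()
is effectively the two-step sequence below:

        void *tail = skb_put(skb, len); /* reserve len bytes at the tail */
        memset(tail, 0, len);           /* and clear them deterministically */
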
index 37b47a3..139d947 100644
@@ -60,6 +60,8 @@ MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
 MODULE_ALIAS("can-proto-1");
 
+#define RAW_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
+
 #define MASK_ALL 0
 
 /* A raw socket has a list of can_filters attached to it, each receiving
@@ -394,7 +396,7 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
        int err = 0;
        int notify_enetdown = 0;
 
-       if (len < CAN_REQUIRED_SIZE(*addr, can_ifindex))
+       if (len < RAW_MIN_NAMELEN)
                return -EINVAL;
        if (addr->can_family != AF_CAN)
                return -EINVAL;
@@ -475,11 +477,11 @@ static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
        if (peer)
                return -EOPNOTSUPP;
 
-       memset(addr, 0, sizeof(*addr));
+       memset(addr, 0, RAW_MIN_NAMELEN);
        addr->can_family  = AF_CAN;
        addr->can_ifindex = ro->ifindex;
 
-       return sizeof(*addr);
+       return RAW_MIN_NAMELEN;
 }
 
 static int raw_setsockopt(struct socket *sock, int level, int optname,
@@ -739,7 +741,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
        if (msg->msg_name) {
                DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
 
-               if (msg->msg_namelen < CAN_REQUIRED_SIZE(*addr, can_ifindex))
+               if (msg->msg_namelen < RAW_MIN_NAMELEN)
                        return -EINVAL;
 
                if (addr->can_family != AF_CAN)
@@ -832,8 +834,8 @@ static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
        sock_recv_ts_and_drops(msg, sk, skb);
 
        if (msg->msg_name) {
-               __sockaddr_check_size(sizeof(struct sockaddr_can));
-               msg->msg_namelen = sizeof(struct sockaddr_can);
+               __sockaddr_check_size(RAW_MIN_NAMELEN);
+               msg->msg_namelen = RAW_MIN_NAMELEN;
                memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
        }
 
index 6c5967e..af8c1ea 100644
@@ -1184,6 +1184,18 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
                        return -ENOMEM;
 
                for_each_netdev(net, d) {
+                       struct netdev_name_node *name_node;
+                       list_for_each_entry(name_node, &d->name_node->list, list) {
+                               if (!sscanf(name_node->name, name, &i))
+                                       continue;
+                               if (i < 0 || i >= max_netdevices)
+                                       continue;
+
+                               /*  avoid cases where sscanf is not exact inverse of printf */
+                               snprintf(buf, IFNAMSIZ, name, i);
+                               if (!strncmp(buf, name_node->name, IFNAMSIZ))
+                                       set_bit(i, inuse);
+                       }
                        if (!sscanf(d->name, name, &i))
                                continue;
                        if (i < 0 || i >= max_netdevices)
@@ -4294,6 +4306,13 @@ static inline void ____napi_schedule(struct softnet_data *sd,
                 */
                thread = READ_ONCE(napi->thread);
                if (thread) {
+                       /* Avoid doing set_bit() if the thread is in
+                        * INTERRUPTIBLE state, because napi_thread_wait()
+                        * makes sure to proceed with napi polling
+                        * if the thread is explicitly woken from here.
+                        */
+                       if (READ_ONCE(thread->state) != TASK_INTERRUPTIBLE)
+                               set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
                        wake_up_process(thread);
                        return;
                }
@@ -6486,6 +6505,7 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
                WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
 
                new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED |
+                             NAPIF_STATE_SCHED_THREADED |
                              NAPIF_STATE_PREFER_BUSY_POLL);
 
                /* If STATE_MISSED was set, leave STATE_SCHED set,
@@ -6968,19 +6988,29 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
 
 static int napi_thread_wait(struct napi_struct *napi)
 {
+       bool woken = false;
+
        set_current_state(TASK_INTERRUPTIBLE);
 
-       while (!kthread_should_stop() && !napi_disable_pending(napi)) {
-               if (test_bit(NAPI_STATE_SCHED, &napi->state)) {
+       while (!kthread_should_stop()) {
+               /* Testing SCHED_THREADED bit here to make sure the current
+                * kthread owns this napi and could poll on this napi.
+                * Testing SCHED bit is not enough because SCHED bit might be
+                * set by some other busy poll thread or by napi_disable().
+                */
+               if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) {
                        WARN_ON(!list_empty(&napi->poll_list));
                        __set_current_state(TASK_RUNNING);
                        return 0;
                }
 
                schedule();
+               /* woken being true indicates this thread owns this napi. */
+               woken = true;
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
+
        return -1;
 }
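
Taken together, the dev.c hunks above form a small ownership handshake for
threaded NAPI: ____napi_schedule() grants the kthread ownership either by
setting NAPI_STATE_SCHED_THREADED or, when the thread is already in
TASK_INTERRUPTIBLE, by the wakeup itself; napi_thread_wait() polls only once
it has seen one of those two signals; napi_complete_done() clears the bit
when the poll is over. A condensed sketch of the two sides:

        /* wake side (____napi_schedule) */
        if (READ_ONCE(thread->state) != TASK_INTERRUPTIBLE)
                set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
        wake_up_process(thread);

        /* wait side (napi_thread_wait): poll only once ownership is proven */
        bool owns_napi = test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) ||
                         woken;
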
 
@@ -11346,7 +11376,7 @@ static void __net_exit default_device_exit(struct net *net)
                        continue;
 
                /* Leave virtual devices for the generic cleanup */
-               if (dev->rtnl_link_ops)
+               if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
                        continue;
 
                /* Push remaining network devices to init_net */
index 571f191..db65ce6 100644
@@ -1053,6 +1053,20 @@ static int net_dm_hw_monitor_start(struct netlink_ext_ack *extack)
        return 0;
 
 err_module_put:
+       for_each_possible_cpu(cpu) {
+               struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
+               struct sk_buff *skb;
+
+               del_timer_sync(&hw_data->send_timer);
+               cancel_work_sync(&hw_data->dm_alert_work);
+               while ((skb = __skb_dequeue(&hw_data->drop_queue))) {
+                       struct devlink_trap_metadata *hw_metadata;
+
+                       hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;
+                       net_dm_hw_metadata_free(hw_metadata);
+                       consume_skb(skb);
+               }
+       }
        module_put(THIS_MODULE);
        return rc;
 }
@@ -1134,6 +1148,15 @@ static int net_dm_trace_on_set(struct netlink_ext_ack *extack)
 err_unregister_trace:
        unregister_trace_kfree_skb(ops->kfree_skb_probe, NULL);
 err_module_put:
+       for_each_possible_cpu(cpu) {
+               struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
+               struct sk_buff *skb;
+
+               del_timer_sync(&data->send_timer);
+               cancel_work_sync(&data->dm_alert_work);
+               while ((skb = __skb_dequeue(&data->drop_queue)))
+                       consume_skb(skb);
+       }
        module_put(THIS_MODULE);
        return rc;
 }
index 0c01bd8..fb3bcba 100644
@@ -237,37 +237,62 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
 }
 EXPORT_SYMBOL(__dst_destroy_metrics_generic);
 
-static struct dst_ops md_dst_ops = {
-       .family =               AF_UNSPEC,
-};
+struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie)
+{
+       return NULL;
+}
 
-static int dst_md_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
+u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old)
 {
-       WARN_ONCE(1, "Attempting to call output on metadata dst\n");
-       kfree_skb(skb);
-       return 0;
+       return NULL;
 }
 
-static int dst_md_discard(struct sk_buff *skb)
+struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
+                                            struct sk_buff *skb,
+                                            const void *daddr)
 {
-       WARN_ONCE(1, "Attempting to call input on metadata dst\n");
-       kfree_skb(skb);
-       return 0;
+       return NULL;
+}
+
+void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
+                              struct sk_buff *skb, u32 mtu,
+                              bool confirm_neigh)
+{
+}
+EXPORT_SYMBOL_GPL(dst_blackhole_update_pmtu);
+
+void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
+                           struct sk_buff *skb)
+{
+}
+EXPORT_SYMBOL_GPL(dst_blackhole_redirect);
+
+unsigned int dst_blackhole_mtu(const struct dst_entry *dst)
+{
+       unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+       return mtu ? : dst->dev->mtu;
 }
+EXPORT_SYMBOL_GPL(dst_blackhole_mtu);
+
+static struct dst_ops dst_blackhole_ops = {
+       .family         = AF_UNSPEC,
+       .neigh_lookup   = dst_blackhole_neigh_lookup,
+       .check          = dst_blackhole_check,
+       .cow_metrics    = dst_blackhole_cow_metrics,
+       .update_pmtu    = dst_blackhole_update_pmtu,
+       .redirect       = dst_blackhole_redirect,
+       .mtu            = dst_blackhole_mtu,
+};
 
 static void __metadata_dst_init(struct metadata_dst *md_dst,
                                enum metadata_type type, u8 optslen)
-
 {
        struct dst_entry *dst;
 
        dst = &md_dst->dst;
-       dst_init(dst, &md_dst_ops, NULL, 1, DST_OBSOLETE_NONE,
+       dst_init(dst, &dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE,
                 DST_METADATA | DST_NOCOUNT);
-
-       dst->input = dst_md_discard;
-       dst->output = dst_md_discard_out;
-
        memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
        md_dst->type = type;
 }
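
With the discard callbacks renamed, generalized and (partly) exported, any
protocol that needs a do-nothing dst can build its ops table from the
dst_blackhole_* helpers instead of carrying private copies; the ipv4
route.c hunk further down does exactly that. Sketch for a hypothetical
built-in user (modular code would need exports for the helpers that are
not EXPORT_SYMBOL_GPL'd above):

static struct dst_ops example_blackhole_ops = {
        .family         = AF_UNSPEC,    /* or the protocol's own family */
        .check          = dst_blackhole_check,
        .cow_metrics    = dst_blackhole_cow_metrics,
        .neigh_lookup   = dst_blackhole_neigh_lookup,
        .update_pmtu    = dst_blackhole_update_pmtu,
        .redirect       = dst_blackhole_redirect,
        .mtu            = dst_blackhole_mtu,
};
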
index adfdad2..9323d34 100644
@@ -5658,7 +5658,7 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
        if (unlikely(flags & ~(BPF_MTU_CHK_SEGS)))
                return -EINVAL;
 
-       if (unlikely(flags & BPF_MTU_CHK_SEGS && len_diff))
+       if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len)))
                return -EINVAL;
 
        dev = __dev_via_ifindex(dev, ifindex);
@@ -5668,7 +5668,11 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
        mtu = READ_ONCE(dev->mtu);
 
        dev_len = mtu + dev->hard_header_len;
-       skb_len = skb->len + len_diff; /* minus result pass check */
+
+       /* If set, use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */
+       skb_len = *mtu_len ? *mtu_len + dev->hard_header_len : skb->len;
+
+       skb_len += len_diff; /* minus result pass check */
        if (skb_len <= dev_len) {
                ret = BPF_MTU_CHK_RET_SUCCESS;
                goto out;
@@ -5713,6 +5717,10 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp,
        /* Add L2-header as dev MTU is L3 size */
        dev_len = mtu + dev->hard_header_len;
 
+       /* Use *mtu_len as input, L3 as iph->tot_len (like fib_lookup) */
+       if (*mtu_len)
+               xdp_len = *mtu_len + dev->hard_header_len;
+
        xdp_len += len_diff; /* minus result pass check */
        if (xdp_len > dev_len)
                ret = BPF_MTU_CHK_RET_FRAG_NEEDED;
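
After this filter.c change, *mtu_len doubles as an input to the
bpf_check_mtu() helper: a program may pass a non-zero L3 length (e.g. the
IPv4 header's tot_len, mirroring what bpf_fib_lookup() checks) and have it
validated instead of skb->len; BPF_MTU_CHK_SEGS remains incompatible with
both len_diff and a non-zero *mtu_len. A hedged tc-classifier-side sketch,
assuming iph points at an already bounds-checked IPv4 header:

        __u32 mtu_len = bpf_ntohs(iph->tot_len);  /* L3 length as input */

        /* ifindex 0 means "use skb->dev"; on return mtu_len holds the MTU */
        if (bpf_check_mtu(skb, 0, &mtu_len, 0, 0) != BPF_MTU_CHK_RET_SUCCESS)
                return TC_ACT_SHOT;
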
index 2ef2224..a96a4f5 100644
@@ -176,7 +176,7 @@ void skb_flow_get_icmp_tci(const struct sk_buff *skb,
         * avoid confusion with packets without such field
         */
        if (icmp_has_id(ih->type))
-               key_icmp->id = ih->un.echo.id ? : 1;
+               key_icmp->id = ih->un.echo.id ? ntohs(ih->un.echo.id) : 1;
        else
                key_icmp->id = 0;
 }
index e2982b3..8379719 100644
@@ -1379,7 +1379,7 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
                         * we can reinject the packet there.
                         */
                        n2 = NULL;
-                       if (dst) {
+                       if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
                                n2 = dst_neigh_lookup_skb(dst, skb);
                                if (n2)
                                        n1 = n2;
index 1bdcb33..3485b16 100644
@@ -2863,7 +2863,7 @@ static int do_setlink(const struct sk_buff *skb,
 
                        BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));
 
-                       err = af_ops->set_link_af(dev, af);
+                       err = af_ops->set_link_af(dev, af, extack);
                        if (err < 0) {
                                rcu_read_unlock();
                                goto errout;
index 1261512..5def3a2 100644
@@ -488,6 +488,7 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb
        if (unlikely(!msg))
                return -EAGAIN;
        sk_msg_init(msg);
+       skb_set_owner_r(skb, sk);
        return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
 }
 
@@ -790,7 +791,6 @@ static void sk_psock_tls_verdict_apply(struct sk_buff *skb, struct sock *sk, int
 {
        switch (verdict) {
        case __SK_REDIRECT:
-               skb_set_owner_r(skb, sk);
                sk_psock_skb_redirect(skb);
                break;
        case __SK_PASS:
@@ -808,10 +808,6 @@ int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
        rcu_read_lock();
        prog = READ_ONCE(psock->progs.skb_verdict);
        if (likely(prog)) {
-               /* We skip full set_owner_r here because if we do a SK_PASS
-                * or SK_DROP we can skip skb memory accounting and use the
-                * TLS context.
-                */
                skb->sk = psock->sk;
                tcp_skb_bpf_redirect_clear(skb);
                ret = sk_psock_bpf_run(psock, prog, skb);
@@ -880,12 +876,13 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
                kfree_skb(skb);
                goto out;
        }
-       skb_set_owner_r(skb, sk);
        prog = READ_ONCE(psock->progs.skb_verdict);
        if (likely(prog)) {
+               skb->sk = sk;
                tcp_skb_bpf_redirect_clear(skb);
                ret = sk_psock_bpf_run(psock, prog, skb);
                ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
+               skb->sk = NULL;
        }
        sk_psock_verdict_apply(psock, skb, ret);
 out:
@@ -956,12 +953,13 @@ static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
                kfree_skb(skb);
                goto out;
        }
-       skb_set_owner_r(skb, sk);
        prog = READ_ONCE(psock->progs.skb_verdict);
        if (likely(prog)) {
+               skb->sk = sk;
                tcp_skb_bpf_redirect_clear(skb);
                ret = sk_psock_bpf_run(psock, prog, skb);
                ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
+               skb->sk = NULL;
        }
        sk_psock_verdict_apply(psock, skb, ret);
 out:
index 0ed98f2..5ec90f9 100644
@@ -2132,16 +2132,10 @@ void skb_orphan_partial(struct sk_buff *skb)
        if (skb_is_tcp_pure_ack(skb))
                return;
 
-       if (can_skb_orphan_partial(skb)) {
-               struct sock *sk = skb->sk;
-
-               if (refcount_inc_not_zero(&sk->sk_refcnt)) {
-                       WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
-                       skb->destructor = sock_efree;
-               }
-       } else {
+       if (can_skb_orphan_partial(skb))
+               skb_set_owner_sk_safe(skb, skb->sk);
+       else
                skb_orphan(skb);
-       }
 }
 EXPORT_SYMBOL(skb_orphan_partial);
 
@@ -3440,6 +3434,32 @@ static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot)
        twsk_prot->twsk_slab = NULL;
 }
 
+static int tw_prot_init(const struct proto *prot)
+{
+       struct timewait_sock_ops *twsk_prot = prot->twsk_prot;
+
+       if (!twsk_prot)
+               return 0;
+
+       twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s",
+                                             prot->name);
+       if (!twsk_prot->twsk_slab_name)
+               return -ENOMEM;
+
+       twsk_prot->twsk_slab =
+               kmem_cache_create(twsk_prot->twsk_slab_name,
+                                 twsk_prot->twsk_obj_size, 0,
+                                 SLAB_ACCOUNT | prot->slab_flags,
+                                 NULL);
+       if (!twsk_prot->twsk_slab) {
+               pr_crit("%s: Can't create timewait sock SLAB cache!\n",
+                       prot->name);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
 static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
 {
        if (!rsk_prot)
@@ -3496,22 +3516,8 @@ int proto_register(struct proto *prot, int alloc_slab)
                if (req_prot_init(prot))
                        goto out_free_request_sock_slab;
 
-               if (prot->twsk_prot != NULL) {
-                       prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
-
-                       if (prot->twsk_prot->twsk_slab_name == NULL)
-                               goto out_free_request_sock_slab;
-
-                       prot->twsk_prot->twsk_slab =
-                               kmem_cache_create(prot->twsk_prot->twsk_slab_name,
-                                                 prot->twsk_prot->twsk_obj_size,
-                                                 0,
-                                                 SLAB_ACCOUNT |
-                                                 prot->slab_flags,
-                                                 NULL);
-                       if (prot->twsk_prot->twsk_slab == NULL)
-                               goto out_free_timewait_sock_slab;
-               }
+               if (tw_prot_init(prot))
+                       goto out_free_timewait_sock_slab;
        }
 
        mutex_lock(&proto_list_mutex);
index 0535497..858276e 100644
@@ -350,7 +350,8 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
                /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
                xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
                page = virt_to_head_page(data);
-               napi_direct &= !xdp_return_frame_no_direct();
+               if (napi_direct && xdp_return_frame_no_direct())
+                       napi_direct = false;
                page_pool_put_full_page(xa->page_pool, page, napi_direct);
                rcu_read_unlock();
                break;
index 1f73603..2be5c69 100644
@@ -319,6 +319,11 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        if (!ipv6_unicast_destination(skb))
                return 0;       /* discard, don't send a reset here */
 
+       if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
+               __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
+               return 0;
+       }
+
        if (dccp_bad_service_code(sk, service)) {
                dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
                goto drop;
index 4d4956e..3c3e56a 100644
@@ -795,8 +795,14 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
 
        list_for_each_entry(dp, &dst->ports, list) {
                err = dsa_port_setup(dp);
-               if (err)
+               if (err) {
+                       dsa_port_devlink_teardown(dp);
+                       dp->type = DSA_PORT_TYPE_UNUSED;
+                       err = dsa_port_devlink_setup(dp);
+                       if (err)
+                               goto teardown;
                        continue;
+               }
        }
 
        return 0;
@@ -1066,6 +1072,7 @@ static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master)
 {
        struct dsa_switch *ds = dp->ds;
        struct dsa_switch_tree *dst = ds->dst;
+       const struct dsa_device_ops *tag_ops;
        enum dsa_tag_protocol tag_protocol;
 
        tag_protocol = dsa_get_tag_protocol(dp, master);
@@ -1080,14 +1087,16 @@ static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master)
                 * nothing to do here.
                 */
        } else {
-               dst->tag_ops = dsa_tag_driver_get(tag_protocol);
-               if (IS_ERR(dst->tag_ops)) {
-                       if (PTR_ERR(dst->tag_ops) == -ENOPROTOOPT)
+               tag_ops = dsa_tag_driver_get(tag_protocol);
+               if (IS_ERR(tag_ops)) {
+                       if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
                                return -EPROBE_DEFER;
                        dev_warn(ds->dev, "No tagger for this switch\n");
                        dp->master = NULL;
-                       return PTR_ERR(dst->tag_ops);
+                       return PTR_ERR(tag_ops);
                }
+
+               dst->tag_ops = tag_ops;
        }
 
        dp->master = master;
index 4b5da89..3296327 100644
@@ -107,7 +107,7 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
        bool unset_vlan_filtering = br_vlan_enabled(info->br);
        struct dsa_switch_tree *dst = ds->dst;
        struct netlink_ext_ack extack = {0};
-       int err, i;
+       int err, port;
 
        if (dst->index == info->tree_index && ds->index == info->sw_index &&
            ds->ops->port_bridge_join)
@@ -124,13 +124,16 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
         * it. That is a good thing, because that lets us handle it and also
         * handle the case where the switch's vlan_filtering setting is global
         * (not per port). When that happens, the correct moment to trigger the
-        * vlan_filtering callback is only when the last port left this bridge.
+        * vlan_filtering callback is only when the last port leaves the last
+        * VLAN-aware bridge.
         */
        if (unset_vlan_filtering && ds->vlan_filtering_is_global) {
-               for (i = 0; i < ds->num_ports; i++) {
-                       if (i == info->port)
-                               continue;
-                       if (dsa_to_port(ds, i)->bridge_dev == info->br) {
+               for (port = 0; port < ds->num_ports; port++) {
+                       struct net_device *bridge_dev;
+
+                       bridge_dev = dsa_to_port(ds, port)->bridge_dev;
+
+                       if (bridge_dev && br_vlan_enabled(bridge_dev)) {
                                unset_vlan_filtering = false;
                                break;
                        }
index c6a383d..f9dcbad 100644
@@ -273,6 +273,7 @@ const struct link_mode_info link_mode_params[] = {
        __DEFINE_LINK_MODE_PARAMS(10000, KR, Full),
        [ETHTOOL_LINK_MODE_10000baseR_FEC_BIT] = {
                .speed  = SPEED_10000,
+               .lanes  = 1,
                .duplex = DUPLEX_FULL,
        },
        __DEFINE_LINK_MODE_PARAMS(20000, MLD2, Full),
@@ -562,3 +563,19 @@ void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops)
        rtnl_unlock();
 }
 EXPORT_SYMBOL_GPL(ethtool_set_ethtool_phy_ops);
+
+void
+ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings,
+                             enum ethtool_link_mode_bit_indices link_mode)
+{
+       const struct link_mode_info *link_info;
+
+       if (WARN_ON_ONCE(link_mode >= __ETHTOOL_LINK_MODE_MASK_NBITS))
+               return;
+
+       link_info = &link_mode_params[link_mode];
+       link_ksettings->base.speed = link_info->speed;
+       link_ksettings->lanes = link_info->lanes;
+       link_ksettings->base.duplex = link_info->duplex;
+}
+EXPORT_SYMBOL_GPL(ethtool_params_from_link_mode);
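
The new ethtool_params_from_link_mode() export lets a driver that knows only
its active link mode fill speed, lanes and duplex consistently from the
shared link_mode_params table (note the 10000baseR_FEC entry gaining
.lanes = 1 above), rather than open-coding the lookup that
__ethtool_get_link_ksettings() used to perform. Hedged driver-side usage,
for a hypothetical device:

static int example_get_link_ksettings(struct net_device *dev,
                                      struct ethtool_link_ksettings *ks)
{
        /* speed/lanes/duplex all derived from the one reported mode */
        ethtool_params_from_link_mode(ks, ETHTOOL_LINK_MODE_10000baseR_FEC_BIT);
        ks->base.port = PORT_FIBRE;
        return 0;
}
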
index 901b7de..e10bfcc 100644
@@ -169,8 +169,8 @@ int ethnl_set_eee(struct sk_buff *skb, struct genl_info *info)
        ethnl_update_bool32(&eee.eee_enabled, tb[ETHTOOL_A_EEE_ENABLED], &mod);
        ethnl_update_bool32(&eee.tx_lpi_enabled,
                            tb[ETHTOOL_A_EEE_TX_LPI_ENABLED], &mod);
-       ethnl_update_bool32(&eee.tx_lpi_timer, tb[ETHTOOL_A_EEE_TX_LPI_TIMER],
-                           &mod);
+       ethnl_update_u32(&eee.tx_lpi_timer, tb[ETHTOOL_A_EEE_TX_LPI_TIMER],
+                        &mod);
        ret = 0;
        if (!mod)
                goto out_ops;
index 24783b7..771688e 100644
@@ -426,29 +426,13 @@ struct ethtool_link_usettings {
 int __ethtool_get_link_ksettings(struct net_device *dev,
                                 struct ethtool_link_ksettings *link_ksettings)
 {
-       const struct link_mode_info *link_info;
-       int err;
-
        ASSERT_RTNL();
 
        if (!dev->ethtool_ops->get_link_ksettings)
                return -EOPNOTSUPP;
 
        memset(link_ksettings, 0, sizeof(*link_ksettings));
-
-       link_ksettings->link_mode = -1;
-       err = dev->ethtool_ops->get_link_ksettings(dev, link_ksettings);
-       if (err)
-               return err;
-
-       if (link_ksettings->link_mode != -1) {
-               link_info = &link_mode_params[link_ksettings->link_mode];
-               link_ksettings->base.speed = link_info->speed;
-               link_ksettings->lanes = link_info->lanes;
-               link_ksettings->base.duplex = link_info->duplex;
-       }
-
-       return 0;
+       return dev->ethtool_ops->get_link_ksettings(dev, link_ksettings);
 }
 EXPORT_SYMBOL(__ethtool_get_link_ksettings);
 
index 7444ec6..bfcdc75 100644
@@ -217,6 +217,7 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
        master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
        if (master) {
                skb->dev = master->dev;
+               skb_reset_mac_header(skb);
                hsr_forward_skb(skb, master);
        } else {
                atomic_long_inc(&dev->tx_dropped);
index ed82a47..b218e45 100644
@@ -555,12 +555,6 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
 {
        struct hsr_frame_info frame;
 
-       if (skb_mac_header(skb) != skb->data) {
-               WARN_ONCE(1, "%s:%d: Malformed frame (port_src %s)\n",
-                         __FILE__, __LINE__, port->dev->name);
-               goto out_drop;
-       }
-
        if (fill_frame_info(&frame, skb, port) < 0)
                goto out_drop;
 
index 9c640d6..0c1b077 100644
@@ -551,9 +551,7 @@ ieee802154_llsec_parse_key_id(struct genl_info *info,
        desc->mode = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]);
 
        if (desc->mode == IEEE802154_SCF_KEY_IMPLICIT) {
-               if (!info->attrs[IEEE802154_ATTR_PAN_ID] &&
-                   !(info->attrs[IEEE802154_ATTR_SHORT_ADDR] ||
-                     info->attrs[IEEE802154_ATTR_HW_ADDR]))
+               if (!info->attrs[IEEE802154_ATTR_PAN_ID])
                        return -EINVAL;
 
                desc->device_addr.pan_id = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_PAN_ID]);
@@ -562,6 +560,9 @@ ieee802154_llsec_parse_key_id(struct genl_info *info,
                        desc->device_addr.mode = IEEE802154_ADDR_SHORT;
                        desc->device_addr.short_addr = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_SHORT_ADDR]);
                } else {
+                       if (!info->attrs[IEEE802154_ATTR_HW_ADDR])
+                               return -EINVAL;
+
                        desc->device_addr.mode = IEEE802154_ADDR_LONG;
                        desc->device_addr.extended_addr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
                }
index 7c5a1aa..05f6bd8 100644
@@ -820,8 +820,13 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
                goto nla_put_failure;
 
 #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               goto out;
+
        if (nl802154_get_llsec_params(msg, rdev, wpan_dev) < 0)
                goto nla_put_failure;
+
+out:
 #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
 
        genlmsg_end(msg, hdr);
@@ -1384,6 +1389,9 @@ static int nl802154_set_llsec_params(struct sk_buff *skb,
        u32 changed = 0;
        int ret;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
        if (info->attrs[NL802154_ATTR_SEC_ENABLED]) {
                u8 enabled;
 
@@ -1490,6 +1498,11 @@ nl802154_dump_llsec_key(struct sk_buff *skb, struct netlink_callback *cb)
        if (err)
                return err;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
+               err = skb->len;
+               goto out_err;
+       }
+
        if (!wpan_dev->netdev) {
                err = -EINVAL;
                goto out_err;
@@ -1544,7 +1557,11 @@ static int nl802154_add_llsec_key(struct sk_buff *skb, struct genl_info *info)
        struct ieee802154_llsec_key_id id = { };
        u32 commands[NL802154_CMD_FRAME_NR_IDS / 32] = { };
 
-       if (nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
+       if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
+           nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
                return -EINVAL;
 
        if (!attrs[NL802154_KEY_ATTR_USAGE_FRAMES] ||
@@ -1592,7 +1609,11 @@ static int nl802154_del_llsec_key(struct sk_buff *skb, struct genl_info *info)
        struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1];
        struct ieee802154_llsec_key_id id;
 
-       if (nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
+       if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
+           nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
                return -EINVAL;
 
        if (ieee802154_llsec_parse_key_id(attrs[NL802154_KEY_ATTR_ID], &id) < 0)
@@ -1656,6 +1677,11 @@ nl802154_dump_llsec_dev(struct sk_buff *skb, struct netlink_callback *cb)
        if (err)
                return err;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
+               err = skb->len;
+               goto out_err;
+       }
+
        if (!wpan_dev->netdev) {
                err = -EINVAL;
                goto out_err;
@@ -1742,6 +1768,9 @@ static int nl802154_add_llsec_dev(struct sk_buff *skb, struct genl_info *info)
        struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
        struct ieee802154_llsec_device dev_desc;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
        if (ieee802154_llsec_parse_device(info->attrs[NL802154_ATTR_SEC_DEVICE],
                                          &dev_desc) < 0)
                return -EINVAL;
@@ -1757,7 +1786,11 @@ static int nl802154_del_llsec_dev(struct sk_buff *skb, struct genl_info *info)
        struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1];
        __le64 extended_addr;
 
-       if (nla_parse_nested_deprecated(attrs, NL802154_DEV_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVICE], nl802154_dev_policy, info->extack))
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
+       if (!info->attrs[NL802154_ATTR_SEC_DEVICE] ||
+           nla_parse_nested_deprecated(attrs, NL802154_DEV_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVICE], nl802154_dev_policy, info->extack))
                return -EINVAL;
 
        if (!attrs[NL802154_DEV_ATTR_EXTENDED_ADDR])
@@ -1825,6 +1858,11 @@ nl802154_dump_llsec_devkey(struct sk_buff *skb, struct netlink_callback *cb)
        if (err)
                return err;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
+               err = skb->len;
+               goto out_err;
+       }
+
        if (!wpan_dev->netdev) {
                err = -EINVAL;
                goto out_err;
@@ -1882,6 +1920,9 @@ static int nl802154_add_llsec_devkey(struct sk_buff *skb, struct genl_info *info
        struct ieee802154_llsec_device_key key;
        __le64 extended_addr;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
        if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
            nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack) < 0)
                return -EINVAL;
@@ -1913,7 +1954,11 @@ static int nl802154_del_llsec_devkey(struct sk_buff *skb, struct genl_info *info
        struct ieee802154_llsec_device_key key;
        __le64 extended_addr;
 
-       if (nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack))
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
+       if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
+           nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack))
                return -EINVAL;
 
        if (!attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR])
@@ -1986,6 +2031,11 @@ nl802154_dump_llsec_seclevel(struct sk_buff *skb, struct netlink_callback *cb)
        if (err)
                return err;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
+               err = skb->len;
+               goto out_err;
+       }
+
        if (!wpan_dev->netdev) {
                err = -EINVAL;
                goto out_err;
@@ -2070,6 +2120,9 @@ static int nl802154_add_llsec_seclevel(struct sk_buff *skb,
        struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
        struct ieee802154_llsec_seclevel sl;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
        if (llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
                                 &sl) < 0)
                return -EINVAL;
@@ -2085,6 +2138,9 @@ static int nl802154_del_llsec_seclevel(struct sk_buff *skb,
        struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
        struct ieee802154_llsec_seclevel sl;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
        if (!info->attrs[NL802154_ATTR_SEC_LEVEL] ||
            llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
                                 &sl) < 0)
@@ -2098,11 +2154,7 @@ static int nl802154_del_llsec_seclevel(struct sk_buff *skb,
 #define NL802154_FLAG_NEED_NETDEV      0x02
 #define NL802154_FLAG_NEED_RTNL                0x04
 #define NL802154_FLAG_CHECK_NETDEV_UP  0x08
-#define NL802154_FLAG_NEED_NETDEV_UP   (NL802154_FLAG_NEED_NETDEV |\
-                                        NL802154_FLAG_CHECK_NETDEV_UP)
 #define NL802154_FLAG_NEED_WPAN_DEV    0x10
-#define NL802154_FLAG_NEED_WPAN_DEV_UP (NL802154_FLAG_NEED_WPAN_DEV |\
-                                        NL802154_FLAG_CHECK_NETDEV_UP)
 
 static int nl802154_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
                             struct genl_info *info)
index d99e1be..36ed85b 100644
@@ -141,7 +141,7 @@ static void ah_output_done(struct crypto_async_request *base, int err)
        }
 
        kfree(AH_SKB_CB(skb)->tmp);
-       xfrm_output_resume(skb, err);
+       xfrm_output_resume(skb->sk, skb, err);
 }
 
 static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
index 75f6799..2e35f68 100644
@@ -1978,7 +1978,8 @@ static int inet_validate_link_af(const struct net_device *dev,
        return 0;
 }
 
-static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
+static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla,
+                           struct netlink_ext_ack *extack)
 {
        struct in_device *in_dev = __in_dev_get_rcu(dev);
        struct nlattr *a, *tb[IFLA_INET_MAX+1];
index a3271ec..4b834bb 100644
@@ -279,7 +279,7 @@ static void esp_output_done(struct crypto_async_request *base, int err)
                    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
                        esp_output_tail_tcp(x, skb);
                else
-                       xfrm_output_resume(skb, err);
+                       xfrm_output_resume(skb->sk, skb, err);
        }
 }
 
index 601f5fb..33687cf 100644
@@ -217,10 +217,12 @@ static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
 
        if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
             !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
-               esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
+               esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
+                                           NETIF_F_SCTP_CRC);
        else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
                 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
-               esp_features = features & ~NETIF_F_CSUM_MASK;
+               esp_features = features & ~(NETIF_F_CSUM_MASK |
+                                           NETIF_F_SCTP_CRC);
 
        xo->flags |= XFRM_GSO_SEGMENT;
 
@@ -312,8 +314,17 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_
        ip_hdr(skb)->tot_len = htons(skb->len);
        ip_send_check(ip_hdr(skb));
 
-       if (hw_offload)
+       if (hw_offload) {
+               if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
+                       return -ENOMEM;
+
+               xo = xfrm_offload(skb);
+               if (!xo)
+                       return -EINVAL;
+
+               xo->flags |= XFRM_XMIT;
                return 0;
+       }
 
        err = esp_output_tail(x, skb, &esp);
        if (err)
index 6bd7ca0..fd472ea 100644
@@ -705,12 +705,15 @@ static bool reqsk_queue_unlink(struct request_sock *req)
        return found;
 }
 
-void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
+bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
 {
-       if (reqsk_queue_unlink(req)) {
+       bool unlinked = reqsk_queue_unlink(req);
+
+       if (unlinked) {
                reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
                reqsk_put(req);
        }
+       return unlinked;
 }
 EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
 
index eb20708..31c6c6d 100644
@@ -218,7 +218,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
        }
 
        if (dst->flags & DST_XFRM_QUEUE)
-               goto queued;
+               goto xmit;
 
        if (!vti_state_check(dst->xfrm, parms->iph.daddr, parms->iph.saddr)) {
                dev->stats.tx_carrier_errors++;
@@ -238,6 +238,8 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
        if (skb->len > mtu) {
                skb_dst_update_pmtu_no_confirm(skb, mtu);
                if (skb->protocol == htons(ETH_P_IP)) {
+                       if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
+                               goto xmit;
                        icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                                      htonl(mtu));
                } else {
@@ -251,7 +253,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
                goto tx_error;
        }
 
-queued:
+xmit:
        skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
        skb_dst_set(skb, dst);
        skb->dev = skb_dst(skb)->dev;
index 47db1bf..bc2f6ca 100644
@@ -309,7 +309,7 @@ have_carrier:
  */
 static void __init ic_close_devs(void)
 {
-       struct net_device *selected_dev = ic_dev->dev;
+       struct net_device *selected_dev = ic_dev ? ic_dev->dev : NULL;
        struct ic_device *d, *next;
        struct net_device *dev;
 
@@ -317,16 +317,18 @@ static void __init ic_close_devs(void)
        next = ic_first_dev;
        while ((d = next)) {
                bool bring_down = (d != ic_dev);
-               struct net_device *lower_dev;
+               struct net_device *lower;
                struct list_head *iter;
 
                next = d->next;
                dev = d->dev;
 
-               netdev_for_each_lower_dev(selected_dev, lower_dev, iter) {
-                       if (dev == lower_dev) {
-                               bring_down = false;
-                               break;
+               if (selected_dev) {
+                       netdev_for_each_lower_dev(selected_dev, lower, iter) {
+                               if (dev == lower) {
+                                       bring_down = false;
+                                       break;
+                               }
                        }
                }
                if (bring_down) {
index c576a63..d1e04d2 100644
@@ -203,7 +203,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 
        local_bh_disable();
        addend = xt_write_recseq_begin();
-       private = rcu_access_pointer(table->private);
+       private = READ_ONCE(table->private); /* Address dependency. */
        cpu     = smp_processor_id();
        table_base = private->entries;
        jumpstack  = (struct arpt_entry **)private->jumpstack[cpu];
@@ -649,7 +649,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
 {
        unsigned int countersize;
        struct xt_counters *counters;
-       const struct xt_table_info *private = xt_table_get_private_protected(table);
+       const struct xt_table_info *private = table->private;
 
        /* We need atomic snapshot of counters: rest doesn't change
         * (other than comefrom, which userspace doesn't care
@@ -673,7 +673,7 @@ static int copy_entries_to_user(unsigned int total_size,
        unsigned int off, num;
        const struct arpt_entry *e;
        struct xt_counters *counters;
-       struct xt_table_info *private = xt_table_get_private_protected(table);
+       struct xt_table_info *private = table->private;
        int ret = 0;
        void *loc_cpu_entry;
 
@@ -807,7 +807,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
        t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
        if (!IS_ERR(t)) {
                struct arpt_getinfo info;
-               const struct xt_table_info *private = xt_table_get_private_protected(t);
+               const struct xt_table_info *private = t->private;
 #ifdef CONFIG_COMPAT
                struct xt_table_info tmp;
 
@@ -860,7 +860,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
 
        t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
        if (!IS_ERR(t)) {
-               const struct xt_table_info *private = xt_table_get_private_protected(t);
+               const struct xt_table_info *private = t->private;
 
                if (get.size == private->size)
                        ret = copy_entries_to_user(private->size,
@@ -1017,7 +1017,7 @@ static int do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
        }
 
        local_bh_disable();
-       private = xt_table_get_private_protected(t);
+       private = t->private;
        if (private->number != tmp.num_counters) {
                ret = -EINVAL;
                goto unlock_up_free;
@@ -1330,7 +1330,7 @@ static int compat_copy_entries_to_user(unsigned int total_size,
                                       void __user *userptr)
 {
        struct xt_counters *counters;
-       const struct xt_table_info *private = xt_table_get_private_protected(table);
+       const struct xt_table_info *private = table->private;
        void __user *pos;
        unsigned int size;
        int ret = 0;
@@ -1379,7 +1379,7 @@ static int compat_get_entries(struct net *net,
        xt_compat_lock(NFPROTO_ARP);
        t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
        if (!IS_ERR(t)) {
-               const struct xt_table_info *private = xt_table_get_private_protected(t);
+               const struct xt_table_info *private = t->private;
                struct xt_table_info info;
 
                ret = compat_table_info(private, &info);
index e8f6f9d..f15bc21 100644
@@ -258,7 +258,7 @@ ipt_do_table(struct sk_buff *skb,
        WARN_ON(!(table->valid_hooks & (1 << hook)));
        local_bh_disable();
        addend = xt_write_recseq_begin();
-       private = rcu_access_pointer(table->private);
+       private = READ_ONCE(table->private); /* Address dependency. */
        cpu        = smp_processor_id();
        table_base = private->entries;
        jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
@@ -791,7 +791,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
 {
        unsigned int countersize;
        struct xt_counters *counters;
-       const struct xt_table_info *private = xt_table_get_private_protected(table);
+       const struct xt_table_info *private = table->private;
 
        /* We need atomic snapshot of counters: rest doesn't change
           (other than comefrom, which userspace doesn't care
@@ -815,7 +815,7 @@ copy_entries_to_user(unsigned int total_size,
        unsigned int off, num;
        const struct ipt_entry *e;
        struct xt_counters *counters;
-       const struct xt_table_info *private = xt_table_get_private_protected(table);
+       const struct xt_table_info *private = table->private;
        int ret = 0;
        const void *loc_cpu_entry;
 
@@ -964,7 +964,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
        t = xt_request_find_table_lock(net, AF_INET, name);
        if (!IS_ERR(t)) {
                struct ipt_getinfo info;
-               const struct xt_table_info *private = xt_table_get_private_protected(t);
+               const struct xt_table_info *private = t->private;
 #ifdef CONFIG_COMPAT
                struct xt_table_info tmp;
 
@@ -1018,7 +1018,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
 
        t = xt_find_table_lock(net, AF_INET, get.name);
        if (!IS_ERR(t)) {
-               const struct xt_table_info *private = xt_table_get_private_protected(t);
+               const struct xt_table_info *private = t->private;
                if (get.size == private->size)
                        ret = copy_entries_to_user(private->size,
                                                   t, uptr->entrytable);
@@ -1173,7 +1173,7 @@ do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
        }
 
        local_bh_disable();
-       private = xt_table_get_private_protected(t);
+       private = t->private;
        if (private->number != tmp.num_counters) {
                ret = -EINVAL;
                goto unlock_up_free;
@@ -1543,7 +1543,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
                            void __user *userptr)
 {
        struct xt_counters *counters;
-       const struct xt_table_info *private = xt_table_get_private_protected(table);
+       const struct xt_table_info *private = table->private;
        void __user *pos;
        unsigned int size;
        int ret = 0;
@@ -1589,7 +1589,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
        xt_compat_lock(AF_INET);
        t = xt_find_table_lock(net, AF_INET, get.name);
        if (!IS_ERR(t)) {
-               const struct xt_table_info *private = xt_table_get_private_protected(t);
+               const struct xt_table_info *private = t->private;
                struct xt_table_info info;
                ret = compat_table_info(private, &info);
                if (!ret && get.size == info.size)
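
The arp_tables and ip_tables hunks above all make the same point: with the
RCU management of table->private gone, readers are protected by
local_bh_disable() plus the xt_write_recseq sequence counter, so a plain
READ_ONCE() (relying on the address dependency) is the right annotation,
and xt_table_get_private_protected()/rcu_access_pointer() are no longer
needed. The reader pattern, condensed from the do_table functions:

        local_bh_disable();
        addend  = xt_write_recseq_begin();      /* pairs with the replace side */
        private = READ_ONCE(table->private);    /* address dependency */
        /* ... walk private->entries ... */
        xt_write_recseq_end(addend);
        local_bh_enable();
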
index 02d81d7..bba150f 100644
@@ -2687,44 +2687,15 @@ out:
        return rth;
 }
 
-static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
-{
-       return NULL;
-}
-
-static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
-{
-       unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
-
-       return mtu ? : dst->dev->mtu;
-}
-
-static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                                         struct sk_buff *skb, u32 mtu,
-                                         bool confirm_neigh)
-{
-}
-
-static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
-                                      struct sk_buff *skb)
-{
-}
-
-static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
-                                         unsigned long old)
-{
-       return NULL;
-}
-
 static struct dst_ops ipv4_dst_blackhole_ops = {
-       .family                 =       AF_INET,
-       .check                  =       ipv4_blackhole_dst_check,
-       .mtu                    =       ipv4_blackhole_mtu,
-       .default_advmss         =       ipv4_default_advmss,
-       .update_pmtu            =       ipv4_rt_blackhole_update_pmtu,
-       .redirect               =       ipv4_rt_blackhole_redirect,
-       .cow_metrics            =       ipv4_rt_blackhole_cow_metrics,
-       .neigh_lookup           =       ipv4_neigh_lookup,
+       .family                 = AF_INET,
+       .default_advmss         = ipv4_default_advmss,
+       .neigh_lookup           = ipv4_neigh_lookup,
+       .check                  = dst_blackhole_check,
+       .cow_metrics            = dst_blackhole_cow_metrics,
+       .update_pmtu            = dst_blackhole_update_pmtu,
+       .redirect               = dst_blackhole_redirect,
+       .mtu                    = dst_blackhole_mtu,
 };
 
 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
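
The rewritten ops table above replaces the per-family no-op stubs with the shared dst_blackhole_*() helpers, so IPv4 and IPv6 (see the matching ip6_dst_blackhole_ops hunk later in this diff) reference one implementation. A small compilable sketch of the deduplication, with illustrative types rather than the kernel's struct dst_ops:

    #include <stdio.h>

    struct ops {
            unsigned int (*mtu)(const void *dst);
            void (*redirect)(void *dst);
    };

    /* one shared set of no-op callbacks... */
    static unsigned int blackhole_mtu(const void *dst)
    {
            (void)dst;
            return 0;
    }

    static void blackhole_redirect(void *dst)
    {
            (void)dst;
    }

    /* ...referenced by every family's ops table */
    static const struct ops v4_blackhole_ops = {
            .mtu      = blackhole_mtu,
            .redirect = blackhole_redirect,
    };

    static const struct ops v6_blackhole_ops = {
            .mtu      = blackhole_mtu,
            .redirect = blackhole_redirect,
    };

    int main(void)
    {
            printf("shared redirect: %d\n",
                   v4_blackhole_ops.redirect == v6_blackhole_ops.redirect);
            return v4_blackhole_ops.mtu(NULL);
    }
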
index 0055ae0..7513ba4 100644
@@ -804,8 +804,11 @@ embryonic_reset:
                tcp_reset(sk, skb);
        }
        if (!fastopen) {
-               inet_csk_reqsk_queue_drop(sk, req);
-               __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
+               bool unlinked = inet_csk_reqsk_queue_drop(sk, req);
+
+               if (unlinked)
+                       __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
+               *req_stolen = !unlinked;
        }
        return NULL;
 }
index 4a0478b..99d743e 100644
@@ -2754,6 +2754,10 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
                val = up->gso_size;
                break;
 
+       case UDP_GRO:
+               val = up->gro_enabled;
+               break;
+
        /* The following two cannot be changed on UDP sockets, the return is
         * always 0 (which corresponds to the full checksum coverage of UDP). */
        case UDPLITE_SEND_CSCOV:
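
With the case added above, the UDP_GRO setting becomes readable, not just writable. A hedged userspace example (UDP_GRO is 104 in the UAPI header; error handling trimmed):

    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef UDP_GRO
    #define UDP_GRO 104     /* include/uapi/linux/udp.h */
    #endif

    int main(void)
    {
            int s = socket(AF_INET, SOCK_DGRAM, 0);
            int val = 0;
            socklen_t len = sizeof(val);

            if (getsockopt(s, IPPROTO_UDP, UDP_GRO, &val, &len) == 0)
                    printf("UDP_GRO enabled: %d\n", val);

            close(s);
            return 0;
    }
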
index f2337fb..a9e53f5 100644
@@ -5669,7 +5669,8 @@ static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
        return 0;
 }
 
-static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
+static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token,
+                            struct netlink_ext_ack *extack)
 {
        struct inet6_ifaddr *ifp;
        struct net_device *dev = idev->dev;
@@ -5680,12 +5681,29 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
 
        if (!token)
                return -EINVAL;
-       if (dev->flags & (IFF_LOOPBACK | IFF_NOARP))
+
+       if (dev->flags & IFF_LOOPBACK) {
+               NL_SET_ERR_MSG_MOD(extack, "Device is loopback");
                return -EINVAL;
-       if (!ipv6_accept_ra(idev))
+       }
+
+       if (dev->flags & IFF_NOARP) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Device does not do neighbour discovery");
+               return -EINVAL;
+       }
+
+       if (!ipv6_accept_ra(idev)) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Router advertisement is disabled on device");
                return -EINVAL;
-       if (idev->cnf.rtr_solicits == 0)
+       }
+
+       if (idev->cnf.rtr_solicits == 0) {
+               NL_SET_ERR_MSG(extack,
+                              "Router solicitation is disabled on device");
                return -EINVAL;
+       }
 
        write_lock_bh(&idev->lock);
 
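Each rejected configuration above now reports a distinct extended-ack string instead of a bare -EINVAL, so userspace tools can surface the reason. A toy sketch of the pattern, with an illustrative stand-in for struct netlink_ext_ack:

    #include <stdio.h>

    struct ext_ack {
            const char *msg;        /* stand-in for netlink_ext_ack */
    };

    static int set_token(int is_loopback, struct ext_ack *extack)
    {
            if (is_loopback) {
                    extack->msg = "Device is loopback";
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            struct ext_ack ea = { 0 };

            if (set_token(1, &ea))
                    printf("error: %s\n", ea.msg);
            return 0;
    }
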
@@ -5793,7 +5811,8 @@ static int inet6_validate_link_af(const struct net_device *dev,
        return 0;
 }
 
-static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
+static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla,
+                            struct netlink_ext_ack *extack)
 {
        struct inet6_dev *idev = __in6_dev_get(dev);
        struct nlattr *tb[IFLA_INET6_MAX + 1];
@@ -5806,7 +5825,8 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
                BUG();
 
        if (tb[IFLA_INET6_TOKEN]) {
-               err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]));
+               err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]),
+                                       extack);
                if (err)
                        return err;
        }
index 440080d..080ee7f 100644
@@ -316,7 +316,7 @@ static void ah6_output_done(struct crypto_async_request *base, int err)
        }
 
        kfree(AH_SKB_CB(skb)->tmp);
-       xfrm_output_resume(skb, err);
+       xfrm_output_resume(skb->sk, skb, err);
 }
 
 static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
index 153ad10..727d791 100644
@@ -314,7 +314,7 @@ static void esp_output_done(struct crypto_async_request *base, int err)
                    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
                        esp_output_tail_tcp(x, skb);
                else
-                       xfrm_output_resume(skb, err);
+                       xfrm_output_resume(skb->sk, skb, err);
        }
 }
 
index 1ca516f..4af56af 100644
@@ -254,9 +254,11 @@ static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
        skb->encap_hdr_csum = 1;
 
        if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev)
-               esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
+               esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
+                                           NETIF_F_SCTP_CRC);
        else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
-               esp_features = features & ~NETIF_F_CSUM_MASK;
+               esp_features = features & ~(NETIF_F_CSUM_MASK |
+                                           NETIF_F_SCTP_CRC);
 
        xo->flags |= XFRM_GSO_SEGMENT;
 
@@ -346,8 +348,17 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features
 
        ipv6_hdr(skb)->payload_len = htons(len);
 
-       if (hw_offload)
+       if (hw_offload) {
+               if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
+                       return -ENOMEM;
+
+               xo = xfrm_offload(skb);
+               if (!xo)
+                       return -EINVAL;
+
+               xo->flags |= XFRM_XMIT;
                return 0;
+       }
 
        err = esp6_output_tail(x, skb, &esp);
        if (err)
index ef9d022..679699e 100644
@@ -2486,7 +2486,7 @@ static int ipv6_route_native_seq_show(struct seq_file *seq, void *v)
        const struct net_device *dev;
 
        if (rt->nh)
-               fib6_nh = nexthop_fib6_nh(rt->nh);
+               fib6_nh = nexthop_fib6_nh_bh(rt->nh);
 
        seq_printf(seq, "%pi6 %02x ", &rt->fib6_dst.addr, rt->fib6_dst.plen);
 
index e9d2a4a..8025671 100644
@@ -245,16 +245,6 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
        if (ipv6_addr_is_multicast(&hdr->saddr))
                goto err;
 
-       /* While RFC4291 is not explicit about v4mapped addresses
-        * in IPv6 headers, it seems clear linux dual-stack
-        * model can not deal properly with these.
-        * Security models could be fooled by ::ffff:127.0.0.1 for example.
-        *
-        * https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02
-        */
-       if (ipv6_addr_v4mapped(&hdr->saddr))
-               goto err;
-
        skb->transport_header = skb->network_header + sizeof(*hdr);
        IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
 
index f10e7a7..e0cc32e 100644
@@ -494,7 +494,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        }
 
        if (dst->flags & DST_XFRM_QUEUE)
-               goto queued;
+               goto xmit;
 
        x = dst->xfrm;
        if (!vti6_state_check(x, &t->parms.raddr, &t->parms.laddr))
@@ -523,6 +523,8 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 
                        icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                } else {
+                       if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
+                               goto xmit;
                        icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                                      htonl(mtu));
                }
@@ -531,7 +533,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
                goto tx_err_dst_release;
        }
 
-queued:
+xmit:
        skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
        skb_dst_set(skb, dst);
        skb->dev = skb_dst(skb)->dev;
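
The new check above only emits an ICMP "fragmentation needed" error for oversized inner IPv4 packets that set DF; packets without DF may legitimately be fragmented, so they fall through to the xmit label. A minimal sketch of that decision (the DF constant is redefined locally so the snippet stays self-contained):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MY_IP_DF 0x4000   /* don't-fragment bit in frag_off */

    /* nonzero when an ICMP FRAG_NEEDED error is warranted */
    static int needs_frag_error(uint16_t frag_off_be,
                                unsigned int pkt_len, unsigned int mtu)
    {
            return pkt_len > mtu && (ntohs(frag_off_be) & MY_IP_DF);
    }

    int main(void)
    {
            printf("DF set:   %d\n",
                   needs_frag_error(htons(MY_IP_DF), 1500, 1400));
            printf("DF clear: %d\n", needs_frag_error(0, 1500, 1400));
            return 0;
    }
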
index 0d453fa..2e2119b 100644
@@ -280,7 +280,7 @@ ip6t_do_table(struct sk_buff *skb,
 
        local_bh_disable();
        addend = xt_write_recseq_begin();
-       private = rcu_access_pointer(table->private);
+       private = READ_ONCE(table->private); /* Address dependency. */
        cpu        = smp_processor_id();
        table_base = private->entries;
        jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
@@ -807,7 +807,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
 {
        unsigned int countersize;
        struct xt_counters *counters;
-       const struct xt_table_info *private = xt_table_get_private_protected(table);
+       const struct xt_table_info *private = table->private;
 
        /* We need atomic snapshot of counters: rest doesn't change
           (other than comefrom, which userspace doesn't care
@@ -831,7 +831,7 @@ copy_entries_to_user(unsigned int total_size,
        unsigned int off, num;
        const struct ip6t_entry *e;
        struct xt_counters *counters;
-       const struct xt_table_info *private = xt_table_get_private_protected(table);
+       const struct xt_table_info *private = table->private;
        int ret = 0;
        const void *loc_cpu_entry;
 
@@ -980,7 +980,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
        t = xt_request_find_table_lock(net, AF_INET6, name);
        if (!IS_ERR(t)) {
                struct ip6t_getinfo info;
-               const struct xt_table_info *private = xt_table_get_private_protected(t);
+               const struct xt_table_info *private = t->private;
 #ifdef CONFIG_COMPAT
                struct xt_table_info tmp;
 
@@ -1035,7 +1035,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
 
        t = xt_find_table_lock(net, AF_INET6, get.name);
        if (!IS_ERR(t)) {
-               struct xt_table_info *private = xt_table_get_private_protected(t);
+               struct xt_table_info *private = t->private;
                if (get.size == private->size)
                        ret = copy_entries_to_user(private->size,
                                                   t, uptr->entrytable);
@@ -1189,7 +1189,7 @@ do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
        }
 
        local_bh_disable();
-       private = xt_table_get_private_protected(t);
+       private = t->private;
        if (private->number != tmp.num_counters) {
                ret = -EINVAL;
                goto unlock_up_free;
@@ -1552,7 +1552,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
                            void __user *userptr)
 {
        struct xt_counters *counters;
-       const struct xt_table_info *private = xt_table_get_private_protected(table);
+       const struct xt_table_info *private = table->private;
        void __user *pos;
        unsigned int size;
        int ret = 0;
@@ -1598,7 +1598,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
        xt_compat_lock(AF_INET6);
        t = xt_find_table_lock(net, AF_INET6, get.name);
        if (!IS_ERR(t)) {
-               const struct xt_table_info *private = xt_table_get_private_protected(t);
+               const struct xt_table_info *private = t->private;
                struct xt_table_info info;
                ret = compat_table_info(private, &info);
                if (!ret && get.size == info.size)
index 1f56d9a..bf3646b 100644
@@ -298,7 +298,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                 */
                v4addr = LOOPBACK4_IPV6;
                if (!(addr_type & IPV6_ADDR_MULTICAST) &&
-                   !sock_net(sk)->ipv6.sysctl.ip_nonlocal_bind) {
+                   !ipv6_can_nonlocal_bind(sock_net(sk), inet)) {
                        err = -EADDRNOTAVAIL;
                        if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
                                           dev, 0)) {
index 1536f49..373d480 100644
@@ -260,34 +260,16 @@ static struct dst_ops ip6_dst_ops_template = {
        .confirm_neigh          =       ip6_confirm_neigh,
 };
 
-static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
-{
-       unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
-
-       return mtu ? : dst->dev->mtu;
-}
-
-static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                                        struct sk_buff *skb, u32 mtu,
-                                        bool confirm_neigh)
-{
-}
-
-static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
-                                     struct sk_buff *skb)
-{
-}
-
 static struct dst_ops ip6_dst_blackhole_ops = {
-       .family                 =       AF_INET6,
-       .destroy                =       ip6_dst_destroy,
-       .check                  =       ip6_dst_check,
-       .mtu                    =       ip6_blackhole_mtu,
-       .default_advmss         =       ip6_default_advmss,
-       .update_pmtu            =       ip6_rt_blackhole_update_pmtu,
-       .redirect               =       ip6_rt_blackhole_redirect,
-       .cow_metrics            =       dst_cow_metrics_generic,
-       .neigh_lookup           =       ip6_dst_neigh_lookup,
+       .family                 = AF_INET6,
+       .default_advmss         = ip6_default_advmss,
+       .neigh_lookup           = ip6_dst_neigh_lookup,
+       .check                  = ip6_dst_check,
+       .destroy                = ip6_dst_destroy,
+       .cow_metrics            = dst_cow_metrics_generic,
+       .update_pmtu            = dst_blackhole_update_pmtu,
+       .redirect               = dst_blackhole_redirect,
+       .mtu                    = dst_blackhole_mtu,
 };
 
 static const u32 ip6_template_metrics[RTAX_MAX] = {
@@ -5227,9 +5209,11 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
                 * nexthops have been replaced by first new, the rest should
                 * be added to it.
                 */
-               cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
-                                                    NLM_F_REPLACE);
-               cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
+               if (cfg->fc_nlinfo.nlh) {
+                       cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
+                                                            NLM_F_REPLACE);
+                       cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
+               }
                nhn++;
        }
 
index bd44ded..d0f0077 100644
@@ -1175,6 +1175,11 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        if (!ipv6_unicast_destination(skb))
                goto drop;
 
+       if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
+               __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
+               return 0;
+       }
+
        return tcp_conn_request(&tcp6_request_sock_ops,
                                &tcp_request_sock_ipv6_ops, sk, skb);
 
index d7b3d90..b00d6f5 100644
@@ -23,6 +23,7 @@ int aead_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len,
        struct aead_request *aead_req;
        int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
        u8 *__aad;
+       int ret;
 
        aead_req = kzalloc(reqsize + aad_len, GFP_ATOMIC);
        if (!aead_req)
@@ -40,10 +41,10 @@ int aead_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len,
        aead_request_set_crypt(aead_req, sg, sg, data_len, b_0);
        aead_request_set_ad(aead_req, sg[0].length);
 
-       crypto_aead_encrypt(aead_req);
+       ret = crypto_aead_encrypt(aead_req);
        kfree_sensitive(aead_req);
 
-       return 0;
+       return ret;
 }
 
 int aead_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len,
index 6f3b3a0..512cab0 100644
@@ -22,6 +22,7 @@ int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
        struct aead_request *aead_req;
        int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
        const __le16 *fc;
+       int ret;
 
        if (data_len < GMAC_MIC_LEN)
                return -EINVAL;
@@ -59,10 +60,10 @@ int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
        aead_request_set_crypt(aead_req, sg, sg, 0, iv);
        aead_request_set_ad(aead_req, GMAC_AAD_LEN + data_len);
 
-       crypto_aead_encrypt(aead_req);
+       ret = crypto_aead_encrypt(aead_req);
        kfree_sensitive(aead_req);
 
-       return 0;
+       return ret;
 }
 
 struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[],
index c4c70e3..860bc35 100644
@@ -1788,8 +1788,10 @@ static int ieee80211_change_station(struct wiphy *wiphy,
                }
 
                if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
-                   sta->sdata->u.vlan.sta)
+                   sta->sdata->u.vlan.sta) {
+                       ieee80211_clear_fast_rx(sta);
                        RCU_INIT_POINTER(sta->sdata->u.vlan.sta, NULL);
+               }
 
                if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
                        ieee80211_vif_dec_num_mcast(sta->sdata);
@@ -2950,14 +2952,14 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
                        continue;
 
                for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) {
-                       if (~sdata->rc_rateidx_mcs_mask[i][j]) {
+                       if (sdata->rc_rateidx_mcs_mask[i][j] != 0xff) {
                                sdata->rc_has_mcs_mask[i] = true;
                                break;
                        }
                }
 
                for (j = 0; j < NL80211_VHT_NSS_MAX; j++) {
-                       if (~sdata->rc_rateidx_vht_mcs_mask[i][j]) {
+                       if (sdata->rc_rateidx_vht_mcs_mask[i][j] != 0xffff) {
                                sdata->rc_has_vht_mcs_mask[i] = true;
                                break;
                        }
index 1f552f3..a7ac53a 100644
@@ -1874,6 +1874,8 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
 
        /* remove beacon */
        kfree(sdata->u.ibss.ie);
+       sdata->u.ibss.ie = NULL;
+       sdata->u.ibss.ie_len = 0;
 
        /* on the next join, re-program HT parameters */
        memset(&ifibss->ht_capa, 0, sizeof(ifibss->ht_capa));
index 4f3f8bb..1b9c826 100644
@@ -973,8 +973,19 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
                        continue;
 
                if (!dflt_chandef.chan) {
+                       /*
+                        * Assign the first enabled channel to dflt_chandef
+                        * from the list of channels
+                        */
+                       for (i = 0; i < sband->n_channels; i++)
+                               if (!(sband->channels[i].flags &
+                                               IEEE80211_CHAN_DISABLED))
+                                       break;
+                       /* if none found then use the first anyway */
+                       if (i == sband->n_channels)
+                               i = 0;
                        cfg80211_chandef_create(&dflt_chandef,
-                                               &sband->channels[0],
+                                               &sband->channels[i],
                                                NL80211_CHAN_NO_HT);
                        /* init channel we're on */
                        if (!local->use_chanctx && !local->_oper_chandef.chan) {
index 2e33a12..96f487f 100644
@@ -4707,7 +4707,10 @@ static void ieee80211_sta_conn_mon_timer(struct timer_list *t)
                timeout = sta->rx_stats.last_rx;
        timeout += IEEE80211_CONNECTION_IDLE_TIME;
 
-       if (time_is_before_jiffies(timeout)) {
+       /* If timeout is after now, then update timer to fire at
+        * the later date, but do not actually probe at this time.
+        */
+       if (time_is_after_jiffies(timeout)) {
                mod_timer(&ifmgd->conn_mon_timer, round_jiffies_up(timeout));
                return;
        }
@@ -5071,7 +5074,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
                he_oper_ie = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_OPERATION,
                                                  ies->data, ies->len);
                if (he_oper_ie &&
-                   he_oper_ie[1] == ieee80211_he_oper_size(&he_oper_ie[3]))
+                   he_oper_ie[1] >= ieee80211_he_oper_size(&he_oper_ie[3]))
                        he_oper = (void *)(he_oper_ie + 3);
                else
                        he_oper = NULL;
index 2f44f49..ecad9b1 100644
@@ -805,7 +805,6 @@ minstrel_ht_group_min_rate_offset(struct minstrel_ht_sta *mi, int group,
 static u16
 minstrel_ht_next_inc_rate(struct minstrel_ht_sta *mi, u32 fast_rate_dur)
 {
-       struct minstrel_mcs_group_data *mg;
        u8 type = MINSTREL_SAMPLE_TYPE_INC;
        int i, index = 0;
        u8 group;
@@ -813,7 +812,6 @@ minstrel_ht_next_inc_rate(struct minstrel_ht_sta *mi, u32 fast_rate_dur)
        group = mi->sample[type].sample_group;
        for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) {
                group = (group + 1) % ARRAY_SIZE(minstrel_mcs_groups);
-               mg = &mi->groups[group];
 
                index = minstrel_ht_group_min_rate_offset(mi, group,
                                                          fast_rate_dur);
index 5d06de6..3b3bcef 100644
@@ -3573,7 +3573,7 @@ begin:
            test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags))
                goto out;
 
-       if (vif->txqs_stopped[ieee80211_ac_from_tid(txq->tid)]) {
+       if (vif->txqs_stopped[txq->ac]) {
                set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags);
                goto out;
        }
index f080fcf..c0fa526 100644
@@ -968,7 +968,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
                break;
        case WLAN_EID_EXT_HE_OPERATION:
                if (len >= sizeof(*elems->he_operation) &&
-                   len == ieee80211_he_oper_size(data) - 1) {
+                   len >= ieee80211_he_oper_size(data) - 1) {
                        if (crc)
                                *crc = crc32_be(*crc, (void *)elem,
                                                elem->datalen + 2);
index 585d331..55550ea 100644
@@ -152,7 +152,7 @@ err_tfm0:
        crypto_free_sync_skcipher(key->tfm0);
 err_tfm:
        for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
-               if (key->tfm[i])
+               if (!IS_ERR_OR_NULL(key->tfm[i]))
                        crypto_free_aead(key->tfm[i]);
 
        kfree_sensitive(key);
index 444a386..89a4225 100644
@@ -567,15 +567,15 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
 }
 
 static u64 add_addr_generate_hmac(u64 key1, u64 key2, u8 addr_id,
-                                 struct in_addr *addr)
+                                 struct in_addr *addr, u16 port)
 {
        u8 hmac[SHA256_DIGEST_SIZE];
        u8 msg[7];
 
        msg[0] = addr_id;
        memcpy(&msg[1], &addr->s_addr, 4);
-       msg[5] = 0;
-       msg[6] = 0;
+       msg[5] = port >> 8;
+       msg[6] = port & 0xFF;
 
        mptcp_crypto_hmac_sha(key1, key2, msg, 7, hmac);
 
@@ -584,15 +584,15 @@ static u64 add_addr_generate_hmac(u64 key1, u64 key2, u8 addr_id,
 
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
 static u64 add_addr6_generate_hmac(u64 key1, u64 key2, u8 addr_id,
-                                  struct in6_addr *addr)
+                                  struct in6_addr *addr, u16 port)
 {
        u8 hmac[SHA256_DIGEST_SIZE];
        u8 msg[19];
 
        msg[0] = addr_id;
        memcpy(&msg[1], &addr->s6_addr, 16);
-       msg[17] = 0;
-       msg[18] = 0;
+       msg[17] = port >> 8;
+       msg[18] = port & 0xFF;
 
        mptcp_crypto_hmac_sha(key1, key2, msg, 19, hmac);
 
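After the two hunks above, the ADD_ADDR HMAC covers the advertised port: the trailing two message bytes now carry it in network order instead of zeroes, so both peers must compute the digest over the same input. A short sketch of the IPv4 message layout (the hashing step itself is stubbed out):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static void build_msg(uint8_t msg[7], uint8_t addr_id,
                          const uint8_t addr[4], uint16_t port)
    {
            msg[0] = addr_id;
            memcpy(&msg[1], addr, 4);
            msg[5] = port >> 8;     /* high byte first: network order */
            msg[6] = port & 0xff;
    }

    int main(void)
    {
            uint8_t msg[7];
            const uint8_t addr[4] = { 192, 0, 2, 1 };

            build_msg(msg, 1, addr, 8080);
            for (int i = 0; i < 7; i++)
                    printf("%02x ", msg[i]);
            printf("\n");
            return 0;
    }
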
@@ -646,7 +646,8 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
                        opts->ahmac = add_addr_generate_hmac(msk->local_key,
                                                             msk->remote_key,
                                                             opts->addr_id,
-                                                            &opts->addr);
+                                                            &opts->addr,
+                                                            opts->port);
                }
        }
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
@@ -657,7 +658,8 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
                        opts->ahmac = add_addr6_generate_hmac(msk->local_key,
                                                              msk->remote_key,
                                                              opts->addr_id,
-                                                             &opts->addr6);
+                                                             &opts->addr6,
+                                                             opts->port);
                }
        }
 #endif
@@ -962,12 +964,14 @@ static bool add_addr_hmac_valid(struct mptcp_sock *msk,
        if (mp_opt->family == MPTCP_ADDR_IPVERSION_4)
                hmac = add_addr_generate_hmac(msk->remote_key,
                                              msk->local_key,
-                                             mp_opt->addr_id, &mp_opt->addr);
+                                             mp_opt->addr_id, &mp_opt->addr,
+                                             mp_opt->port);
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
        else
                hmac = add_addr6_generate_hmac(msk->remote_key,
                                               msk->local_key,
-                                              mp_opt->addr_id, &mp_opt->addr6);
+                                              mp_opt->addr_id, &mp_opt->addr6,
+                                              mp_opt->port);
 #endif
 
        pr_debug("msk=%p, ahmac=%llu, mp_opt->ahmac=%llu\n",
index 7695857..4bde960 100644
@@ -11,7 +11,6 @@
 #include <linux/netdevice.h>
 #include <linux/sched/signal.h>
 #include <linux/atomic.h>
-#include <linux/igmp.h>
 #include <net/sock.h>
 #include <net/inet_common.h>
 #include <net/inet_hashtables.h>
@@ -20,7 +19,6 @@
 #include <net/tcp_states.h>
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
 #include <net/transp_v6.h>
-#include <net/addrconf.h>
 #endif
 #include <net/mptcp.h>
 #include <net/xfrm.h>
@@ -2878,6 +2876,48 @@ static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname,
        return ret;
 }
 
+static bool mptcp_unsupported(int level, int optname)
+{
+       if (level == SOL_IP) {
+               switch (optname) {
+               case IP_ADD_MEMBERSHIP:
+               case IP_ADD_SOURCE_MEMBERSHIP:
+               case IP_DROP_MEMBERSHIP:
+               case IP_DROP_SOURCE_MEMBERSHIP:
+               case IP_BLOCK_SOURCE:
+               case IP_UNBLOCK_SOURCE:
+               case MCAST_JOIN_GROUP:
+               case MCAST_LEAVE_GROUP:
+               case MCAST_JOIN_SOURCE_GROUP:
+               case MCAST_LEAVE_SOURCE_GROUP:
+               case MCAST_BLOCK_SOURCE:
+               case MCAST_UNBLOCK_SOURCE:
+               case MCAST_MSFILTER:
+                       return true;
+               }
+               return false;
+       }
+       if (level == SOL_IPV6) {
+               switch (optname) {
+               case IPV6_ADDRFORM:
+               case IPV6_ADD_MEMBERSHIP:
+               case IPV6_DROP_MEMBERSHIP:
+               case IPV6_JOIN_ANYCAST:
+               case IPV6_LEAVE_ANYCAST:
+               case MCAST_JOIN_GROUP:
+               case MCAST_LEAVE_GROUP:
+               case MCAST_JOIN_SOURCE_GROUP:
+               case MCAST_LEAVE_SOURCE_GROUP:
+               case MCAST_BLOCK_SOURCE:
+               case MCAST_UNBLOCK_SOURCE:
+               case MCAST_MSFILTER:
+                       return true;
+               }
+               return false;
+       }
+       return false;
+}
+
 static int mptcp_setsockopt(struct sock *sk, int level, int optname,
                            sockptr_t optval, unsigned int optlen)
 {
@@ -2886,6 +2926,9 @@ static int mptcp_setsockopt(struct sock *sk, int level, int optname,
 
        pr_debug("msk=%p", msk);
 
+       if (mptcp_unsupported(level, optname))
+               return -ENOPROTOOPT;
+
        if (level == SOL_SOCKET)
                return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen);
 
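With mptcp_unsupported() wired in above, multicast and anycast socket options fail fast with -ENOPROTOOPT instead of being applied to a single subflow (which also lets the per-subflow release hooks further down in this file go away). A hedged userspace illustration, assuming a kernel built with CONFIG_MPTCP (IPPROTO_MPTCP is 262 in the UAPI):

    #include <errno.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef IPPROTO_MPTCP
    #define IPPROTO_MPTCP 262
    #endif

    int main(void)
    {
            int s = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
            struct ip_mreq mreq;

            if (s < 0)
                    return 1;       /* kernel without MPTCP */

            memset(&mreq, 0, sizeof(mreq));
            if (setsockopt(s, IPPROTO_IP, IP_ADD_MEMBERSHIP,
                           &mreq, sizeof(mreq)) < 0)
                    printf("rejected: %s\n", strerror(errno));

            close(s);
            return 0;
    }
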
@@ -2968,7 +3011,7 @@ static void mptcp_release_cb(struct sock *sk)
        for (;;) {
                flags = 0;
                if (test_and_clear_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags))
-                       flags |= MPTCP_PUSH_PENDING;
+                       flags |= BIT(MPTCP_PUSH_PENDING);
                if (!flags)
                        break;
 
@@ -2981,7 +3024,7 @@ static void mptcp_release_cb(struct sock *sk)
                 */
 
                spin_unlock_bh(&sk->sk_lock.slock);
-               if (flags & MPTCP_PUSH_PENDING)
+               if (flags & BIT(MPTCP_PUSH_PENDING))
                        __mptcp_push_pending(sk, 0);
 
                cond_resched();
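
MPTCP_PUSH_PENDING is a bit index, so the old code tested flags against the index itself rather than the corresponding mask; both sides now use BIT(). A tiny sketch of the off-by-a-shift this fixes (the constant here is illustrative; the kernel value may differ):

    #include <stdio.h>

    #define BIT(n) (1UL << (n))
    #define PUSH_PENDING 6  /* bit index, not a mask */

    int main(void)
    {
            unsigned long flags = BIT(PUSH_PENDING);

            /* buggy: 6 is 0b110, so this tests bits 1 and 2 */
            printf("flags & PUSH_PENDING      = %d\n",
                   !!(flags & PUSH_PENDING));
            /* fixed: tests the intended bit */
            printf("flags & BIT(PUSH_PENDING) = %d\n",
                   !!(flags & BIT(PUSH_PENDING)));
            return 0;
    }
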
@@ -3419,34 +3462,10 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
        return mask;
 }
 
-static int mptcp_release(struct socket *sock)
-{
-       struct mptcp_subflow_context *subflow;
-       struct sock *sk = sock->sk;
-       struct mptcp_sock *msk;
-
-       if (!sk)
-               return 0;
-
-       lock_sock(sk);
-
-       msk = mptcp_sk(sk);
-
-       mptcp_for_each_subflow(msk, subflow) {
-               struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
-
-               ip_mc_drop_socket(ssk);
-       }
-
-       release_sock(sk);
-
-       return inet_release(sock);
-}
-
 static const struct proto_ops mptcp_stream_ops = {
        .family            = PF_INET,
        .owner             = THIS_MODULE,
-       .release           = mptcp_release,
+       .release           = inet_release,
        .bind              = mptcp_bind,
        .connect           = mptcp_stream_connect,
        .socketpair        = sock_no_socketpair,
@@ -3538,35 +3557,10 @@ void __init mptcp_proto_init(void)
 }
 
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
-static int mptcp6_release(struct socket *sock)
-{
-       struct mptcp_subflow_context *subflow;
-       struct mptcp_sock *msk;
-       struct sock *sk = sock->sk;
-
-       if (!sk)
-               return 0;
-
-       lock_sock(sk);
-
-       msk = mptcp_sk(sk);
-
-       mptcp_for_each_subflow(msk, subflow) {
-               struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
-
-               ip_mc_drop_socket(ssk);
-               ipv6_sock_mc_close(ssk);
-               ipv6_sock_ac_close(ssk);
-       }
-
-       release_sock(sk);
-       return inet6_release(sock);
-}
-
 static const struct proto_ops mptcp_v6_stream_ops = {
        .family            = PF_INET6,
        .owner             = THIS_MODULE,
-       .release           = mptcp6_release,
+       .release           = inet6_release,
        .bind              = mptcp_bind,
        .connect           = mptcp_stream_connect,
        .socketpair        = sock_no_socketpair,
index 3d47d67..d17d39c 100644
@@ -477,6 +477,11 @@ static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        if (!ipv6_unicast_destination(skb))
                goto drop;
 
+       if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
+               __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
+               return 0;
+       }
+
        return tcp_conn_request(&mptcp_subflow_request_sock_ops,
                                &subflow_request_sock_ipv6_ops, sk, skb);
 
index a9cb355..ffff8da 100644
@@ -105,13 +105,20 @@ static void ncsi_channel_monitor(struct timer_list *t)
        monitor_state = nc->monitor.state;
        spin_unlock_irqrestore(&nc->lock, flags);
 
-       if (!enabled || chained) {
-               ncsi_stop_channel_monitor(nc);
-               return;
-       }
+       if (!enabled)
+               return;         /* expected race disabling timer */
+       if (WARN_ON_ONCE(chained))
+               goto bad_state;
+
        if (state != NCSI_CHANNEL_INACTIVE &&
            state != NCSI_CHANNEL_ACTIVE) {
-               ncsi_stop_channel_monitor(nc);
+bad_state:
+               netdev_warn(ndp->ndev.dev,
+                           "Bad NCSI monitor state channel %d 0x%x %s queue\n",
+                           nc->id, state, chained ? "on" : "off");
+               spin_lock_irqsave(&nc->lock, flags);
+               nc->monitor.enabled = false;
+               spin_unlock_irqrestore(&nc->lock, flags);
                return;
        }
 
@@ -136,10 +143,9 @@ static void ncsi_channel_monitor(struct timer_list *t)
                ncsi_report_link(ndp, true);
                ndp->flags |= NCSI_DEV_RESHUFFLE;
 
-               ncsi_stop_channel_monitor(nc);
-
                ncm = &nc->modes[NCSI_MODE_LINK];
                spin_lock_irqsave(&nc->lock, flags);
+               nc->monitor.enabled = false;
                nc->state = NCSI_CHANNEL_INVISIBLE;
                ncm->data[2] &= ~0x1;
                spin_unlock_irqrestore(&nc->lock, flags);
index 1469365..1d519b0 100644
@@ -2962,6 +2962,7 @@ static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
        memset(&m, 0xFF, sizeof(m));
        memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
        m.src.u.all = mask->src.u.all;
+       m.src.l3num = tuple->src.l3num;
        m.dst.protonum = tuple->dst.protonum;
 
        nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK);
index 5b05487..db11e40 100644
@@ -218,9 +218,6 @@ int nf_conntrack_gre_packet(struct nf_conn *ct,
                            enum ip_conntrack_info ctinfo,
                            const struct nf_hook_state *state)
 {
-       if (state->pf != NFPROTO_IPV4)
-               return -NF_ACCEPT;
-
        if (!nf_ct_is_confirmed(ct)) {
                unsigned int *timeouts = nf_ct_timeout_lookup(ct);
 
index 5fa657b..c77ba86 100644
@@ -506,7 +506,7 @@ int nf_flow_table_init(struct nf_flowtable *flowtable)
 {
        int err;
 
-       INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
+       INIT_DELAYED_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
        flow_block_init(&flowtable->flow_block);
        init_rwsem(&flowtable->flow_block_lock);
 
index 224c8e5..f57f1a6 100644
@@ -6783,6 +6783,9 @@ static int nft_register_flowtable_net_hooks(struct net *net,
 
        list_for_each_entry(hook, hook_list, list) {
                list_for_each_entry(ft, &table->flowtables, list) {
+                       if (!nft_is_active_next(net, ft))
+                               continue;
+
                        list_for_each_entry(hook2, &ft->hook_list, list) {
                                if (hook->ops.dev == hook2->ops.dev &&
                                    hook->ops.pf == hook2->ops.pf) {
@@ -6842,6 +6845,7 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
        struct nft_hook *hook, *next;
        struct nft_trans *trans;
        bool unregister = false;
+       u32 flags;
        int err;
 
        err = nft_flowtable_parse_hook(ctx, nla[NFTA_FLOWTABLE_HOOK],
@@ -6856,6 +6860,17 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
                }
        }
 
+       if (nla[NFTA_FLOWTABLE_FLAGS]) {
+               flags = ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS]));
+               if (flags & ~NFT_FLOWTABLE_MASK)
+                       return -EOPNOTSUPP;
+               if ((flowtable->data.flags & NFT_FLOWTABLE_HW_OFFLOAD) ^
+                   (flags & NFT_FLOWTABLE_HW_OFFLOAD))
+                       return -EOPNOTSUPP;
+       } else {
+               flags = flowtable->data.flags;
+       }
+
        err = nft_register_flowtable_net_hooks(ctx->net, ctx->table,
                                               &flowtable_hook.list, flowtable);
        if (err < 0)
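
The validation added above ensures a flowtable update neither sets unknown bits nor toggles hardware offload after creation; the XOR isolates any change to the offload bit. A sketch with illustrative flag values standing in for the NFT_FLOWTABLE_* constants:

    #include <stdio.h>

    #define FT_HW_OFFLOAD 0x1       /* illustrative values */
    #define FT_COUNTER    0x2
    #define FT_MASK       (FT_HW_OFFLOAD | FT_COUNTER)

    /* 0 when the requested update is acceptable */
    static int validate_flags(unsigned int old_flags, unsigned int new_flags)
    {
            if (new_flags & ~FT_MASK)
                    return -1;      /* unknown flag */
            if ((old_flags ^ new_flags) & FT_HW_OFFLOAD)
                    return -1;      /* offload may not be toggled */
            return 0;
    }

    int main(void)
    {
            printf("keep offload: %d\n",
                   validate_flags(FT_HW_OFFLOAD, FT_HW_OFFLOAD));
            printf("toggle off:   %d\n", validate_flags(FT_HW_OFFLOAD, 0));
            return 0;
    }
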
@@ -6869,6 +6884,7 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
                goto err_flowtable_update_hook;
        }
 
+       nft_trans_flowtable_flags(trans) = flags;
        nft_trans_flowtable(trans) = flowtable;
        nft_trans_flowtable_update(trans) = true;
        INIT_LIST_HEAD(&nft_trans_flowtable_hooks(trans));
@@ -6963,8 +6979,10 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
        if (nla[NFTA_FLOWTABLE_FLAGS]) {
                flowtable->data.flags =
                        ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS]));
-               if (flowtable->data.flags & ~NFT_FLOWTABLE_MASK)
+               if (flowtable->data.flags & ~NFT_FLOWTABLE_MASK) {
+                       err = -EOPNOTSUPP;
                        goto err3;
+               }
        }
 
        write_pnet(&flowtable->data.net, net);
@@ -8176,6 +8194,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
                        break;
                case NFT_MSG_NEWFLOWTABLE:
                        if (nft_trans_flowtable_update(trans)) {
+                               nft_trans_flowtable(trans)->data.flags =
+                                       nft_trans_flowtable_flags(trans);
                                nf_tables_flowtable_notify(&trans->ctx,
                                                           nft_trans_flowtable(trans),
                                                           &nft_trans_flowtable_hooks(trans),
index bce6ca2..6bd31a7 100644
@@ -1351,14 +1351,6 @@ struct xt_counters *xt_counters_alloc(unsigned int counters)
 }
 EXPORT_SYMBOL(xt_counters_alloc);
 
-struct xt_table_info
-*xt_table_get_private_protected(const struct xt_table *table)
-{
-       return rcu_dereference_protected(table->private,
-                                        mutex_is_locked(&xt[table->af].mutex));
-}
-EXPORT_SYMBOL(xt_table_get_private_protected);
-
 struct xt_table_info *
 xt_replace_table(struct xt_table *table,
              unsigned int num_counters,
@@ -1366,6 +1358,7 @@ xt_replace_table(struct xt_table *table,
              int *error)
 {
        struct xt_table_info *private;
+       unsigned int cpu;
        int ret;
 
        ret = xt_jumpstack_alloc(newinfo);
@@ -1375,20 +1368,47 @@ xt_replace_table(struct xt_table *table,
        }
 
        /* Do the substitution. */
-       private = xt_table_get_private_protected(table);
+       local_bh_disable();
+       private = table->private;
 
        /* Check inside lock: is the old number correct? */
        if (num_counters != private->number) {
                pr_debug("num_counters != table->private->number (%u/%u)\n",
                         num_counters, private->number);
+               local_bh_enable();
                *error = -EAGAIN;
                return NULL;
        }
 
        newinfo->initial_entries = private->initial_entries;
+       /*
+        * Ensure contents of newinfo are visible before assigning to
+        * private.
+        */
+       smp_wmb();
+       table->private = newinfo;
+
+       /* make sure all cpus see new ->private value */
+       smp_mb();
 
-       rcu_assign_pointer(table->private, newinfo);
-       synchronize_rcu();
+       /*
+        * Even though table entries have now been swapped, other CPU's
+        * may still be using the old entries...
+        */
+       local_bh_enable();
+
+       /* ... so wait for even xt_recseq on all cpus */
+       for_each_possible_cpu(cpu) {
+               seqcount_t *s = &per_cpu(xt_recseq, cpu);
+               u32 seq = raw_read_seqcount(s);
+
+               if (seq & 1) {
+                       do {
+                               cond_resched();
+                               cpu_relax();
+                       } while (seq == raw_read_seqcount(s));
+               }
+       }
 
        audit_log_nfcfg(table->name, table->af, private->number,
                        !private->number ? AUDIT_XT_OP_REGISTER :
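
The replacement path above reverts from RCU back to the original xt_recseq scheme: publish the new table with a write barrier, then spin until every CPU's sequence counter is observed even (or has advanced), proving no reader still walks the old entries. A compilable userspace sketch of the drain loop, with C11 atomics standing in for the per-CPU seqcount:

    #include <sched.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define NR_CPUS 4

    static _Atomic unsigned int recseq[NR_CPUS];

    static void wait_for_readers(void)
    {
            for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                    unsigned int seq = atomic_load(&recseq[cpu]);

                    /* odd: a reader is mid-section on that CPU; wait
                     * until its counter moves past the sampled value */
                    while ((seq & 1) && seq == atomic_load(&recseq[cpu]))
                            sched_yield();
            }
    }

    int main(void)
    {
            wait_for_readers();     /* counters start even: no wait */
            printf("old table quiesced\n");
            return 0;
    }
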
@@ -1424,12 +1444,12 @@ struct xt_table *xt_register_table(struct net *net,
        }
 
        /* Simplifies replace_table code. */
-       rcu_assign_pointer(table->private, bootstrap);
+       table->private = bootstrap;
 
        if (!xt_replace_table(table, 0, newinfo, &ret))
                goto unlock;
 
-       private = xt_table_get_private_protected(table);
+       private = table->private;
        pr_debug("table->private->number = %u\n", private->number);
 
        /* save number of initial entries */
@@ -1452,8 +1472,7 @@ void *xt_unregister_table(struct xt_table *table)
        struct xt_table_info *private;
 
        mutex_lock(&xt[table->af].mutex);
-       private = xt_table_get_private_protected(table);
-       RCU_INIT_POINTER(table->private, NULL);
+       private = table->private;
        list_del(&table->list);
        mutex_unlock(&xt[table->af].mutex);
        audit_log_nfcfg(table->name, table->af, private->number,
index d257ed3..a3b46f8 100644
@@ -108,11 +108,13 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
                                          llcp_sock->service_name_len,
                                          GFP_KERNEL);
        if (!llcp_sock->service_name) {
+               nfc_llcp_local_put(llcp_sock->local);
                ret = -ENOMEM;
                goto put_dev;
        }
        llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
        if (llcp_sock->ssap == LLCP_SAP_MAX) {
+               nfc_llcp_local_put(llcp_sock->local);
                kfree(llcp_sock->service_name);
                llcp_sock->service_name = NULL;
                ret = -EADDRINUSE;
@@ -671,6 +673,10 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
                ret = -EISCONN;
                goto error;
        }
+       if (sk->sk_state == LLCP_CONNECTING) {
+               ret = -EINPROGRESS;
+               goto error;
+       }
 
        dev = nfc_get_device(addr->dev_idx);
        if (dev == NULL) {
@@ -702,6 +708,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
        llcp_sock->local = nfc_llcp_local_get(local);
        llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
        if (llcp_sock->ssap == LLCP_SAP_MAX) {
+               nfc_llcp_local_put(llcp_sock->local);
                ret = -ENOMEM;
                goto put_dev;
        }
@@ -743,9 +750,12 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
 
 sock_unlink:
        nfc_llcp_sock_unlink(&local->connecting_sockets, sk);
+       kfree(llcp_sock->service_name);
+       llcp_sock->service_name = NULL;
 
 sock_llcp_release:
        nfc_llcp_put_ssap(local, llcp_sock->ssap);
+       nfc_llcp_local_put(llcp_sock->local);
 
 put_dev:
        nfc_put_device(dev);
index 5eddfe7..d217bd9 100644
@@ -271,9 +271,11 @@ static void ovs_ct_update_key(const struct sk_buff *skb,
 /* This is called to initialize CT key fields possibly coming in from the local
  * stack.
  */
-void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key)
+void ovs_ct_fill_key(const struct sk_buff *skb,
+                    struct sw_flow_key *key,
+                    bool post_ct)
 {
-       ovs_ct_update_key(skb, NULL, key, false, false);
+       ovs_ct_update_key(skb, NULL, key, post_ct, false);
 }
 
 int ovs_ct_put_key(const struct sw_flow_key *swkey,
@@ -1332,7 +1334,7 @@ int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key)
        if (skb_nfct(skb)) {
                nf_conntrack_put(skb_nfct(skb));
                nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
-               ovs_ct_fill_key(skb, key);
+               ovs_ct_fill_key(skb, key, false);
        }
 
        return 0;
@@ -2032,10 +2034,10 @@ static int ovs_ct_limit_del_zone_limit(struct nlattr *nla_zone_limit,
 static int ovs_ct_limit_get_default_limit(struct ovs_ct_limit_info *info,
                                          struct sk_buff *reply)
 {
-       struct ovs_zone_limit zone_limit;
-
-       zone_limit.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE;
-       zone_limit.limit = info->default_limit;
+       struct ovs_zone_limit zone_limit = {
+               .zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE,
+               .limit   = info->default_limit,
+       };
 
        return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
 }
index 59dc327..317e525 100644
@@ -25,7 +25,8 @@ int ovs_ct_execute(struct net *, struct sk_buff *, struct sw_flow_key *,
                   const struct ovs_conntrack_info *);
 int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key);
 
-void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key);
+void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key,
+                    bool post_ct);
 int ovs_ct_put_key(const struct sw_flow_key *swkey,
                   const struct sw_flow_key *output, struct sk_buff *skb);
 void ovs_ct_free_action(const struct nlattr *a);
@@ -74,7 +75,8 @@ static inline int ovs_ct_clear(struct sk_buff *skb,
 }
 
 static inline void ovs_ct_fill_key(const struct sk_buff *skb,
-                                  struct sw_flow_key *key)
+                                  struct sw_flow_key *key,
+                                  bool post_ct)
 {
        key->ct_state = 0;
        key->ct_zone = 0;
index c7f34d6..e586424 100644
@@ -857,6 +857,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
        struct tc_skb_ext *tc_ext;
 #endif
+       bool post_ct = false;
        int res, err;
 
        /* Extract metadata from packet. */
@@ -895,6 +896,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
                tc_ext = skb_ext_find(skb, TC_SKB_EXT);
                key->recirc_id = tc_ext ? tc_ext->chain : 0;
                OVS_CB(skb)->mru = tc_ext ? tc_ext->mru : 0;
+               post_ct = tc_ext ? tc_ext->post_ct : false;
        } else {
                key->recirc_id = 0;
        }
@@ -904,7 +906,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
 
        err = key_extract(skb, key);
        if (!err)
-               ovs_ct_fill_key(skb, key);   /* Must be after key_extract(). */
+               ovs_ct_fill_key(skb, key, post_ct);   /* Must be after key_extract(). */
        return err;
 }
 
index edb6ac1..1e4fb56 100644
@@ -271,7 +271,10 @@ static int qrtr_tx_wait(struct qrtr_node *node, int dest_node, int dest_port,
                flow = kzalloc(sizeof(*flow), GFP_KERNEL);
                if (flow) {
                        init_waitqueue_head(&flow->resume_tx);
-                       radix_tree_insert(&node->qrtr_tx_flow, key, flow);
+                       if (radix_tree_insert(&node->qrtr_tx_flow, key, flow)) {
+                               kfree(flow);
+                               flow = NULL;
+                       }
                }
        }
        mutex_unlock(&node->qrtr_tx_lock);
@@ -1058,6 +1061,11 @@ static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
        rc = copied;
 
        if (addr) {
+               /* There is an anonymous 2-byte hole after sq_family,
+                * make sure to clear it.
+                */
+               memset(addr, 0, sizeof(*addr));
+
                addr->sq_family = AF_QIPCRTR;
                addr->sq_node = cb->src_node;
                addr->sq_port = cb->src_port;
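
sockaddr_qrtr has an anonymous two-byte hole after sq_family, so assigning the named fields one by one leaves kernel stack bytes in the padding that copy_to_user() would then leak; the memset() closes that hole. A small sketch of the general pattern:

    #include <stdio.h>
    #include <string.h>

    struct addr {
            unsigned short family;  /* 2 bytes + 2 bytes of padding */
            unsigned int   node;
            unsigned int   port;
    };

    static void fill(struct addr *a)
    {
            memset(a, 0, sizeof(*a));       /* clears the hole too */
            a->family = 42;
            a->node   = 1;
            a->port   = 2;
    }

    int main(void)
    {
            struct addr a;

            fill(&a);
            printf("sizeof=%zu (named fields sum to 10)\n", sizeof(a));
            return 0;
    }
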
index 071a261..4fc66ff 100644
@@ -180,6 +180,7 @@ void rds_message_put(struct rds_message *rm)
                rds_message_purge(rm);
 
                kfree(rm);
+               rm = NULL;
        }
 }
 EXPORT_SYMBOL_GPL(rds_message_put);
@@ -347,8 +348,9 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
        rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE);
        rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
        if (IS_ERR(rm->data.op_sg)) {
+               void *err = ERR_CAST(rm->data.op_sg);
                rds_message_put(rm);
-               return ERR_CAST(rm->data.op_sg);
+               return err;
        }
 
        for (i = 0; i < rm->data.op_nents; ++i) {
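
The reordering above fixes a use-after-free: rds_message_put() can free rm, so the error pointer has to be taken from rm->data.op_sg before the put. The shape of the fix, in a self-contained sketch:

    #include <stdio.h>
    #include <stdlib.h>

    struct msg {
            void *sg;
    };

    static void *map_pages_err_path(void)
    {
            struct msg *m = malloc(sizeof(*m));

            if (!m)
                    return NULL;
            m->sg = (void *)-12L;   /* pretend the allocation failed */
            void *err = m->sg;      /* capture first... */
            free(m);                /* ...then drop the container */
            return err;             /* no dangling access */
    }

    int main(void)
    {
            printf("err=%ld\n", (long)map_pages_err_path());
            return 0;
    }
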
index 985d0b7..fe5264b 100644
@@ -665,7 +665,7 @@ static void rds_send_remove_from_sock(struct list_head *messages, int status)
 unlock_and_drop:
                spin_unlock_irqrestore(&rm->m_rs_lock, flags);
                rds_message_put(rm);
-               if (was_on_sock)
+               if (was_on_sock && rm)
                        rds_message_put(rm);
        }
 
index 68d6ef9..ac15a94 100644
@@ -69,7 +69,7 @@ struct rfkill {
 
 struct rfkill_int_event {
        struct list_head        list;
-       struct rfkill_event     ev;
+       struct rfkill_event_ext ev;
 };
 
 struct rfkill_data {
@@ -253,7 +253,8 @@ static void rfkill_global_led_trigger_unregister(void)
 }
 #endif /* CONFIG_RFKILL_LEDS */
 
-static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
+static void rfkill_fill_event(struct rfkill_event_ext *ev,
+                             struct rfkill *rfkill,
                              enum rfkill_operation op)
 {
        unsigned long flags;
@@ -1237,7 +1238,7 @@ static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *pos)
 {
        struct rfkill *rfkill;
-       struct rfkill_event ev;
+       struct rfkill_event_ext ev;
        int ret;
 
        /* we don't need the 'hard' variable but accept it */
index b919826..f6d5755 100644
@@ -158,7 +158,7 @@ static int __tcf_action_put(struct tc_action *p, bool bind)
        return 0;
 }
 
-int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
+static int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
 {
        int ret = 0;
 
@@ -184,7 +184,18 @@ int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
 
        return ret;
 }
-EXPORT_SYMBOL(__tcf_idr_release);
+
+int tcf_idr_release(struct tc_action *a, bool bind)
+{
+       const struct tc_action_ops *ops = a->ops;
+       int ret;
+
+       ret = __tcf_idr_release(a, bind, false);
+       if (ret == ACT_P_DELETED)
+               module_put(ops->owner);
+       return ret;
+}
+EXPORT_SYMBOL(tcf_idr_release);
 
 static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
 {
@@ -493,6 +504,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
        }
 
        p->idrinfo = idrinfo;
+       __module_get(ops->owner);
        p->ops = ops;
        *a = p;
        return 0;
@@ -992,7 +1004,8 @@ struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla,
 struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
                                    struct nlattr *nla, struct nlattr *est,
                                    char *name, int ovr, int bind,
-                                   struct tc_action_ops *a_o, bool rtnl_held,
+                                   struct tc_action_ops *a_o, int *init_res,
+                                   bool rtnl_held,
                                    struct netlink_ext_ack *extack)
 {
        struct nla_bitfield32 flags = { 0, 0 };
@@ -1028,6 +1041,7 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
        }
        if (err < 0)
                goto err_out;
+       *init_res = err;
 
        if (!name && tb[TCA_ACT_COOKIE])
                tcf_set_action_cookie(&a->act_cookie, cookie);
@@ -1035,13 +1049,6 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
        if (!name)
                a->hw_stats = hw_stats;
 
-       /* module count goes up only when brand new policy is created
-        * if it exists and is only bound to in a_o->init() then
-        * ACT_P_CREATED is not returned (a zero is).
-        */
-       if (err != ACT_P_CREATED)
-               module_put(a_o->owner);
-
        return a;
 
 err_out:
@@ -1056,7 +1063,7 @@ err_out:
 
 int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
                    struct nlattr *est, char *name, int ovr, int bind,
-                   struct tc_action *actions[], size_t *attr_size,
+                   struct tc_action *actions[], int init_res[], size_t *attr_size,
                    bool rtnl_held, struct netlink_ext_ack *extack)
 {
        struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {};
@@ -1084,7 +1091,8 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
 
        for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
                act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind,
-                                       ops[i - 1], rtnl_held, extack);
+                                       ops[i - 1], &init_res[i - 1], rtnl_held,
+                                       extack);
                if (IS_ERR(act)) {
                        err = PTR_ERR(act);
                        goto err;
@@ -1100,7 +1108,8 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
        tcf_idr_insert_many(actions);
 
        *attr_size = tcf_action_full_attrs_size(sz);
-       return i - 1;
+       err = i - 1;
+       goto err_mod;
 
 err:
        tcf_action_destroy(actions, bind);
@@ -1497,12 +1506,13 @@ static int tcf_action_add(struct net *net, struct nlattr *nla,
                          struct netlink_ext_ack *extack)
 {
        size_t attr_size = 0;
-       int loop, ret;
+       int loop, ret, i;
        struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
+       int init_res[TCA_ACT_MAX_PRIO] = {};
 
        for (loop = 0; loop < 10; loop++) {
                ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0,
-                                     actions, &attr_size, true, extack);
+                                     actions, init_res, &attr_size, true, extack);
                if (ret != -EAGAIN)
                        break;
        }
@@ -1510,8 +1520,12 @@ static int tcf_action_add(struct net *net, struct nlattr *nla,
        if (ret < 0)
                return ret;
        ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
-       if (ovr)
-               tcf_action_put_many(actions);
+
+       /* only put existing actions */
+       for (i = 0; i < TCA_ACT_MAX_PRIO; i++)
+               if (init_res[i] == ACT_P_CREATED)
+                       actions[i] = NULL;
+       tcf_action_put_many(actions);
 
        return ret;
 }
index f0a0aa1..16e888a 100644
@@ -945,13 +945,14 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
        tcf_lastuse_update(&c->tcf_tm);
 
        if (clear) {
+               qdisc_skb_cb(skb)->post_ct = false;
                ct = nf_ct_get(skb, &ctinfo);
                if (ct) {
                        nf_conntrack_put(&ct->ct_general);
                        nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
                }
 
-               goto out;
+               goto out_clear;
        }
 
        family = tcf_ct_skb_nf_family(skb);
@@ -1030,8 +1031,9 @@ out_push:
        skb_push_rcsum(skb, nh_ofs);
 
 out:
-       tcf_action_update_bstats(&c->common, skb);
        qdisc_skb_cb(skb)->post_ct = true;
+out_clear:
+       tcf_action_update_bstats(&c->common, skb);
        if (defrag)
                qdisc_skb_cb(skb)->pkt_len = skb->len;
        return retval;
index e37556c..340d5af 100644 (file)
@@ -646,7 +646,7 @@ static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
        struct net_device *dev = block_cb->indr.dev;
        struct Qdisc *sch = block_cb->indr.sch;
        struct netlink_ext_ack extack = {};
-       struct flow_block_offload bo;
+       struct flow_block_offload bo = {};
 
        tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
                               block_cb->indr.binder_type,
@@ -1629,6 +1629,7 @@ int tcf_classify_ingress(struct sk_buff *skb,
                        return TC_ACT_SHOT;
                ext->chain = last_executed_chain;
                ext->mru = qdisc_skb_cb(skb)->mru;
+               ext->post_ct = qdisc_skb_cb(skb)->post_ct;
        }
 
        return ret;
@@ -3039,6 +3040,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
 {
 #ifdef CONFIG_NET_CLS_ACT
        {
+               int init_res[TCA_ACT_MAX_PRIO] = {};
                struct tc_action *act;
                size_t attr_size = 0;
 
@@ -3050,12 +3052,11 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
                                return PTR_ERR(a_o);
                        act = tcf_action_init_1(net, tp, tb[exts->police],
                                                rate_tlv, "police", ovr,
-                                               TCA_ACT_BIND, a_o, rtnl_held,
-                                               extack);
-                       if (IS_ERR(act)) {
-                               module_put(a_o->owner);
+                                               TCA_ACT_BIND, a_o, init_res,
+                                               rtnl_held, extack);
+                       module_put(a_o->owner);
+                       if (IS_ERR(act))
                                return PTR_ERR(act);
-                       }
 
                        act->type = exts->type = TCA_OLD_COMPAT;
                        exts->actions[0] = act;
@@ -3066,8 +3067,8 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
 
                        err = tcf_action_init(net, tp, tb[exts->action],
                                              rate_tlv, NULL, ovr, TCA_ACT_BIND,
-                                             exts->actions, &attr_size,
-                                             rtnl_held, extack);
+                                             exts->actions, init_res,
+                                             &attr_size, rtnl_held, extack);
                        if (err < 0)
                                return err;
                        exts->nr_actions = err;
index d097b5c..c69a4ba 100644 (file)
@@ -1451,7 +1451,7 @@ static int fl_set_key_ct(struct nlattr **tb,
                               &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
                               sizeof(key->ct_state));
 
-               err = fl_validate_ct_state(mask->ct_state,
+               err = fl_validate_ct_state(key->ct_state & mask->ct_state,
                                           tb[TCA_FLOWER_KEY_CT_STATE_MASK],
                                           extack);
                if (err)
index 50f680f..2adbd94 100644 (file)
@@ -345,6 +345,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
        struct sk_buff **old = NULL;
        unsigned int mask;
        u32 max_P;
+       u8 *stab;
 
        if (opt == NULL)
                return -EINVAL;
@@ -361,8 +362,8 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
        max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;
 
        ctl = nla_data(tb[TCA_CHOKE_PARMS]);
-
-       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
+       stab = nla_data(tb[TCA_CHOKE_STAB]);
+       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab))
                return -EINVAL;
 
        if (ctl->limit > CHOKE_MAX_QUEUE)
@@ -412,7 +413,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
 
        red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
                      ctl->Plog, ctl->Scell_log,
-                     nla_data(tb[TCA_CHOKE_STAB]),
+                     stab,
                      max_P);
        red_set_vars(&q->vars);
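
red_check_params() now receives the Scell stab table alongside the thresholds, so the RED-family qdiscs in these hunks (choke, gred, red, sfq) can reject a malformed table before installing it; sfq passes NULL since it has no table. A hedged userspace reconstruction of the kind of check this enables, assuming the table entries are used as shift amounts (constants illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RED_STAB_SIZE 256             /* size of the RED Scell lookup table */

/* Hedged reconstruction: each stab entry ends up as a shift amount, so
 * any entry of 32 or more would be an invalid shift and is rejected.
 */
static bool stab_ok(const uint8_t *stab)
{
        if (!stab)
                return true;          /* sfq-style callers: nothing to check */
        for (int i = 0; i < RED_STAB_SIZE; i++)
                if (stab[i] >= 32)
                        return false;
        return true;
}

int main(void)
{
        uint8_t good[RED_STAB_SIZE] = { 0 };
        uint8_t bad[RED_STAB_SIZE]  = { [7] = 40 };   /* 40-bit shift: invalid */

        printf("good=%d bad=%d\n", stab_ok(good), stab_ok(bad));
        return 0;
}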
 
index e0bc775..f4132dc 100644 (file)
@@ -480,7 +480,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
        struct gred_sched *table = qdisc_priv(sch);
        struct gred_sched_data *q = table->tab[dp];
 
-       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log)) {
+       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) {
                NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
                return -EINVAL;
        }
index dff3adf..081c11d 100644 (file)
@@ -1020,6 +1020,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt,
        struct nlattr *tb[TCA_HTB_MAX + 1];
        struct tc_htb_glob *gopt;
        unsigned int ntx;
+       bool offload;
        int err;
 
        qdisc_watchdog_init(&q->watchdog, sch);
@@ -1044,9 +1045,9 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt,
        if (gopt->version != HTB_VER >> 16)
                return -EINVAL;
 
-       q->offload = nla_get_flag(tb[TCA_HTB_OFFLOAD]);
+       offload = nla_get_flag(tb[TCA_HTB_OFFLOAD]);
 
-       if (q->offload) {
+       if (offload) {
                if (sch->parent != TC_H_ROOT)
                        return -EOPNOTSUPP;
 
@@ -1076,7 +1077,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt,
                q->rate2quantum = 1;
        q->defcls = gopt->defcls;
 
-       if (!q->offload)
+       if (!offload)
                return 0;
 
        for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
@@ -1107,12 +1108,14 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt,
        if (err)
                goto err_free_qdiscs;
 
+       /* Defer this assignment, so that htb_destroy skips offload-related
+        * parts (especially calling ndo_setup_tc) on errors.
+        */
+       q->offload = true;
+
        return 0;
 
 err_free_qdiscs:
-       /* TC_HTB_CREATE call failed, avoid any further calls to the driver. */
-       q->offload = false;
-
        for (ntx = 0; ntx < q->num_direct_qdiscs && q->direct_qdiscs[ntx];
             ntx++)
                qdisc_put(q->direct_qdiscs[ntx]);
@@ -1340,8 +1343,12 @@ htb_select_queue(struct Qdisc *sch, struct tcmsg *tcm)
 {
        struct net_device *dev = qdisc_dev(sch);
        struct tc_htb_qopt_offload offload_opt;
+       struct htb_sched *q = qdisc_priv(sch);
        int err;
 
+       if (!q->offload)
+               return sch->dev_queue;
+
        offload_opt = (struct tc_htb_qopt_offload) {
                .command = TC_HTB_LEAF_QUERY_QUEUE,
                .classid = TC_H_MIN(tcm->tcm_parent),
@@ -1668,9 +1675,10 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
                                          cl->parent->common.classid,
                                          NULL);
                if (q->offload) {
-                       if (new_q)
+                       if (new_q) {
                                htb_set_lockdep_class_child(new_q);
-                       htb_parent_to_leaf_offload(sch, dev_queue, new_q);
+                               htb_parent_to_leaf_offload(sch, dev_queue, new_q);
+                       }
                }
        }
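
The htb_init() change above publishes q->offload only after every driver call has succeeded, so htb_destroy() on an error path never issues offload teardown for a half-initialized device, and htb_select_queue() can bail out early in the non-offload case. A small sketch of this set-the-flag-last pattern, with invented names:

#include <stdbool.h>
#include <stdio.h>

struct htb_like { bool offload; };

/* Set-the-flag-last: offload is only published once every driver call
 * has succeeded, so error-path teardown never talks to the driver for a
 * device that was never fully set up.
 */
static int init_like(struct htb_like *q, bool want_offload, bool driver_ok)
{
        if (!want_offload)
                return 0;
        if (!driver_ok)
                return -1;            /* q->offload stays false */
        q->offload = true;            /* deferred assignment, as in the patch */
        return 0;
}

int main(void)
{
        struct htb_like q = { false };

        printf("fail: rc=%d offload=%d\n", init_like(&q, true, false), q.offload);
        printf("ok:   rc=%d offload=%d\n", init_like(&q, true, true), q.offload);
        return 0;
}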
 
index b4ae34d..40adf1f 100644 (file)
@@ -242,6 +242,7 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb,
        unsigned char flags;
        int err;
        u32 max_P;
+       u8 *stab;
 
        if (tb[TCA_RED_PARMS] == NULL ||
            tb[TCA_RED_STAB] == NULL)
@@ -250,7 +251,9 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb,
        max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
 
        ctl = nla_data(tb[TCA_RED_PARMS]);
-       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
+       stab = nla_data(tb[TCA_RED_STAB]);
+       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog,
+                             ctl->Scell_log, stab))
                return -EINVAL;
 
        err = red_get_flags(ctl->flags, TC_RED_HISTORIC_FLAGS,
@@ -288,7 +291,7 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb,
        red_set_parms(&q->parms,
                      ctl->qth_min, ctl->qth_max, ctl->Wlog,
                      ctl->Plog, ctl->Scell_log,
-                     nla_data(tb[TCA_RED_STAB]),
+                     stab,
                      max_P);
        red_set_vars(&q->vars);
 
index b25e514..066754a 100644 (file)
@@ -647,7 +647,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
        }
 
        if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
-                                       ctl_v1->Wlog, ctl_v1->Scell_log))
+                                       ctl_v1->Wlog, ctl_v1->Scell_log, NULL))
                return -EINVAL;
        if (ctl_v1 && ctl_v1->qth_min) {
                p = kmalloc(sizeof(*p), GFP_KERNEL);
index 2f1f0a3..6af6b95 100644 (file)
@@ -134,6 +134,9 @@ teql_destroy(struct Qdisc *sch)
        struct teql_sched_data *dat = qdisc_priv(sch);
        struct teql_master *master = dat->m;
 
+       if (!master)
+               return;
+
        prev = master->slaves;
        if (prev) {
                do {
index c3e89c7..bd08807 100644 (file)
@@ -664,8 +664,8 @@ static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp)
        if (!(type & IPV6_ADDR_UNICAST))
                return 0;
 
-       return sp->inet.freebind || net->ipv6.sysctl.ip_nonlocal_bind ||
-               ipv6_chk_addr(net, in6, NULL, 0);
+       return ipv6_can_nonlocal_bind(net, &sp->inet) ||
+              ipv6_chk_addr(net, in6, NULL, 0);
 }
 
 /* This function checks if the address is a valid address to be used for
@@ -954,8 +954,7 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
                        net = sock_net(&opt->inet.sk);
                        rcu_read_lock();
                        dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id);
-                       if (!dev || !(opt->inet.freebind ||
-                                     net->ipv6.sysctl.ip_nonlocal_bind ||
+                       if (!dev || !(ipv6_can_nonlocal_bind(net, &opt->inet) ||
                                      ipv6_chk_addr(net, &addr->v6.sin6_addr,
                                                    dev, 0))) {
                                rcu_read_unlock();
index 6614c9f..a6aa17d 100644 (file)
@@ -584,13 +584,6 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
                goto out;
        }
 
-       rcu_read_lock();
-       if (__sk_dst_get(sk) != tp->dst) {
-               dst_hold(tp->dst);
-               sk_setup_caps(sk, tp->dst);
-       }
-       rcu_read_unlock();
-
        /* pack up chunks */
        pkt_count = sctp_packet_pack(packet, head, gso, gfp);
        if (!pkt_count) {
index 3fd06a2..5cb1aa5 100644 (file)
@@ -1135,6 +1135,7 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
 
 static void sctp_outq_flush_transports(struct sctp_flush_ctx *ctx)
 {
+       struct sock *sk = ctx->asoc->base.sk;
        struct list_head *ltransport;
        struct sctp_packet *packet;
        struct sctp_transport *t;
@@ -1144,6 +1145,12 @@ static void sctp_outq_flush_transports(struct sctp_flush_ctx *ctx)
                t = list_entry(ltransport, struct sctp_transport, send_ready);
                packet = &t->packet;
                if (!sctp_packet_empty(packet)) {
+                       rcu_read_lock();
+                       if (t->dst && __sk_dst_get(sk) != t->dst) {
+                               dst_hold(t->dst);
+                               sk_setup_caps(sk, t->dst);
+                       }
+                       rcu_read_unlock();
                        error = sctp_packet_transmit(packet, ctx->gfp);
                        if (error < 0)
                                ctx->q->asoc->base.sk->sk_err = -error;
index bd4678d..6dff643 100644 (file)
@@ -1825,11 +1825,14 @@ static int
 svcauth_gss_release(struct svc_rqst *rqstp)
 {
        struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
-       struct rpc_gss_wire_cred *gc = &gsd->clcred;
+       struct rpc_gss_wire_cred *gc;
        struct xdr_buf *resbuf = &rqstp->rq_res;
        int stat = -EINVAL;
        struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
 
+       if (!gsd)
+               goto out;
+       gc = &gsd->clcred;
        if (gc->gc_proc != RPC_GSS_PROC_DATA)
                goto out;
        /* Release can be called twice, but we only wrap once. */
@@ -1870,10 +1873,10 @@ out_err:
        if (rqstp->rq_cred.cr_group_info)
                put_group_info(rqstp->rq_cred.cr_group_info);
        rqstp->rq_cred.cr_group_info = NULL;
-       if (gsd->rsci)
+       if (gsd && gsd->rsci) {
                cache_put(&gsd->rsci->h, sn->rsc_cache);
-       gsd->rsci = NULL;
-
+               gsd->rsci = NULL;
+       }
        return stat;
 }
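
The svcauth_gss_release() hunks harden the release path against rq_auth_data never having been allocated: an early NULL check, plus a guarded common cleanup that is correct on both the early and normal exits. A compact userspace model of that shape, names illustrative:

#include <stdio.h>

struct gsd_like { int cache_ref; };

/* Bail out early when per-request data was never allocated, and guard
 * the shared cleanup so it is safe on both exit paths.
 */
static int release_like(struct gsd_like *gsd)
{
        int stat = -1;

        if (!gsd)
                goto out;             /* nothing was set up for this request */
        /* ... the normal unwrap/release work would go here ... */
        stat = 0;
out:
        if (gsd && gsd->cache_ref)
                gsd->cache_ref = 0;   /* drop the cache reference exactly once */
        return stat;
}

int main(void)
{
        struct gsd_like gsd = { .cache_ref = 1 };
        int a = release_like(NULL);
        int b = release_like(&gsd);

        printf("no-data=%d normal=%d ref=%d\n", a, b, gsd.cache_ref);
        return 0;
}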
 
index 61fb8a1..d76dc9d 100644 (file)
@@ -1413,7 +1413,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
 
  sendit:
        if (svc_authorise(rqstp))
-               goto close;
+               goto close_xprt;
        return 1;               /* Caller can now send it */
 
 release_dropit:
@@ -1425,6 +1425,8 @@ release_dropit:
        return 0;
 
  close:
+       svc_authorise(rqstp);
+close_xprt:
        if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
                svc_close_xprt(rqstp->rq_xprt);
        dprintk("svc: svc_process close\n");
@@ -1433,7 +1435,7 @@ release_dropit:
 err_short_len:
        svc_printk(rqstp, "short len %zd, dropping request\n",
                        argv->iov_len);
-       goto close;
+       goto close_xprt;
 
 err_bad_rpc:
        serv->sv_stats->rpcbadfmt++;
index dcc50ae..3cdd71a 100644 (file)
@@ -1060,7 +1060,7 @@ static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, st
        struct svc_xprt *xprt;
        int ret = 0;
 
-       spin_lock(&serv->sv_lock);
+       spin_lock_bh(&serv->sv_lock);
        list_for_each_entry(xprt, xprt_list, xpt_list) {
                if (xprt->xpt_net != net)
                        continue;
@@ -1068,7 +1068,7 @@ static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, st
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                svc_xprt_enqueue(xprt);
        }
-       spin_unlock(&serv->sv_lock);
+       spin_unlock_bh(&serv->sv_lock);
        return ret;
 }
 
index 4a1edbb..9150df3 100644 (file)
@@ -252,9 +252,9 @@ xprt_setup_rdma_bc(struct xprt_create *args)
        xprt->timeout = &xprt_rdma_bc_timeout;
        xprt_set_bound(xprt);
        xprt_set_connected(xprt);
-       xprt->bind_timeout = RPCRDMA_BIND_TO;
-       xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
-       xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;
+       xprt->bind_timeout = 0;
+       xprt->reestablish_timeout = 0;
+       xprt->idle_timeout = 0;
 
        xprt->prot = XPRT_TRANSPORT_BC_RDMA;
        xprt->ops = &xprt_rdma_bc_procs;
index 6d28f23..7d34290 100644 (file)
@@ -266,46 +266,33 @@ void svc_rdma_release_rqst(struct svc_rqst *rqstp)
                svc_rdma_recv_ctxt_put(rdma, ctxt);
 }
 
-static bool svc_rdma_refresh_recvs(struct svcxprt_rdma *rdma,
-                                  unsigned int wanted, bool temp)
+static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
+                               struct svc_rdma_recv_ctxt *ctxt)
 {
-       const struct ib_recv_wr *bad_wr = NULL;
-       struct svc_rdma_recv_ctxt *ctxt;
-       struct ib_recv_wr *recv_chain;
        int ret;
 
-       recv_chain = NULL;
-       while (wanted--) {
-               ctxt = svc_rdma_recv_ctxt_get(rdma);
-               if (!ctxt)
-                       break;
-
-               trace_svcrdma_post_recv(ctxt);
-               ctxt->rc_temp = temp;
-               ctxt->rc_recv_wr.next = recv_chain;
-               recv_chain = &ctxt->rc_recv_wr;
-               rdma->sc_pending_recvs++;
-       }
-       if (!recv_chain)
-               return false;
-
-       ret = ib_post_recv(rdma->sc_qp, recv_chain, &bad_wr);
+       trace_svcrdma_post_recv(ctxt);
+       ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL);
        if (ret)
                goto err_post;
-       return true;
+       return 0;
 
 err_post:
-       while (bad_wr) {
-               ctxt = container_of(bad_wr, struct svc_rdma_recv_ctxt,
-                                   rc_recv_wr);
-               bad_wr = bad_wr->next;
-               svc_rdma_recv_ctxt_put(rdma, ctxt);
-       }
-
        trace_svcrdma_rq_post_err(rdma, ret);
-       /* Since we're destroying the xprt, no need to reset
-        * sc_pending_recvs. */
-       return false;
+       svc_rdma_recv_ctxt_put(rdma, ctxt);
+       return ret;
+}
+
+static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
+{
+       struct svc_rdma_recv_ctxt *ctxt;
+
+       if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
+               return 0;
+       ctxt = svc_rdma_recv_ctxt_get(rdma);
+       if (!ctxt)
+               return -ENOMEM;
+       return __svc_rdma_post_recv(rdma, ctxt);
 }
 
 /**
@@ -316,7 +303,20 @@ err_post:
  */
 bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
 {
-       return svc_rdma_refresh_recvs(rdma, rdma->sc_max_requests, true);
+       struct svc_rdma_recv_ctxt *ctxt;
+       unsigned int i;
+       int ret;
+
+       for (i = 0; i < rdma->sc_max_requests; i++) {
+               ctxt = svc_rdma_recv_ctxt_get(rdma);
+               if (!ctxt)
+                       return false;
+               ctxt->rc_temp = true;
+               ret = __svc_rdma_post_recv(rdma, ctxt);
+               if (ret)
+                       return false;
+       }
+       return true;
 }
 
 /**
@@ -324,6 +324,8 @@ bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
  * @cq: Completion Queue context
  * @wc: Work Completion object
  *
+ * NB: The svc_xprt/svcxprt_rdma is pinned whenever the Receive
+ * completion handler might be running.
  */
 static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
 {
@@ -331,8 +333,6 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
        struct ib_cqe *cqe = wc->wr_cqe;
        struct svc_rdma_recv_ctxt *ctxt;
 
-       rdma->sc_pending_recvs--;
-
        /* WARNING: Only wc->wr_cqe and wc->status are reliable */
        ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);
 
@@ -340,6 +340,9 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
        if (wc->status != IB_WC_SUCCESS)
                goto flushed;
 
+       if (svc_rdma_post_recv(rdma))
+               goto post_err;
+
        /* All wc fields are now known to be valid */
        ctxt->rc_byte_len = wc->byte_len;
 
@@ -350,18 +353,11 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
        spin_unlock(&rdma->sc_rq_dto_lock);
        if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
                svc_xprt_enqueue(&rdma->sc_xprt);
-
-       if (!test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags) &&
-           rdma->sc_pending_recvs < rdma->sc_max_requests)
-               if (!svc_rdma_refresh_recvs(rdma, RPCRDMA_MAX_RECV_BATCH,
-                                           false))
-                       goto post_err;
-
        return;
 
 flushed:
-       svc_rdma_recv_ctxt_put(rdma, ctxt);
 post_err:
+       svc_rdma_recv_ctxt_put(rdma, ctxt);
        set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
        svc_xprt_enqueue(&rdma->sc_xprt);
 }
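
The svc_rdma hunks revert receive buffering to a one-for-one model: the queue is filled once to sc_max_requests, and each successful completion immediately posts a single replacement receive, with any posting failure closing the transport. A toy model of the resulting steady-state accounting (not the verbs API):

#include <stdbool.h>
#include <stdio.h>

#define MAX_REQUESTS 4                /* stand-in for sc_max_requests */

static int posted;                    /* receives currently outstanding */

static bool post_one_recv(void)
{
        posted++;                     /* a real ib_post_recv() can fail */
        return true;
}

/* One-for-one refill: each successful completion immediately posts a
 * single replacement receive, keeping the queue at its initial depth.
 */
static void wc_receive(bool success)
{
        posted--;                     /* this completion consumed one recv */
        if (!success)
                return;               /* flushed: the transport is closing */
        if (!post_one_recv())
                return;               /* post failure also closes it */
}

int main(void)
{
        for (int i = 0; i < MAX_REQUESTS; i++)
                post_one_recv();      /* initial fill, as in svc_rdma_post_recvs() */
        wc_receive(true);
        printf("depth after one completion: %d\n", posted);
        return 0;
}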
index 6bf4550..57c6a1a 100644 (file)
@@ -154,9 +154,9 @@ struct tipc_media {
  * care of initializing all other fields.
  */
 struct tipc_bearer {
-       void __rcu *media_ptr;                  /* initalized by media */
-       u32 mtu;                                /* initalized by media */
-       struct tipc_media_addr addr;            /* initalized by media */
+       void __rcu *media_ptr;                  /* initialized by media */
+       u32 mtu;                                /* initialized by media */
+       struct tipc_media_addr addr;            /* initialized by media */
        char name[TIPC_MAX_BEARER_NAME];
        struct tipc_media *media;
        struct tipc_media_addr bcast_addr;
index f4fca8f..97710ce 100644 (file)
@@ -1941,12 +1941,13 @@ static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
                        goto rcv;
                if (tipc_aead_clone(&tmp, aead) < 0)
                        goto rcv;
+               WARN_ON(!refcount_inc_not_zero(&tmp->refcnt));
                if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key, false) < 0) {
                        tipc_aead_free(&tmp->rcu);
                        goto rcv;
                }
                tipc_aead_put(aead);
-               aead = tipc_aead_get(tmp);
+               aead = tmp;
        }
 
        if (unlikely(err)) {
index a129f66..faf6bf5 100644 (file)
@@ -89,7 +89,7 @@
  *     - A spin lock to protect the registry of kernel/driver users (reg.c)
  *     - A global spin_lock (tipc_port_lock), which only task is to ensure
  *       consistency where more than one port is involved in an operation,
- *       i.e., whe a port is part of a linked list of ports.
+ *       i.e., when a port is part of a linked list of ports.
  *       There are two such lists; 'port_list', which is used for management,
  *       and 'wait_list', which is used to queue ports during congestion.
  *
index 008670d..e0ee832 100644 (file)
@@ -1734,7 +1734,7 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
 }
 
 /* tipc_node_xmit_skb(): send single buffer to destination
- * Buffers sent via this functon are generally TIPC_SYSTEM_IMPORTANCE
+ * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
  * messages, which will not be rejected
  * The only exception is datagram messages rerouted after secondary
  * lookup, which are rare and safe to dispose of anyway.
@@ -2895,17 +2895,22 @@ int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
 
 #ifdef CONFIG_TIPC_CRYPTO
 static int tipc_nl_retrieve_key(struct nlattr **attrs,
-                               struct tipc_aead_key **key)
+                               struct tipc_aead_key **pkey)
 {
        struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY];
+       struct tipc_aead_key *key;
 
        if (!attr)
                return -ENODATA;
 
-       *key = (struct tipc_aead_key *)nla_data(attr);
-       if (nla_len(attr) < tipc_aead_key_size(*key))
+       if (nla_len(attr) < sizeof(*key))
+               return -EINVAL;
+       key = (struct tipc_aead_key *)nla_data(attr);
+       if (key->keylen > TIPC_AEAD_KEYLEN_MAX ||
+           nla_len(attr) < tipc_aead_key_size(key))
                return -EINVAL;
 
+       *pkey = key;
        return 0;
 }
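
tipc_nl_retrieve_key() now validates in two steps: first that the attribute is large enough to cover the fixed header, so keylen can be read at all, and only then that the advertised key length is within bounds and actually backed by the attribute payload. A hedged userspace sketch of that ordering (length cap and layout illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define KEYLEN_MAX 36                 /* illustrative cap, not the TIPC constant */

struct aead_key { uint32_t keylen; uint8_t key[]; };

/* Check the fixed header first, then the variable part it describes. */
static int retrieve_key(const void *attr, size_t attr_len,
                        const struct aead_key **pkey)
{
        const struct aead_key *key = attr;

        if (attr_len < sizeof(*key))
                return -1;            /* too short to even read keylen */
        if (key->keylen > KEYLEN_MAX ||
            attr_len < sizeof(*key) + key->keylen)
                return -1;            /* advertised length is a lie */
        *pkey = key;
        return 0;
}

int main(void)
{
        uint32_t storage[16] = { 0 };
        struct aead_key *key = (struct aead_key *)storage;
        const struct aead_key *out;

        key->keylen = 1000;
        printf("bogus: %d\n", retrieve_key(storage, sizeof(storage), &out));
        key->keylen = 16;
        printf("sane:  %d\n", retrieve_key(storage, sizeof(storage), &out));
        return 0;
}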
 
index cebcc10..022999e 100644 (file)
@@ -1265,7 +1265,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
                spin_lock_bh(&inputq->lock);
                if (skb_peek(arrvq) == skb) {
                        skb_queue_splice_tail_init(&tmpq, inputq);
-                       kfree_skb(__skb_dequeue(arrvq));
+                       __skb_dequeue(arrvq);
                }
                spin_unlock_bh(&inputq->lock);
                __skb_queue_purge(&tmpq);
index 5546710..bc7fb9b 100644 (file)
@@ -755,6 +755,7 @@ static struct sock *__vsock_create(struct net *net,
                vsk->buffer_size = psk->buffer_size;
                vsk->buffer_min_size = psk->buffer_min_size;
                vsk->buffer_max_size = psk->buffer_max_size;
+               security_sk_clone(parent, sk);
        } else {
                vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN);
                vsk->owner = get_current_cred();
index 521d36b..b1df42e 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 
 #include <linux/if.h>
@@ -70,7 +70,7 @@ __cfg80211_wdev_from_attrs(struct cfg80211_registered_device *rdev,
        struct wireless_dev *result = NULL;
        bool have_ifidx = attrs[NL80211_ATTR_IFINDEX];
        bool have_wdev_id = attrs[NL80211_ATTR_WDEV];
-       u64 wdev_id;
+       u64 wdev_id = 0;
        int wiphy_idx = -1;
        int ifidx = -1;
 
@@ -229,9 +229,13 @@ static int validate_beacon_head(const struct nlattr *attr,
        unsigned int len = nla_len(attr);
        const struct element *elem;
        const struct ieee80211_mgmt *mgmt = (void *)data;
-       bool s1g_bcn = ieee80211_is_s1g_beacon(mgmt->frame_control);
        unsigned int fixedlen, hdrlen;
+       bool s1g_bcn;
 
+       if (len < offsetofend(typeof(*mgmt), frame_control))
+               goto err;
+
+       s1g_bcn = ieee80211_is_s1g_beacon(mgmt->frame_control);
        if (s1g_bcn) {
                fixedlen = offsetof(struct ieee80211_ext,
                                    u.s1g_beacon.variable);
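
validate_beacon_head() above defers reading frame_control until the buffer is known to reach past it, using offsetofend() (offsetof plus the field's size). A small standalone illustration of that bounds-before-read ordering, with a fake classification step:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct mgmt_like { uint16_t frame_control; uint8_t body[]; };

/* offsetofend() in the kernel is offsetof(field) + sizeof(field): the
 * frame_control word may only be read once the buffer reaches past it.
 */
static int validate_head(const uint8_t *data, size_t len)
{
        const struct mgmt_like *mgmt = (const void *)data;

        if (len < offsetof(struct mgmt_like, frame_control) +
                  sizeof(mgmt->frame_control))
                return -1;            /* too short to classify at all */
        return (mgmt->frame_control & 0x1) ? 1 : 0;   /* toy classification */
}

int main(void)
{
        uint16_t buf[2] = { 0x0001, 0 };

        printf("short=%d ok=%d\n",
               validate_head((const uint8_t *)buf, 1),
               validate_head((const uint8_t *)buf, 4));
        return 0;
}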
@@ -5485,7 +5489,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
                        rdev, info->attrs[NL80211_ATTR_UNSOL_BCAST_PROBE_RESP],
                        &params);
                if (err)
-                       return err;
+                       goto out;
        }
 
        nl80211_calculate_ap_params(&params);
@@ -14789,6 +14793,7 @@ bad_tid_conf:
 #define NL80211_FLAG_NEED_WDEV_UP      (NL80211_FLAG_NEED_WDEV |\
                                         NL80211_FLAG_CHECK_NETDEV_UP)
 #define NL80211_FLAG_CLEAR_SKB         0x20
+#define NL80211_FLAG_NO_WIPHY_MTX      0x40
 
 static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
                            struct genl_info *info)
@@ -14840,7 +14845,7 @@ static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
                info->user_ptr[0] = rdev;
        }
 
-       if (rdev) {
+       if (rdev && !(ops->internal_flags & NL80211_FLAG_NO_WIPHY_MTX)) {
                wiphy_lock(&rdev->wiphy);
                /* we keep the mutex locked until post_doit */
                __release(&rdev->wiphy.mtx);
@@ -14865,7 +14870,8 @@ static void nl80211_post_doit(const struct genl_ops *ops, struct sk_buff *skb,
                }
        }
 
-       if (info->user_ptr[0]) {
+       if (info->user_ptr[0] &&
+           !(ops->internal_flags & NL80211_FLAG_NO_WIPHY_MTX)) {
                struct cfg80211_registered_device *rdev = info->user_ptr[0];
 
                /* we kept the mutex locked since pre_doit */
@@ -15329,7 +15335,9 @@ static const struct genl_small_ops nl80211_small_ops[] = {
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = nl80211_wiphy_netns,
                .flags = GENL_UNS_ADMIN_PERM,
-               .internal_flags = NL80211_FLAG_NEED_WIPHY,
+               .internal_flags = NL80211_FLAG_NEED_WIPHY |
+                                 NL80211_FLAG_NEED_RTNL |
+                                 NL80211_FLAG_NO_WIPHY_MTX,
        },
        {
                .cmd = NL80211_CMD_GET_SURVEY,
index 019952d..758eb7d 100644 (file)
@@ -2352,14 +2352,16 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
                return NULL;
 
        if (ext) {
-               struct ieee80211_s1g_bcn_compat_ie *compat;
-               u8 *ie;
+               const struct ieee80211_s1g_bcn_compat_ie *compat;
+               const struct element *elem;
 
-               ie = (void *)cfg80211_find_ie(WLAN_EID_S1G_BCN_COMPAT,
-                                             variable, ielen);
-               if (!ie)
+               elem = cfg80211_find_elem(WLAN_EID_S1G_BCN_COMPAT,
+                                         variable, ielen);
+               if (!elem)
+                       return NULL;
+               if (elem->datalen < sizeof(*compat))
                        return NULL;
-               compat = (void *)(ie + 2);
+               compat = (void *)elem->data;
                bssid = ext->u.s1g_beacon.sa;
                capability = le16_to_cpu(compat->compat_info);
                beacon_int = le16_to_cpu(compat->beacon_int);
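
Switching from cfg80211_find_ie() to cfg80211_find_elem() lets the S1G path check elem->datalen before casting the payload to the compat IE structure, instead of trusting an ie pointer plus two bytes. A hedged sketch of an element walk with that length check, using made-up IDs and a made-up minimum size:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct element { uint8_t id, datalen; uint8_t data[]; };

/* An element walk returns the element itself, so the caller can test
 * datalen before casting data to a fixed-size structure.
 */
static const struct element *find_elem(uint8_t eid, const uint8_t *ies, size_t len)
{
        size_t off = 0;

        while (off + 2 <= len) {
                const struct element *e = (const void *)(ies + off);

                if (off + 2 + e->datalen > len)
                        return NULL;  /* truncated element: stop */
                if (e->id == eid)
                        return e;
                off += 2 + e->datalen;
        }
        return NULL;
}

int main(void)
{
        /* id 0x59, datalen 1: present, but too short for a 4-byte body */
        uint8_t ies[] = { 0x59, 0x01, 0xaa };
        const struct element *e = find_elem(0x59, ies, sizeof(ies));

        printf("found=%d big_enough=%d\n", e != NULL, e && e->datalen >= 4);
        return 0;
}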
index 07756ca..08a70b4 100644 (file)
@@ -529,7 +529,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
                cfg80211_sme_free(wdev);
        }
 
-       if (WARN_ON(wdev->conn))
+       if (wdev->conn)
                return -EINPROGRESS;
 
        wdev->conn = kzalloc(sizeof(*wdev->conn), GFP_KERNEL);
index d8e8a11..a20aec9 100644 (file)
@@ -216,7 +216,7 @@ static struct nlmsghdr *xfrm_nlmsg_put_compat(struct sk_buff *skb,
        case XFRM_MSG_GETSADINFO:
        case XFRM_MSG_GETSPDINFO:
        default:
-               WARN_ONCE(1, "unsupported nlmsg_type %d", nlh_src->nlmsg_type);
+               pr_warn_once("unsupported nlmsg_type %d\n", nlh_src->nlmsg_type);
                return ERR_PTR(-EOPNOTSUPP);
        }
 
@@ -277,7 +277,7 @@ static int xfrm_xlate64_attr(struct sk_buff *dst, const struct nlattr *src)
                return xfrm_nla_cpy(dst, src, nla_len(src));
        default:
                BUILD_BUG_ON(XFRMA_MAX != XFRMA_IF_ID);
-               WARN_ONCE(1, "unsupported nla_type %d", src->nla_type);
+               pr_warn_once("unsupported nla_type %d\n", src->nla_type);
                return -EOPNOTSUPP;
        }
 }
@@ -315,8 +315,10 @@ static int xfrm_alloc_compat(struct sk_buff *skb, const struct nlmsghdr *nlh_src
        struct sk_buff *new = NULL;
        int err;
 
-       if (WARN_ON_ONCE(type >= ARRAY_SIZE(xfrm_msg_min)))
+       if (type >= ARRAY_SIZE(xfrm_msg_min)) {
+               pr_warn_once("unsupported nlmsg_type %d\n", nlh_src->nlmsg_type);
                return -EOPNOTSUPP;
+       }
 
        if (skb_shinfo(skb)->frag_list == NULL) {
                new = alloc_skb(skb->len + skb_tailroom(skb), GFP_ATOMIC);
@@ -378,6 +380,10 @@ static int xfrm_attr_cpy32(void *dst, size_t *pos, const struct nlattr *src,
        struct nlmsghdr *nlmsg = dst;
        struct nlattr *nla;
 
+       /* xfrm_user_rcv_msg_compat() relies on the fact that 32-bit messages
+        * have the same length as, or are shorter than, their 64-bit
+        * counterparts. A 32-bit translation bigger than the 64-bit original
+        * is unexpected.
+        */
        if (WARN_ON_ONCE(copy_len > payload))
                copy_len = payload;
 
index edf1189..6d6917b 100644 (file)
@@ -134,8 +134,6 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
                return skb;
        }
 
-       xo->flags |= XFRM_XMIT;
-
        if (skb_is_gso(skb) && unlikely(x->xso.dev != dev)) {
                struct sk_buff *segs;
 
index 495b1f5..8831f5a 100644 (file)
@@ -306,6 +306,8 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 
                        icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                } else {
+                       if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
+                               goto xmit;
                        icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                                      htonl(mtu));
                }
@@ -314,6 +316,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
                return -EMSGSIZE;
        }
 
+xmit:
        xfrmi_scrub_packet(skb, !net_eq(xi->net, dev_net(dev)));
        skb_dst_set(skb, dst);
        skb->dev = tdev;
index a7ab193..e4cb0ff 100644 (file)
@@ -503,22 +503,22 @@ out:
        return err;
 }
 
-int xfrm_output_resume(struct sk_buff *skb, int err)
+int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err)
 {
        struct net *net = xs_net(skb_dst(skb)->xfrm);
 
        while (likely((err = xfrm_output_one(skb, err)) == 0)) {
                nf_reset_ct(skb);
 
-               err = skb_dst(skb)->ops->local_out(net, skb->sk, skb);
+               err = skb_dst(skb)->ops->local_out(net, sk, skb);
                if (unlikely(err != 1))
                        goto out;
 
                if (!skb_dst(skb)->xfrm)
-                       return dst_output(net, skb->sk, skb);
+                       return dst_output(net, sk, skb);
 
                err = nf_hook(skb_dst(skb)->ops->family,
-                             NF_INET_POST_ROUTING, net, skb->sk, skb,
+                             NF_INET_POST_ROUTING, net, sk, skb,
                              NULL, skb_dst(skb)->dev, xfrm_output2);
                if (unlikely(err != 1))
                        goto out;
@@ -534,7 +534,7 @@ EXPORT_SYMBOL_GPL(xfrm_output_resume);
 
 static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-       return xfrm_output_resume(skb, 1);
+       return xfrm_output_resume(sk, skb, 1);
 }
 
 static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb)
@@ -660,6 +660,12 @@ static int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb)
 {
        int err;
 
+       if (x->outer_mode.encap == XFRM_MODE_BEET &&
+           ip_is_fragment(ip_hdr(skb))) {
+               net_warn_ratelimited("BEET mode doesn't support inner IPv4 fragments\n");
+               return -EAFNOSUPPORT;
+       }
+
        err = xfrm4_tunnel_check_size(skb);
        if (err)
                return err;
@@ -705,8 +711,15 @@ out:
 static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
 {
 #if IS_ENABLED(CONFIG_IPV6)
+       unsigned int ptr = 0;
        int err;
 
+       if (x->outer_mode.encap == XFRM_MODE_BEET &&
+           ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL, NULL) >= 0) {
+               net_warn_ratelimited("BEET mode doesn't support inner IPv6 fragments\n");
+               return -EAFNOSUPPORT;
+       }
+
        err = xfrm6_tunnel_check_size(skb);
        if (err)
                return err;
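
The BEET-mode hunks refuse inner fragments outright, since BEET has no way to represent them. On the IPv4 side, fragment detection reduces to the More-Fragments bit or a nonzero fragment offset, as in this standalone check:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define IP_MF     0x2000              /* More Fragments */
#define IP_OFFSET 0x1fff              /* fragment offset mask */

/* A packet is a fragment if MF is set or the offset is nonzero; BEET
 * mode cannot carry such inner packets, hence the new rejection.
 */
static int is_fragment(uint16_t frag_off_net)
{
        return (ntohs(frag_off_net) & (IP_MF | IP_OFFSET)) != 0;
}

int main(void)
{
        printf("df-only: %d\n", is_fragment(htons(0x4000)));   /* DF: not a fragment */
        printf("mf-set:  %d\n", is_fragment(htons(0x2000)));   /* MF: fragment */
        return 0;
}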
index d01ca1a..4496f7e 100644 (file)
@@ -44,7 +44,6 @@ static void xfrm_state_gc_task(struct work_struct *work);
  */
 
 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
-static __read_mostly seqcount_t xfrm_state_hash_generation = SEQCNT_ZERO(xfrm_state_hash_generation);
 static struct kmem_cache *xfrm_state_cache __ro_after_init;
 
 static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
@@ -140,7 +139,7 @@ static void xfrm_hash_resize(struct work_struct *work)
        }
 
        spin_lock_bh(&net->xfrm.xfrm_state_lock);
-       write_seqcount_begin(&xfrm_state_hash_generation);
+       write_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
 
        nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
        odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
@@ -156,7 +155,7 @@ static void xfrm_hash_resize(struct work_struct *work)
        rcu_assign_pointer(net->xfrm.state_byspi, nspi);
        net->xfrm.state_hmask = nhashmask;
 
-       write_seqcount_end(&xfrm_state_hash_generation);
+       write_seqcount_end(&net->xfrm.xfrm_state_hash_generation);
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);
 
        osize = (ohashmask + 1) * sizeof(struct hlist_head);
@@ -1063,7 +1062,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
 
        to_put = NULL;
 
-       sequence = read_seqcount_begin(&xfrm_state_hash_generation);
+       sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
 
        rcu_read_lock();
        h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
@@ -1176,7 +1175,7 @@ out:
        if (to_put)
                xfrm_state_put(to_put);
 
-       if (read_seqcount_retry(&xfrm_state_hash_generation, sequence)) {
+       if (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence)) {
                *err = -EAGAIN;
                if (x) {
                        xfrm_state_put(x);
@@ -2666,6 +2665,8 @@ int __net_init xfrm_state_init(struct net *net)
        net->xfrm.state_num = 0;
        INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
        spin_lock_init(&net->xfrm.xfrm_state_lock);
+       seqcount_spinlock_init(&net->xfrm.xfrm_state_hash_generation,
+                              &net->xfrm.xfrm_state_lock);
        return 0;
 
 out_byspi:
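
Moving xfrm_state_hash_generation into struct net and initializing it with seqcount_spinlock_init() ties the write side to that namespace's xfrm_state_lock. The read/retry discipline xfrm_state_find() follows is the usual seqlock pattern, modeled here in userspace with an atomic counter (a sketch, not the kernel primitives):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int seq;      /* per-netns generation counter */

static unsigned int read_begin(void)  { return atomic_load(&seq); }
static int read_retry(unsigned int s) { return (s & 1) || atomic_load(&seq) != s; }
static void write_begin(void)         { atomic_fetch_add(&seq, 1); }
static void write_end(void)           { atomic_fetch_add(&seq, 1); }

int main(void)
{
        unsigned int s = read_begin();

        write_begin();                /* a concurrent hash resize begins... */
        write_end();                  /* ...and completes */

        /* the reader saw generation 0, so it must retry the lookup */
        printf("retry needed: %d\n", read_retry(s));
        return 0;
}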
index 168cd27..2c52535 100644 (file)
@@ -20,6 +20,7 @@ SECTIONS {
 
        __patchable_function_entries : { *(__patchable_function_entries) }
 
+#ifdef CONFIG_LTO_CLANG
        /*
         * With CONFIG_LTO_CLANG, LLD always enables -fdata-sections and
         * -ffunction-sections, which increases the size of the final module.
@@ -41,6 +42,7 @@ SECTIONS {
        }
 
        .text : { *(.text .text.[0-9a-zA-Z_]*) }
+#endif
 }
 
 /* bring in arch-specific sections */
index 1d20003..0ba0184 100644 (file)
@@ -98,6 +98,14 @@ struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
        struct rb_node *node, *parent = NULL;
        struct integrity_iint_cache *iint, *test_iint;
 
+       /*
+        * The integrity "iint_cache" is initialized at security_init(),
+        * unless the integrity LSM is not included in the ordered list
+        * of LSMs enabled on the boot command line.
+        */
+       if (!iint_cache)
+               panic("%s: lsm=integrity required.\n", __func__);
+
        iint = integrity_iint_find(inode);
        if (iint)
                return iint;
index 6fe2530..7650de0 100644 (file)
@@ -219,14 +219,21 @@ static inline bool selinux_policycap_genfs_seclabel_symlinks(void)
        return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_GENFS_SECLABEL_SYMLINKS]);
 }
 
+struct selinux_policy_convert_data;
+
+struct selinux_load_state {
+       struct selinux_policy *policy;
+       struct selinux_policy_convert_data *convert_data;
+};
+
 int security_mls_enabled(struct selinux_state *state);
 int security_load_policy(struct selinux_state *state,
-                       void *data, size_t len,
-                       struct selinux_policy **newpolicyp);
+                        void *data, size_t len,
+                        struct selinux_load_state *load_state);
 void selinux_policy_commit(struct selinux_state *state,
-                       struct selinux_policy *newpolicy);
+                          struct selinux_load_state *load_state);
 void selinux_policy_cancel(struct selinux_state *state,
-                       struct selinux_policy *policy);
+                          struct selinux_load_state *load_state);
 int security_read_policy(struct selinux_state *state,
                         void **data, size_t *len);
 int security_read_state_kernel(struct selinux_state *state,
index 01a7d50..fff6bab 100644 (file)
@@ -563,17 +563,13 @@ static int sel_make_policy_nodes(struct selinux_fs_info *fsi,
 
        ret = sel_make_bools(newpolicy, tmp_bool_dir, &tmp_bool_num,
                             &tmp_bool_names, &tmp_bool_values);
-       if (ret) {
-               pr_err("SELinux: failed to load policy booleans\n");
+       if (ret)
                goto out;
-       }
 
        ret = sel_make_classes(newpolicy, tmp_class_dir,
                               &fsi->last_class_ino);
-       if (ret) {
-               pr_err("SELinux: failed to load policy classes\n");
+       if (ret)
                goto out;
-       }
 
        /* booleans */
        old_dentry = fsi->bool_dir;
@@ -616,7 +612,7 @@ static ssize_t sel_write_load(struct file *file, const char __user *buf,
 
 {
        struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
-       struct selinux_policy *newpolicy;
+       struct selinux_load_state load_state;
        ssize_t length;
        void *data = NULL;
 
@@ -642,23 +638,23 @@ static ssize_t sel_write_load(struct file *file, const char __user *buf,
        if (copy_from_user(data, buf, count) != 0)
                goto out;
 
-       length = security_load_policy(fsi->state, data, count, &newpolicy);
+       length = security_load_policy(fsi->state, data, count, &load_state);
        if (length) {
                pr_warn_ratelimited("SELinux: failed to load policy\n");
                goto out;
        }
 
-       length = sel_make_policy_nodes(fsi, newpolicy);
+       length = sel_make_policy_nodes(fsi, load_state.policy);
        if (length) {
-               selinux_policy_cancel(fsi->state, newpolicy);
-               goto out1;
+               pr_warn_ratelimited("SELinux: failed to initialize selinuxfs\n");
+               selinux_policy_cancel(fsi->state, &load_state);
+               goto out;
        }
 
-       selinux_policy_commit(fsi->state, newpolicy);
+       selinux_policy_commit(fsi->state, &load_state);
 
        length = count;
 
-out1:
        audit_log(audit_context(), GFP_KERNEL, AUDIT_MAC_POLICY_LOAD,
                "auid=%u ses=%u lsm=selinux res=1",
                from_kuid(&init_user_ns, audit_get_loginuid(current)),
index 6dcb6aa..75df329 100644 (file)
@@ -109,7 +109,7 @@ static int avtab_insert(struct avtab *h, struct avtab_key *key, struct avtab_dat
        struct avtab_node *prev, *cur, *newnode;
        u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
 
-       if (!h)
+       if (!h || !h->nslot)
                return -EINVAL;
 
        hvalue = avtab_hash(key, h->mask);
@@ -154,7 +154,7 @@ avtab_insert_nonunique(struct avtab *h, struct avtab_key *key, struct avtab_datu
        struct avtab_node *prev, *cur;
        u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
 
-       if (!h)
+       if (!h || !h->nslot)
                return NULL;
        hvalue = avtab_hash(key, h->mask);
        for (prev = NULL, cur = h->htable[hvalue];
@@ -184,7 +184,7 @@ struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *key)
        struct avtab_node *cur;
        u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
 
-       if (!h)
+       if (!h || !h->nslot)
                return NULL;
 
        hvalue = avtab_hash(key, h->mask);
@@ -220,7 +220,7 @@ avtab_search_node(struct avtab *h, struct avtab_key *key)
        struct avtab_node *cur;
        u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
 
-       if (!h)
+       if (!h || !h->nslot)
                return NULL;
 
        hvalue = avtab_hash(key, h->mask);
@@ -295,6 +295,7 @@ void avtab_destroy(struct avtab *h)
        }
        kvfree(h->htable);
        h->htable = NULL;
+       h->nel = 0;
        h->nslot = 0;
        h->mask = 0;
 }
@@ -303,88 +304,52 @@ void avtab_init(struct avtab *h)
 {
        h->htable = NULL;
        h->nel = 0;
+       h->nslot = 0;
+       h->mask = 0;
 }
 
-int avtab_alloc(struct avtab *h, u32 nrules)
+static int avtab_alloc_common(struct avtab *h, u32 nslot)
 {
-       u32 mask = 0;
-       u32 shift = 0;
-       u32 work = nrules;
-       u32 nslot = 0;
-
-       if (nrules == 0)
-               goto avtab_alloc_out;
-
-       while (work) {
-               work  = work >> 1;
-               shift++;
-       }
-       if (shift > 2)
-               shift = shift - 2;
-       nslot = 1 << shift;
-       if (nslot > MAX_AVTAB_HASH_BUCKETS)
-               nslot = MAX_AVTAB_HASH_BUCKETS;
-       mask = nslot - 1;
+       if (!nslot)
+               return 0;
 
        h->htable = kvcalloc(nslot, sizeof(void *), GFP_KERNEL);
        if (!h->htable)
                return -ENOMEM;
 
- avtab_alloc_out:
-       h->nel = 0;
        h->nslot = nslot;
-       h->mask = mask;
-       pr_debug("SELinux: %d avtab hash slots, %d rules.\n",
-              h->nslot, nrules);
+       h->mask = nslot - 1;
        return 0;
 }
 
-int avtab_duplicate(struct avtab *new, struct avtab *orig)
+int avtab_alloc(struct avtab *h, u32 nrules)
 {
-       int i;
-       struct avtab_node *node, *tmp, *tail;
-
-       memset(new, 0, sizeof(*new));
+       int rc;
+       u32 nslot = 0;
 
-       new->htable = kvcalloc(orig->nslot, sizeof(void *), GFP_KERNEL);
-       if (!new->htable)
-               return -ENOMEM;
-       new->nslot = orig->nslot;
-       new->mask = orig->mask;
-
-       for (i = 0; i < orig->nslot; i++) {
-               tail = NULL;
-               for (node = orig->htable[i]; node; node = node->next) {
-                       tmp = kmem_cache_zalloc(avtab_node_cachep, GFP_KERNEL);
-                       if (!tmp)
-                               goto error;
-                       tmp->key = node->key;
-                       if (tmp->key.specified & AVTAB_XPERMS) {
-                               tmp->datum.u.xperms =
-                                       kmem_cache_zalloc(avtab_xperms_cachep,
-                                                       GFP_KERNEL);
-                               if (!tmp->datum.u.xperms) {
-                                       kmem_cache_free(avtab_node_cachep, tmp);
-                                       goto error;
-                               }
-                               tmp->datum.u.xperms = node->datum.u.xperms;
-                       } else
-                               tmp->datum.u.data = node->datum.u.data;
-
-                       if (tail)
-                               tail->next = tmp;
-                       else
-                               new->htable[i] = tmp;
-
-                       tail = tmp;
-                       new->nel++;
+       if (nrules != 0) {
+               u32 shift = 1;
+               u32 work = nrules >> 3;
+               while (work) {
+                       work >>= 1;
+                       shift++;
                }
+               nslot = 1 << shift;
+               if (nslot > MAX_AVTAB_HASH_BUCKETS)
+                       nslot = MAX_AVTAB_HASH_BUCKETS;
+
+               rc = avtab_alloc_common(h, nslot);
+               if (rc)
+                       return rc;
        }
 
+       pr_debug("SELinux: %d avtab hash slots, %d rules.\n", nslot, nrules);
        return 0;
-error:
-       avtab_destroy(new);
-       return -ENOMEM;
+}
+
+int avtab_alloc_dup(struct avtab *new, const struct avtab *orig)
+{
+       return avtab_alloc_common(new, orig->nslot);
 }
 
 void avtab_hash_eval(struct avtab *h, char *tag)
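
The reworked avtab_alloc() derives the bucket count from the rule count, roughly nrules/8 rounded up to a power of two and capped, while avtab_alloc_dup() reuses the original table's slot count instead of deep-copying nodes. The sizing arithmetic, re-expressed standalone (cap value illustrative, not the kernel constant):

#include <stdint.h>
#include <stdio.h>

#define MAX_BUCKETS (1 << 16)         /* cap illustrative */

/* Re-expression of the sizing in the new avtab_alloc(): roughly nrules/8
 * rounded up to a power of two, capped, with zero rules meaning no table.
 */
static uint32_t nslot_for(uint32_t nrules)
{
        uint32_t shift = 1, work = nrules >> 3, nslot;

        if (nrules == 0)
                return 0;
        while (work) {
                work >>= 1;
                shift++;
        }
        nslot = (uint32_t)1 << shift;
        if (nslot > MAX_BUCKETS)
                nslot = MAX_BUCKETS;
        return nslot;
}

int main(void)
{
        for (uint32_t n = 0; n <= 4096; n += 1024)
                printf("nrules=%u -> nslot=%u\n", n, nslot_for(n));
        return 0;
}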
index 4c4445c..f2eeb36 100644 (file)
@@ -89,7 +89,7 @@ struct avtab {
 
 void avtab_init(struct avtab *h);
 int avtab_alloc(struct avtab *, u32);
-int avtab_duplicate(struct avtab *new, struct avtab *orig);
+int avtab_alloc_dup(struct avtab *new, const struct avtab *orig);
 struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *k);
 void avtab_destroy(struct avtab *h);
 void avtab_hash_eval(struct avtab *h, char *tag);
index 0b32f3a..1ef74c0 100644 (file)
@@ -605,7 +605,6 @@ static int cond_dup_av_list(struct cond_av_list *new,
                        struct cond_av_list *orig,
                        struct avtab *avtab)
 {
-       struct avtab_node *avnode;
        u32 i;
 
        memset(new, 0, sizeof(*new));
@@ -615,10 +614,11 @@ static int cond_dup_av_list(struct cond_av_list *new,
                return -ENOMEM;
 
        for (i = 0; i < orig->len; i++) {
-               avnode = avtab_search_node(avtab, &orig->nodes[i]->key);
-               if (WARN_ON(!avnode))
-                       return -EINVAL;
-               new->nodes[i] = avnode;
+               new->nodes[i] = avtab_insert_nonunique(avtab,
+                                                      &orig->nodes[i]->key,
+                                                      &orig->nodes[i]->datum);
+               if (!new->nodes[i])
+                       return -ENOMEM;
                new->len++;
        }
 
@@ -630,7 +630,7 @@ static int duplicate_policydb_cond_list(struct policydb *newp,
 {
        int rc, i, j;
 
-       rc = avtab_duplicate(&newp->te_cond_avtab, &origp->te_cond_avtab);
+       rc = avtab_alloc_dup(&newp->te_cond_avtab, &origp->te_cond_avtab);
        if (rc)
                return rc;
 
index 3438d01..3016331 100644 (file)
 #include "policycap_names.h"
 #include "ima.h"
 
+struct convert_context_args {
+       struct selinux_state *state;
+       struct policydb *oldp;
+       struct policydb *newp;
+};
+
+struct selinux_policy_convert_data {
+       struct convert_context_args args;
+       struct sidtab_convert_params sidtab_params;
+};
+
 /* Forward declaration. */
 static int context_struct_to_string(struct policydb *policydb,
                                    struct context *context,
@@ -1541,6 +1552,7 @@ static int security_context_to_sid_core(struct selinux_state *state,
                if (!str)
                        goto out;
        }
+retry:
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -1554,6 +1566,15 @@ static int security_context_to_sid_core(struct selinux_state *state,
        } else if (rc)
                goto out_unlock;
        rc = sidtab_context_to_sid(sidtab, &context, sid);
+       if (rc == -ESTALE) {
+               rcu_read_unlock();
+               if (context.str) {
+                       str = context.str;
+                       context.str = NULL;
+               }
+               context_destroy(&context);
+               goto retry;
+       }
        context_destroy(&context);
 out_unlock:
        rcu_read_unlock();
@@ -1703,7 +1724,7 @@ static int security_compute_sid(struct selinux_state *state,
        struct selinux_policy *policy;
        struct policydb *policydb;
        struct sidtab *sidtab;
-       struct class_datum *cladatum = NULL;
+       struct class_datum *cladatum;
        struct context *scontext, *tcontext, newcontext;
        struct sidtab_entry *sentry, *tentry;
        struct avtab_key avkey;
@@ -1725,6 +1746,8 @@ static int security_compute_sid(struct selinux_state *state,
                goto out;
        }
 
+retry:
+       cladatum = NULL;
        context_init(&newcontext);
 
        rcu_read_lock();
@@ -1869,6 +1892,11 @@ static int security_compute_sid(struct selinux_state *state,
        }
        /* Obtain the sid for the context. */
        rc = sidtab_context_to_sid(sidtab, &newcontext, out_sid);
+       if (rc == -ESTALE) {
+               rcu_read_unlock();
+               context_destroy(&newcontext);
+               goto retry;
+       }
 out_unlock:
        rcu_read_unlock();
        context_destroy(&newcontext);
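
The retry loops added throughout these SELinux hunks all share one shape: sidtab_context_to_sid() may now return -ESTALE while a policy reload has frozen the old sidtab, and the caller drops the RCU read lock, frees any partial context, and retries against the newly installed policy. A toy model of that loop:

#include <errno.h>
#include <stdio.h>

/* Model of the retry pattern above: the first lookup races with a policy
 * swap and fails with -ESTALE; the caller backs out and tries again.
 */
static int attempts;

static int context_to_sid(int *sid)
{
        if (attempts++ == 0)
                return -ESTALE;       /* old sidtab was frozen mid-lookup */
        *sid = 42;                    /* toy SID from the new policy */
        return 0;
}

int main(void)
{
        int sid = 0, rc;

retry:
        /* rcu_read_lock(); look up the current policy ... */
        rc = context_to_sid(&sid);
        if (rc == -ESTALE) {
                /* rcu_read_unlock(); destroy any partial context ... */
                goto retry;
        }
        printf("rc=%d sid=%d attempts=%d\n", rc, sid, attempts);
        return 0;
}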
@@ -1974,12 +2002,6 @@ static inline int convert_context_handle_invalid_context(
        return 0;
 }
 
-struct convert_context_args {
-       struct selinux_state *state;
-       struct policydb *oldp;
-       struct policydb *newp;
-};
-
 /*
  * Convert the values in the security context
  * structure `oldc' from the values specified
@@ -2159,7 +2181,7 @@ static void selinux_policy_cond_free(struct selinux_policy *policy)
 }
 
 void selinux_policy_cancel(struct selinux_state *state,
-                       struct selinux_policy *policy)
+                          struct selinux_load_state *load_state)
 {
        struct selinux_policy *oldpolicy;
 
@@ -2167,7 +2189,8 @@ void selinux_policy_cancel(struct selinux_state *state,
                                        lockdep_is_held(&state->policy_mutex));
 
        sidtab_cancel_convert(oldpolicy->sidtab);
-       selinux_policy_free(policy);
+       selinux_policy_free(load_state->policy);
+       kfree(load_state->convert_data);
 }
 
 static void selinux_notify_policy_change(struct selinux_state *state,
@@ -2183,9 +2206,10 @@ static void selinux_notify_policy_change(struct selinux_state *state,
 }
 
 void selinux_policy_commit(struct selinux_state *state,
-                       struct selinux_policy *newpolicy)
+                          struct selinux_load_state *load_state)
 {
-       struct selinux_policy *oldpolicy;
+       struct selinux_policy *oldpolicy, *newpolicy = load_state->policy;
+       unsigned long flags;
        u32 seqno;
 
        oldpolicy = rcu_dereference_protected(state->policy,
@@ -2207,7 +2231,13 @@ void selinux_policy_commit(struct selinux_state *state,
        seqno = newpolicy->latest_granting;
 
        /* Install the new policy. */
-       rcu_assign_pointer(state->policy, newpolicy);
+       if (oldpolicy) {
+               sidtab_freeze_begin(oldpolicy->sidtab, &flags);
+               rcu_assign_pointer(state->policy, newpolicy);
+               sidtab_freeze_end(oldpolicy->sidtab, &flags);
+       } else {
+               rcu_assign_pointer(state->policy, newpolicy);
+       }
 
        /* Load the policycaps from the new policy */
        security_load_policycaps(state, newpolicy);
@@ -2225,6 +2255,7 @@ void selinux_policy_commit(struct selinux_state *state,
        /* Free the old policy */
        synchronize_rcu();
        selinux_policy_free(oldpolicy);
+       kfree(load_state->convert_data);
 
        /* Notify others of the policy change */
        selinux_notify_policy_change(state, seqno);
@@ -2241,11 +2272,10 @@ void selinux_policy_commit(struct selinux_state *state,
  * loading the new policy.
  */
 int security_load_policy(struct selinux_state *state, void *data, size_t len,
-                       struct selinux_policy **newpolicyp)
+                        struct selinux_load_state *load_state)
 {
        struct selinux_policy *newpolicy, *oldpolicy;
-       struct sidtab_convert_params convert_params;
-       struct convert_context_args args;
+       struct selinux_policy_convert_data *convert_data;
        int rc = 0;
        struct policy_file file = { data, len }, *fp = &file;
 
@@ -2275,10 +2305,10 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len,
                goto err_mapping;
        }
 
-
        if (!selinux_initialized(state)) {
                /* First policy load, so no need to preserve state from old policy */
-               *newpolicyp = newpolicy;
+               load_state->policy = newpolicy;
+               load_state->convert_data = NULL;
                return 0;
        }
 
@@ -2292,29 +2322,38 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len,
                goto err_free_isids;
        }
 
+       convert_data = kmalloc(sizeof(*convert_data), GFP_KERNEL);
+       if (!convert_data) {
+               rc = -ENOMEM;
+               goto err_free_isids;
+       }
+
        /*
         * Convert the internal representations of contexts
         * in the new SID table.
         */
-       args.state = state;
-       args.oldp = &oldpolicy->policydb;
-       args.newp = &newpolicy->policydb;
+       convert_data->args.state = state;
+       convert_data->args.oldp = &oldpolicy->policydb;
+       convert_data->args.newp = &newpolicy->policydb;
 
-       convert_params.func = convert_context;
-       convert_params.args = &args;
-       convert_params.target = newpolicy->sidtab;
+       convert_data->sidtab_params.func = convert_context;
+       convert_data->sidtab_params.args = &convert_data->args;
+       convert_data->sidtab_params.target = newpolicy->sidtab;
 
-       rc = sidtab_convert(oldpolicy->sidtab, &convert_params);
+       rc = sidtab_convert(oldpolicy->sidtab, &convert_data->sidtab_params);
        if (rc) {
                pr_err("SELinux:  unable to convert the internal"
                        " representation of contexts in the new SID"
                        " table\n");
-               goto err_free_isids;
+               goto err_free_convert_data;
        }
 
-       *newpolicyp = newpolicy;
+       load_state->policy = newpolicy;
+       load_state->convert_data = convert_data;
        return 0;
 
+err_free_convert_data:
+       kfree(convert_data);
 err_free_isids:
        sidtab_destroy(newpolicy->sidtab);
 err_mapping:
@@ -2342,13 +2381,15 @@ int security_port_sid(struct selinux_state *state,
        struct policydb *policydb;
        struct sidtab *sidtab;
        struct ocontext *c;
-       int rc = 0;
+       int rc;
 
        if (!selinux_initialized(state)) {
                *out_sid = SECINITSID_PORT;
                return 0;
        }
 
+retry:
+       rc = 0;
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -2367,6 +2408,10 @@ int security_port_sid(struct selinux_state *state,
                if (!c->sid[0]) {
                        rc = sidtab_context_to_sid(sidtab, &c->context[0],
                                                   &c->sid[0]);
+                       if (rc == -ESTALE) {
+                               rcu_read_unlock();
+                               goto retry;
+                       }
                        if (rc)
                                goto out;
                }
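
This retry shape is stamped into every lookup function touched by the patch, so it is worth seeing in isolation. The following is a self-contained userspace model of the protocol, not kernel code: lookup_or_create() stands in for sidtab_context_to_sid(), the frozen flag for a superseded sidtab, and reassigning cur for re-reading the RCU-protected policy pointer:

    /* Minimal model of the -ESTALE retry loop (hypothetical types). */
    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct table { bool frozen; int next_sid; };

    static int lookup_or_create(struct table *t, int *sid)
    {
            if (t->frozen)
                    return -ESTALE; /* table superseded; caller must retry */
            *sid = ++t->next_sid;
            return 0;
    }

    int main(void)
    {
            struct table old_tab = { .frozen = true }, new_tab = { 0 };
            struct table *cur = &old_tab; /* stale, like a pre-swap policy */
            int sid, rc;

    retry:
            rc = lookup_or_create(cur, &sid);
            if (rc == -ESTALE) {
                    cur = &new_tab;       /* re-read the policy pointer */
                    goto retry;
            }
            printf("sid %d (rc=%d)\n", sid, rc);
            return 0;
    }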
@@ -2393,13 +2438,15 @@ int security_ib_pkey_sid(struct selinux_state *state,
        struct policydb *policydb;
        struct sidtab *sidtab;
        struct ocontext *c;
-       int rc = 0;
+       int rc;
 
        if (!selinux_initialized(state)) {
                *out_sid = SECINITSID_UNLABELED;
                return 0;
        }
 
+retry:
+       rc = 0;
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -2420,6 +2467,10 @@ int security_ib_pkey_sid(struct selinux_state *state,
                        rc = sidtab_context_to_sid(sidtab,
                                                   &c->context[0],
                                                   &c->sid[0]);
+                       if (rc == -ESTALE) {
+                               rcu_read_unlock();
+                               goto retry;
+                       }
                        if (rc)
                                goto out;
                }
@@ -2445,13 +2496,15 @@ int security_ib_endport_sid(struct selinux_state *state,
        struct policydb *policydb;
        struct sidtab *sidtab;
        struct ocontext *c;
-       int rc = 0;
+       int rc;
 
        if (!selinux_initialized(state)) {
                *out_sid = SECINITSID_UNLABELED;
                return 0;
        }
 
+retry:
+       rc = 0;
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -2472,6 +2525,10 @@ int security_ib_endport_sid(struct selinux_state *state,
                if (!c->sid[0]) {
                        rc = sidtab_context_to_sid(sidtab, &c->context[0],
                                                   &c->sid[0]);
+                       if (rc == -ESTALE) {
+                               rcu_read_unlock();
+                               goto retry;
+                       }
                        if (rc)
                                goto out;
                }
@@ -2495,7 +2552,7 @@ int security_netif_sid(struct selinux_state *state,
        struct selinux_policy *policy;
        struct policydb *policydb;
        struct sidtab *sidtab;
-       int rc = 0;
+       int rc;
        struct ocontext *c;
 
        if (!selinux_initialized(state)) {
@@ -2503,6 +2560,8 @@ int security_netif_sid(struct selinux_state *state,
                return 0;
        }
 
+retry:
+       rc = 0;
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -2519,10 +2578,18 @@ int security_netif_sid(struct selinux_state *state,
                if (!c->sid[0] || !c->sid[1]) {
                        rc = sidtab_context_to_sid(sidtab, &c->context[0],
                                                   &c->sid[0]);
+                       if (rc == -ESTALE) {
+                               rcu_read_unlock();
+                               goto retry;
+                       }
                        if (rc)
                                goto out;
                        rc = sidtab_context_to_sid(sidtab, &c->context[1],
                                                   &c->sid[1]);
+                       if (rc == -ESTALE) {
+                               rcu_read_unlock();
+                               goto retry;
+                       }
                        if (rc)
                                goto out;
                }
@@ -2572,6 +2639,7 @@ int security_node_sid(struct selinux_state *state,
                return 0;
        }
 
+retry:
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -2620,6 +2688,10 @@ int security_node_sid(struct selinux_state *state,
                        rc = sidtab_context_to_sid(sidtab,
                                                   &c->context[0],
                                                   &c->sid[0]);
+                       if (rc == -ESTALE) {
+                               rcu_read_unlock();
+                               goto retry;
+                       }
                        if (rc)
                                goto out;
                }
@@ -2661,18 +2733,24 @@ int security_get_user_sids(struct selinux_state *state,
        struct sidtab *sidtab;
        struct context *fromcon, usercon;
        u32 *mysids = NULL, *mysids2, sid;
-       u32 mynel = 0, maxnel = SIDS_NEL;
+       u32 i, j, mynel, maxnel = SIDS_NEL;
        struct user_datum *user;
        struct role_datum *role;
        struct ebitmap_node *rnode, *tnode;
-       int rc = 0, i, j;
+       int rc;
 
        *sids = NULL;
        *nel = 0;
 
        if (!selinux_initialized(state))
-               goto out;
+               return 0;
+
+       mysids = kcalloc(maxnel, sizeof(*mysids), GFP_KERNEL);
+       if (!mysids)
+               return -ENOMEM;
 
+retry:
+       mynel = 0;
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -2692,11 +2770,6 @@ int security_get_user_sids(struct selinux_state *state,
 
        usercon.user = user->value;
 
-       rc = -ENOMEM;
-       mysids = kcalloc(maxnel, sizeof(*mysids), GFP_ATOMIC);
-       if (!mysids)
-               goto out_unlock;
-
        ebitmap_for_each_positive_bit(&user->roles, rnode, i) {
                role = policydb->role_val_to_struct[i];
                usercon.role = i + 1;
@@ -2708,6 +2781,10 @@ int security_get_user_sids(struct selinux_state *state,
                                continue;
 
                        rc = sidtab_context_to_sid(sidtab, &usercon, &sid);
+                       if (rc == -ESTALE) {
+                               rcu_read_unlock();
+                               goto retry;
+                       }
                        if (rc)
                                goto out_unlock;
                        if (mynel < maxnel) {
@@ -2730,14 +2807,14 @@ out_unlock:
        rcu_read_unlock();
        if (rc || !mynel) {
                kfree(mysids);
-               goto out;
+               return rc;
        }
 
        rc = -ENOMEM;
        mysids2 = kcalloc(mynel, sizeof(*mysids2), GFP_KERNEL);
        if (!mysids2) {
                kfree(mysids);
-               goto out;
+               return rc;
        }
        for (i = 0, j = 0; i < mynel; i++) {
                struct av_decision dummy_avd;
@@ -2750,12 +2827,10 @@ out_unlock:
                        mysids2[j++] = mysids[i];
                cond_resched();
        }
-       rc = 0;
        kfree(mysids);
        *sids = mysids2;
        *nel = j;
-out:
-       return rc;
+       return 0;
 }
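
Beyond the retry label, two shape changes in security_get_user_sids() deserve a note: the result buffer is now allocated once up front, outside the RCU read-side critical section, which is what lets it switch from GFP_ATOMIC to GFP_KERNEL (sleeping allocations are forbidden under rcu_read_lock()); and mynel is re-zeroed on each retry so results gathered from a stale table are simply overwritten. Restating the relevant lines as a sketch:

    mysids = kcalloc(maxnel, sizeof(*mysids), GFP_KERNEL); /* may sleep here */
    if (!mysids)
            return -ENOMEM;

    retry:
            mynel = 0;           /* discard anything from a stale table */
            rcu_read_lock();     /* no sleeping allocations past this point */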
 
 /**
@@ -2768,6 +2843,9 @@ out:
  * Obtain a SID to use for a file in a filesystem that
  * cannot support xattr or use a fixed labeling behavior like
  * transition SIDs or task SIDs.
+ *
+ * WARNING: This function may return -ESTALE, indicating that the caller
+ * must retry the operation after re-acquiring the policy pointer!
  */
 static inline int __security_genfs_sid(struct selinux_policy *policy,
                                       const char *fstype,
@@ -2846,11 +2924,13 @@ int security_genfs_sid(struct selinux_state *state,
                return 0;
        }
 
-       rcu_read_lock();
-       policy = rcu_dereference(state->policy);
-       retval = __security_genfs_sid(policy,
-                               fstype, path, orig_sclass, sid);
-       rcu_read_unlock();
+       do {
+               rcu_read_lock();
+               policy = rcu_dereference(state->policy);
+               retval = __security_genfs_sid(policy, fstype, path,
+                                             orig_sclass, sid);
+               rcu_read_unlock();
+       } while (retval == -ESTALE);
        return retval;
 }
 
@@ -2873,7 +2953,7 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
        struct selinux_policy *policy;
        struct policydb *policydb;
        struct sidtab *sidtab;
-       int rc = 0;
+       int rc;
        struct ocontext *c;
        struct superblock_security_struct *sbsec = sb->s_security;
        const char *fstype = sb->s_type->name;
@@ -2884,6 +2964,8 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
                return 0;
        }
 
+retry:
+       rc = 0;
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -2901,6 +2983,10 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
                if (!c->sid[0]) {
                        rc = sidtab_context_to_sid(sidtab, &c->context[0],
                                                   &c->sid[0]);
+                       if (rc == -ESTALE) {
+                               rcu_read_unlock();
+                               goto retry;
+                       }
                        if (rc)
                                goto out;
                }
@@ -2908,6 +2994,10 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
        } else {
                rc = __security_genfs_sid(policy, fstype, "/",
                                        SECCLASS_DIR, &sbsec->sid);
+               if (rc == -ESTALE) {
+                       rcu_read_unlock();
+                       goto retry;
+               }
                if (rc) {
                        sbsec->behavior = SECURITY_FS_USE_NONE;
                        rc = 0;
@@ -3117,12 +3207,13 @@ int security_sid_mls_copy(struct selinux_state *state,
        u32 len;
        int rc;
 
-       rc = 0;
        if (!selinux_initialized(state)) {
                *new_sid = sid;
-               goto out;
+               return 0;
        }
 
+retry:
+       rc = 0;
        context_init(&newcon);
 
        rcu_read_lock();
@@ -3181,10 +3272,14 @@ int security_sid_mls_copy(struct selinux_state *state,
                }
        }
        rc = sidtab_context_to_sid(sidtab, &newcon, new_sid);
+       if (rc == -ESTALE) {
+               rcu_read_unlock();
+               context_destroy(&newcon);
+               goto retry;
+       }
 out_unlock:
        rcu_read_unlock();
        context_destroy(&newcon);
-out:
        return rc;
 }
 
@@ -3777,6 +3872,8 @@ int security_netlbl_secattr_to_sid(struct selinux_state *state,
                return 0;
        }
 
+retry:
+       rc = 0;
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -3803,23 +3900,24 @@ int security_netlbl_secattr_to_sid(struct selinux_state *state,
                                goto out;
                }
                rc = -EIDRM;
-               if (!mls_context_isvalid(policydb, &ctx_new))
-                       goto out_free;
+               if (!mls_context_isvalid(policydb, &ctx_new)) {
+                       ebitmap_destroy(&ctx_new.range.level[0].cat);
+                       goto out;
+               }
 
                rc = sidtab_context_to_sid(sidtab, &ctx_new, sid);
+               ebitmap_destroy(&ctx_new.range.level[0].cat);
+               if (rc == -ESTALE) {
+                       rcu_read_unlock();
+                       goto retry;
+               }
                if (rc)
-                       goto out_free;
+                       goto out;
 
                security_netlbl_cache_add(secattr, *sid);
-
-               ebitmap_destroy(&ctx_new.range.level[0].cat);
        } else
                *sid = SECSID_NULL;
 
-       rcu_read_unlock();
-       return 0;
-out_free:
-       ebitmap_destroy(&ctx_new.range.level[0].cat);
 out:
        rcu_read_unlock();
        return rc;
index 5ee190b..656d50b 100644
@@ -39,6 +39,7 @@ int sidtab_init(struct sidtab *s)
        for (i = 0; i < SECINITSID_NUM; i++)
                s->isids[i].set = 0;
 
+       s->frozen = false;
        s->count = 0;
        s->convert = NULL;
        hash_init(s->context_to_sid);
@@ -281,6 +282,15 @@ int sidtab_context_to_sid(struct sidtab *s, struct context *context,
        if (*sid)
                goto out_unlock;
 
+       if (unlikely(s->frozen)) {
+               /*
+                * This sidtab is now frozen - tell the caller to abort and
+                * get the new one.
+                */
+               rc = -ESTALE;
+               goto out_unlock;
+       }
+
        count = s->count;
        convert = s->convert;
 
@@ -474,6 +484,17 @@ void sidtab_cancel_convert(struct sidtab *s)
        spin_unlock_irqrestore(&s->lock, flags);
 }
 
+void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock)
+{
+       spin_lock_irqsave(&s->lock, *flags);
+       s->frozen = true;
+       s->convert = NULL;
+}
+void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock)
+{
+       spin_unlock_irqrestore(&s->lock, *flags);
+}
+
 static void sidtab_destroy_entry(struct sidtab_entry *entry)
 {
        context_destroy(&entry->context);
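
The freeze pair is deliberately split across two functions, hence the __acquires()/__releases() sparse annotations: the old sidtab's spinlock is taken in sidtab_freeze_begin(), held while the caller swaps in the new policy, and released in sidtab_freeze_end(). The expected pairing looks roughly like this (illustrative; the actual call site, presumably in selinux_policy_commit(), is outside this excerpt):

    unsigned long flags;

    sidtab_freeze_begin(oldsidtab, &flags); /* takes s->lock, sets frozen */
    /* ... publish the new policy pointer ... */
    sidtab_freeze_end(oldsidtab, &flags);   /* releases s->lock */

Once frozen is set under the lock, any racing sidtab_context_to_sid() on the old table returns -ESTALE (earlier hunk) instead of inserting an entry that would never be converted into the new table.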
index 80c744d..4eff0e4 100644
@@ -86,6 +86,7 @@ struct sidtab {
        u32 count;
        /* access only under spinlock */
        struct sidtab_convert_params *convert;
+       bool frozen;
        spinlock_t lock;
 
 #if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
@@ -125,6 +126,9 @@ int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params);
 
 void sidtab_cancel_convert(struct sidtab *s);
 
+void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock);
+void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock);
+
 int sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *sid);
 
 void sidtab_destroy(struct sidtab *s);
index 478f757..8dc6133 100644
@@ -613,7 +613,7 @@ static int tomoyo_check_unix_address(struct sockaddr *addr,
 static bool tomoyo_kernel_service(void)
 {
        /* Nothing to do if I am a kernel service. */
-       return (current->flags & (PF_KTHREAD | PF_IO_WORKER)) == PF_KTHREAD;
+       return current->flags & PF_KTHREAD;
 }
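
Context for the TOMOYO one-liner, inferred from the io-thread rework elsewhere in this merge rather than stated in the hunk: PF_IO_WORKER threads used to carry PF_KTHREAD as well, so the masked compare was needed to avoid treating io workers as kernel services; once io workers stopped setting PF_KTHREAD, the plain flag test is equivalent:

    /* Old: kernel thread, but not an io worker.
     *   (flags & (PF_KTHREAD | PF_IO_WORKER)) == PF_KTHREAD
     * New: PF_KTHREAD alone suffices, since io workers no longer
     * set PF_KTHREAD (assumption from the surrounding series). */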
 
 /**
index 8a24e5a..80b814b 100644
@@ -33,7 +33,6 @@
 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_DESCRIPTION("A loopback soundcard");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{ALSA,Loopback soundcard}}");
 
 #define MAX_PCM_SUBSTREAMS     8
 
@@ -1572,6 +1571,14 @@ static int loopback_mixer_new(struct loopback *loopback, int notify)
                                        return -ENOMEM;
                                kctl->id.device = dev;
                                kctl->id.subdevice = substr;
+
+                               /* Add the control before copying the id so that
+                                * the numid field of the id is set in the copy.
+                                */
+                               err = snd_ctl_add(card, kctl);
+                               if (err < 0)
+                                       return err;
+
                                switch (idx) {
                                case ACTIVE_IDX:
                                        setup->active_id = kctl->id;
@@ -1588,9 +1595,6 @@ static int loopback_mixer_new(struct loopback *loopback, int notify)
                                default:
                                        break;
                                }
-                               err = snd_ctl_add(card, kctl);
-                               if (err < 0)
-                                       return err;
                        }
                }
        }
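
The aloop reorder fixes an identifier subtlety that its new comment hints at: snd_ctl_add() is what assigns a control its numid, so copying kctl->id into setup->active_id and friends before the add stored ids whose numid was still zero, and later lookups keyed on those copies could miss (my reading of the hunk; the precise failure is not spelled out here). The fixed shape:

    err = snd_ctl_add(card, kctl); /* numid assigned during registration */
    if (err < 0)
            return err;
    setup->active_id = kctl->id;   /* copy now carries a valid numid */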
index 316c9af..01a3eab 100644
@@ -25,7 +25,6 @@
 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_DESCRIPTION("Dummy soundcard (/dev/null)");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{ALSA,Dummy soundcard}}");
 
 #define MAX_PCM_DEVICES                4
 #define MAX_PCM_SUBSTREAMS     128
index ce5fd17..df4b7f9 100644
@@ -53,7 +53,6 @@
 MODULE_AUTHOR("Michael T. Mayers");
 MODULE_DESCRIPTION("MOTU MidiTimePiece AV multiport MIDI");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{MOTU,MidiTimePiece AV multiport MIDI}}");
 
 // io resources
 #define MTPAV_IOBASE           0x378
index 9c708b6..322d530 100644
@@ -37,7 +37,6 @@ MODULE_PARM_DESC(enable, "Enable " CARD_NAME " soundcard.");
 MODULE_AUTHOR("Matthias Koenig <mk@phasorlab.de>");
 MODULE_DESCRIPTION("ESI Miditerminal 4140");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{ESI,Miditerminal 4140}}");
 
 /*********************************************************************
  * Chip specific
index fd79e57..7689fa2 100644
@@ -22,7 +22,6 @@
 MODULE_AUTHOR("Stas Sergeev <stsp@users.sourceforge.net>");
 MODULE_DESCRIPTION("PC-Speaker driver");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{PC-Speaker, pcsp}}");
 MODULE_ALIAS("platform:pcspkr");
 
 static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */
index c876cf9..2f4514e 100644
@@ -57,7 +57,6 @@ MODULE_PARM_DESC(enable, "Enable " CARD_NAME " soundcard.");
 MODULE_AUTHOR("Levent Guendogdu, Tobias Gehrig, Matthias Koenig");
 MODULE_DESCRIPTION("Midiman Portman2x4");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Midiman,Portman2x4}}");
 
 /*********************************************************************
  * Chip specific
index 3947f08..6d5d1ca 100644
@@ -34,7 +34,6 @@
 
 MODULE_DESCRIPTION("MIDI serial u16550");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{ALSA, MIDI serial u16550}}");
 
 #define SNDRV_SERIAL_SOUNDCANVAS 0 /* Roland Soundcanvas; F5 NN selects part */
 #define SNDRV_SERIAL_MS124T 1      /* Midiator MS-124T */
index f1fb68b..4206d93 100644
@@ -43,7 +43,6 @@
 MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
 MODULE_DESCRIPTION("Dummy soundcard for virtual rawmidi devices");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{ALSA,Virtual rawmidi device}}");
 
 #define MAX_MIDI_DEVICES       4
 
index 8e0c038..1a14c08 100644
@@ -493,11 +493,10 @@ void snd_dice_stream_stop_duplex(struct snd_dice *dice)
        struct reg_params tx_params, rx_params;
 
        if (dice->substreams_counter == 0) {
-               if (get_register_params(dice, &tx_params, &rx_params) >= 0) {
-                       amdtp_domain_stop(&dice->domain);
+               if (get_register_params(dice, &tx_params, &rx_params) >= 0)
                        finish_session(dice, &tx_params, &rx_params);
-               }
 
+               amdtp_domain_stop(&dice->domain);
                release_resources(dice);
        }
 }
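
In the DICE hunk, amdtp_domain_stop() moves out of the conditional: previously a failed get_register_params() read skipped the domain stop along with finish_session(), leaving the streaming domain running; presumably the motivating case is a device whose registers can no longer be read, though the hunk does not say. Only the session teardown still depends on the register read:

    if (get_register_params(dice, &tx_params, &rx_params) >= 0)
            finish_session(dice, &tx_params, &rx_params);

    amdtp_domain_stop(&dice->domain); /* now unconditional */
    release_resources(dice);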
index ca18fe3..f11af98 100644
 MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>");
 MODULE_DESCRIPTION("AD1816A, AD1815");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Highscreen,Sound-Boostar 16 3D},"
-               "{Analog Devices,AD1815},"
-               "{Analog Devices,AD1816A},"
-               "{TerraTec,Base 64},"
-               "{TerraTec,AudioSystem EWS64S},"
-               "{Aztech/Newcom SC-16 3D},"
-               "{Shark Predator ISA}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 1-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;      /* ID for this card */
index 6f221ee..edafb49 100644
@@ -22,9 +22,6 @@
 MODULE_DESCRIPTION(CRD_NAME);
 MODULE_AUTHOR("Tugrul Galatali <galatalt@stuy.edu>, Jaroslav Kysela <perex@perex.cz>");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Analog Devices,AD1848},"
-               "{Analog Devices,AD1847},"
-               "{Crystal Semiconductors,CS4248}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;      /* ID for this card */
index 1085f5b..bacb7a1 100644
 #define PFX "als100: "
 
 MODULE_DESCRIPTION("Avance Logic ALS007/ALS1X0");
-MODULE_SUPPORTED_DEVICE("{{Diamond Technologies DT-019X},"
-               "{Avance Logic ALS-007}}"
-               "{{Avance Logic,ALS100 - PRO16PNP},"
-               "{Avance Logic,ALS110},"
-               "{Avance Logic,ALS120},"
-               "{Avance Logic,ALS200},"
-               "{3D Melody,MF1000},"
-               "{Digimate,3D Sound},"
-               "{Avance Logic,ALS120},"
-               "{RTL,RTL3000}}");
-
 MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>");
 MODULE_LICENSE("GPL");
 
index 4ed5209..867e9ae 100644
 MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>");
 MODULE_DESCRIPTION("Aztech Systems AZT2320");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Aztech Systems,PRO16V},"
-               "{Aztech Systems,AZT2320},"
-               "{Aztech Systems,AZT3300},"
-               "{Aztech Systems,AZT2320},"
-               "{Aztech Systems,AZT3000}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;      /* ID for this card */
index 19e2585..bc112df 100644
@@ -51,7 +51,6 @@
 MODULE_AUTHOR("George Talusan <gstalusan@uwaterloo.ca>");
 MODULE_DESCRIPTION("C-Media CMI8330/CMI8329");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{C-Media,CMI8330,isapnp:{CMI0001,@@@0001,@X@0001}}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
index c56cbc0..ec054b9 100644
@@ -23,7 +23,6 @@
 MODULE_DESCRIPTION(CRD_NAME);
 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Crystal Semiconductors,CS4231}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;      /* ID for this card */
index 63fb0cb..186d7d4 100644
 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Cirrus Logic CS4232-9");
-MODULE_SUPPORTED_DEVICE("{{Turtle Beach,TBS-2000},"
-               "{Turtle Beach,Tropez Plus},"
-               "{SIC CrystalWave 32},"
-               "{Hewlett Packard,Omnibook 5500},"
-               "{TerraTec,Maestro 32/96},"
-               "{Philips,PCA70PS}},"
-               "{{Crystal Semiconductors,CS4235},"
-               "{Crystal Semiconductors,CS4236},"
-               "{Crystal Semiconductors,CS4237},"
-               "{Crystal Semiconductors,CS4238},"
-               "{Crystal Semiconductors,CS4239},"
-               "{Acer,AW37},"
-               "{Acer,AW35/Pro},"
-               "{Crystal,3D},"
-               "{Crystal Computer,TidalWave128},"
-               "{Dell,Optiplex GX1},"
-               "{Dell,Workstation 400 sound},"
-               "{EliteGroup,P5TX-LA sound},"
-               "{Gallant,SC-70P},"
-               "{Gateway,E1000 Onboard CS4236B},"
-               "{Genius,Sound Maker 3DJ},"
-               "{Hewlett Packard,HP6330 sound},"
-               "{IBM,PC 300PL sound},"
-               "{IBM,Aptiva 2137 E24},"
-               "{IBM,IntelliStation M Pro},"
-               "{Intel,Marlin Spike Mobo CS4235},"
-               "{Intel PR440FX Onboard},"
-               "{Guillemot,MaxiSound 16 PnP},"
-               "{NewClear,3D},"
-               "{TerraTec,AudioSystem EWS64L/XL},"
-               "{Typhoon Soundsystem,CS4236B},"
-               "{Turtle Beach,Malibu},"
-               "{Unknown,Digital PC 5000 Onboard}}");
-
 MODULE_ALIAS("snd_cs4232");
 
 #define IDENT "CS4232+"
index 4a1f61f..750d499 100644
 MODULE_DESCRIPTION(CRD_NAME);
 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{ESS,ES688 PnP AudioDrive,pnp:ESS0100},"
-               "{ESS,ES1688 PnP AudioDrive,pnp:ESS0102},"
-               "{ESS,ES688 AudioDrive,pnp:ESS6881},"
-               "{ESS,ES1688 AudioDrive,pnp:ESS1681}}");
-
 MODULE_ALIAS("snd_es968");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
index 9beef80..375a4a6 100644
@@ -1929,17 +1929,9 @@ static int snd_es18xx_mixer(struct snd_card *card)
 
 /* Card level */
 
-MODULE_AUTHOR("Christian Fischbach <fishbach@pool.informatik.rwth-aachen.de>, Abramo Bagnara <abramo@alsa-project.org>");  
+MODULE_AUTHOR("Christian Fischbach <fishbach@pool.informatik.rwth-aachen.de>, Abramo Bagnara <abramo@alsa-project.org>");
 MODULE_DESCRIPTION("ESS ES18xx AudioDrive");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{ESS,ES1868 PnP AudioDrive},"
-               "{ESS,ES1869 PnP AudioDrive},"
-               "{ESS,ES1878 PnP AudioDrive},"
-               "{ESS,ES1879 PnP AudioDrive},"
-               "{ESS,ES1887 PnP AudioDrive},"
-               "{ESS,ES1888 PnP AudioDrive},"
-               "{ESS,ES1887 AudioDrive},"
-               "{ESS,ES1888 AudioDrive}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;      /* ID for this card */
index 015f88a..0fba5d8 100644
@@ -23,7 +23,6 @@
 MODULE_DESCRIPTION(CRD_NAME);
 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Gravis,UltraSound Classic}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;      /* ID for this card */
index c9f31b4..da2b2ca 100644
@@ -27,7 +27,6 @@
 MODULE_DESCRIPTION(CRD_NAME);
 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Gravis,UltraSound Extreme}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;      /* ID for this card */
index dc09fbd..24b945f 100644
@@ -21,7 +21,6 @@
 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_DESCRIPTION("Gravis UltraSound MAX");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Gravis,UltraSound MAX}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;      /* ID for this card */
index e4d412e..99581fb 100644
@@ -28,14 +28,8 @@ MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_LICENSE("GPL");
 #ifndef SNDRV_STB
 MODULE_DESCRIPTION("AMD InterWave");
-MODULE_SUPPORTED_DEVICE("{{Gravis,UltraSound Plug & Play},"
-               "{STB,SoundRage32},"
-               "{MED,MED3210},"
-               "{Dynasonix,Dynasonix Pro},"
-               "{Panasonic,PCA761AW}}");
 #else
 MODULE_DESCRIPTION("AMD InterWave STB with TEA6330T");
-MODULE_SUPPORTED_DEVICE("{{AMD,InterWave STB with TEA6330T}}");
 #endif
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
index 7649a8a..9bde11d 100644
 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_DESCRIPTION("Yamaha OPL3SA2+");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Yamaha,YMF719E-S},"
-               "{Genius,Sound Maker 3DX},"
-               "{Yamaha,OPL3SA3},"
-               "{Intel,AL440LX sound},"
-               "{NeoMagic,MagicWave 3DX}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;      /* ID for this card */
index 2093334..a510b20 100644
@@ -33,9 +33,6 @@
 MODULE_AUTHOR("Martin Langer <martin-langer@gmx.de>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Miro miroSOUND PCM1 pro, PCM12, PCM20 Radio");
-MODULE_SUPPORTED_DEVICE("{{Miro,miroSOUND PCM1 pro}, "
-                       "{Miro,miroSOUND PCM12}, "
-                       "{Miro,miroSOUND PCM20 Radio}}");
 
 static int index = SNDRV_DEFAULT_IDX1;         /* Index 0-MAX */
 static char *id = SNDRV_DEFAULT_STR1;          /* ID for this card */
index 758f5b5..08e61d9 100644
@@ -36,17 +36,11 @@ MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>");
 MODULE_LICENSE("GPL");
 #ifdef OPTi93X
 MODULE_DESCRIPTION("OPTi93X");
-MODULE_SUPPORTED_DEVICE("{{OPTi,82C931/3}}");
 #else  /* OPTi93X */
 #ifdef CS4231
 MODULE_DESCRIPTION("OPTi92X - CS4231");
-MODULE_SUPPORTED_DEVICE("{{OPTi,82C924 (CS4231)},"
-               "{OPTi,82C925 (CS4231)}}");
 #else  /* CS4231 */
 MODULE_DESCRIPTION("OPTi92X - AD1848");
-MODULE_SUPPORTED_DEVICE("{{OPTi,82C924 (AD1848)},"
-               "{OPTi,82C925 (AD1848)},"
-               "{OAK,Mozart}}");
 #endif /* CS4231 */
 #endif /* OPTi93X */
 
index 0e2e0ab..7ba5dd1 100644
@@ -28,9 +28,6 @@
 #define PFX "jazz16: "
 
 MODULE_DESCRIPTION("Media Vision Jazz16");
-MODULE_SUPPORTED_DEVICE("{{Media Vision ??? },"
-               "{RTL,RTL3000}}");
-
 MODULE_AUTHOR("Krzysztof Helt <krzysztof.h1@wp.pl>");
 MODULE_LICENSE("GPL");
 
index db284b7..63ef960 100644
@@ -31,16 +31,8 @@ MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_LICENSE("GPL");
 #ifndef SNDRV_SBAWE
 MODULE_DESCRIPTION("Sound Blaster 16");
-MODULE_SUPPORTED_DEVICE("{{Creative Labs,SB 16},"
-               "{Creative Labs,SB Vibra16S},"
-               "{Creative Labs,SB Vibra16C},"
-               "{Creative Labs,SB Vibra16CL},"
-               "{Creative Labs,SB Vibra16X}}");
 #else
 MODULE_DESCRIPTION("Sound Blaster AWE");
-MODULE_SUPPORTED_DEVICE("{{Creative Labs,SB AWE 32},"
-               "{Creative Labs,SB AWE 64},"
-               "{Creative Labs,SB AWE 64 Gold}}");
 #endif
 
 #if 0
index 8e3e67b..6c9d534 100644
@@ -17,7 +17,6 @@
 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_DESCRIPTION("Sound Blaster 1.0/2.0/Pro");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Creative Labs,SB 1.0/SB 2.0/SB Pro}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;      /* ID for this card */
index def1375..3462663 100644
@@ -29,9 +29,6 @@
 MODULE_AUTHOR("Krzysztof Helt");
 MODULE_DESCRIPTION("Gallant SC-6000");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Gallant, SC-6000},"
-                       "{AudioExcel, Audio Excel DSP 16},"
-                       "{Zoltrix, AV302}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;      /* ID for this card */
index b750a4f..a443797 100644
@@ -21,7 +21,6 @@
 MODULE_AUTHOR("Paul Barton-Davis <pbd@op.net>");
 MODULE_DESCRIPTION("Turtle Beach Wavefront");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Turtle Beach,Maui/Tropez/Tropez+}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;         /* Index 0-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;          /* ID for this card */
index 5bf1ea1..989f656 100644
@@ -32,7 +32,6 @@
 MODULE_AUTHOR("Vivien Chappelier <vivien.chappelier@linux-mips.org>");
 MODULE_DESCRIPTION("SGI O2 Audio");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Silicon Graphics, O2 Audio}}");
 
 static int index = SNDRV_DEFAULT_IDX1;  /* Index 0-MAX */
 static char *id = SNDRV_DEFAULT_STR1;   /* ID for this card */
index 5d835d2..4520022 100644
@@ -43,7 +43,6 @@
 MODULE_AUTHOR("Kyle McMartin <kyle@parisc-linux.org>, Thibaut Varene <t-bone@parisc-linux.org>");
 MODULE_DESCRIPTION("Analog Devices AD1889 ALSA sound driver");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Analog Devices,AD1889}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
 module_param_array(index, int, NULL, 0444);
index 51f2479..0d66b92 100644
@@ -29,7 +29,6 @@
 MODULE_AUTHOR("Matt Wu <Matt_Wu@acersoftech.com.cn>");
 MODULE_DESCRIPTION("ALI M5451");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{ALI,M5451,pci},{ALI,M5451}}");
 
 static int index = SNDRV_DEFAULT_IDX1; /* Index */
 static char *id = SNDRV_DEFAULT_STR1;  /* ID for this card */
index 1dc8c4e..bd4fd09 100644
@@ -86,7 +86,6 @@ enum {DEVICE_ALS300, DEVICE_ALS300_PLUS};
 MODULE_AUTHOR("Ash Willis <ashwillis@programmer.net>");
 MODULE_DESCRIPTION("Avance Logic ALS300");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Avance Logic,ALS300},{Avance Logic,ALS300+}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
index 2edc745..139ac2a 100644
@@ -68,7 +68,6 @@
 MODULE_AUTHOR("Bart Hartgers <bart@etpmod.phys.tue.nl>, Andreas Mohr");
 MODULE_DESCRIPTION("Avance Logic ALS4000");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Avance Logic,ALS4000}}");
 
 #if IS_REACHABLE(CONFIG_GAMEPORT)
 #define SUPPORT_JOYSTICK 1
index a25d754..579425c 100644
@@ -23,7 +23,6 @@
 MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
 MODULE_DESCRIPTION("ATI IXP AC97 controller");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{ATI,IXP150/200/250/300/400/600}}");
 
 static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */
 static char *id = SNDRV_DEFAULT_STR1;  /* ID for this card */
index ae88217..45e75af 100644
@@ -23,7 +23,6 @@
 MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
 MODULE_DESCRIPTION("ATI IXP MC97 controller");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{ATI,IXP150/200/250}}");
 
 static int index = -2; /* Exclude the first card */
 static char *id = SNDRV_DEFAULT_STR1;  /* ID for this card */
index 5dd98e6..1b37b72 100644
@@ -41,8 +41,6 @@ MODULE_PARM_DESC(pcifix, "Enable VIA-workaround for " CARD_NAME " soundcard.");
 
 MODULE_DESCRIPTION("Aureal vortex");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Aureal Semiconductor Inc., Aureal Vortex Sound Processor}}");
-
 MODULE_DEVICE_TABLE(pci, snd_vortex_ids);
 
 static void vortex_fix_latency(struct pci_dev *vortex)
index 2ac594d..51dcf1b 100644
 MODULE_AUTHOR("Andreas Mohr <andi AT lisas.de>");
 MODULE_DESCRIPTION("Aztech AZF3328 (PCI168)");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Aztech,AZF3328}}");
 
 #if IS_REACHABLE(CONFIG_GAMEPORT)
 #define SUPPORT_GAMEPORT 1
index cf9f8d8..91512b3 100644
@@ -23,8 +23,6 @@
 MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
 MODULE_DESCRIPTION("Brooktree Bt87x audio driver");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Brooktree,Bt878},"
-               "{Brooktree,Bt879}}");
 
 static int index[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = -2}; /* Exclude the first card */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;      /* ID for this card */
index ee20f9a..bee4710 100644
 MODULE_AUTHOR("James Courtier-Dutton <James@superbug.demon.co.uk>");
 MODULE_DESCRIPTION("CA0106");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Creative,SB CA0106 chip}}");
 
 // module parameters (see "Module Parameters")
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
index 7363d61..5984463 100644
 MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
 MODULE_DESCRIPTION("C-Media CMI8x38 PCI");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{C-Media,CMI8738},"
-               "{C-Media,CMI8738B},"
-               "{C-Media,CMI8338A},"
-               "{C-Media,CMI8338B}}");
 
 #if IS_REACHABLE(CONFIG_GAMEPORT)
 #define SUPPORT_JOYSTICK 1
index 94d2a6a..bf3bb70 100644
@@ -25,7 +25,6 @@
 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_DESCRIPTION("Cirrus Logic CS4281");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Cirrus Logic,CS4281}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;      /* ID for this card */
index a6e0a44..1db7b41 100644
 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_DESCRIPTION("Cirrus Logic Sound Fusion CS46XX");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Cirrus Logic,Sound Fusion (CS4280)},"
-               "{Cirrus Logic,Sound Fusion (CS4610)},"
-               "{Cirrus Logic,Sound Fusion (CS4612)},"
-               "{Cirrus Logic,Sound Fusion (CS4615)},"
-               "{Cirrus Logic,Sound Fusion (CS4622)},"
-               "{Cirrus Logic,Sound Fusion (CS4624)},"
-               "{Cirrus Logic,Sound Fusion (CS4630)}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;      /* ID for this card */
index 359bc6a..9b716b5 100644
@@ -393,4 +393,3 @@ module_pci_driver(cs5535audio_driver);
 MODULE_AUTHOR("Jaya Kumar");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("CS5535 Audio");
-MODULE_SUPPORTED_DEVICE("CS5535 Audio");
index 8c07c64..713d36e 100644
@@ -18,7 +18,6 @@
 MODULE_AUTHOR("Creative Technology Ltd");
 MODULE_DESCRIPTION("X-Fi driver version 1.03");
 MODULE_LICENSE("GPL v2");
-MODULE_SUPPORTED_DEVICE("{{Creative Labs, Sound Blaster X-Fi}");
 
 static unsigned int reference_rate = 48000;
 static unsigned int multiple = 2;
index a20b2bb..9bd67ac 100644
@@ -10,7 +10,6 @@
 MODULE_AUTHOR("Giuliano Pochini <pochini@shiny.it>");
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Echoaudio " ECHOCARD_NAME " soundcards driver");
-MODULE_SUPPORTED_DEVICE("{{Echoaudio," ECHOCARD_NAME "}}");
 MODULE_DEVICE_TABLE(pci, snd_echo_ids);
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
index 353934c..45833bc 100644
@@ -18,8 +18,6 @@
 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_DESCRIPTION("EMU10K1");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Creative Labs,SB Live!/PCI512/E-mu APS},"
-              "{Creative Labs,SB Audigy}}");
 
 #if IS_ENABLED(CONFIG_SND_SEQUENCER)
 #define ENABLE_SYNTH
index 785ec0c..d9a12cd 100644
@@ -31,7 +31,6 @@
 MODULE_AUTHOR("Francisco Moraes <fmoraes@nc.rr.com>");
 MODULE_DESCRIPTION("EMU10K1X");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Dell Creative Labs,SB Live!}");
 
 // module parameters (see "Module Parameters")
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
index 93c4fd3..3ccccdb 100644
@@ -52,17 +52,9 @@ MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, Thomas Sailer <sailer@ife.ee.et
 MODULE_LICENSE("GPL");
 #ifdef CHIP1370
 MODULE_DESCRIPTION("Ensoniq AudioPCI ES1370");
-MODULE_SUPPORTED_DEVICE("{{Ensoniq,AudioPCI-97 ES1370},"
-               "{Creative Labs,SB PCI64/128 (ES1370)}}");
 #endif
 #ifdef CHIP1371
 MODULE_DESCRIPTION("Ensoniq/Creative AudioPCI ES1371+");
-MODULE_SUPPORTED_DEVICE("{{Ensoniq,AudioPCI ES1371/73},"
-               "{Ensoniq,AudioPCI ES1373},"
-               "{Creative Labs,Ectiva EV1938},"
-               "{Creative Labs,SB PCI64/128 (ES1371/73)},"
-               "{Creative Labs,Vibra PCI128},"
-               "{Ectiva,EV1938}}");
 #endif
 
 #if IS_REACHABLE(CONFIG_GAMEPORT)
index 3b5d68c..afc6634 100644
 MODULE_AUTHOR("Jaromir Koutek <miri@punknet.cz>");
 MODULE_DESCRIPTION("ESS Solo-1");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{ESS,ES1938},"
-                "{ESS,ES1946},"
-                "{ESS,ES1969},"
-               "{TerraTec,128i PCI}}");
 
 #if IS_REACHABLE(CONFIG_GAMEPORT)
 #define SUPPORT_JOYSTICK 1
index 747fa69..5fa1861 100644
 
 MODULE_DESCRIPTION("ESS Maestro");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{ESS,Maestro 2e},"
-               "{ESS,Maestro 2},"
-               "{ESS,Maestro 1},"
-               "{TerraTec,DMX}}");
 
 #if IS_REACHABLE(CONFIG_GAMEPORT)
 #define SUPPORT_JOYSTICK 1
index c6ad623..6279eb1 100644
@@ -26,8 +26,6 @@
 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_DESCRIPTION("ForteMedia FM801");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{ForteMedia,FM801},"
-               "{Genius,SoundMaker Live 5.1}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;      /* ID for this card */
index 8b7c550..f5cba7a 100644
@@ -4065,7 +4065,7 @@ static int add_micmute_led_hook(struct hda_codec *codec)
 
        spec->micmute_led.led_mode = MICMUTE_LED_FOLLOW_MUTE;
        spec->micmute_led.capture = 0;
-       spec->micmute_led.led_value = 0;
+       spec->micmute_led.led_value = -1;
        spec->micmute_led.old_hook = spec->cap_sync_hook;
        spec->cap_sync_hook = update_micmute_led;
        if (!snd_hda_gen_add_kctl(spec, NULL, &micmute_led_mode_ctl))
index 5eea130..79ade33 100644
@@ -208,40 +208,6 @@ MODULE_PARM_DESC(snoop, "Enable/disable snooping");
 
 
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
-                        "{Intel, ICH6M},"
-                        "{Intel, ICH7},"
-                        "{Intel, ESB2},"
-                        "{Intel, ICH8},"
-                        "{Intel, ICH9},"
-                        "{Intel, ICH10},"
-                        "{Intel, PCH},"
-                        "{Intel, CPT},"
-                        "{Intel, PPT},"
-                        "{Intel, LPT},"
-                        "{Intel, LPT_LP},"
-                        "{Intel, WPT_LP},"
-                        "{Intel, SPT},"
-                        "{Intel, SPT_LP},"
-                        "{Intel, HPT},"
-                        "{Intel, PBG},"
-                        "{Intel, SCH},"
-                        "{ATI, SB450},"
-                        "{ATI, SB600},"
-                        "{ATI, RS600},"
-                        "{ATI, RS690},"
-                        "{ATI, RS780},"
-                        "{ATI, R600},"
-                        "{ATI, RV630},"
-                        "{ATI, RV610},"
-                        "{ATI, RV670},"
-                        "{ATI, RV635},"
-                        "{ATI, RV620},"
-                        "{ATI, RV770},"
-                        "{VIA, VT8251},"
-                        "{VIA, VT8237A},"
-                        "{SiS, SIS966},"
-                        "{ULI, M5461}}");
 MODULE_DESCRIPTION("Intel HDA driver");
 
 #if defined(CONFIG_PM) && defined(CONFIG_VGA_SWITCHEROO)
@@ -1023,8 +989,12 @@ static int azx_prepare(struct device *dev)
        struct snd_card *card = dev_get_drvdata(dev);
        struct azx *chip;
 
+       if (!azx_is_pm_ready(card))
+               return 0;
+
        chip = card->private_data;
        chip->pm_prepared = 1;
+       snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
 
        flush_work(&azx_bus(chip)->unsol_work);
 
@@ -1039,7 +1009,11 @@ static void azx_complete(struct device *dev)
        struct snd_card *card = dev_get_drvdata(dev);
        struct azx *chip;
 
+       if (!azx_is_pm_ready(card))
+               return;
+
        chip = card->private_data;
+       snd_power_change_state(card, SNDRV_CTL_POWER_D0);
        chip->pm_prepared = 0;
 }
 
index c20dad4..dfef9c1 100644
@@ -944,6 +944,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
        SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x8402, "HP ProBook 645 G4", CXT_FIXUP_MUTE_LED_GPIO),
        SND_PCI_QUIRK(0x103c, 0x8427, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
+       SND_PCI_QUIRK(0x103c, 0x844f, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x8456, "HP Z2 G4 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE),
index b47504f..a7544b7 100644
@@ -3927,6 +3927,15 @@ static void alc271_fixup_dmic(struct hda_codec *codec,
                snd_hda_sequence_write(codec, verbs);
 }
 
+/* Fix the speaker amp after resume, etc */
+static void alc269vb_fixup_aspire_e1_coef(struct hda_codec *codec,
+                                         const struct hda_fixup *fix,
+                                         int action)
+{
+       if (action == HDA_FIXUP_ACT_INIT)
+               alc_update_coef_idx(codec, 0x0d, 0x6000, 0x6000);
+}
+
 static void alc269_fixup_pcm_44k(struct hda_codec *codec,
                                 const struct hda_fixup *fix, int action)
 {
@@ -4225,6 +4234,12 @@ static void alc_fixup_hp_gpio_led(struct hda_codec *codec,
        }
 }
 
+static void alc236_fixup_hp_gpio_led(struct hda_codec *codec,
+                               const struct hda_fixup *fix, int action)
+{
+       alc_fixup_hp_gpio_led(codec, action, 0x02, 0x01);
+}
+
 static void alc269_fixup_hp_gpio_led(struct hda_codec *codec,
                                const struct hda_fixup *fix, int action)
 {
@@ -5250,7 +5265,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
        case 0x10ec0274:
        case 0x10ec0294:
                alc_process_coef_fw(codec, coef0274);
-               msleep(80);
+               msleep(850);
                val = alc_read_coef_idx(codec, 0x46);
                is_ctia = (val & 0x00f0) == 0x00f0;
                break;
@@ -5434,6 +5449,7 @@ static void alc_update_headset_jack_cb(struct hda_codec *codec,
                                       struct hda_jack_callback *jack)
 {
        snd_hda_gen_hp_automute(codec, jack);
+       alc_update_headset_mode(codec);
 }
 
 static void alc_probe_headset_mode(struct hda_codec *codec)
@@ -6294,6 +6310,7 @@ enum {
        ALC283_FIXUP_HEADSET_MIC,
        ALC255_FIXUP_MIC_MUTE_LED,
        ALC282_FIXUP_ASPIRE_V5_PINS,
+       ALC269VB_FIXUP_ASPIRE_E1_COEF,
        ALC280_FIXUP_HP_GPIO4,
        ALC286_FIXUP_HP_GPIO_LED,
        ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY,
@@ -6381,6 +6398,7 @@ enum {
        ALC294_FIXUP_ASUS_GX502_VERBS,
        ALC285_FIXUP_HP_GPIO_LED,
        ALC285_FIXUP_HP_MUTE_LED,
+       ALC236_FIXUP_HP_GPIO_LED,
        ALC236_FIXUP_HP_MUTE_LED,
        ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
        ALC295_FIXUP_ASUS_MIC_NO_PRESENCE,
@@ -6971,6 +6989,10 @@ static const struct hda_fixup alc269_fixups[] = {
                        { },
                },
        },
+       [ALC269VB_FIXUP_ASPIRE_E1_COEF] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc269vb_fixup_aspire_e1_coef,
+       },
        [ALC280_FIXUP_HP_GPIO4] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc280_fixup_hp_gpio4,
@@ -7616,6 +7638,10 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc285_fixup_hp_mute_led,
        },
+       [ALC236_FIXUP_HP_GPIO_LED] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc236_fixup_hp_gpio_led,
+       },
        [ALC236_FIXUP_HP_MUTE_LED] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc236_fixup_hp_mute_led,
@@ -7889,6 +7915,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
        SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
        SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
+       SND_PCI_QUIRK(0x1025, 0x0840, "Acer Aspire E1", ALC269VB_FIXUP_ASPIRE_E1_COEF),
        SND_PCI_QUIRK(0x1025, 0x101c, "Acer Veriton N2510G", ALC269_FIXUP_LIFEBOOK),
        SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC),
@@ -8045,9 +8072,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x8783, "HP ZBook Fury 15 G7 Mobile Workstation",
                      ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x87f2, "HP ProBook 640 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
+       SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x884c, "HP EliteBook 840 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
        SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -8242,7 +8273,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101),
        SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
        SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
        SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
        SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
        SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
@@ -8377,6 +8410,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC283_FIXUP_HEADSET_MIC, .name = "alc283-headset"},
        {.id = ALC255_FIXUP_MIC_MUTE_LED, .name = "alc255-dell-mute"},
        {.id = ALC282_FIXUP_ASPIRE_V5_PINS, .name = "aspire-v5"},
+       {.id = ALC269VB_FIXUP_ASPIRE_E1_COEF, .name = "aspire-e1-coef"},
        {.id = ALC280_FIXUP_HP_GPIO4, .name = "hp-gpio4"},
        {.id = ALC286_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
        {.id = ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY, .name = "hp-gpio2-hotkey"},
index f814dbb..d54cd51 100644
 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_DESCRIPTION("ICEnsemble ICE1712 (Envy24)");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{"
-              HOONTECH_DEVICE_DESC
-              DELTA_DEVICE_DESC
-              EWS_DEVICE_DESC
-              "{ICEnsemble,Generic ICE1712},"
-              "{ICEnsemble,Generic Envy24}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;      /* ID for this card */
index c0fca94..ef2367d 100644
 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_DESCRIPTION("VIA ICEnsemble ICE1724/1720 (Envy24HT/PT)");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{"
-              REVO_DEVICE_DESC
-              AMP_AUDIO2000_DEVICE_DESC
-              AUREON_DEVICE_DESC
-              VT1720_MOBO_DEVICE_DESC
-              PONTIS_DEVICE_DESC
-              PRODIGY192_DEVICE_DESC
-              PRODIGY_HIFI_DEVICE_DESC
-              JULI_DEVICE_DESC
-              MAYA44_DEVICE_DESC
-              PHASE_DEVICE_DESC
-              WTM_DEVICE_DESC
-              SE_DEVICE_DESC
-              QTET_DEVICE_DESC
-               "{VIA,VT1720},"
-               "{VIA,VT1724},"
-               "{ICEnsemble,Generic ICE1724},"
-               "{ICEnsemble,Generic Envy24HT}"
-               "{ICEnsemble,Generic Envy24PT}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;      /* ID for this card */
index 3349e45..35903d1 100644
 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_DESCRIPTION("Intel 82801AA,82901AB,i810,i820,i830,i840,i845,MX440; SiS 7012; Ali 5455");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Intel,82801AA-ICH},"
-               "{Intel,82901AB-ICH0},"
-               "{Intel,82801BA-ICH2},"
-               "{Intel,82801CA-ICH3},"
-               "{Intel,82801DB-ICH4},"
-               "{Intel,ICH5},"
-               "{Intel,ICH6},"
-               "{Intel,ICH7},"
-               "{Intel,6300ESB},"
-               "{Intel,ESB2},"
-               "{Intel,MX440},"
-               "{SiS,SI7012},"
-               "{NVidia,nForce Audio},"
-               "{NVidia,nForce2 Audio},"
-               "{NVidia,nForce3 Audio},"
-               "{NVidia,MCP04},"
-               "{NVidia,MCP501},"
-               "{NVidia,CK804},"
-               "{NVidia,CK8},"
-               "{NVidia,CK8S},"
-               "{AMD,AMD768},"
-               "{AMD,AMD8111},"
-               "{ALI,M5455}}");
 
 static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */
 static char *id = SNDRV_DEFAULT_STR1;  /* ID for this card */
index 19872ce..13ef838 100644
@@ -25,21 +25,6 @@ MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_DESCRIPTION("Intel 82801AA,82901AB,i810,i820,i830,i840,i845,MX440; "
                   "SiS 7013; NVidia MCP/2/2S/3 modems");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Intel,82801AA-ICH},"
-               "{Intel,82901AB-ICH0},"
-               "{Intel,82801BA-ICH2},"
-               "{Intel,82801CA-ICH3},"
-               "{Intel,82801DB-ICH4},"
-               "{Intel,ICH5},"
-               "{Intel,ICH6},"
-               "{Intel,ICH7},"
-               "{Intel,MX440},"
-               "{SiS,7013},"
-               "{NVidia,NForce Modem},"
-               "{NVidia,NForce2 Modem},"
-               "{NVidia,NForce2s Modem},"
-               "{NVidia,NForce3 Modem},"
-               "{AMD,AMD768}}");
 
 static int index = -2; /* Exclude the first card */
 static char *id = SNDRV_DEFAULT_STR1;  /* ID for this card */
index 2eddd9d..80ac3c6 100644
@@ -388,7 +388,6 @@ struct snd_korg1212 {
 
 MODULE_DESCRIPTION("korg1212");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{KORG,korg1212}}");
 MODULE_FIRMWARE("korg/k1212.dsp");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
index 491c90f..03b4be4 100644
@@ -54,7 +54,6 @@ MODULE_PARM_DESC(sample_rate_min, "Minimal sample rate");
  */
 
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Digigram, Lola}}");
 MODULE_DESCRIPTION("Digigram Lola driver");
 MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
 
index b92ea07..1be97c3 100644
@@ -21,8 +21,6 @@
 MODULE_AUTHOR("Tim Blechmann");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("digigram lx6464es");
-MODULE_SUPPORTED_DEVICE("{digigram lx6464es{}}");
-
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
index d2c2cd6..cdc4b61 100644
 MODULE_AUTHOR("Zach Brown <zab@zabbo.net>, Takashi Iwai <tiwai@suse.de>");
 MODULE_DESCRIPTION("ESS Maestro3 PCI");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{ESS,Maestro3 PCI},"
-               "{ESS,ES1988},"
-               "{ESS,Allegro PCI},"
-               "{ESS,Allegro-1 PCI},"
-               "{ESS,Canyon3D-2/LE PCI}}");
 MODULE_FIRMWARE("ess/maestro3_assp_kernel.fw");
 MODULE_FIRMWARE("ess/maestro3_assp_minisrc.fw");
 
index efff220..a0bbb38 100644
@@ -32,7 +32,6 @@
 MODULE_AUTHOR("Digigram <alsa@digigram.com>");
 MODULE_DESCRIPTION("Digigram " CARD_NAME);
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Digigram," CARD_NAME "}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;             /* Index 0-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;              /* ID for this card */
index 9759946..6cb689a 100644
@@ -32,8 +32,6 @@
 MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
 MODULE_DESCRIPTION("NeoMagic NM256AV/ZX");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{NeoMagic,NM256AV},"
-               "{NeoMagic,NM256ZX}}");
 
 /*
  * some compile conditions.
index a751fcc..e335c4b 100644
@@ -56,9 +56,6 @@
 MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
 MODULE_DESCRIPTION("C-Media CMI8788 driver");
 MODULE_LICENSE("GPL v2");
-MODULE_SUPPORTED_DEVICE("{{C-Media,CMI8786}"
-                       ",{C-Media,CMI8787}"
-                       ",{C-Media,CMI8788}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
index 78c35a0..434f885 100644
@@ -29,7 +29,6 @@
 MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
 MODULE_DESCRIPTION("Studio Evolution SE6X driver");
 MODULE_LICENSE("GPL v2");
-MODULE_SUPPORTED_DEVICE("{{Studio Evolution,SE6X}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
index 98ab163..baa3244 100644
@@ -16,7 +16,6 @@
 MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
 MODULE_DESCRIPTION("Asus Virtuoso driver");
 MODULE_LICENSE("GPL v2");
-MODULE_SUPPORTED_DEVICE("{{Asus,AV66},{Asus,AV100},{Asus,AV200}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
index c2e4831..751f974 100644
@@ -35,7 +35,6 @@ MODULE_AUTHOR("Markus Bollinger <bollinger@digigram.com>, "
              "Marc Titinger <titinger@digigram.com>");
 MODULE_DESCRIPTION("Digigram " DRIVER_NAME " " PCXHR_DRIVER_VERSION_STRING);
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Digigram," DRIVER_NAME "}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;      /* ID for this card */
index fcc2073..56827db 100644
 MODULE_AUTHOR("Peter Gruber <nokos@gmx.net>");
 MODULE_DESCRIPTION("riptide");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Conexant,Riptide}}");
 MODULE_FIRMWARE("riptide.hex");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
index 4eabece..54f3e39 100644
@@ -88,7 +88,6 @@ MODULE_PARM_DESC(fullduplex, "Support full-duplex mode.");
 MODULE_AUTHOR("Martin Langer <martin-langer@gmx.de>, Pilo Chambert <pilo.c@wanadoo.fr>");
 MODULE_DESCRIPTION("RME Digi32, Digi32/8, Digi32 PRO");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{RME,Digi32}," "{RME,Digi32/8}," "{RME,Digi32 PRO}}");
 
 /* Defines for RME Digi32 series */
 #define RME32_SPDIF_NCHANNELS 2
index 84eef6a..66082e9 100644
@@ -31,11 +31,6 @@ MODULE_AUTHOR("Anders Torger <torger@ludd.luth.se>");
 MODULE_DESCRIPTION("RME Digi96, Digi96/8, Digi96/8 PRO, Digi96/8 PST, "
                   "Digi96/8 PAD");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{RME,Digi96},"
-               "{RME,Digi96/8},"
-               "{RME,Digi96/8 PRO},"
-               "{RME,Digi96/8 PST},"
-               "{RME,Digi96/8 PAD}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;      /* ID for this card */
index 6d90293..4cf879c 100644
@@ -44,9 +44,6 @@ MODULE_PARM_DESC(enable, "Enable/disable specific Hammerfall DSP soundcards.");
 MODULE_AUTHOR("Paul Davis <paul@linuxaudiosystems.com>, Marcus Andersson, Thomas Charbonnel <thomas@undata.org>");
 MODULE_DESCRIPTION("RME Hammerfall DSP");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{RME Hammerfall-DSP},"
-               "{RME HDSP-9652},"
-               "{RME HDSP-9632}}");
 MODULE_FIRMWARE("rpm_firmware.bin");
 MODULE_FIRMWARE("multiface_firmware.bin");
 MODULE_FIRMWARE("multiface_firmware_rev11.bin");
index b667115..8d900c1 100644
@@ -165,7 +165,6 @@ MODULE_AUTHOR
 );
 MODULE_DESCRIPTION("RME HDSPM");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
 
 /* --- Write registers. ---
   These are defined as byte-offsets from the iobase value.  */
index 012fbec..4df992e 100644
@@ -39,8 +39,6 @@ MODULE_PARM_DESC(precise_ptr, "Enable precise pointer (doesn't work reliably).")
 MODULE_AUTHOR("Paul Davis <pbd@op.net>, Winfried Ritsch");
 MODULE_DESCRIPTION("RME Digi9652/Digi9636");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{RME,Hammerfall},"
-               "{RME,Hammerfall-Light}}");
 
 /* The Hammerfall has two sets of 24 ADAT + 2 S/PDIF channels, one for
    capture, one for playback. Both the ADAT and S/PDIF channels appear
index 8ffa2f5..00ab51c 100644
@@ -24,7 +24,6 @@
 MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
 MODULE_DESCRIPTION("SiS7019");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{SiS,SiS7019 Audio Accelerator}}");
 
 static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */
 static char *id = SNDRV_DEFAULT_STR1;  /* ID for this card */
index 26fd1d0..7de1099 100644
@@ -29,7 +29,6 @@
 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_DESCRIPTION("S3 SonicVibes PCI");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{S3,SonicVibes PCI}}");
 
 #if IS_REACHABLE(CONFIG_GAMEPORT)
 #define SUPPORT_JOYSTICK 1
index 5bc79da..a510412 100644
 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, <audio@tridentmicro.com>");
 MODULE_DESCRIPTION("Trident 4D-WaveDX/NX & SiS SI7018");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Trident,4DWave DX},"
-               "{Trident,4DWave NX},"
-               "{SiS,SI7018 PCI Audio},"
-               "{Best Union,Miss Melody 4DWave PCI},"
-               "{HIS,4DWave PCI},"
-               "{Warpspeed,ONSpeed 4DWave PCI},"
-               "{Aztech Systems,PCI 64-Q3D},"
-               "{Addonics,SV 750},"
-               "{CHIC,True Sound 4Dwave},"
-               "{Shark,Predator4D-PCI},"
-               "{Jaton,SonicWave 4D},"
-               "{Hoontech,SoundTrack Digital 4DWave NX}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;      /* ID for this card */
index 154d88c..fd1f2f9 100644
@@ -56,7 +56,6 @@
 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_DESCRIPTION("VIA VT82xx audio");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{VIA,VT82C686A/B/C,pci},{VIA,VT8233A/C,8235}}");
 
 #if IS_REACHABLE(CONFIG_GAMEPORT)
 #define SUPPORT_JOYSTICK 1
index addfa19..3025330 100644
@@ -38,7 +38,6 @@
 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_DESCRIPTION("VIA VT82xx modem");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{VIA,VT82C686A/B/C modem,pci}}");
 
 static int index = -2; /* Exclude the first card */
 static char *id = SNDRV_DEFAULT_STR1;  /* ID for this card */
index f7800ed..2a9e1a7 100644
@@ -20,7 +20,6 @@
 MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
 MODULE_DESCRIPTION("Digigram VX222 V2/Mic");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Digigram," CARD_NAME "}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;      /* ID for this card */
index 9b0d18a..99be149 100644
 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_DESCRIPTION("Yamaha DS-1 PCI");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Yamaha,YMF724},"
-               "{Yamaha,YMF724F},"
-               "{Yamaha,YMF740},"
-               "{Yamaha,YMF740C},"
-               "{Yamaha,YMF744},"
-               "{Yamaha,YMF754}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;      /* ID for this card */
index 27d9da6..1445823 100644
@@ -22,7 +22,6 @@
 MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
 MODULE_DESCRIPTION("Sound Core " CARD_NAME);
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Sound Core," CARD_NAME "}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;      /* ID for this card */
index afd30a9..6363204 100644
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
-/*
- */
-
 MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
 MODULE_DESCRIPTION("Digigram VXPocket");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Digigram,VXPocket},{Digigram,VXPocket440}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;      /* ID for this card */
index 96ef550..9fb51eb 100644
@@ -18,7 +18,6 @@
 #define CHIP_NAME "PMac"
 
 MODULE_DESCRIPTION("PowerMac");
-MODULE_SUPPORTED_DEVICE("{{Apple,PowerMac}}");
 MODULE_LICENSE("GPL");
 
 static int index = SNDRV_DEFAULT_IDX1;         /* Index 0-MAX */
index 8fa6843..6e9d6bd 100644
@@ -32,7 +32,6 @@
 MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>");
 MODULE_DESCRIPTION("Dreamcast AICA sound (pcm) driver");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Yamaha/SEGA, AICA}}");
 MODULE_FIRMWARE("aica_firmware.bin");
 
 /* module parameters */
index feb2850..8ebd972 100644
@@ -25,7 +25,6 @@
 MODULE_AUTHOR("Rafael Ignacio Zurita <rizurita@yahoo.com>");
 MODULE_DESCRIPTION("SuperH DAC audio driver");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{SuperH DAC audio support}}");
 
 /* Module Parameters */
 static int index = SNDRV_DEFAULT_IDX1;
index 6e634b4..aa16a23 100644
@@ -1348,8 +1348,10 @@ static int cygnus_ssp_probe(struct platform_device *pdev)
                                        &cygnus_ssp_dai[active_port_count]);
 
                /* negative is err, 0 is active and good, 1 is disabled */
-               if (err < 0)
+               if (err < 0) {
+                       of_node_put(child_node);
                        return err;
+               }
                else if (!err) {
                        dev_dbg(dev, "Activating DAI: %s\n",
                                cygnus_ssp_dai[active_port_count].name);
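
The of_node_put() added here fixes a device-node refcount leak: for_each_child_of_node() takes a reference on each child it hands to the loop body and only drops it when the iterator advances, so an early return leaks the current child's reference. A generic sketch of the idiom, with parse_child() as a hypothetical stand-in:

    struct device_node *child;
    int err;

    for_each_child_of_node(dev->of_node, child) {
            err = parse_child(child);          /* hypothetical helper */
            if (err) {
                    of_node_put(child);        /* balance the iterator's ref */
                    return err;
            }
    }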
index e4cf14e..1c87b42 100644
@@ -186,7 +186,6 @@ config SND_SOC_ALL_CODECS
        imply SND_SOC_SI476X
        imply SND_SOC_SIMPLE_AMPLIFIER
        imply SND_SOC_SIMPLE_MUX
-       imply SND_SOC_SIRF_AUDIO_CODEC
        imply SND_SOC_SPDIF
        imply SND_SOC_SSM2305
        imply SND_SOC_SSM2518
@@ -1279,10 +1278,6 @@ config SND_SOC_SIMPLE_MUX
        tristate "Simple Audio Mux"
        select GPIOLIB
 
-config SND_SOC_SIRF_AUDIO_CODEC
-       tristate "SiRF SoC internal audio codec"
-       select REGMAP_MMIO
-
 config SND_SOC_SPDIF
        tristate "S/PDIF CODEC"
 
index 472caad..85a1d00 100644
@@ -812,6 +812,7 @@ static const struct of_device_id ak4458_of_match[] = {
        { .compatible = "asahi-kasei,ak4497", .data = &ak4497_drvdata},
        { },
 };
+MODULE_DEVICE_TABLE(of, ak4458_of_match);
 
 static struct i2c_driver ak4458_i2c_driver = {
        .driver = {
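
MODULE_DEVICE_TABLE(of, ...) does more than expose the match table to the driver core: it embeds "of:N*T*C<compatible>" alias strings in the module, which is what lets udev modprobe the driver when a matching devicetree node shows up. Without it (the bug fixed here, and again for ak5558 just below) the driver binds only if the module is already loaded. A sketch of the pattern, identifiers hypothetical:

    static const struct of_device_id example_of_match[] = {
            { .compatible = "vendor,example-codec" },   /* hypothetical */
            { }
    };
    MODULE_DEVICE_TABLE(of, example_of_match);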
index 8a32b01..85bdd05 100644
@@ -419,6 +419,7 @@ static const struct of_device_id ak5558_i2c_dt_ids[] __maybe_unused = {
        { .compatible = "asahi-kasei,ak5558"},
        { }
 };
+MODULE_DEVICE_TABLE(of, ak5558_i2c_dt_ids);
 
 static struct i2c_driver ak5558_i2c_driver = {
        .driver = {
index 210fcbe..811b7b1 100644
@@ -401,7 +401,7 @@ static const struct regmap_config cs42l42_regmap = {
 };
 
 static DECLARE_TLV_DB_SCALE(adc_tlv, -9600, 100, false);
-static DECLARE_TLV_DB_SCALE(mixer_tlv, -6200, 100, false);
+static DECLARE_TLV_DB_SCALE(mixer_tlv, -6300, 100, true);
 
 static const char * const cs42l42_hpf_freq_text[] = {
        "1.86Hz", "120Hz", "235Hz", "466Hz"
@@ -458,7 +458,7 @@ static const struct snd_kcontrol_new cs42l42_snd_controls[] = {
                                CS42L42_DAC_HPF_EN_SHIFT, true, false),
        SOC_DOUBLE_R_TLV("Mixer Volume", CS42L42_MIXER_CHA_VOL,
                         CS42L42_MIXER_CHB_VOL, CS42L42_MIXER_CH_VOL_SHIFT,
-                               0x3e, 1, mixer_tlv)
+                               0x3f, 1, mixer_tlv)
 };
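
For reference, DECLARE_TLV_DB_SCALE(name, min, step, mute) takes min and step in 0.01 dB units, reports min + step * value for each control value, and with mute set flags the lowest value as a true mute. The corrected pair therefore runs from a mute step at -63 dB up to -6300 + 63 * 100 = 0 dB across the mixer's full 0x00-0x3f register range; the old -6200/0x3e values stopped one register step short of the hardware's mute position.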
 
 static int cs42l42_hpdrv_evt(struct snd_soc_dapm_widget *w,
@@ -511,43 +511,6 @@ static const struct snd_soc_dapm_route cs42l42_audio_map[] = {
        {"HP", NULL, "HPDRV"}
 };
 
-static int cs42l42_set_bias_level(struct snd_soc_component *component,
-                                       enum snd_soc_bias_level level)
-{
-       struct cs42l42_private *cs42l42 = snd_soc_component_get_drvdata(component);
-       int ret;
-
-       switch (level) {
-       case SND_SOC_BIAS_ON:
-               break;
-       case SND_SOC_BIAS_PREPARE:
-               break;
-       case SND_SOC_BIAS_STANDBY:
-               if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF) {
-                       regcache_cache_only(cs42l42->regmap, false);
-                       regcache_sync(cs42l42->regmap);
-                       ret = regulator_bulk_enable(
-                                               ARRAY_SIZE(cs42l42->supplies),
-                                               cs42l42->supplies);
-                       if (ret != 0) {
-                               dev_err(component->dev,
-                                       "Failed to enable regulators: %d\n",
-                                       ret);
-                               return ret;
-                       }
-               }
-               break;
-       case SND_SOC_BIAS_OFF:
-
-               regcache_cache_only(cs42l42->regmap, true);
-               regulator_bulk_disable(ARRAY_SIZE(cs42l42->supplies),
-                                                   cs42l42->supplies);
-               break;
-       }
-
-       return 0;
-}
-
 static int cs42l42_component_probe(struct snd_soc_component *component)
 {
        struct cs42l42_private *cs42l42 =
@@ -560,7 +523,6 @@ static int cs42l42_component_probe(struct snd_soc_component *component)
 
 static const struct snd_soc_component_driver soc_component_dev_cs42l42 = {
        .probe                  = cs42l42_component_probe,
-       .set_bias_level         = cs42l42_set_bias_level,
        .dapm_widgets           = cs42l42_dapm_widgets,
        .num_dapm_widgets       = ARRAY_SIZE(cs42l42_dapm_widgets),
        .dapm_routes            = cs42l42_audio_map,
@@ -691,24 +653,6 @@ static int cs42l42_pll_config(struct snd_soc_component *component)
                                        CS42L42_CLK_OASRC_SEL_MASK,
                                        CS42L42_CLK_OASRC_SEL_12 <<
                                        CS42L42_CLK_OASRC_SEL_SHIFT);
-                       /* channel 1 on low LRCLK, 32 bit */
-                       snd_soc_component_update_bits(component,
-                                       CS42L42_ASP_RX_DAI0_CH1_AP_RES,
-                                       CS42L42_ASP_RX_CH_AP_MASK |
-                                       CS42L42_ASP_RX_CH_RES_MASK,
-                                       (CS42L42_ASP_RX_CH_AP_LOW <<
-                                       CS42L42_ASP_RX_CH_AP_SHIFT) |
-                                       (CS42L42_ASP_RX_CH_RES_32 <<
-                                       CS42L42_ASP_RX_CH_RES_SHIFT));
-                       /* Channel 2 on high LRCLK, 32 bit */
-                       snd_soc_component_update_bits(component,
-                                       CS42L42_ASP_RX_DAI0_CH2_AP_RES,
-                                       CS42L42_ASP_RX_CH_AP_MASK |
-                                       CS42L42_ASP_RX_CH_RES_MASK,
-                                       (CS42L42_ASP_RX_CH_AP_HI <<
-                                       CS42L42_ASP_RX_CH_AP_SHIFT) |
-                                       (CS42L42_ASP_RX_CH_RES_32 <<
-                                       CS42L42_ASP_RX_CH_RES_SHIFT));
                        if (pll_ratio_table[i].mclk_src_sel == 0) {
                                /* Pass the clock straight through */
                                snd_soc_component_update_bits(component,
@@ -797,27 +741,23 @@ static int cs42l42_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
        /* Bitclock/frame inversion */
        switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
        case SND_SOC_DAIFMT_NB_NF:
+               asp_cfg_val |= CS42L42_ASP_SCPOL_NOR << CS42L42_ASP_SCPOL_SHIFT;
                break;
        case SND_SOC_DAIFMT_NB_IF:
-               asp_cfg_val |= CS42L42_ASP_POL_INV <<
-                               CS42L42_ASP_LCPOL_IN_SHIFT;
+               asp_cfg_val |= CS42L42_ASP_SCPOL_NOR << CS42L42_ASP_SCPOL_SHIFT;
+               asp_cfg_val |= CS42L42_ASP_LCPOL_INV << CS42L42_ASP_LCPOL_SHIFT;
                break;
        case SND_SOC_DAIFMT_IB_NF:
-               asp_cfg_val |= CS42L42_ASP_POL_INV <<
-                               CS42L42_ASP_SCPOL_IN_DAC_SHIFT;
                break;
        case SND_SOC_DAIFMT_IB_IF:
-               asp_cfg_val |= CS42L42_ASP_POL_INV <<
-                               CS42L42_ASP_LCPOL_IN_SHIFT;
-               asp_cfg_val |= CS42L42_ASP_POL_INV <<
-                               CS42L42_ASP_SCPOL_IN_DAC_SHIFT;
+               asp_cfg_val |= CS42L42_ASP_LCPOL_INV << CS42L42_ASP_LCPOL_SHIFT;
                break;
        }
 
-       snd_soc_component_update_bits(component, CS42L42_ASP_CLK_CFG,
-                               CS42L42_ASP_MODE_MASK |
-                               CS42L42_ASP_SCPOL_IN_DAC_MASK |
-                               CS42L42_ASP_LCPOL_IN_MASK, asp_cfg_val);
+       snd_soc_component_update_bits(component, CS42L42_ASP_CLK_CFG, CS42L42_ASP_MODE_MASK |
+                                                                     CS42L42_ASP_SCPOL_MASK |
+                                                                     CS42L42_ASP_LCPOL_MASK,
+                                                                     asp_cfg_val);
 
        return 0;
 }
@@ -828,14 +768,29 @@ static int cs42l42_pcm_hw_params(struct snd_pcm_substream *substream,
 {
        struct snd_soc_component *component = dai->component;
        struct cs42l42_private *cs42l42 = snd_soc_component_get_drvdata(component);
-       int retval;
+       unsigned int width = (params_width(params) / 8) - 1;
+       unsigned int val = 0;
 
        cs42l42->srate = params_rate(params);
-       cs42l42->swidth = params_width(params);
 
-       retval = cs42l42_pll_config(component);
+       switch (substream->stream) {
+       case SNDRV_PCM_STREAM_PLAYBACK:
+               val |= width << CS42L42_ASP_RX_CH_RES_SHIFT;
+               /* channel 1 on low LRCLK */
+               snd_soc_component_update_bits(component, CS42L42_ASP_RX_DAI0_CH1_AP_RES,
+                                                        CS42L42_ASP_RX_CH_AP_MASK |
+                                                        CS42L42_ASP_RX_CH_RES_MASK, val);
+               /* Channel 2 on high LRCLK */
+               val |= CS42L42_ASP_RX_CH_AP_HI << CS42L42_ASP_RX_CH_AP_SHIFT;
+               snd_soc_component_update_bits(component, CS42L42_ASP_RX_DAI0_CH2_AP_RES,
+                                                        CS42L42_ASP_RX_CH_AP_MASK |
+                                                        CS42L42_ASP_RX_CH_RES_MASK, val);
+               break;
+       default:
+               break;
+       }
 
-       return retval;
+       return cs42l42_pll_config(component);
 }
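
The width math above leans on the encoding implied by the removed hard-coded CS42L42_ASP_RX_CH_RES_32 value: the RES field appears to hold the sample size in bytes minus one, so (params_width(params) / 8) - 1 yields 1 for S16_LE, 2 for S24_LE and 3 for S32_LE. hw_params() can therefore program the receive-channel resolution per stream instead of pinning both channels to 32-bit, which is also why the channel setup moved out of cs42l42_pll_config() in the earlier hunk.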
 
 static int cs42l42_set_sysclk(struct snd_soc_dai *dai,
@@ -900,9 +855,9 @@ static int cs42l42_mute(struct snd_soc_dai *dai, int mute, int direction)
        return 0;
 }
 
-#define CS42L42_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S18_3LE | \
-                       SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE | \
-                       SNDRV_PCM_FMTBIT_S32_LE)
+#define CS42L42_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+                        SNDRV_PCM_FMTBIT_S24_LE |\
+                        SNDRV_PCM_FMTBIT_S32_LE)
 
 
 static const struct snd_soc_dai_ops cs42l42_ops = {
@@ -1801,7 +1756,7 @@ static int cs42l42_i2c_probe(struct i2c_client *i2c_client,
                dev_dbg(&i2c_client->dev, "Found reset GPIO\n");
                gpiod_set_value_cansleep(cs42l42->reset_gpio, 1);
        }
-       mdelay(3);
+       usleep_range(CS42L42_BOOT_TIME_US, CS42L42_BOOT_TIME_US * 2);
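
Replacing mdelay(3) with usleep_range() is the standard fix for millisecond-scale waits in process context: mdelay() busy-spins the CPU for the whole interval, while usleep_range() sleeps on an hrtimer, and the min/max pair gives the timer core room to coalesce wakeups; the kernel's timers-howto recommends usleep_range() for delays in roughly the 10 us to 20 ms band. Naming the constant CS42L42_BOOT_TIME_US also documents what the magic 3 ms was, presumably the codec's post-reset boot time, and the same wait is added on the runtime-resume path below.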
 
        /* Request IRQ */
        ret = devm_request_threaded_irq(&i2c_client->dev,
@@ -1926,6 +1881,7 @@ static int cs42l42_runtime_resume(struct device *dev)
        }
 
        gpiod_set_value_cansleep(cs42l42->reset_gpio, 1);
+       usleep_range(CS42L42_BOOT_TIME_US, CS42L42_BOOT_TIME_US * 2);
 
        regcache_cache_only(cs42l42->regmap, false);
        regcache_sync(cs42l42->regmap);
index 9e3cc52..866d7c8 100644
 #define CS42L42_ASP_SLAVE_MODE         0x00
 #define CS42L42_ASP_MODE_SHIFT         4
 #define CS42L42_ASP_MODE_MASK          (1 << CS42L42_ASP_MODE_SHIFT)
-#define CS42L42_ASP_SCPOL_IN_DAC_SHIFT 2
-#define CS42L42_ASP_SCPOL_IN_DAC_MASK  (1 << CS42L42_ASP_SCPOL_IN_DAC_SHIFT)
-#define CS42L42_ASP_LCPOL_IN_SHIFT     0
-#define CS42L42_ASP_LCPOL_IN_MASK      (1 << CS42L42_ASP_LCPOL_IN_SHIFT)
-#define CS42L42_ASP_POL_INV            1
+#define CS42L42_ASP_SCPOL_SHIFT                2
+#define CS42L42_ASP_SCPOL_MASK         (3 << CS42L42_ASP_SCPOL_SHIFT)
+#define CS42L42_ASP_SCPOL_NOR          3
+#define CS42L42_ASP_LCPOL_SHIFT                0
+#define CS42L42_ASP_LCPOL_MASK         (3 << CS42L42_ASP_LCPOL_SHIFT)
+#define CS42L42_ASP_LCPOL_INV          3
 
 #define CS42L42_ASP_FRM_CFG            (CS42L42_PAGE_12 + 0x08)
 #define CS42L42_ASP_STP_SHIFT          4
 #define CS42L42_FRAC2_VAL(val) (((val) & 0xff0000) >> 16)
 
 #define CS42L42_NUM_SUPPLIES   5
+#define CS42L42_BOOT_TIME_US   3000
 
 static const char *const cs42l42_supply_names[CS42L42_NUM_SUPPLIES] = {
        "VA",
@@ -756,7 +758,6 @@ struct  cs42l42_private {
        struct completion pdn_done;
        u32 sclk;
        u32 srate;
-       u32 swidth;
        u8 plug_state;
        u8 hs_type;
        u8 ts_inv;
index d632055..067757d 100644
@@ -63,13 +63,8 @@ static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(adc_pga_gain_tlv,
        1, 1, TLV_DB_SCALE_ITEM(0, 0, 0),
        2, 2, TLV_DB_SCALE_ITEM(250, 0, 0),
        3, 3, TLV_DB_SCALE_ITEM(450, 0, 0),
-       4, 4, TLV_DB_SCALE_ITEM(700, 0, 0),
-       5, 5, TLV_DB_SCALE_ITEM(1000, 0, 0),
-       6, 6, TLV_DB_SCALE_ITEM(1300, 0, 0),
-       7, 7, TLV_DB_SCALE_ITEM(1600, 0, 0),
-       8, 8, TLV_DB_SCALE_ITEM(1800, 0, 0),
-       9, 9, TLV_DB_SCALE_ITEM(2100, 0, 0),
-       10, 10, TLV_DB_SCALE_ITEM(2400, 0, 0),
+       4, 7, TLV_DB_SCALE_ITEM(700, 300, 0),
+       8, 10, TLV_DB_SCALE_ITEM(1800, 300, 0),
 );
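
The two consolidated entries reproduce the removed per-item rows exactly, in 0.01 dB units: items 4-7 map to 700 + 300 * (n - 4) = 700, 1000, 1300, 1600, and items 8-10 map to 1800 + 300 * (n - 8) = 1800, 2100, 2400. The rewrite only compacts the TLV data; no reported gain changes.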
 
 static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(hpout_vol_tlv,
index c9c21d2..7878da8 100644
@@ -2895,7 +2895,7 @@ static int rx_macro_enable_echo(struct snd_soc_dapm_widget *w,
 {
        struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
        u16 val, ec_hq_reg;
-       int ec_tx;
+       int ec_tx = -1;
 
        val = snd_soc_component_read(component,
                        CDC_RX_INP_MUX_RX_MIX_CFG4);
@@ -3551,7 +3551,7 @@ static int rx_macro_probe(struct platform_device *pdev)
 
        /* set MCLK and NPL rates */
        clk_set_rate(rx->clks[2].clk, MCLK_FREQ);
-       clk_set_rate(rx->clks[3].clk, MCLK_FREQ);
+       clk_set_rate(rx->clks[3].clk, 2 * MCLK_FREQ);
 
        ret = clk_bulk_prepare_enable(RX_NUM_CLKS_MAX, rx->clks);
        if (ret)
index 36d7a64..e8c6c73 100644
@@ -1811,7 +1811,7 @@ static int tx_macro_probe(struct platform_device *pdev)
 
        /* set MCLK and NPL rates */
        clk_set_rate(tx->clks[2].clk, MCLK_FREQ);
-       clk_set_rate(tx->clks[3].clk, MCLK_FREQ);
+       clk_set_rate(tx->clks[3].clk, 2 * MCLK_FREQ);
 
        ret = clk_bulk_prepare_enable(TX_NUM_CLKS_MAX, tx->clks);
        if (ret)
index 91e6890..3d6976a 100644
@@ -189,7 +189,6 @@ struct va_macro {
        struct device *dev;
        unsigned long active_ch_mask[VA_MACRO_MAX_DAIS];
        unsigned long active_ch_cnt[VA_MACRO_MAX_DAIS];
-       unsigned long active_decimator[VA_MACRO_MAX_DAIS];
        u16 dmic_clk_div;
 
        int dec_mode[VA_MACRO_NUM_DECIMATORS];
@@ -549,11 +548,9 @@ static int va_macro_tx_mixer_put(struct snd_kcontrol *kcontrol,
        if (enable) {
                set_bit(dec_id, &va->active_ch_mask[dai_id]);
                va->active_ch_cnt[dai_id]++;
-               va->active_decimator[dai_id] = dec_id;
        } else {
                clear_bit(dec_id, &va->active_ch_mask[dai_id]);
                va->active_ch_cnt[dai_id]--;
-               va->active_decimator[dai_id] = -1;
        }
 
        snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, enable, update);
@@ -880,18 +877,19 @@ static int va_macro_digital_mute(struct snd_soc_dai *dai, int mute, int stream)
        struct va_macro *va = snd_soc_component_get_drvdata(component);
        u16 tx_vol_ctl_reg, decimator;
 
-       decimator = va->active_decimator[dai->id];
-
-       tx_vol_ctl_reg = CDC_VA_TX0_TX_PATH_CTL +
-                               VA_MACRO_TX_PATH_OFFSET * decimator;
-       if (mute)
-               snd_soc_component_update_bits(component, tx_vol_ctl_reg,
-                                             CDC_VA_TX_PATH_PGA_MUTE_EN_MASK,
-                                             CDC_VA_TX_PATH_PGA_MUTE_EN);
-       else
-               snd_soc_component_update_bits(component, tx_vol_ctl_reg,
-                                             CDC_VA_TX_PATH_PGA_MUTE_EN_MASK,
-                                             CDC_VA_TX_PATH_PGA_MUTE_DISABLE);
+       for_each_set_bit(decimator, &va->active_ch_mask[dai->id],
+                        VA_MACRO_DEC_MAX) {
+               tx_vol_ctl_reg = CDC_VA_TX0_TX_PATH_CTL +
+                                       VA_MACRO_TX_PATH_OFFSET * decimator;
+               if (mute)
+                       snd_soc_component_update_bits(component, tx_vol_ctl_reg,
+                                       CDC_VA_TX_PATH_PGA_MUTE_EN_MASK,
+                                       CDC_VA_TX_PATH_PGA_MUTE_EN);
+               else
+                       snd_soc_component_update_bits(component, tx_vol_ctl_reg,
+                                       CDC_VA_TX_PATH_PGA_MUTE_EN_MASK,
+                                       CDC_VA_TX_PATH_PGA_MUTE_DISABLE);
+       }
 
        return 0;
 }
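
Tracking a single "active decimator" per DAI could not describe a DAI with several live channels, so mute/unmute touched at most one path; walking the existing active_ch_mask covers them all. A minimal sketch of the bitmask walk, with a hypothetical mask:

    unsigned long mask = BIT(0) | BIT(3);   /* decimators 0 and 3 active */
    unsigned int dec;

    for_each_set_bit(dec, &mask, 8)         /* visits bits 0 and 3 only */
            pr_debug("toggle mute on decimator %u\n", dec);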
index 5ebcd93..9ca49a1 100644
@@ -1211,14 +1211,16 @@ static int wsa_macro_enable_mix_path(struct snd_soc_dapm_widget *w,
                                     struct snd_kcontrol *kcontrol, int event)
 {
        struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
-       u16 gain_reg;
+       u16 path_reg, gain_reg;
        int val;
 
-       switch (w->reg) {
-       case CDC_WSA_RX0_RX_PATH_MIX_CTL:
+       switch (w->shift) {
+       case WSA_MACRO_RX_MIX0:
+               path_reg = CDC_WSA_RX0_RX_PATH_MIX_CTL;
                gain_reg = CDC_WSA_RX0_RX_VOL_MIX_CTL;
                break;
-       case CDC_WSA_RX1_RX_PATH_MIX_CTL:
+       case WSA_MACRO_RX_MIX1:
+               path_reg = CDC_WSA_RX1_RX_PATH_MIX_CTL;
                gain_reg = CDC_WSA_RX1_RX_VOL_MIX_CTL;
                break;
        default:
@@ -1231,7 +1233,7 @@ static int wsa_macro_enable_mix_path(struct snd_soc_dapm_widget *w,
                snd_soc_component_write(component, gain_reg, val);
                break;
        case SND_SOC_DAPM_POST_PMD:
-               snd_soc_component_update_bits(component, w->reg,
+               snd_soc_component_update_bits(component, path_reg,
                                              CDC_WSA_RX_PATH_MIX_CLK_EN_MASK,
                                              CDC_WSA_RX_PATH_MIX_CLK_DISABLE);
                break;
@@ -2068,14 +2070,14 @@ static const struct snd_soc_dapm_widget wsa_macro_dapm_widgets[] = {
        SND_SOC_DAPM_MUX("WSA_RX0 INP0", SND_SOC_NOPM, 0, 0, &rx0_prim_inp0_mux),
        SND_SOC_DAPM_MUX("WSA_RX0 INP1", SND_SOC_NOPM, 0, 0, &rx0_prim_inp1_mux),
        SND_SOC_DAPM_MUX("WSA_RX0 INP2", SND_SOC_NOPM, 0, 0, &rx0_prim_inp2_mux),
-       SND_SOC_DAPM_MUX_E("WSA_RX0 MIX INP", CDC_WSA_RX0_RX_PATH_MIX_CTL,
-                          0, 0, &rx0_mix_mux, wsa_macro_enable_mix_path,
+       SND_SOC_DAPM_MUX_E("WSA_RX0 MIX INP", SND_SOC_NOPM, WSA_MACRO_RX_MIX0,
+                          0, &rx0_mix_mux, wsa_macro_enable_mix_path,
                           SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
        SND_SOC_DAPM_MUX("WSA_RX1 INP0", SND_SOC_NOPM, 0, 0, &rx1_prim_inp0_mux),
        SND_SOC_DAPM_MUX("WSA_RX1 INP1", SND_SOC_NOPM, 0, 0, &rx1_prim_inp1_mux),
        SND_SOC_DAPM_MUX("WSA_RX1 INP2", SND_SOC_NOPM, 0, 0, &rx1_prim_inp2_mux),
-       SND_SOC_DAPM_MUX_E("WSA_RX1 MIX INP", CDC_WSA_RX1_RX_PATH_MIX_CTL,
-                          0, 0, &rx1_mix_mux, wsa_macro_enable_mix_path,
+       SND_SOC_DAPM_MUX_E("WSA_RX1 MIX INP", SND_SOC_NOPM, WSA_MACRO_RX_MIX1,
+                          0, &rx1_mix_mux, wsa_macro_enable_mix_path,
                           SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
 
        SND_SOC_DAPM_MIXER_E("WSA_RX INT0 MIX", SND_SOC_NOPM, 0, 0, NULL, 0,
index 85f6865..ddb6436 100644
@@ -446,6 +446,7 @@ static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
        case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
        case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
        case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
+       case MAX98373_R20FF_GLOBAL_SHDN:
        case MAX98373_R21FF_REV_ID:
                return true;
        default:
index d8c4766..f3a1220 100644
@@ -220,6 +220,7 @@ static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
        case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
        case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
        case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
+       case MAX98373_R20FF_GLOBAL_SHDN:
        case MAX98373_R21FF_REV_ID:
        /* SoundWire Control Port Registers */
        case MAX98373_R0040_SCP_INIT_STAT_1 ... MAX98373_R0070_SCP_FRAME_CTLR:
index 746c829..1346a98 100644
@@ -28,11 +28,13 @@ static int max98373_dac_event(struct snd_soc_dapm_widget *w,
                regmap_update_bits(max98373->regmap,
                        MAX98373_R20FF_GLOBAL_SHDN,
                        MAX98373_GLOBAL_EN_MASK, 1);
+               usleep_range(30000, 31000);
                break;
        case SND_SOC_DAPM_POST_PMD:
                regmap_update_bits(max98373->regmap,
                        MAX98373_R20FF_GLOBAL_SHDN,
                        MAX98373_GLOBAL_EN_MASK, 0);
+               usleep_range(30000, 31000);
                max98373->tdm_mode = false;
                break;
        default:
index 37b5795..844e407 100644
@@ -209,6 +209,7 @@ static bool rt1015_volatile_register(struct device *dev, unsigned int reg)
        case RT1015_VENDOR_ID:
        case RT1015_DEVICE_ID:
        case RT1015_PRO_ALT:
+       case RT1015_MAN_I2C:
        case RT1015_DAC3:
        case RT1015_VBAT_TEST_OUT1:
        case RT1015_VBAT_TEST_OUT2:
@@ -513,6 +514,7 @@ static void rt1015_calibrate(struct rt1015_priv *rt1015)
        msleep(300);
        regmap_write(regmap, RT1015_PWR_STATE_CTRL, 0x0008);
        regmap_write(regmap, RT1015_SYS_RST1, 0x05F5);
+       regmap_write(regmap, RT1015_CLK_DET, 0x8000);
 
        regcache_cache_bypass(regmap, false);
        regcache_mark_dirty(regmap);
index 1414ad1..a5674c2 100644
@@ -339,9 +339,9 @@ static bool rt5640_readable_register(struct device *dev, unsigned int reg)
 }
 
 static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0);
-static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0);
+static const DECLARE_TLV_DB_MINMAX(dac_vol_tlv, -6562, 0);
 static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
-static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
+static const DECLARE_TLV_DB_MINMAX(adc_vol_tlv, -1762, 3000);
 static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
 
 /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
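
The switch from DB_SCALE to DB_MINMAX fixes values that were off by a factor of ten: TLV levels are in 0.01 dB units, so -65625 with step 375 advertised a -656.25 dB floor in 3.75 dB steps for what is really a -65.625 dB range with 0.375 dB steps. DECLARE_TLV_DB_MINMAX(-6562, 0) declares the linear range directly and lets the core derive the step from the control's value range; the identical rt5651 change below follows the same reasoning.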
index d198e19..e59fdc8 100644
@@ -285,9 +285,9 @@ static bool rt5651_readable_register(struct device *dev, unsigned int reg)
 }
 
 static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0);
-static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0);
+static const DECLARE_TLV_DB_MINMAX(dac_vol_tlv, -6562, 0);
 static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
-static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
+static const DECLARE_TLV_DB_MINMAX(adc_vol_tlv, -1762, 3000);
 static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
 
 /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
index 41e5917..91a4ef7 100644
@@ -3426,12 +3426,17 @@ static int rt5659_set_component_sysclk(struct snd_soc_component *component, int
 {
        struct rt5659_priv *rt5659 = snd_soc_component_get_drvdata(component);
        unsigned int reg_val = 0;
+       int ret;
 
        if (freq == rt5659->sysclk && clk_id == rt5659->sysclk_src)
                return 0;
 
        switch (clk_id) {
        case RT5659_SCLK_S_MCLK:
+               ret = clk_set_rate(rt5659->mclk, freq);
+               if (ret)
+                       return ret;
+
                reg_val |= RT5659_SCLK_SRC_MCLK;
                break;
        case RT5659_SCLK_S_PLL1:
index c29317e..4063aac 100644
@@ -629,21 +629,69 @@ static SOC_ENUM_SINGLE_DECL(rt5670_if2_dac_enum, RT5670_DIG_INF1_DATA,
 static SOC_ENUM_SINGLE_DECL(rt5670_if2_adc_enum, RT5670_DIG_INF1_DATA,
                                RT5670_IF2_ADC_SEL_SFT, rt5670_data_select);
 
+/*
+ * For reliable output-mute LED control we need a "DAC1 Playback Switch" control.
+ * We emulate this by only clearing the RT5670_M_DAC1_L/_R AD_DA_MIXER register
+ * bits when both our emulated DAC1 Playback Switch control and the DAC1 MIXL/R
+ * DAPM-mixer DAC1 input are enabled.
+ */
+static void rt5670_update_ad_da_mixer_dac1_m_bits(struct rt5670_priv *rt5670)
+{
+       int val = RT5670_M_DAC1_L | RT5670_M_DAC1_R;
+
+       if (rt5670->dac1_mixl_dac1_switch && rt5670->dac1_playback_switch_l)
+               val &= ~RT5670_M_DAC1_L;
+
+       if (rt5670->dac1_mixr_dac1_switch && rt5670->dac1_playback_switch_r)
+               val &= ~RT5670_M_DAC1_R;
+
+       regmap_update_bits(rt5670->regmap, RT5670_AD_DA_MIXER,
+                          RT5670_M_DAC1_L | RT5670_M_DAC1_R, val);
+}
+
+static int rt5670_dac1_playback_switch_get(struct snd_kcontrol *kcontrol,
+                                          struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+       struct rt5670_priv *rt5670 = snd_soc_component_get_drvdata(component);
+
+       ucontrol->value.integer.value[0] = rt5670->dac1_playback_switch_l;
+       ucontrol->value.integer.value[1] = rt5670->dac1_playback_switch_r;
+
+       return 0;
+}
+
+static int rt5670_dac1_playback_switch_put(struct snd_kcontrol *kcontrol,
+                                          struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+       struct rt5670_priv *rt5670 = snd_soc_component_get_drvdata(component);
+
+       if (rt5670->dac1_playback_switch_l == ucontrol->value.integer.value[0] &&
+           rt5670->dac1_playback_switch_r == ucontrol->value.integer.value[1])
+               return 0;
+
+       rt5670->dac1_playback_switch_l = ucontrol->value.integer.value[0];
+       rt5670->dac1_playback_switch_r = ucontrol->value.integer.value[1];
+
+       rt5670_update_ad_da_mixer_dac1_m_bits(rt5670);
+
+       return 1;
+}
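
Note the return-value discipline here: an ALSA kcontrol put handler returns 0 when nothing changed and 1 after applying a change, and the 1 is what makes the core send a value-change notification to userspace, so the early return of 0 on identical values avoids spurious events.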
+
 static const struct snd_kcontrol_new rt5670_snd_controls[] = {
        /* Headphone Output Volume */
-       SOC_DOUBLE("HP Playback Switch", RT5670_HP_VOL,
-               RT5670_L_MUTE_SFT, RT5670_R_MUTE_SFT, 1, 1),
        SOC_DOUBLE_TLV("HP Playback Volume", RT5670_HP_VOL,
                RT5670_L_VOL_SFT, RT5670_R_VOL_SFT,
                39, 1, out_vol_tlv),
        /* OUTPUT Control */
-       SOC_DOUBLE("OUT Channel Switch", RT5670_LOUT1,
-               RT5670_VOL_L_SFT, RT5670_VOL_R_SFT, 1, 1),
        SOC_DOUBLE_TLV("OUT Playback Volume", RT5670_LOUT1,
                RT5670_L_VOL_SFT, RT5670_R_VOL_SFT, 39, 1, out_vol_tlv),
        /* DAC Digital Volume */
        SOC_DOUBLE("DAC2 Playback Switch", RT5670_DAC_CTRL,
                RT5670_M_DAC_L2_VOL_SFT, RT5670_M_DAC_R2_VOL_SFT, 1, 1),
+       SOC_DOUBLE_EXT("DAC1 Playback Switch", SND_SOC_NOPM, 0, 1, 1, 0,
+                       rt5670_dac1_playback_switch_get, rt5670_dac1_playback_switch_put),
        SOC_DOUBLE_TLV("DAC1 Playback Volume", RT5670_DAC1_DIG_VOL,
                        RT5670_L_VOL_SFT, RT5670_R_VOL_SFT,
                        175, 0, dac_vol_tlv),
@@ -913,18 +961,44 @@ static const struct snd_kcontrol_new rt5670_mono_adc_r_mix[] = {
                        RT5670_M_MONO_ADC_R2_SFT, 1, 1),
 };
 
+/* See comment above rt5670_update_ad_da_mixer_dac1_m_bits() */
+static int rt5670_put_dac1_mix_dac1_switch(struct snd_kcontrol *kcontrol,
+                                          struct snd_ctl_elem_value *ucontrol)
+{
+       struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value;
+       struct snd_soc_component *component = snd_soc_dapm_kcontrol_component(kcontrol);
+       struct rt5670_priv *rt5670 = snd_soc_component_get_drvdata(component);
+       int ret;
+
+       if (mc->shift == 0)
+               rt5670->dac1_mixl_dac1_switch = ucontrol->value.integer.value[0];
+       else
+               rt5670->dac1_mixr_dac1_switch = ucontrol->value.integer.value[0];
+
+       /* Apply the update (if any) */
+       ret = snd_soc_dapm_put_volsw(kcontrol, ucontrol);
+       if (ret == 0)
+               return 0;
+
+       rt5670_update_ad_da_mixer_dac1_m_bits(rt5670);
+
+       return 1;
+}
+
+#define SOC_DAPM_SINGLE_RT5670_DAC1_SW(name, shift) \
+       SOC_SINGLE_EXT(name, SND_SOC_NOPM, shift, 1, 0, \
+                      snd_soc_dapm_get_volsw, rt5670_put_dac1_mix_dac1_switch)
+
 static const struct snd_kcontrol_new rt5670_dac_l_mix[] = {
        SOC_DAPM_SINGLE("Stereo ADC Switch", RT5670_AD_DA_MIXER,
                        RT5670_M_ADCMIX_L_SFT, 1, 1),
-       SOC_DAPM_SINGLE("DAC1 Switch", RT5670_AD_DA_MIXER,
-                       RT5670_M_DAC1_L_SFT, 1, 1),
+       SOC_DAPM_SINGLE_RT5670_DAC1_SW("DAC1 Switch", 0),
 };
 
 static const struct snd_kcontrol_new rt5670_dac_r_mix[] = {
        SOC_DAPM_SINGLE("Stereo ADC Switch", RT5670_AD_DA_MIXER,
                        RT5670_M_ADCMIX_R_SFT, 1, 1),
-       SOC_DAPM_SINGLE("DAC1 Switch", RT5670_AD_DA_MIXER,
-                       RT5670_M_DAC1_R_SFT, 1, 1),
+       SOC_DAPM_SINGLE_RT5670_DAC1_SW("DAC1 Switch", 1),
 };
 
 static const struct snd_kcontrol_new rt5670_sto_dac_l_mix[] = {
@@ -1656,12 +1730,10 @@ static const struct snd_soc_dapm_widget rt5670_dapm_widgets[] = {
                            RT5670_PWR_ADC_S1F_BIT, 0, NULL, 0),
        SND_SOC_DAPM_SUPPLY("ADC Stereo2 Filter", RT5670_PWR_DIG2,
                            RT5670_PWR_ADC_S2F_BIT, 0, NULL, 0),
-       SND_SOC_DAPM_MIXER("Sto1 ADC MIXL", RT5670_STO1_ADC_DIG_VOL,
-                          RT5670_L_MUTE_SFT, 1, rt5670_sto1_adc_l_mix,
-                          ARRAY_SIZE(rt5670_sto1_adc_l_mix)),
-       SND_SOC_DAPM_MIXER("Sto1 ADC MIXR", RT5670_STO1_ADC_DIG_VOL,
-                          RT5670_R_MUTE_SFT, 1, rt5670_sto1_adc_r_mix,
-                          ARRAY_SIZE(rt5670_sto1_adc_r_mix)),
+       SND_SOC_DAPM_MIXER("Sto1 ADC MIXL", SND_SOC_NOPM, 0, 0,
+                          rt5670_sto1_adc_l_mix, ARRAY_SIZE(rt5670_sto1_adc_l_mix)),
+       SND_SOC_DAPM_MIXER("Sto1 ADC MIXR", SND_SOC_NOPM, 0, 0,
+                          rt5670_sto1_adc_r_mix, ARRAY_SIZE(rt5670_sto1_adc_r_mix)),
        SND_SOC_DAPM_MIXER("Sto2 ADC MIXL", SND_SOC_NOPM, 0, 0,
                           rt5670_sto2_adc_l_mix,
                           ARRAY_SIZE(rt5670_sto2_adc_l_mix)),
@@ -2999,6 +3071,16 @@ static int rt5670_i2c_probe(struct i2c_client *i2c,
                dev_info(&i2c->dev, "quirk JD mode 3\n");
        }
 
+       /*
+        * Enable the emulated "DAC1 Playback Switch" by default to avoid
+        * muting the output with older UCM profiles.
+        */
+       rt5670->dac1_playback_switch_l = true;
+       rt5670->dac1_playback_switch_r = true;
+       /* The Power-On-Reset values for the DAC1 mixer have the DAC1 input enabled. */
+       rt5670->dac1_mixl_dac1_switch = true;
+       rt5670->dac1_mixr_dac1_switch = true;
+
        rt5670->regmap = devm_regmap_init_i2c(i2c, &rt5670_regmap);
        if (IS_ERR(rt5670->regmap)) {
                ret = PTR_ERR(rt5670->regmap);
index 56b13fe..6fb3c36 100644
 /* global definition */
 #define RT5670_L_MUTE                          (0x1 << 15)
 #define RT5670_L_MUTE_SFT                      15
-#define RT5670_VOL_L_MUTE                      (0x1 << 14)
-#define RT5670_VOL_L_SFT                       14
 #define RT5670_R_MUTE                          (0x1 << 7)
 #define RT5670_R_MUTE_SFT                      7
-#define RT5670_VOL_R_MUTE                      (0x1 << 6)
-#define RT5670_VOL_R_SFT                       6
 #define RT5670_L_VOL_MASK                      (0x3f << 8)
 #define RT5670_L_VOL_SFT                       8
 #define RT5670_R_VOL_MASK                      (0x3f)
@@ -2017,6 +2013,11 @@ struct rt5670_priv {
        int dsp_rate;
        int jack_type;
        int jack_type_saved;
+
+       bool dac1_mixl_dac1_switch;
+       bool dac1_mixr_dac1_switch;
+       bool dac1_playback_switch_l;
+       bool dac1_playback_switch_r;
 };
 
 void rt5670_jack_suspend(struct snd_soc_component *component);
index 85f7441..047f4e6 100644
@@ -895,6 +895,13 @@ static int rt711_probe(struct snd_soc_component *component)
        return 0;
 }
 
+static void rt711_remove(struct snd_soc_component *component)
+{
+       struct rt711_priv *rt711 = snd_soc_component_get_drvdata(component);
+
+       regcache_cache_only(rt711->regmap, true);
+}
+
 static const struct snd_soc_component_driver soc_codec_dev_rt711 = {
        .probe = rt711_probe,
        .set_bias_level = rt711_set_bias_level,
@@ -905,6 +912,7 @@ static const struct snd_soc_component_driver soc_codec_dev_rt711 = {
        .dapm_routes = rt711_audio_map,
        .num_dapm_routes = ARRAY_SIZE(rt711_audio_map),
        .set_jack = rt711_set_jack_detect,
+       .remove = rt711_remove,
 };
 
 static int rt711_set_sdw_stream(struct snd_soc_dai *dai, void *sdw_stream,
index 73551e3..6d9bb25 100644
@@ -71,7 +71,7 @@ static const struct reg_default sgtl5000_reg_defaults[] = {
        { SGTL5000_DAP_EQ_BASS_BAND4,           0x002f },
        { SGTL5000_DAP_MAIN_CHAN,               0x8000 },
        { SGTL5000_DAP_MIX_CHAN,                0x0000 },
-       { SGTL5000_DAP_AVC_CTRL,                0x0510 },
+       { SGTL5000_DAP_AVC_CTRL,                0x5100 },
        { SGTL5000_DAP_AVC_THRESHOLD,           0x1473 },
        { SGTL5000_DAP_AVC_ATTACK,              0x0028 },
        { SGTL5000_DAP_AVC_DECAY,               0x0050 },
diff --git a/sound/soc/codecs/sirf-audio-codec.h b/sound/soc/codecs/sirf-audio-codec.h
deleted file mode 100644
index a7fe268..0000000
+++ /dev/null
@@ -1,124 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * SiRF inner codec controllers define
- *
- * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
- */
-
-#ifndef _SIRF_AUDIO_CODEC_H
-#define _SIRF_AUDIO_CODEC_H
-
-
-#define AUDIO_IC_CODEC_PWR                     (0x00E0)
-#define AUDIO_IC_CODEC_CTRL0                   (0x00E4)
-#define AUDIO_IC_CODEC_CTRL1                   (0x00E8)
-#define AUDIO_IC_CODEC_CTRL2                   (0x00EC)
-#define AUDIO_IC_CODEC_CTRL3                   (0x00F0)
-
-#define MICBIASEN              (1 << 3)
-
-#define IC_RDACEN              (1 << 0)
-#define IC_LDACEN              (1 << 1)
-#define IC_HSREN               (1 << 2)
-#define IC_HSLEN               (1 << 3)
-#define IC_SPEN                        (1 << 4)
-#define IC_CPEN                        (1 << 5)
-
-#define IC_HPRSELR             (1 << 6)
-#define IC_HPLSELR             (1 << 7)
-#define IC_HPRSELL             (1 << 8)
-#define IC_HPLSELL             (1 << 9)
-#define IC_SPSELR              (1 << 10)
-#define IC_SPSELL              (1 << 11)
-
-#define IC_MONOR               (1 << 12)
-#define IC_MONOL               (1 << 13)
-
-#define IC_RXOSRSEL            (1 << 28)
-#define IC_CPFREQ              (1 << 29)
-#define IC_HSINVEN             (1 << 30)
-
-#define IC_MICINREN            (1 << 0)
-#define IC_MICINLEN            (1 << 1)
-#define IC_MICIN1SEL           (1 << 2)
-#define IC_MICIN2SEL           (1 << 3)
-#define IC_MICDIFSEL           (1 << 4)
-#define        IC_LINEIN1SEL           (1 << 5)
-#define        IC_LINEIN2SEL           (1 << 6)
-#define        IC_RADCEN               (1 << 7)
-#define        IC_LADCEN               (1 << 8)
-#define        IC_ALM                  (1 << 9)
-
-#define IC_DIGMICEN             (1 << 22)
-#define IC_DIGMICFREQ           (1 << 23)
-#define IC_ADC14B_12            (1 << 24)
-#define IC_FIRDAC_HSL_EN        (1 << 25)
-#define IC_FIRDAC_HSR_EN        (1 << 26)
-#define IC_FIRDAC_LOUT_EN       (1 << 27)
-#define IC_POR                  (1 << 28)
-#define IC_CODEC_CLK_EN         (1 << 29)
-#define IC_HP_3DB_BOOST         (1 << 30)
-
-#define IC_ADC_LEFT_GAIN_SHIFT 16
-#define IC_ADC_RIGHT_GAIN_SHIFT 10
-#define IC_ADC_GAIN_MASK       0x3F
-#define IC_MIC_MAX_GAIN                0x39
-
-#define IC_RXPGAR_MASK         0x3F
-#define IC_RXPGAR_SHIFT                14
-#define IC_RXPGAL_MASK         0x3F
-#define IC_RXPGAL_SHIFT                21
-#define IC_RXPGAR              0x7B
-#define IC_RXPGAL              0x7B
-
-#define AUDIO_PORT_TX_FIFO_LEVEL_CHECK_MASK     0x3F
-#define AUDIO_PORT_TX_FIFO_SC_OFFSET    0
-#define AUDIO_PORT_TX_FIFO_LC_OFFSET    10
-#define AUDIO_PORT_TX_FIFO_HC_OFFSET    20
-
-#define TX_FIFO_SC(x)           (((x) & AUDIO_PORT_TX_FIFO_LEVEL_CHECK_MASK) \
-                               << AUDIO_PORT_TX_FIFO_SC_OFFSET)
-#define TX_FIFO_LC(x)           (((x) & AUDIO_PORT_TX_FIFO_LEVEL_CHECK_MASK) \
-                               << AUDIO_PORT_TX_FIFO_LC_OFFSET)
-#define TX_FIFO_HC(x)           (((x) & AUDIO_PORT_TX_FIFO_LEVEL_CHECK_MASK) \
-                               << AUDIO_PORT_TX_FIFO_HC_OFFSET)
-
-#define AUDIO_PORT_RX_FIFO_LEVEL_CHECK_MASK     0x0F
-#define AUDIO_PORT_RX_FIFO_SC_OFFSET    0
-#define AUDIO_PORT_RX_FIFO_LC_OFFSET    10
-#define AUDIO_PORT_RX_FIFO_HC_OFFSET    20
-
-#define RX_FIFO_SC(x)           (((x) & AUDIO_PORT_RX_FIFO_LEVEL_CHECK_MASK) \
-                               << AUDIO_PORT_RX_FIFO_SC_OFFSET)
-#define RX_FIFO_LC(x)           (((x) & AUDIO_PORT_RX_FIFO_LEVEL_CHECK_MASK) \
-                               << AUDIO_PORT_RX_FIFO_LC_OFFSET)
-#define RX_FIFO_HC(x)           (((x) & AUDIO_PORT_RX_FIFO_LEVEL_CHECK_MASK) \
-                               << AUDIO_PORT_RX_FIFO_HC_OFFSET)
-#define AUDIO_PORT_IC_CODEC_TX_CTRL            (0x00F4)
-#define AUDIO_PORT_IC_CODEC_RX_CTRL            (0x00F8)
-
-#define AUDIO_PORT_IC_TXFIFO_OP                        (0x00FC)
-#define AUDIO_PORT_IC_TXFIFO_LEV_CHK           (0x0100)
-#define AUDIO_PORT_IC_TXFIFO_STS               (0x0104)
-#define AUDIO_PORT_IC_TXFIFO_INT               (0x0108)
-#define AUDIO_PORT_IC_TXFIFO_INT_MSK           (0x010C)
-
-#define AUDIO_PORT_IC_RXFIFO_OP                        (0x0110)
-#define AUDIO_PORT_IC_RXFIFO_LEV_CHK           (0x0114)
-#define AUDIO_PORT_IC_RXFIFO_STS               (0x0118)
-#define AUDIO_PORT_IC_RXFIFO_INT               (0x011C)
-#define AUDIO_PORT_IC_RXFIFO_INT_MSK           (0x0120)
-
-#define AUDIO_FIFO_START               (1 << 0)
-#define AUDIO_FIFO_RESET               (1 << 1)
-
-#define AUDIO_FIFO_FULL                        (1 << 0)
-#define AUDIO_FIFO_EMPTY               (1 << 1)
-#define AUDIO_FIFO_OFLOW               (1 << 2)
-#define AUDIO_FIFO_UFLOW               (1 << 3)
-
-#define IC_TX_ENABLE           (0x03)
-#define IC_RX_ENABLE_MONO      (0x01)
-#define IC_RX_ENABLE_STEREO    (0x03)
-
-#endif /*__SIRF_AUDIO_CODEC_H*/
index 40f682f..d18ae5e 100644
@@ -1873,6 +1873,12 @@ static int wcd934x_set_channel_map(struct snd_soc_dai *dai,
 
        wcd = snd_soc_component_get_drvdata(dai->component);
 
+       if (tx_num > WCD934X_TX_MAX || rx_num > WCD934X_RX_MAX) {
+               dev_err(wcd->dev, "Invalid tx %d or rx %d channel count\n",
+                       tx_num, rx_num);
+               return -EINVAL;
+       }
+
        if (!tx_slot || !rx_slot) {
                dev_err(wcd->dev, "Invalid tx_slot=%p, rx_slot=%p\n",
                        tx_slot, rx_slot);
index df35151..cda9cd9 100644
@@ -707,7 +707,13 @@ int wm8960_configure_pll(struct snd_soc_component *component, int freq_in,
        best_freq_out = -EINVAL;
        *sysclk_idx = *dac_idx = *bclk_idx = -1;
 
-       for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
+       /*
+        * Per the datasheet, the PLL performs best when f2 is between
+        * 90MHz and 100MHz; since the desired sysclk output is 11.2896MHz
+        * or 12.288MHz, sysclkdiv = 2 is the best choice.
+        * So search sysclk_divs from 2 to 1 rather than from 1 to 2.
+        */
+       for (i = ARRAY_SIZE(sysclk_divs) - 1; i >= 0; --i) {
                if (sysclk_divs[i] == -1)
                        continue;
                for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) {
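
The descending search pays off because of the PLL's fixed divide-by-4 output stage (per the WM8960 datasheet), giving sysclk = f2 / (4 * sysclkdiv): hitting 11.2896 MHz or 12.288 MHz with sysclkdiv = 2 needs f2 = 90.3168 MHz or 98.304 MHz, squarely inside the recommended 90-100 MHz window, whereas sysclkdiv = 1 would need only about 45-49 MHz. Iterating from the largest divider down therefore reaches the preferred operating point first.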
index 08056fa..a857a62 100644
@@ -519,11 +519,13 @@ static int fsl_esai_startup(struct snd_pcm_substream *substream,
                                   ESAI_SAICR_SYNC, esai_priv->synchronous ?
                                   ESAI_SAICR_SYNC : 0);
 
-               /* Set a default slot number -- 2 */
+               /* Set slots count */
                regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR,
-                                  ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(2));
+                                  ESAI_xCCR_xDC_MASK,
+                                  ESAI_xCCR_xDC(esai_priv->slots));
                regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
-                                  ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(2));
+                                  ESAI_xCCR_xDC_MASK,
+                                  ESAI_xCCR_xDC(esai_priv->slots));
        }
 
        return 0;
index 5781174..ad8af3f 100644
@@ -878,6 +878,7 @@ static int fsl_ssi_hw_free(struct snd_pcm_substream *substream,
 static int _fsl_ssi_set_dai_fmt(struct fsl_ssi *ssi, unsigned int fmt)
 {
        u32 strcr = 0, scr = 0, stcr, srcr, mask;
+       unsigned int slots;
 
        ssi->dai_fmt = fmt;
 
@@ -909,10 +910,11 @@ static int _fsl_ssi_set_dai_fmt(struct fsl_ssi *ssi, unsigned int fmt)
                        return -EINVAL;
                }
 
+               slots = ssi->slots ? : 2;
                regmap_update_bits(ssi->regs, REG_SSI_STCCR,
-                                  SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(2));
+                                  SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(slots));
                regmap_update_bits(ssi->regs, REG_SSI_SRCCR,
-                                  SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(2));
+                                  SSI_SxCCR_DC_MASK, SSI_SxCCR_DC(slots));
 
                /* Data on rising edge of bclk, frame low, 1clk before data */
                strcr |= SSI_STCR_TFSI | SSI_STCR_TSCKP | SSI_STCR_TEFS;
index ab31045..6cada4c 100644
@@ -172,15 +172,16 @@ int asoc_simple_parse_clk(struct device *dev,
         *  or device's module clock.
         */
        clk = devm_get_clk_from_child(dev, node, NULL);
-       if (IS_ERR(clk))
-               clk = devm_get_clk_from_child(dev, dlc->of_node, NULL);
-
        if (!IS_ERR(clk)) {
-               simple_dai->clk = clk;
                simple_dai->sysclk = clk_get_rate(clk);
-       } else if (!of_property_read_u32(node, "system-clock-frequency",
-                                        &val)) {
+
+               simple_dai->clk = clk;
+       } else if (!of_property_read_u32(node, "system-clock-frequency", &val)) {
                simple_dai->sysclk = val;
+       } else {
+               clk = devm_get_clk_from_child(dev, dlc->of_node, NULL);
+               if (!IS_ERR(clk))
+                       simple_dai->sysclk = clk_get_rate(clk);
        }
 
        if (of_property_read_bool(node, "system-clock-direction-out"))
index 9e9b058..4124aa2 100644
@@ -487,15 +487,15 @@ static struct snd_soc_dai_driver sst_platform_dai[] = {
                .stream_name = "Headset Playback",
                .channels_min = SST_STEREO,
                .channels_max = SST_STEREO,
-               .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
-               .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+               .rates = SNDRV_PCM_RATE_48000,
+               .formats = SNDRV_PCM_FMTBIT_S16_LE,
        },
        .capture = {
                .stream_name = "Headset Capture",
                .channels_min = 1,
                .channels_max = 2,
-               .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
-               .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+               .rates = SNDRV_PCM_RATE_48000,
+               .formats = SNDRV_PCM_FMTBIT_S16_LE,
        },
 },
 {
@@ -505,8 +505,8 @@ static struct snd_soc_dai_driver sst_platform_dai[] = {
                .stream_name = "Deepbuffer Playback",
                .channels_min = SST_STEREO,
                .channels_max = SST_STEREO,
-               .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
-               .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+               .rates = SNDRV_PCM_RATE_48000,
+               .formats = SNDRV_PCM_FMTBIT_S16_LE,
        },
 },
 {
index 782f2b4..5d48cc3 100644
@@ -581,7 +581,7 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
                },
                .driver_data = (void *)(BYT_RT5640_DMIC1_MAP |
                                        BYT_RT5640_JD_SRC_JD1_IN4P |
-                                       BYT_RT5640_OVCD_TH_1500UA |
+                                       BYT_RT5640_OVCD_TH_2000UA |
                                        BYT_RT5640_OVCD_SF_0P75 |
                                        BYT_RT5640_MCLK_EN),
        },
index f5de1d7..f3bebed 100644
@@ -555,7 +555,9 @@ static int mtk_dai_tdm_hw_params(struct snd_pcm_substream *substream,
 
        /* set tdm */
        if (tdm_priv->bck_invert)
-               tdm_con |= 1 << BCK_INVERSE_SFT;
+               regmap_update_bits(afe->regmap, AUDIO_TOP_CON3,
+                                  BCK_INVERSE_MASK_SFT,
+                                  0x1 << BCK_INVERSE_SFT);
 
        if (tdm_priv->lck_invert)
                tdm_con |= 1 << LRCK_INVERSE_SFT;
index 562f25c..b9fb80d 100644
@@ -21,6 +21,11 @@ enum {
 /*****************************************************************************
  *                  R E G I S T E R       D E F I N I T I O N
  *****************************************************************************/
+/* AUDIO_TOP_CON3 */
+#define BCK_INVERSE_SFT                              3
+#define BCK_INVERSE_MASK                             0x1
+#define BCK_INVERSE_MASK_SFT                         (0x1 << 3)
+
 /* AFE_DAC_CON0 */
 #define VUL12_ON_SFT                                   31
 #define VUL12_ON_MASK                                  0x1
@@ -2079,9 +2084,6 @@ enum {
 #define TDM_EN_SFT                                     0
 #define TDM_EN_MASK                                    0x1
 #define TDM_EN_MASK_SFT                                (0x1 << 0)
-#define BCK_INVERSE_SFT                                1
-#define BCK_INVERSE_MASK                               0x1
-#define BCK_INVERSE_MASK_SFT                           (0x1 << 1)
 #define LRCK_INVERSE_SFT                               2
 #define LRCK_INVERSE_MASK                              0x1
 #define LRCK_INVERSE_MASK_SFT                          (0x1 << 2)
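
These header changes pair with the mtk_dai_tdm_hw_params() hunk above: the BCK invert control sits at bit 3 of AUDIO_TOP_CON3, while the old defines placed it at bit 1 of the TDM control register, so OR-ing the flag into tdm_con toggled a bit that does not control BCK inversion. Writing AUDIO_TOP_CON3 through regmap_update_bits() puts the inversion where the hardware actually reads it.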
index c642e5f..be360a4 100644
@@ -739,7 +739,7 @@ static void of_lpass_cpu_parse_dai_data(struct device *dev,
 
        for_each_child_of_node(dev->of_node, node) {
                ret = of_property_read_u32(node, "reg", &id);
-               if (ret || id < 0 || id >= data->variant->num_dai) {
+               if (ret || id < 0) {
                        dev_err(dev, "valid dai id not found: %d\n", ret);
                        continue;
                }
index 6c2760e..153e9b2 100644
 #define SPK_TDM_RX_MASK         0x03
 #define NUM_TDM_SLOTS           8
 #define SLIM_MAX_TX_PORTS 16
-#define SLIM_MAX_RX_PORTS 16
+#define SLIM_MAX_RX_PORTS 13
 #define WCD934X_DEFAULT_MCLK_RATE      9600000
 
 struct sdm845_snd_data {
        struct snd_soc_jack jack;
        bool jack_setup;
-       bool stream_prepared[SLIM_MAX_RX_PORTS];
+       bool stream_prepared[AFE_PORT_MAX];
        struct snd_soc_card *card;
        uint32_t pri_mi2s_clk_count;
        uint32_t sec_mi2s_clk_count;
        uint32_t quat_tdm_clk_count;
-       struct sdw_stream_runtime *sruntime[SLIM_MAX_RX_PORTS];
+       struct sdw_stream_runtime *sruntime[AFE_PORT_MAX];
 };
 
 static unsigned int tdm_slot_offset[8] = {0, 4, 8, 12, 16, 20, 24, 28};
index f6d4e99..0cffc95 100644
@@ -31,6 +31,7 @@
 #include <linux/of.h>
 #include <linux/of_graph.h>
 #include <linux/dmi.h>
+#include <linux/acpi.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
@@ -1573,6 +1574,9 @@ int snd_soc_set_dmi_name(struct snd_soc_card *card, const char *flavour)
        if (card->long_name)
                return 0; /* long name already set by driver or from DMI */
 
+       if (!is_acpi_device_node(card->dev->fwnode))
+               return 0;
+
        /* make up dmi long name as: vendor-product-version-board */
        vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
        if (!vendor || !is_dmi_valid(vendor)) {
index 6d8f7d9..4a3d522 100644
@@ -399,7 +399,13 @@ int snd_sof_device_shutdown(struct device *dev)
 {
        struct snd_sof_dev *sdev = dev_get_drvdata(dev);
 
-       return snd_sof_shutdown(sdev);
+       if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE))
+               cancel_work_sync(&sdev->probe_work);
+
+       if (sdev->fw_state == SOF_FW_BOOT_COMPLETE)
+               return snd_sof_shutdown(sdev);
+
+       return 0;
 }
 EXPORT_SYMBOL(snd_sof_device_shutdown);
 
index fc29b91..c7ed2b3 100644 (file)
@@ -27,9 +27,10 @@ static const struct snd_sof_debugfs_map apl_dsp_debugfs[] = {
 
 /* apollolake ops */
 const struct snd_sof_dsp_ops sof_apl_ops = {
-       /* probe and remove */
+       /* probe/remove/shutdown */
        .probe          = hda_dsp_probe,
        .remove         = hda_dsp_remove,
+       .shutdown       = hda_dsp_shutdown,
 
        /* Register IO */
        .write          = sof_io_write,
index e38db51..821f25f 100644 (file)
@@ -232,9 +232,10 @@ void cnl_ipc_dump(struct snd_sof_dev *sdev)
 
 /* cannonlake ops */
 const struct snd_sof_dsp_ops sof_cnl_ops = {
-       /* probe and remove */
+       /* probe/remove/shutdown */
        .probe          = hda_dsp_probe,
        .remove         = hda_dsp_remove,
+       .shutdown       = hda_dsp_shutdown,
 
        /* Register IO */
        .write          = sof_io_write,
@@ -349,22 +350,6 @@ const struct sof_intel_dsp_desc cnl_chip_info = {
 };
 EXPORT_SYMBOL_NS(cnl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
 
-const struct sof_intel_dsp_desc ehl_chip_info = {
-       /* Elkhartlake */
-       .cores_num = 4,
-       .init_core_mask = 1,
-       .host_managed_cores_mask = BIT(0),
-       .ipc_req = CNL_DSP_REG_HIPCIDR,
-       .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
-       .ipc_ack = CNL_DSP_REG_HIPCIDA,
-       .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
-       .ipc_ctl = CNL_DSP_REG_HIPCCTL,
-       .rom_init_timeout       = 300,
-       .ssp_count = ICL_SSP_COUNT,
-       .ssp_base_offset = CNL_SSP_BASE_OFFSET,
-};
-EXPORT_SYMBOL_NS(ehl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
-
 const struct sof_intel_dsp_desc jsl_chip_info = {
        /* Jasperlake */
        .cores_num = 2,
index 5788fe3..736a54b 100644 (file)
@@ -207,7 +207,7 @@ int hda_dsp_core_power_down(struct snd_sof_dev *sdev, unsigned int core_mask)
 
        ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
                                HDA_DSP_REG_ADSPCS, adspcs,
-                               !(adspcs & HDA_DSP_ADSPCS_SPA_MASK(core_mask)),
+                               !(adspcs & HDA_DSP_ADSPCS_CPA_MASK(core_mask)),
                                HDA_DSP_REG_POLL_INTERVAL_US,
                                HDA_DSP_PD_TIMEOUT * USEC_PER_MSEC);
        if (ret < 0)
@@ -226,10 +226,17 @@ bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev,
 
        val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS);
 
-       is_enable = (val & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) &&
-                   (val & HDA_DSP_ADSPCS_SPA_MASK(core_mask)) &&
-                   !(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
-                   !(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
+#define MASK_IS_EQUAL(v, m, field) ({  \
+       u32 _m = field(m);              \
+       ((v) & _m) == _m;               \
+})
+
+       is_enable = MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_CPA_MASK) &&
+               MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_SPA_MASK) &&
+               !(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
+               !(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
+
+#undef MASK_IS_EQUAL
 
        dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n",
                is_enable, core_mask);
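[Editor's note] The old expression passed as soon as any bit under the mask was set; MASK_IS_EQUAL only passes when every bit under the mask is set, which matters once core_mask covers more than one core. A standalone illustration, where the 16-bit shift is an arbitrary stand-in for the real ADSPCS field layout:

        #define SPA_MASK(m)     ((m) << 16)     /* illustrative layout only */

        u32 core_mask = 0x3;                    /* cores 0 and 1 */
        u32 val = 0x1 << 16;                    /* only core 0 powered */

        bool any = val & SPA_MASK(core_mask);                          /* true  */
        bool all = (val & SPA_MASK(core_mask)) == SPA_MASK(core_mask); /* false */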
@@ -885,6 +892,12 @@ int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
        return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
 }
 
+int hda_dsp_shutdown(struct snd_sof_dev *sdev)
+{
+       sdev->system_suspend_target = SOF_SUSPEND_S3;
+       return snd_sof_suspend(sdev->dev);
+}
+
 int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
 {
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
index 1d29b1f..0c096db 100644 (file)
@@ -897,6 +897,7 @@ free_streams:
 /* dsp_unmap: not currently used */
        iounmap(sdev->bar[HDA_DSP_BAR]);
 hdac_bus_unmap:
+       platform_device_unregister(hdev->dmic_dev);
        iounmap(bus->remap_addr);
        hda_codec_i915_exit(sdev);
 err:
index 7c7579d..ae80725 100644 (file)
@@ -517,6 +517,7 @@ int hda_dsp_resume(struct snd_sof_dev *sdev);
 int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev);
 int hda_dsp_runtime_resume(struct snd_sof_dev *sdev);
 int hda_dsp_runtime_idle(struct snd_sof_dev *sdev);
+int hda_dsp_shutdown(struct snd_sof_dev *sdev);
 int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev);
 void hda_dsp_dump(struct snd_sof_dev *sdev, u32 flags);
 void hda_ipc_dump(struct snd_sof_dev *sdev);
index e9d5a0a..88a74be 100644 (file)
@@ -26,9 +26,10 @@ static const struct snd_sof_debugfs_map icl_dsp_debugfs[] = {
 
 /* Icelake ops */
 const struct snd_sof_dsp_ops sof_icl_ops = {
-       /* probe and remove */
+       /* probe/remove/shutdown */
        .probe          = hda_dsp_probe,
        .remove         = hda_dsp_remove,
+       .shutdown       = hda_dsp_shutdown,
 
        /* Register IO */
        .write          = sof_io_write,
index 4856074..38bc353 100644 (file)
@@ -65,7 +65,7 @@ static const struct sof_dev_desc ehl_desc = {
        .default_tplg_path = "intel/sof-tplg",
        .default_fw_filename = "sof-ehl.ri",
        .nocodec_tplg_filename = "sof-ehl-nocodec.tplg",
-       .ops = &sof_cnl_ops,
+       .ops = &sof_tgl_ops,
 };
 
 static const struct sof_dev_desc adls_desc = {
index 419f05b..54ba1b8 100644 (file)
@@ -25,7 +25,7 @@ const struct snd_sof_dsp_ops sof_tgl_ops = {
        /* probe/remove/shutdown */
        .probe          = hda_dsp_probe,
        .remove         = hda_dsp_remove,
-       .shutdown       = hda_dsp_remove,
+       .shutdown       = hda_dsp_shutdown,
 
        /* Register IO */
        .write          = sof_io_write,
@@ -156,6 +156,22 @@ const struct sof_intel_dsp_desc tglh_chip_info = {
 };
 EXPORT_SYMBOL_NS(tglh_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
 
+const struct sof_intel_dsp_desc ehl_chip_info = {
+       /* Elkhartlake */
+       .cores_num = 4,
+       .init_core_mask = 1,
+       .host_managed_cores_mask = BIT(0),
+       .ipc_req = CNL_DSP_REG_HIPCIDR,
+       .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+       .ipc_ack = CNL_DSP_REG_HIPCIDA,
+       .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+       .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+       .rom_init_timeout       = 300,
+       .ssp_count = ICL_SSP_COUNT,
+       .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+};
+EXPORT_SYMBOL_NS(ehl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
+
 const struct sof_intel_dsp_desc adls_chip_info = {
        /* Alderlake-S */
        .cores_num = 2,
index 6c13cc8..2173991 100644 (file)
@@ -1364,6 +1364,7 @@ static struct snd_soc_card *sun4i_codec_create_card(struct device *dev)
                return ERR_PTR(-ENOMEM);
 
        card->dev               = dev;
+       card->owner             = THIS_MODULE;
        card->name              = "sun4i-codec";
        card->dapm_widgets      = sun4i_codec_card_dapm_widgets;
        card->num_dapm_widgets  = ARRAY_SIZE(sun4i_codec_card_dapm_widgets);
@@ -1396,6 +1397,7 @@ static struct snd_soc_card *sun6i_codec_create_card(struct device *dev)
                return ERR_PTR(-ENOMEM);
 
        card->dev               = dev;
+       card->owner             = THIS_MODULE;
        card->name              = "A31 Audio Codec";
        card->dapm_widgets      = sun6i_codec_card_dapm_widgets;
        card->num_dapm_widgets  = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
@@ -1449,6 +1451,7 @@ static struct snd_soc_card *sun8i_a23_codec_create_card(struct device *dev)
                return ERR_PTR(-ENOMEM);
 
        card->dev               = dev;
+       card->owner             = THIS_MODULE;
        card->name              = "A23 Audio Codec";
        card->dapm_widgets      = sun6i_codec_card_dapm_widgets;
        card->num_dapm_widgets  = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
@@ -1487,6 +1490,7 @@ static struct snd_soc_card *sun8i_h3_codec_create_card(struct device *dev)
                return ERR_PTR(-ENOMEM);
 
        card->dev               = dev;
+       card->owner             = THIS_MODULE;
        card->name              = "H3 Audio Codec";
        card->dapm_widgets      = sun6i_codec_card_dapm_widgets;
        card->num_dapm_widgets  = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
@@ -1525,6 +1529,7 @@ static struct snd_soc_card *sun8i_v3s_codec_create_card(struct device *dev)
                return ERR_PTR(-ENOMEM);
 
        card->dev               = dev;
+       card->owner             = THIS_MODULE;
        card->name              = "V3s Audio Codec";
        card->dapm_widgets      = sun6i_codec_card_dapm_widgets;
        card->num_dapm_widgets  = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
index 9d0da5f..d24ae00 100644 (file)
@@ -62,7 +62,6 @@ MODULE_PARM_DESC(enable, "Enable Sun AMD7930 soundcard.");
 MODULE_AUTHOR("Thomas K. Dyas and David S. Miller");
 MODULE_DESCRIPTION("Sun AMD7930");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Sun,AMD7930}}");
 
 /* Device register layout.  */
 
index 0eed5f7..35c1780 100644 (file)
@@ -52,7 +52,6 @@ MODULE_PARM_DESC(enable, "Enable Sun CS4231 soundcard.");
 MODULE_AUTHOR("Jaroslav Kysela, Derrick J. Brashear and David S. Miller");
 MODULE_DESCRIPTION("Sun CS4231");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Sun,CS4231}}");
 
 #ifdef SBUS_SUPPORT
 struct sbus_dma_info {
index 5a6fb66..b055f58 100644 (file)
@@ -76,7 +76,6 @@
 MODULE_AUTHOR("Rudolf Koenig, Brent Baccala and Martin Habets");
 MODULE_DESCRIPTION("Sun DBRI");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Sun,DBRI}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;      /* ID for this card */
index 08c6e6a..33e9621 100644 (file)
@@ -26,7 +26,6 @@
 MODULE_AUTHOR("Torsten Schenk <torsten.schenk@zoho.com>");
 MODULE_DESCRIPTION("TerraTec DMX 6Fire USB audio driver");
 MODULE_LICENSE("GPL v2");
-MODULE_SUPPORTED_DEVICE("{{TerraTec,DMX 6Fire USB}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for card */
index e03481c..49f63f8 100644 (file)
 MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
 MODULE_DESCRIPTION("caiaq USB audio");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Native Instruments,RigKontrol2},"
-                        "{Native Instruments,RigKontrol3},"
-                        "{Native Instruments,Kore Controller},"
-                        "{Native Instruments,Kore Controller 2},"
-                        "{Native Instruments,Audio Kontrol 1},"
-                        "{Native Instruments,Audio 2 DJ},"
-                        "{Native Instruments,Audio 4 DJ},"
-                        "{Native Instruments,Audio 8 DJ},"
-                        "{Native Instruments,Traktor Audio 2},"
-                        "{Native Instruments,Session I/O},"
-                        "{Native Instruments,GuitarRig mobile},"
-                        "{Native Instruments,Traktor Kontrol X1},"
-                        "{Native Instruments,Traktor Kontrol S4},"
-                        "{Native Instruments,Maschine Controller}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */
 static char* id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for this card */
index b6f4c08..0826a43 100644 (file)
@@ -58,8 +58,6 @@
 MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
 MODULE_DESCRIPTION("USB Audio");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Generic,USB Audio}}");
-
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;     /* Index 0-MAX */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;      /* ID for this card */
index c282418..95385e9 100644 (file)
@@ -21,23 +21,6 @@ MODULE_AUTHOR("Michael Trimarchi <michael@amarulasolutions.com>");
 MODULE_AUTHOR("Antonio Ospite <ao2@amarulasolutions.com>");
 MODULE_DESCRIPTION("M2Tech hiFace USB-SPDIF audio driver");
 MODULE_LICENSE("GPL v2");
-MODULE_SUPPORTED_DEVICE("{{M2Tech,Young},"
-                        "{M2Tech,hiFace},"
-                        "{M2Tech,North Star},"
-                        "{M2Tech,W4S Young},"
-                        "{M2Tech,Corrson},"
-                        "{M2Tech,AUDIA},"
-                        "{M2Tech,SL Audio},"
-                        "{M2Tech,Empirical},"
-                        "{M2Tech,Rockna},"
-                        "{M2Tech,Pathos},"
-                        "{M2Tech,Metronome},"
-                        "{M2Tech,CAD},"
-                        "{M2Tech,Audio Esclusive},"
-                        "{M2Tech,Rotel},"
-                        "{M2Tech,Eeaudio},"
-                        "{The Chord Company,CHORD},"
-                        "{AVA Group A/S,Vitus}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for card */
index 6b30155..5834d1d 100644 (file)
@@ -19,7 +19,6 @@
 MODULE_DESCRIPTION("Edirol UA-101/1000 driver");
 MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
 MODULE_LICENSE("GPL v2");
-MODULE_SUPPORTED_DEVICE("{{Edirol,UA-101},{Edirol,UA-1000}}");
 
 /*
  * Should not be lower than the minimum scheduling delay of the host
index 08873d2..ffd9223 100644 (file)
@@ -2883,7 +2883,7 @@ static int snd_djm_controls_put(struct snd_kcontrol *kctl, struct snd_ctl_elem_v
        u8 group = (private_value & SND_DJM_GROUP_MASK) >> SND_DJM_GROUP_SHIFT;
        u16 value = elem->value.enumerated.item[0];
 
-       kctl->private_value = ((device << SND_DJM_DEVICE_SHIFT) |
+       kctl->private_value = (((unsigned long)device << SND_DJM_DEVICE_SHIFT) |
                              (group << SND_DJM_GROUP_SHIFT) |
                              value);
 
@@ -2921,7 +2921,7 @@ static int snd_djm_controls_create(struct usb_mixer_interface *mixer,
                value = device->controls[i].default_value;
                knew.name = device->controls[i].name;
                knew.private_value = (
-                       (device_idx << SND_DJM_DEVICE_SHIFT) |
+                       ((unsigned long)device_idx << SND_DJM_DEVICE_SHIFT) |
                        (i << SND_DJM_GROUP_SHIFT) |
                        value);
                err = snd_djm_controls_update(mixer, device_idx, i, value);
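[Editor's note] Without the cast, the shift is evaluated in (signed) int before being widened to the unsigned long private_value, so a field shifted into the top bits of int overflows with undefined behaviour; casting first makes the whole shift happen at unsigned long width. A generic sketch of the hazard (the shift count of 24 is illustrative, not the actual SND_DJM_DEVICE_SHIFT):

        u8 device = 0x80;
        unsigned long bad  = device << 24;                 /* int overflow: UB */
        unsigned long good = (unsigned long)device << 24;  /* shift in full width */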
index d3001fb..176437a 100644 (file)
@@ -1521,6 +1521,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        case USB_ID(0x21b4, 0x0081): /* AudioQuest DragonFly */
        case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */
        case USB_ID(0x413c, 0xa506): /* Dell AE515 sound bar */
+       case USB_ID(0x046d, 0x084c): /* Logitech ConferenceCam Connect */
                return true;
        }
 
index c541581..3cd28d2 100644 (file)
 MODULE_AUTHOR("Karsten Wiese <annabellesgarden@yahoo.de>");
 MODULE_DESCRIPTION("TASCAM "NAME_ALLCAPS" Version 0.8.7.2");
 MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{TASCAM(0x1604),"NAME_ALLCAPS"(0x8001)(0x8005)(0x8007)}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */
 static char* id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for this card */
index 1d66c3a..33b12aa 100644 (file)
@@ -1887,4 +1887,3 @@ MODULE_AUTHOR("Vaibhav Agarwal <vaibhav.agarwal@intel.com>");
 MODULE_AUTHOR("Jerome Anand <jerome.anand@intel.com>");
 MODULE_DESCRIPTION("Intel HDMI Audio driver");
 MODULE_LICENSE("GPL v2");
-MODULE_SUPPORTED_DEVICE("{Intel,Intel_HAD}");
index 228d820..2cb0a19 100644 (file)
@@ -391,4 +391,3 @@ module_exit(xen_drv_fini);
 MODULE_DESCRIPTION("Xen virtual sound device frontend");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("xen:" XENSND_DRIVER_NAME);
-MODULE_SUPPORTED_DEVICE("{{ALSA,Virtual soundcard}}");
index 8b281f7..f6afee2 100644 (file)
@@ -1154,6 +1154,7 @@ struct kvm_x86_mce {
 #define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR       (1 << 0)
 #define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL     (1 << 1)
 #define KVM_XEN_HVM_CONFIG_SHARED_INFO         (1 << 2)
+#define KVM_XEN_HVM_CONFIG_RUNSTATE            (1 << 3)
 
 struct kvm_xen_hvm_config {
        __u32 flags;
@@ -1621,12 +1622,24 @@ struct kvm_xen_vcpu_attr {
        union {
                __u64 gpa;
                __u64 pad[8];
+               struct {
+                       __u64 state;
+                       __u64 state_entry_time;
+                       __u64 time_running;
+                       __u64 time_runnable;
+                       __u64 time_blocked;
+                       __u64 time_offline;
+               } runstate;
        } u;
 };
 
 /* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
 #define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO       0x0
 #define KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO  0x1
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR   0x2
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT        0x3
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA   0x4
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST 0x5
 
 /* Secure Encrypted Virtualization command */
 enum sev_cmd_id {
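[Editor's note] A hedged sketch of how userspace might point KVM at a guest runstate area using the new attribute type; it assumes KVM_CAP_XEN_HVM advertised KVM_XEN_HVM_CONFIG_RUNSTATE, uses the KVM_XEN_VCPU_SET_ATTR ioctl from the same series, and the guest-physical address is hypothetical:

        struct kvm_xen_vcpu_attr attr = {
                .type  = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR,
                .u.gpa = 0x40000000,    /* hypothetical runstate area GPA */
        };

        if (ioctl(vcpu_fd, KVM_XEN_VCPU_SET_ATTR, &attr) < 0)
                err(1, "KVM_XEN_VCPU_SET_ATTR");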
index 71aabaf..8f13b84 100644 (file)
@@ -9,6 +9,7 @@ Type=simple
 ExecStart=/usr/bin/kvm_stat -dtcz -s 10 -L /var/log/kvm_stat.csv
 ExecReload=/bin/kill -HUP $MAINPID
 Restart=always
+RestartSec=60s
 SyslogIdentifier=kvm_stat
 SyslogLevel=debug
 
index 887a494..e9eb6a6 100644 (file)
@@ -215,7 +215,7 @@ define do_install
        if [ ! -d '$(DESTDIR_SQ)$2' ]; then             \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
        fi;                                             \
-       $(INSTALL) $1 $(if $3,-m $3,) '$(DESTDIR_SQ)$2'
+       $(INSTALL) $(if $3,-m $3,) $1 '$(DESTDIR_SQ)$2'
 endef
 
 install_lib: all_cmd
index 2f9d685..0911aea 100644 (file)
@@ -462,7 +462,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
                return err;
 
        case BTF_KIND_ARRAY:
-               return btf_dump_order_type(d, btf_array(t)->type, through_ptr);
+               return btf_dump_order_type(d, btf_array(t)->type, false);
 
        case BTF_KIND_STRUCT:
        case BTF_KIND_UNION: {
index d43cc3f..4181d17 100644 (file)
@@ -1181,7 +1181,8 @@ static int bpf_object__elf_init(struct bpf_object *obj)
        if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) {
                pr_warn("elf: failed to get section names strings from %s: %s\n",
                        obj->path, elf_errmsg(-1));
-               return -LIBBPF_ERRNO__FORMAT;
+               err = -LIBBPF_ERRNO__FORMAT;
+               goto errout;
        }
 
        /* Old LLVM set e_machine to EM_NONE */
index 4dd73de..d2cb28e 100644 (file)
@@ -40,7 +40,7 @@ static int libbpf_netlink_open(__u32 *nl_pid)
        memset(&sa, 0, sizeof(sa));
        sa.nl_family = AF_NETLINK;
 
-       sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
+       sock = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE);
        if (sock < 0)
                return -errno;
 
index 8caaafe..e7a8d84 100644 (file)
@@ -227,7 +227,7 @@ static int ringbuf_process_ring(struct ring* r)
                        if ((len & BPF_RINGBUF_DISCARD_BIT) == 0) {
                                sample = (void *)len_ptr + BPF_RINGBUF_HDR_SZ;
                                err = r->sample_cb(r->ctx, sample, len);
-                               if (err) {
+                               if (err < 0) {
                                        /* update consumer pos and bail out */
                                        smp_store_release(r->consumer_pos,
                                                          cons_pos);
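[Editor's note] After this change, a positive return from the sample callback no longer aborts ring processing; only a negative value does, and it is propagated to the caller. A minimal sketch of a callback under the standard libbpf signature (struct my_event is hypothetical):

        static int handle_sample(void *ctx, void *data, size_t len)
        {
                const struct my_event *e = data;    /* hypothetical layout */

                if (len < sizeof(*e))
                        return -EINVAL;             /* abort: propagated out */
                /* ... consume the sample ... */
                return 0;                           /* keep processing */
        }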
index 526fc35..d24b5cc 100644 (file)
@@ -59,6 +59,8 @@ struct xsk_umem {
        int fd;
        int refcount;
        struct list_head ctx_list;
+       bool rx_ring_setup_done;
+       bool tx_ring_setup_done;
 };
 
 struct xsk_ctx {
@@ -743,26 +745,30 @@ static struct xsk_ctx *xsk_get_ctx(struct xsk_umem *umem, int ifindex,
        return NULL;
 }
 
-static void xsk_put_ctx(struct xsk_ctx *ctx)
+static void xsk_put_ctx(struct xsk_ctx *ctx, bool unmap)
 {
        struct xsk_umem *umem = ctx->umem;
        struct xdp_mmap_offsets off;
        int err;
 
-       if (--ctx->refcount == 0) {
-               err = xsk_get_mmap_offsets(umem->fd, &off);
-               if (!err) {
-                       munmap(ctx->fill->ring - off.fr.desc,
-                              off.fr.desc + umem->config.fill_size *
-                              sizeof(__u64));
-                       munmap(ctx->comp->ring - off.cr.desc,
-                              off.cr.desc + umem->config.comp_size *
-                              sizeof(__u64));
-               }
+       if (--ctx->refcount)
+               return;
 
-               list_del(&ctx->list);
-               free(ctx);
-       }
+       if (!unmap)
+               goto out_free;
+
+       err = xsk_get_mmap_offsets(umem->fd, &off);
+       if (err)
+               goto out_free;
+
+       munmap(ctx->fill->ring - off.fr.desc, off.fr.desc + umem->config.fill_size *
+              sizeof(__u64));
+       munmap(ctx->comp->ring - off.cr.desc, off.cr.desc + umem->config.comp_size *
+              sizeof(__u64));
+
+out_free:
+       list_del(&ctx->list);
+       free(ctx);
 }
 
 static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
@@ -797,8 +803,6 @@ static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
        memcpy(ctx->ifname, ifname, IFNAMSIZ - 1);
        ctx->ifname[IFNAMSIZ - 1] = '\0';
 
-       umem->fill_save = NULL;
-       umem->comp_save = NULL;
        ctx->fill = fill;
        ctx->comp = comp;
        list_add(&ctx->list, &umem->ctx_list);
@@ -854,6 +858,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
        struct xsk_socket *xsk;
        struct xsk_ctx *ctx;
        int err, ifindex;
+       bool unmap = umem->fill_save != fill;
+       bool rx_setup_done = false, tx_setup_done = false;
 
        if (!umem || !xsk_ptr || !(rx || tx))
                return -EFAULT;
@@ -881,6 +887,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
                }
        } else {
                xsk->fd = umem->fd;
+               rx_setup_done = umem->rx_ring_setup_done;
+               tx_setup_done = umem->tx_ring_setup_done;
        }
 
        ctx = xsk_get_ctx(umem, ifindex, queue_id);
@@ -899,7 +907,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
        }
        xsk->ctx = ctx;
 
-       if (rx) {
+       if (rx && !rx_setup_done) {
                err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
                                 &xsk->config.rx_size,
                                 sizeof(xsk->config.rx_size));
@@ -907,8 +915,10 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
                        err = -errno;
                        goto out_put_ctx;
                }
+               if (xsk->fd == umem->fd)
+                       umem->rx_ring_setup_done = true;
        }
-       if (tx) {
+       if (tx && !tx_setup_done) {
                err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING,
                                 &xsk->config.tx_size,
                                 sizeof(xsk->config.tx_size));
@@ -916,6 +926,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
                        err = -errno;
                        goto out_put_ctx;
                }
+               if (xsk->fd == umem->fd)
+                       umem->tx_ring_setup_done = true;
        }
 
        err = xsk_get_mmap_offsets(xsk->fd, &off);
@@ -994,6 +1006,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
        }
 
        *xsk_ptr = xsk;
+       umem->fill_save = NULL;
+       umem->comp_save = NULL;
        return 0;
 
 out_mmap_tx:
@@ -1005,7 +1019,7 @@ out_mmap_rx:
                munmap(rx_map, off.rx.desc +
                       xsk->config.rx_size * sizeof(struct xdp_desc));
 out_put_ctx:
-       xsk_put_ctx(ctx);
+       xsk_put_ctx(ctx, unmap);
 out_socket:
        if (--umem->refcount)
                close(xsk->fd);
@@ -1019,6 +1033,9 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
                       struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
                       const struct xsk_socket_config *usr_config)
 {
+       if (!umem)
+               return -EFAULT;
+
        return xsk_socket__create_shared(xsk_ptr, ifname, queue_id, umem,
                                         rx, tx, umem->fill_save,
                                         umem->comp_save, usr_config);
@@ -1068,7 +1085,7 @@ void xsk_socket__delete(struct xsk_socket *xsk)
                }
        }
 
-       xsk_put_ctx(ctx);
+       xsk_put_ctx(ctx, true);
 
        umem->refcount--;
        /* Do not close an fd that also has an associated umem connected
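[Editor's note] For context, the unmap flag threaded into xsk_put_ctx() records whether this call actually created fresh fill/completion mappings: the first socket on a umem consumes umem->fill_save/comp_save, while later shared sockets pass their own rings. A hedged usage sketch of the shared-umem path (interface name and queue id are examples):

        struct xsk_socket *xsk2;
        struct xsk_ring_prod fill2;
        struct xsk_ring_cons comp2, rx2;
        int err;

        err = xsk_socket__create_shared(&xsk2, "eth0", 1 /* queue */, umem,
                                        &rx2, NULL /* no tx */, &fill2,
                                        &comp2, NULL /* default config */);
        if (err)
                return err;     /* teardown only unmaps rings it mapped */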
index ace8772..7c4a9d4 100644 (file)
@@ -402,35 +402,42 @@ static pid_t handle_signalfd(struct daemon *daemon)
        int status;
        pid_t pid;
 
+       /*
+        * Treat the signal fd data as a pure notification and check the
+        * state of every session. The reason is that multiple signals can
+        * get coalesced in the kernel, so we may receive only a single
+        * signal even if multiple SIGCHLD signals were generated.
+        */
        err = read(daemon->signal_fd, &si, sizeof(struct signalfd_siginfo));
-       if (err != sizeof(struct signalfd_siginfo))
+       if (err != sizeof(struct signalfd_siginfo)) {
+               pr_err("failed to read signal fd\n");
                return -1;
+       }
 
        list_for_each_entry(session, &daemon->sessions, list) {
+               if (session->pid == -1)
+                       continue;
 
-               if (session->pid != (int) si.ssi_pid)
+               pid = waitpid(session->pid, &status, WNOHANG);
+               if (pid <= 0)
                        continue;
 
-               pid = waitpid(session->pid, &status, 0);
-               if (pid == session->pid) {
-                       if (WIFEXITED(status)) {
-                               pr_info("session '%s' exited, status=%d\n",
-                                       session->name, WEXITSTATUS(status));
-                       } else if (WIFSIGNALED(status)) {
-                               pr_info("session '%s' killed (signal %d)\n",
-                                       session->name, WTERMSIG(status));
-                       } else if (WIFSTOPPED(status)) {
-                               pr_info("session '%s' stopped (signal %d)\n",
-                                       session->name, WSTOPSIG(status));
-                       } else {
-                               pr_info("session '%s' Unexpected status (0x%x)\n",
-                                       session->name, status);
-                       }
+               if (WIFEXITED(status)) {
+                       pr_info("session '%s' exited, status=%d\n",
+                               session->name, WEXITSTATUS(status));
+               } else if (WIFSIGNALED(status)) {
+                       pr_info("session '%s' killed (signal %d)\n",
+                               session->name, WTERMSIG(status));
+               } else if (WIFSTOPPED(status)) {
+                       pr_info("session '%s' stopped (signal %d)\n",
+                               session->name, WSTOPSIG(status));
+               } else {
+                       pr_info("session '%s' Unexpected status (0x%x)\n",
+                               session->name, status);
                }
 
                session->state = KILL;
                session->pid = -1;
-               return pid;
        }
 
        return 0;
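[Editor's note] Because SIGCHLD is a standard (non-queued) signal, several exiting sessions can collapse into one signalfd event, which is why the loop now reaps every session with WNOHANG instead of doing a blocking waitpid() on a single pid. The core pattern, as a standalone sketch:

        int status;
        pid_t pid;

        /* one SIGCHLD may stand for several exited children */
        while ((pid = waitpid(-1, &status, WNOHANG)) > 0)
                ;       /* record pid's exit status here */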
@@ -443,7 +450,6 @@ static int daemon_session__wait(struct daemon_session *session, struct daemon *d
                .fd     = daemon->signal_fd,
                .events = POLLIN,
        };
-       pid_t wpid = 0, pid = session->pid;
        time_t start;
 
        start = time(NULL);
@@ -452,7 +458,7 @@ static int daemon_session__wait(struct daemon_session *session, struct daemon *d
                int err = poll(&pollfd, 1, 1000);
 
                if (err > 0) {
-                       wpid = handle_signalfd(daemon);
+                       handle_signalfd(daemon);
                } else if (err < 0) {
                        perror("failed: poll\n");
                        return -1;
@@ -460,7 +466,7 @@ static int daemon_session__wait(struct daemon_session *session, struct daemon *d
 
                if (start + secs < time(NULL))
                        return -1;
-       } while (wpid != pid);
+       } while (session->pid != -1);
 
        return 0;
 }
@@ -902,7 +908,9 @@ static void daemon_session__kill(struct daemon_session *session,
                        daemon_session__signal(session, SIGKILL);
                        break;
                default:
-                       break;
+                       pr_err("failed to wait for session %s\n",
+                              session->name);
+                       return;
                }
                how++;
 
@@ -955,7 +963,8 @@ static void daemon__kill(struct daemon *daemon)
                        daemon__signal(daemon, SIGKILL);
                        break;
                default:
-                       break;
+                       pr_err("failed to wait for sessions\n");
+                       return;
                }
                how++;
 
@@ -1344,7 +1353,7 @@ out:
                close(sock_fd);
        if (conf_fd != -1)
                close(conf_fd);
-       if (conf_fd != -1)
+       if (signal_fd != -1)
                close(signal_fd);
 
        pr_info("daemon exited\n");
index 6fe44d9..ddccc0e 100644 (file)
@@ -906,7 +906,7 @@ int cmd_inject(int argc, const char **argv)
        }
 
        data.path = inject.input_name;
-       inject.session = perf_session__new(&data, true, &inject.tool);
+       inject.session = perf_session__new(&data, inject.output.is_pipe, &inject.tool);
        if (IS_ERR(inject.session))
                return PTR_ERR(inject.session);
 
index f57e075..c72adbd 100644 (file)
@@ -86,7 +86,7 @@ static struct {
                .msg_load_fail    = "check your vmlinux setting?",
                .target_func      = &epoll_pwait_loop,
                .expect_result    = (NR_ITERS + 1) / 2,
-               .pin              = true,
+               .pin              = true,
        },
 #ifdef HAVE_BPF_PROLOGUE
        {
@@ -99,13 +99,6 @@ static struct {
                .expect_result    = (NR_ITERS + 1) / 4,
        },
 #endif
-       {
-               .prog_id          = LLVM_TESTCASE_BPF_RELOCATION,
-               .desc             = "BPF relocation checker",
-               .name             = "[bpf_relocation_test]",
-               .msg_compile_fail = "fix 'perf test LLVM' first",
-               .msg_load_fail    = "libbpf error when dealing with relocation",
-       },
 };
 
 static int do_test(struct bpf_object *obj, int (*func)(void),
index 5ad3ca8..5898438 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 # daemon operations
 # SPDX-License-Identifier: GPL-2.0
 
index f3ac9d4..2e5eff4 100644 (file)
@@ -210,8 +210,10 @@ static int arm_spe_do_get_packet(const unsigned char *buf, size_t len,
 
        if ((hdr & SPE_HEADER0_MASK2) == SPE_HEADER0_EXTENDED) {
                /* 16-bit extended format header */
-               ext_hdr = 1;
+               if (len == 1)
+                       return ARM_SPE_BAD_PACKET;
 
+               ext_hdr = 1;
                hdr = buf[1];
                if (hdr == SPE_HEADER1_ALIGNMENT)
                        return arm_spe_get_alignment(buf, len, packet);
index 953f4af..5b6ccb9 100644 (file)
@@ -298,10 +298,6 @@ static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues,
                queue->set = true;
                queue->tid = buffer->tid;
                queue->cpu = buffer->cpu;
-       } else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
-               pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n",
-                      queue->cpu, queue->tid, buffer->cpu, buffer->tid);
-               return -EINVAL;
        }
 
        buffer->buffer_nr = queues->next_buffer_nr++;
index 423ec69..5ecd4f4 100644 (file)
@@ -201,7 +201,7 @@ static int block_total_cycles_pct_entry(struct perf_hpp_fmt *fmt,
        double ratio = 0.0;
 
        if (block_fmt->total_cycles)
-               ratio = (double)bi->cycles / (double)block_fmt->total_cycles;
+               ratio = (double)bi->cycles_aggr / (double)block_fmt->total_cycles;
 
        return color_pct(hpp, block_fmt->width, 100.0 * ratio);
 }
@@ -216,9 +216,9 @@ static int64_t block_total_cycles_pct_sort(struct perf_hpp_fmt *fmt,
        double l, r;
 
        if (block_fmt->total_cycles) {
-               l = ((double)bi_l->cycles /
+               l = ((double)bi_l->cycles_aggr /
                        (double)block_fmt->total_cycles) * 100000.0;
-               r = ((double)bi_r->cycles /
+               r = ((double)bi_r->cycles_aggr /
                        (double)block_fmt->total_cycles) * 100000.0;
                return (int64_t)l - (int64_t)r;
        }
index 57d58c8..cdecda1 100644 (file)
@@ -196,25 +196,32 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
        }
 
        if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) {
+               free(info_linear);
                pr_debug("%s: the kernel is too old, aborting\n", __func__);
                return -2;
        }
 
        info = &info_linear->info;
+       if (!info->jited_ksyms) {
+               free(info_linear);
+               return -1;
+       }
 
        /* number of ksyms, func_lengths, and tags should match */
        sub_prog_cnt = info->nr_jited_ksyms;
        if (sub_prog_cnt != info->nr_prog_tags ||
-           sub_prog_cnt != info->nr_jited_func_lens)
+           sub_prog_cnt != info->nr_jited_func_lens) {
+               free(info_linear);
                return -1;
+       }
 
        /* check BTF func info support */
        if (info->btf_id && info->nr_func_info && info->func_info_rec_size) {
                /* btf func info number should be same as sub_prog_cnt */
                if (sub_prog_cnt != info->nr_func_info) {
                        pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
-                       err = -1;
-                       goto out;
+                       free(info_linear);
+                       return -1;
                }
                if (btf__get_from_id(info->btf_id, &btf)) {
                        pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id);
index 42c84ad..c0c0fab 100644 (file)
@@ -356,6 +356,9 @@ __add_event(struct list_head *list, int *idx,
        struct perf_cpu_map *cpus = pmu ? perf_cpu_map__get(pmu->cpus) :
                               cpu_list ? perf_cpu_map__new(cpu_list) : NULL;
 
+       if (pmu && attr->type == PERF_TYPE_RAW)
+               perf_pmu__warn_invalid_config(pmu, attr->config, name);
+
        if (init_attr)
                event_attr_init(attr);
 
index 44ef283..46fd0f9 100644 (file)
@@ -1812,3 +1812,36 @@ int perf_pmu__caps_parse(struct perf_pmu *pmu)
 
        return nr_caps;
 }
+
+void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
+                                  char *name)
+{
+       struct perf_pmu_format *format;
+       __u64 masks = 0, bits;
+       char buf[100];
+       unsigned int i;
+
+       list_for_each_entry(format, &pmu->format, list) {
+               if (format->value != PERF_PMU_FORMAT_VALUE_CONFIG)
+                       continue;
+
+               for_each_set_bit(i, format->bits, PERF_PMU_FORMAT_BITS)
+                       masks |= 1ULL << i;
+       }
+
+       /*
+        * Kernel doesn't export any valid format bits.
+        */
+       if (masks == 0)
+               return;
+
+       bits = config & ~masks;
+       if (bits == 0)
+               return;
+
+       bitmap_scnprintf((unsigned long *)&bits, sizeof(bits) * 8, buf, sizeof(buf));
+
+       pr_warning("WARNING: event '%s' not valid (bits %s of config "
+                  "'%llx' not supported by kernel)!\n",
+                  name ?: "N/A", buf, config);
+}
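[Editor's note] A worked example of the check, with a hypothetical PMU whose format files export config:0-7 and config:32-35:

        __u64 masks  = 0x0000000f000000ffULL;  /* union of exported bits */
        __u64 config = 0x100;                  /* bit 8 is not exported  */
        __u64 bits   = config & ~masks;        /* 0x100 -> warning fires */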
index 8164388..160b0f5 100644 (file)
@@ -123,4 +123,7 @@ int perf_pmu__convert_scale(const char *scale, char **end, double *sval);
 
 int perf_pmu__caps_parse(struct perf_pmu *pmu);
 
+void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
+                                  char *name);
+
 #endif /* __PMU_H */
index b698046..dff1781 100644 (file)
@@ -424,7 +424,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
 
        while (!io.eof) {
                static const char anonstr[] = "//anon";
-               size_t size;
+               size_t size, aligned_size;
 
                /* ensure null termination since stack will be reused. */
                event->mmap2.filename[0] = '\0';
@@ -484,11 +484,12 @@ out:
                }
 
                size = strlen(event->mmap2.filename) + 1;
-               size = PERF_ALIGN(size, sizeof(u64));
+               aligned_size = PERF_ALIGN(size, sizeof(u64));
                event->mmap2.len -= event->mmap.start;
                event->mmap2.header.size = (sizeof(event->mmap2) -
-                                       (sizeof(event->mmap2.filename) - size));
-               memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
+                                       (sizeof(event->mmap2.filename) - aligned_size));
+               memset(event->mmap2.filename + size, 0, machine->id_hdr_size +
+                       (aligned_size - size));
                event->mmap2.header.size += machine->id_hdr_size;
                event->mmap2.pid = tgid;
                event->mmap2.tid = pid;
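[Editor's note] The point of splitting size from aligned_size is that the bytes between the filename's terminating NUL and the u64-aligned end used to be left as uninitialized stack data; the memset now covers them. The arithmetic for a 5-byte filename, as a sketch:

        size_t size         = 5;                               /* "/bin" + NUL */
        size_t aligned_size = PERF_ALIGN(size, sizeof(u64));   /* rounds to 8  */
        /* the memset clears id_hdr_size plus the 3 padding bytes
         * (aligned_size - size) that used to leak stack contents */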
@@ -758,7 +759,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
        for (i = 0; i < n; i++) {
                char *end;
                pid_t _pid;
-               bool kernel_thread;
+               bool kernel_thread = false;
 
                _pid = strtol(dirent[i]->d_name, &end, 10);
                if (*end)
index 3cc91ad..43beb16 100644 (file)
@@ -133,6 +133,8 @@ static struct dso *__machine__addnew_vdso(struct machine *machine, const char *s
        if (dso != NULL) {
                __dsos__add(&machine->dsos, dso);
                dso__set_long_name(dso, long_name, false);
+               /* Put the dso here because __dsos__add already grabbed a reference */
+               dso__put(dso);
        }
 
        return dso;
index a7f0603..6908700 100644 (file)
@@ -40,3 +40,5 @@
 # CONFIG_RESET_BRCMSTB_RESCAL is not set
 # CONFIG_RESET_INTEL_GW is not set
 # CONFIG_ADI_AXI_ADC is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+# CONFIG_PAGE_POISONING is not set
index 0b550cb..1e2683d 100644 (file)
@@ -13,7 +13,7 @@ from typing import List, Set
 CONFIG_IS_NOT_SET_PATTERN = r'^# CONFIG_(\w+) is not set$'
 CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+|".*")$'
 
-KconfigEntryBase = collections.namedtuple('KconfigEntry', ['name', 'value'])
+KconfigEntryBase = collections.namedtuple('KconfigEntryBase', ['name', 'value'])
 
 class KconfigEntry(KconfigEntryBase):
 
index 3b796dd..ca24f68 100644 (file)
@@ -296,21 +296,34 @@ static void *idr_throbber(void *arg)
        return NULL;
 }
 
+/*
+ * There are always either 1 or 2 objects in the IDR.  If we find nothing,
+ * or we find something at an ID we didn't expect, that's a bug.
+ */
 void idr_find_test_1(int anchor_id, int throbber_id)
 {
        pthread_t throbber;
        time_t start = time(NULL);
 
-       pthread_create(&throbber, NULL, idr_throbber, &throbber_id);
-
        BUG_ON(idr_alloc(&find_idr, xa_mk_value(anchor_id), anchor_id,
                                anchor_id + 1, GFP_KERNEL) != anchor_id);
 
+       pthread_create(&throbber, NULL, idr_throbber, &throbber_id);
+
+       rcu_read_lock();
        do {
                int id = 0;
                void *entry = idr_get_next(&find_idr, &id);
-               BUG_ON(entry != xa_mk_value(id));
+               rcu_read_unlock();
+               if ((id != anchor_id && id != throbber_id) ||
+                   entry != xa_mk_value(id)) {
+                       printf("%s(%d, %d): %p at %d\n", __func__, anchor_id,
+                               throbber_id, entry, id);
+                       abort();
+               }
+               rcu_read_lock();
        } while (time(NULL) < start + 11);
+       rcu_read_unlock();
 
        pthread_join(throbber, NULL);
 
@@ -577,6 +590,7 @@ void ida_tests(void)
 
 int __weak main(void)
 {
+       rcu_register_thread();
        radix_tree_init();
        idr_checks();
        ida_tests();
@@ -584,5 +598,6 @@ int __weak main(void)
        rcu_barrier();
        if (nr_allocated)
                printf("nr_allocated = %d\n", nr_allocated);
+       rcu_unregister_thread();
        return 0;
 }
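[Editor's note] The userspace RCU emulation used by these tests requires every thread that enters a read-side critical section to register itself first; the test mains now bracket their work accordingly. The required shape, as a sketch:

        rcu_register_thread();

        rcu_read_lock();
        /* ... lookups such as idr_get_next() ... */
        rcu_read_unlock();

        rcu_unregister_thread();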
diff --git a/tools/testing/radix-tree/linux/compiler_types.h b/tools/testing/radix-tree/linux/compiler_types.h
deleted file mode 100644 (file)
index e69de29..0000000
index 9eae0fb..e00520c 100644 (file)
@@ -224,7 +224,9 @@ void multiorder_checks(void)
 
 int __weak main(void)
 {
+       rcu_register_thread();
        radix_tree_init();
        multiorder_checks();
+       rcu_unregister_thread();
        return 0;
 }
index e61e43e..f20e12c 100644 (file)
@@ -25,11 +25,13 @@ void xarray_tests(void)
 
 int __weak main(void)
 {
+       rcu_register_thread();
        radix_tree_init();
        xarray_tests();
        radix_tree_cpu_dead(1);
        rcu_barrier();
        if (nr_allocated)
                printf("nr_allocated = %d\n", nr_allocated);
+       rcu_unregister_thread();
        return 0;
 }
index 9210691..e3e08d9 100644 (file)
@@ -284,16 +284,28 @@ endfunction
 // Set up test pattern in the FFR
 // x0: pid
 // x2: generation
+//
+// We need to generate a canonical FFR value, which consists of a number of
+// low "1" bits, followed by a number of zeros. This gives us 17 unique values
+// per 16 bits of FFR, so we create a 4 bit signature out of the PID and
+// generation, and use that as the initial number of ones in the pattern.
+// We fill the upper lanes of FFR with zeros.
 // Beware: corrupts P0.
 function setup_ffr
        mov     x4, x30
 
-       bl      pattern
+       and     w0, w0, #0x3
+       bfi     w0, w2, #2, #2
+       mov     w1, #1
+       lsl     w1, w1, w0
+       sub     w1, w1, #1
+
        ldr     x0, =ffrref
-       ldr     x1, =scratch
-       rdvl    x2, #1
-       lsr     x2, x2, #3
-       bl      memcpy
+       strh    w1, [x0], 2
+       rdvl    x1, #1
+       lsr     x1, x1, #3
+       sub     x1, x1, #2
+       bl      memclr
 
        mov     x0, #0
        ldr     x1, =ffrref
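[Editor's note] The signature computation in setup_ffr, rendered in C for clarity (pid and gen stand for the x0/x2 inputs):

        unsigned int sig  = (pid & 0x3) | ((gen & 0x3) << 2);  /* 0..15 */
        unsigned int ffr0 = (1u << sig) - 1;    /* that many low ones   */
        /* ffr0 is stored in the first 16-bit lane; the rest is zeroed */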
index 37c5494..e25917f 100644 (file)
@@ -6,6 +6,7 @@
 #include <test_progs.h>
 #include "bpf_dctcp.skel.h"
 #include "bpf_cubic.skel.h"
+#include "bpf_tcp_nogpl.skel.h"
 
 #define min(a, b) ((a) < (b) ? (a) : (b))
 
@@ -227,10 +228,53 @@ static void test_dctcp(void)
        bpf_dctcp__destroy(dctcp_skel);
 }
 
+static char *err_str;
+static bool found;
+
+static int libbpf_debug_print(enum libbpf_print_level level,
+                             const char *format, va_list args)
+{
+       char *log_buf;
+
+       if (level != LIBBPF_WARN ||
+           strcmp(format, "libbpf: \n%s\n")) {
+               vprintf(format, args);
+               return 0;
+       }
+
+       log_buf = va_arg(args, char *);
+       if (!log_buf)
+               goto out;
+       if (err_str && strstr(log_buf, err_str) != NULL)
+               found = true;
+out:
+       printf(format, log_buf);
+       return 0;
+}
+
+static void test_invalid_license(void)
+{
+       libbpf_print_fn_t old_print_fn;
+       struct bpf_tcp_nogpl *skel;
+
+       err_str = "struct ops programs must have a GPL compatible license";
+       found = false;
+       old_print_fn = libbpf_set_print(libbpf_debug_print);
+
+       skel = bpf_tcp_nogpl__open_and_load();
+       ASSERT_NULL(skel, "bpf_tcp_nogpl");
+       ASSERT_EQ(found, true, "expected_err_msg");
+
+       bpf_tcp_nogpl__destroy(skel);
+       libbpf_set_print(old_print_fn);
+}
+
 void test_bpf_tcp_ca(void)
 {
        if (test__start_subtest("dctcp"))
                test_dctcp();
        if (test__start_subtest("cubic"))
                test_cubic();
+       if (test__start_subtest("invalid_license"))
+               test_invalid_license();
 }
index 36af1c1..b62a393 100644 (file)
@@ -128,6 +128,8 @@ static void test_check_mtu_xdp(__u32 mtu, __u32 ifindex)
        test_check_mtu_run_xdp(skel, skel->progs.xdp_use_helper, mtu);
        test_check_mtu_run_xdp(skel, skel->progs.xdp_exceed_mtu, mtu);
        test_check_mtu_run_xdp(skel, skel->progs.xdp_minus_delta, mtu);
+       test_check_mtu_run_xdp(skel, skel->progs.xdp_input_len, mtu);
+       test_check_mtu_run_xdp(skel, skel->progs.xdp_input_len_exceed, mtu);
 
 cleanup:
        test_check_mtu__destroy(skel);
@@ -187,6 +189,8 @@ static void test_check_mtu_tc(__u32 mtu, __u32 ifindex)
        test_check_mtu_run_tc(skel, skel->progs.tc_exceed_mtu, mtu);
        test_check_mtu_run_tc(skel, skel->progs.tc_exceed_mtu_da, mtu);
        test_check_mtu_run_tc(skel, skel->progs.tc_minus_delta, mtu);
+       test_check_mtu_run_tc(skel, skel->progs.tc_input_len, mtu);
+       test_check_mtu_run_tc(skel, skel->progs.tc_input_len_exceed, mtu);
 cleanup:
        test_check_mtu__destroy(skel);
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c b/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c
new file mode 100644 (file)
index 0000000..6c4d42a
--- /dev/null
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#define _GNU_SOURCE
+#include <sched.h>
+#include <test_progs.h>
+#include <time.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include "fexit_sleep.skel.h"
+
+static int do_sleep(void *skel)
+{
+       struct fexit_sleep *fexit_skel = skel;
+       struct timespec ts1 = { .tv_nsec = 1 };
+       struct timespec ts2 = { .tv_sec = 10 };
+
+       fexit_skel->bss->pid = getpid();
+       (void)syscall(__NR_nanosleep, &ts1, NULL);
+       (void)syscall(__NR_nanosleep, &ts2, NULL);
+       return 0;
+}
+
+#define STACK_SIZE (1024 * 1024)
+static char child_stack[STACK_SIZE];
+
+void test_fexit_sleep(void)
+{
+       struct fexit_sleep *fexit_skel = NULL;
+       int wstatus, duration = 0;
+       pid_t cpid;
+       int err, fexit_cnt;
+
+       fexit_skel = fexit_sleep__open_and_load();
+       if (CHECK(!fexit_skel, "fexit_skel_load", "fexit skeleton failed\n"))
+               goto cleanup;
+
+       err = fexit_sleep__attach(fexit_skel);
+       if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err))
+               goto cleanup;
+
+       cpid = clone(do_sleep, child_stack + STACK_SIZE, CLONE_FILES | SIGCHLD, fexit_skel);
+       if (CHECK(cpid == -1, "clone", strerror(errno)))
+               goto cleanup;
+
+       /* wait until first sys_nanosleep ends and second sys_nanosleep starts */
+       while (READ_ONCE(fexit_skel->bss->fentry_cnt) != 2);
+       fexit_cnt = READ_ONCE(fexit_skel->bss->fexit_cnt);
+       if (CHECK(fexit_cnt != 1, "fexit_cnt", "%d", fexit_cnt))
+               goto cleanup;
+
+       /* close progs and detach them. That will trigger two nop5->jmp5 rewrites
+        * in the trampolines to skip nanosleep_fexit prog.
+        * The nanosleep_fentry prog will get detached first.
+        * The nanosleep_fexit prog will get detached second.
+        * Detaching will trigger freeing of both progs JITed images.
+        * There will be two dying bpf_tramp_image-s, but only the initial
+        * bpf_tramp_image (the one with both _fentry and _fexit progs) will
+        * be stuck waiting for percpu_ref_kill to confirm. The other one
+        * will be freed quickly.
+        */
+       close(bpf_program__fd(fexit_skel->progs.nanosleep_fentry));
+       close(bpf_program__fd(fexit_skel->progs.nanosleep_fexit));
+       fexit_sleep__detach(fexit_skel);
+
+       /* kill the thread to unwind sys_nanosleep stack through the trampoline */
+       kill(cpid, 9);
+
+       if (CHECK(waitpid(cpid, &wstatus, 0) == -1, "waitpid", strerror(errno)))
+               goto cleanup;
+       if (CHECK(WEXITSTATUS(wstatus) != 0, "exitstatus", "failed"))
+               goto cleanup;
+
+       /* The bypassed nanosleep_fexit prog shouldn't have executed.
+        * Unlike the progs, the maps were not freed and are still
+        * directly accessible.
+        */
+       fexit_cnt = READ_ONCE(fexit_skel->bss->fexit_cnt);
+       if (CHECK(fexit_cnt != 1, "fexit_cnt", "%d", fexit_cnt))
+               goto cleanup;
+
+cleanup:
+       fexit_sleep__destroy(fexit_skel);
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_tcp_nogpl.c b/tools/testing/selftests/bpf/progs/bpf_tcp_nogpl.c
new file mode 100644 (file)
index 0000000..2ecd833
--- /dev/null
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <linux/types.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_tcp_helpers.h"
+
+char _license[] SEC("license") = "X";
+
+void BPF_STRUCT_OPS(nogpltcp_init, struct sock *sk)
+{
+}
+
+SEC(".struct_ops")
+struct tcp_congestion_ops bpf_nogpltcp = {
+       .init           = (void *)nogpltcp_init,
+       .name           = "bpf_nogpltcp",
+};
index 31975c9..3ac0c9a 100644 (file)
@@ -174,6 +174,12 @@ struct struct_in_struct {
        };
 };
 
+struct struct_in_array {};
+
+struct struct_in_array_typed {};
+
+typedef struct struct_in_array_typed struct_in_array_t[2];
+
 struct struct_with_embedded_stuff {
        int a;
        struct {
@@ -203,6 +209,8 @@ struct struct_with_embedded_stuff {
        } r[5];
        struct struct_in_struct s[10];
        int t[11];
+       struct struct_in_array (*u)[2];
+       struct_in_array_t *v;
 };
 
 struct root_struct {
diff --git a/tools/testing/selftests/bpf/progs/fexit_sleep.c b/tools/testing/selftests/bpf/progs/fexit_sleep.c
new file mode 100644 (file)
index 0000000..03a672d
--- /dev/null
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char LICENSE[] SEC("license") = "GPL";
+
+int pid = 0;
+int fentry_cnt = 0;
+int fexit_cnt = 0;
+
+SEC("fentry/__x64_sys_nanosleep")
+int BPF_PROG(nanosleep_fentry, const struct pt_regs *regs)
+{
+       if ((int)bpf_get_current_pid_tgid() != pid)
+               return 0;
+
+       fentry_cnt++;
+       return 0;
+}
+
+SEC("fexit/__x64_sys_nanosleep")
+int BPF_PROG(nanosleep_fexit, const struct pt_regs *regs, int ret)
+{
+       if ((int)bpf_get_current_pid_tgid() != pid)
+               return 0;
+
+       fexit_cnt++;
+       return 0;
+}
index b7787b4..c4a9bae 100644 (file)
@@ -105,6 +105,54 @@ int xdp_minus_delta(struct xdp_md *ctx)
        return retval;
 }
 
+SEC("xdp")
+int xdp_input_len(struct xdp_md *ctx)
+{
+       int retval = XDP_PASS; /* Expected retval on successful test */
+       void *data_end = (void *)(long)ctx->data_end;
+       void *data = (void *)(long)ctx->data;
+       __u32 ifindex = GLOBAL_USER_IFINDEX;
+       __u32 data_len = data_end - data;
+
+       /* The API allows the user to pass a length to check as input via
+        * the mtu_len param; the resulting MTU value is still returned in
+        * mtu_len after the call.
+        *
+        * The input length is L3, like the MTU and iph->tot_len.
+        * Remember that XDP data_len is L2.
+        */
+       __u32 mtu_len = data_len - ETH_HLEN;
+
+       if (bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0))
+               retval = XDP_ABORTED;
+
+       global_bpf_mtu_xdp = mtu_len;
+       return retval;
+}
+
+SEC("xdp")
+int xdp_input_len_exceed(struct xdp_md *ctx)
+{
+       int retval = XDP_ABORTED; /* Fail */
+       __u32 ifindex = GLOBAL_USER_IFINDEX;
+       int err;
+
+       /* The API allows the user to pass a length to check as input via
+        * the mtu_len param; the resulting MTU value is still returned in
+        * mtu_len after the call.
+        *
+        * The input length value is an L3 size, like the MTU.
+        */
+       __u32 mtu_len = GLOBAL_USER_MTU;
+
+       mtu_len += 1; /* Exceed by 1 */
+
+       err = bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0);
+       if (err == BPF_MTU_CHK_RET_FRAG_NEEDED)
+               retval = XDP_PASS; /* Exceeded MTU detected, as expected */
+
+       global_bpf_mtu_xdp = mtu_len;
+       return retval;
+}
+
 SEC("classifier")
 int tc_use_helper(struct __sk_buff *ctx)
 {
@@ -196,3 +244,47 @@ int tc_minus_delta(struct __sk_buff *ctx)
        global_bpf_mtu_xdp = mtu_len;
        return retval;
 }
+
+SEC("classifier")
+int tc_input_len(struct __sk_buff *ctx)
+{
+       int retval = BPF_OK; /* Expected retval on successful test */
+       __u32 ifindex = GLOBAL_USER_IFINDEX;
+
+       /* The API allows the user to pass a length to check as input via
+        * the mtu_len param; the resulting MTU value is still returned in
+        * mtu_len after the call.
+        *
+        * The input length value is an L3 size.
+        */
+       __u32 mtu_len = GLOBAL_USER_MTU;
+
+       if (bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0))
+               retval = BPF_DROP;
+
+       global_bpf_mtu_xdp = mtu_len;
+       return retval;
+}
+
+SEC("classifier")
+int tc_input_len_exceed(struct __sk_buff *ctx)
+{
+       int retval = BPF_DROP; /* Fail */
+       __u32 ifindex = GLOBAL_USER_IFINDEX;
+       int err;
+
+       /* The API allows the user to pass a length to check as input via
+        * the mtu_len param; the resulting MTU value is still returned in
+        * mtu_len after the call.
+        *
+        * The input length value is an L3 size, like the MTU.
+        */
+       __u32 mtu_len = GLOBAL_USER_MTU;
+
+       mtu_len += 1; /* Exceed by 1 */
+
+       err = bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0);
+       if (err == BPF_MTU_CHK_RET_FRAG_NEEDED)
+               retval = BPF_OK; /* Exceeded MTU detected, as expected */
+
+       global_bpf_mtu_xdp = mtu_len;
+       return retval;
+}
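[Editor's note] The in/out contract these tests exercise, condensed into a sketch (ifindex is assumed valid):

        __u32 mtu_len = 1500;   /* input: L3 length to check */
        int err = bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0);
        if (err == BPF_MTU_CHK_RET_FRAG_NEEDED)
                ;       /* length exceeded; mtu_len now holds the MTU */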
index 9afe947..ba6eadf 100644 (file)
@@ -508,10 +508,8 @@ int _ip6geneve_get_tunnel(struct __sk_buff *skb)
        }
 
        ret = bpf_skb_get_tunnel_opt(skb, &gopt, sizeof(gopt));
-       if (ret < 0) {
-               ERROR(ret);
-               return TC_ACT_SHOT;
-       }
+       if (ret < 0)
+               gopt.opt_class = 0;
 
        bpf_trace_printk(fmt, sizeof(fmt),
                        key.tunnel_id, key.remote_ipv4, gopt.opt_class);
index 1fd07a4..c162498 100644 (file)
@@ -6,8 +6,9 @@
                BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
                BPF_EXIT_INSN(),
        },
-       .result = REJECT,
+       .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
        .errstr = "R0 tried to subtract pointer from scalar",
+       .result = REJECT,
 },
 {
        "check deducing bounds from const, 2",
@@ -20,6 +21,8 @@
                BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
                BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 1,
 },
@@ -31,8 +34,9 @@
                BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
                BPF_EXIT_INSN(),
        },
-       .result = REJECT,
+       .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
        .errstr = "R0 tried to subtract pointer from scalar",
+       .result = REJECT,
 },
 {
        "check deducing bounds from const, 4",
@@ -45,6 +49,8 @@
                BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
                BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
 },
 {
@@ -55,8 +61,9 @@
                BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
                BPF_EXIT_INSN(),
        },
-       .result = REJECT,
+       .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
        .errstr = "R0 tried to subtract pointer from scalar",
+       .result = REJECT,
 },
 {
        "check deducing bounds from const, 6",
@@ -67,8 +74,9 @@
                BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
                BPF_EXIT_INSN(),
        },
-       .result = REJECT,
+       .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
        .errstr = "R0 tried to subtract pointer from scalar",
+       .result = REJECT,
 },
 {
        "check deducing bounds from const, 7",
@@ -80,8 +88,9 @@
                            offsetof(struct __sk_buff, mark)),
                BPF_EXIT_INSN(),
        },
-       .result = REJECT,
+       .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types",
        .errstr = "dereference of modified ctx ptr",
+       .result = REJECT,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
                            offsetof(struct __sk_buff, mark)),
                BPF_EXIT_INSN(),
        },
-       .result = REJECT,
+       .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
        .errstr = "dereference of modified ctx ptr",
+       .result = REJECT,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
                BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
                BPF_EXIT_INSN(),
        },
-       .result = REJECT,
+       .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
        .errstr = "R0 tried to subtract pointer from scalar",
+       .result = REJECT,
 },
 {
        "check deducing bounds from const, 10",
                BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
                BPF_EXIT_INSN(),
        },
-       .result = REJECT,
        .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
+       .result = REJECT,
 },
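
Reviewer note: in these tables, .result/.errstr describe the privileged run
and .result_unpriv/.errstr_unpriv override them for the unprivileged run, so
entries that only gain .errstr_unpriv still REJECT in both modes, just with
different verifier messages. A sketch of how the runner picks the per-mode
expectation (assumed to mirror test_verifier.c, where UNDEF is the implicit
zero default):

	static int expected_result(const struct bpf_test *test, bool unpriv)
	{
		return unpriv && test->result_unpriv != UNDEF
			? test->result_unpriv : test->result;
	}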
index b117bdd..6f610cf 100644 (file)
@@ -75,6 +75,8 @@
        BPF_EXIT_INSN(),
        },
        .fixup_map_hash_16b = { 4 },
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
        .result = ACCEPT,
 },
 {
@@ -91,5 +93,7 @@
        BPF_EXIT_INSN(),
        },
        .fixup_map_hash_16b = { 4 },
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
        .result = ACCEPT,
 },
index b018ad7..3e32400 100644 (file)
        .result = ACCEPT,
 },
 {
-       "unpriv: adding of fp",
+       "unpriv: adding of fp, reg",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_MOV64_IMM(BPF_REG_1, 0),
        BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
+       .result_unpriv = REJECT,
+       .result = ACCEPT,
+},
+{
+       "unpriv: adding of fp, imm",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
+       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
+       BPF_EXIT_INSN(),
+       },
        .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
        .result_unpriv = REJECT,
        .result = ACCEPT,
index ed4e76b..feb9126 100644 (file)
        .fixup_map_array_48b = { 1 },
        .result = ACCEPT,
        .result_unpriv = REJECT,
-       .errstr_unpriv = "R2 tried to add from different maps or paths",
+       .errstr_unpriv = "R2 tried to add from different maps, paths, or prohibited types",
        .retval = 0,
 },
 {
        .result = ACCEPT,
        .retval = 0xabcdef12,
 },
+{
+       "map access: value_ptr += N, value_ptr -= N known scalar",
+       .insns = {
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+       BPF_MOV32_IMM(BPF_REG_1, 0x12345678),
+       BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
+       BPF_MOV64_IMM(BPF_REG_1, 2),
+       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 3 },
+       .result = ACCEPT,
+       .retval = 0x12345678,
+},
 {
        "map access: unknown scalar += value_ptr, 1",
        .insns = {
index 32b87cc..7bd7e77 100644 (file)
@@ -8,10 +8,13 @@
 /x86_64/debug_regs
 /x86_64/evmcs_test
 /x86_64/get_cpuid_test
+/x86_64/get_msr_index_features
 /x86_64/kvm_pv_test
+/x86_64/hyperv_clock
 /x86_64/hyperv_cpuid
 /x86_64/mmio_warning_test
 /x86_64/platform_info_test
+/x86_64/set_boot_cpu_id
 /x86_64/set_sregs_test
 /x86_64/smm_test
 /x86_64/state_test
index a6d61f4..67eebb5 100644 (file)
@@ -39,12 +39,15 @@ LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c
 LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c
 
 TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test
+TEST_GEN_PROGS_x86_64 += x86_64/get_msr_index_features
 TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
 TEST_GEN_PROGS_x86_64 += x86_64/get_cpuid_test
+TEST_GEN_PROGS_x86_64 += x86_64/hyperv_clock
 TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
 TEST_GEN_PROGS_x86_64 += x86_64/kvm_pv_test
 TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test
 TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test
+TEST_GEN_PROGS_x86_64 += x86_64/set_boot_cpu_id
 TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
 TEST_GEN_PROGS_x86_64 += x86_64/smm_test
 TEST_GEN_PROGS_x86_64 += x86_64/state_test
index 2f2eeb8..5aadf84 100644 (file)
@@ -108,7 +108,7 @@ static void run_test(uint32_t run)
        kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
        vm_create_irqchip(vm);
 
-       fprintf(stderr, "%s: [%d] start vcpus\n", __func__, run);
+       pr_debug("%s: [%d] start vcpus\n", __func__, run);
        for (i = 0; i < VCPU_NUM; ++i) {
                vm_vcpu_add_default(vm, i, guest_code);
                payloads[i].vm = vm;
@@ -124,7 +124,7 @@ static void run_test(uint32_t run)
                        check_set_affinity(throw_away, &cpu_set);
                }
        }
-       fprintf(stderr, "%s: [%d] all threads launched\n", __func__, run);
+       pr_debug("%s: [%d] all threads launched\n", __func__, run);
        sem_post(sem);
        for (i = 0; i < VCPU_NUM; ++i)
                check_join(threads[i], &b);
@@ -147,16 +147,16 @@ int main(int argc, char **argv)
                if (pid == 0)
                        run_test(i); /* This function always exits */
 
-               fprintf(stderr, "%s: [%d] waiting semaphore\n", __func__, i);
+               pr_debug("%s: [%d] waiting semaphore\n", __func__, i);
                sem_wait(sem);
                r = (rand() % DELAY_US_MAX) + 1;
-               fprintf(stderr, "%s: [%d] waiting %dus\n", __func__, i, r);
+               pr_debug("%s: [%d] waiting %dus\n", __func__, i, r);
                usleep(r);
                r = waitpid(pid, &s, WNOHANG);
                TEST_ASSERT(r != pid,
                            "%s: [%d] child exited unexpectedly status: [%d]",
                            __func__, i, s);
-               fprintf(stderr, "%s: [%d] killing child\n", __func__, i);
+               pr_debug("%s: [%d] killing child\n", __func__, i);
                kill(pid, SIGKILL);
        }
 
index 2d7eb69..0f4258e 100644 (file)
@@ -16,6 +16,7 @@
 
 #include "sparsebit.h"
 
+#define KVM_DEV_PATH "/dev/kvm"
 #define KVM_MAX_VCPUS 512
 
 /*
@@ -133,6 +134,7 @@ void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl,
 int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl,
                void *arg);
 void vm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg);
+int _vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg);
 void kvm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg);
 int _kvm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg);
 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
index e5fbf16..b8849a1 100644 (file)
@@ -1697,11 +1697,16 @@ void vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
 {
        int ret;
 
-       ret = ioctl(vm->fd, cmd, arg);
+       ret = _vm_ioctl(vm, cmd, arg);
        TEST_ASSERT(ret == 0, "vm ioctl %lu failed, rc: %i errno: %i (%s)",
                cmd, ret, errno, strerror(errno));
 }
 
+int _vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
+{
+       return ioctl(vm->fd, cmd, arg);
+}
+
 /*
  * KVM system ioctl
  *
index 34465dc..91ce1b5 100644 (file)
@@ -10,8 +10,6 @@
 
 #include "sparsebit.h"
 
-#define KVM_DEV_PATH           "/dev/kvm"
-
 struct userspace_mem_region {
        struct kvm_userspace_memory_region region;
        struct sparsebit *unused_phy_pages;
diff --git a/tools/testing/selftests/kvm/x86_64/get_msr_index_features.c b/tools/testing/selftests/kvm/x86_64/get_msr_index_features.c
new file mode 100644 (file)
index 0000000..cb953df
--- /dev/null
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test that KVM_GET_MSR_INDEX_LIST and
+ * KVM_GET_MSR_FEATURE_INDEX_LIST work as intended
+ *
+ * Copyright (C) 2020, Red Hat, Inc.
+ */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+
+static int kvm_num_index_msrs(int kvm_fd, int nmsrs)
+{
+       struct kvm_msr_list *list;
+       int r;
+
+       list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
+       list->nmsrs = nmsrs;
+       r = ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
+       TEST_ASSERT(r == -1 && errno == E2BIG,
+                               "Unexpected result from KVM_GET_MSR_INDEX_LIST probe, r: %i",
+                               r);
+
+       r = list->nmsrs;
+       free(list);
+       return r;
+}
+
+static void test_get_msr_index(void)
+{
+       int old_res, res, kvm_fd, r;
+       struct kvm_msr_list *list;
+
+       kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
+       if (kvm_fd < 0)
+               exit(KSFT_SKIP);
+
+       old_res = kvm_num_index_msrs(kvm_fd, 0);
+       TEST_ASSERT(old_res != 0, "Expecting nmsrs to be > 0");
+
+       if (old_res != 1) {
+               res = kvm_num_index_msrs(kvm_fd, 1);
+               TEST_ASSERT(res > 1, "Expecting nmsrs to be > 1");
+               TEST_ASSERT(res == old_res, "Expecting nmsrs to be identical");
+       }
+
+       list = malloc(sizeof(*list) + old_res * sizeof(list->indices[0]));
+       list->nmsrs = old_res;
+       r = ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
+
+       TEST_ASSERT(r == 0,
+                   "Unexpected result from KVM_GET_MSR_INDEX_LIST, r: %i",
+                   r);
+       TEST_ASSERT(list->nmsrs == old_res, "Expecting nmsrs to be identical");
+       free(list);
+
+       close(kvm_fd);
+}
+
+static int kvm_num_feature_msrs(int kvm_fd, int nmsrs)
+{
+       struct kvm_msr_list *list;
+       int r;
+
+       list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
+       list->nmsrs = nmsrs;
+       r = ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, list);
+       TEST_ASSERT(r == -1 && errno == E2BIG,
+               "Unexpected result from KVM_GET_MSR_FEATURE_INDEX_LIST probe, r: %i",
+                               r);
+
+       r = list->nmsrs;
+       free(list);
+       return r;
+}
+
+struct kvm_msr_list *kvm_get_msr_feature_list(int kvm_fd, int nmsrs)
+{
+       struct kvm_msr_list *list;
+       int r;
+
+       list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
+       list->nmsrs = nmsrs;
+       r = ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, list);
+
+       TEST_ASSERT(r == 0,
+               "Unexpected result from KVM_GET_MSR_FEATURE_INDEX_LIST, r: %i",
+               r);
+
+       return list;
+}
+
+static void test_get_msr_feature(void)
+{
+       int res, old_res, i, kvm_fd;
+       struct kvm_msr_list *feature_list;
+
+       kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
+       if (kvm_fd < 0)
+               exit(KSFT_SKIP);
+
+       old_res = kvm_num_feature_msrs(kvm_fd, 0);
+       TEST_ASSERT(old_res != 0, "Expecting nmsrs to be > 0");
+
+       if (old_res != 1) {
+               res = kvm_num_feature_msrs(kvm_fd, 1);
+               TEST_ASSERT(res > 1, "Expecting nmsrs to be > 1");
+               TEST_ASSERT(res == old_res, "Expecting nmsrs to be identical");
+       }
+
+       feature_list = kvm_get_msr_feature_list(kvm_fd, old_res);
+       TEST_ASSERT(old_res == feature_list->nmsrs,
+                               "Mismatched number of msr indexes");
+
+       for (i = 0; i < feature_list->nmsrs; i++)
+               kvm_get_feature_msr(feature_list->indices[i]);
+
+       free(feature_list);
+       close(kvm_fd);
+}
+
+int main(int argc, char *argv[])
+{
+       if (kvm_check_cap(KVM_CAP_GET_MSR_FEATURES))
+               test_get_msr_feature();
+
+       test_get_msr_index();
+}
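
Reviewer note: both nmsrs probes rely on the same KVM list-query idiom: an
undersized buffer makes the ioctl fail with E2BIG while the required count
is written back into nmsrs, so one probing call plus one full-sized call
fetches any list. A generic sketch of the idiom (hypothetical helper, error
handling elided):

	static struct kvm_msr_list *get_msr_list(int kvm_fd, unsigned long req)
	{
		struct kvm_msr_list probe = { .nmsrs = 0 };
		struct kvm_msr_list *list;

		/* Fails with E2BIG; KVM stores the required count. */
		ioctl(kvm_fd, req, &probe);

		list = malloc(sizeof(*list) + probe.nmsrs * sizeof(list->indices[0]));
		list->nmsrs = probe.nmsrs;
		ioctl(kvm_fd, req, list);	/* now succeeds */
		return list;
	}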
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_clock.c b/tools/testing/selftests/kvm/x86_64/hyperv_clock.c
new file mode 100644 (file)
index 0000000..7f1d276
--- /dev/null
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021, Red Hat, Inc.
+ *
+ * Tests for Hyper-V clocksources
+ */
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+
+struct ms_hyperv_tsc_page {
+       volatile u32 tsc_sequence;
+       u32 reserved1;
+       volatile u64 tsc_scale;
+       volatile s64 tsc_offset;
+} __packed;
+
+#define HV_X64_MSR_GUEST_OS_ID                 0x40000000
+#define HV_X64_MSR_TIME_REF_COUNT              0x40000020
+#define HV_X64_MSR_REFERENCE_TSC               0x40000021
+#define HV_X64_MSR_TSC_FREQUENCY               0x40000022
+#define HV_X64_MSR_REENLIGHTENMENT_CONTROL     0x40000106
+#define HV_X64_MSR_TSC_EMULATION_CONTROL       0x40000107
+
+/* Simplified mul_u64_u64_shr() */
+static inline u64 mul_u64_u64_shr64(u64 a, u64 b)
+{
+       union {
+               u64 ll;
+               struct {
+                       u32 low, high;
+               } l;
+       } rm, rn, rh, a0, b0;
+       u64 c;
+
+       a0.ll = a;
+       b0.ll = b;
+
+       rm.ll = (u64)a0.l.low * b0.l.high;
+       rn.ll = (u64)a0.l.high * b0.l.low;
+       rh.ll = (u64)a0.l.high * b0.l.high;
+
+       rh.l.low = c = rm.l.high + rn.l.high + rh.l.low;
+       rh.l.high = (c >> 32) + rh.l.high;
+
+       return rh.ll;
+}
+
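+/* Reviewer note: this helper feeds the Hyper-V TLFS reference-time formula,
+ * time = ((tsc * scale) >> 64) + offset (see get_tscpage_ts() below). On
+ * compilers with __int128 the exact high half would simply be:
+ *
+ *	return (u64)(((unsigned __int128)a * b) >> 64);
+ *
+ * The simplified version above drops the low x low partial product, so it
+ * may differ from the exact result by a few counts.
+ */
+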
+static inline void nop_loop(void)
+{
+       int i;
+
+       for (i = 0; i < 1000000; i++)
+               asm volatile("nop");
+}
+
+static inline void check_tsc_msr_rdtsc(void)
+{
+       u64 tsc_freq, r1, r2, t1, t2;
+       s64 delta_ns;
+
+       tsc_freq = rdmsr(HV_X64_MSR_TSC_FREQUENCY);
+       GUEST_ASSERT(tsc_freq > 0);
+
+       /* First, check MSR-based clocksource */
+       r1 = rdtsc();
+       t1 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
+       nop_loop();
+       r2 = rdtsc();
+       t2 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
+
+       GUEST_ASSERT(r2 > r1 && t2 > t1);
+
+       /* HV_X64_MSR_TIME_REF_COUNT is in 100ns */
+       delta_ns = ((t2 - t1) * 100) - ((r2 - r1) * 1000000000 / tsc_freq);
+       if (delta_ns < 0)
+               delta_ns = -delta_ns;
+
+       /* 1% tolerance */
+       GUEST_ASSERT(delta_ns * 100 < (t2 - t1) * 100);
+}
+
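+/* Reviewer note: the "1% tolerance" assertion above is exact but dense.
+ * With the 100 ns tick made explicit it is equivalent to:
+ *
+ *	u64 elapsed_ns = (t2 - t1) * 100;
+ *	GUEST_ASSERT(delta_ns < elapsed_ns / 100);
+ *
+ * since delta_ns * 100 < (t2 - t1) * 100 reduces to delta_ns < t2 - t1.
+ * The same arithmetic appears in host_check_tsc_msr_rdtsc() below.
+ */
+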
+static inline u64 get_tscpage_ts(struct ms_hyperv_tsc_page *tsc_page)
+{
+       return mul_u64_u64_shr64(rdtsc(), tsc_page->tsc_scale) + tsc_page->tsc_offset;
+}
+
+static inline void check_tsc_msr_tsc_page(struct ms_hyperv_tsc_page *tsc_page)
+{
+       u64 r1, r2, t1, t2;
+
+       /* Compare TSC page clocksource with HV_X64_MSR_TIME_REF_COUNT */
+       t1 = get_tscpage_ts(tsc_page);
+       r1 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
+
+       /* 10 ms tolerance */
+       GUEST_ASSERT(r1 >= t1 && r1 - t1 < 100000);
+       nop_loop();
+
+       t2 = get_tscpage_ts(tsc_page);
+       r2 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
+       GUEST_ASSERT(r2 >= t2 && r2 - t2 < 100000);
+}
+
+static void guest_main(struct ms_hyperv_tsc_page *tsc_page, vm_paddr_t tsc_page_gpa)
+{
+       u64 tsc_scale, tsc_offset;
+
+       /* Set Guest OS id to enable Hyper-V emulation */
+       GUEST_SYNC(1);
+       wrmsr(HV_X64_MSR_GUEST_OS_ID, (u64)0x8100 << 48);
+       GUEST_SYNC(2);
+
+       check_tsc_msr_rdtsc();
+
+       GUEST_SYNC(3);
+
+       /* Set up the TSC page in the disabled state and check that it's clean */
+       wrmsr(HV_X64_MSR_REFERENCE_TSC, tsc_page_gpa);
+       GUEST_ASSERT(tsc_page->tsc_sequence == 0);
+       GUEST_ASSERT(tsc_page->tsc_scale == 0);
+       GUEST_ASSERT(tsc_page->tsc_offset == 0);
+
+       GUEST_SYNC(4);
+
+       /* Set up the TSC page in the enabled state */
+       wrmsr(HV_X64_MSR_REFERENCE_TSC, tsc_page_gpa | 0x1);
+       GUEST_ASSERT(tsc_page->tsc_sequence != 0);
+
+       GUEST_SYNC(5);
+
+       check_tsc_msr_tsc_page(tsc_page);
+
+       GUEST_SYNC(6);
+
+       tsc_offset = tsc_page->tsc_offset;
+       /* Call KVM_SET_CLOCK from userspace, check that TSC page was updated */
+
+       GUEST_SYNC(7);
+       /* Sanity check the TSC page timestamp; it should be close to 0 */
+       GUEST_ASSERT(get_tscpage_ts(tsc_page) < 100000);
+
+       GUEST_ASSERT(tsc_page->tsc_offset != tsc_offset);
+
+       nop_loop();
+
+       /*
+        * Enable re-enlightenment and check that the TSC page stays constant across
+        * KVM_SET_CLOCK.
+        */
+       wrmsr(HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0x1 << 16 | 0xff);
+       wrmsr(HV_X64_MSR_TSC_EMULATION_CONTROL, 0x1);
+       tsc_offset = tsc_page->tsc_offset;
+       tsc_scale = tsc_page->tsc_scale;
+       GUEST_SYNC(8);
+       GUEST_ASSERT(tsc_page->tsc_offset == tsc_offset);
+       GUEST_ASSERT(tsc_page->tsc_scale == tsc_scale);
+
+       GUEST_SYNC(9);
+
+       check_tsc_msr_tsc_page(tsc_page);
+
+       /*
+        * Disable re-enlightenment and the TSC page; check that KVM no longer
+        * updates it.
+        */
+       wrmsr(HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0);
+       wrmsr(HV_X64_MSR_TSC_EMULATION_CONTROL, 0);
+       wrmsr(HV_X64_MSR_REFERENCE_TSC, 0);
+       memset(tsc_page, 0, sizeof(*tsc_page));
+
+       GUEST_SYNC(10);
+       GUEST_ASSERT(tsc_page->tsc_sequence == 0);
+       GUEST_ASSERT(tsc_page->tsc_offset == 0);
+       GUEST_ASSERT(tsc_page->tsc_scale == 0);
+
+       GUEST_DONE();
+}
+
+#define VCPU_ID 0
+
+static void host_check_tsc_msr_rdtsc(struct kvm_vm *vm)
+{
+       u64 tsc_freq, r1, r2, t1, t2;
+       s64 delta_ns;
+
+       tsc_freq = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TSC_FREQUENCY);
+       TEST_ASSERT(tsc_freq > 0, "TSC frequency must be nonzero");
+
+       /* First, check MSR-based clocksource */
+       r1 = rdtsc();
+       t1 = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TIME_REF_COUNT);
+       nop_loop();
+       r2 = rdtsc();
+       t2 = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TIME_REF_COUNT);
+
+       TEST_ASSERT(t2 > t1, "Time reference MSR is not monotonic (%ld <= %ld)", t1, t2);
+
+       /* HV_X64_MSR_TIME_REF_COUNT is in 100ns */
+       delta_ns = ((t2 - t1) * 100) - ((r2 - r1) * 1000000000 / tsc_freq);
+       if (delta_ns < 0)
+               delta_ns = -delta_ns;
+
+       /* 1% tolerance */
+       TEST_ASSERT(delta_ns * 100 < (t2 - t1) * 100,
+                   "Elapsed time does not match (MSR=%ld, TSC=%ld)",
+                   (t2 - t1) * 100, (r2 - r1) * 1000000000 / tsc_freq);
+}
+
+int main(void)
+{
+       struct kvm_vm *vm;
+       struct kvm_run *run;
+       struct ucall uc;
+       vm_vaddr_t tsc_page_gva;
+       int stage;
+
+       vm = vm_create_default(VCPU_ID, 0, guest_main);
+       run = vcpu_state(vm, VCPU_ID);
+
+       vcpu_set_hv_cpuid(vm, VCPU_ID);
+
+       tsc_page_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+       memset(addr_gpa2hva(vm, tsc_page_gva), 0x0, getpagesize());
+       TEST_ASSERT((addr_gva2gpa(vm, tsc_page_gva) & (getpagesize() - 1)) == 0,
+               "TSC page has to be page aligned\n");
+       vcpu_args_set(vm, VCPU_ID, 2, tsc_page_gva, addr_gva2gpa(vm, tsc_page_gva));
+
+       host_check_tsc_msr_rdtsc(vm);
+
+       for (stage = 1;; stage++) {
+               _vcpu_run(vm, VCPU_ID);
+               TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+                           "Stage %d: unexpected exit reason: %u (%s),\n",
+                           stage, run->exit_reason,
+                           exit_reason_str(run->exit_reason));
+
+               switch (get_ucall(vm, VCPU_ID, &uc)) {
+               case UCALL_ABORT:
+                       TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
+                                 __FILE__, uc.args[1]);
+                       /* NOT REACHED */
+               case UCALL_SYNC:
+                       break;
+               case UCALL_DONE:
+                       /* Keep in sync with guest_main() */
+                       TEST_ASSERT(stage == 11, "Testing ended prematurely, stage %d\n",
+                                   stage);
+                       goto out;
+               default:
+                       TEST_FAIL("Unknown ucall %lu", uc.cmd);
+               }
+
+               TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
+                           uc.args[1] == stage,
+                           "Stage %d: Unexpected register values vmexit, got %lx",
+                           stage, (ulong)uc.args[1]);
+
+               /* Reset kvmclock, triggering a TSC page update */
+               if (stage == 7 || stage == 8 || stage == 10) {
+                       struct kvm_clock_data clock = {0};
+
+                       vm_ioctl(vm, KVM_SET_CLOCK, &clock);
+               }
+       }
+
+out:
+       kvm_vm_free(vm);
+}
diff --git a/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c b/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
new file mode 100644 (file)
index 0000000..12c558f
--- /dev/null
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test that KVM_SET_BOOT_CPU_ID works as intended
+ *
+ * Copyright (C) 2020, Red Hat, Inc.
+ */
+#define _GNU_SOURCE /* for program_invocation_name */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+
+#define N_VCPU 2
+#define VCPU_ID0 0
+#define VCPU_ID1 1
+
+static uint32_t get_bsp_flag(void)
+{
+       return rdmsr(MSR_IA32_APICBASE) & MSR_IA32_APICBASE_BSP;
+}
+
+static void guest_bsp_vcpu(void *arg)
+{
+       GUEST_SYNC(1);
+
+       GUEST_ASSERT(get_bsp_flag() != 0);
+
+       GUEST_DONE();
+}
+
+static void guest_not_bsp_vcpu(void *arg)
+{
+       GUEST_SYNC(1);
+
+       GUEST_ASSERT(get_bsp_flag() == 0);
+
+       GUEST_DONE();
+}
+
+static void test_set_boot_busy(struct kvm_vm *vm)
+{
+       int res;
+
+       res = _vm_ioctl(vm, KVM_SET_BOOT_CPU_ID, (void *) VCPU_ID0);
+       TEST_ASSERT(res == -1 && errno == EBUSY,
+                       "KVM_SET_BOOT_CPU_ID set while running vm");
+}
+
+static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid)
+{
+       struct ucall uc;
+       int stage;
+
+       for (stage = 0; stage < 2; stage++) {
+
+               vcpu_run(vm, vcpuid);
+
+               switch (get_ucall(vm, vcpuid, &uc)) {
+               case UCALL_SYNC:
+                       TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
+                                       uc.args[1] == stage + 1,
+                                       "Stage %d: Unexpected register values vmexit, got %lx",
+                                       stage + 1, (ulong)uc.args[1]);
+                       test_set_boot_busy(vm);
+                       break;
+               case UCALL_DONE:
+                       TEST_ASSERT(stage == 1,
+                                       "Expected GUEST_DONE in stage 2, got stage %d",
+                                       stage);
+                       break;
+               case UCALL_ABORT:
+                       TEST_ASSERT(false, "%s at %s:%ld\n\tvalues: %#lx, %#lx",
+                                               (const char *)uc.args[0], __FILE__,
+                                               uc.args[1], uc.args[2], uc.args[3]);
+               default:
+                       TEST_ASSERT(false, "Unexpected exit: %s",
+                                       exit_reason_str(vcpu_state(vm, vcpuid)->exit_reason));
+               }
+       }
+}
+
+static struct kvm_vm *create_vm(void)
+{
+       struct kvm_vm *vm;
+       uint64_t vcpu_pages = (DEFAULT_STACK_PGS) * 2;
+       uint64_t extra_pg_pages = vcpu_pages / PTES_PER_MIN_PAGE * N_VCPU;
+       uint64_t pages = DEFAULT_GUEST_PHY_PAGES + vcpu_pages + extra_pg_pages;
+
+       pages = vm_adjust_num_guest_pages(VM_MODE_DEFAULT, pages);
+       vm = vm_create(VM_MODE_DEFAULT, pages, O_RDWR);
+
+       kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
+       vm_create_irqchip(vm);
+
+       return vm;
+}
+
+static void add_x86_vcpu(struct kvm_vm *vm, uint32_t vcpuid, bool bsp_code)
+{
+       if (bsp_code)
+               vm_vcpu_add_default(vm, vcpuid, guest_bsp_vcpu);
+       else
+               vm_vcpu_add_default(vm, vcpuid, guest_not_bsp_vcpu);
+
+       vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid());
+}
+
+static void run_vm_bsp(uint32_t bsp_vcpu)
+{
+       struct kvm_vm *vm;
+       bool is_bsp_vcpu1 = bsp_vcpu == VCPU_ID1;
+
+       vm = create_vm();
+
+       if (is_bsp_vcpu1)
+               vm_ioctl(vm, KVM_SET_BOOT_CPU_ID, (void *) VCPU_ID1);
+
+       add_x86_vcpu(vm, VCPU_ID0, !is_bsp_vcpu1);
+       add_x86_vcpu(vm, VCPU_ID1, is_bsp_vcpu1);
+
+       run_vcpu(vm, VCPU_ID0);
+       run_vcpu(vm, VCPU_ID1);
+
+       kvm_vm_free(vm);
+}
+
+static void check_set_bsp_busy(void)
+{
+       struct kvm_vm *vm;
+       int res;
+
+       vm = create_vm();
+
+       add_x86_vcpu(vm, VCPU_ID0, true);
+       add_x86_vcpu(vm, VCPU_ID1, false);
+
+       res = _vm_ioctl(vm, KVM_SET_BOOT_CPU_ID, (void *) VCPU_ID1);
+       TEST_ASSERT(res == -1 && errno == EBUSY, "KVM_SET_BOOT_CPU_ID set after adding vcpu");
+
+       run_vcpu(vm, VCPU_ID0);
+       run_vcpu(vm, VCPU_ID1);
+
+       res = _vm_ioctl(vm, KVM_SET_BOOT_CPU_ID, (void *) VCPU_ID1);
+       TEST_ASSERT(res == -1 && errno == EBUSY, "KVM_SET_BOOT_CPU_ID set to a terminated vcpu");
+
+       kvm_vm_free(vm);
+}
+
+int main(int argc, char *argv[])
+{
+       if (!kvm_check_cap(KVM_CAP_SET_BOOT_CPU_ID)) {
+               print_skip("set_boot_cpu_id not available");
+               return 0;
+       }
+
+       run_vm_bsp(VCPU_ID0);
+       run_vm_bsp(VCPU_ID1);
+       run_vm_bsp(VCPU_ID0);
+
+       check_set_bsp_busy();
+}
index ce6bea9..eb307ca 100755 (executable)
@@ -657,10 +657,21 @@ test_ecn_decap()
 {
        # In accordance with INET_ECN_decapsulate()
        __test_ecn_decap 00 00 0x00
+       __test_ecn_decap 00 01 0x00
+       __test_ecn_decap 00 02 0x00
+       # 00 03 is tested in test_ecn_decap_error()
+       __test_ecn_decap 01 00 0x01
        __test_ecn_decap 01 01 0x01
-       __test_ecn_decap 02 01 0x02
+       __test_ecn_decap 01 02 0x01
        __test_ecn_decap 01 03 0x03
+       __test_ecn_decap 02 00 0x02
+       __test_ecn_decap 02 01 0x01
+       __test_ecn_decap 02 02 0x02
        __test_ecn_decap 02 03 0x03
+       __test_ecn_decap 03 00 0x03
+       __test_ecn_decap 03 01 0x03
+       __test_ecn_decap 03 02 0x03
+       __test_ecn_decap 03 03 0x03
        test_ecn_decap_error
 }
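
Reviewer note: the expanded list now covers the full RFC 6040 decapsulation
table. Reading the arguments as <inner> <outer> ECN codepoints (inferred
from "00 03" being the drop combination), the expected results are:

	inner \ outer    00    01    02    03
	00 (Not-ECT)     00    00    00    drop (error test)
	01 (ECT(1))      01    01    01    03
	02 (ECT(0))      02    01    02    03
	03 (CE)          03    03    03    03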
 
index 964db9e..ad32240 100755 (executable)
@@ -11,6 +11,7 @@ ksft_skip=4
 timeout=30
 mptcp_connect=""
 capture=0
+do_all_tests=1
 
 TEST_COUNT=0
 
@@ -121,12 +122,6 @@ reset_with_add_addr_timeout()
                -j DROP
 }
 
-for arg in "$@"; do
-       if [ "$arg" = "-c" ]; then
-               capture=1
-       fi
-done
-
 ip -Version > /dev/null 2>&1
 if [ $? -ne 0 ];then
        echo "SKIP: Could not run test without ip tool"
@@ -1221,7 +1216,8 @@ usage()
        echo "  -4 v4mapped_tests"
        echo "  -b backup_tests"
        echo "  -p add_addr_ports_tests"
-       echo "  -c syncookies_tests"
+       echo "  -k syncookies_tests"
+       echo "  -c capture pcap files"
        echo "  -h help"
 }
 
@@ -1235,12 +1231,24 @@ make_file "$cin" "client" 1
 make_file "$sin" "server" 1
 trap cleanup EXIT
 
-if [ -z $1 ]; then
+for arg in "$@"; do
+       # check for "capture" arg before launching tests
+       if [[ "${arg}" =~ ^"-"[0-9a-zA-Z]*"c"[0-9a-zA-Z]*$ ]]; then
+               capture=1
+       fi
+
+       # anything other than the capture option selects a subset of the tests
+       if [ "${arg}" != "-c" ]; then
+               do_all_tests=0
+       fi
+done
+
+if [ $do_all_tests -eq 1 ]; then
        all_tests
        exit $ret
 fi
 
-while getopts 'fsltra64bpch' opt; do
+while getopts 'fsltra64bpkch' opt; do
        case $opt in
                f)
                        subflows_tests
@@ -1272,9 +1280,11 @@ while getopts 'fsltra64bpch' opt; do
                p)
                        add_addr_ports_tests
                        ;;
-               c)
+               k)
                        syncookies_tests
                        ;;
+               c)
+                       ;;
                h | *)
                        usage
                        ;;
index 7b01b7c..066efd3 100644 (file)
@@ -30,25 +30,25 @@ struct reuse_opts {
 };
 
 struct reuse_opts unreusable_opts[12] = {
-       {0, 0, 0, 0},
-       {0, 0, 0, 1},
-       {0, 0, 1, 0},
-       {0, 0, 1, 1},
-       {0, 1, 0, 0},
-       {0, 1, 0, 1},
-       {0, 1, 1, 0},
-       {0, 1, 1, 1},
-       {1, 0, 0, 0},
-       {1, 0, 0, 1},
-       {1, 0, 1, 0},
-       {1, 0, 1, 1},
+       {{0, 0}, {0, 0}},
+       {{0, 0}, {0, 1}},
+       {{0, 0}, {1, 0}},
+       {{0, 0}, {1, 1}},
+       {{0, 1}, {0, 0}},
+       {{0, 1}, {0, 1}},
+       {{0, 1}, {1, 0}},
+       {{0, 1}, {1, 1}},
+       {{1, 0}, {0, 0}},
+       {{1, 0}, {0, 1}},
+       {{1, 0}, {1, 0}},
+       {{1, 0}, {1, 1}},
 };
 
 struct reuse_opts reusable_opts[4] = {
-       {1, 1, 0, 0},
-       {1, 1, 0, 1},
-       {1, 1, 1, 0},
-       {1, 1, 1, 1},
+       {{1, 1}, {0, 0}},
+       {{1, 1}, {0, 1}},
+       {{1, 1}, {1, 0}},
+       {{1, 1}, {1, 1}},
 };
 
 int bind_port(struct __test_metadata *_metadata, int reuseaddr, int reuseport)
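
Reviewer note: the added braces fix missing-braces warnings and imply that
struct reuse_opts wraps two two-element arrays, roughly (an assumption
inferred from the initializers and bind_port()'s parameters; the real field
names may differ):

	struct reuse_opts {
		int reuseaddr[2];	/* SO_REUSEADDR for the two sockets */
		int reuseport[2];	/* SO_REUSEPORT for the two sockets */
	};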
index d42115e..8b0cd42 100644 (file)
@@ -101,7 +101,7 @@ endef
 ifeq ($(CAN_BUILD_I386),1)
 $(BINARIES_32): CFLAGS += -m32
 $(BINARIES_32): LDLIBS += -lrt -ldl -lm
-$(BINARIES_32): %_32: %.c
+$(BINARIES_32): $(OUTPUT)/%_32: %.c
        $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@
 $(foreach t,$(TARGETS),$(eval $(call gen-target-rule-32,$(t))))
 endif
@@ -109,7 +109,7 @@ endif
 ifeq ($(CAN_BUILD_X86_64),1)
 $(BINARIES_64): CFLAGS += -m64
 $(BINARIES_64): LDLIBS += -lrt -ldl
-$(BINARIES_64): %_64: %.c
+$(BINARIES_64): $(OUTPUT)/%_64: %.c
        $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@
 $(foreach t,$(TARGETS),$(eval $(call gen-target-rule-64,$(t))))
 endif