Merge branch 'topic/ppc-kvm' into next
author	Michael Ellerman <mpe@ellerman.id.au>
	Thu, 19 May 2022 13:10:42 +0000 (23:10 +1000)
committer	Michael Ellerman <mpe@ellerman.id.au>
	Thu, 19 May 2022 13:10:42 +0000 (23:10 +1000)
Merge our KVM topic branch.

1086 files changed:
Documentation/ABI/testing/sysfs-class-cxl
Documentation/ABI/testing/sysfs-class-firmware-attributes
Documentation/ABI/testing/sysfs-driver-intel_sdsi
Documentation/ABI/testing/sysfs-fs-erofs
Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml
Documentation/devicetree/bindings/bus/ti-sysc.yaml
Documentation/devicetree/bindings/clock/samsung,exynos-audss-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos-ext-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos4412-isp-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos5260-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos5410-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos5433-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos7-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos7885-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos850-clock.yaml
Documentation/devicetree/bindings/clock/samsung,s2mps11.yaml
Documentation/devicetree/bindings/clock/samsung,s5pv210-audss-clock.yaml
Documentation/devicetree/bindings/clock/samsung,s5pv210-clock.yaml
Documentation/devicetree/bindings/devfreq/event/samsung,exynos-nocp.yaml
Documentation/devicetree/bindings/devfreq/event/samsung,exynos-ppmu.yaml
Documentation/devicetree/bindings/display/msm/dpu-qcm2290.yaml
Documentation/devicetree/bindings/display/panel/panel-timing.yaml
Documentation/devicetree/bindings/display/samsung/samsung,exynos-hdmi-ddc.yaml
Documentation/devicetree/bindings/display/samsung/samsung,exynos-hdmi.yaml
Documentation/devicetree/bindings/display/samsung/samsung,exynos-mixer.yaml
Documentation/devicetree/bindings/display/samsung/samsung,exynos5433-decon.yaml
Documentation/devicetree/bindings/display/samsung/samsung,exynos5433-mic.yaml
Documentation/devicetree/bindings/display/samsung/samsung,exynos7-decon.yaml
Documentation/devicetree/bindings/display/samsung/samsung,fimd.yaml
Documentation/devicetree/bindings/dma/qcom,gpi.yaml
Documentation/devicetree/bindings/extcon/maxim,max77843.yaml
Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
Documentation/devicetree/bindings/hwmon/lltc,ltc4151.yaml
Documentation/devicetree/bindings/hwmon/microchip,mcp3021.yaml
Documentation/devicetree/bindings/hwmon/sensirion,sht15.yaml
Documentation/devicetree/bindings/hwmon/ti,tmp102.yaml
Documentation/devicetree/bindings/hwmon/ti,tmp108.yaml
Documentation/devicetree/bindings/hwmon/ti,tmp464.yaml
Documentation/devicetree/bindings/i2c/i2c-exynos5.yaml
Documentation/devicetree/bindings/i2c/samsung,s3c2410-i2c.yaml
Documentation/devicetree/bindings/iio/adc/adi,ad7476.yaml
Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
Documentation/devicetree/bindings/iio/dac/adi,ad5360.yaml
Documentation/devicetree/bindings/interconnect/qcom,rpm.yaml
Documentation/devicetree/bindings/interrupt-controller/mrvl,intc.yaml
Documentation/devicetree/bindings/interrupt-controller/samsung,exynos4210-combiner.yaml
Documentation/devicetree/bindings/leds/maxim,max77693.yaml
Documentation/devicetree/bindings/media/coda.yaml
Documentation/devicetree/bindings/media/mediatek,vcodec-decoder.yaml
Documentation/devicetree/bindings/media/mediatek,vcodec-encoder.yaml
Documentation/devicetree/bindings/media/mediatek,vcodec-subdev-decoder.yaml
Documentation/devicetree/bindings/memory-controllers/brcm,dpfe-cpu.yaml
Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr2-timings.yaml
Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr2.yaml
Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr3-timings.yaml
Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr3.yaml
Documentation/devicetree/bindings/memory-controllers/marvell,mvebu-sdram-controller.yaml
Documentation/devicetree/bindings/memory-controllers/qca,ath79-ddr-controller.yaml
Documentation/devicetree/bindings/memory-controllers/renesas,h8300-bsc.yaml
Documentation/devicetree/bindings/memory-controllers/samsung,exynos5422-dmc.yaml
Documentation/devicetree/bindings/memory-controllers/synopsys,ddrc-ecc.yaml
Documentation/devicetree/bindings/memory-controllers/ti,da8xx-ddrctl.yaml
Documentation/devicetree/bindings/mfd/maxim,max14577.yaml
Documentation/devicetree/bindings/mfd/maxim,max77686.yaml
Documentation/devicetree/bindings/mfd/maxim,max77693.yaml
Documentation/devicetree/bindings/mfd/maxim,max77802.yaml
Documentation/devicetree/bindings/mfd/maxim,max77843.yaml
Documentation/devicetree/bindings/mfd/samsung,exynos5433-lpass.yaml
Documentation/devicetree/bindings/mfd/samsung,s2mpa01.yaml
Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml
Documentation/devicetree/bindings/mfd/samsung,s5m8767.yaml
Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.yaml
Documentation/devicetree/bindings/net/nfc/marvell,nci.yaml
Documentation/devicetree/bindings/net/nfc/nxp,nci.yaml
Documentation/devicetree/bindings/net/nfc/nxp,pn532.yaml
Documentation/devicetree/bindings/net/nfc/nxp,pn544.yaml
Documentation/devicetree/bindings/net/nfc/st,st-nci.yaml
Documentation/devicetree/bindings/net/nfc/st,st21nfca.yaml
Documentation/devicetree/bindings/net/nfc/st,st95hf.yaml
Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml
Documentation/devicetree/bindings/net/snps,dwmac.yaml
Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml
Documentation/devicetree/bindings/net/ti,davinci-mdio.yaml
Documentation/devicetree/bindings/phy/nvidia,tegra20-usb-phy.yaml
Documentation/devicetree/bindings/phy/qcom,usb-hs-phy.yaml
Documentation/devicetree/bindings/phy/samsung,dp-video-phy.yaml
Documentation/devicetree/bindings/phy/samsung,exynos-hdmi-phy.yaml
Documentation/devicetree/bindings/phy/samsung,exynos5250-sata-phy.yaml
Documentation/devicetree/bindings/phy/samsung,mipi-video-phy.yaml
Documentation/devicetree/bindings/phy/samsung,usb2-phy.yaml
Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
Documentation/devicetree/bindings/pinctrl/cirrus,madera.yaml
Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-gpio-bank.yaml
Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-pins-cfg.yaml
Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-wakeup-interrupt.yaml
Documentation/devicetree/bindings/pinctrl/samsung,pinctrl.yaml
Documentation/devicetree/bindings/power/renesas,apmu.yaml
Documentation/devicetree/bindings/power/supply/bq2415x.yaml
Documentation/devicetree/bindings/power/supply/maxim,max14577.yaml
Documentation/devicetree/bindings/power/supply/maxim,max77693.yaml
Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
Documentation/devicetree/bindings/regulator/maxim,max14577.yaml
Documentation/devicetree/bindings/regulator/maxim,max77686.yaml
Documentation/devicetree/bindings/regulator/maxim,max77693.yaml
Documentation/devicetree/bindings/regulator/maxim,max77802.yaml
Documentation/devicetree/bindings/regulator/maxim,max77843.yaml
Documentation/devicetree/bindings/regulator/maxim,max8952.yaml
Documentation/devicetree/bindings/regulator/maxim,max8973.yaml
Documentation/devicetree/bindings/regulator/maxim,max8997.yaml
Documentation/devicetree/bindings/regulator/samsung,s2mpa01.yaml
Documentation/devicetree/bindings/regulator/samsung,s2mps11.yaml
Documentation/devicetree/bindings/regulator/samsung,s2mps13.yaml
Documentation/devicetree/bindings/regulator/samsung,s2mps14.yaml
Documentation/devicetree/bindings/regulator/samsung,s2mps15.yaml
Documentation/devicetree/bindings/regulator/samsung,s2mpu02.yaml
Documentation/devicetree/bindings/regulator/samsung,s5m8767.yaml
Documentation/devicetree/bindings/remoteproc/qcom,sc7280-wpss-pil.yaml
Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.yaml
Documentation/devicetree/bindings/reset/socionext,uniphier-reset.yaml
Documentation/devicetree/bindings/rng/samsung,exynos5250-trng.yaml
Documentation/devicetree/bindings/rng/timeriomem_rng.yaml
Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml
Documentation/devicetree/bindings/sound/samsung,arndale.yaml
Documentation/devicetree/bindings/sound/samsung,smdk5250.yaml
Documentation/devicetree/bindings/sound/samsung,snow.yaml
Documentation/devicetree/bindings/sound/samsung,tm2.yaml
Documentation/devicetree/bindings/sound/st,stm32-sai.yaml
Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml
Documentation/devicetree/bindings/spi/samsung,spi-peripheral-props.yaml
Documentation/devicetree/bindings/spi/samsung,spi.yaml
Documentation/devicetree/bindings/sram/sram.yaml
Documentation/devicetree/bindings/thermal/samsung,exynos-thermal.yaml
Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml
Documentation/devicetree/bindings/usb/samsung,exynos-usb2.yaml
Documentation/filesystems/caching/backend-api.rst
Documentation/filesystems/caching/netfs-api.rst
Documentation/filesystems/ext4/attributes.rst
Documentation/networking/bonding.rst
Documentation/networking/ip-sysctl.rst
Documentation/virt/kvm/api.rst
Documentation/virt/kvm/vcpu-requests.rst
Documentation/virt/kvm/x86/amd-memory-encryption.rst
Documentation/virt/kvm/x86/errata.rst
Documentation/virt/kvm/x86/running-nested-guests.rst
MAINTAINERS
Makefile
arch/Kconfig
arch/arc/boot/dts/hsdk.dts
arch/arc/include/asm/atomic-llsc.h
arch/arc/include/asm/pgtable-levels.h
arch/arc/kernel/disasm.c
arch/arc/kernel/entry.S
arch/arc/kernel/signal.c
arch/arc/kernel/smp.c
arch/arc/kernel/unaligned.c
arch/arc/mm/cache.c
arch/arm/boot/dts/at91-kizbox3-hs.dts
arch/arm/boot/dts/at91-kizbox3_common.dtsi
arch/arm/boot/dts/at91-sam9_l9260.dts
arch/arm/boot/dts/da850-evm.dts
arch/arm/boot/dts/dm8168-evm.dts
arch/arm/boot/dts/imx28-ts4600.dts
arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
arch/arm/boot/dts/imx6ul-phytec-segin-peb-av-02.dtsi
arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
arch/arm/boot/dts/qcom-apq8064-pins.dtsi
arch/arm/boot/dts/qcom-ipq8064.dtsi
arch/arm/boot/dts/spear1310-evb.dts
arch/arm/boot/dts/spear1340-evb.dts
arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi
arch/arm/boot/dts/stm32mp157c-ev1.dts
arch/arm/configs/gemini_defconfig
arch/arm/configs/imote2_defconfig [deleted file]
arch/arm/configs/u8500_defconfig
arch/arm/mach-davinci/board-da850-evm.c
arch/arm/mach-ep93xx/clock.c
arch/arm/mach-iop32x/cp6.c
arch/arm/mach-vexpress/spc.c
arch/arm/xen/enlighten.c
arch/arm64/Kconfig
arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
arch/arm64/boot/dts/nvidia/tegra186-p3509-0000+p3636-0001.dts
arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi
arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
arch/arm64/boot/dts/nvidia/tegra210-p2894.dtsi
arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
arch/arm64/boot/dts/qcom/msm8996.dtsi
arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/processor.h
arch/arm64/kvm/mmu.c
arch/arm64/kvm/psci.c
arch/arm64/kvm/reset.c
arch/arm64/kvm/vgic/vgic-debug.c
arch/arm64/kvm/vgic/vgic-its.c
arch/powerpc/Kconfig
arch/powerpc/boot/Makefile
arch/powerpc/boot/crt0.S
arch/powerpc/boot/cuboot-hotfoot.c
arch/powerpc/boot/ops.h
arch/powerpc/crypto/aes-spe-glue.c
arch/powerpc/include/asm/book3s/64/hugetlb.h
arch/powerpc/include/asm/book3s/64/mmu-hash.h
arch/powerpc/include/asm/book3s/64/mmu.h
arch/powerpc/include/asm/book3s/64/slice.h
arch/powerpc/include/asm/checksum.h
arch/powerpc/include/asm/code-patching.h
arch/powerpc/include/asm/drmem.h
arch/powerpc/include/asm/eeh.h
arch/powerpc/include/asm/fadump-internal.h
arch/powerpc/include/asm/hugetlb.h
arch/powerpc/include/asm/io.h
arch/powerpc/include/asm/kup.h
arch/powerpc/include/asm/nohash/tlbflush.h
arch/powerpc/include/asm/paca.h
arch/powerpc/include/asm/page.h
arch/powerpc/include/asm/parport.h
arch/powerpc/include/asm/pci-bridge.h
arch/powerpc/include/asm/pnv-pci.h
arch/powerpc/include/asm/ppc-opcode.h
arch/powerpc/include/asm/probes.h
arch/powerpc/include/asm/processor.h
arch/powerpc/include/asm/slice.h [deleted file]
arch/powerpc/include/asm/smp.h
arch/powerpc/include/asm/svm.h
arch/powerpc/include/asm/switch_to.h
arch/powerpc/include/asm/task_size_64.h
arch/powerpc/include/asm/time.h
arch/powerpc/kernel/btext.c
arch/powerpc/kernel/cacheinfo.c
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/crash_dump.c
arch/powerpc/kernel/dawr.c
arch/powerpc/kernel/dt_cpu_ftrs.c
arch/powerpc/kernel/eeh.c
arch/powerpc/kernel/eeh_driver.c
arch/powerpc/kernel/eeh_event.c
arch/powerpc/kernel/eeh_pe.c
arch/powerpc/kernel/eeh_sysfs.c
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/fadump.c
arch/powerpc/kernel/idle.c
arch/powerpc/kernel/interrupt_64.S
arch/powerpc/kernel/iommu.c
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/isa-bridge.c
arch/powerpc/kernel/kprobes.c
arch/powerpc/kernel/legacy_serial.c
arch/powerpc/kernel/module.c
arch/powerpc/kernel/module_32.c
arch/powerpc/kernel/module_64.c
arch/powerpc/kernel/nvram_64.c
arch/powerpc/kernel/paca.c
arch/powerpc/kernel/pci-common.c
arch/powerpc/kernel/pci-hotplug.c
arch/powerpc/kernel/pci_32.c
arch/powerpc/kernel/pci_64.c
arch/powerpc/kernel/pci_dn.c
arch/powerpc/kernel/pci_of_scan.c
arch/powerpc/kernel/proc_powerpc.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/prom.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/ptrace/ptrace-view.c
arch/powerpc/kernel/rtas-proc.c
arch/powerpc/kernel/rtas-rtc.c
arch/powerpc/kernel/rtas.c
arch/powerpc/kernel/rtas_flash.c
arch/powerpc/kernel/rtas_pci.c
arch/powerpc/kernel/rtasd.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/setup_32.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/syscalls.c
arch/powerpc/kernel/sysfs.c
arch/powerpc/kernel/time.c
arch/powerpc/kernel/uprobes.c
arch/powerpc/kernel/vdso.c
arch/powerpc/kernel/watchdog.c
arch/powerpc/kexec/core.c
arch/powerpc/kexec/core_64.c
arch/powerpc/kexec/crash.c
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_64_mmu_radix.c
arch/powerpc/kvm/book3s_emulate.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_nested.c
arch/powerpc/kvm/book3s_hv_p9_entry.c
arch/powerpc/kvm/book3s_hv_uvmem.c
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/book3s_rtas.c
arch/powerpc/kvm/book3s_xics.c
arch/powerpc/kvm/book3s_xive.c
arch/powerpc/kvm/e500mc.c
arch/powerpc/kvm/powerpc.c
arch/powerpc/lib/code-patching.c
arch/powerpc/lib/sstep.c
arch/powerpc/mm/Makefile
arch/powerpc/mm/book3s32/mmu.c
arch/powerpc/mm/book3s64/Makefile
arch/powerpc/mm/book3s64/hash_pgtable.c
arch/powerpc/mm/book3s64/hash_utils.c
arch/powerpc/mm/book3s64/pgtable.c
arch/powerpc/mm/book3s64/radix_hugetlbpage.c
arch/powerpc/mm/book3s64/radix_pgtable.c
arch/powerpc/mm/book3s64/radix_tlb.c
arch/powerpc/mm/book3s64/slb.c
arch/powerpc/mm/book3s64/slice.c [new file with mode: 0644]
arch/powerpc/mm/drmem.c
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/init_32.c
arch/powerpc/mm/init_64.c
arch/powerpc/mm/mem.c
arch/powerpc/mm/mmap.c [deleted file]
arch/powerpc/mm/mmu_decl.h
arch/powerpc/mm/nohash/40x.c
arch/powerpc/mm/nohash/book3e_hugetlbpage.c
arch/powerpc/mm/nohash/fsl_book3e.c
arch/powerpc/mm/nohash/kaslr_booke.c
arch/powerpc/mm/nohash/mmu_context.c
arch/powerpc/mm/nohash/tlb.c
arch/powerpc/mm/numa.c
arch/powerpc/mm/pageattr.c
arch/powerpc/mm/pgtable-frag.c
arch/powerpc/mm/pgtable_64.c
arch/powerpc/mm/slice.c [deleted file]
arch/powerpc/perf/8xx-pmu.c
arch/powerpc/perf/core-book3s.c
arch/powerpc/perf/hv-24x7.c
arch/powerpc/perf/imc-pmu.c
arch/powerpc/perf/isa207-common.c
arch/powerpc/perf/power9-pmu.c
arch/powerpc/platforms/40x/ppc40x_simple.c
arch/powerpc/platforms/44x/canyonlands.c
arch/powerpc/platforms/44x/fsp2.c
arch/powerpc/platforms/44x/ppc44x_simple.c
arch/powerpc/platforms/44x/ppc476.c
arch/powerpc/platforms/44x/sam440ep.c
arch/powerpc/platforms/44x/warp.c
arch/powerpc/platforms/4xx/cpm.c
arch/powerpc/platforms/4xx/hsta_msi.c
arch/powerpc/platforms/4xx/pci.c
arch/powerpc/platforms/4xx/uic.c
arch/powerpc/platforms/512x/clock-commonclk.c
arch/powerpc/platforms/512x/mpc5121_ads.c
arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
arch/powerpc/platforms/512x/mpc512x_generic.c
arch/powerpc/platforms/512x/mpc512x_shared.c
arch/powerpc/platforms/52xx/efika.c
arch/powerpc/platforms/52xx/lite5200.c
arch/powerpc/platforms/52xx/lite5200_pm.c
arch/powerpc/platforms/52xx/media5200.c
arch/powerpc/platforms/52xx/mpc5200_simple.c
arch/powerpc/platforms/52xx/mpc52xx_common.c
arch/powerpc/platforms/52xx/mpc52xx_gpt.c
arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
arch/powerpc/platforms/52xx/mpc52xx_pci.c
arch/powerpc/platforms/52xx/mpc52xx_pic.c
arch/powerpc/platforms/52xx/mpc52xx_pm.c
arch/powerpc/platforms/82xx/ep8248e.c
arch/powerpc/platforms/82xx/km82xx.c
arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
arch/powerpc/platforms/83xx/km83xx.c
arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
arch/powerpc/platforms/83xx/mpc832x_mds.c
arch/powerpc/platforms/83xx/mpc832x_rdb.c
arch/powerpc/platforms/83xx/mpc834x_itx.c
arch/powerpc/platforms/83xx/mpc834x_mds.c
arch/powerpc/platforms/83xx/mpc836x_mds.c
arch/powerpc/platforms/83xx/mpc836x_rdk.c
arch/powerpc/platforms/83xx/mpc837x_mds.c
arch/powerpc/platforms/83xx/usb.c
arch/powerpc/platforms/85xx/corenet_generic.c
arch/powerpc/platforms/85xx/ge_imp3a.c
arch/powerpc/platforms/85xx/ksi8560.c
arch/powerpc/platforms/85xx/mpc8536_ds.c
arch/powerpc/platforms/85xx/mpc85xx_cds.c
arch/powerpc/platforms/85xx/mpc85xx_ds.c
arch/powerpc/platforms/85xx/mpc85xx_mds.c
arch/powerpc/platforms/85xx/mpc85xx_rdb.c
arch/powerpc/platforms/85xx/p1010rdb.c
arch/powerpc/platforms/85xx/p1022_ds.c
arch/powerpc/platforms/85xx/p1022_rdk.c
arch/powerpc/platforms/85xx/p1023_rdb.c
arch/powerpc/platforms/85xx/qemu_e500.c
arch/powerpc/platforms/85xx/smp.c
arch/powerpc/platforms/85xx/socrates.c
arch/powerpc/platforms/85xx/stx_gp3.c
arch/powerpc/platforms/85xx/tqm85xx.c
arch/powerpc/platforms/85xx/xes_mpc85xx.c
arch/powerpc/platforms/86xx/gef_ppc9a.c
arch/powerpc/platforms/86xx/gef_sbc310.c
arch/powerpc/platforms/86xx/gef_sbc610.c
arch/powerpc/platforms/86xx/mpc8610_hpcd.c
arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
arch/powerpc/platforms/86xx/mvme7100.c
arch/powerpc/platforms/8xx/Makefile
arch/powerpc/platforms/8xx/adder875.c
arch/powerpc/platforms/8xx/cpm1-ic.c [new file with mode: 0644]
arch/powerpc/platforms/8xx/cpm1.c
arch/powerpc/platforms/8xx/ep88xc.c
arch/powerpc/platforms/8xx/m8xx_setup.c
arch/powerpc/platforms/8xx/mpc86xads_setup.c
arch/powerpc/platforms/8xx/mpc885ads_setup.c
arch/powerpc/platforms/8xx/mpc8xx.h
arch/powerpc/platforms/8xx/pic.c
arch/powerpc/platforms/8xx/pic.h
arch/powerpc/platforms/8xx/tqm8xx_setup.c
arch/powerpc/platforms/Kconfig.cputype
arch/powerpc/platforms/amigaone/setup.c
arch/powerpc/platforms/book3s/vas-api.c
arch/powerpc/platforms/cell/axon_msi.c
arch/powerpc/platforms/cell/cbe_powerbutton.c
arch/powerpc/platforms/cell/cbe_regs.c
arch/powerpc/platforms/cell/cbe_thermal.c
arch/powerpc/platforms/cell/interrupt.c
arch/powerpc/platforms/cell/iommu.c
arch/powerpc/platforms/cell/pervasive.c
arch/powerpc/platforms/cell/ras.c
arch/powerpc/platforms/cell/setup.c
arch/powerpc/platforms/cell/smp.c
arch/powerpc/platforms/cell/spider-pci.c
arch/powerpc/platforms/cell/spider-pic.c
arch/powerpc/platforms/cell/spu_base.c
arch/powerpc/platforms/cell/spu_manage.c
arch/powerpc/platforms/cell/spu_priv1_mmio.c
arch/powerpc/platforms/cell/spufs/inode.c
arch/powerpc/platforms/chrp/nvram.c
arch/powerpc/platforms/chrp/pci.c
arch/powerpc/platforms/chrp/setup.c
arch/powerpc/platforms/chrp/smp.c
arch/powerpc/platforms/chrp/time.c
arch/powerpc/platforms/embedded6xx/gamecube.c
arch/powerpc/platforms/embedded6xx/holly.c
arch/powerpc/platforms/embedded6xx/linkstation.c
arch/powerpc/platforms/embedded6xx/ls_uart.c
arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
arch/powerpc/platforms/embedded6xx/mvme5100.c
arch/powerpc/platforms/embedded6xx/storcenter.c
arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c
arch/powerpc/platforms/embedded6xx/wii.c
arch/powerpc/platforms/fsl_uli1575.c
arch/powerpc/platforms/maple/pci.c
arch/powerpc/platforms/maple/setup.c
arch/powerpc/platforms/maple/time.c
arch/powerpc/platforms/pasemi/dma_lib.c
arch/powerpc/platforms/pasemi/iommu.c
arch/powerpc/platforms/pasemi/misc.c
arch/powerpc/platforms/pasemi/msi.c
arch/powerpc/platforms/pasemi/pci.c
arch/powerpc/platforms/pasemi/setup.c
arch/powerpc/platforms/powermac/backlight.c
arch/powerpc/platforms/powermac/bootx_init.c
arch/powerpc/platforms/powermac/feature.c
arch/powerpc/platforms/powermac/low_i2c.c
arch/powerpc/platforms/powermac/nvram.c
arch/powerpc/platforms/powermac/pci.c
arch/powerpc/platforms/powermac/pfunc_core.c
arch/powerpc/platforms/powermac/pic.c
arch/powerpc/platforms/powermac/setup.c
arch/powerpc/platforms/powermac/smp.c
arch/powerpc/platforms/powermac/time.c
arch/powerpc/platforms/powermac/udbg_adb.c
arch/powerpc/platforms/powermac/udbg_scc.c
arch/powerpc/platforms/powernv/eeh-powernv.c
arch/powerpc/platforms/powernv/idle.c
arch/powerpc/platforms/powernv/ocxl.c
arch/powerpc/platforms/powernv/opal-fadump.c
arch/powerpc/platforms/powernv/opal-fadump.h
arch/powerpc/platforms/powernv/opal-imc.c
arch/powerpc/platforms/powernv/opal-lpc.c
arch/powerpc/platforms/powernv/opal-memory-errors.c
arch/powerpc/platforms/powernv/pci-cxl.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/powernv/pci-sriov.c
arch/powerpc/platforms/powernv/pci.c
arch/powerpc/platforms/ps3/Kconfig
arch/powerpc/platforms/ps3/htab.c
arch/powerpc/platforms/ps3/mm.c
arch/powerpc/platforms/ps3/os-area.c
arch/powerpc/platforms/ps3/setup.c
arch/powerpc/platforms/ps3/system-bus.c
arch/powerpc/platforms/pseries/cmm.c
arch/powerpc/platforms/pseries/dlpar.c
arch/powerpc/platforms/pseries/eeh_pseries.c
arch/powerpc/platforms/pseries/hotplug-memory.c
arch/powerpc/platforms/pseries/iommu.c
arch/powerpc/platforms/pseries/lpar.c
arch/powerpc/platforms/pseries/lparcfg.c
arch/powerpc/platforms/pseries/msi.c
arch/powerpc/platforms/pseries/nvram.c
arch/powerpc/platforms/pseries/pci.c
arch/powerpc/platforms/pseries/pmem.c
arch/powerpc/platforms/pseries/reconfig.c
arch/powerpc/platforms/pseries/rtas-fadump.c
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/platforms/pseries/smp.c
arch/powerpc/platforms/pseries/vas-sysfs.c
arch/powerpc/platforms/pseries/vas.c
arch/powerpc/platforms/pseries/vio.c
arch/powerpc/sysdev/cpm2_pic.c
arch/powerpc/sysdev/dart_iommu.c
arch/powerpc/sysdev/dcr.c
arch/powerpc/sysdev/fsl_lbc.c
arch/powerpc/sysdev/fsl_msi.c
arch/powerpc/sysdev/fsl_pci.c
arch/powerpc/sysdev/fsl_soc.c
arch/powerpc/sysdev/ge/ge_pic.c
arch/powerpc/sysdev/grackle.c
arch/powerpc/sysdev/i8259.c
arch/powerpc/sysdev/indirect_pci.c
arch/powerpc/sysdev/ipic.c
arch/powerpc/sysdev/mmio_nvram.c
arch/powerpc/sysdev/mpic.c
arch/powerpc/sysdev/mpic_msgr.c
arch/powerpc/sysdev/mpic_msi.c
arch/powerpc/sysdev/mpic_timer.c
arch/powerpc/sysdev/mpic_u3msi.c
arch/powerpc/sysdev/msi_bitmap.c
arch/powerpc/sysdev/pmi.c
arch/powerpc/sysdev/rtc_cmos_setup.c
arch/powerpc/sysdev/tsi108_dev.c
arch/powerpc/sysdev/tsi108_pci.c
arch/powerpc/sysdev/xics/icp-native.c
arch/powerpc/sysdev/xics/ics-native.c
arch/powerpc/sysdev/xics/ics-opal.c
arch/powerpc/sysdev/xics/ics-rtas.c
arch/powerpc/sysdev/xics/xics-common.c
arch/powerpc/sysdev/xive/common.c
arch/powerpc/sysdev/xive/native.c
arch/powerpc/sysdev/xive/spapr.c
arch/powerpc/xmon/ppc-opc.c
arch/powerpc/xmon/xmon.c
arch/riscv/Kconfig.socs
arch/riscv/include/asm/kvm_host.h
arch/riscv/kvm/vcpu.c
arch/riscv/kvm/vcpu_exit.c
arch/riscv/kvm/vcpu_fp.c
arch/riscv/mm/init.c
arch/s390/Kconfig
arch/s390/Makefile
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/include/asm/entry-common.h
arch/s390/include/asm/processor.h
arch/s390/include/asm/stacktrace.h
arch/s390/kernel/machine_kexec.c
arch/s390/kernel/processor.c
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/pv.c
arch/s390/kvm/vsie.c
arch/s390/lib/test_unwind.c
arch/sparc/include/asm/cacheflush_32.h
arch/x86/events/intel/cstate.c
arch/x86/include/asm/compat.h
arch/x86/include/asm/io.h
arch/x86/include/asm/kvm-x86-ops.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/msr-index.h
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cpu.h
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/tsx.c
arch/x86/kernel/crash_dump_64.c
arch/x86/kernel/kvm.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/hyperv.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/pmu.h
arch/x86/kvm/svm/avic.c
arch/x86/kvm/svm/pmu.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/pmu_intel.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/x86.c
arch/x86/lib/usercopy_64.c
arch/xtensa/kernel/coprocessor.S
arch/xtensa/kernel/jump_label.c
arch/xtensa/platforms/iss/console.c
block/bio.c
block/blk-mq.c
block/ioctl.c
drivers/ata/pata_marvell.c
drivers/base/dd.c
drivers/block/null_blk/main.c
drivers/char/random.c
drivers/cpuidle/cpuidle-riscv-sbi.c
drivers/dma/at_xdmac.c
drivers/dma/dw-edma/dw-edma-v0-core.c
drivers/dma/idxd/device.c
drivers/dma/idxd/submit.c
drivers/dma/idxd/sysfs.c
drivers/dma/imx-sdma.c
drivers/dma/mediatek/mtk-uart-apdma.c
drivers/edac/synopsys_edac.c
drivers/firmware/arm_scmi/clock.c
drivers/firmware/arm_scmi/driver.c
drivers/firmware/arm_scmi/optee.c
drivers/firmware/cirrus/cs_dsp.c
drivers/gpio/gpio-sim.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
drivers/gpu/drm/drm_of.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_psr.c
drivers/gpu/drm/i915/gem/i915_gem_mman.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/adreno_device.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
drivers/gpu/drm/msm/dp/dp_display.c
drivers/gpu/drm/msm/dp/dp_panel.c
drivers/gpu/drm/msm/dp/dp_panel.h
drivers/gpu/drm/msm/dsi/dsi_manager.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
drivers/gpu/drm/radeon/radeon_sync.c
drivers/gpu/drm/vc4/Kconfig
drivers/gpu/drm/vc4/vc4_dsi.c
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-ismt.c
drivers/i2c/busses/i2c-pasemi-core.c
drivers/i2c/busses/i2c-qcom-geni.c
drivers/i2c/i2c-dev.c
drivers/input/keyboard/cypress-sf.c
drivers/input/keyboard/omap4-keypad.c
drivers/macintosh/adb.c
drivers/macintosh/adbhid.c
drivers/macintosh/ams/ams-core.c
drivers/macintosh/ams/ams-i2c.c
drivers/macintosh/ans-lcd.c
drivers/macintosh/macio-adb.c
drivers/macintosh/macio_asic.c
drivers/macintosh/macio_sysfs.c
drivers/macintosh/mediabay.c
drivers/macintosh/rack-meter.c
drivers/macintosh/smu.c
drivers/macintosh/therm_adt746x.c
drivers/macintosh/therm_windtunnel.c
drivers/macintosh/via-cuda.c
drivers/macintosh/via-pmu-backlight.c
drivers/macintosh/via-pmu-led.c
drivers/macintosh/via-pmu.c
drivers/macintosh/windfarm_ad7417_sensor.c
drivers/macintosh/windfarm_core.c
drivers/macintosh/windfarm_cpufreq_clamp.c
drivers/macintosh/windfarm_fcu_controls.c
drivers/macintosh/windfarm_lm75_sensor.c
drivers/macintosh/windfarm_lm87_sensor.c
drivers/macintosh/windfarm_max6690_sensor.c
drivers/macintosh/windfarm_mpu.h
drivers/macintosh/windfarm_pm112.c
drivers/macintosh/windfarm_pm121.c
drivers/macintosh/windfarm_pm72.c
drivers/macintosh/windfarm_pm81.c
drivers/macintosh/windfarm_pm91.c
drivers/macintosh/windfarm_rm31.c
drivers/macintosh/windfarm_smu_controls.c
drivers/macintosh/windfarm_smu_sat.c
drivers/macintosh/windfarm_smu_sensors.c
drivers/md/bcache/journal.c
drivers/md/bcache/request.c
drivers/md/dm-integrity.c
drivers/md/dm-ps-historical-service-time.c
drivers/md/dm-zone.c
drivers/md/dm.c
drivers/media/platform/nxp/Kconfig
drivers/media/platform/rockchip/rga/rga.c
drivers/media/tuners/si2157.c
drivers/memory/atmel-ebi.c
drivers/memory/fsl_ifc.c
drivers/memory/renesas-rpc-if.c
drivers/misc/cxl/api.c
drivers/misc/cxl/cxl.h
drivers/misc/cxl/cxllib.c
drivers/misc/cxl/flash.c
drivers/misc/cxl/guest.c
drivers/misc/cxl/irq.c
drivers/misc/cxl/main.c
drivers/misc/cxl/native.c
drivers/misc/ocxl/afu_irq.c
drivers/misc/ocxl/file.c
drivers/misc/ocxl/link.c
drivers/net/bonding/bond_main.c
drivers/net/dsa/ocelot/felix.c
drivers/net/dsa/ocelot/felix_vsc9959.c
drivers/net/dsa/realtek/Kconfig
drivers/net/dsa/realtek/realtek-smi.c
drivers/net/ethernet/Kconfig
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
drivers/net/ethernet/aquantia/atlantic/aq_vec.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/faraday/ftgmac100.c
drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/ice/ice_arfs.c
drivers/net/ethernet/intel/ice/ice_eswitch.c
drivers/net/ethernet/intel/ice/ice_eswitch.h
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_nvm.c
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/intel/igc/igc_i225.c
drivers/net/ethernet/intel/igc/igc_phy.c
drivers/net/ethernet/intel/igc/igc_ptp.c
drivers/net/ethernet/mellanox/mlxsw/i2c.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
drivers/net/ethernet/microchip/lan966x/lan966x_main.c
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
drivers/net/hippi/rrunner.c
drivers/net/macvlan.c
drivers/net/mdio/fwnode_mdio.c
drivers/net/phy/microchip_t1.c
drivers/net/tun.c
drivers/net/veth.c
drivers/net/vxlan/vxlan_core.c
drivers/net/wireless/ath/ath11k/mac.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
drivers/nvme/host/core.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/perf/arm_pmu.c
drivers/platform/x86/acerhdf.c
drivers/platform/x86/amd-pmc.c
drivers/platform/x86/barco-p50-gpio.c
drivers/platform/x86/samsung-laptop.c
drivers/platform/x86/think-lmi.c
drivers/platform/x86/think-lmi.h
drivers/power/supply/power_supply_core.c
drivers/power/supply/samsung-sdi-battery.c
drivers/reset/reset-rzg2l-usbphy-ctrl.c
drivers/reset/tegra/reset-bpmp.c
drivers/scsi/bnx2i/bnx2i_hwi.c
drivers/scsi/bnx2i/bnx2i_iscsi.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/libiscsi.c
drivers/scsi/libiscsi_tcp.c
drivers/scsi/pm8001/pm80xx_hwi.c
drivers/scsi/qedi/qedi_iscsi.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sr_ioctl.c
drivers/spi/atmel-quadspi.c
drivers/spi/spi-cadence-quadspi.c
drivers/spi/spi-intel-pci.c
drivers/spi/spi-mtk-nor.c
drivers/vfio/pci/vfio_pci_core.c
drivers/xen/balloon.c
drivers/xen/gntalloc.c
drivers/xen/unpopulated-alloc.c
fs/afs/write.c
fs/binfmt_elf.c
fs/btrfs/block-group.c
fs/btrfs/block-group.h
fs/btrfs/compression.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/volumes.c
fs/cachefiles/namei.c
fs/cachefiles/xattr.c
fs/cifs/cifsfs.c
fs/cifs/connect.c
fs/cifs/dfs_cache.c
fs/cifs/link.c
fs/cifs/smb2ops.c
fs/cifs/trace.h
fs/cifs/transport.c
fs/erofs/zdata.c
fs/erofs/zdata.h
fs/ext4/ext4.h
fs/ext4/extents.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/namei.c
fs/ext4/page-io.c
fs/ext4/super.c
fs/fscache/Kconfig
fs/fscache/cache.c
fs/fscache/cookie.c
fs/fscache/internal.h
fs/fscache/io.c
fs/hugetlbfs/inode.c
fs/io-wq.h
fs/io_uring.c
fs/jbd2/commit.c
fs/ksmbd/misc.c
fs/ksmbd/misc.h
fs/ksmbd/oplock.c
fs/ksmbd/oplock.h
fs/ksmbd/smb2pdu.c
fs/ksmbd/vfs.c
fs/ksmbd/vfs_cache.c
fs/ksmbd/vfs_cache.h
fs/namei.c
fs/namespace.c
fs/nfsd/filecache.c
fs/nfsd/nfs2acl.c
fs/pipe.c
fs/posix_acl.c
fs/stat.c
fs/xattr.c
include/asm-generic/unaligned.h
include/linux/dma-buf-map.h [deleted file]
include/linux/fscache.h
include/linux/gpio/consumer.h
include/linux/hugetlb.h
include/linux/kernel.h
include/linux/kfence.h
include/linux/kvm_host.h
include/linux/memcontrol.h
include/linux/mm.h
include/linux/posix_acl_xattr.h
include/linux/sched.h
include/linux/sched/mm.h
include/linux/sunrpc/svc.h
include/linux/t10-pi.h
include/linux/timex.h
include/linux/vfio_pci_core.h
include/linux/vmalloc.h
include/net/esp.h
include/net/flow_dissector.h
include/net/ip_tunnels.h
include/net/netns/ipv6.h
include/scsi/libiscsi.h
include/scsi/scsi_transport_iscsi.h
include/sound/core.h
include/sound/memalloc.h
include/sound/soc-component.h
include/trace/events/sunrpc.h
include/uapi/linux/input-event-codes.h
include/uapi/linux/io_uring.h
include/uapi/linux/stddef.h
kernel/cpu.c
kernel/dma/direct.h
kernel/events/core.c
kernel/events/internal.h
kernel/events/ring_buffer.c
kernel/irq/affinity.c
kernel/irq_work.c
kernel/kcov.c
kernel/sched/fair.c
kernel/smp.c
kernel/time/tick-sched.c
kernel/time/timer.c
lib/xarray.c
mm/compaction.c
mm/filemap.c
mm/hugetlb.c
mm/kasan/hw_tags.c
mm/kasan/kasan.h
mm/kfence/core.c
mm/kfence/kfence.h
mm/kfence/report.c
mm/kmemleak.c
mm/memcontrol.c
mm/memory-failure.c
mm/mmap.c
mm/mmu_notifier.c
mm/oom_kill.c
mm/page_alloc.c
mm/page_io.c
mm/secretmem.c
mm/shmem.c
mm/slab.c
mm/slab.h
mm/slab_common.c
mm/slob.c
mm/slub.c
mm/userfaultfd.c
mm/util.c
mm/vmalloc.c
mm/workingset.c
net/can/isotp.c
net/core/flow_dissector.c
net/core/rtnetlink.c
net/dsa/dsa2.c
net/dsa/tag_hellcreek.c
net/ipv4/esp4.c
net/ipv4/ip_gre.c
net/ipv4/ip_tunnel.c
net/ipv6/esp6.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/route.c
net/l3mdev/l3mdev.c
net/mac80211/debugfs_sta.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_socket.c
net/netlink/af_netlink.c
net/nfc/nci/core.c
net/openvswitch/flow_netlink.c
net/packet/af_packet.c
net/rxrpc/net_ns.c
net/sched/cls_api.c
net/sched/cls_flower.c
net/sched/cls_u32.c
net/sched/sch_taprio.c
net/sctp/sm_statefuns.c
net/sctp/socket.c
net/smc/af_smc.c
net/smc/smc_clc.c
net/smc/smc_pnet.c
net/sunrpc/svc_xprt.c
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
net/wireless/nl80211.c
net/wireless/scan.c
net/xfrm/xfrm_policy.c
scripts/gcc-plugins/latent_entropy_plugin.c
sound/core/init.c
sound/core/memalloc.c
sound/core/pcm_misc.c
sound/drivers/mtpav.c
sound/hda/hdac_i915.c
sound/hda/intel-dsp-config.c
sound/isa/galaxy/galaxy.c
sound/isa/sc6000.c
sound/oss/dmasound/dmasound.h
sound/oss/dmasound/dmasound_core.c
sound/pci/ad1889.c
sound/pci/ali5451/ali5451.c
sound/pci/als300.c
sound/pci/als4000.c
sound/pci/atiixp.c
sound/pci/atiixp_modem.c
sound/pci/au88x0/au88x0.c
sound/pci/aw2/aw2-alsa.c
sound/pci/azt3328.c
sound/pci/bt87x.c
sound/pci/ca0106/ca0106_main.c
sound/pci/cmipci.c
sound/pci/cs4281.c
sound/pci/cs5535audio/cs5535audio.c
sound/pci/echoaudio/echoaudio.c
sound/pci/emu10k1/emu10k1x.c
sound/pci/ens1370.c
sound/pci/es1938.c
sound/pci/es1968.c
sound/pci/fm801.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/ice1712/ice1724.c
sound/pci/intel8x0.c
sound/pci/intel8x0m.c
sound/pci/korg1212/korg1212.c
sound/pci/lola/lola.c
sound/pci/lx6464es/lx6464es.c
sound/pci/maestro3.c
sound/pci/nm256/nm256.c
sound/pci/oxygen/oxygen_lib.c
sound/pci/riptide/riptide.c
sound/pci/rme32.c
sound/pci/rme96.c
sound/pci/rme9652/hdsp.c
sound/pci/rme9652/hdspm.c
sound/pci/rme9652/rme9652.c
sound/pci/sis7019.c
sound/pci/sonicvibes.c
sound/pci/via82xx.c
sound/pci/via82xx_modem.c
sound/soc/atmel/sam9g20_wm8731.c
sound/soc/codecs/cs35l41-lib.c
sound/soc/codecs/lpass-rx-macro.c
sound/soc/codecs/lpass-tx-macro.c
sound/soc/codecs/lpass-va-macro.c
sound/soc/codecs/msm8916-wcd-digital.c
sound/soc/codecs/rk817_codec.c
sound/soc/codecs/rt5682.c
sound/soc/codecs/rt5682s.c
sound/soc/codecs/rt711.c
sound/soc/codecs/wcd934x.c
sound/soc/codecs/wm8731.c
sound/soc/fsl/fsl_sai.c
sound/soc/generic/simple-card-utils.c
sound/soc/intel/boards/sof_es8336.c
sound/soc/intel/boards/sof_rt5682.c
sound/soc/intel/common/soc-acpi-intel-tgl-match.c
sound/soc/meson/aiu-acodec-ctrl.c
sound/soc/meson/aiu-codec-ctrl.c
sound/soc/meson/aiu.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/soc/soc-pcm.c
sound/soc/soc-topology.c
sound/soc/sof/sof-pci-dev.c
sound/soc/sof/topology.c
sound/usb/midi.c
sound/usb/mixer_maps.c
sound/usb/pcm.c
sound/usb/usbaudio.h
sound/x86/intel_hdmi_audio.c
tools/arch/x86/include/asm/msr-index.h
tools/include/linux/slab.h
tools/lib/perf/evlist.c
tools/perf/bench/numa.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-script.c
tools/perf/tests/perf-time-to-tsc.c
tools/perf/util/c++/clang.cpp
tools/perf/util/header.c
tools/perf/util/header.h
tools/perf/util/parse-events.c
tools/perf/util/stat.c
tools/testing/radix-tree/linux.c
tools/testing/selftests/drivers/net/mlxsw/spectrum-2/vxlan_flooding_ipv6.sh
tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/aarch64/arch_timer.c
tools/testing/selftests/kvm/aarch64/get-reg-list.c
tools/testing/selftests/kvm/aarch64/vcpu_width_config.c [new file with mode: 0644]
tools/testing/selftests/kvm/dirty_log_perf_test.c
tools/testing/selftests/kvm/include/riscv/processor.h
tools/testing/selftests/kvm/include/x86_64/processor.h
tools/testing/selftests/kvm/kvm_page_table_test.c
tools/testing/selftests/kvm/lib/riscv/processor.c
tools/testing/selftests/kvm/lib/x86_64/processor.c
tools/testing/selftests/kvm/x86_64/amx_test.c
tools/testing/selftests/kvm/x86_64/emulator_error_test.c
tools/testing/selftests/kvm/x86_64/smm_test.c
tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
tools/testing/selftests/mqueue/mq_perf_tests.c
tools/testing/selftests/powerpc/include/utils.h
tools/testing/selftests/powerpc/math/Makefile
tools/testing/selftests/powerpc/math/mma.S [new file with mode: 0644]
tools/testing/selftests/powerpc/math/mma.c [new file with mode: 0644]
tools/testing/selftests/powerpc/mm/.gitignore
tools/testing/selftests/powerpc/mm/Makefile
tools/testing/selftests/powerpc/mm/large_vm_gpr_corruption.c [new file with mode: 0644]
tools/testing/selftests/powerpc/pmu/sampling_tests/misc.c
tools/testing/selftests/powerpc/security/spectre_v2.c
tools/testing/selftests/vm/mremap_test.c
tools/testing/selftests/vm/run_vmtests.sh
virt/kvm/dirty_ring.c
virt/kvm/kvm_main.c
virt/kvm/kvm_mm.h

index 3c77677..594fda2 100644 (file)
@@ -103,8 +103,8 @@ What:           /sys/class/cxl/<afu>/api_version_compatible
 Date:           September 2014
 Contact:        linuxppc-dev@lists.ozlabs.org
 Description:    read only
-                Decimal value of the the lowest version of the userspace API
-                this this kernel supports.
+                Decimal value of the lowest version of the userspace API
+                this kernel supports.
 Users:         https://github.com/ibm-capi/libcxl
 
 
index 0582036..4cdba34 100644 (file)
@@ -116,7 +116,7 @@ Description:
                                            <value>[ForceIf:<attribute>=<value>]
                                            <value>[ForceIfNot:<attribute>=<value>]
 
-                                       For example:
+                                       For example::
 
                                            LegacyOrom/dell_value_modifier has value:
                                                    Disabled[ForceIf:SecureBoot=Enabled]
@@ -212,7 +212,7 @@ Description:
                the next boot.
 
                Lenovo specific class extensions
-               ------------------------------
+               --------------------------------
 
                On Lenovo systems the following additional settings are available:
 
@@ -246,9 +246,7 @@ Description:
                                        that is being referenced (e.g hdd0, hdd1 etc)
                                        This attribute defaults to device 0.
 
-               certificate:
-               signature:
-               save_signature:
+               certificate, signature, save_signature:
                                        These attributes are used for certificate based authentication. This is
                                        used in conjunction with a signing server as an alternative to password
                                        based authentication.
@@ -257,22 +255,27 @@ Description:
                                        The attributes can be displayed to check the stored value.
 
                                        Some usage examples:
-                                       Installing a certificate to enable feature:
-                                               echo <supervisor password > authentication/Admin/current_password
-                                               echo <signed certificate> > authentication/Admin/certificate
 
-                                       Updating the installed certificate:
-                                               echo <signature> > authentication/Admin/signature
-                                               echo <signed certificate> > authentication/Admin/certificate
+                                               Installing a certificate to enable feature::
+
+                                                       echo "supervisor password" > authentication/Admin/current_password
+                                                       echo "signed certificate" > authentication/Admin/certificate
+
+                                               Updating the installed certificate::
+
+                                                       echo "signature" > authentication/Admin/signature
+                                                       echo "signed certificate" > authentication/Admin/certificate
 
-                                       Removing the installed certificate:
-                                               echo <signature> > authentication/Admin/signature
-                                               echo '' > authentication/Admin/certificate
+                                               Removing the installed certificate::
 
-                                       Changing a BIOS setting:
-                                               echo <signature> > authentication/Admin/signature
-                                               echo <save signature> > authentication/Admin/save_signature
-                                               echo Enable > attribute/PasswordBeep/current_value
+                                                       echo "signature" > authentication/Admin/signature
+                                                       echo "" > authentication/Admin/certificate
+
+                                               Changing a BIOS setting::
+
+                                                       echo "signature" > authentication/Admin/signature
+                                                       echo "save signature" > authentication/Admin/save_signature
+                                                       echo Enable > attribute/PasswordBeep/current_value
 
                                        You cannot enable certificate authentication if a supervisor password
                                        has not been set.
@@ -288,9 +291,10 @@ Description:
                certificate_to_password:
                                        Write only attribute used to switch from certificate based authentication
                                        back to password based.
-                                       Usage:
-                                               echo <signature> > authentication/Admin/signature
-                                               echo <password> > authentication/Admin/certificate_to_password
+                                       Usage::
+
+                                               echo "signature" > authentication/Admin/signature
+                                               echo "password" > authentication/Admin/certificate_to_password
 
 
 What:          /sys/class/firmware-attributes/*/attributes/pending_reboot
@@ -345,7 +349,7 @@ Description:
 
                    # echo "factory" > /sys/class/firmware-attributes/*/device/attributes/reset_bios
                    # cat /sys/class/firmware-attributes/*/device/attributes/reset_bios
-                   builtinsafe lastknowngood [factory] custom
+                   builtinsafe lastknowngood [factory] custom
 
                Note that any changes to this attribute requires a reboot
                for changes to take effect.
index ab12212..96b92c1 100644 (file)
@@ -13,17 +13,19 @@ Description:
                Should the operation fail, one of the following error codes
                may be returned:
 
+               ==========      =====
                Error Code      Cause
-               ----------      -----
-               EIO             General mailbox failure. Log may indicate cause.
-               EBUSY           Mailbox is owned by another agent.
-               EPERM           SDSI capability is not enabled in hardware.
-               EPROTO          Failure in mailbox protocol detected by driver.
+               ==========      =====
+               EIO             General mailbox failure. Log may indicate cause.
+               EBUSY           Mailbox is owned by another agent.
+               EPERM           SDSI capability is not enabled in hardware.
+               EPROTO          Failure in mailbox protocol detected by driver.
                                See log for details.
-               EOVERFLOW       For provision commands, the size of the data
+               EOVERFLOW       For provision commands, the size of the data
                                exceeds what may be written.
-               ESPIPE          Seeking is not allowed.
-               ETIMEDOUT       Failure to complete mailbox transaction in time.
+               ESPIPE          Seeking is not allowed.
+               ETIMEDOUT       Failure to complete mailbox transaction in time.
+               ==========      =====
 
 What:          /sys/bus/auxiliary/devices/intel_vsec.sdsi.X/guid
 Date:          Feb 2022
index 0548237..bb4681a 100644 (file)
@@ -9,8 +9,9 @@ Description:    Shows all enabled kernel features.
 What:          /sys/fs/erofs/<disk>/sync_decompress
 Date:          November 2021
 Contact:       "Huang Jianan" <huangjianan@oppo.com>
-Description:   Control strategy of sync decompression
+Description:   Control strategy of sync decompression:
+
                - 0 (default, auto): enable for readpage, and enable for
-                                    readahead on atomic contexts only,
+                 readahead on atomic contexts only.
                - 1 (force on): enable for readpage and readahead.
                - 2 (force off): disable for all situations.
index 0afec83..564ae6a 100644 (file)
@@ -13,7 +13,6 @@ maintainers:
 properties:
   compatible:
     enum:
-      - nvidia,tegra20-pmc
       - nvidia,tegra20-pmc
       - nvidia,tegra30-pmc
       - nvidia,tegra114-pmc
index bd40213..fced408 100644 (file)
@@ -34,7 +34,6 @@ properties:
     oneOf:
       - items:
           - enum:
-              - ti,sysc-omap2
               - ti,sysc-omap2
               - ti,sysc-omap4
               - ti,sysc-omap4-simple
index f14f1d3..d819dfa 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos SoC Audio SubSystem clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 4e80628..0589a63 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos SoC clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 64d027d..c98eff6 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung SoC external/osc/XXTI/XusbXTI clock
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 1ed64ad..b644bbd 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos4412 SoC ISP clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index a3fac5c..b05f835 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos5260 SoC clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 032862e..b737c9d 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos5410 SoC clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index edd1b4a..3f9326e 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos5433 SoC clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 599baf0..c137c67 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos7 SoC clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 7e5a9ca..5073e56 100644 (file)
@@ -9,7 +9,7 @@ title: Samsung Exynos7885 SoC clock controller
 maintainers:
   - Dávid Virág <virag.david003@gmail.com>
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 80ba608..aa11815 100644 (file)
@@ -9,7 +9,7 @@ title: Samsung Exynos850 SoC clock controller
 maintainers:
   - Sam Protsenko <semen.protsenko@linaro.org>
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 1410c51..9248bfc 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2M and S5M family clock generator block
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index ae8f8fc..2659854 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung S5Pv210 SoC Audio SubSystem clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index dcb29a2..67a3366 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung S5P6442/S5PC110/S5PV210 SoC clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index d318fcc..2bdd05a 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos NoC (Network on Chip) Probe
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   The Samsung Exynos542x SoC has a NoC (Network on Chip) Probe for NoC bus.
index c9a8cb5..e300df4 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos SoC PPMU (Platform Performance Monitoring Unit)
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   The Samsung Exynos SoC has PPMU (Platform Performance Monitoring Unit) for
index d31483a..6fb7e32 100644 (file)
@@ -160,7 +160,7 @@ examples:
     mdss: mdss@5e00000 {
         #address-cells = <1>;
         #size-cells = <1>;
-        compatible = "qcom,qcm2290-mdss", "qcom,mdss";
+        compatible = "qcom,qcm2290-mdss";
         reg = <0x05e00000 0x1000>;
         reg-names = "mdss";
         power-domains = <&dispcc MDSS_GDSC>;
@@ -180,7 +180,7 @@ examples:
                  <&apps_smmu 0x421 0x0>;
         ranges;
 
-        mdss_mdp: mdp@5e01000 {
+        mdss_mdp: display-controller@5e01000 {
                 compatible = "qcom,qcm2290-dpu";
                 reg = <0x05e01000 0x8f000>,
                       <0x05eb0000 0x2008>;
index 9bf592d..7749de9 100644 (file)
@@ -71,78 +71,72 @@ properties:
 
   hfront-porch:
     description: Horizontal front porch panel timing
+    $ref: /schemas/types.yaml#/definitions/uint32-array
     oneOf:
-      - $ref: /schemas/types.yaml#/definitions/uint32
-        maxItems: 1
+      - maxItems: 1
         items:
           description: typical number of pixels
-      - $ref: /schemas/types.yaml#/definitions/uint32-array
-        minItems: 3
+      - minItems: 3
         maxItems: 3
         items:
           description: min, typ, max number of pixels
 
   hback-porch:
     description: Horizontal back porch timing
+    $ref: /schemas/types.yaml#/definitions/uint32-array
     oneOf:
-      - $ref: /schemas/types.yaml#/definitions/uint32
-        maxItems: 1
+      - maxItems: 1
         items:
           description: typical number of pixels
-      - $ref: /schemas/types.yaml#/definitions/uint32-array
-        minItems: 3
+      - minItems: 3
         maxItems: 3
         items:
           description: min, typ, max number of pixels
 
   hsync-len:
     description: Horizontal sync length panel timing
+    $ref: /schemas/types.yaml#/definitions/uint32-array
     oneOf:
-      - $ref: /schemas/types.yaml#/definitions/uint32
-        maxItems: 1
+      - maxItems: 1
         items:
           description: typical number of pixels
-      - $ref: /schemas/types.yaml#/definitions/uint32-array
-        minItems: 3
+      - minItems: 3
         maxItems: 3
         items:
           description: min, typ, max number of pixels
 
   vfront-porch:
     description: Vertical front porch panel timing
+    $ref: /schemas/types.yaml#/definitions/uint32-array
     oneOf:
-      - $ref: /schemas/types.yaml#/definitions/uint32
-        maxItems: 1
+      - maxItems: 1
         items:
           description: typical number of lines
-      - $ref: /schemas/types.yaml#/definitions/uint32-array
-        minItems: 3
+      - minItems: 3
         maxItems: 3
         items:
           description: min, typ, max number of lines
 
   vback-porch:
     description: Vertical back porch panel timing
+    $ref: /schemas/types.yaml#/definitions/uint32-array
     oneOf:
-      - $ref: /schemas/types.yaml#/definitions/uint32
-        maxItems: 1
+      - maxItems: 1
         items:
           description: typical number of lines
-      - $ref: /schemas/types.yaml#/definitions/uint32-array
-        minItems: 3
+      - minItems: 3
         maxItems: 3
         items:
           description: min, typ, max number of lines
 
   vsync-len:
     description: Vertical sync length panel timing
+    $ref: /schemas/types.yaml#/definitions/uint32-array
     oneOf:
-      - $ref: /schemas/types.yaml#/definitions/uint32
-        maxItems: 1
+      - maxItems: 1
         items:
           description: typical number of lines
-      - $ref: /schemas/types.yaml#/definitions/uint32-array
-        minItems: 3
+      - minItems: 3
         maxItems: 3
         items:
           description: min, typ, max number of lines
index f998a3a..919734c 100644 (file)
@@ -11,7 +11,7 @@ maintainers:
   - Joonyoung Shim <jy0922.shim@samsung.com>
   - Seung-Woo Kim <sw0312.kim@samsung.com>
   - Kyungmin Park <kyungmin.park@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index cb8e735..63379fa 100644 (file)
@@ -11,7 +11,7 @@ maintainers:
   - Joonyoung Shim <jy0922.shim@samsung.com>
   - Seung-Woo Kim <sw0312.kim@samsung.com>
   - Kyungmin Park <kyungmin.park@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index ba40284..00e325a 100644 (file)
@@ -11,7 +11,7 @@ maintainers:
   - Joonyoung Shim <jy0922.shim@samsung.com>
   - Seung-Woo Kim <sw0312.kim@samsung.com>
   - Kyungmin Park <kyungmin.park@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description:
   Samsung Exynos SoC Mixer is responsible for mixing and blending multiple data
index 6f79683..7c37470 100644 (file)
@@ -11,7 +11,7 @@ maintainers:
   - Joonyoung Shim <jy0922.shim@samsung.com>
   - Seung-Woo Kim <sw0312.kim@samsung.com>
   - Kyungmin Park <kyungmin.park@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   DECON (Display and Enhancement Controller) is the Display Controller for the
index 01fccb1..c5c6239 100644 (file)
@@ -11,7 +11,7 @@ maintainers:
   - Joonyoung Shim <jy0922.shim@samsung.com>
   - Seung-Woo Kim <sw0312.kim@samsung.com>
   - Kyungmin Park <kyungmin.park@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   MIC (Mobile Image Compressor) resides between DECON and MIPI DSI. MIPI DSI is
index afa137d..320eedc 100644 (file)
@@ -11,7 +11,7 @@ maintainers:
   - Joonyoung Shim <jy0922.shim@samsung.com>
   - Seung-Woo Kim <sw0312.kim@samsung.com>
   - Kyungmin Park <kyungmin.park@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   DECON (Display and Enhancement Controller) is the Display Controller for the
index 9cf5f12..c62ea9d 100644 (file)
@@ -11,7 +11,7 @@ maintainers:
   - Joonyoung Shim <jy0922.shim@samsung.com>
   - Seung-Woo Kim <sw0312.kim@samsung.com>
   - Kyungmin Park <kyungmin.park@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index e614fe3..d09d79d 100644 (file)
@@ -29,6 +29,7 @@ properties:
   interrupts:
     description:
       Interrupt lines for each GPI instance
+    minItems: 1
     maxItems: 13
 
   "#dma-cells":
index f9ffe3d..1289605 100644 (file)
@@ -8,7 +8,7 @@ title: Maxim MAX77843 MicroUSB and Companion Power Management IC Extcon
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77843 MicroUSB
@@ -25,7 +25,7 @@ properties:
     $ref: /schemas/connector/usb-connector.yaml#
 
   ports:
-    $ref: /schemas/graph.yaml#/properties/port
+    $ref: /schemas/graph.yaml#/properties/ports
     description:
       Any connector to the data bus of this controller should be modelled using
       the OF graph bindings specified
index 4d6bfae..85f8d47 100644 (file)
@@ -20,6 +20,7 @@ properties:
           - mediatek,mt8183-mali
           - realtek,rtd1619-mali
           - renesas,r9a07g044-mali
+          - renesas,r9a07g054-mali
           - rockchip,px30-mali
           - rockchip,rk3568-mali
       - const: arm,mali-bifrost # Mali Bifrost GPU model/revision is fully discoverable
@@ -109,7 +110,9 @@ allOf:
       properties:
         compatible:
           contains:
-            const: renesas,r9a07g044-mali
+            enum:
+              - renesas,r9a07g044-mali
+              - renesas,r9a07g054-mali
     then:
       properties:
         interrupts:
index 4b5851c..b1a4c23 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: LTC4151 High Voltage I2C Current and Voltage Monitor
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index c42051f..028d6e5 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Microchip MCP3021 A/D converter
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 4669217..80df718 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Sensirion SHT15 humidity and temperature sensor
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index d3eff4f..c5a889e 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: TMP102 temperature sensor
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index eda55bb..dcbc6fb 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: TMP108 temperature sensor
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 801ca9b..e7493e2 100644 (file)
@@ -58,9 +58,8 @@ patternProperties:
           The value (two's complement) to be programmed in the channel specific N correction register.
           For remote channels only.
         $ref: /schemas/types.yaml#/definitions/int32
-        items:
-          minimum: -128
-          maximum: 127
+        minimum: -128
+        maximum: 127
 
     required:
       - reg
index 19874e8..3e52a0d 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung's High Speed I2C controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   The Samsung's High Speed I2C controller is used to interface with I2C devices
index 84051b0..c262305 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S3C/S5P/Exynos SoC I2C Controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index cf71108..666414a 100644 (file)
@@ -98,6 +98,7 @@ allOf:
               - ti,adc121s
               - ti,ads7866
               - ti,ads7868
+    then:
       required:
         - vcc-supply
   # Devices with a vref
index 7c260f2..92f9472 100644 (file)
@@ -108,9 +108,7 @@ patternProperties:
           - [1-5]: order 1 to 5.
           For audio purpose it is recommended to use order 3 to 5.
         $ref: /schemas/types.yaml#/definitions/uint32
-        items:
-          minimum: 0
-          maximum: 5
+        maximum: 5
 
       "#io-channel-cells":
         const: 1
@@ -174,7 +172,7 @@ patternProperties:
               contains:
                 const: st,stm32-dfsdm-adc
 
-      - then:
+        then:
           properties:
             st,adc-channels:
               minItems: 1
@@ -206,7 +204,7 @@ patternProperties:
               contains:
                 const: st,stm32-dfsdm-dmic
 
-      - then:
+        then:
           properties:
             st,adc-channels:
               maxItems: 1
@@ -254,7 +252,7 @@ allOf:
           contains:
             const: st,stm32h7-dfsdm
 
-  - then:
+    then:
       patternProperties:
         "^filter@[0-9]+$":
           properties:
@@ -269,7 +267,7 @@ allOf:
           contains:
             const: st,stm32mp1-dfsdm
 
-  - then:
+    then:
       patternProperties:
         "^filter@[0-9]+$":
           properties:
index 0d8fb56..65f86f2 100644 (file)
@@ -59,9 +59,9 @@ allOf:
           contains:
             enum:
               - adi,ad5371
-      then:
-        required:
-          - vref2-supply
+    then:
+      required:
+        - vref2-supply
 
 examples:
   - |
index 89853b4..8a676fe 100644 (file)
@@ -93,48 +93,48 @@ allOf:
               - qcom,sdm660-gnoc
               - qcom,sdm660-snoc
 
-      then:
-        properties:
-          clock-names:
-            items:
-              - const: bus
-              - const: bus_a
-
-          clocks:
-            items:
-              - description: Bus Clock
-              - description: Bus A Clock
-
-        # Child node's properties
-        patternProperties:
-          '^interconnect-[a-z0-9]+$':
-            type: object
-            description:
-              snoc-mm is a child of snoc, sharing snoc's register address space.
-
-            properties:
-              compatible:
-                enum:
-                  - qcom,msm8939-snoc-mm
-
-              '#interconnect-cells':
-                const: 1
-
-              clock-names:
-                items:
-                  - const: bus
-                  - const: bus_a
-
-              clocks:
-                items:
-                  - description: Bus Clock
-                  - description: Bus A Clock
-
-            required:
-              - compatible
-              - '#interconnect-cells'
-              - clock-names
-              - clocks
+    then:
+      properties:
+        clock-names:
+          items:
+            - const: bus
+            - const: bus_a
+
+        clocks:
+          items:
+            - description: Bus Clock
+            - description: Bus A Clock
+
+      # Child node's properties
+      patternProperties:
+        '^interconnect-[a-z0-9]+$':
+          type: object
+          description:
+            snoc-mm is a child of snoc, sharing snoc's register address space.
+
+          properties:
+            compatible:
+              enum:
+                - qcom,msm8939-snoc-mm
+
+            '#interconnect-cells':
+              const: 1
+
+            clock-names:
+              items:
+                - const: bus
+                - const: bus_a
+
+            clocks:
+              items:
+                - description: Bus Clock
+                - description: Bus A Clock
+
+          required:
+            - compatible
+            - '#interconnect-cells'
+            - clock-names
+            - clocks
 
   - if:
       properties:
index 372ccbf..5a583bf 100644 (file)
@@ -7,10 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Marvell MMP/Orion Interrupt controller bindings
 
 maintainers:
-  - Thomas Gleixner <tglx@linutronix.de>
-  - Jason Cooper <jason@lakedaemon.net>
-  - Marc Zyngier <maz@kernel.org>
-  - Rob Herring <robh+dt@kernel.org>
+  - Andrew Lunn <andrew@lunn.ch>
+  - Gregory Clement <gregory.clement@bootlin.com>
 
 allOf:
   - if:
index d631b75..72456a0 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos SoC Interrupt Combiner Controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   Samsung's Exynos4 architecture includes a interrupt combiner controller which
index 86a0005..e27f57b 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX77693 MicroUSB and Companion Power Management IC LEDs
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77693 MicroUSB Integrated
index 36781ee..c9d5adb 100644 (file)
@@ -65,7 +65,6 @@ properties:
   iram:
     $ref: /schemas/types.yaml#/definitions/phandle
     description: phandle pointing to the SRAM device node
-    maxItems: 1
 
 required:
   - compatible
index 9b179bb..aa55ca6 100644 (file)
@@ -63,13 +63,11 @@ properties:
 
   mediatek,vpu:
     $ref: /schemas/types.yaml#/definitions/phandle
-    maxItems: 1
     description:
       Describes point to vpu.
 
   mediatek,scp:
     $ref: /schemas/types.yaml#/definitions/phandle
-    maxItems: 1
     description:
       Describes point to scp.
 
index e7b65a9..deb5b65 100644 (file)
@@ -55,13 +55,11 @@ properties:
 
   mediatek,vpu:
     $ref: /schemas/types.yaml#/definitions/phandle
-    maxItems: 1
     description:
       Describes point to vpu.
 
   mediatek,scp:
     $ref: /schemas/types.yaml#/definitions/phandle
-    maxItems: 1
     description:
       Describes point to scp.
 
@@ -106,7 +104,6 @@ allOf:
           enum:
             - mediatek,mt8173-vcodec-enc
             - mediatek,mt8192-vcodec-enc
-            - mediatek,mt8173-vcodec-enc
 
     then:
       properties:
index 7687be0..c73bf23 100644 (file)
@@ -61,7 +61,6 @@ properties:
 
   mediatek,scp:
     $ref: /schemas/types.yaml#/definitions/phandle
-    maxItems: 1
     description: |
       The node of system control processor (SCP), using
       the remoteproc & rpmsg framework.
index 769f132..08cbdcd 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: DDR PHY Front End (DPFE) for Broadcom STB
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Markus Mayer <mmayer@broadcom.com>
 
 properties:
index f3e62ee..1daa665 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: LPDDR2 SDRAM AC timing parameters for a given speed-bin
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index dd2141c..9d78f14 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: LPDDR2 SDRAM compliant to JEDEC JESD209-2
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 97c3e98..5c6512c 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: LPDDR3 SDRAM AC timing parameters for a given speed-bin
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index c542f32..48908a1 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: LPDDR3 SDRAM compliant to JEDEC JESD209-3
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 14a6bc8..9249624 100644 (file)
@@ -8,7 +8,7 @@ title: Marvell MVEBU SDRAM controller
 
 maintainers:
   - Jan Luebbe <jlu@pengutronix.de>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 9566b34..0c511ab 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Atheros AR7xxx/AR9xxx DDR controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   The DDR controller of the AR7xxx and AR9xxx families provides an interface to
index 2b18cef..514b2c5 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: H8/300 bus controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Yoshinori Sato <ysato@users.sourceforge.jp>
 
 properties:
index f152243..098348b 100644 (file)
@@ -9,7 +9,7 @@ title: |
   Controller device
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Lukasz Luba <lukasz.luba@arm.com>
 
 description: |
index fb7ae38..f46e957 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Synopsys IntelliDDR Multi Protocol memory controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Manish Narani <manish.narani@xilinx.com>
   - Michal Simek <michal.simek@xilinx.com>
 
@@ -24,9 +24,9 @@ description: |
 properties:
   compatible:
     enum:
+      - snps,ddrc-3.80a
       - xlnx,zynq-ddrc-a05
       - xlnx,zynqmp-ddrc-2.40a
-      - snps,ddrc-3.80a
 
   interrupts:
     maxItems: 1
@@ -43,7 +43,9 @@ allOf:
       properties:
         compatible:
           contains:
-            const: xlnx,zynqmp-ddrc-2.40a
+            enum:
+              - snps,ddrc-3.80a
+              - xlnx,zynqmp-ddrc-2.40a
     then:
       required:
         - interrupts
index 9ed5118..382ddab 100644 (file)
@@ -8,7 +8,7 @@ title: Texas Instruments da8xx DDR2/mDDR memory controller
 
 maintainers:
   - Bartosz Golaszewski <bgolaszewski@baylibre.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   Documentation:
index 27870b8..52edd1b 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX14577/MAX77836 MicroUSB and Companion Power Management IC
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX14577/MAX77836 MicroUSB
index 859655a..d027aab 100644 (file)
@@ -8,7 +8,7 @@ title: Maxim MAX77686 Power Management IC
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77686 Power Management
index 9061011..1b06a77 100644 (file)
@@ -8,7 +8,7 @@ title: Maxim MAX77693 MicroUSB and Companion Power Management IC
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77693 MicroUSB
index baa1346..ad20139 100644 (file)
@@ -8,7 +8,7 @@ title: Maxim MAX77802 Power Management IC
 
 maintainers:
   - Javier Martinez Canillas <javier@dowhile0.org>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77802 Power Management
index 61a0f9d..f30f96b 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX77843 MicroUSB and Companion Power Management IC
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77843 MicroUSB
index bae55c9..f7bb67d 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos SoC Low Power Audio Subsystem (LPASS)
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
 properties:
index 017befd..055dfc3 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2MPA01 Power Management IC
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index 771b3f1..5ff6546 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2MPS11/13/14/15 and S2MPU02 Power Management IC
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index 5531718..10c7b40 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S5M8767 Power Management IC
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index ce64b34..f3f4d5b 100644 (file)
@@ -197,6 +197,8 @@ allOf:
               - nvidia,tegra30-sdhci
               - nvidia,tegra114-sdhci
               - nvidia,tegra124-sdhci
+    then:
+      properties:
         clocks:
           items:
             - description: module clock
index 15a45db..1bcaf6b 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Marvell International Ltd. NCI NFC controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 7465aea..e381a3c 100644 (file)
@@ -8,7 +8,7 @@ title: NXP Semiconductors NCI NFC controller
 
 maintainers:
   - Charles Gorand <charles.gorand@effinnov.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index d8ba5a1..0509e01 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: NXP Semiconductors PN532 NFC controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index d520414..18b3a7d 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: NXP Semiconductors PN544 NFC Controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index a6a1bc7..ef11550 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: STMicroelectronics ST NCI NFC controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 4356eac..8a72743 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: STMicroelectronics SAS ST21NFCA NFC controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index d3bca37..963d953 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: STMicroelectronics ST95HF NFC controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 40da2ac..404c8df 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Texas Instruments TRF7970A RFID/NFC/15693 Transceiver
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Mark Greer <mgreer@animalcreek.com>
 
 properties:
index 2d5248f..36c85eb 100644 (file)
@@ -53,20 +53,18 @@ properties:
         - allwinner,sun8i-r40-gmac
         - allwinner,sun8i-v3s-emac
         - allwinner,sun50i-a64-emac
-        - loongson,ls2k-dwmac
-        - loongson,ls7a-dwmac
         - amlogic,meson6-dwmac
         - amlogic,meson8b-dwmac
         - amlogic,meson8m2-dwmac
         - amlogic,meson-gxbb-dwmac
         - amlogic,meson-axg-dwmac
-        - loongson,ls2k-dwmac
-        - loongson,ls7a-dwmac
         - ingenic,jz4775-mac
         - ingenic,x1000-mac
         - ingenic,x1600-mac
         - ingenic,x1830-mac
         - ingenic,x2000-mac
+        - loongson,ls2k-dwmac
+        - loongson,ls7a-dwmac
         - rockchip,px30-gmac
         - rockchip,rk3128-gmac
         - rockchip,rk3228-gmac
index e602761..b0ebcef 100644 (file)
@@ -13,9 +13,6 @@ description: |
   This describes the devicetree bindings for AVE ethernet controller
   implemented on Socionext UniPhier SoCs.
 
-allOf:
-  - $ref: ethernet-controller.yaml#
-
 properties:
   compatible:
     enum:
@@ -44,25 +41,13 @@ properties:
     minItems: 1
     maxItems: 4
 
-  clock-names:
-    oneOf:
-      - items:          # for Pro4
-          - const: gio
-          - const: ether
-          - const: ether-gb
-          - const: ether-phy
-      - const: ether    # for others
+  clock-names: true
 
   resets:
     minItems: 1
     maxItems: 2
 
-  reset-names:
-    oneOf:
-      - items:          # for Pro4
-          - const: gio
-          - const: ether
-      - const: ether    # for others
+  reset-names: true
 
   socionext,syscon-phy-mode:
     $ref: /schemas/types.yaml#/definitions/phandle-array
@@ -78,6 +63,42 @@ properties:
     $ref: mdio.yaml#
     unevaluatedProperties: false
 
+allOf:
+  - $ref: ethernet-controller.yaml#
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: socionext,uniphier-pro4-ave4
+    then:
+      properties:
+        clocks:
+          minItems: 4
+          maxItems: 4
+        clock-names:
+          items:
+            - const: gio
+            - const: ether
+            - const: ether-gb
+            - const: ether-phy
+        resets:
+          minItems: 2
+          maxItems: 2
+        reset-names:
+          items:
+            - const: gio
+            - const: ether
+    else:
+      properties:
+        clocks:
+          maxItems: 1
+        clock-names:
+          const: ether
+        resets:
+          maxItems: 1
+        reset-names:
+          const: ether
+
 required:
   - compatible
   - reg
@@ -90,7 +111,7 @@ required:
   - reset-names
   - mdio
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index dbfca5e..6f44f95 100644 (file)
@@ -56,6 +56,7 @@ if:
     compatible:
       contains:
         const: ti,davinci_mdio
+then:
   required:
     - bus_freq
 
index dfde0ea..d61585c 100644 (file)
@@ -275,17 +275,17 @@ allOf:
           - nvidia,hssquelch-level
           - nvidia,hsdiscon-level
 
-        else:
-          properties:
-            clocks:
-              maxItems: 4
+      else:
+        properties:
+          clocks:
+            maxItems: 4
 
-            clock-names:
-              items:
-                - const: reg
-                - const: pll_u
-                - const: timer
-                - const: utmi-pads
+          clock-names:
+            items:
+              - const: reg
+              - const: pll_u
+              - const: timer
+              - const: utmi-pads
 
   - if:
       properties:
index e23e559..0655e48 100644 (file)
@@ -14,24 +14,24 @@ if:
     compatible:
       contains:
         const: qcom,usb-hs-phy-apq8064
-  then:
-    properties:
-      resets:
-        maxItems: 1
+then:
+  properties:
+    resets:
+      maxItems: 1
 
-      reset-names:
-        const: por
+    reset-names:
+      const: por
 
-  else:
-    properties:
-      resets:
-        minItems: 2
-        maxItems: 2
+else:
+  properties:
+    resets:
+      minItems: 2
+      maxItems: 2
 
-      reset-names:
-        items:
-          - const: phy
-          - const: por
+    reset-names:
+      items:
+        - const: phy
+        - const: por
 
 properties:
   compatible:
@@ -92,6 +92,8 @@ additionalProperties: false
 examples:
   - |
     otg: usb-controller {
+      #reset-cells = <1>;
+
       ulpi {
         phy {
           compatible = "qcom,usb-hs-phy-msm8974", "qcom,usb-hs-phy";
index 838c6d4..b03b2f0 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos SoC DisplayPort PHY
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Marek Szyprowski <m.szyprowski@samsung.com>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
index c61574e..3e5f035 100644 (file)
@@ -11,7 +11,7 @@ maintainers:
   - Joonyoung Shim <jy0922.shim@samsung.com>
   - Seung-Woo Kim <sw0312.kim@samsung.com>
   - Kyungmin Park <kyungmin.park@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 62b39bb..8751e55 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos5250 SoC SATA PHY
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Marek Szyprowski <m.szyprowski@samsung.com>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
index 54aa056..415440a 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S5P/Exynos SoC MIPI CSIS/DSIM DPHY
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Marek Szyprowski <m.szyprowski@samsung.com>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
index 056e270..d9f22a8 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S5P/Exynos SoC USB 2.0 PHY
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Marek Szyprowski <m.szyprowski@samsung.com>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
index f83f0f8..5ba55f9 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos SoC USB 3.0 DRD PHY USB 2.0 PHY
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Marek Szyprowski <m.szyprowski@samsung.com>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
index 8a90d82..6bd42e4 100644 (file)
@@ -48,13 +48,12 @@ properties:
               Name of one pin group to configure.
             enum: [ aif1, aif2, aif3, aif4, mif1, mif2, mif3, pdmspk1,
                     pdmspk2, dmic4, dmic5, dmic6, gpio1, gpio2, gpio3,
-                    gpio4, gpio5, gpio6, gpio7, gpio7, gpio8, gpio9,
+                    gpio4, gpio5, gpio6, gpio7, gpio8, gpio9,
                     gpio10, gpio11, gpio12, gpio13, gpio14, gpio15,
-                    gpio16, gpio17, gpio17, gpio18, gpio19, gpio20,
-                    gpio21, gpio22, gpio23, gpio24, gpio25, gpio26,
-                    gpio27, gpio27, gpio28, gpio29, gpio30, gpio31,
-                    gpio32, gpio33, gpio34, gpio35, gpio36, gpio37,
-                    gpio37, gpio38, gpio39 ]
+                    gpio16, gpio17, gpio18, gpio19, gpio20, gpio21,
+                    gpio22, gpio23, gpio24, gpio25, gpio26, gpio27,
+                    gpio28, gpio29, gpio30, gpio31, gpio32, gpio33,
+                    gpio34, gpio35, gpio36, gpio37, gpio38, gpio39 ]
 
           function:
             description:
index f73348c..8cf3c47 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S3C/S5P/Exynos SoC pin controller - gpio bank
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index c71939a..9869d4d 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S3C/S5P/Exynos SoC pin controller - pins configuration
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index a822f70..1de91a5 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S3C/S5P/Exynos SoC pin controller - wake-up interrupt controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 989e48c..3a65c66 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S3C/S5P/Exynos SoC pin controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 4d293b2..d77fc88 100644 (file)
@@ -36,7 +36,8 @@ properties:
   cpus:
     $ref: /schemas/types.yaml#/definitions/phandle-array
     items:
-      maxItems: 1
+      minItems: 1
+      maxItems: 4
     description: |
       Array of phandles pointing to CPU cores, which should match the order of
       CPU cores used by the WUPCR and PSTR registers in the Advanced Power
index f8461f0..118cf48 100644 (file)
@@ -16,7 +16,6 @@ allOf:
 properties:
   compatible:
     enum:
-      - ti,bq24150
       - ti,bq24150
       - ti,bq24150a
       - ti,bq24151
index 3978b48..4d3a1d0 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX14577/MAX77836 MicroUSB and Companion Power Management IC Charger
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX14577/MAX77836 MicroUSB
index a21dc1a..f5fd53d 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX77693 MicroUSB and Companion Power Management IC Charger
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77693 MicroUSB Integrated
index 9b131c6..84eeaef 100644 (file)
@@ -18,23 +18,23 @@ description:
 
 allOf:
   - $ref: "regulator.yaml#"
-
-if:
-  properties:
-    compatible:
-      contains:
-        const: regulator-fixed-clock
-  required:
-    - clocks
-else:
-  if:
-    properties:
-      compatible:
-        contains:
-          const: regulator-fixed-domain
-    required:
-      - power-domains
-      - required-opps
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: regulator-fixed-clock
+    then:
+      required:
+        - clocks
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: regulator-fixed-domain
+    then:
+      required:
+        - power-domains
+        - required-opps
 
 properties:
   compatible:
index 16f0188..285dc71 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX14577/MAX77836 MicroUSB and Companion Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX14577/MAX77836 MicroUSB
index bb64b67..0e7cd4b 100644 (file)
@@ -8,7 +8,7 @@ title: Maxim MAX77686 Power Management IC regulators
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77686 Power Management
index 20d8559..945a539 100644 (file)
@@ -8,7 +8,7 @@ title: Maxim MAX77693 MicroUSB and Companion Power Management IC regulators
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77693 MicroUSB Integrated
index f2b4dd1..236348c 100644 (file)
@@ -8,7 +8,7 @@ title: Maxim MAX77802 Power Management IC regulators
 
 maintainers:
   - Javier Martinez Canillas <javier@dowhile0.org>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77802 Power Management
index a963025..9695e72 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX77843 MicroUSB and Companion Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77843 MicroUSB Integrated
index e4e8c58..3ff0d7d 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX8952 voltage regulator
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 allOf:
   - $ref: regulator.yaml#
index 5898dcf..b92eef6 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX8973/MAX77621 voltage regulator
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 allOf:
   - $ref: regulator.yaml#
index d5a44ca..4321f06 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX8997 Power Management IC
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   The Maxim MAX8997 is a Power Management IC which includes voltage and current
index 0627dec..0f9eb31 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2MPA01 Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index e3b7807..f1c50dc 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2MPS11 Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index 579d77a..53b105a 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2MPS13 Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index fdea290..01f9d4e 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2MPS14 Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index b3a883c..9576c2d 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2MPS15 Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index 0ded695..39b652c 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2MPU02 Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index 3c1617b..172631c 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S5M8767 Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index 2424de7..d99a729 100644 (file)
@@ -104,8 +104,7 @@ properties:
   qcom,smem-state-names:
     $ref: /schemas/types.yaml#/definitions/string
     description: The names of the state bits used for SMP2P output
-    items:
-      - const: stop
+    const: stop
 
   glink-edge:
     type: object
@@ -130,7 +129,6 @@ properties:
       qcom,remote-pid:
         $ref: /schemas/types.yaml#/definitions/uint32
         description: ID of the shared memory used by GLINK for communication with WPSS
-        maxItems: 1
 
     required:
       - interrupts
index b0c41ab..cdfcf32 100644 (file)
@@ -24,6 +24,11 @@ properties:
           - const: hisilicon,hi3670-reset
           - const: hisilicon,hi3660-reset
 
+  hisi,rst-syscon:
+    deprecated: true
+    description: phandle of the reset's syscon, use hisilicon,rst-syscon instead
+    $ref: /schemas/types.yaml#/definitions/phandle
+
   hisilicon,rst-syscon:
     description: phandle of the reset's syscon.
     $ref: /schemas/types.yaml#/definitions/phandle
index a50c34d..765d9f9 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos SoC True Random Number Generator
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Łukasz Stelmach <l.stelmach@samsung.com>
 
 properties:
index 84bf518..4754174 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: TimerIO Random Number Generator
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index a98ed66..0cabb77 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung's Exynos USI (Universal Serial Interface) binding
 
 maintainers:
   - Sam Protsenko <semen.protsenko@linaro.org>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   USI IP-core provides selectable serial protocol (UART, SPI or High-Speed I2C).
index cea2bf3..9bc4585 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Insignal Arndale boards audio complex
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
 properties:
index cb51af9..ac151d3 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung SMDK5250 audio complex with WM8994 codec
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
 properties:
index 0c3b330..51a83d3 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Google Snow audio complex with MAX9809x codec
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
 properties:
index 74712d6..491e080 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos5433 TM2(E) audio complex with WM5110 codec
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
 properties:
index b3dbcba..fe2e155 100644 (file)
@@ -136,8 +136,7 @@ allOf:
         compatible:
           contains:
             const: st,stm32f4-sai
-
-  - then:
+    then:
       properties:
         clocks:
           items:
@@ -148,8 +147,7 @@ allOf:
           items:
             - const: x8k
             - const: x11k
-
-  - else:
+    else:
       properties:
         clocks:
           items:
index b104899..5de710a 100644 (file)
@@ -124,7 +124,6 @@ properties:
     description: |
       Override the default TX fifo size.  Unit is words.  Ignored if 0.
     $ref: /schemas/types.yaml#/definitions/uint32
-    maxItems: 1
     default: 64
 
   renesas,rx-fifo-size:
@@ -132,7 +131,6 @@ properties:
     description: |
       Override the default RX fifo size.  Unit is words.  Ignored if 0.
     $ref: /schemas/types.yaml#/definitions/uint32
-    maxItems: 1
     default: 64
 
 required:
index f0db3fb..25b1b6c 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Peripheral-specific properties for Samsung S3C/S5P/Exynos SoC SPI controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description:
   See spi-peripheral-props.yaml for more info.
index bf9a76d..a50f24f 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S3C/S5P/Exynos SoC SPI controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description:
   All the SPI controller nodes should be represented in the aliases node using
index 668a9a4..993430b 100644 (file)
@@ -136,14 +136,14 @@ required:
   - reg
 
 if:
-  properties:
-    compatible:
-      contains:
-        enum:
-          - qcom,rpm-msg-ram
-          - rockchip,rk3288-pmu-sram
-
-else:
+  not:
+    properties:
+      compatible:
+        contains:
+          enum:
+            - qcom,rpm-msg-ram
+            - rockchip,rk3288-pmu-sram
+then:
   required:
     - "#address-cells"
     - "#size-cells"
index 17129f7..1344df7 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos SoC Thermal Management Unit (TMU)
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   For multi-instance tmu each instance should have an alias correctly numbered
index 22b91a2..6b9a3bc 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos SoC USB 3.0 DWC3 Controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index fbf07d6..340dff8 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos SoC USB 2.0 EHCI/OHCI Controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index be793c4..d7507be 100644 (file)
@@ -73,7 +73,7 @@ busy.
 If successful, the cache backend can then start setting up the cache.  In the
 event that the initialisation fails, the cache backend should call::
 
-       void fscache_relinquish_cookie(struct fscache_cache *cache);
+       void fscache_relinquish_cache(struct fscache_cache *cache);
 
 to reset and discard the cookie.
 
@@ -110,9 +110,9 @@ to withdraw them, calling::
 
 on the cookie that each object belongs to.  This schedules the specified cookie
 for withdrawal.  This gets offloaded to a workqueue.  The cache backend can
-test for completion by calling::
+wait for completion by calling::
 
-       bool fscache_are_objects_withdrawn(struct fscache_cookie *cache);
+       void fscache_wait_for_objects(struct fscache_cache *cache);
 
 Once all the cookies are withdrawn, a cache backend can withdraw all the
 volumes, calling::
@@ -125,7 +125,7 @@ outstanding accesses on the volume to complete before returning.
 When the cache is completely withdrawn, fscache should be notified by
 calling::
 
-       void fscache_cache_relinquish(struct fscache_cache *cache);
+       void fscache_relinquish_cache(struct fscache_cache *cache);
 
 to clear fields in the cookie and discard the caller's ref on it.
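As an aside (not part of the patch), a minimal sketch of a backend shutdown path using only the helpers quoted above; the wrapper function and the elided per-cookie/per-volume steps are assumptions::

        /*
         * Illustrative only: drive withdrawal with the helpers the text
         * names.  The per-cookie and per-volume withdrawal calls are
         * elided because their names are not quoted here.
         */
        static void example_cache_shutdown(struct fscache_cache *cache)
        {
                /* ... schedule withdrawal of each cookie owned by this
                 * cache, then block until the offloaded work completes. */
                fscache_wait_for_objects(cache);

                /* ... withdraw the volumes ... */

                /* Drop fscache's record of this cache and the ref on it. */
                fscache_relinquish_cache(cache);
        }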
 
index 5066113..7308d76 100644 (file)
@@ -404,22 +404,21 @@ schedule a write of that region::
 And if an error occurs before that point is reached, the marks can be removed
 by calling::
 
-       void fscache_clear_page_bits(struct fscache_cookie *cookie,
-                                    struct address_space *mapping,
+       void fscache_clear_page_bits(struct address_space *mapping,
                                     loff_t start, size_t len,
                                     bool caching)
 
-In both of these functions, the cookie representing the cache object to be
-written to and a pointer to the mapping to which the source pages are attached
-are passed in; start and len indicate the size of the region that's going to be
-written (it doesn't have to align to page boundaries necessarily, but it does
-have to align to DIO boundaries on the backing filesystem).  The caching
-parameter indicates if caching should be skipped, and if false, the functions
-do nothing.
-
-The write function takes some additional parameters: i_size indicates the size
-of the netfs file and term_func indicates an optional completion function, to
-which term_func_priv will be passed, along with the error or amount written.
+In these functions, a pointer to the mapping to which the source pages are
+attached is passed in and start and len indicate the size of the region that's
+going to be written (it doesn't have to align to page boundaries necessarily,
+but it does have to align to DIO boundaries on the backing filesystem).  The
+caching parameter indicates if caching should be skipped, and if false, the
+functions do nothing.
+
+The write function takes some additional parameters: the cookie representing
+the cache object to be written to, i_size indicates the size of the netfs file
+and term_func indicates an optional completion function, to which
+term_func_priv will be passed, along with the error or amount written.
 
 Note that the write function will always run asynchronously and will unmark all
 the pages upon completion before calling term_func.
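A hedged sketch of the new calling convention; the surrounding function and variable names are assumptions, only fscache_clear_page_bits() and its arguments come from the text above::

        /*
         * Illustrative only: abort a write attempt by clearing the page
         * marks over the region that was marked earlier.
         */
        static void example_abort_write(struct address_space *mapping,
                                        loff_t start, size_t len,
                                        bool caching)
        {
                /* No cookie argument any more; if 'caching' is false the
                 * call is a no-op, as described above. */
                fscache_clear_page_bits(mapping, start, len, caching);
        }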
index 54386a0..871d2da 100644 (file)
@@ -76,7 +76,7 @@ The beginning of an extended attribute block is in
      - Checksum of the extended attribute block.
    * - 0x14
      - \_\_u32
-     - h\_reserved[2]
+     - h\_reserved[3]
      - Zero.
 
 The checksum is calculated against the FS UUID, the 64-bit block number
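For reference, a sketch of the header layout the table above documents, mirroring struct ext4_xattr_header in fs/ext4/xattr.h; it illustrates why the reserved area is three words: it pads the header out to 32 bytes after h_checksum at offset 0x10::

        struct example_xattr_header {
                __le32  h_magic;        /* 0x00: identification magic */
                __le32  h_refcount;     /* 0x04 */
                __le32  h_blocks;       /* 0x08 */
                __le32  h_hash;         /* 0x0c */
                __le32  h_checksum;     /* 0x10: checksum of the block */
                __u32   h_reserved[3];  /* 0x14: zero, pads to 32 bytes */
        };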
index 525e684..43be378 100644 (file)
@@ -894,7 +894,7 @@ xmit_hash_policy
                Uses XOR of hardware MAC addresses and packet type ID
                field to generate the hash. The formula is
 
-               hash = source MAC XOR destination MAC XOR packet type ID
+               hash = source MAC[5] XOR destination MAC[5] XOR packet type ID
                slave number = hash modulo slave count
 
                This algorithm will place all traffic to a particular
@@ -910,7 +910,7 @@ xmit_hash_policy
                Uses XOR of hardware MAC addresses and IP addresses to
                generate the hash.  The formula is
 
-               hash = source MAC XOR destination MAC XOR packet type ID
+               hash = source MAC[5] XOR destination MAC[5] XOR packet type ID
                hash = hash XOR source IP XOR destination IP
                hash = hash XOR (hash RSHIFT 16)
                hash = hash XOR (hash RSHIFT 8)
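A small illustrative sketch of the corrected layer2 formula, where MAC[5] is the low-order octet of the address; the function and parameter names are assumptions, only the arithmetic comes from the text above::

        static unsigned int example_layer2_hash(const u8 *src_mac,
                                                const u8 *dst_mac,
                                                u16 packet_type_id,
                                                unsigned int slave_count)
        {
                /* hash = source MAC[5] XOR destination MAC[5] XOR type ID */
                unsigned int hash = src_mac[5] ^ dst_mac[5] ^ packet_type_id;

                /* slave number = hash modulo slave count */
                return hash % slave_count;
        }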
index b0024aa..6682829 100644 (file)
@@ -267,6 +267,13 @@ ipfrag_max_dist - INTEGER
        from different IP datagrams, which could result in data corruption.
        Default: 64
 
+bc_forwarding - INTEGER
+       bc_forwarding enables the feature described in rfc1812#section-5.3.5.2
+       and rfc2644. It allows the router to forward directed broadcast.
+       To enable this feature, the 'all' entry and the input interface entry
+       should be set to 1.
+       Default: 0
+
 INET peer storage
 =================
 
index d13fa66..85c7abc 100644 (file)
@@ -6190,6 +6190,7 @@ Valid values for 'type' are:
                        unsigned long args[6];
                        unsigned long ret[2];
                } riscv_sbi;
+
 If exit reason is KVM_EXIT_RISCV_SBI then it indicates that the VCPU has
 done a SBI call which is not handled by KVM RISC-V kernel module. The details
 of the SBI call are available in 'riscv_sbi' member of kvm_run structure. The
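A hedged userspace sketch of handling this exit; only the args/ret members shown in the struct fragment above are used, and the handler shape and chosen return values are assumptions::

        static void example_handle_sbi_exit(struct kvm_run *run)
        {
                if (run->exit_reason != KVM_EXIT_RISCV_SBI)
                        return;

                /* a0..a5 of the SBI call that KVM did not handle */
                unsigned long a0 = run->riscv_sbi.args[0];

                /* The VMM fills in the SBI return values before resuming. */
                run->riscv_sbi.ret[0] = 0;      /* assumed: success */
                run->riscv_sbi.ret[1] = a0;
        }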
index db43ee5..31f62b6 100644 (file)
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
 =================
 KVM VCPU Requests
 =================
index 1c6847f..2d30781 100644 (file)
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
 ======================================
 Secure Encrypted Virtualization (SEV)
 ======================================
index 806f049..410e0aa 100644 (file)
@@ -1,3 +1,4 @@
+.. SPDX-License-Identifier: GPL-2.0
 
 =======================================
 Known limitations of CPU virtualization
@@ -36,4 +37,3 @@ Nested virtualization features
 ------------------------------
 
 TBD
-
index bd70c69..a27e676 100644 (file)
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
 ==============================
 Running nested guests with KVM
 ==============================
index 61d9f11..5e8c2f6 100644 (file)
@@ -201,6 +201,7 @@ F:  include/net/ieee80211_radiotap.h
 F:     include/net/iw_handler.h
 F:     include/net/wext.h
 F:     include/uapi/linux/nl80211.h
+F:     include/uapi/linux/wireless.h
 F:     net/wireless/
 
 8169 10/100/1000 GIGABIT ETHERNET DRIVER
@@ -2636,7 +2637,7 @@ F:        sound/soc/rockchip/
 N:     rockchip
 
 ARM/SAMSUNG S3C, S5P AND EXYNOS ARM ARCHITECTURES
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 R:     Alim Akhtar <alim.akhtar@samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org
@@ -3742,7 +3743,7 @@ F:        include/linux/platform_data/b53.h
 
 BROADCOM BCM2711/BCM2835 ARM ARCHITECTURE
 M:     Nicolas Saenz Julienne <nsaenz@kernel.org>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
@@ -3757,7 +3758,7 @@ BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
 M:     Florian Fainelli <f.fainelli@gmail.com>
 M:     Ray Jui <rjui@broadcom.com>
 M:     Scott Branden <sbranden@broadcom.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 S:     Maintained
 T:     git git://github.com/broadcom/mach-bcm
 F:     arch/arm/mach-bcm/
@@ -3777,7 +3778,7 @@ F:        arch/mips/include/asm/mach-bcm47xx/*
 
 BROADCOM BCM4908 ETHERNET DRIVER
 M:     Rafał Miłecki <rafal@milecki.pl>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/brcm,bcm4908-enet.yaml
@@ -3786,7 +3787,7 @@ F:        drivers/net/ethernet/broadcom/unimac.h
 
 BROADCOM BCM4908 PINMUX DRIVER
 M:     Rafał Miłecki <rafal@milecki.pl>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-gpio@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/pinctrl/brcm,bcm4908-pinctrl.yaml
@@ -3796,7 +3797,7 @@ BROADCOM BCM5301X ARM ARCHITECTURE
 M:     Florian Fainelli <f.fainelli@gmail.com>
 M:     Hauke Mehrtens <hauke@hauke-m.de>
 M:     Rafał Miłecki <zajec5@gmail.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/boot/dts/bcm470*
@@ -3807,7 +3808,7 @@ F:        arch/arm/mach-bcm/bcm_5301x.c
 BROADCOM BCM53573 ARM ARCHITECTURE
 M:     Florian Fainelli <f.fainelli@gmail.com>
 M:     Rafał Miłecki <rafal@milecki.pl>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/boot/dts/bcm47189*
@@ -3815,7 +3816,7 @@ F:        arch/arm/boot/dts/bcm53573*
 
 BROADCOM BCM63XX ARM ARCHITECTURE
 M:     Florian Fainelli <f.fainelli@gmail.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 T:     git git://github.com/broadcom/stblinux.git
@@ -3829,7 +3830,7 @@ F:        drivers/usb/gadget/udc/bcm63xx_udc.*
 
 BROADCOM BCM7XXX ARM ARCHITECTURE
 M:     Florian Fainelli <f.fainelli@gmail.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 T:     git git://github.com/broadcom/stblinux.git
@@ -3847,21 +3848,21 @@ N:      bcm7120
 BROADCOM BDC DRIVER
 M:     Al Cooper <alcooperx@gmail.com>
 L:     linux-usb@vger.kernel.org
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 S:     Maintained
 F:     Documentation/devicetree/bindings/usb/brcm,bdc.yaml
 F:     drivers/usb/gadget/udc/bdc/
 
 BROADCOM BMIPS CPUFREQ DRIVER
 M:     Markus Mayer <mmayer@broadcom.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-pm@vger.kernel.org
 S:     Maintained
 F:     drivers/cpufreq/bmips-cpufreq.c
 
 BROADCOM BMIPS MIPS ARCHITECTURE
 M:     Florian Fainelli <f.fainelli@gmail.com>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-mips@vger.kernel.org
 S:     Maintained
 T:     git git://github.com/broadcom/stblinux.git
@@ -3927,53 +3928,53 @@ F:      drivers/net/wireless/broadcom/brcm80211/
 BROADCOM BRCMSTB GPIO DRIVER
 M:     Doug Berger <opendmb@gmail.com>
 M:     Florian Fainelli <f.fainelli@gmail.com>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 S:     Supported
 F:     Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.yaml
 F:     drivers/gpio/gpio-brcmstb.c
 
 BROADCOM BRCMSTB I2C DRIVER
 M:     Kamal Dasu <kdasu.kdev@gmail.com>
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-i2c@vger.kernel.org
-L:     bcm-kernel-feedback-list@broadcom.com
 S:     Supported
 F:     Documentation/devicetree/bindings/i2c/brcm,brcmstb-i2c.yaml
 F:     drivers/i2c/busses/i2c-brcmstb.c
 
 BROADCOM BRCMSTB UART DRIVER
 M:     Al Cooper <alcooperx@gmail.com>
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-serial@vger.kernel.org
-L:     bcm-kernel-feedback-list@broadcom.com
 S:     Maintained
 F:     Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml
 F:     drivers/tty/serial/8250/8250_bcm7271.c
 
 BROADCOM BRCMSTB USB EHCI DRIVER
 M:     Al Cooper <alcooperx@gmail.com>
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-usb@vger.kernel.org
-L:     bcm-kernel-feedback-list@broadcom.com
 S:     Maintained
 F:     Documentation/devicetree/bindings/usb/brcm,bcm7445-ehci.yaml
 F:     drivers/usb/host/ehci-brcm.*
 
 BROADCOM BRCMSTB USB PIN MAP DRIVER
 M:     Al Cooper <alcooperx@gmail.com>
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-usb@vger.kernel.org
-L:     bcm-kernel-feedback-list@broadcom.com
 S:     Maintained
 F:     Documentation/devicetree/bindings/usb/brcm,usb-pinmap.yaml
 F:     drivers/usb/misc/brcmstb-usb-pinmap.c
 
 BROADCOM BRCMSTB USB2 and USB3 PHY DRIVER
 M:     Al Cooper <alcooperx@gmail.com>
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-kernel@vger.kernel.org
-L:     bcm-kernel-feedback-list@broadcom.com
 S:     Maintained
 F:     drivers/phy/broadcom/phy-brcm-usb*
 
 BROADCOM ETHERNET PHY DRIVERS
 M:     Florian Fainelli <f.fainelli@gmail.com>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     Documentation/devicetree/bindings/net/broadcom-bcm87xx.txt
@@ -3984,7 +3985,7 @@ F:        include/linux/brcmphy.h
 BROADCOM GENET ETHERNET DRIVER
 M:     Doug Berger <opendmb@gmail.com>
 M:     Florian Fainelli <f.fainelli@gmail.com>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     Documentation/devicetree/bindings/net/brcm,bcmgenet.yaml
@@ -3998,7 +3999,7 @@ F:        include/linux/platform_data/mdio-bcm-unimac.h
 BROADCOM IPROC ARM ARCHITECTURE
 M:     Ray Jui <rjui@broadcom.com>
 M:     Scott Branden <sbranden@broadcom.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 T:     git git://github.com/broadcom/stblinux.git
@@ -4026,7 +4027,7 @@ N:        stingray
 
 BROADCOM IPROC GBIT ETHERNET DRIVER
 M:     Rafał Miłecki <rafal@milecki.pl>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/brcm,amac.yaml
@@ -4035,7 +4036,7 @@ F:        drivers/net/ethernet/broadcom/unimac.h
 
 BROADCOM KONA GPIO DRIVER
 M:     Ray Jui <rjui@broadcom.com>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 S:     Supported
 F:     Documentation/devicetree/bindings/gpio/brcm,kona-gpio.txt
 F:     drivers/gpio/gpio-bcm-kona.c
@@ -4068,7 +4069,7 @@ F:        drivers/firmware/broadcom/*
 BROADCOM PMB (POWER MANAGEMENT BUS) DRIVER
 M:     Rafał Miłecki <rafal@milecki.pl>
 M:     Florian Fainelli <f.fainelli@gmail.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-pm@vger.kernel.org
 S:     Maintained
 T:     git git://github.com/broadcom/stblinux.git
@@ -4084,7 +4085,7 @@ F:        include/linux/bcma/
 
 BROADCOM SPI DRIVER
 M:     Kamal Dasu <kdasu.kdev@gmail.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 S:     Maintained
 F:     Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.yaml
 F:     drivers/spi/spi-bcm-qspi.*
@@ -4093,7 +4094,7 @@ F:        drivers/spi/spi-iproc-qspi.c
 
 BROADCOM STB AVS CPUFREQ DRIVER
 M:     Markus Mayer <mmayer@broadcom.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-pm@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt
@@ -4101,7 +4102,7 @@ F:        drivers/cpufreq/brcmstb*
 
 BROADCOM STB AVS TMON DRIVER
 M:     Markus Mayer <mmayer@broadcom.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-pm@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/thermal/brcm,avs-tmon.yaml
@@ -4109,7 +4110,7 @@ F:        drivers/thermal/broadcom/brcmstb*
 
 BROADCOM STB DPFE DRIVER
 M:     Markus Mayer <mmayer@broadcom.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     Documentation/devicetree/bindings/memory-controllers/brcm,dpfe-cpu.yaml
@@ -4118,8 +4119,8 @@ F:        drivers/memory/brcmstb_dpfe.c
 BROADCOM STB NAND FLASH DRIVER
 M:     Brian Norris <computersforpeace@gmail.com>
 M:     Kamal Dasu <kdasu.kdev@gmail.com>
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-mtd@lists.infradead.org
-L:     bcm-kernel-feedback-list@broadcom.com
 S:     Maintained
 F:     drivers/mtd/nand/raw/brcmnand/
 F:     include/linux/platform_data/brcmnand.h
@@ -4128,7 +4129,7 @@ BROADCOM STB PCIE DRIVER
 M:     Jim Quinlan <jim2101024@gmail.com>
 M:     Nicolas Saenz Julienne <nsaenz@kernel.org>
 M:     Florian Fainelli <f.fainelli@gmail.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-pci@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
@@ -4136,7 +4137,7 @@ F:        drivers/pci/controller/pcie-brcmstb.c
 
 BROADCOM SYSTEMPORT ETHERNET DRIVER
 M:     Florian Fainelli <f.fainelli@gmail.com>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/broadcom/bcmsysport.*
@@ -4153,7 +4154,7 @@ F:        drivers/net/ethernet/broadcom/tg3.*
 
 BROADCOM VK DRIVER
 M:     Scott Branden <scott.branden@broadcom.com>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 S:     Supported
 F:     drivers/misc/bcm-vk/
 F:     include/uapi/linux/misc/bcm_vk.h
@@ -9336,14 +9337,12 @@ F:      drivers/pci/hotplug/rpaphp*
 
 IBM Power SRIOV Virtual NIC Device Driver
 M:     Dany Madden <drt@linux.ibm.com>
-M:     Sukadev Bhattiprolu <sukadev@linux.ibm.com>
 R:     Thomas Falcon <tlfalcon@linux.ibm.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/ibm/ibmvnic.*
 
 IBM Power Virtual Accelerator Switchboard
-M:     Sukadev Bhattiprolu <sukadev@linux.ibm.com>
 L:     linuxppc-dev@lists.ozlabs.org
 S:     Supported
 F:     arch/powerpc/include/asm/vas.h
@@ -10370,6 +10369,7 @@ F:      include/linux/isapnp.h
 ISCSI
 M:     Lee Duncan <lduncan@suse.com>
 M:     Chris Leech <cleech@redhat.com>
+M:     Mike Christie <michael.christie@oracle.com>
 L:     open-iscsi@googlegroups.com
 L:     linux-scsi@vger.kernel.org
 S:     Maintained
@@ -10547,6 +10547,7 @@ M:      Andrey Ryabinin <ryabinin.a.a@gmail.com>
 R:     Alexander Potapenko <glider@google.com>
 R:     Andrey Konovalov <andreyknvl@gmail.com>
 R:     Dmitry Vyukov <dvyukov@google.com>
+R:     Vincenzo Frascino <vincenzo.frascino@arm.com>
 L:     kasan-dev@googlegroups.com
 S:     Maintained
 F:     Documentation/dev-tools/kasan.rst
@@ -11906,7 +11907,7 @@ F:      drivers/iio/proximity/mb1232.c
 
 MAXIM MAX17040 FAMILY FUEL GAUGE DRIVERS
 R:     Iskren Chernev <iskren.chernev@gmail.com>
-R:     Krzysztof Kozlowski <krzk@kernel.org>
+R:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 R:     Marek Szyprowski <m.szyprowski@samsung.com>
 R:     Matheus Castello <matheus@castello.eng.br>
 L:     linux-pm@vger.kernel.org
@@ -11916,7 +11917,7 @@ F:      drivers/power/supply/max17040_battery.c
 
 MAXIM MAX17042 FAMILY FUEL GAUGE DRIVERS
 R:     Hans de Goede <hdegoede@redhat.com>
-R:     Krzysztof Kozlowski <krzk@kernel.org>
+R:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 R:     Marek Szyprowski <m.szyprowski@samsung.com>
 R:     Sebastian Krzyszkowiak <sebastian.krzyszkowiak@puri.sm>
 R:     Purism Kernel Team <kernel@puri.sm>
@@ -11968,7 +11969,7 @@ F:      Documentation/devicetree/bindings/power/supply/maxim,max77976.yaml
 F:     drivers/power/supply/max77976_charger.c
 
 MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:     linux-pm@vger.kernel.org
 S:     Supported
@@ -11979,7 +11980,7 @@ F:      drivers/power/supply/max77693_charger.c
 
 MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS BASED BOARDS
 M:     Chanwoo Choi <cw00.choi@samsung.com>
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:     linux-kernel@vger.kernel.org
 S:     Supported
@@ -12402,7 +12403,7 @@ F:      drivers/mmc/host/mtk-sd.c
 
 MEDIATEK MT76 WIRELESS LAN DRIVER
 M:     Felix Fietkau <nbd@nbd.name>
-M:     Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+M:     Lorenzo Bianconi <lorenzo@kernel.org>
 M:     Ryder Lee <ryder.lee@mediatek.com>
 R:     Shayne Chen <shayne.chen@mediatek.com>
 R:     Sean Wang <sean.wang@mediatek.com>
@@ -12673,7 +12674,7 @@ F:      mm/memblock.c
 F:     tools/testing/memblock/
 
 MEMORY CONTROLLER DRIVERS
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux-mem-ctrl.git
@@ -13817,10 +13818,11 @@ F:    include/uapi/linux/nexthop.h
 F:     net/ipv4/nexthop.c
 
 NFC SUBSYSTEM
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 L:     linux-nfc@lists.01.org (subscribers-only)
 L:     netdev@vger.kernel.org
 S:     Maintained
+B:     mailto:linux-nfc@lists.01.org
 F:     Documentation/devicetree/bindings/net/nfc/
 F:     drivers/nfc/
 F:     include/linux/platform_data/nfcmrvl.h
@@ -14134,7 +14136,7 @@ F:      Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
 F:     drivers/regulator/pf8x00-regulator.c
 
 NXP PTN5150A CC LOGIC AND EXTCON DRIVER
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml
@@ -14687,7 +14689,7 @@ F:      scripts/dtc/
 
 OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
 M:     Rob Herring <robh+dt@kernel.org>
-M:     Krzysztof Kozlowski <krzk+dt@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski+dt@linaro.org>
 L:     devicetree@vger.kernel.org
 S:     Maintained
 C:     irc://irc.libera.chat/devicetree
@@ -15599,7 +15601,7 @@ F:      drivers/pinctrl/renesas/
 
 PIN CONTROLLER - SAMSUNG
 M:     Tomasz Figa <tomasz.figa@gmail.com>
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
 R:     Alim Akhtar <alim.akhtar@samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -17274,7 +17276,7 @@ W:      http://www.ibm.com/developerworks/linux/linux390/
 F:     drivers/s390/scsi/zfcp_*
 
 S3C ADC BATTERY DRIVER
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 L:     linux-samsung-soc@vger.kernel.org
 S:     Odd Fixes
 F:     drivers/power/supply/s3c_adc_battery.c
@@ -17319,7 +17321,7 @@ F:      Documentation/admin-guide/LSM/SafeSetID.rst
 F:     security/safesetid/
 
 SAMSUNG AUDIO (ASoC) DRIVERS
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Supported
@@ -17327,7 +17329,7 @@ F:      Documentation/devicetree/bindings/sound/samsung*
 F:     sound/soc/samsung/
 
 SAMSUNG EXYNOS PSEUDO RANDOM NUMBER GENERATOR (RNG) DRIVER
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 L:     linux-crypto@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
 S:     Maintained
@@ -17362,7 +17364,7 @@ S:      Maintained
 F:     drivers/platform/x86/samsung-laptop.c
 
 SAMSUNG MULTIFUNCTION PMIC DEVICE DRIVERS
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:     linux-kernel@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
@@ -17388,7 +17390,7 @@ F:      drivers/media/platform/samsung/s3c-camif/
 F:     include/media/drv-intf/s3c_camif.h
 
 SAMSUNG S3FWRN5 NFC DRIVER
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Krzysztof Opasiak <k.opasiak@samsung.com>
 L:     linux-nfc@lists.01.org (subscribers-only)
 S:     Maintained
@@ -17410,7 +17412,7 @@ S:      Supported
 F:     drivers/media/i2c/s5k5baf.c
 
 SAMSUNG S5P Security SubSystem (SSS) DRIVER
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Vladimir Zapolskiy <vz@mleia.com>
 L:     linux-crypto@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
@@ -17445,7 +17447,7 @@ F:      include/linux/clk/samsung.h
 F:     include/linux/platform_data/clk-s3c2410.h
 
 SAMSUNG SPI DRIVERS
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Andi Shyti <andi@etezian.org>
 L:     linux-spi@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
@@ -17463,7 +17465,7 @@ F:      drivers/net/ethernet/samsung/sxgbe/
 
 SAMSUNG THERMAL DRIVER
 M:     Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 L:     linux-pm@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
 S:     Maintained
@@ -17647,8 +17649,8 @@ K:      \bTIF_SECCOMP\b
 
 SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) Broadcom BRCMSTB DRIVER
 M:     Al Cooper <alcooperx@gmail.com>
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-mmc@vger.kernel.org
-L:     bcm-kernel-feedback-list@broadcom.com
 S:     Maintained
 F:     drivers/mmc/host/sdhci-brcmstb*
 
@@ -21227,10 +21229,8 @@ S:     Maintained
 F:     drivers/hid/hid-wiimote*
 
 WILOCITY WIL6210 WIRELESS DRIVER
-M:     Maya Erez <merez@codeaurora.org>
 L:     linux-wireless@vger.kernel.org
-L:     wil6210@qti.qualcomm.com
-S:     Supported
+S:     Orphan
 W:     https://wireless.wiki.kernel.org/en/users/Drivers/wil6210
 F:     drivers/net/wireless/ath/wil6210/
 
index 29e273d..c3ec1ea 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
 NAME = Superb Owl
 
 # *DOCUMENTATION*
index 29b0167..31c4fdc 100644 (file)
@@ -854,10 +854,8 @@ config HAVE_ARCH_HUGE_VMAP
 
 #
 #  Archs that select this would be capable of PMD-sized vmaps (i.e.,
-#  arch_vmap_pmd_supported() returns true), and they must make no assumptions
-#  that vmalloc memory is mapped with PAGE_SIZE ptes. The VM_NO_HUGE_VMAP flag
-#  can be used to prohibit arch-specific allocations from using hugepages to
-#  help with this (e.g., modules may require it).
+#  arch_vmap_pmd_supported() returns true). The VM_ALLOW_HUGE_VMAP flag
+#  must be used to enable allocations to use hugepages.
 #
 config HAVE_ARCH_HUGE_VMALLOC
        depends on HAVE_ARCH_HUGE_VMAP
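
For context on the reworded comment above, a minimal sketch of how a caller could opt in to huge vmalloc mappings after this change. It assumes the long-standing __vmalloc_node_range() prototype and uses the VM_ALLOW_HUGE_VMAP flag named in the hunk; architectures without huge-vmap support should simply ignore the flag. This is not code from this merge.

    /* Sketch: request PMD-sized mappings for a large, long-lived buffer. */
    #include <linux/vmalloc.h>
    #include <linux/mm.h>

    static void *alloc_big_buffer(unsigned long size)
    {
            return __vmalloc_node_range(size, PAGE_SIZE,
                                        VMALLOC_START, VMALLOC_END,
                                        GFP_KERNEL, PAGE_KERNEL,
                                        VM_ALLOW_HUGE_VMAP, NUMA_NO_NODE,
                                        __builtin_return_address(0));
    }
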
index dcaa44e..f48ba03 100644 (file)
                        cs-gpios = <&creg_gpio 0 GPIO_ACTIVE_LOW>,
                                   <&creg_gpio 1 GPIO_ACTIVE_LOW>;
 
-                       spi-flash@0 {
+                       flash@0 {
                                compatible = "sst26wf016b", "jedec,spi-nor";
                                reg = <0>;
                                #address-cells = <1>;
index 088d348..1b0ffae 100644 (file)
@@ -5,7 +5,7 @@
 
 #define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
 
-#define ATOMIC_OP(op, c_op, asm_op)                                    \
+#define ATOMIC_OP(op, asm_op)                                  \
 static inline void arch_atomic_##op(int i, atomic_t *v)                        \
 {                                                                      \
        unsigned int val;                                               \
@@ -21,7 +21,7 @@ static inline void arch_atomic_##op(int i, atomic_t *v)                       \
        : "cc");                                                        \
 }                                                                      \
 
-#define ATOMIC_OP_RETURN(op, c_op, asm_op)                             \
+#define ATOMIC_OP_RETURN(op, asm_op)                           \
 static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)        \
 {                                                                      \
        unsigned int val;                                               \
@@ -42,7 +42,7 @@ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)       \
 #define arch_atomic_add_return_relaxed         arch_atomic_add_return_relaxed
 #define arch_atomic_sub_return_relaxed         arch_atomic_sub_return_relaxed
 
-#define ATOMIC_FETCH_OP(op, c_op, asm_op)                              \
+#define ATOMIC_FETCH_OP(op, asm_op)                            \
 static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
 {                                                                      \
        unsigned int val, orig;                                         \
@@ -69,23 +69,23 @@ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)      \
 #define arch_atomic_fetch_or_relaxed           arch_atomic_fetch_or_relaxed
 #define arch_atomic_fetch_xor_relaxed          arch_atomic_fetch_xor_relaxed
 
-#define ATOMIC_OPS(op, c_op, asm_op)                                   \
-       ATOMIC_OP(op, c_op, asm_op)                                     \
-       ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
-       ATOMIC_FETCH_OP(op, c_op, asm_op)
+#define ATOMIC_OPS(op, asm_op)                                 \
+       ATOMIC_OP(op, asm_op)                                   \
+       ATOMIC_OP_RETURN(op, asm_op)                            \
+       ATOMIC_FETCH_OP(op, asm_op)
 
-ATOMIC_OPS(add, +=, add)
-ATOMIC_OPS(sub, -=, sub)
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, sub)
 
 #undef ATOMIC_OPS
-#define ATOMIC_OPS(op, c_op, asm_op)                                   \
-       ATOMIC_OP(op, c_op, asm_op)                                     \
-       ATOMIC_FETCH_OP(op, c_op, asm_op)
+#define ATOMIC_OPS(op, asm_op)                                 \
+       ATOMIC_OP(op, asm_op)                                   \
+       ATOMIC_FETCH_OP(op, asm_op)
 
-ATOMIC_OPS(and, &=, and)
-ATOMIC_OPS(andnot, &= ~, bic)
-ATOMIC_OPS(or, |=, or)
-ATOMIC_OPS(xor, ^=, xor)
+ATOMIC_OPS(and, and)
+ATOMIC_OPS(andnot, bic)
+ATOMIC_OPS(or, or)
+ATOMIC_OPS(xor, xor)
 
 #define arch_atomic_andnot             arch_atomic_andnot
 
index 7848348..64ca25d 100644 (file)
@@ -98,9 +98,6 @@
 /*
  * 1st level paging: pgd
  */
-#define pgd_index(addr)                ((addr) >> PGDIR_SHIFT)
-#define pgd_offset(mm, addr)   (((mm)->pgd) + pgd_index(addr))
-#define pgd_offset_k(addr)     pgd_offset(&init_mm, addr)
 #define pgd_ERROR(e) \
        pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
index 03f8b1b..1e1db51 100644 (file)
@@ -366,7 +366,7 @@ void __kprobes disasm_instr(unsigned long addr, struct disasm_state *state,
        case op_SP:     /* LD_S|LDB_S b,[sp,u7], ST_S|STB_S b,[sp,u7] */
                /* note: we are ignoring possibility of:
                 * ADD_S, SUB_S, PUSH_S, POP_S as these should not
-                * cause unaliged exception anyway */
+                * cause unaligned exception anyway */
                state->write = BITS(state->words[0], 6, 6);
                state->zz = BITS(state->words[0], 5, 5);
                if (state->zz)
@@ -503,7 +503,6 @@ int __kprobes disasm_next_pc(unsigned long pc, struct pt_regs *regs,
 {
        struct disasm_state instr;
 
-       memset(&instr, 0, sizeof(struct disasm_state));
        disasm_instr(pc, &instr, 0, regs, cregs);
 
        *next_pc = pc + instr.instr_len;
index dd77a0c..66ba549 100644 (file)
@@ -196,6 +196,7 @@ tracesys_exit:
        st  r0, [sp, PT_r0]     ; sys call return value in pt_regs
 
        ;POST Sys Call Ptrace Hook
+       mov r0, sp              ; pt_regs needed
        bl  @syscall_trace_exit
        b   ret_from_exception ; NOT ret_from_system_call at is saves r0 which
        ; we'd done before calling post hook above
index f748483..3c1590c 100644 (file)
@@ -319,7 +319,7 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
        regs->ret = (unsigned long)ksig->ka.sa.sa_handler;
 
        /*
-        * handler returns using sigreturn stub provided already by userpsace
+        * handler returns using sigreturn stub provided already by userspace
         * If not, nuke the process right away
         */
        if(!(ksig->ka.sa.sa_flags & SA_RESTORER))
index 78e6d06..d947473 100644 (file)
@@ -35,7 +35,7 @@ EXPORT_SYMBOL_GPL(smp_atomic_ops_lock);
 
 struct plat_smp_ops  __weak plat_smp_ops;
 
-/* XXX: per cpu ? Only needed once in early seconday boot */
+/* XXX: per cpu ? Only needed once in early secondary boot */
 struct task_struct *secondary_idle_tsk;
 
 /* Called from start_kernel */
@@ -274,7 +274,7 @@ static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
         * and read back old value
         */
        do {
-               new = old = READ_ONCE(*ipi_data_ptr);
+               new = old = *ipi_data_ptr;
                new |= 1U << msg;
        } while (cmpxchg(ipi_data_ptr, old, new) != old);
 
index d63ebd8..99a9b92 100644 (file)
@@ -237,7 +237,7 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
        if (state.fault)
                goto fault;
 
-       /* clear any remanants of delay slot */
+       /* clear any remnants of delay slot */
        if (delay_mode(regs)) {
                regs->ret = regs->bta & ~1U;
                regs->status32 &= ~STATUS_DE_MASK;
index 8aa1231..5446967 100644 (file)
@@ -401,7 +401,7 @@ static inline void __before_dc_op(const int op)
 {
        if (op == OP_FLUSH_N_INV) {
                /* Dcache provides 2 cmd: FLUSH or INV
-                * INV inturn has sub-modes: DISCARD or FLUSH-BEFORE
+                * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
                 * flush-n-inv is achieved by INV cmd but with IM=1
                 * So toggle INV sub-mode depending on op request and default
                 */
index 2799b2a..f7d90cf 100644 (file)
                pinctrl_pio_io_reset: gpio_io_reset {
                        pinmux = <PIN_PB30__GPIO>;
                        bias-disable;
-                       drive-open-drain = <1>;
+                       drive-open-drain;
                        output-low;
                };
                pinctrl_pio_input: gpio_input {
index abe27ad..4656646 100644 (file)
                pinmux = <PIN_PD12__FLEXCOM4_IO0>, //DATA
                <PIN_PD13__FLEXCOM4_IO1>; //CLK
                bias-disable;
-               drive-open-drain = <1>;
+               drive-open-drain;
        };
 
        pinctrl_pwm0 {
index 1e2a28c..2fb51b9 100644 (file)
                nand0: nand@40000000 {
                        nand-bus-width = <8>;
                        nand-ecc-mode = "soft";
-                       nand-on-flash-bbt = <1>;
+                       nand-on-flash-bbt;
                        status = "okay";
                };
 
index 87c517d..e9aecac 100644 (file)
        status = "okay";
        pinctrl-names = "default";
        pinctrl-0 = <&spi1_pins &spi1_cs0_pin>;
-       flash: m25p80@0 {
+       flash: flash@0 {
                #address-cells = <1>;
                #size-cells = <1>;
                compatible = "jedec,spi-nor";
index 5126e2d..778796c 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&mcspi1_pins>;
 
-       m25p80@0 {
+       flash@0 {
                compatible = "w25x32";
                spi-max-frequency = <48000000>;
                reg = <0>;
index 097ec35..0d58da1 100644 (file)
@@ -26,7 +26,7 @@
                                pinctrl-0 = <&mmc0_4bit_pins_a
                                             &mmc0_sck_cfg
                                             &en_sd_pwr>;
-                               broken-cd = <1>;
+                               broken-cd;
                                bus-width = <4>;
                                vmmc-supply = <&reg_vddio_sd0>;
                                status = "okay";
index 563bf9d..0b90c3f 100644 (file)
                regulators {
                        bcore1 {
                                regulator-name = "bcore1";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        bcore2 {
                                regulator-name = "bcore2";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        bpro {
                                regulator-name = "bpro";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        bperi {
                                regulator-name = "bperi";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        bmem {
                                regulator-name = "bmem";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo2 {
                                regulator-name = "ldo2";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <1800000>;
                        };
 
                        ldo3 {
                                regulator-name = "ldo3";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo4 {
                                regulator-name = "ldo4";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo5 {
                                regulator-name = "ldo5";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo6 {
                                regulator-name = "ldo6";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo7 {
                                regulator-name = "ldo7";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo8 {
                                regulator-name = "ldo8";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo9 {
                                regulator-name = "ldo9";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo10 {
                                regulator-name = "ldo10";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo11 {
                                regulator-name = "ldo11";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        bio {
                                regulator-name = "bio";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <1800000>;
                                regulator-max-microvolt = <1800000>;
                        };
index 7cda694..205e4d4 100644 (file)
@@ -72,8 +72,8 @@
                        st,settling = <2>;
                        st,fraction-z = <7>;
                        st,i-drive = <1>;
-                       touchscreen-inverted-x = <1>;
-                       touchscreen-inverted-y = <1>;
+                       touchscreen-inverted-x;
+                       touchscreen-inverted-y;
                };
        };
 };
index b4664ab..d3da8b1 100644 (file)
                gpmc,device-width = <2>;
                gpmc,wait-pin = <0>;
                gpmc,burst-length = <4>;
-               gpmc,cycle2cycle-samecsen = <1>;
-               gpmc,cycle2cycle-diffcsen = <1>;
+               gpmc,cycle2cycle-samecsen;
+               gpmc,cycle2cycle-diffcsen;
                gpmc,cs-on-ns = <0>;
                gpmc,cs-rd-off-ns = <45>;
                gpmc,cs-wr-off-ns = <45>;
index cbe42c4..b4d286a 100644 (file)
@@ -76,7 +76,7 @@
                pinconf {
                        pins = "gpio20", "gpio21";
                        drive-strength = <2>;
-                       bias-disable = <0>;
+                       bias-disable;
                };
        };
 
                pinconf {
                        pins = "gpio24", "gpio25";
                        drive-strength = <2>;
-                       bias-disable = <0>;
+                       bias-disable;
                };
        };
 
                pinconf {
                        pins = "gpio8", "gpio9";
                        drive-strength = <2>;
-                       bias-disable = <0>;
+                       bias-disable;
                };
        };
 
                pinconf {
                        pins = "gpio12", "gpio13";
                        drive-strength = <2>;
-                       bias-disable = <0>;
+                       bias-disable;
                };
        };
 
                pinconf {
                        pins = "gpio16", "gpio17";
                        drive-strength = <2>;
-                       bias-disable = <0>;
+                       bias-disable;
                };
        };
 
                pinconf {
                        pins = "gpio84", "gpio85";
                        drive-strength = <2>;
-                       bias-disable = <0>;
+                       bias-disable;
                };
        };
 
index 996f445..8cb04aa 100644 (file)
 
                        snps,axi-config = <&stmmac_axi_setup>;
                        snps,pbl = <32>;
-                       snps,aal = <1>;
+                       snps,aal;
 
                        qcom,nss-common = <&nss_common>;
                        qcom,qsgmii-csr = <&qsgmii_csr>;
 
                        snps,axi-config = <&stmmac_axi_setup>;
                        snps,pbl = <32>;
-                       snps,aal = <1>;
+                       snps,aal;
 
                        qcom,nss-common = <&nss_common>;
                        qcom,qsgmii-csr = <&qsgmii_csr>;
 
                        snps,axi-config = <&stmmac_axi_setup>;
                        snps,pbl = <32>;
-                       snps,aal = <1>;
+                       snps,aal;
 
                        qcom,nss-common = <&nss_common>;
                        qcom,qsgmii-csr = <&qsgmii_csr>;
 
                        snps,axi-config = <&stmmac_axi_setup>;
                        snps,pbl = <32>;
-                       snps,aal = <1>;
+                       snps,aal;
 
                        qcom,nss-common = <&nss_common>;
                        qcom,qsgmii-csr = <&qsgmii_csr>;
index 4cbadcb..ddd1cf4 100644 (file)
                                        };
                                };
 
-                               m25p80@1 {
+                               flash@1 {
                                        compatible = "st,m25p80";
                                        reg = <1>;
                                        spi-max-frequency = <12000000>;
index fd194eb..3a51a41 100644 (file)
                                cs-gpios = <&gpiopinctrl 80 0>, <&gpiopinctrl 24 0>,
                                           <&gpiopinctrl 85 0>;
 
-                               m25p80@0 {
+                               flash@0 {
                                        compatible = "m25p80";
                                        reg = <0>;
                                        spi-max-frequency = <12000000>;
index 33ae5e0..ac53ee3 100644 (file)
        #size-cells = <0>;
        status = "okay";
 
-       flash0: is25lp016d@0 {
+       flash0: flash@0 {
                compatible = "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <133000000>;
index e222d2d..d142dd3 100644 (file)
        #size-cells = <0>;
        status = "okay";
 
-       flash0: mx66l51235l@0 {
+       flash0: flash@0 {
                compatible = "jedec,spi-nor";
                reg = <0>;
                spi-rx-bus-width = <4>;
                #size-cells = <1>;
        };
 
-       flash1: mx66l51235l@1 {
+       flash1: flash@1 {
                compatible = "jedec,spi-nor";
                reg = <1>;
                spi-rx-bus-width = <4>;
index a7acfee..a80bc8a 100644 (file)
@@ -49,11 +49,13 @@ CONFIG_ATA=y
 CONFIG_PATA_FTIDE010=y
 CONFIG_NETDEVICES=y
 CONFIG_TUN=y
+CONFIG_NET_DSA_REALTEK=y
 CONFIG_NET_DSA_REALTEK_SMI=y
+CONFIG_NET_DSA_REALTEK_RTL8366RB=y
 CONFIG_GEMINI_ETHERNET=y
+CONFIG_MARVELL_PHY=y
 CONFIG_MDIO_BITBANG=y
 CONFIG_MDIO_GPIO=y
-CONFIG_MARVELL_PHY=y
 CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_GPIO=y
 # CONFIG_INPUT_MOUSE is not set
@@ -66,6 +68,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_I2C_GPIO=y
 CONFIG_SPI=y
 CONFIG_SPI_GPIO=y
+CONFIG_SENSORS_DRIVETEMP=y
 CONFIG_SENSORS_GPIO_FAN=y
 CONFIG_SENSORS_LM75=y
 CONFIG_THERMAL=y
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
deleted file mode 100644 (file)
index 015b7ef..0000000
+++ /dev/null
@@ -1,365 +0,0 @@
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_BZIP2=y
-CONFIG_RD_LZMA=y
-CONFIG_EXPERT=y
-# CONFIG_COMPAT_BRK is not set
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_PXA=y
-CONFIG_MACH_INTELMOTE2=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_PREEMPT=y
-CONFIG_AEABI=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="root=/dev/mtdblock2 rootfstype=jffs2 console=ttyS2,115200 mem=32M"
-CONFIG_KEXEC=y
-CONFIG_FPE_NWFPE=y
-CONFIG_BINFMT_AOUT=m
-CONFIG_BINFMT_MISC=m
-CONFIG_PM=y
-CONFIG_APM_EMULATION=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_DIAG is not set
-CONFIG_INET6_AH=m
-CONFIG_INET6_ESP=m
-CONFIG_INET6_IPCOMP=m
-CONFIG_IPV6_MIP6=m
-CONFIG_IPV6_TUNNEL=m
-CONFIG_IPV6_MULTIPLE_TABLES=y
-CONFIG_IPV6_SUBTREES=y
-CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
-CONFIG_NF_CONNTRACK=m
-CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_SCTP=y
-CONFIG_NF_CT_PROTO_UDPLITE=y
-CONFIG_NF_CONNTRACK_AMANDA=m
-CONFIG_NF_CONNTRACK_FTP=m
-CONFIG_NF_CONNTRACK_H323=m
-CONFIG_NF_CONNTRACK_IRC=m
-CONFIG_NF_CONNTRACK_NETBIOS_NS=m
-CONFIG_NF_CONNTRACK_PPTP=m
-CONFIG_NF_CONNTRACK_SANE=m
-CONFIG_NF_CONNTRACK_SIP=m
-CONFIG_NF_CONNTRACK_TFTP=m
-CONFIG_NF_CT_NETLINK=m
-CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
-CONFIG_NETFILTER_XT_TARGET_LED=m
-CONFIG_NETFILTER_XT_TARGET_MARK=m
-CONFIG_NETFILTER_XT_TARGET_NFLOG=m
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
-CONFIG_NETFILTER_XT_MATCH_COMMENT=m
-CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
-CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
-CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
-CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
-CONFIG_NETFILTER_XT_MATCH_DSCP=m
-CONFIG_NETFILTER_XT_MATCH_ESP=m
-CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
-CONFIG_NETFILTER_XT_MATCH_HELPER=m
-CONFIG_NETFILTER_XT_MATCH_LENGTH=m
-CONFIG_NETFILTER_XT_MATCH_LIMIT=m
-CONFIG_NETFILTER_XT_MATCH_MAC=m
-CONFIG_NETFILTER_XT_MATCH_MARK=m
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
-CONFIG_NETFILTER_XT_MATCH_POLICY=m
-CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
-CONFIG_NETFILTER_XT_MATCH_QUOTA=m
-CONFIG_NETFILTER_XT_MATCH_REALM=m
-CONFIG_NETFILTER_XT_MATCH_SCTP=m
-CONFIG_NETFILTER_XT_MATCH_STATE=m
-CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
-CONFIG_NETFILTER_XT_MATCH_STRING=m
-CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
-CONFIG_NETFILTER_XT_MATCH_TIME=m
-CONFIG_NETFILTER_XT_MATCH_U32=m
-CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
-CONFIG_IP_NF_MATCH_AH=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
-CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_TARGET_TTL=m
-CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_ARPTABLES=m
-CONFIG_IP_NF_ARPFILTER=m
-CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_IPTABLES=m
-CONFIG_IP6_NF_MATCH_AH=m
-CONFIG_IP6_NF_MATCH_EUI64=m
-CONFIG_IP6_NF_MATCH_FRAG=m
-CONFIG_IP6_NF_MATCH_OPTS=m
-CONFIG_IP6_NF_MATCH_HL=m
-CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_MATCH_MH=m
-CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_FILTER=m
-CONFIG_IP6_NF_TARGET_REJECT=m
-CONFIG_IP6_NF_MANGLE=m
-CONFIG_IP6_NF_RAW=m
-CONFIG_BRIDGE=m
-# CONFIG_BRIDGE_IGMP_SNOOPING is not set
-CONFIG_IEEE802154=y
-# CONFIG_WIRELESS is not set
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_FW_LOADER=m
-CONFIG_CONNECTOR=m
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_AFS_PARTS=y
-CONFIG_MTD_AR7_PARTS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_ADV_OPTIONS=y
-CONFIG_MTD_CFI_GEOMETRY=y
-# CONFIG_MTD_MAP_BANK_WIDTH_1 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_4 is not set
-# CONFIG_MTD_CFI_I2 is not set
-CONFIG_MTD_OTP=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_PXA2XX=y
-CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
-CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_RAM=y
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=y
-# CONFIG_WLAN is not set
-CONFIG_PPP=m
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-# CONFIG_INPUT_MOUSEDEV is not set
-CONFIG_INPUT_EVDEV=y
-# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_KEYBOARD_GPIO=y
-CONFIG_KEYBOARD_PXA27x=y
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_INPUT_MISC=y
-CONFIG_INPUT_UINPUT=y
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_PXA=y
-CONFIG_SERIAL_PXA_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=8
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_PXA=y
-CONFIG_SPI=y
-CONFIG_SPI_PXA2XX=y
-CONFIG_GPIO_SYSFS=y
-CONFIG_POWER_SUPPLY=y
-# CONFIG_HWMON is not set
-CONFIG_PMIC_DA903X=y
-CONFIG_REGULATOR=y
-CONFIG_REGULATOR_DEBUG=y
-CONFIG_REGULATOR_DA903X=y
-CONFIG_MEDIA_SUPPORT=y
-CONFIG_VIDEO_DEV=y
-CONFIG_MEDIA_TUNER_CUSTOMISE=y
-# CONFIG_MEDIA_TUNER_SIMPLE is not set
-# CONFIG_MEDIA_TUNER_TDA8290 is not set
-# CONFIG_MEDIA_TUNER_TDA827X is not set
-# CONFIG_MEDIA_TUNER_TDA18271 is not set
-# CONFIG_MEDIA_TUNER_TDA9887 is not set
-# CONFIG_MEDIA_TUNER_TEA5761 is not set
-# CONFIG_MEDIA_TUNER_TEA5767 is not set
-# CONFIG_MEDIA_TUNER_MT20XX is not set
-# CONFIG_MEDIA_TUNER_MT2060 is not set
-# CONFIG_MEDIA_TUNER_MT2266 is not set
-# CONFIG_MEDIA_TUNER_MT2131 is not set
-# CONFIG_MEDIA_TUNER_QT1010 is not set
-# CONFIG_MEDIA_TUNER_XC2028 is not set
-# CONFIG_MEDIA_TUNER_XC5000 is not set
-# CONFIG_MEDIA_TUNER_MXL5005S is not set
-# CONFIG_MEDIA_TUNER_MXL5007T is not set
-# CONFIG_MEDIA_TUNER_MC44S803 is not set
-# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set
-CONFIG_VIDEO_PXA27x=y
-# CONFIG_V4L_USB_DRIVERS is not set
-# CONFIG_RADIO_ADAPTERS is not set
-CONFIG_FB=y
-CONFIG_FB_PXA=y
-CONFIG_FB_PXA_OVERLAY=y
-CONFIG_FB_PXA_PARAMETERS=y
-# CONFIG_LCD_CLASS_DEVICE is not set
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-CONFIG_FONT_MINI_4x6=y
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_MIXER_OSS=y
-CONFIG_SND_PCM_OSS=y
-# CONFIG_SND_DRIVERS is not set
-# CONFIG_SND_ARM is not set
-# CONFIG_SND_SPI is not set
-# CONFIG_SND_USB is not set
-CONFIG_SND_SOC=y
-CONFIG_SND_PXA2XX_SOC=y
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_GADGET=y
-CONFIG_USB_PXA27X=y
-CONFIG_USB_ETH=m
-# CONFIG_USB_ETH_RNDIS is not set
-CONFIG_MMC=y
-CONFIG_SDIO_UART=m
-CONFIG_MMC_PXA=y
-CONFIG_MMC_SPI=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_LP3944=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_LEDS_TRIGGER_BACKLIGHT=y
-CONFIG_LEDS_TRIGGER_GPIO=y
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_PXA=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=m
-CONFIG_AUTOFS4_FS=y
-CONFIG_FUSE_FS=m
-CONFIG_CUSE=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_FS_WBUF_VERIFY=y
-CONFIG_JFFS2_SUMMARY=y
-CONFIG_JFFS2_FS_XATTR=y
-CONFIG_JFFS2_COMPRESSION_OPTIONS=y
-CONFIG_JFFS2_LZO=y
-CONFIG_JFFS2_RUBIN=y
-CONFIG_CRAMFS=m
-CONFIG_SQUASHFS=m
-CONFIG_ROMFS_FS=m
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V3_ACL=y
-CONFIG_SMB_FS=m
-CONFIG_CIFS=m
-CONFIG_CIFS_STATS=y
-CONFIG_CIFS_XATTR=y
-CONFIG_CIFS_POSIX=y
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_UTF8=m
-CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_SCHED_DEBUG is not set
-CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_PROVE_LOCKING=y
-# CONFIG_FTRACE is not set
-CONFIG_DEBUG_USER=y
-CONFIG_CRYPTO_NULL=m
-CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_GHASH=m
-CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_AES=m
-CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SEED=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC16=y
index 3b30913..a352207 100644 (file)
@@ -20,7 +20,6 @@ CONFIG_VFP=y
 CONFIG_NEON=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_CMA=y
 CONFIG_NET=y
@@ -41,6 +40,8 @@ CONFIG_MAC80211_LEDS=y
 CONFIG_CAIF=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_GNSS=y
+CONFIG_GNSS_SIRF_SERIAL=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=65536
 CONFIG_NETDEVICES=y
@@ -83,6 +84,8 @@ CONFIG_SPI_GPIO=y
 CONFIG_SPI_PL022=y
 CONFIG_GPIO_STMPE=y
 CONFIG_GPIO_TC3589X=y
+CONFIG_BATTERY_SAMSUNG_SDI=y
+CONFIG_AB8500_BM=y
 CONFIG_SENSORS_IIO_HWMON=y
 CONFIG_SENSORS_NTC_THERMISTOR=y
 CONFIG_THERMAL=y
@@ -98,10 +101,13 @@ CONFIG_VIDEO_V4L2_SUBDEV_API=y
 CONFIG_V4L2_FLASH_LED_CLASS=y
 CONFIG_DRM=y
 CONFIG_DRM_PANEL_NOVATEK_NT35510=y
+CONFIG_DRM_PANEL_NOVATEK_NT35560=y
+CONFIG_DRM_PANEL_SAMSUNG_DB7430=y
 CONFIG_DRM_PANEL_SAMSUNG_S6D16D0=y
+CONFIG_DRM_PANEL_SAMSUNG_S6D27A1=y
 CONFIG_DRM_PANEL_SAMSUNG_S6E63M0=y
 CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_DSI=y
-CONFIG_DRM_PANEL_SONY_ACX424AKP=y
+CONFIG_DRM_PANEL_WIDECHIPS_WS2401=y
 CONFIG_DRM_LIMA=y
 CONFIG_DRM_MCDE=y
 CONFIG_FB=y
@@ -129,6 +135,7 @@ CONFIG_LEDS_LM3530=y
 CONFIG_LEDS_GPIO=y
 CONFIG_LEDS_LP55XX_COMMON=y
 CONFIG_LEDS_LP5521=y
+CONFIG_LEDS_REGULATOR=y
 CONFIG_LEDS_RT8515=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_RTC_CLASS=y
@@ -144,17 +151,22 @@ CONFIG_IIO_SW_TRIGGER=y
 CONFIG_BMA180=y
 CONFIG_BMC150_ACCEL=y
 CONFIG_IIO_ST_ACCEL_3AXIS=y
+# CONFIG_IIO_ST_ACCEL_SPI_3AXIS is not set
 CONFIG_IIO_RESCALE=y
 CONFIG_MPU3050_I2C=y
 CONFIG_IIO_ST_GYRO_3AXIS=y
+# CONFIG_IIO_ST_GYRO_SPI_3AXIS is not set
 CONFIG_INV_MPU6050_I2C=y
 CONFIG_BH1780=y
 CONFIG_GP2AP002=y
+CONFIG_TSL2772=y
 CONFIG_AK8974=y
 CONFIG_IIO_ST_MAGN_3AXIS=y
+# CONFIG_IIO_ST_MAGN_SPI_3AXIS is not set
 CONFIG_YAMAHA_YAS530=y
 CONFIG_IIO_HRTIMER_TRIGGER=y
 CONFIG_IIO_ST_PRESS=y
+# CONFIG_IIO_ST_PRESS_SPI is not set
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT2_FS_POSIX_ACL=y
@@ -173,10 +185,9 @@ CONFIG_CRYPTO_DEV_UX500_CRYP=y
 CONFIG_CRYPTO_DEV_UX500_HASH=y
 CONFIG_CRYPTO_DEV_UX500_DEBUG=y
 CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_KERNEL=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_FTRACE is not set
 CONFIG_DEBUG_USER=y
index 4280126..7f7f6ba 100644 (file)
@@ -1101,11 +1101,13 @@ static int __init da850_evm_config_emac(void)
        int ret;
        u32 val;
        struct davinci_soc_info *soc_info = &davinci_soc_info;
-       u8 rmii_en = soc_info->emac_pdata->rmii_en;
+       u8 rmii_en;
 
        if (!machine_is_davinci_da850_evm())
                return 0;
 
+       rmii_en = soc_info->emac_pdata->rmii_en;
+
        cfg_chip3_base = DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG);
 
        val = __raw_readl(cfg_chip3_base);
index cc75087..4fa6ea5 100644 (file)
@@ -148,8 +148,10 @@ static struct clk_hw *ep93xx_clk_register_gate(const char *name,
        psc->lock = &clk_lock;
 
        clk = clk_register(NULL, &psc->hw);
-       if (IS_ERR(clk))
+       if (IS_ERR(clk)) {
                kfree(psc);
+               return ERR_CAST(clk);
+       }
 
        return &psc->hw;
 }
@@ -207,7 +209,7 @@ static int ep93xx_mux_determine_rate(struct clk_hw *hw,
                                struct clk_rate_request *req)
 {
        unsigned long rate = req->rate;
-       struct clk *best_parent = 0;
+       struct clk *best_parent = NULL;
        unsigned long __parent_rate;
        unsigned long best_rate = 0, actual_rate, mclk_rate;
        unsigned long best_parent_rate;
index 2882674..7135a0a 100644 (file)
@@ -7,6 +7,8 @@
 #include <asm/traps.h>
 #include <asm/ptrace.h>
 
+#include "iop3xx.h"
+
 void iop_enable_cp6(void)
 {
        u32 temp;
index 1da11bd..6e6985e 100644 (file)
@@ -122,13 +122,13 @@ static inline bool cluster_is_a15(u32 cluster)
 }
 
 /**
- * ve_spc_global_wakeup_irq()
+ * ve_spc_global_wakeup_irq() - sets/clears global wakeup IRQs
+ *
+ * @set: if true, global wake-up IRQs are set, if false they are cleared
  *
  * Function to set/clear global wakeup IRQs. Not protected by locking since
  * it might be used in code paths where normal cacheable locks are not
  * working. Locking must be provided by the caller to ensure atomicity.
- *
- * @set: if true, global wake-up IRQs are set, if false they are cleared
  */
 void ve_spc_global_wakeup_irq(bool set)
 {
@@ -145,15 +145,15 @@ void ve_spc_global_wakeup_irq(bool set)
 }
 
 /**
- * ve_spc_cpu_wakeup_irq()
- *
- * Function to set/clear per-CPU wake-up IRQs. Not protected by locking since
- * it might be used in code paths where normal cacheable locks are not
- * working. Locking must be provided by the caller to ensure atomicity.
+ * ve_spc_cpu_wakeup_irq() - sets/clears per-CPU wake-up IRQs
  *
  * @cluster: mpidr[15:8] bitfield describing cluster affinity level
  * @cpu: mpidr[7:0] bitfield describing cpu affinity level
  * @set: if true, wake-up IRQs are set, if false they are cleared
+ *
+ * Function to set/clear per-CPU wake-up IRQs. Not protected by locking since
+ * it might be used in code paths where normal cacheable locks are not
+ * working. Locking must be provided by the caller to ensure atomicity.
  */
 void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set)
 {
@@ -200,14 +200,14 @@ void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr)
 }
 
 /**
- * ve_spc_powerdown()
+ * ve_spc_powerdown() - enables/disables cluster powerdown
+ *
+ * @cluster: mpidr[15:8] bitfield describing cluster affinity level
+ * @enable: if true enables powerdown, if false disables it
  *
  * Function to enable/disable cluster powerdown. Not protected by locking
  * since it might be used in code paths where normal cacheable locks are not
  * working. Locking must be provided by the caller to ensure atomicity.
- *
- * @cluster: mpidr[15:8] bitfield describing cluster affinity level
- * @enable: if true enables powerdown, if false disables it
  */
 void ve_spc_powerdown(u32 cluster, bool enable)
 {
@@ -228,7 +228,7 @@ static u32 standbywfi_cpu_mask(u32 cpu, u32 cluster)
 }
 
 /**
- * ve_spc_cpu_in_wfi(u32 cpu, u32 cluster)
+ * ve_spc_cpu_in_wfi() - Checks if the specified CPU is in WFI or not
  *
  * @cpu: mpidr[7:0] bitfield describing CPU affinity level within cluster
  * @cluster: mpidr[15:8] bitfield describing cluster affinity level
@@ -580,7 +580,7 @@ static int __init ve_spc_clk_init(void)
                }
 
                cluster = topology_physical_package_id(cpu_dev->id);
-               if (init_opp_table[cluster])
+               if (cluster < 0 || init_opp_table[cluster])
                        continue;
 
                if (ve_init_opp_table(cpu_dev))
index ec5b082..07eb69f 100644 (file)
@@ -337,12 +337,15 @@ int __init arch_xen_unpopulated_init(struct resource **res)
 
        if (!nr_reg) {
                pr_err("No extended regions are found\n");
+               of_node_put(np);
                return -EINVAL;
        }
 
        regs = kcalloc(nr_reg, sizeof(*regs), GFP_KERNEL);
-       if (!regs)
+       if (!regs) {
+               of_node_put(np);
                return -ENOMEM;
+       }
 
        /*
         * Create resource from extended regions provided by the hypervisor to be
@@ -403,8 +406,8 @@ int __init arch_xen_unpopulated_init(struct resource **res)
        *res = &xen_resource;
 
 err:
+       of_node_put(np);
        kfree(regs);
-
        return rc;
 }
 #endif
@@ -424,8 +427,10 @@ static void __init xen_dt_guest_init(void)
 
        if (of_address_to_resource(xen_node, GRANT_TABLE_INDEX, &res)) {
                pr_err("Xen grant table region is not found\n");
+               of_node_put(xen_node);
                return;
        }
+       of_node_put(xen_node);
        xen_grant_frames = res.start;
 }
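
The Xen hunks above plug device_node reference leaks: every exit path taken after the node lookup now calls of_node_put(). A hedged sketch of the pattern; the lookup helper, the compatible string and the property check are illustrative assumptions, not the function's actual code:

        #include <linux/of.h>

        static int probe_xen_node(void)
        {
                struct device_node *np = of_find_compatible_node(NULL, NULL, "xen,xen");
                int rc = -EINVAL;

                if (!np)
                        return -ENODEV;

                if (of_property_count_elems_of_size(np, "reg", sizeof(u32)) <= 0)
                        goto out;               /* error paths must still drop the reference */

                rc = 0;
        out:
                of_node_put(np);                /* balances the reference taken by the lookup */
                return rc;
        }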
 
index 57c4c99..20ea89d 100644 (file)
@@ -175,8 +175,6 @@ config ARM64
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DMA_CONTIGUOUS
        select HAVE_DYNAMIC_FTRACE
-       select HAVE_DYNAMIC_FTRACE_WITH_REGS \
-               if $(cc-option,-fpatchable-function-entry=2)
        select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
                if DYNAMIC_FTRACE_WITH_REGS
        select HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -228,6 +226,17 @@ config ARM64
        help
          ARM 64-bit (AArch64) Linux support.
 
+config CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_REGS
+       def_bool CC_IS_CLANG
+       # https://github.com/ClangBuiltLinux/linux/issues/1507
+       depends on AS_IS_GNU || (AS_IS_LLVM && (LD_IS_LLD || LD_VERSION >= 23600))
+       select HAVE_DYNAMIC_FTRACE_WITH_REGS
+
+config GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_REGS
+       def_bool CC_IS_GCC
+       depends on $(cc-option,-fpatchable-function-entry=2)
+       select HAVE_DYNAMIC_FTRACE_WITH_REGS
+
 config 64BIT
        def_bool y
 
@@ -678,7 +687,7 @@ config ARM64_ERRATUM_2051678
        default y
        help
          This options adds the workaround for ARM Cortex-A510 erratum ARM64_ERRATUM_2051678.
-         Affected Coretex-A510 might not respect the ordering rules for
+         Affected Cortex-A510 might not respect the ordering rules for
          hardware update of the page table's dirty bit. The workaround
          is to not enable the feature on affected CPUs.
 
index 1dc9d18..a0bd540 100644 (file)
                pendown-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
 
                ti,x-min = /bits/ 16 <125>;
-               touchscreen-size-x = /bits/ 16 <4008>;
+               touchscreen-size-x = <4008>;
                ti,y-min = /bits/ 16 <282>;
-               touchscreen-size-y = /bits/ 16 <3864>;
+               touchscreen-size-y = <3864>;
                ti,x-plate-ohms = /bits/ 16 <180>;
-               touchscreen-max-pressure = /bits/ 16 <255>;
-               touchscreen-average-samples = /bits/ 16 <10>;
+               touchscreen-max-pressure = <255>;
+               touchscreen-average-samples = <10>;
                ti,debounce-tol = /bits/ 16 <3>;
                ti,debounce-rep = /bits/ 16 <1>;
                ti,settle-delay-usec = /bits/ 16 <150>;
index b16c7ca..87b5e23 100644 (file)
                pendown-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
 
                ti,x-min = /bits/ 16 <125>;
-               touchscreen-size-x = /bits/ 16 <4008>;
+               touchscreen-size-x = <4008>;
                ti,y-min = /bits/ 16 <282>;
-               touchscreen-size-y = /bits/ 16 <3864>;
+               touchscreen-size-y = <3864>;
                ti,x-plate-ohms = /bits/ 16 <180>;
-               touchscreen-max-pressure = /bits/ 16 <255>;
-               touchscreen-average-samples = /bits/ 16 <10>;
+               touchscreen-max-pressure = <255>;
+               touchscreen-average-samples = <10>;
                ti,debounce-tol = /bits/ 16 <3>;
                ti,debounce-rep = /bits/ 16 <1>;
                ti,settle-delay-usec = /bits/ 16 <150>;
index aff857d..1df8433 100644 (file)
                                        gpio4 {
                                                pins = "gpio4";
                                                function = "32k-out1";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
 
                                        gpio5 {
                                                pins = "gpio5";
                                                function = "gpio";
-                                               drive-push-pull = <0>;
+                                               drive-push-pull;
                                        };
 
                                        gpio6 {
                                                pins = "gpio6";
                                                function = "gpio";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
 
                                        gpio7 {
                                                pins = "gpio7";
                                                function = "gpio";
-                                               drive-push-pull = <0>;
+                                               drive-push-pull;
                                        };
                                };
 
index 4631504..1ab132c 100644 (file)
                                        gpio4 {
                                                pins = "gpio4";
                                                function = "32k-out1";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
 
                                        gpio5 {
                                                pins = "gpio5";
                                                function = "gpio";
-                                               drive-push-pull = <0>;
+                                               drive-push-pull;
                                        };
 
                                        gpio6 {
                                                pins = "gpio6";
                                                function = "gpio";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
 
                                        gpio7 {
                                                pins = "gpio7";
                                                function = "gpio";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
                                };
 
index a7d7cfd..634d0f4 100644 (file)
                                        gpio4 {
                                                pins = "gpio4";
                                                function = "32k-out1";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
 
                                        gpio6 {
                                                pins = "gpio6";
                                                function = "gpio";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
 
                                        gpio7 {
                                                pins = "gpio7";
                                                function = "gpio";
-                                               drive-push-pull = <0>;
+                                               drive-push-pull;
                                        };
                                };
 
index 0bd66f9..0b219e7 100644 (file)
                                        gpio4 {
                                                pins = "gpio4";
                                                function = "32k-out1";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
 
                                        gpio6 {
                                                pins = "gpio6";
                                                function = "gpio";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
 
                                        gpio7 {
                                                pins = "gpio7";
                                                function = "gpio";
-                                               drive-push-pull = <0>;
+                                               drive-push-pull;
                                        };
                                };
 
index 75eb743..0fe772b 100644 (file)
@@ -59,7 +59,7 @@
                                gpio1 {
                                        pins = "gpio1";
                                        function = "fps-out";
-                                       drive-push-pull = <1>;
+                                       drive-push-pull;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
                                        maxim,active-fps-power-up-slot = <7>;
                                        maxim,active-fps-power-down-slot = <0>;
@@ -68,7 +68,7 @@
                                gpio2_3 {
                                        pins = "gpio2", "gpio3";
                                        function = "fps-out";
-                                       drive-open-drain = <1>;
+                                       drive-open-drain;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
                                };
 
@@ -80,7 +80,7 @@
                                gpio5_6_7 {
                                        pins = "gpio5", "gpio6", "gpio7";
                                        function = "gpio";
-                                       drive-push-pull = <1>;
+                                       drive-push-pull;
                                };
                        };
 
index 10347b6..936a309 100644 (file)
                                gpio1 {
                                        pins = "gpio1";
                                        function = "fps-out";
-                                       drive-push-pull = <1>;
+                                       drive-push-pull;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
                                        maxim,active-fps-power-up-slot = <7>;
                                        maxim,active-fps-power-down-slot = <0>;
                                gpio2 {
                                        pins = "gpio2";
                                        function = "fps-out";
-                                       drive-open-drain = <1>;
+                                       drive-open-drain;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
                                };
 
                                gpio3 {
                                        pins = "gpio3";
                                        function = "fps-out";
-                                       drive-open-drain = <1>;
+                                       drive-open-drain;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
                                };
 
                                gpio5_6_7 {
                                        pins = "gpio5", "gpio6", "gpio7";
                                        function = "gpio";
-                                       drive-push-pull = <1>;
+                                       drive-push-pull;
                                };
                        };
 
index 72c2dc3..f644612 100644 (file)
                                gpio1 {
                                        pins = "gpio1";
                                        function = "fps-out";
-                                       drive-push-pull = <1>;
+                                       drive-push-pull;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
                                        maxim,active-fps-power-up-slot = <0>;
                                        maxim,active-fps-power-down-slot = <7>;
                                gpio2 {
                                        pins = "gpio2";
                                        function = "fps-out";
-                                       drive-open-drain = <1>;
+                                       drive-open-drain;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
                                        maxim,active-fps-power-up-slot = <0>;
                                        maxim,active-fps-power-down-slot = <7>;
                                gpio3 {
                                        pins = "gpio3";
                                        function = "fps-out";
-                                       drive-open-drain = <1>;
+                                       drive-open-drain;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
                                        maxim,active-fps-power-up-slot = <4>;
                                        maxim,active-fps-power-down-slot = <3>;
                                gpio5_6_7 {
                                        pins = "gpio5", "gpio6", "gpio7";
                                        function = "gpio";
-                                       drive-push-pull = <1>;
+                                       drive-push-pull;
                                };
                        };
 
index a263d51..e42384f 100644 (file)
                                gpio3 {
                                        pins = "gpio3";
                                        function = "fps-out";
-                                       drive-open-drain = <1>;
+                                       drive-open-drain;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
                                        maxim,active-fps-power-up-slot = <4>;
                                        maxim,active-fps-power-down-slot = <2>;
                                gpio5_6 {
                                        pins = "gpio5", "gpio6";
                                        function = "gpio";
-                                       drive-push-pull = <1>;
+                                       drive-push-pull;
                                };
 
                                gpio4 {
index f0f81c2..b9a48cf 100644 (file)
                                pins = "gpio47", "gpio48";
                                function = "blsp_i2c3";
                                drive-strength = <16>;
-                               bias-disable = <0>;
+                               bias-disable;
                        };
 
                        blsp1_i2c3_sleep: blsp1-i2c2-sleep {
                                pins = "gpio47", "gpio48";
                                function = "gpio";
                                drive-strength = <2>;
-                               bias-disable = <0>;
+                               bias-disable;
                        };
 
                        blsp2_uart3_4pins_default: blsp2-uart2-4pins {
index e90f99e..e47c74e 100644 (file)
@@ -33,7 +33,7 @@ ap_h1_spi: &spi0 {};
 };
 
 &alc5682 {
-       realtek,dmic-clk-driving-high = "true";
+       realtek,dmic-clk-driving-high;
 };
 
 &cpu6_alert0 {
index 1084d5c..07b729f 100644 (file)
                        pins = "gpio6", "gpio25", "gpio26";
                        function = "gpio";
                        drive-strength = <8>;
-                       bias-disable = <0>;
+                       bias-disable;
                };
        };
 
index 8553c8b..103cc40 100644 (file)
                config {
                        pins = "gpio6", "gpio11";
                        drive-strength = <8>;
-                       bias-disable = <0>;
+                       bias-disable;
                };
        };
 
index d62405c..7496dea 100644 (file)
@@ -43,10 +43,22 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 
 void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
 
+#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
 static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 {
        return !(vcpu->arch.hcr_el2 & HCR_RW);
 }
+#else
+static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = vcpu->kvm;
+
+       WARN_ON_ONCE(!test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED,
+                              &kvm->arch.flags));
+
+       return test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
+}
+#endif
 
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
@@ -72,15 +84,14 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
                vcpu->arch.hcr_el2 |= HCR_TVM;
        }
 
-       if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
+       if (vcpu_el1_is_32bit(vcpu))
                vcpu->arch.hcr_el2 &= ~HCR_RW;
-
-       /*
-        * TID3: trap feature register accesses that we virtualise.
-        * For now this is conditional, since no AArch32 feature regs
-        * are currently virtualised.
-        */
-       if (!vcpu_el1_is_32bit(vcpu))
+       else
+               /*
+                * TID3: trap feature register accesses that we virtualise.
+                * For now this is conditional, since no AArch32 feature regs
+                * are currently virtualised.
+                */
                vcpu->arch.hcr_el2 |= HCR_TID3;
 
        if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
index e3b25dc..94a27a7 100644 (file)
@@ -127,6 +127,16 @@ struct kvm_arch {
 #define KVM_ARCH_FLAG_MTE_ENABLED                      1
        /* At least one vCPU has ran in the VM */
 #define KVM_ARCH_FLAG_HAS_RAN_ONCE                     2
+       /*
+        * The following two bits are used to indicate the guest's EL1
+        * register width configuration. The KVM_ARCH_FLAG_EL1_32BIT bit is
+        * valid only when KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED is set.
+        * Otherwise, the guest's EL1 register width has not yet been
+        * determined.
+        */
+#define KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED             3
+#define KVM_ARCH_FLAG_EL1_32BIT                                4
+
        unsigned long flags;
 
        /*
index 94e147e..dff2b48 100644 (file)
@@ -535,7 +535,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                 PMD_TYPE_TABLE)
 #define pmd_sect(pmd)          ((pmd_val(pmd) & PMD_TYPE_MASK) == \
                                 PMD_TYPE_SECT)
-#define pmd_leaf(pmd)          pmd_sect(pmd)
+#define pmd_leaf(pmd)          (pmd_present(pmd) && !pmd_table(pmd))
 #define pmd_bad(pmd)           (!pmd_table(pmd))
 
 #define pmd_leaf_size(pmd)     (pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
@@ -625,7 +625,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 #define pud_none(pud)          (!pud_val(pud))
 #define pud_bad(pud)           (!pud_table(pud))
 #define pud_present(pud)       pte_present(pud_pte(pud))
-#define pud_leaf(pud)          pud_sect(pud)
+#define pud_leaf(pud)          (pud_present(pud) && !pud_table(pud))
 #define pud_valid(pud)         pte_valid(pud_pte(pud))
 
 static inline void set_pud(pud_t *pudp, pud_t pud)
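
The p?d_leaf() redefinition above most likely exists because a PROT_NONE block mapping keeps p?d_present() true through the software present bit while its hardware type bits no longer match PMD_TYPE_SECT, so the old pmd_sect()/pud_sect() tests missed it; that rationale is inferred here, not stated in the hunk. A short sketch of how generic walkers consume the predicate:

        /* Generic walkers key off p?d_leaf() to decide whether an entry maps
         * memory directly or points at a lower-level table. */
        if (pmd_leaf(pmd))
                return pmd_leaf_size(pmd);      /* block mapping: report its size */
        /* otherwise descend into the PTE table */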
index 73e38d9..bd22d94 100644 (file)
@@ -92,8 +92,8 @@
 #endif /* CONFIG_COMPAT */
 
 #ifndef CONFIG_ARM64_FORCE_52BIT
-#define arch_get_mmap_end(addr) ((addr > DEFAULT_MAP_WINDOW) ? TASK_SIZE :\
-                               DEFAULT_MAP_WINDOW)
+#define arch_get_mmap_end(addr, len, flags) \
+               (((addr) > DEFAULT_MAP_WINDOW) ? TASK_SIZE : DEFAULT_MAP_WINDOW)
 
 #define arch_get_mmap_base(addr, base) ((addr > DEFAULT_MAP_WINDOW) ? \
                                        base + TASK_SIZE - DEFAULT_MAP_WINDOW :\
index 0d19259..53ae2c0 100644 (file)
@@ -1079,7 +1079,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        gfn_t gfn;
        kvm_pfn_t pfn;
        bool logging_active = memslot_is_logging(memslot);
-       bool logging_perm_fault = false;
+       bool use_read_lock = false;
        unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
        unsigned long vma_pagesize, fault_granule;
        enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
@@ -1114,7 +1114,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        if (logging_active) {
                force_pte = true;
                vma_shift = PAGE_SHIFT;
-               logging_perm_fault = (fault_status == FSC_PERM && write_fault);
+               use_read_lock = (fault_status == FSC_PERM && write_fault &&
+                                fault_granule == PAGE_SIZE);
        } else {
                vma_shift = get_vma_page_shift(vma, hva);
        }
@@ -1218,7 +1219,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         * logging dirty logging, only acquire read lock for permission
         * relaxation.
         */
-       if (logging_perm_fault)
+       if (use_read_lock)
                read_lock(&kvm->mmu_lock);
        else
                write_lock(&kvm->mmu_lock);
@@ -1268,6 +1269,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        if (fault_status == FSC_PERM && vma_pagesize == fault_granule) {
                ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
        } else {
+               WARN_ONCE(use_read_lock, "Attempted stage-2 map outside of write lock\n");
+
                ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
                                             __pfn_to_phys(pfn), prot,
                                             memcache);
@@ -1280,7 +1283,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        }
 
 out_unlock:
-       if (logging_perm_fault)
+       if (use_read_lock)
                read_unlock(&kvm->mmu_lock);
        else
                write_unlock(&kvm->mmu_lock);
index 372da09..baac2b4 100644 (file)
@@ -215,15 +215,11 @@ static void kvm_psci_narrow_to_32bit(struct kvm_vcpu *vcpu)
 
 static unsigned long kvm_psci_check_allowed_function(struct kvm_vcpu *vcpu, u32 fn)
 {
-       switch(fn) {
-       case PSCI_0_2_FN64_CPU_SUSPEND:
-       case PSCI_0_2_FN64_CPU_ON:
-       case PSCI_0_2_FN64_AFFINITY_INFO:
-               /* Disallow these functions for 32bit guests */
-               if (vcpu_mode_is_32bit(vcpu))
-                       return PSCI_RET_NOT_SUPPORTED;
-               break;
-       }
+       /*
+        * Prevent 32 bit guests from calling 64 bit PSCI functions.
+        */
+       if ((fn & PSCI_0_2_64BIT) && vcpu_mode_is_32bit(vcpu))
+               return PSCI_RET_NOT_SUPPORTED;
 
        return 0;
 }
@@ -235,10 +231,6 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
        unsigned long val;
        int ret = 1;
 
-       val = kvm_psci_check_allowed_function(vcpu, psci_fn);
-       if (val)
-               goto out;
-
        switch (psci_fn) {
        case PSCI_0_2_FN_PSCI_VERSION:
                /*
@@ -306,7 +298,6 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
                break;
        }
 
-out:
        smccc_set_retval(vcpu, val, 0, 0, 0);
        return ret;
 }
@@ -318,9 +309,6 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor)
        unsigned long val;
        int ret = 1;
 
-       if (minor > 1)
-               return -EINVAL;
-
        switch(psci_fn) {
        case PSCI_0_2_FN_PSCI_VERSION:
                val = minor == 0 ? KVM_ARM_PSCI_1_0 : KVM_ARM_PSCI_1_1;
@@ -426,6 +414,15 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
  */
 int kvm_psci_call(struct kvm_vcpu *vcpu)
 {
+       u32 psci_fn = smccc_get_function(vcpu);
+       unsigned long val;
+
+       val = kvm_psci_check_allowed_function(vcpu, psci_fn);
+       if (val) {
+               smccc_set_retval(vcpu, val, 0, 0, 0);
+               return 1;
+       }
+
        switch (kvm_psci_version(vcpu)) {
        case KVM_ARM_PSCI_1_1:
                return kvm_psci_1_x_call(vcpu, 1);
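
The per-function switch is replaced by a single mask test, and that test now runs once in kvm_psci_call() for every PSCI version instead of only in the 0.2 handler. SMC64 function IDs carry bit 30 (PSCI_0_2_64BIT), so the mask covers 64-bit calls the old switch never listed; the constants below are quoted from the PSCI UAPI header:

        /* PSCI_0_2_FN_CPU_ON    = 0x84000003   bit 30 clear -> allowed for 32-bit guests
         * PSCI_0_2_FN64_CPU_ON  = 0xc4000003   bit 30 set   -> rejected for 32-bit guests
         * PSCI_0_2_64BIT        = 0x40000000
         */
        if ((fn & PSCI_0_2_64BIT) && vcpu_mode_is_32bit(vcpu))
                return PSCI_RET_NOT_SUPPORTED;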
index ecc40c8..6c70c6f 100644 (file)
@@ -181,27 +181,51 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
+/**
+ * kvm_set_vm_width() - set the register width for the guest
+ * @vcpu: Pointer to the vcpu being configured
+ *
+ * Set both KVM_ARCH_FLAG_EL1_32BIT and KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED
+ * in the VM flags based on the vcpu's requested register width, the HW
+ * capabilities and other options (such as MTE).
+ * When REG_WIDTH_CONFIGURED is already set, the vcpu settings must be
+ * consistent with the value of the FLAG_EL1_32BIT bit in the flags.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int kvm_set_vm_width(struct kvm_vcpu *vcpu)
 {
-       struct kvm_vcpu *tmp;
+       struct kvm *kvm = vcpu->kvm;
        bool is32bit;
-       unsigned long i;
 
        is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
+
+       lockdep_assert_held(&kvm->lock);
+
+       if (test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags)) {
+               /*
+                * The guest's register width is already configured.
+                * Make sure that the vcpu is consistent with it.
+                */
+               if (is32bit == test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags))
+                       return 0;
+
+               return -EINVAL;
+       }
+
        if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
-               return false;
+               return -EINVAL;
 
        /* MTE is incompatible with AArch32 */
-       if (kvm_has_mte(vcpu->kvm) && is32bit)
-               return false;
+       if (kvm_has_mte(kvm) && is32bit)
+               return -EINVAL;
 
-       /* Check that the vcpus are either all 32bit or all 64bit */
-       kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
-               if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
-                       return false;
-       }
+       if (is32bit)
+               set_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
 
-       return true;
+       set_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags);
+
+       return 0;
 }
 
 /**
@@ -230,10 +254,16 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
        u32 pstate;
 
        mutex_lock(&vcpu->kvm->lock);
-       reset_state = vcpu->arch.reset_state;
-       WRITE_ONCE(vcpu->arch.reset_state.reset, false);
+       ret = kvm_set_vm_width(vcpu);
+       if (!ret) {
+               reset_state = vcpu->arch.reset_state;
+               WRITE_ONCE(vcpu->arch.reset_state.reset, false);
+       }
        mutex_unlock(&vcpu->kvm->lock);
 
+       if (ret)
+               return ret;
+
        /* Reset PMU outside of the non-preemptible section */
        kvm_pmu_vcpu_reset(vcpu);
 
@@ -260,14 +290,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
                }
        }
 
-       if (!vcpu_allowed_register_width(vcpu)) {
-               ret = -EINVAL;
-               goto out;
-       }
-
        switch (vcpu->arch.target) {
        default:
-               if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
+               if (vcpu_el1_is_32bit(vcpu)) {
                        pstate = VCPU_RESET_PSTATE_SVC;
                } else {
                        pstate = VCPU_RESET_PSTATE_EL1;
index f38c40a..78cde68 100644 (file)
@@ -82,7 +82,7 @@ static bool end_of_vgic(struct vgic_state_iter *iter)
 
 static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
 {
-       struct kvm *kvm = (struct kvm *)s->private;
+       struct kvm *kvm = s->private;
        struct vgic_state_iter *iter;
 
        mutex_lock(&kvm->lock);
@@ -110,7 +110,7 @@ out:
 
 static void *vgic_debug_next(struct seq_file *s, void *v, loff_t *pos)
 {
-       struct kvm *kvm = (struct kvm *)s->private;
+       struct kvm *kvm = s->private;
        struct vgic_state_iter *iter = kvm->arch.vgic.iter;
 
        ++*pos;
@@ -122,7 +122,7 @@ static void *vgic_debug_next(struct seq_file *s, void *v, loff_t *pos)
 
 static void vgic_debug_stop(struct seq_file *s, void *v)
 {
-       struct kvm *kvm = (struct kvm *)s->private;
+       struct kvm *kvm = s->private;
        struct vgic_state_iter *iter;
 
        /*
@@ -229,8 +229,8 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq,
 
 static int vgic_debug_show(struct seq_file *s, void *v)
 {
-       struct kvm *kvm = (struct kvm *)s->private;
-       struct vgic_state_iter *iter = (struct vgic_state_iter *)v;
+       struct kvm *kvm = s->private;
+       struct vgic_state_iter *iter = v;
        struct vgic_irq *irq;
        struct kvm_vcpu *vcpu = NULL;
        unsigned long flags;
index 089fc2f..2e13402 100644 (file)
@@ -2143,7 +2143,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
 static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
                                void *ptr, void *opaque)
 {
-       struct its_device *dev = (struct its_device *)opaque;
+       struct its_device *dev = opaque;
        struct its_collection *collection;
        struct kvm *kvm = its->dev->kvm;
        struct kvm_vcpu *vcpu = NULL;
index 174edab..145af02 100644 (file)
@@ -118,7 +118,6 @@ config PPC
        select ARCH_HAS_DEBUG_WX                if STRICT_KERNEL_RWX
        select ARCH_HAS_DEVMEM_IS_ALLOWED
        select ARCH_HAS_DMA_MAP_DIRECT          if PPC_PSERIES
-       select ARCH_HAS_ELF_RANDOMIZE
        select ARCH_HAS_FORTIFY_SOURCE
        select ARCH_HAS_GCOV_PROFILE_ALL
        select ARCH_HAS_HUGEPD                  if HUGETLB_PAGE
@@ -154,6 +153,7 @@ config PPC
        select ARCH_USE_MEMTEST
        select ARCH_USE_QUEUED_RWLOCKS          if PPC_QUEUED_SPINLOCKS
        select ARCH_USE_QUEUED_SPINLOCKS        if PPC_QUEUED_SPINLOCKS
+       select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
        select ARCH_WANT_IPC_PARSE_VERSION
        select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
        select ARCH_WANT_LD_ORPHAN_WARN
index 4b4827c..5b156f9 100644 (file)
@@ -38,9 +38,13 @@ BOOTCFLAGS    := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
                 $(LINUXINCLUDE)
 
 ifdef CONFIG_PPC64_BOOT_WRAPPER
-BOOTCFLAGS     += -m64
+ifdef CONFIG_CPU_LITTLE_ENDIAN
+BOOTCFLAGS     += -m64 -mcpu=powerpc64le
 else
-BOOTCFLAGS     += -m32
+BOOTCFLAGS     += -m64 -mcpu=powerpc64
+endif
+else
+BOOTCFLAGS     += -m32 -mcpu=powerpc
 endif
 
 BOOTCFLAGS     += -isystem $(shell $(BOOTCC) -print-file-name=include)
index feadee1..4454472 100644 (file)
@@ -8,7 +8,8 @@
 #include "ppc_asm.h"
 
 RELA = 7
-RELACOUNT = 0x6ffffff9
+RELASZ = 8
+RELAENT = 9
 
        .data
        /* A procedure descriptor used when booting this as a COFF file.
@@ -75,34 +76,39 @@ p_base:     mflr    r10             /* r10 now points to runtime addr of p_base */
        bne     11f
        lwz     r9,4(r12)       /* get RELA pointer in r9 */
        b       12f
-11:    addis   r8,r8,(-RELACOUNT)@ha
-       cmpwi   r8,RELACOUNT@l
+11:    cmpwi   r8,RELASZ
+       bne     .Lcheck_for_relaent
+       lwz     r0,4(r12)       /* get RELASZ value in r0 */
+       b       12f
+.Lcheck_for_relaent:
+       cmpwi   r8,RELAENT
        bne     12f
-       lwz     r0,4(r12)       /* get RELACOUNT value in r0 */
+       lwz     r14,4(r12)      /* get RELAENT value in r14 */
 12:    addi    r12,r12,8
        b       9b
 
        /* The relocation section contains a list of relocations.
         * We now do the R_PPC_RELATIVE ones, which point to words
-        * which need to be initialized with addend + offset.
-        * The R_PPC_RELATIVE ones come first and there are RELACOUNT
-        * of them. */
+        * which need to be initialized with addend + offset */
 10:    /* skip relocation if we don't have both */
        cmpwi   r0,0
        beq     3f
        cmpwi   r9,0
        beq     3f
+       cmpwi   r14,0
+       beq     3f
 
        add     r9,r9,r11       /* Relocate RELA pointer */
+       divwu   r0,r0,r14       /* RELASZ / RELAENT */
        mtctr   r0
 2:     lbz     r0,4+3(r9)      /* ELF32_R_INFO(reloc->r_info) */
        cmpwi   r0,22           /* R_PPC_RELATIVE */
-       bne     3f
+       bne     .Lnext
        lwz     r12,0(r9)       /* reloc->r_offset */
        lwz     r0,8(r9)        /* reloc->r_addend */
        add     r0,r0,r11
        stwx    r0,r11,r12
-       addi    r9,r9,12
+.Lnext:        add     r9,r9,r14
        bdnz    2b
 
        /* Do a cache flush for our text, in case the loader didn't */
@@ -160,32 +166,39 @@ p_base:   mflr    r10             /* r10 now points to runtime addr of p_base */
        bne     10f
        ld      r13,8(r11)       /* get RELA pointer in r13 */
        b       11f
-10:    addis   r12,r12,(-RELACOUNT)@ha
-       cmpdi   r12,RELACOUNT@l
-       bne     11f
-       ld      r8,8(r11)       /* get RELACOUNT value in r8 */
+10:    cmpwi   r12,RELASZ
+       bne     .Lcheck_for_relaent
+       lwz     r8,8(r11)       /* get RELASZ value in r8 */
+       b       11f
+.Lcheck_for_relaent:
+       cmpwi   r12,RELAENT
+       bne     11f
+       lwz     r14,8(r11)      /* get RELAENT value in r14 */
 11:    addi    r11,r11,16
        b       9b
 12:
-       cmpdi   r13,0            /* check we have both RELA and RELACOUNT */
+       cmpdi   r13,0            /* check we have both RELA, RELASZ, RELAENT */
        cmpdi   cr1,r8,0
        beq     3f
        beq     cr1,3f
+       cmpdi   r14,0
+       beq     3f
 
        /* Calcuate the runtime offset. */
        subf    r13,r13,r9
 
        /* Run through the list of relocations and process the
         * R_PPC64_RELATIVE ones. */
+       divdu   r8,r8,r14       /* RELASZ / RELAENT */
        mtctr   r8
 13:    ld      r0,8(r9)        /* ELF64_R_TYPE(reloc->r_info) */
        cmpdi   r0,22           /* R_PPC64_RELATIVE */
-       bne     3f
+       bne     .Lnext
        ld      r12,0(r9)        /* reloc->r_offset */
        ld      r0,16(r9)       /* reloc->r_addend */
        add     r0,r0,r13
        stdx    r0,r13,r12
-       addi    r9,r9,24
+.Lnext:        add     r9,r9,r14
        bdnz    13b
 
        /* Do a cache flush for our text, in case the loader didn't */
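
Both loops above stop depending on the optional DT_RELACOUNT hint (the RELA/RELASZ/RELAENT values 7/8/9 are the standard dynamic tags) and instead derive the entry count as RELASZ / RELAENT, skipping non-RELATIVE entries rather than terminating on the first one. A hedged C-level sketch of the 64-bit walk; the names and the elf.h types are illustrative, the wrapper itself is written in assembly:

        #include <elf.h>

        static void apply_rela(unsigned long base, Elf64_Rela *rela,
                               unsigned long relasz, unsigned long relaent)
        {
                unsigned long i, n = relasz / relaent;          /* no RELACOUNT needed */

                for (i = 0; i < n; i++) {
                        Elf64_Rela *r = (Elf64_Rela *)((char *)rela + i * relaent);

                        if (ELF64_R_TYPE(r->r_info) != R_PPC64_RELATIVE)
                                continue;                       /* skip, do not stop the walk */
                        *(unsigned long *)(base + r->r_offset) = base + r->r_addend;
                }
        }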
index 888a6b9..0e5532f 100644 (file)
@@ -70,7 +70,7 @@ static void hotfoot_fixups(void)
 
                printf("Fixing devtree for 4M Flash\n");
                
-               /* First fix up the base addresse */
+               /* First fix up the base address */
                getprop(devp, "reg", regs, sizeof(regs));
                regs[0] = 0;
                regs[1] = 0xffc00000;
index 6455fc9..8334bc3 100644 (file)
@@ -200,12 +200,6 @@ void __dt_fixup_mac_addresses(u32 startindex, ...);
        __dt_fixup_mac_addresses(0, __VA_ARGS__, NULL)
 
 
-static inline void *find_node_by_linuxphandle(const u32 linuxphandle)
-{
-       return find_node_by_prop_value(NULL, "linux,phandle",
-                       (char *)&linuxphandle, sizeof(u32));
-}
-
 static inline char *get_path(const void *phandle, char *buf, int len)
 {
        if (dt_ops.get_path)
index c2b23b6..e8dfe9f 100644 (file)
@@ -404,7 +404,7 @@ static int ppc_xts_decrypt(struct skcipher_request *req)
 
 /*
  * Algorithm definitions. Disabling alignment (cra_alignmask=0) was chosen
- * because the e500 platform can handle unaligned reads/writes very efficently.
+ * because the e500 platform can handle unaligned reads/writes very efficiently.
  * This improves IPsec thoughput by another few percent. Additionally we assume
  * that AES context is always aligned to at least 8 bytes because it is created
  * with kmalloc() in the crypto infrastructure
index 12e150e..b37a28f 100644 (file)
@@ -8,10 +8,6 @@
  */
 void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern unsigned long
-radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
-                               unsigned long len, unsigned long pgoff,
-                               unsigned long flags);
 
 extern void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep,
index 21f7809..1c4eebb 100644 (file)
@@ -18,6 +18,7 @@
  * complete pgtable.h but only a portion of it.
  */
 #include <asm/book3s/64/pgtable.h>
+#include <asm/book3s/64/slice.h>
 #include <asm/task_size_64.h>
 #include <asm/cpu_has_feature.h>
 
index 006cbec..570a496 100644 (file)
@@ -4,12 +4,6 @@
 
 #include <asm/page.h>
 
-#ifdef CONFIG_HUGETLB_PAGE
-#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-#endif
-#define HAVE_ARCH_UNMAPPED_AREA
-#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-
 #ifndef __ASSEMBLY__
 /*
  * Page size definition
index f0d3194..b8eb4ad 100644 (file)
@@ -2,6 +2,14 @@
 #ifndef _ASM_POWERPC_BOOK3S_64_SLICE_H
 #define _ASM_POWERPC_BOOK3S_64_SLICE_H
 
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#endif
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
 #define SLICE_LOW_SHIFT                28
 #define SLICE_LOW_TOP          (0x100000000ul)
 #define SLICE_NUM_LOW          (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
 
 #define SLB_ADDR_LIMIT_DEFAULT DEFAULT_MAP_WINDOW_USER64
 
+struct mm_struct;
+
+unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
+                                     unsigned long flags, unsigned int psize,
+                                     int topdown);
+
+unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr);
+
+void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
+                          unsigned long len, unsigned int psize);
+
+void slice_init_new_context_exec(struct mm_struct *mm);
+void slice_setup_new_exec(void);
+
+#endif /* __ASSEMBLY__ */
+
 #endif /* _ASM_POWERPC_BOOK3S_64_SLICE_H */
index ab3832b..4b573a3 100644 (file)
@@ -38,14 +38,15 @@ extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
  */
 static inline __sum16 csum_fold(__wsum sum)
 {
-       unsigned int tmp;
-
-       /* swap the two 16-bit halves of sum */
-       __asm__("rlwinm %0,%1,16,0,31" : "=r" (tmp) : "r" (sum));
-       /* if there is a carry from adding the two 16-bit halves,
-          it will carry from the lower half into the upper half,
-          giving us the correct sum in the upper half. */
-       return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
+       u32 tmp = (__force u32)sum;
+
+       /*
+        * swap the two 16-bit halves of sum
+        * if there is a carry from adding the two 16-bit halves,
+        * it will carry from the lower half into the upper half,
+        * giving us the correct sum in the upper half.
+        */
+       return (__force __sum16)(~(tmp + rol32(tmp, 16)) >> 16);
 }
 
 static inline u32 from64to32(u64 x)
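
The rewritten csum_fold() drops the rlwinm inline asm: adding the sum to itself rotated by 16 places the sum of both 16-bit halves, including any carry, in the upper half. A worked example with an arbitrary value:

        /* sum                = 0x12345678
         * rol32(sum, 16)     = 0x56781234
         * sum + rol32(...)   = 0x68ac68ac   (upper half = 0x1234 + 0x5678)
         * ~(...) >> 16       = 0x9753       (the folded one's-complement result)
         */
        __sum16 folded = csum_fold((__force __wsum)0x12345678);        /* 0x9753 */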
@@ -95,16 +96,15 @@ static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
 {
 #ifdef __powerpc64__
        u64 res = (__force u64)csum;
-#endif
+
+       res += (__force u64)addend;
+       return (__force __wsum)((u32)res + (res >> 32));
+#else
        if (__builtin_constant_p(csum) && csum == 0)
                return addend;
        if (__builtin_constant_p(addend) && addend == 0)
                return csum;
 
-#ifdef __powerpc64__
-       res += (__force u64)addend;
-       return (__force __wsum)((u32)res + (res >> 32));
-#else
        asm("addc %0,%0,%1;"
            "addze %0,%0;"
            : "+r" (csum) : "r" (addend) : "xer");
index 409483b..bccc3a5 100644 (file)
@@ -22,6 +22,8 @@
 #define BRANCH_SET_LINK        0x1
 #define BRANCH_ABSOLUTE        0x2
 
+DECLARE_STATIC_KEY_FALSE(init_mem_is_free);
+
 bool is_offset_in_branch_range(long offset);
 bool is_offset_in_cond_branch_range(long offset);
 int create_branch(ppc_inst_t *instr, const u32 *addr,
index 4265d5e..13bf6de 100644 (file)
@@ -23,6 +23,9 @@ struct drmem_lmb_info {
        u64                     lmb_size;
 };
 
+struct device_node;
+struct property;
+
 extern struct drmem_lmb_info *drmem_info;
 
 static inline struct drmem_lmb *drmem_lmb_next(struct drmem_lmb *lmb,
index bd513fd..514dd05 100644 (file)
@@ -333,8 +333,6 @@ static inline bool eeh_enabled(void)
 
 static inline void eeh_show_enabled(void) { }
 
-static inline void eeh_dev_phb_init_dynamic(struct pci_controller *phb) { }
-
 static inline int eeh_check_failure(const volatile void __iomem *token)
 {
        return 0;
@@ -354,11 +352,7 @@ static inline int eeh_phb_pe_create(struct pci_controller *phb) { return 0; }
 #endif /* CONFIG_EEH */
 
 #if defined(CONFIG_PPC_PSERIES) && defined(CONFIG_EEH)
-void pseries_eeh_init_edev(struct pci_dn *pdn);
 void pseries_eeh_init_edev_recursive(struct pci_dn *pdn);
-#else
-static inline void pseries_eeh_add_device_early(struct pci_dn *pdn) { }
-static inline void pseries_eeh_add_device_tree_early(struct pci_dn *pdn) { }
 #endif
 
 #ifdef CONFIG_PPC64
index 81bcb9a..27f9e11 100644 (file)
@@ -50,7 +50,7 @@ struct fadump_crash_info_header {
        u64             elfcorehdr_addr;
        u32             crashing_cpu;
        struct pt_regs  regs;
-       struct cpumask  online_mask;
+       struct cpumask  cpu_mask;
 };
 
 struct fadump_memory_range {
index 6a1a1ac..ef86197 100644 (file)
@@ -24,7 +24,7 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
                                         unsigned long addr,
                                         unsigned long len)
 {
-       if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled())
+       if (IS_ENABLED(CONFIG_PPC_64S_HASH_MMU) && !radix_enabled())
                return slice_is_hugepage_only_range(mm, addr, len);
        return 0;
 }
index fee979d..c5a5f7c 100644 (file)
@@ -38,8 +38,6 @@ extern struct pci_dev *isa_bridge_pcidev;
 #define SIO_CONFIG_RA  0x398
 #define SIO_CONFIG_RD  0x399
 
-#define SLOW_DOWN_IO
-
 /* 32 bits uses slightly different variables for the various IO
  * bases. Most of this file only uses _IO_BASE though which we
  * define properly based on the platform
index fb22378..d751ddd 100644 (file)
@@ -52,7 +52,6 @@ __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
        return false;
 }
 
-static inline void __kuap_assert_locked(void) { }
 static inline void __kuap_lock(void) { }
 static inline void __kuap_save_and_lock(struct pt_regs *regs) { }
 static inline void kuap_user_restore(struct pt_regs *regs) { }
index c08d25e..698935d 100644 (file)
@@ -30,7 +30,6 @@ struct mm_struct;
 
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #ifdef CONFIG_PPC_8xx
 static inline void local_flush_tlb_mm(struct mm_struct *mm)
@@ -45,7 +44,18 @@ static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned lon
 {
        asm volatile ("tlbie %0; sync" : : "r" (vmaddr) : "memory");
 }
+
+static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+       start &= PAGE_MASK;
+
+       if (end - start <= PAGE_SIZE)
+               asm volatile ("tlbie %0; sync" : : "r" (start) : "memory");
+       else
+               asm volatile ("sync; tlbia; isync" : : : "memory");
+}
 #else
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 extern void local_flush_tlb_mm(struct mm_struct *mm);
 extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 
index 8330968..03330b7 100644 (file)
@@ -152,16 +152,9 @@ struct paca_struct {
        struct tlb_core_data tcd;
 #endif /* CONFIG_PPC_BOOK3E */
 
-#ifdef CONFIG_PPC_BOOK3S
 #ifdef CONFIG_PPC_64S_HASH_MMU
-#ifdef CONFIG_PPC_MM_SLICES
        unsigned char mm_ctx_low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
        unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE];
-#else
-       u16 mm_ctx_user_psize;
-       u16 mm_ctx_sllp;
-#endif
-#endif
 #endif
 
        /*
index f2c5c26..b3101ff 100644 (file)
@@ -333,6 +333,5 @@ static inline unsigned long kaslr_offset(void)
 
 #include <asm-generic/memory_model.h>
 #endif /* __ASSEMBLY__ */
-#include <asm/slice.h>
 
 #endif /* _ASM_POWERPC_PAGE_H */
index 8abfb8f..42cc321 100644 (file)
@@ -11,7 +11,7 @@
 #define _ASM_POWERPC_PARPORT_H
 #ifdef __KERNEL__
 
-#include <asm/prom.h>
+#include <linux/of_irq.h>
 
 static int parport_pc_find_nonpci_ports (int autoirq, int autodma)
 {
index 90f488f..c85f901 100644 (file)
@@ -170,10 +170,10 @@ static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
        return bus->sysdata;
 }
 
-#ifndef CONFIG_PPC64
-
 extern int pci_device_from_OF_node(struct device_node *node,
                                   u8 *bus, u8 *devfn);
+#ifndef CONFIG_PPC64
+
 extern void pci_create_OF_bus_map(void);
 
 #else  /* CONFIG_PPC64 */
@@ -235,16 +235,6 @@ struct pci_dn *add_sriov_vf_pdns(struct pci_dev *pdev);
 void remove_sriov_vf_pdns(struct pci_dev *pdev);
 #endif
 
-static inline int pci_device_from_OF_node(struct device_node *np,
-                                         u8 *bus, u8 *devfn)
-{
-       if (!PCI_DN(np))
-               return -ENODEV;
-       *bus = PCI_DN(np)->busno;
-       *devfn = PCI_DN(np)->devfn;
-       return 0;
-}
-
 #if defined(CONFIG_EEH)
 static inline struct eeh_dev *pdn_to_eeh_dev(struct pci_dn *pdn)
 {
index b3f4807..8afc928 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/pci.h>
 #include <linux/pci_hotplug.h>
 #include <linux/irq.h>
+#include <linux/of.h>
 #include <misc/cxl-base.h>
 #include <asm/opal-api.h>
 
index 82f1f00..683e9bc 100644 (file)
 
 
 /* opcode and xopcode for instructions */
-#define OP_TRAP 3
-#define OP_TRAP_64 2
+#define OP_PREFIX      1
+#define OP_TRAP_64     2
+#define OP_TRAP                3
+#define OP_SC          17
+#define OP_19          19
+#define OP_31          31
+#define OP_LWZ         32
+#define OP_LWZU                33
+#define OP_LBZ         34
+#define OP_LBZU                35
+#define OP_STW         36
+#define OP_STWU                37
+#define OP_STB         38
+#define OP_STBU                39
+#define OP_LHZ         40
+#define OP_LHZU                41
+#define OP_LHA         42
+#define OP_LHAU                43
+#define OP_STH         44
+#define OP_STHU                45
+#define OP_LMW         46
+#define OP_STMW                47
+#define OP_LFS         48
+#define OP_LFSU                49
+#define OP_LFD         50
+#define OP_LFDU                51
+#define OP_STFS                52
+#define OP_STFSU       53
+#define OP_STFD                54
+#define OP_STFDU       55
+#define OP_LQ          56
+#define OP_LD          58
+#define OP_STD         62
+
+#define OP_19_XOP_RFID         18
+#define OP_19_XOP_RFMCI                38
+#define OP_19_XOP_RFDI         39
+#define OP_19_XOP_RFI          50
+#define OP_19_XOP_RFCI         51
+#define OP_19_XOP_RFSCV                82
+#define OP_19_XOP_HRFID                274
+#define OP_19_XOP_URFID                306
+#define OP_19_XOP_STOP         370
+#define OP_19_XOP_DOZE         402
+#define OP_19_XOP_NAP          434
+#define OP_19_XOP_SLEEP                466
+#define OP_19_XOP_RVWINKLE     498
 
 #define OP_31_XOP_TRAP      4
 #define OP_31_XOP_LDX       21
 #define OP_31_XOP_LHZUX     311
 #define OP_31_XOP_MSGSNDP   142
 #define OP_31_XOP_MSGCLRP   174
+#define OP_31_XOP_MTMSR     146
+#define OP_31_XOP_MTMSRD    178
 #define OP_31_XOP_TLBIE     306
 #define OP_31_XOP_MFSPR     339
 #define OP_31_XOP_LWAX      341
 /* VMX Vector Store Instructions */
 #define OP_31_XOP_STVX          231
 
-/* Prefixed Instructions */
-#define OP_PREFIX              1
-
-#define OP_31   31
-#define OP_LWZ  32
-#define OP_STFS 52
-#define OP_STFSU 53
-#define OP_STFD 54
-#define OP_STFDU 55
-#define OP_LD   58
-#define OP_LWZU 33
-#define OP_LBZ  34
-#define OP_LBZU 35
-#define OP_STW  36
-#define OP_STWU 37
-#define OP_STD  62
-#define OP_STB  38
-#define OP_STBU 39
-#define OP_LHZ  40
-#define OP_LHZU 41
-#define OP_LHA  42
-#define OP_LHAU 43
-#define OP_STH  44
-#define OP_STHU 45
-#define OP_LMW  46
-#define OP_STMW 47
-#define OP_LFS  48
-#define OP_LFSU 49
-#define OP_LFD  50
-#define OP_LFDU 51
-#define OP_STFS 52
-#define OP_STFSU 53
-#define OP_STFD  54
-#define OP_STFDU 55
-#define OP_LQ    56
-
 /* sorted alphabetically */
 #define PPC_INST_BCCTR_FLUSH           0x4c400420
 #define PPC_INST_COPY                  0x7c20060c
index c5d9847..6f66e35 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright IBM Corporation, 2012
  */
 #include <linux/types.h>
+#include <asm/disassemble.h>
 
 typedef u32 ppc_opcode_t;
 #define BREAKPOINT_INSTRUCTION 0x7fe00008      /* trap */
@@ -31,6 +32,41 @@ typedef u32 ppc_opcode_t;
 #define MSR_SINGLESTEP (MSR_SE)
 #endif
 
+static inline bool can_single_step(u32 inst)
+{
+       switch (get_op(inst)) {
+       case OP_TRAP_64:        return false;
+       case OP_TRAP:           return false;
+       case OP_SC:             return false;
+       case OP_19:
+               switch (get_xop(inst)) {
+               case OP_19_XOP_RFID:            return false;
+               case OP_19_XOP_RFMCI:           return false;
+               case OP_19_XOP_RFDI:            return false;
+               case OP_19_XOP_RFI:             return false;
+               case OP_19_XOP_RFCI:            return false;
+               case OP_19_XOP_RFSCV:           return false;
+               case OP_19_XOP_HRFID:           return false;
+               case OP_19_XOP_URFID:           return false;
+               case OP_19_XOP_STOP:            return false;
+               case OP_19_XOP_DOZE:            return false;
+               case OP_19_XOP_NAP:             return false;
+               case OP_19_XOP_SLEEP:           return false;
+               case OP_19_XOP_RVWINKLE:        return false;
+               }
+               break;
+       case OP_31:
+               switch (get_xop(inst)) {
+               case OP_31_XOP_TRAP:            return false;
+               case OP_31_XOP_TRAP_64:         return false;
+               case OP_31_XOP_MTMSR:           return false;
+               case OP_31_XOP_MTMSRD:          return false;
+               }
+               break;
+       }
+       return true;
+}
+
 /* Enable single stepping for the current task */
 static inline void enable_single_step(struct pt_regs *regs)
 {
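
can_single_step() rejects instructions whose execution cannot be emulated by stepping: traps, sc, the rfi family, idle/stop instructions and MSR writes. The primary and extended opcodes come from get_op()/get_xop() in asm/disassemble.h, which is why that header is now included. A worked decode of one rejected instruction:

        /* rfid is encoded as 0x4c000024:
         *   get_op(0x4c000024)  = 0x4c000024 >> 26          = 19 -> OP_19
         *   get_xop(0x4c000024) = (0x4c000024 >> 1) & 0x3ff = 18 -> OP_19_XOP_RFID
         * so can_single_step(0x4c000024) returns false, while an ordinary
         * computational instruction falls through every case and returns true.
         */
        bool ok = can_single_step(0x4c000024);          /* false */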
index 39c2502..fdfaae1 100644 (file)
@@ -392,8 +392,6 @@ static inline void prefetchw(const void *x)
 
 #define spin_lock_prefetch(x)  prefetchw(x)
 
-#define HAVE_ARCH_PICK_MMAP_LAYOUT
-
 /* asm stubs */
 extern unsigned long isa300_idle_stop_noloss(unsigned long psscr_val);
 extern unsigned long isa300_idle_stop_mayloss(unsigned long psscr_val);
diff --git a/arch/powerpc/include/asm/slice.h b/arch/powerpc/include/asm/slice.h
deleted file mode 100644 (file)
index 0bdd9c6..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_SLICE_H
-#define _ASM_POWERPC_SLICE_H
-
-#ifdef CONFIG_PPC_BOOK3S_64
-#include <asm/book3s/64/slice.h>
-#endif
-
-#ifndef __ASSEMBLY__
-
-struct mm_struct;
-
-#ifdef CONFIG_PPC_MM_SLICES
-
-#ifdef CONFIG_HUGETLB_PAGE
-#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-#endif
-#define HAVE_ARCH_UNMAPPED_AREA
-#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-
-unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
-                                     unsigned long flags, unsigned int psize,
-                                     int topdown);
-
-unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr);
-
-void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
-                          unsigned long len, unsigned int psize);
-
-void slice_init_new_context_exec(struct mm_struct *mm);
-void slice_setup_new_exec(void);
-
-#else /* CONFIG_PPC_MM_SLICES */
-
-static inline void slice_init_new_context_exec(struct mm_struct *mm) {}
-
-static inline unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
-{
-       return 0;
-}
-
-#endif /* CONFIG_PPC_MM_SLICES */
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_POWERPC_SLICE_H */
index 60ab739..f63505d 100644 (file)
@@ -189,8 +189,6 @@ extern void __cpu_die(unsigned int cpu);
 #define smp_setup_cpu_maps()
 #define thread_group_shares_l2  0
 #define thread_group_shares_l3 0
-static inline void inhibit_secondary_onlining(void) {}
-static inline void uninhibit_secondary_onlining(void) {}
 static inline const struct cpumask *cpu_sibling_mask(int cpu)
 {
        return cpumask_of(cpu);
index 7546402..da86462 100644 (file)
@@ -10,6 +10,8 @@
 
 #ifdef CONFIG_PPC_SVM
 
+#include <asm/reg.h>
+
 static inline bool is_secure_guest(void)
 {
        return mfmsr() & MSR_S;
index 1f43ef6..aee25e3 100644 (file)
@@ -62,6 +62,15 @@ static inline void disable_kernel_altivec(void)
 #else
 static inline void save_altivec(struct task_struct *t) { }
 static inline void __giveup_altivec(struct task_struct *t) { }
+static inline void enable_kernel_altivec(void)
+{
+       BUILD_BUG();
+}
+
+static inline void disable_kernel_altivec(void)
+{
+       BUILD_BUG();
+}
 #endif
 
 #ifdef CONFIG_VSX
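
The new !CONFIG_ALTIVEC stubs let common code call enable_kernel_altivec()/disable_kernel_altivec() without #ifdef blocks; any call that is not eliminated at compile time trips BUILD_BUG(). A hedged usage sketch, assuming callers are guarded so the compiler can drop the dead branch:

        if (IS_ENABLED(CONFIG_ALTIVEC) && cpu_has_feature(CPU_FTR_ALTIVEC)) {
                enable_kernel_altivec();        /* dead code when ALTIVEC=n, so BUILD_BUG() never fires */
                /* ... VMX-accelerated path ... */
                disable_kernel_altivec();
        }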
index 38fdf80..5a70995 100644 (file)
 #define STACK_TOP_MAX TASK_SIZE_USER64
 #define STACK_TOP (is_32bit_task() ? STACK_TOP_USER32 : STACK_TOP_USER64)
 
+#define arch_get_mmap_base(addr, base) \
+       (((addr) > DEFAULT_MAP_WINDOW) ? (base) + TASK_SIZE - DEFAULT_MAP_WINDOW : (base))
+
+#define arch_get_mmap_end(addr, len, flags) \
+       (((addr) > DEFAULT_MAP_WINDOW) || \
+        (((flags) & MAP_FIXED) && ((addr) + (len) > DEFAULT_MAP_WINDOW)) ? TASK_SIZE : \
+                                                                           DEFAULT_MAP_WINDOW)
+
 #endif /* _ASM_POWERPC_TASK_SIZE_64_H */
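
These macros feed the generic top-down mmap layout that powerpc now selects (ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT in the Kconfig hunk above). A hedged illustration, assuming the usual 64K-page book3s64 values of DEFAULT_MAP_WINDOW = 128TB and TASK_SIZE = 4PB:

        /* mmap() hint below 128TB                       -> search capped at 128TB
         * mmap() hint above 128TB                       -> search capped at TASK_SIZE
         * MAP_FIXED request with addr + len crossing
         * the 128TB boundary                            -> also uses TASK_SIZE, so the
         *                                                  fixed mapping is permitted
         */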
index 924b215..1e5643a 100644 (file)
@@ -24,6 +24,7 @@ extern unsigned long tb_ticks_per_jiffy;
 extern unsigned long tb_ticks_per_usec;
 extern unsigned long tb_ticks_per_sec;
 extern struct clock_event_device decrementer_clockevent;
+extern u64 decrementer_max;
 
 
 extern void generic_calibrate_decr(void);
index 9d9d56b..8f69bb0 100644 (file)
@@ -10,9 +10,9 @@
 #include <linux/export.h>
 #include <linux/memblock.h>
 #include <linux/pgtable.h>
+#include <linux/of.h>
 
 #include <asm/sections.h>
-#include <asm/prom.h>
 #include <asm/btext.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
@@ -45,8 +45,7 @@ unsigned long disp_BAT[2] __initdata = {0, 0};
 
 static unsigned char vga_font[cmapsz];
 
-int boot_text_mapped __force_data = 0;
-int force_printk_to_btext = 0;
+static int boot_text_mapped __force_data;
 
 extern void rmci_on(void);
 extern void rmci_off(void);
index 00b0992..f502337 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/of.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
-#include <asm/prom.h>
 #include <asm/cputhreads.h>
 #include <asm/smp.h>
 
index ae0fdef..7bd6546 100644 (file)
@@ -12,9 +12,9 @@
 #include <linux/init.h>
 #include <linux/export.h>
 #include <linux/jump_label.h>
+#include <linux/of.h>
 
 #include <asm/cputable.h>
-#include <asm/prom.h>          /* for PTRRELOC on ARCH=ppc */
 #include <asm/mce.h>
 #include <asm/mmu.h>
 #include <asm/setup.h>
@@ -2025,7 +2025,7 @@ static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
                 * oprofile_cpu_type already has a value, then we are
                 * possibly overriding a real PVR with a logical one,
                 * and, in that case, keep the current value for
-                * oprofile_cpu_type. Futhermore, let's ensure that the
+                * oprofile_cpu_type. Furthermore, let's ensure that the
                 * fix for the PMAO bug is enabled on compatibility mode.
                 */
                if (old.oprofile_cpu_type != NULL) {
index 5693e1c..c438c60 100644 (file)
@@ -12,9 +12,9 @@
 #include <linux/crash_dump.h>
 #include <linux/io.h>
 #include <linux/memblock.h>
+#include <linux/of.h>
 #include <asm/code-patching.h>
 #include <asm/kdump.h>
-#include <asm/prom.h>
 #include <asm/firmware.h>
 #include <linux/uaccess.h>
 #include <asm/rtas.h>
index 64e423d..30d4eca 100644 (file)
@@ -27,7 +27,7 @@ int set_dawr(int nr, struct arch_hw_breakpoint *brk)
        dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) >> 3;
        /*
         * DAWR length is stored in field MDR bits 48:53.  Matches range in
-        * doublewords (64 bits) baised by -1 eg. 0b000000=1DW and
+        * doublewords (64 bits) biased by -1 eg. 0b000000=1DW and
         * 0b111111=64DW.
         * brk->hw_len is in bytes.
         * This aligns up to double word size, shifts and does the bias.
index 7d1b2c4..c8e147b 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/jump_label.h>
 #include <linux/libfdt.h>
 #include <linux/memblock.h>
+#include <linux/of_fdt.h>
 #include <linux/printk.h>
 #include <linux/sched.h>
 #include <linux/string.h>
@@ -19,7 +20,6 @@
 #include <asm/dt_cpu_ftrs.h>
 #include <asm/mce.h>
 #include <asm/mmu.h>
-#include <asm/prom.h>
 #include <asm/setup.h>
 
 
index 28bb1e7..ab316e1 100644 (file)
@@ -1329,7 +1329,7 @@ int eeh_pe_set_option(struct eeh_pe *pe, int option)
 
        /*
         * EEH functionality could possibly be disabled, just
-        * return error for the case. And the EEH functinality
+        * return error for the case. And the EEH functionality
         * isn't expected to be disabled on one specific PE.
         */
        switch (option) {
@@ -1804,7 +1804,7 @@ static int eeh_debugfs_break_device(struct pci_dev *pdev)
         *    PE freeze. Using the in_8() accessor skips the eeh detection hook
         *    so the freeze hook so the EEH Detection machinery won't be
         *    triggered here. This is to match the usual behaviour of EEH
-        *    where the HW will asyncronously freeze a PE and it's up to
+        *    where the HW will asynchronously freeze a PE and it's up to
         *    the kernel to notice and deal with it.
         *
         * 3. Turn Memory space back on. This is more important for VFs
index 422f80b..260273e 100644 (file)
@@ -16,7 +16,6 @@
 #include <asm/eeh_event.h>
 #include <asm/ppc-pci.h>
 #include <asm/pci-bridge.h>
-#include <asm/prom.h>
 #include <asm/rtas.h>
 
 struct eeh_rmv_data {
index a7a8dc1..c23a454 100644 (file)
@@ -143,7 +143,7 @@ int __eeh_send_failure_event(struct eeh_pe *pe)
 int eeh_send_failure_event(struct eeh_pe *pe)
 {
        /*
-        * If we've manually supressed recovery events via debugfs
+        * If we've manually suppressed recovery events via debugfs
         * then just drop it on the floor.
         */
        if (eeh_debugfs_no_recover) {
index 845e024..d7a9cf3 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/export.h>
 #include <linux/gfp.h>
 #include <linux/kernel.h>
+#include <linux/of.h>
 #include <linux/pci.h>
 #include <linux/string.h>
 
index 429620d..706e1eb 100644 (file)
@@ -6,6 +6,7 @@
  *
  * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
  */
+#include <linux/of.h>
 #include <linux/pci.h>
 #include <linux/stat.h>
 #include <asm/ppc-pci.h>
index 9581906..da18f83 100644 (file)
@@ -330,22 +330,22 @@ _GLOBAL(enter_rtas)
        clrldi  r4,r4,2                 /* convert to realmode address */
                mtlr    r4
 
-       li      r0,0
-       ori     r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
-       andc    r0,r6,r0
-       
-        li      r9,1
-        rldicr  r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
-       ori     r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
-       andc    r6,r0,r9
-
 __enter_rtas:
-       sync                            /* disable interrupts so SRR0/1 */
-       mtmsrd  r0                      /* don't get trashed */
-
        LOAD_REG_ADDR(r4, rtas)
        ld      r5,RTASENTRY(r4)        /* get the rtas->entry value */
        ld      r4,RTASBASE(r4)         /* get the rtas->base value */
+
+       /*
+        * RTAS runs in 32-bit big endian real mode, but leave MSR[RI] on as we
+        * may hit NMI (SRESET or MCE) while in RTAS. RTAS should disable RI in
+        * its critical regions (as specified in PAPR+ section 7.2.1). MSR[S]
+        * is not impacted by RFI_TO_KERNEL (only urfid can unset it). So if
+        * MSR[S] is set, it will remain when entering RTAS.
+        */
+       LOAD_REG_IMMEDIATE(r6, MSR_ME | MSR_RI)
+
+       li      r0,0
+       mtmsrd  r0,1                    /* disable RI before using SRR0/1 */
        
        mtspr   SPRN_SRR0,r5
        mtspr   SPRN_SRR1,r6
index 65562c4..bfb671f 100644 (file)
 #include <linux/cma.h>
 #include <linux/hugetlb.h>
 #include <linux/debugfs.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
 
 #include <asm/page.h>
-#include <asm/prom.h>
 #include <asm/fadump.h>
 #include <asm/fadump-internal.h>
 #include <asm/setup.h>
@@ -73,8 +74,8 @@ static struct cma *fadump_cma;
  * The total size of fadump reserved memory covers for boot memory size
  * + cpu data size + hpte size and metadata.
  * Initialize only the area equivalent to boot memory size for CMA use.
- * The reamining portion of fadump reserved memory will be not given
- * to CMA and pages for thoes will stay reserved. boot memory size is
+ * The remaining portion of fadump reserved memory will not be given
+ * to CMA and pages for those will stay reserved. Boot memory size is
  * aligned per CMA requirement to satisy cma_init_reserved_mem() call.
  * But for some reason even if it fails we still have the memory reservation
  * with us and we can still continue doing fadump.
@@ -365,6 +366,11 @@ static unsigned long __init get_fadump_area_size(void)
 
        size += fw_dump.cpu_state_data_size;
        size += fw_dump.hpte_region_size;
+       /*
+        * Account for pagesize alignment of boot memory area destination address.
+        * This facilitates mmap reading of the first kernel's memory.
+        */
+       size = PAGE_ALIGN(size);
        size += fw_dump.boot_memory_size;
        size += sizeof(struct fadump_crash_info_header);
        size += sizeof(struct elfhdr); /* ELF core header.*/
@@ -728,7 +734,7 @@ void crash_fadump(struct pt_regs *regs, const char *str)
        else
                ppc_save_regs(&fdh->regs);
 
-       fdh->online_mask = *cpu_online_mask;
+       fdh->cpu_mask = *cpu_online_mask;
 
        /*
         * If we came in via system reset, wait a while for the secondary
@@ -867,7 +873,6 @@ static int fadump_alloc_mem_ranges(struct fadump_mrange_info *mrange_info)
                                       sizeof(struct fadump_memory_range));
        return 0;
 }
-
 static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
                                       u64 base, u64 end)
 {
@@ -886,7 +891,12 @@ static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
                start = mem_ranges[mrange_info->mem_range_cnt - 1].base;
                size  = mem_ranges[mrange_info->mem_range_cnt - 1].size;
 
-               if ((start + size) == base)
+               /*
+                * Boot memory area needs separate PT_LOAD segment(s) as it
+                * is moved to a different location at the time of crash.
+                * So, fold only if the region is not the boot memory area.
+                */
+               if ((start + size) == base && start >= fw_dump.boot_mem_top)
                        is_adjacent = true;
        }
        if (!is_adjacent) {
@@ -1164,6 +1174,11 @@ static unsigned long init_fadump_header(unsigned long addr)
        fdh->elfcorehdr_addr = addr;
        /* We will set the crashing cpu id in crash_fadump() during crash. */
        fdh->crashing_cpu = FADUMP_CPU_UNKNOWN;
+       /*
+        * When the LPAR is terminated by PHYP, ensure all possible CPUs'
+        * register data is processed while exporting the vmcore.
+        */
+       fdh->cpu_mask = *cpu_possible_mask;
 
        return addr;
 }
@@ -1271,7 +1286,6 @@ static void fadump_release_reserved_area(u64 start, u64 end)
 static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info)
 {
        struct fadump_memory_range *mem_ranges;
-       struct fadump_memory_range tmp_range;
        u64 base, size;
        int i, j, idx;
 
@@ -1286,11 +1300,8 @@ static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info)
                        if (mem_ranges[idx].base > mem_ranges[j].base)
                                idx = j;
                }
-               if (idx != i) {
-                       tmp_range = mem_ranges[idx];
-                       mem_ranges[idx] = mem_ranges[i];
-                       mem_ranges[i] = tmp_range;
-               }
+               if (idx != i)
+                       swap(mem_ranges[idx], mem_ranges[i]);
        }
 
        /* Merge adjacent reserved ranges */
@@ -1661,8 +1672,8 @@ int __init setup_fadump(void)
 }
 /*
  * Use subsys_initcall_sync() here because there is dependency with
- * crash_save_vmcoreinfo_init(), which mush run first to ensure vmcoreinfo initialization
- * is done before regisering with f/w.
+ * crash_save_vmcoreinfo_init(), which must run first to ensure vmcoreinfo initialization
+ * is done before registering with f/w.
  */
 subsys_initcall_sync(setup_fadump);
 #else /* !CONFIG_PRESERVE_FA_DUMP */
index 4ad79eb..77cd4c5 100644 (file)
@@ -37,7 +37,7 @@ static int __init powersave_off(char *arg)
 {
        ppc_md.power_save = NULL;
        cpuidle_disable = IDLE_POWERSAVE_OFF;
-       return 0;
+       return 1;
 }
 __setup("powersave=off", powersave_off);
 
index 7bab2d7..6471034 100644 (file)
@@ -219,16 +219,6 @@ system_call_vectored common 0x3000
  */
 system_call_vectored sigill 0x7ff0
 
-
-/*
- * Entered via kernel return set up by kernel/sstep.c, must match entry regs
- */
-       .globl system_call_vectored_emulate
-system_call_vectored_emulate:
-_ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate)
-       li      r10,IRQS_ALL_DISABLED
-       stb     r10,PACAIRQSOFTMASK(r13)
-       b       system_call_vectored_common
 #endif /* CONFIG_PPC_BOOK3S */
 
        .balign IFETCH_ALIGN_BYTES
index 6e090e8..7e56ddb 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/sched.h>
 #include <linux/debugfs.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/iommu.h>
 #include <asm/pci-bridge.h>
 #include <asm/machdep.h>
index 752fb18..2e055e1 100644 (file)
 #include <linux/of_irq.h>
 #include <linux/vmalloc.h>
 #include <linux/pgtable.h>
+#include <linux/static_call.h>
 
 #include <linux/uaccess.h>
 #include <asm/interrupt.h>
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/cache.h>
-#include <asm/prom.h>
 #include <asm/ptrace.h>
 #include <asm/machdep.h>
 #include <asm/udbg.h>
@@ -730,6 +730,8 @@ static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
        );
 }
 
+DEFINE_STATIC_CALL_RET0(ppc_get_irq, *ppc_md.get_irq);
+
 void __do_irq(struct pt_regs *regs)
 {
        unsigned int irq;
@@ -741,7 +743,7 @@ void __do_irq(struct pt_regs *regs)
         *
         * This will typically lower the interrupt line to the CPU
         */
-       irq = ppc_md.get_irq();
+       irq = static_call(ppc_get_irq)();
 
        /* We can hard enable interrupts now to allow perf interrupts */
        if (should_hard_irq_enable())
@@ -809,6 +811,9 @@ void __init init_IRQ(void)
 
        if (ppc_md.init_IRQ)
                ppc_md.init_IRQ();
+
+       if (!WARN_ON(!ppc_md.get_irq))
+               static_call_update(ppc_get_irq, ppc_md.get_irq);
 }
 
 #ifdef CONFIG_BOOKE_OR_40x
index 39c6257..dc74661 100644 (file)
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/notifier.h>
+#include <linux/of_address.h>
 #include <linux/vmalloc.h>
 
 #include <asm/processor.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/pci-bridge.h>
 #include <asm/machdep.h>
 #include <asm/ppc-pci.h>
index 7dae0b0..d5f55e5 100644 (file)
@@ -150,8 +150,8 @@ int arch_prepare_kprobe(struct kprobe *p)
        if ((unsigned long)p->addr & 0x03) {
                printk("Attempt to register kprobe at an unaligned address\n");
                ret = -EINVAL;
-       } else if (IS_MTMSRD(insn) || IS_RFID(insn)) {
-               printk("Cannot register a kprobe on mtmsr[d]/rfi[d]\n");
+       } else if (!can_single_step(ppc_inst_val(insn))) {
+               printk("Cannot register a kprobe on instructions that can't be single stepped\n");
                ret = -EINVAL;
        } else if ((unsigned long)p->addr & ~PAGE_MASK &&
                   ppc_inst_prefixed(ppc_inst_read(p->addr - 1))) {
index cfc03e0..5c58460 100644 (file)
@@ -7,10 +7,10 @@
 #include <linux/pci.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
+#include <linux/of_irq.h>
 #include <linux/serial_reg.h>
 #include <asm/io.h>
 #include <asm/mmu.h>
-#include <asm/prom.h>
 #include <asm/serial.h>
 #include <asm/udbg.h>
 #include <asm/pci-bridge.h>
index 40a583e..97a76a8 100644 (file)
@@ -101,7 +101,7 @@ __module_alloc(unsigned long size, unsigned long start, unsigned long end, bool
         * too.
         */
        return __vmalloc_node_range(size, 1, start, end, gfp, prot,
-                                   VM_FLUSH_RESET_PERMS | VM_NO_HUGE_VMAP,
+                                   VM_FLUSH_RESET_PERMS,
                                    NUMA_NO_NODE, __builtin_return_address(0));
 }
 
index a0432ef..e25b796 100644 (file)
@@ -99,7 +99,7 @@ static unsigned long get_plt_size(const Elf32_Ehdr *hdr,
 
                        /* Sort the relocation information based on a symbol and
                         * addend key. This is a stable O(n*log n) complexity
-                        * alogrithm but it will reduce the complexity of
+                        * algorithm but it will reduce the complexity of
                         * count_relocs() to linear complexity O(n)
                         */
                        sort((void *)hdr + sechdrs[i].sh_offset,
index 7947205..2cce576 100644 (file)
@@ -194,7 +194,7 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
 
                        /* Sort the relocation information based on a symbol and
                         * addend key. This is a stable O(n*log n) complexity
-                        * alogrithm but it will reduce the complexity of
+                        * algorithm but it will reduce the complexity of
                         * count_relocs() to linear complexity O(n)
                         */
                        sort((void *)sechdrs[i].sh_addr,
@@ -361,7 +361,7 @@ static inline int create_ftrace_stub(struct ppc64_stub_entry *entry,
        entry->jump[1] |= PPC_HA(reladdr);
        entry->jump[2] |= PPC_LO(reladdr);
 
-       /* Eventhough we don't use funcdata in the stub, it's needed elsewhere. */
+       /* Even though we don't use funcdata in the stub, it's needed elsewhere. */
        entry->funcdata = func_desc(addr);
        entry->magic = STUB_MAGIC;
 
index 0d9f9cd..e385d31 100644 (file)
@@ -19,9 +19,9 @@
 #include <linux/pstore.h>
 #include <linux/zlib.h>
 #include <linux/uaccess.h>
+#include <linux/of.h>
 #include <asm/nvram.h>
 #include <asm/rtas.h>
-#include <asm/prom.h>
 #include <asm/machdep.h>
 
 #undef DEBUG_NVRAM
index 39da688..ba593fd 100644 (file)
@@ -344,15 +344,10 @@ void copy_mm_to_paca(struct mm_struct *mm)
 {
        mm_context_t *context = &mm->context;
 
-#ifdef CONFIG_PPC_MM_SLICES
        VM_BUG_ON(!mm_ctx_slb_addr_limit(context));
        memcpy(&get_paca()->mm_ctx_low_slices_psize, mm_ctx_low_slices(context),
               LOW_SLICE_ARRAY_SZ);
        memcpy(&get_paca()->mm_ctx_high_slices_psize, mm_ctx_high_slices(context),
               TASK_SLICE_ARRAY_SZ(context));
-#else /* CONFIG_PPC_MM_SLICES */
-       get_paca()->mm_ctx_user_psize = context->user_psize;
-       get_paca()->mm_ctx_sllp = context->sllp;
-#endif
 }
 #endif /* CONFIG_PPC_64S_HASH_MMU */
index 8bc9cf6..63ed90b 100644 (file)
 #include <linux/vgaarb.h>
 #include <linux/numa.h>
 #include <linux/msi.h>
+#include <linux/irqdomain.h>
 
 #include <asm/processor.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/pci-bridge.h>
 #include <asm/byteorder.h>
 #include <asm/machdep.h>
@@ -1688,7 +1688,7 @@ EXPORT_SYMBOL_GPL(pcibios_scan_phb);
 static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
 {
        int i, class = dev->class >> 8;
-       /* When configured as agent, programing interface = 1 */
+       /* When configured as agent, programming interface = 1 */
        int prog_if = dev->class & 0xf;
 
        if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
index 2fc1219..0fe251c 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <linux/pci.h>
 #include <linux/export.h>
+#include <linux/of.h>
 #include <asm/pci-bridge.h>
 #include <asm/ppc-pci.h>
 #include <asm/firmware.h>
index 4853796..5a17493 100644 (file)
@@ -21,7 +21,6 @@
 
 #include <asm/processor.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/sections.h>
 #include <asm/pci-bridge.h>
 #include <asm/ppc-pci.h>
index 3fb7e57..19b03dd 100644 (file)
 #include <linux/syscalls.h>
 #include <linux/irq.h>
 #include <linux/vmalloc.h>
+#include <linux/of.h>
 
 #include <asm/processor.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/pci-bridge.h>
 #include <asm/byteorder.h>
 #include <asm/machdep.h>
@@ -285,3 +285,12 @@ int pcibus_to_node(struct pci_bus *bus)
 }
 EXPORT_SYMBOL(pcibus_to_node);
 #endif
+
+int pci_device_from_OF_node(struct device_node *np, u8 *bus, u8 *devfn)
+{
+       if (!PCI_DN(np))
+               return -ENODEV;
+       *bus = PCI_DN(np)->busno;
+       *devfn = PCI_DN(np)->devfn;
+       return 0;
+}
index 61571ae..938ab88 100644 (file)
@@ -12,9 +12,9 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/gfp.h>
+#include <linux/of.h>
 
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/pci-bridge.h>
 #include <asm/ppc-pci.h>
 #include <asm/firmware.h>
index c3024f1..756043d 100644 (file)
@@ -13,8 +13,8 @@
 
 #include <linux/pci.h>
 #include <linux/export.h>
+#include <linux/of.h>
 #include <asm/pci-bridge.h>
-#include <asm/prom.h>
 
 /**
  * get_int_prop - Decode a u32 from a device tree property
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(of_create_pci_dev);
  * @dev: pci_dev structure for the bridge
  *
  * of_scan_bus() calls this routine for each PCI bridge that it finds, and
- * this routine in turn call of_scan_bus() recusively to scan for more child
+ * this routine in turn calls of_scan_bus() recursively to scan for more child
  * devices.
  */
 void of_scan_pci_bridge(struct pci_dev *dev)
index 6a029f2..b109cd7 100644 (file)
@@ -7,12 +7,12 @@
 #include <linux/mm.h>
 #include <linux/proc_fs.h>
 #include <linux/kernel.h>
+#include <linux/of.h>
 
 #include <asm/machdep.h>
 #include <asm/vdso_datapage.h>
 #include <asm/rtas.h>
 #include <linux/uaccess.h>
-#include <asm/prom.h>
 
 #ifdef CONFIG_PPC64
 
index 984813a..d00b20c 100644 (file)
 #include <linux/ftrace.h>
 #include <linux/kernel_stat.h>
 #include <linux/personality.h>
-#include <linux/random.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/uaccess.h>
-#include <linux/elf-randomize.h>
 #include <linux/pkeys.h>
 #include <linux/seq_buf.h>
 
@@ -45,7 +43,6 @@
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/mmu.h>
-#include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/time.h>
 #include <asm/runlatch.h>
@@ -307,7 +304,7 @@ static void __giveup_vsx(struct task_struct *tsk)
        unsigned long msr = tsk->thread.regs->msr;
 
        /*
-        * We should never be ssetting MSR_VSX without also setting
+        * We should never be setting MSR_VSX without also setting
         * MSR_FP and MSR_VEC
         */
        WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));
@@ -645,7 +642,7 @@ static void do_break_handler(struct pt_regs *regs)
                return;
        }
 
-       /* Otherwise findout which DAWR caused exception and disable it. */
+       /* Otherwise find out which DAWR caused the exception and disable it. */
        wp_get_instr_detail(regs, &instr, &type, &size, &ea);
 
        for (i = 0; i < nr_wp_slots(); i++) {
@@ -2313,42 +2310,3 @@ unsigned long arch_align_stack(unsigned long sp)
                sp -= get_random_int() & ~PAGE_MASK;
        return sp & ~0xf;
 }
-
-static inline unsigned long brk_rnd(void)
-{
-        unsigned long rnd = 0;
-
-       /* 8MB for 32bit, 1GB for 64bit */
-       if (is_32bit_task())
-               rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
-       else
-               rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
-
-       return rnd << PAGE_SHIFT;
-}
-
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
-       unsigned long base = mm->brk;
-       unsigned long ret;
-
-#ifdef CONFIG_PPC_BOOK3S_64
-       /*
-        * If we are using 1TB segments and we are allowed to randomise
-        * the heap, we can put it above 1TB so it is backed by a 1TB
-        * segment. Otherwise the heap will be in the bottom 1TB
-        * which always uses 256MB segments and this may result in a
-        * performance penalty.
-        */
-       if (!radix_enabled() && !is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
-               base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
-#endif
-
-       ret = PAGE_ALIGN(base + brk_rnd());
-
-       if (ret < mm->brk)
-               return mm->brk;
-
-       return ret;
-}
-
index 86c4f00..feae850 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/cpu.h>
 #include <linux/pgtable.h>
 
-#include <asm/prom.h>
 #include <asm/rtas.h>
 #include <asm/page.h>
 #include <asm/processor.h>
index 0ac5faa..04694ec 100644 (file)
@@ -28,6 +28,8 @@
 #include <linux/bitops.h>
 #include <linux/pgtable.h>
 #include <linux/printk.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
 #include <asm/prom.h>
 #include <asm/rtas.h>
 #include <asm/page.h>
@@ -3416,7 +3418,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
         *
         * PowerMacs use a different mechanism to spin CPUs
         *
-        * (This must be done after instanciating RTAS)
+        * (This must be done after instantiating RTAS)
         */
        if (of_platform != PLATFORM_POWERMAC)
                prom_hold_cpus();
index f15bc78..076d867 100644 (file)
@@ -174,7 +174,7 @@ int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
 
        /*
         * softe copies paca->irq_soft_mask variable state. Since irq_soft_mask is
-        * no more used as a flag, lets force usr to alway see the softe value as 1
+        * no more used as a flag, lets force usr to always see the softe value as 1
         * which means interrupts are not soft disabled.
         */
        if (IS_ENABLED(CONFIG_PPC64) && regno == PT_SOFTE) {
index 6857a5b..081b2b7 100644 (file)
 #include <linux/seq_file.h>
 #include <linux/bitops.h>
 #include <linux/rtc.h>
+#include <linux/of.h>
 
 #include <linux/uaccess.h>
 #include <asm/processor.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/rtas.h>
 #include <asm/machdep.h> /* for ppc_md */
 #include <asm/time.h>
@@ -259,7 +259,6 @@ __initcall(proc_rtas_init);
 static int parse_number(const char __user *p, size_t count, u64 *val)
 {
        char buf[40];
-       char *end;
 
        if (count > 39)
                return -EINVAL;
@@ -269,11 +268,7 @@ static int parse_number(const char __user *p, size_t count, u64 *val)
 
        buf[count] = 0;
 
-       *val = simple_strtoull(buf, &end, 10);
-       if (*end && *end != '\n')
-               return -EINVAL;
-
-       return 0;
+       return kstrtoull(buf, 10, val);
 }
 
 /* ****************************************************************** */
index 33c07c8..5a31d18 100644 (file)
@@ -6,7 +6,6 @@
 #include <linux/rtc.h>
 #include <linux/delay.h>
 #include <linux/ratelimit.h>
-#include <asm/prom.h>
 #include <asm/rtas.h>
 #include <asm/time.h>
 
index 1f42aab..03d173b 100644 (file)
 #include <linux/slab.h>
 #include <linux/reboot.h>
 #include <linux/syscalls.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
 
 #include <asm/interrupt.h>
-#include <asm/prom.h>
 #include <asm/rtas.h>
 #include <asm/hvcall.h>
 #include <asm/machdep.h>
@@ -49,6 +50,15 @@ void enter_rtas(unsigned long);
 
 static inline void do_enter_rtas(unsigned long args)
 {
+       unsigned long msr;
+
+       /*
+        * Make sure MSR[RI] is currently enabled as it will be forced later
+        * in enter_rtas.
+        */
+       msr = mfmsr();
+       BUG_ON(!(msr & MSR_RI));
+
        enter_rtas(args);
 
        srr_regs_clobbered(); /* rtas uses SRRs, invalidate */
index a99179d..bc817a5 100644 (file)
@@ -120,7 +120,7 @@ static struct kmem_cache *flash_block_cache = NULL;
 /*
  * Local copy of the flash block list.
  *
- * The rtas_firmware_flash_list varable will be
+ * The rtas_firmware_flash_list variable will be
  * set once the data is fully read.
  *
  * For convenience as we build the list we use virtual addrs,
index 781c186..5a2f5ea 100644 (file)
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/pgtable.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
-#include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/pci-bridge.h>
 #include <asm/iommu.h>
index cf0f429..5270b45 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/uaccess.h>
 #include <asm/io.h>
 #include <asm/rtas.h>
-#include <asm/prom.h>
 #include <asm/nvram.h>
 #include <linux/atomic.h>
 #include <asm/machdep.h>
index 518ae5a..baafad0 100644 (file)
 #include <linux/serial_8250.h>
 #include <linux/percpu.h>
 #include <linux/memblock.h>
+#include <linux/of_irq.h>
+#include <linux/of_fdt.h>
 #include <linux/of_platform.h>
 #include <linux/hugetlb.h>
 #include <linux/pgtable.h>
 #include <asm/io.h>
 #include <asm/paca.h>
-#include <asm/prom.h>
 #include <asm/processor.h>
 #include <asm/vdso_datapage.h>
 #include <asm/smp.h>
@@ -279,7 +280,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                           proc_freq / 1000000, proc_freq % 1000000);
 
        /* If we are a Freescale core do a simple check so
-        * we dont have to keep adding cases in the future */
+        * we don't have to keep adding cases in the future */
        if (PVR_VER(pvr) & 0x8000) {
                switch (PVR_VER(pvr)) {
                case 0x8000:    /* 7441/7450/7451, Voyager */
index a6e9d36..8132617 100644 (file)
 #include <linux/export.h>
 #include <linux/nvram.h>
 #include <linux/pgtable.h>
+#include <linux/of_fdt.h>
+#include <linux/irq.h>
 
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/processor.h>
 #include <asm/setup.h>
 #include <asm/smp.h>
index a96f050..0e8fc1c 100644 (file)
 #include <linux/memory.h>
 #include <linux/nmi.h>
 #include <linux/pgtable.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
 
 #include <asm/kvm_guest.h>
 #include <asm/io.h>
 #include <asm/kdump.h>
-#include <asm/prom.h>
 #include <asm/processor.h>
 #include <asm/smp.h>
 #include <asm/elf.h>
index 73d483b..858fc13 100644 (file)
@@ -123,7 +123,7 @@ static long notrace __unsafe_setup_sigcontext(struct sigcontext __user *sc,
 #endif
        struct pt_regs *regs = tsk->thread.regs;
        unsigned long msr = regs->msr;
-       /* Force usr to alway see softe as 1 (interrupts enabled) */
+       /* Force usr to always see softe as 1 (interrupts enabled) */
        unsigned long softe = 0x1;
 
        BUG_ON(tsk != current);
index de0f6f0..4c4511b 100644 (file)
@@ -43,7 +43,6 @@
 #include <asm/kvm_ppc.h>
 #include <asm/dbell.h>
 #include <asm/page.h>
-#include <asm/prom.h>
 #include <asm/smp.h>
 #include <asm/time.h>
 #include <asm/machdep.h>
@@ -1102,7 +1101,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
        DBG("smp_prepare_cpus\n");
 
        /* 
-        * setup_cpu may need to be called on the boot cpu. We havent
+        * setup_cpu may need to be called on the boot cpu. We haven't
         * spun any cpus up but lets be paranoid.
         */
        BUG_ON(boot_cpuid != smp_processor_id());
index c4f5b4c..fc99914 100644 (file)
@@ -73,7 +73,7 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, size_t, len,
 int
 ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct __kernel_old_timeval __user *tvp)
 {
-       if ( (unsigned long)n >= 4096 )
+       if ((unsigned long)n >= 4096)
                return sys_old_select((void __user *)n);
 
        return sys_select(n, inp, outp, exp, tvp);
index 2069bbb..3a10cda 100644 (file)
@@ -9,12 +9,12 @@
 #include <linux/nodemask.h>
 #include <linux/cpumask.h>
 #include <linux/notifier.h>
+#include <linux/of.h>
 
 #include <asm/current.h>
 #include <asm/processor.h>
 #include <asm/cputable.h>
 #include <asm/hvcall.h>
-#include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/smp.h>
 #include <asm/pmc.h>
index f80cce0..587adcc 100644 (file)
 #include <linux/of_clk.h>
 #include <linux/suspend.h>
 #include <linux/processor.h>
-#include <asm/trace.h>
+#include <linux/mc146818rtc.h>
+#include <linux/platform_device.h>
 
+#include <asm/trace.h>
 #include <asm/interrupt.h>
 #include <asm/io.h>
 #include <asm/nvram.h>
@@ -63,7 +65,6 @@
 #include <asm/machdep.h>
 #include <linux/uaccess.h>
 #include <asm/time.h>
-#include <asm/prom.h>
 #include <asm/irq.h>
 #include <asm/div64.h>
 #include <asm/smp.h>
@@ -156,10 +157,6 @@ bool tb_invalid;
 u64 __cputime_usec_factor;
 EXPORT_SYMBOL(__cputime_usec_factor);
 
-#ifdef CONFIG_PPC_SPLPAR
-void (*dtl_consumer)(struct dtl_entry *, u64);
-#endif
-
 static void calc_cputime_factors(void)
 {
        struct div_result res;
@@ -185,6 +182,8 @@ static inline unsigned long read_spurr(unsigned long tb)
 
 #include <asm/dtl.h>
 
+void (*dtl_consumer)(struct dtl_entry *, u64);
+
 /*
  * Scan the dispatch trace log and count up the stolen time.
  * Should be called with interrupts disabled.
@@ -829,7 +828,7 @@ static void __read_persistent_clock(struct timespec64 *ts)
        static int first = 1;
 
        ts->tv_nsec = 0;
-       /* XXX this is a litle fragile but will work okay in the short term */
+       /* XXX this is a little fragile but will work okay in the short term */
        if (first) {
                first = 0;
                if (ppc_md.time_init)
@@ -974,7 +973,7 @@ void secondary_cpu_time_init(void)
         */
        start_cpu_decrementer();
 
-       /* FIME: Should make unrelatred change to move snapshot_timebase
+       /* FIXME: Should make unrelated change to move snapshot_timebase
         * call here ! */
        register_decrementer_clockevent(smp_processor_id());
 }
index c697546..95a41ae 100644 (file)
@@ -48,6 +48,11 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
                return -EINVAL;
        }
 
+       if (!can_single_step(ppc_inst_val(ppc_inst_read(auprobe->insn)))) {
+               pr_info_ratelimited("Cannot register a uprobe on instructions that can't be single stepped\n");
+               return -ENOTSUPP;
+       }
+
        return 0;
 }
 
index 717f2c9..0da2875 100644 (file)
@@ -25,7 +25,6 @@
 #include <asm/processor.h>
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
-#include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/cputable.h>
 #include <asm/sections.h>
index bfc2749..7d28b95 100644 (file)
@@ -56,7 +56,7 @@
  * solved by also having a SMP watchdog where all CPUs check all other
  * CPUs heartbeat.
  *
- * The SMP checker can detect lockups on other CPUs. A gobal "pending"
+ * The SMP checker can detect lockups on other CPUs. A global "pending"
  * cpumask is kept, containing all CPUs which enable the watchdog. Each
  * CPU clears their pending bit in their heartbeat timer. When the bitmask
  * becomes empty, the last CPU to clear its pending bit updates a global
index abf5897..7ab4980 100644 (file)
@@ -18,7 +18,6 @@
 #include <asm/kdump.h>
 #include <asm/machdep.h>
 #include <asm/pgalloc.h>
-#include <asm/prom.h>
 #include <asm/sections.h>
 
 void machine_kexec_mask_interrupts(void) {
index 6cc7793..c2bea9d 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/kernel.h>
 #include <linux/cpu.h>
 #include <linux/hardirq.h>
+#include <linux/of.h>
 
 #include <asm/page.h>
 #include <asm/current.h>
@@ -25,7 +26,6 @@
 #include <asm/paca.h>
 #include <asm/mmu.h>
 #include <asm/sections.h>      /* _end */
-#include <asm/prom.h>
 #include <asm/smp.h>
 #include <asm/hw_breakpoint.h>
 #include <asm/svm.h>
@@ -406,7 +406,7 @@ static int __init export_htab_values(void)
        if (!node)
                return -ENODEV;
 
-       /* remove any stale propertys so ours can be found */
+       /* remove any stale properties so ours can be found */
        of_remove_property(node, of_find_property(node, htab_base_prop.name, NULL));
        of_remove_property(node, of_find_property(node, htab_size_prop.name, NULL));
 
index 22ceeeb..d85fa9f 100644 (file)
@@ -20,7 +20,6 @@
 #include <asm/processor.h>
 #include <asm/machdep.h>
 #include <asm/kexec.h>
-#include <asm/prom.h>
 #include <asm/smp.h>
 #include <asm/setjmp.h>
 #include <asm/debug.h>
index c036b1a..514fd45 100644 (file)
@@ -58,7 +58,7 @@ struct kvm_resize_hpt {
        /* Possible values and their usage:
         *  <0     an error occurred during allocation,
         *  -EBUSY allocation is in the progress,
-        *  0      allocation made successfuly.
+        *  0      allocation made successfully.
         */
        int error;
 
index e4ce2a3..42851c3 100644 (file)
@@ -168,9 +168,10 @@ int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
                        return -EINVAL;
                /* Read the entry from guest memory */
                addr = base + (index * sizeof(rpte));
-               vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+
+               kvm_vcpu_srcu_read_lock(vcpu);
                ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte));
-               srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+               kvm_vcpu_srcu_read_unlock(vcpu);
                if (ret) {
                        if (pte_ret_p)
                                *pte_ret_p = addr;
@@ -246,9 +247,9 @@ int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
 
        /* Read the table to find the root of the radix tree */
        ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry));
-       vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+       kvm_vcpu_srcu_read_lock(vcpu);
        ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry));
-       srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+       kvm_vcpu_srcu_read_unlock(vcpu);
        if (ret)
                return ret;
 
index fdb57be..5bbfb2e 100644 (file)
@@ -268,7 +268,7 @@ int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu,
 
                        /*
                         * add rules to fit in ISA specification regarding TM
-                        * state transistion in TM disable/Suspended state,
+                        * state transition in TM disable/Suspended state,
                         * and target TM state is TM inactive(00) state. (the
                         * change should be suppressed).
                         */
index 7f8cf19..e08fb31 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/module.h>
 #include <linux/compiler.h>
 #include <linux/of.h>
+#include <linux/irqdomain.h>
 
 #include <asm/ftrace.h>
 #include <asm/reg.h>
index ce08573..0644732 100644 (file)
@@ -305,10 +305,10 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
        /* copy parameters in */
        hv_ptr = kvmppc_get_gpr(vcpu, 4);
        regs_ptr = kvmppc_get_gpr(vcpu, 5);
-       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+       kvm_vcpu_srcu_read_lock(vcpu);
        err = kvmhv_read_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
                                              hv_ptr, regs_ptr);
-       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+       kvm_vcpu_srcu_read_unlock(vcpu);
        if (err)
                return H_PARAMETER;
 
@@ -409,10 +409,10 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
                byteswap_hv_regs(&l2_hv);
                byteswap_pt_regs(&l2_regs);
        }
-       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+       kvm_vcpu_srcu_read_lock(vcpu);
        err = kvmhv_write_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
                                               hv_ptr, regs_ptr);
-       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+       kvm_vcpu_srcu_read_unlock(vcpu);
        if (err)
                return H_AUTHORITY;
 
@@ -593,16 +593,16 @@ long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu)
                        goto not_found;
 
                /* Write what was loaded into our buffer back to the L1 guest */
-               vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+               kvm_vcpu_srcu_read_lock(vcpu);
                rc = kvm_vcpu_write_guest(vcpu, gp_to, buf, n);
-               srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+               kvm_vcpu_srcu_read_unlock(vcpu);
                if (rc)
                        goto not_found;
        } else {
                /* Load the data to be stored from the L1 guest into our buf */
-               vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+               kvm_vcpu_srcu_read_lock(vcpu);
                rc = kvm_vcpu_read_guest(vcpu, gp_from, buf, n);
-               srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+               kvm_vcpu_srcu_read_unlock(vcpu);
                if (rc)
                        goto not_found;
 
index 9dba3e3..112a09b 100644 (file)
@@ -379,7 +379,7 @@ void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu,
 {
        /*
         * current->thread.xxx registers must all be restored to host
-        * values before a potential context switch, othrewise the context
+        * values before a potential context switch, otherwise the context
         * switch itself will overwrite current->thread.xxx with the values
         * from the guest SPRs.
         */
index 36f2314..5980063 100644 (file)
@@ -120,7 +120,7 @@ static DEFINE_SPINLOCK(kvmppc_uvmem_bitmap_lock);
  *     content is un-encrypted.
  *
  * (c) Normal - The GFN is a normal. The GFN is associated with
- *     a normal VM. The contents of the GFN is accesible to
+ *     a normal VM. The contents of the GFN is accessible to
  *     the Hypervisor. Its content is never encrypted.
  *
  * States of a VM.
index 7bf9e6c..d6abed6 100644 (file)
@@ -1287,7 +1287,7 @@ int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
 
                /* Get last sc for papr */
                if (vcpu->arch.papr_enabled) {
-                       /* The sc instuction points SRR0 to the next inst */
+                       /* The sc instruction points SRR0 to the next inst */
                        emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
                        if (emul != EMULATE_DONE) {
                                kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
index 0f847f1..6808bda 100644 (file)
@@ -229,9 +229,9 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
         */
        args_phys = kvmppc_get_gpr(vcpu, 4) & KVM_PAM;
 
-       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+       kvm_vcpu_srcu_read_lock(vcpu);
        rc = kvm_read_guest(vcpu->kvm, args_phys, &args, sizeof(args));
-       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+       kvm_vcpu_srcu_read_unlock(vcpu);
        if (rc)
                goto fail;
 
index ab6d37d..589a8f2 100644 (file)
@@ -462,7 +462,7 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
         * new guy. We cannot assume that the rejected interrupt is less
         * favored than the new one, and thus doesn't need to be delivered,
         * because by the time we exit icp_try_to_deliver() the target
-        * processor may well have alrady consumed & completed it, and thus
+        * processor may well have already consumed & completed it, and thus
         * the rejected interrupt might actually be already acceptable.
         */
        if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
index 3d47862..4ca2364 100644 (file)
@@ -726,7 +726,7 @@ void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
                 * interrupt might have fired and be on its way to the
                 * host queue while we mask it, and if we unmask it
                 * early enough (re-cede right away), there is a
-                * theorical possibility that it fires again, thus
+                * theoretical possibility that it fires again, thus
                 * landing in the target queue more than once which is
                 * a big no-no.
                 *
@@ -1227,7 +1227,7 @@ static int xive_target_interrupt(struct kvm *kvm,
 
 /*
  * Targetting rules: In order to avoid losing track of
- * pending interrupts accross mask and unmask, which would
+ * pending interrupts across mask and unmask, which would
  * allow queue overflows, we implement the following rules:
  *
  *  - Unless it was never enabled (or we run out of capacity)
@@ -1678,7 +1678,7 @@ int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
        /*
         * If old_p is set, the interrupt is pending, we switch it to
         * PQ=11. This will force a resend in the host so the interrupt
-        * isn't lost to whatver host driver may pick it up
+        * isn't lost to whatever host driver may pick it up
         */
        if (state->old_p)
                xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);
index 2b9ad89..57e0ad6 100644 (file)
@@ -309,7 +309,7 @@ static int kvmppc_core_vcpu_create_e500mc(struct kvm_vcpu *vcpu)
        BUILD_BUG_ON(offsetof(struct kvmppc_vcpu_e500, vcpu) != 0);
        vcpu_e500 = to_e500(vcpu);
 
-       /* Invalid PIR value -- this LPID dosn't have valid state on any cpu */
+       /* Invalid PIR value -- this LPID doesn't have valid state on any cpu */
        vcpu->arch.oldpir = 0xffffffff;
 
        err = kvmppc_e500_tlb_init(vcpu_e500);
index 3256119..191992f 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/irqbypass.h>
 #include <linux/kvm_irqfd.h>
+#include <linux/of.h>
 #include <asm/cputable.h>
 #include <linux/uaccess.h>
 #include <asm/kvm_ppc.h>
@@ -425,9 +426,9 @@ int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
                return EMULATE_DONE;
        }
 
-       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+       kvm_vcpu_srcu_read_lock(vcpu);
        rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
-       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+       kvm_vcpu_srcu_read_unlock(vcpu);
        if (rc)
                return EMULATE_DO_MMIO;
 
index 00c68e7..be670e1 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/init.h>
 #include <linux/cpuhotplug.h>
 #include <linux/uaccess.h>
+#include <linux/jump_label.h>
 
 #include <asm/tlbflush.h>
 #include <asm/page.h>
@@ -78,6 +79,8 @@ static int text_area_cpu_down(unsigned int cpu)
        return 0;
 }
 
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(poking_init_done);
+
 /*
  * Although BUG_ON() is rude, in this case it should only happen if ENOMEM, and
  * we judge it as being preferable to a kernel that will crash later when
@@ -88,6 +91,7 @@ void __init poking_init(void)
        BUG_ON(!cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
                "powerpc/text_poke:online", text_area_cpu_up,
                text_area_cpu_down));
+       static_branch_enable(&poking_init_done);
 }
 
 /*
@@ -97,7 +101,7 @@ static int map_patch_area(void *addr, unsigned long text_poke_addr)
 {
        unsigned long pfn;
 
-       if (is_vmalloc_or_module_addr(addr))
+       if (IS_ENABLED(CONFIG_MODULES) && is_vmalloc_or_module_addr(addr))
                pfn = vmalloc_to_pfn(addr);
        else
                pfn = __pa_symbol(addr) >> PAGE_SHIFT;
@@ -170,7 +174,7 @@ static int do_patch_instruction(u32 *addr, ppc_inst_t instr)
         * when text_poke_area is not ready, but we still need
         * to allow patching. We just do the plain old patching
         */
-       if (!this_cpu_read(text_poke_area))
+       if (!static_branch_likely(&poking_init_done))
                return raw_patch_instruction(addr, instr);
 
        local_irq_save(flags);
@@ -188,10 +192,12 @@ static int do_patch_instruction(u32 *addr, ppc_inst_t instr)
 
 #endif /* CONFIG_STRICT_KERNEL_RWX */
 
+__ro_after_init DEFINE_STATIC_KEY_FALSE(init_mem_is_free);
+
 int patch_instruction(u32 *addr, ppc_inst_t instr)
 {
        /* Make sure we aren't patching a freed init section */
-       if (system_state >= SYSTEM_FREEING_INITMEM && init_section_contains(addr, 4))
+       if (static_branch_likely(&init_mem_is_free) && init_section_contains(addr, 4))
                return 0;
 
        return do_patch_instruction(addr, instr);
index 6f79bde..398b569 100644 (file)
@@ -15,9 +15,6 @@
 #include <asm/cputable.h>
 #include <asm/disassemble.h>
 
-extern char system_call_common[];
-extern char system_call_vectored_emulate[];
-
 #ifdef CONFIG_PPC64
 /* Bits in SRR1 that are copied from MSR */
 #define MSR_MASK       0xffffffff87c0ffffUL
@@ -1166,7 +1163,7 @@ static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
 
        if (carry_in)
                ++val;
-       op->type = COMPUTE + SETREG + SETXER;
+       op->type = COMPUTE | SETREG | SETXER;
        op->reg = rd;
        op->val = val;
        val = truncate_if_32bit(regs->msr, val);
@@ -1187,7 +1184,7 @@ static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
 {
        unsigned int crval, shift;
 
-       op->type = COMPUTE + SETCC;
+       op->type = COMPUTE | SETCC;
        crval = (regs->xer >> 31) & 1;          /* get SO bit */
        if (v1 < v2)
                crval |= 8;
@@ -1206,7 +1203,7 @@ static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
 {
        unsigned int crval, shift;
 
-       op->type = COMPUTE + SETCC;
+       op->type = COMPUTE | SETCC;
        crval = (regs->xer >> 31) & 1;          /* get SO bit */
        if (v1 < v2)
                crval |= 8;
@@ -1376,7 +1373,6 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
                if (branch_taken(word, regs, op))
                        op->type |= BRTAKEN;
                return 1;
-#ifdef CONFIG_PPC64
        case 17:        /* sc */
                if ((word & 0xfe2) == 2)
                        op->type = SYSCALL;
@@ -1388,7 +1384,6 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
                } else
                        op->type = UNKNOWN;
                return 0;
-#endif
        case 18:        /* b */
                op->type = BRANCH | BRTAKEN;
                imm = word & 0x03fffffc;
@@ -3643,43 +3638,22 @@ int emulate_step(struct pt_regs *regs, ppc_inst_t instr)
                regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val));
                goto instr_done;
 
-#ifdef CONFIG_PPC64
        case SYSCALL:   /* sc */
                /*
-                * N.B. this uses knowledge about how the syscall
-                * entry code works.  If that is changed, this will
-                * need to be changed also.
+                * Per ISA v3.1, section 7.5.15 'Trace Interrupt', we can't
+                * single step a system call instruction:
+                *
+                *   Successful completion for an instruction means that the
+                *   instruction caused no other interrupt. Thus a Trace
+                *   interrupt never occurs for a System Call or System Call
+                *   Vectored instruction, or for a Trap instruction that
+                *   traps.
                 */
-               if (IS_ENABLED(CONFIG_PPC_FAST_ENDIAN_SWITCH) &&
-                               cpu_has_feature(CPU_FTR_REAL_LE) &&
-                               regs->gpr[0] == 0x1ebe) {
-                       regs_set_return_msr(regs, regs->msr ^ MSR_LE);
-                       goto instr_done;
-               }
-               regs->gpr[9] = regs->gpr[13];
-               regs->gpr[10] = MSR_KERNEL;
-               regs->gpr[11] = regs->nip + 4;
-               regs->gpr[12] = regs->msr & MSR_MASK;
-               regs->gpr[13] = (unsigned long) get_paca();
-               regs_set_return_ip(regs, (unsigned long) &system_call_common);
-               regs_set_return_msr(regs, MSR_KERNEL);
-               return 1;
-
-#ifdef CONFIG_PPC_BOOK3S_64
+               return -1;
        case SYSCALL_VECTORED_0:        /* scv 0 */
-               regs->gpr[9] = regs->gpr[13];
-               regs->gpr[10] = MSR_KERNEL;
-               regs->gpr[11] = regs->nip + 4;
-               regs->gpr[12] = regs->msr & MSR_MASK;
-               regs->gpr[13] = (unsigned long) get_paca();
-               regs_set_return_ip(regs, (unsigned long) &system_call_vectored_emulate);
-               regs_set_return_msr(regs, MSR_KERNEL);
-               return 1;
-#endif
-
+               return -1;
        case RFI:
                return -1;
-#endif
        }
        return 0;
 
index df8172d..503a6e2 100644 (file)
@@ -5,7 +5,7 @@
 
 ccflags-$(CONFIG_PPC64)        := $(NO_MINIMAL_TOC)
 
-obj-y                          := fault.o mem.o pgtable.o mmap.o maccess.o pageattr.o \
+obj-y                          := fault.o mem.o pgtable.o maccess.o pageattr.o \
                                   init_$(BITS).o pgtable_$(BITS).o \
                                   pgtable-frag.o ioremap.o ioremap_$(BITS).o \
                                   init-common.o mmu_context.o drmem.o \
@@ -14,7 +14,6 @@ obj-$(CONFIG_PPC_MMU_NOHASH)  += nohash/
 obj-$(CONFIG_PPC_BOOK3S_32)    += book3s32/
 obj-$(CONFIG_PPC_BOOK3S_64)    += book3s64/
 obj-$(CONFIG_NUMA) += numa.o
-obj-$(CONFIG_PPC_MM_SLICES)    += slice.o
 obj-$(CONFIG_HUGETLB_PAGE)     += hugetlbpage.o
 obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
 obj-$(CONFIG_PPC_COPRO_BASE)   += copro_fault.o
index 203735c..49a737f 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/highmem.h>
 #include <linux/memblock.h>
 
-#include <asm/prom.h>
 #include <asm/mmu.h>
 #include <asm/machdep.h>
 #include <asm/code-patching.h>
index 2d50cac..d527dc8 100644 (file)
@@ -5,7 +5,7 @@ ccflags-y       := $(NO_MINIMAL_TOC)
 obj-y                          += mmu_context.o pgtable.o trace.o
 ifdef CONFIG_PPC_64S_HASH_MMU
 CFLAGS_REMOVE_slb.o = $(CC_FLAGS_FTRACE)
-obj-y                          += hash_pgtable.o hash_utils.o hash_tlb.o slb.o
+obj-y                          += hash_pgtable.o hash_utils.o hash_tlb.o slb.o slice.o
 obj-$(CONFIG_PPC_HASH_MMU_NATIVE)      += hash_native.o
 obj-$(CONFIG_PPC_4K_PAGES)     += hash_4k.o
 obj-$(CONFIG_PPC_64K_PAGES)    += hash_64k.o
index 7ce8914..2e0cad5 100644 (file)
@@ -377,7 +377,7 @@ int hash__has_transparent_hugepage(void)
        if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
                return 0;
        /*
-        * We need to make sure that we support 16MB hugepage in a segement
+        * We need to make sure that we support 16MB hugepage in a segment
         * with base page size 64K or 4K. We only enable THP with a PAGE_SIZE
         * of 64K.
         */
index 985cabd..fc92613 100644 (file)
@@ -37,6 +37,9 @@
 #include <linux/cpu.h>
 #include <linux/pgtable.h>
 #include <linux/debugfs.h>
+#include <linux/random.h>
+#include <linux/elf-randomize.h>
+#include <linux/of_fdt.h>
 
 #include <asm/interrupt.h>
 #include <asm/processor.h>
@@ -46,7 +49,6 @@
 #include <asm/types.h>
 #include <linux/uaccess.h>
 #include <asm/machdep.h>
-#include <asm/prom.h>
 #include <asm/io.h>
 #include <asm/eeh.h>
 #include <asm/tlb.h>
@@ -1264,7 +1266,6 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
        return pp;
 }
 
-#ifdef CONFIG_PPC_MM_SLICES
 static unsigned int get_paca_psize(unsigned long addr)
 {
        unsigned char *psizes;
@@ -1281,12 +1282,6 @@ static unsigned int get_paca_psize(unsigned long addr)
        return (psizes[index >> 1] >> (mask_index * 4)) & 0xF;
 }
 
-#else
-unsigned int get_paca_psize(unsigned long addr)
-{
-       return get_paca()->mm_ctx_user_psize;
-}
-#endif
 
 /*
  * Demote a segment to using 4k pages.
@@ -1343,7 +1338,7 @@ static int subpage_protection(struct mm_struct *mm, unsigned long ea)
        spp >>= 30 - 2 * ((ea >> 12) & 0xf);
 
        /*
-        * 0 -> full premission
+        * 0 -> full permission
         * 1 -> Read only
         * 2 -> no access.
         * We return the flag that need to be cleared.
@@ -1664,7 +1659,7 @@ DEFINE_INTERRUPT_HANDLER(do_hash_fault)
 
        err = hash_page_mm(mm, ea, access, TRAP(regs), flags);
        if (unlikely(err < 0)) {
-               // failed to instert a hash PTE due to an hypervisor error
+               // failed to insert a hash PTE due to a hypervisor error
                if (user_mode(regs)) {
                        if (IS_ENABLED(CONFIG_PPC_SUBPAGE_PROT) && err == -2)
                                _exception(SIGSEGV, regs, SEGV_ACCERR, ea);
@@ -1680,7 +1675,6 @@ DEFINE_INTERRUPT_HANDLER(do_hash_fault)
        }
 }
 
-#ifdef CONFIG_PPC_MM_SLICES
 static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
 {
        int psize = get_slice_psize(mm, ea);
@@ -1697,12 +1691,6 @@ static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
 
        return true;
 }
-#else
-static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
-{
-       return true;
-}
-#endif
 
 static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
                         bool is_exec, unsigned long trap)
@@ -2147,3 +2135,20 @@ void __init print_system_hash_info(void)
        if (htab_hash_mask)
                pr_info("htab_hash_mask    = 0x%lx\n", htab_hash_mask);
 }
+
+unsigned long arch_randomize_brk(struct mm_struct *mm)
+{
+       /*
+        * If we are using 1TB segments and we are allowed to randomise
+        * the heap, we can put it above 1TB so it is backed by a 1TB
+        * segment. Otherwise the heap will be in the bottom 1TB
+        * which always uses 256MB segments and this may result in a
+        * performance penalty.
+        */
+       if (is_32bit_task())
+               return randomize_page(mm->brk, SZ_32M);
+       else if (!radix_enabled() && mmu_highuser_ssize == MMU_SEGSIZE_1T)
+               return randomize_page(max_t(unsigned long, mm->brk, SZ_1T), SZ_1G);
+       else
+               return randomize_page(mm->brk, SZ_1G);
+}
index 052e659..071bb66 100644 (file)
@@ -331,7 +331,7 @@ static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
        spin_lock(&mm->page_table_lock);
        /*
         * If we find pgtable_page set, we return
-        * the allocated page with single fragement
+        * the allocated page with single fragment
         * count.
         */
        if (likely(!mm->context.pmd_frag)) {
index 23d3e08..d2fb776 100644 (file)
@@ -41,61 +41,6 @@ void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long st
                radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
 }
 
-/*
- * A vairant of hugetlb_get_unmapped_area doing topdown search
- * FIXME!! should we do as x86 does or non hugetlb area does ?
- * ie, use topdown or not based on mmap_is_legacy check ?
- */
-unsigned long
-radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
-                               unsigned long len, unsigned long pgoff,
-                               unsigned long flags)
-{
-       struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
-       struct hstate *h = hstate_file(file);
-       int fixed = (flags & MAP_FIXED);
-       unsigned long high_limit;
-       struct vm_unmapped_area_info info;
-
-       high_limit = DEFAULT_MAP_WINDOW;
-       if (addr >= high_limit || (fixed && (addr + len > high_limit)))
-               high_limit = TASK_SIZE;
-
-       if (len & ~huge_page_mask(h))
-               return -EINVAL;
-       if (len > high_limit)
-               return -ENOMEM;
-
-       if (fixed) {
-               if (addr > high_limit - len)
-                       return -ENOMEM;
-               if (prepare_hugepage_range(file, addr, len))
-                       return -EINVAL;
-               return addr;
-       }
-
-       if (addr) {
-               addr = ALIGN(addr, huge_page_size(h));
-               vma = find_vma(mm, addr);
-               if (high_limit - len >= addr && addr >= mmap_min_addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
-                       return addr;
-       }
-       /*
-        * We are always doing an topdown search here. Slice code
-        * does that too.
-        */
-       info.flags = VM_UNMAPPED_AREA_TOPDOWN;
-       info.length = len;
-       info.low_limit = max(PAGE_SIZE, mmap_min_addr);
-       info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
-       info.align_mask = PAGE_MASK & ~huge_page_mask(h);
-       info.align_offset = 0;
-
-       return vm_unmapped_area(&info);
-}
-
 void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
                                         unsigned long addr, pte_t *ptep,
                                         pte_t old_pte, pte_t pte)
index def0463..db2f3d1 100644 (file)
@@ -359,7 +359,7 @@ static void __init radix_init_pgtable(void)
        if (!cpu_has_feature(CPU_FTR_HVMODE) &&
                        cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
                /*
-                * Older versions of KVM on these machines perfer if the
+                * Older versions of KVM on these machines prefer if the
                 * guest only uses the low 19 PID bits.
                 */
                mmu_pid_bits = 19;
index 7724af1..dda51fe 100644 (file)
@@ -397,7 +397,7 @@ static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
 
        /*
         * Workaround the fact that the "ric" argument to __tlbie_pid
-        * must be a compile-time contraint to match the "i" constraint
+        * must be a compile-time constraint to match the "i" constraint
         * in the asm statement.
         */
        switch (ric) {
index 81091b9..6956f63 100644 (file)
@@ -347,7 +347,7 @@ void slb_setup_new_exec(void)
        /*
         * We have no good place to clear the slb preload cache on exec,
         * flush_thread is about the earliest arch hook but that happens
-        * after we switch to the mm and have aleady preloaded the SLBEs.
+        * after we switch to the mm and have already preloaded the SLBEs.
         *
         * For the most part that's probably okay to use entries from the
         * previous exec, they will age out if unused. It may turn out to
@@ -615,7 +615,7 @@ static void slb_cache_update(unsigned long esid_data)
        } else {
                /*
                 * Our cache is full and the current cache content strictly
-                * doesn't indicate the active SLB conents. Bump the ptr
+                * doesn't indicate the active SLB contents. Bump the ptr
                 * so that switch_slb() will ignore the cache.
                 */
                local_paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
diff --git a/arch/powerpc/mm/book3s64/slice.c b/arch/powerpc/mm/book3s64/slice.c
new file mode 100644 (file)
index 0000000..c0b58af
--- /dev/null
@@ -0,0 +1,807 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * address space "slices" (meta-segments) support
+ *
+ * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
+ *
+ * Based on hugetlb implementation
+ *
+ * Copyright (C) 2003 David Gibson, IBM Corporation.
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+#include <linux/hugetlb.h>
+#include <linux/sched/mm.h>
+#include <linux/security.h>
+#include <asm/mman.h>
+#include <asm/mmu.h>
+#include <asm/copro.h>
+#include <asm/hugetlb.h>
+#include <asm/mmu_context.h>
+
+static DEFINE_SPINLOCK(slice_convert_lock);
+
+#ifdef DEBUG
+int _slice_debug = 1;
+
+static void slice_print_mask(const char *label, const struct slice_mask *mask)
+{
+       if (!_slice_debug)
+               return;
+       pr_devel("%s low_slice: %*pbl\n", label,
+                       (int)SLICE_NUM_LOW, &mask->low_slices);
+       pr_devel("%s high_slice: %*pbl\n", label,
+                       (int)SLICE_NUM_HIGH, mask->high_slices);
+}
+
+#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)
+
+#else
+
+static void slice_print_mask(const char *label, const struct slice_mask *mask) {}
+#define slice_dbg(fmt...)
+
+#endif
+
+static inline notrace bool slice_addr_is_low(unsigned long addr)
+{
+       u64 tmp = (u64)addr;
+
+       return tmp < SLICE_LOW_TOP;
+}
+
+static void slice_range_to_mask(unsigned long start, unsigned long len,
+                               struct slice_mask *ret)
+{
+       unsigned long end = start + len - 1;
+
+       ret->low_slices = 0;
+       if (SLICE_NUM_HIGH)
+               bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
+
+       if (slice_addr_is_low(start)) {
+               unsigned long mend = min(end,
+                                        (unsigned long)(SLICE_LOW_TOP - 1));
+
+               ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
+                       - (1u << GET_LOW_SLICE_INDEX(start));
+       }
+
+       if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
+               unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
+               unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
+               unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
+
+               bitmap_set(ret->high_slices, start_index, count);
+       }
+}
+
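+/*
+ * Return non-zero if [addr, addr + len) is below the mm's address limit
+ * and does not overlap any existing VMA (including its stack guard gap).
+ */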
+static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
+                             unsigned long len)
+{
+       struct vm_area_struct *vma;
+
+       if ((mm_ctx_slb_addr_limit(&mm->context) - len) < addr)
+               return 0;
+       vma = find_vma(mm, addr);
+       return (!vma || (addr + len) <= vm_start_gap(vma));
+}
+
+static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
+{
+       return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
+                                  1ul << SLICE_LOW_SHIFT);
+}
+
+static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
+{
+       unsigned long start = slice << SLICE_HIGH_SHIFT;
+       unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);
+
+       /* Hack, so that each address is controlled by exactly one
+        * of the high or low area bitmaps; the first high area starts
+        * at 4GB, not 0 */
+       if (start == 0)
+               start = (unsigned long)SLICE_LOW_TOP;
+
+       return !slice_area_is_free(mm, start, end - start);
+}
+
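+/*
+ * Build the mask of slices below 'high_limit' that contain no VMAs and
+ * are therefore free to be converted to another page size.
+ */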
+static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
+                               unsigned long high_limit)
+{
+       unsigned long i;
+
+       ret->low_slices = 0;
+       if (SLICE_NUM_HIGH)
+               bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
+
+       for (i = 0; i < SLICE_NUM_LOW; i++)
+               if (!slice_low_has_vma(mm, i))
+                       ret->low_slices |= 1u << i;
+
+       if (slice_addr_is_low(high_limit - 1))
+               return;
+
+       for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
+               if (!slice_high_has_vma(mm, i))
+                       __set_bit(i, ret->high_slices);
+}
+
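+/*
+ * Check that every slice covering [start, start + len) is set in the
+ * 'available' mask.
+ */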
+static bool slice_check_range_fits(struct mm_struct *mm,
+                          const struct slice_mask *available,
+                          unsigned long start, unsigned long len)
+{
+       unsigned long end = start + len - 1;
+       u64 low_slices = 0;
+
+       if (slice_addr_is_low(start)) {
+               unsigned long mend = min(end,
+                                        (unsigned long)(SLICE_LOW_TOP - 1));
+
+               low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
+                               - (1u << GET_LOW_SLICE_INDEX(start));
+       }
+       if ((low_slices & available->low_slices) != low_slices)
+               return false;
+
+       if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
+               unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
+               unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
+               unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
+               unsigned long i;
+
+               for (i = start_index; i < start_index + count; i++) {
+                       if (!test_bit(i, available->high_slices))
+                               return false;
+               }
+       }
+
+       return true;
+}
+
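+/*
+ * Called on each CPU after a slice change: if this CPU is running the
+ * given mm, refresh its paca copy and flush/restore the bolted SLB.
+ */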
+static void slice_flush_segments(void *parm)
+{
+#ifdef CONFIG_PPC64
+       struct mm_struct *mm = parm;
+       unsigned long flags;
+
+       if (mm != current->active_mm)
+               return;
+
+       copy_mm_to_paca(current->active_mm);
+
+       local_irq_save(flags);
+       slb_flush_and_restore_bolted();
+       local_irq_restore(flags);
+#endif
+}
+
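+/*
+ * Switch every slice selected by 'mask' to page size 'psize', updating
+ * both the low/high psize arrays and the cached per-size slice masks,
+ * then flush any copro SLBs referencing this mm.
+ */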
+static void slice_convert(struct mm_struct *mm,
+                               const struct slice_mask *mask, int psize)
+{
+       int index, mask_index;
+       /* Write the new slice psize bits */
+       unsigned char *hpsizes, *lpsizes;
+       struct slice_mask *psize_mask, *old_mask;
+       unsigned long i, flags;
+       int old_psize;
+
+       slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
+       slice_print_mask(" mask", mask);
+
+       psize_mask = slice_mask_for_size(&mm->context, psize);
+
+       /* We need to use a spinlock here to protect against
+        * concurrent 64k -> 4k demotion ...
+        */
+       spin_lock_irqsave(&slice_convert_lock, flags);
+
+       lpsizes = mm_ctx_low_slices(&mm->context);
+       for (i = 0; i < SLICE_NUM_LOW; i++) {
+               if (!(mask->low_slices & (1u << i)))
+                       continue;
+
+               mask_index = i & 0x1;
+               index = i >> 1;
+
+               /* Update the slice_mask */
+               old_psize = (lpsizes[index] >> (mask_index * 4)) & 0xf;
+               old_mask = slice_mask_for_size(&mm->context, old_psize);
+               old_mask->low_slices &= ~(1u << i);
+               psize_mask->low_slices |= 1u << i;
+
+               /* Update the sizes array */
+               lpsizes[index] = (lpsizes[index] & ~(0xf << (mask_index * 4))) |
+                               (((unsigned long)psize) << (mask_index * 4));
+       }
+
+       hpsizes = mm_ctx_high_slices(&mm->context);
+       for (i = 0; i < GET_HIGH_SLICE_INDEX(mm_ctx_slb_addr_limit(&mm->context)); i++) {
+               if (!test_bit(i, mask->high_slices))
+                       continue;
+
+               mask_index = i & 0x1;
+               index = i >> 1;
+
+               /* Update the slice_mask */
+               old_psize = (hpsizes[index] >> (mask_index * 4)) & 0xf;
+               old_mask = slice_mask_for_size(&mm->context, old_psize);
+               __clear_bit(i, old_mask->high_slices);
+               __set_bit(i, psize_mask->high_slices);
+
+               /* Update the sizes array */
+               hpsizes[index] = (hpsizes[index] & ~(0xf << (mask_index * 4))) |
+                               (((unsigned long)psize) << (mask_index * 4));
+       }
+
+       slice_dbg(" lsps=%lx, hsps=%lx\n",
+                 (unsigned long)mm_ctx_low_slices(&mm->context),
+                 (unsigned long)mm_ctx_high_slices(&mm->context));
+
+       spin_unlock_irqrestore(&slice_convert_lock, flags);
+
+       copro_flush_all_slbs(mm);
+}
+
+/*
+ * Compute which slice addr is part of;
+ * set *boundary_addr to the start or end boundary of that slice
+ * (depending on 'end' parameter);
+ * return boolean indicating if the slice is marked as available in the
+ * 'available' slice_mask.
+ */
+static bool slice_scan_available(unsigned long addr,
+                                const struct slice_mask *available,
+                                int end, unsigned long *boundary_addr)
+{
+       unsigned long slice;
+       if (slice_addr_is_low(addr)) {
+               slice = GET_LOW_SLICE_INDEX(addr);
+               *boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
+               return !!(available->low_slices & (1u << slice));
+       } else {
+               slice = GET_HIGH_SLICE_INDEX(addr);
+               *boundary_addr = (slice + end) ?
+                       ((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
+               return !!test_bit(slice, available->high_slices);
+       }
+}
+
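+/*
+ * Bottom-up search: walk upwards from 'addr' one run of contiguous
+ * available slices at a time, asking vm_unmapped_area() for a fit
+ * within each run.
+ */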
+static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
+                                             unsigned long addr, unsigned long len,
+                                             const struct slice_mask *available,
+                                             int psize, unsigned long high_limit)
+{
+       int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
+       unsigned long found, next_end;
+       struct vm_unmapped_area_info info;
+
+       info.flags = 0;
+       info.length = len;
+       info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
+       info.align_offset = 0;
+       /*
+        * Check up to the allowed max value for this mmap request
+        */
+       while (addr < high_limit) {
+               info.low_limit = addr;
+               if (!slice_scan_available(addr, available, 1, &addr))
+                       continue;
+
+ next_slice:
+               /*
+                * At this point [info.low_limit; addr) covers
+                * available slices only and ends at a slice boundary.
+                * Check if we need to reduce the range, or if we can
+                * extend it to cover the next available slice.
+                */
+               if (addr >= high_limit)
+                       addr = high_limit;
+               else if (slice_scan_available(addr, available, 1, &next_end)) {
+                       addr = next_end;
+                       goto next_slice;
+               }
+               info.high_limit = addr;
+
+               found = vm_unmapped_area(&info);
+               if (!(found & ~PAGE_MASK))
+                       return found;
+       }
+
+       return -ENOMEM;
+}
+
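+/*
+ * Top-down search: walk downwards from 'addr' (normally mmap_base) over
+ * runs of available slices, falling back to a bottom-up search if
+ * nothing fits.
+ */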
+static unsigned long slice_find_area_topdown(struct mm_struct *mm,
+                                            unsigned long addr, unsigned long len,
+                                            const struct slice_mask *available,
+                                            int psize, unsigned long high_limit)
+{
+       int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
+       unsigned long found, prev;
+       struct vm_unmapped_area_info info;
+       unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);
+
+       info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+       info.length = len;
+       info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
+       info.align_offset = 0;
+       /*
+        * If we are trying to allocate above DEFAULT_MAP_WINDOW,
+        * add the difference to mmap_base. Only apply this when
+        * high_limit is above DEFAULT_MAP_WINDOW.
+        */
+       if (high_limit > DEFAULT_MAP_WINDOW)
+               addr += mm_ctx_slb_addr_limit(&mm->context) - DEFAULT_MAP_WINDOW;
+
+       while (addr > min_addr) {
+               info.high_limit = addr;
+               if (!slice_scan_available(addr - 1, available, 0, &addr))
+                       continue;
+
+ prev_slice:
+               /*
+                * At this point [addr; info.high_limit) covers
+                * available slices only and starts at a slice boundary.
+                * Check if we need to reduce the range, or if we can
+                * extend it to cover the previous available slice.
+                */
+               if (addr < min_addr)
+                       addr = min_addr;
+               else if (slice_scan_available(addr - 1, available, 0, &prev)) {
+                       addr = prev;
+                       goto prev_slice;
+               }
+               info.low_limit = addr;
+
+               found = vm_unmapped_area(&info);
+               if (!(found & ~PAGE_MASK))
+                       return found;
+       }
+
+       /*
+        * A failed mmap() very likely causes application failure,
+        * so fall back to the bottom-up function here. This scenario
+        * can happen with large stack limits and large mmap()
+        * allocations.
+        */
+       return slice_find_area_bottomup(mm, TASK_UNMAPPED_BASE, len, available, psize, high_limit);
+}
+
+
+static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
+                                    const struct slice_mask *mask, int psize,
+                                    int topdown, unsigned long high_limit)
+{
+       if (topdown)
+               return slice_find_area_topdown(mm, mm->mmap_base, len, mask, psize, high_limit);
+       else
+               return slice_find_area_bottomup(mm, mm->mmap_base, len, mask, psize, high_limit);
+}
+
+static inline void slice_copy_mask(struct slice_mask *dst,
+                                       const struct slice_mask *src)
+{
+       dst->low_slices = src->low_slices;
+       if (!SLICE_NUM_HIGH)
+               return;
+       bitmap_copy(dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
+}
+
+static inline void slice_or_mask(struct slice_mask *dst,
+                                       const struct slice_mask *src1,
+                                       const struct slice_mask *src2)
+{
+       dst->low_slices = src1->low_slices | src2->low_slices;
+       if (!SLICE_NUM_HIGH)
+               return;
+       bitmap_or(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
+}
+
+static inline void slice_andnot_mask(struct slice_mask *dst,
+                                       const struct slice_mask *src1,
+                                       const struct slice_mask *src2)
+{
+       dst->low_slices = src1->low_slices & ~src2->low_slices;
+       if (!SLICE_NUM_HIGH)
+               return;
+       bitmap_andnot(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
+}
+
+#ifdef CONFIG_PPC_64K_PAGES
+#define MMU_PAGE_BASE  MMU_PAGE_64K
+#else
+#define MMU_PAGE_BASE  MMU_PAGE_4K
+#endif
+
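+/*
+ * Main entry point: find, and if necessary convert, a range of slices
+ * with the requested page size for a mapping of 'len' bytes. The
+ * good/compat/free strategy is described in the comment further down.
+ */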
+unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
+                                     unsigned long flags, unsigned int psize,
+                                     int topdown)
+{
+       struct slice_mask good_mask;
+       struct slice_mask potential_mask;
+       const struct slice_mask *maskp;
+       const struct slice_mask *compat_maskp = NULL;
+       int fixed = (flags & MAP_FIXED);
+       int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
+       unsigned long page_size = 1UL << pshift;
+       struct mm_struct *mm = current->mm;
+       unsigned long newaddr;
+       unsigned long high_limit;
+
+       high_limit = DEFAULT_MAP_WINDOW;
+       if (addr >= high_limit || (fixed && (addr + len > high_limit)))
+               high_limit = TASK_SIZE;
+
+       if (len > high_limit)
+               return -ENOMEM;
+       if (len & (page_size - 1))
+               return -EINVAL;
+       if (fixed) {
+               if (addr & (page_size - 1))
+                       return -EINVAL;
+               if (addr > high_limit - len)
+                       return -ENOMEM;
+       }
+
+       if (high_limit > mm_ctx_slb_addr_limit(&mm->context)) {
+               /*
+                * Increasing the slb_addr_limit does not require the
+                * slice mask cache to be recalculated because it should
+                * already be initialised beyond the old address limit.
+                */
+               mm_ctx_set_slb_addr_limit(&mm->context, high_limit);
+
+               on_each_cpu(slice_flush_segments, mm, 1);
+       }
+
+       /* Sanity checks */
+       BUG_ON(mm->task_size == 0);
+       BUG_ON(mm_ctx_slb_addr_limit(&mm->context) == 0);
+       VM_BUG_ON(radix_enabled());
+
+       slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
+       slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
+                 addr, len, flags, topdown);
+
+       /* If hint, make sure it matches our alignment restrictions */
+       if (!fixed && addr) {
+               addr = ALIGN(addr, page_size);
+               slice_dbg(" aligned addr=%lx\n", addr);
+               /* Ignore hint if it's too large or overlaps a VMA */
+               if (addr > high_limit - len || addr < mmap_min_addr ||
+                   !slice_area_is_free(mm, addr, len))
+                       addr = 0;
+       }
+
+       /* First make up a "good" mask of slices that have the right size
+        * already
+        */
+       maskp = slice_mask_for_size(&mm->context, psize);
+
+       /*
+        * Here "good" means slices that are already the right page size,
+        * "compat" means slices that have a compatible page size (i.e.
+        * 4k in a 64k pagesize kernel), and "free" means slices without
+        * any VMAs.
+        *
+        * If MAP_FIXED:
+        *      check if fits in good | compat => OK
+        *      check if fits in good | compat | free => convert free
+        *      else bad
+        * If have hint:
+        *      check if hint fits in good => OK
+        *      check if hint fits in good | free => convert free
+        * Otherwise:
+        *      search in good, found => OK
+        *      search in good | free, found => convert free
+        *      search in good | compat | free, found => convert free.
+        */
+
+       /*
+        * If we support combo pages, we can allow 64k pages in 4k slices.
+        * The mask copies could be avoided in most cases here if we had
+        * a pointer to the good mask for the next code to use.
+        */
+       if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
+               compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
+               if (fixed)
+                       slice_or_mask(&good_mask, maskp, compat_maskp);
+               else
+                       slice_copy_mask(&good_mask, maskp);
+       } else {
+               slice_copy_mask(&good_mask, maskp);
+       }
+
+       slice_print_mask(" good_mask", &good_mask);
+       if (compat_maskp)
+               slice_print_mask(" compat_mask", compat_maskp);
+
+       /* First check hint if it's valid or if we have MAP_FIXED */
+       if (addr != 0 || fixed) {
+               /* Check if we fit in the good mask. If we do, we just return,
+                * nothing else to do
+                */
+               if (slice_check_range_fits(mm, &good_mask, addr, len)) {
+                       slice_dbg(" fits good !\n");
+                       newaddr = addr;
+                       goto return_addr;
+               }
+       } else {
+               /* Now let's see if we can find something in the existing
+                * slices for that size
+                */
+               newaddr = slice_find_area(mm, len, &good_mask,
+                                         psize, topdown, high_limit);
+               if (newaddr != -ENOMEM) {
+                       /* Found within the good mask, we don't have to setup,
+                        * we thus return directly
+                        */
+                       slice_dbg(" found area at 0x%lx\n", newaddr);
+                       goto return_addr;
+               }
+       }
+       /*
+        * We don't fit in the good mask, check what other slices are
+        * empty and thus can be converted
+        */
+       slice_mask_for_free(mm, &potential_mask, high_limit);
+       slice_or_mask(&potential_mask, &potential_mask, &good_mask);
+       slice_print_mask(" potential", &potential_mask);
+
+       if (addr != 0 || fixed) {
+               if (slice_check_range_fits(mm, &potential_mask, addr, len)) {
+                       slice_dbg(" fits potential !\n");
+                       newaddr = addr;
+                       goto convert;
+               }
+       }
+
+       /* If we have MAP_FIXED and failed the above steps, then error out */
+       if (fixed)
+               return -EBUSY;
+
+       slice_dbg(" search...\n");
+
+       /* If we had a hint that didn't work out, see if we can fit
+        * anywhere in the good area.
+        */
+       if (addr) {
+               newaddr = slice_find_area(mm, len, &good_mask,
+                                         psize, topdown, high_limit);
+               if (newaddr != -ENOMEM) {
+                       slice_dbg(" found area at 0x%lx\n", newaddr);
+                       goto return_addr;
+               }
+       }
+
+       /* Now let's see if we can find something in the existing slices
+        * for that size plus free slices
+        */
+       newaddr = slice_find_area(mm, len, &potential_mask,
+                                 psize, topdown, high_limit);
+
+       if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && newaddr == -ENOMEM &&
+           psize == MMU_PAGE_64K) {
+               /* retry the search with 4k-page slices included */
+               slice_or_mask(&potential_mask, &potential_mask, compat_maskp);
+               newaddr = slice_find_area(mm, len, &potential_mask,
+                                         psize, topdown, high_limit);
+       }
+
+       if (newaddr == -ENOMEM)
+               return -ENOMEM;
+
+       slice_range_to_mask(newaddr, len, &potential_mask);
+       slice_dbg(" found potential area at 0x%lx\n", newaddr);
+       slice_print_mask(" mask", &potential_mask);
+
+ convert:
+       /*
+        * Try to allocate the context before we do slice convert
+        * so that we handle the context allocation failure gracefully.
+        */
+       if (need_extra_context(mm, newaddr)) {
+               if (alloc_extended_context(mm, newaddr) < 0)
+                       return -ENOMEM;
+       }
+
+       slice_andnot_mask(&potential_mask, &potential_mask, &good_mask);
+       if (compat_maskp && !fixed)
+               slice_andnot_mask(&potential_mask, &potential_mask, compat_maskp);
+       if (potential_mask.low_slices ||
+               (SLICE_NUM_HIGH &&
+                !bitmap_empty(potential_mask.high_slices, SLICE_NUM_HIGH))) {
+               slice_convert(mm, &potential_mask, psize);
+               if (psize > MMU_PAGE_BASE)
+                       on_each_cpu(slice_flush_segments, mm, 1);
+       }
+       return newaddr;
+
+return_addr:
+       if (need_extra_context(mm, newaddr)) {
+               if (alloc_extended_context(mm, newaddr) < 0)
+                       return -ENOMEM;
+       }
+       return newaddr;
+}
+EXPORT_SYMBOL_GPL(slice_get_unmapped_area);
+
+unsigned long arch_get_unmapped_area(struct file *filp,
+                                    unsigned long addr,
+                                    unsigned long len,
+                                    unsigned long pgoff,
+                                    unsigned long flags)
+{
+       if (radix_enabled())
+               return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
+
+       return slice_get_unmapped_area(addr, len, flags,
+                                      mm_ctx_user_psize(&current->mm->context), 0);
+}
+
+unsigned long arch_get_unmapped_area_topdown(struct file *filp,
+                                            const unsigned long addr0,
+                                            const unsigned long len,
+                                            const unsigned long pgoff,
+                                            const unsigned long flags)
+{
+       if (radix_enabled())
+               return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags);
+
+       return slice_get_unmapped_area(addr0, len, flags,
+                                      mm_ctx_user_psize(&current->mm->context), 1);
+}
+
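+/*
+ * Return the MMU page size index recorded for the slice containing 'addr'.
+ */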
+unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr)
+{
+       unsigned char *psizes;
+       int index, mask_index;
+
+       VM_BUG_ON(radix_enabled());
+
+       if (slice_addr_is_low(addr)) {
+               psizes = mm_ctx_low_slices(&mm->context);
+               index = GET_LOW_SLICE_INDEX(addr);
+       } else {
+               psizes = mm_ctx_high_slices(&mm->context);
+               index = GET_HIGH_SLICE_INDEX(addr);
+       }
+       mask_index = index & 0x1;
+       return (psizes[index >> 1] >> (mask_index * 4)) & 0xf;
+}
+EXPORT_SYMBOL_GPL(get_slice_psize);
+
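+/*
+ * Initialise the slice state of a fresh mm at exec time: default address
+ * limit, default page size for every slice, and the matching size mask.
+ */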
+void slice_init_new_context_exec(struct mm_struct *mm)
+{
+       unsigned char *hpsizes, *lpsizes;
+       struct slice_mask *mask;
+       unsigned int psize = mmu_virtual_psize;
+
+       slice_dbg("slice_init_new_context_exec(mm=%p)\n", mm);
+
+       /*
+        * In the case of exec, use the default limit. In the
+        * case of fork it is just inherited from the mm being
+        * duplicated.
+        */
+       mm_ctx_set_slb_addr_limit(&mm->context, SLB_ADDR_LIMIT_DEFAULT);
+       mm_ctx_set_user_psize(&mm->context, psize);
+
+       /*
+        * Set all slice psizes to the default.
+        */
+       lpsizes = mm_ctx_low_slices(&mm->context);
+       memset(lpsizes, (psize << 4) | psize, SLICE_NUM_LOW >> 1);
+
+       hpsizes = mm_ctx_high_slices(&mm->context);
+       memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1);
+
+       /*
+        * Slice mask cache starts zeroed, fill the default size cache.
+        */
+       mask = slice_mask_for_size(&mm->context, psize);
+       mask->low_slices = ~0UL;
+       if (SLICE_NUM_HIGH)
+               bitmap_fill(mask->high_slices, SLICE_NUM_HIGH);
+}
+
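+/*
+ * On exec, 32-bit tasks get their address limit reset to the default
+ * map window.
+ */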
+void slice_setup_new_exec(void)
+{
+       struct mm_struct *mm = current->mm;
+
+       slice_dbg("slice_setup_new_exec(mm=%p)\n", mm);
+
+       if (!is_32bit_task())
+               return;
+
+       mm_ctx_set_slb_addr_limit(&mm->context, DEFAULT_MAP_WINDOW);
+}
+
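+/*
+ * Force the slices covering [start, start + len) to the given page size.
+ */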
+void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
+                          unsigned long len, unsigned int psize)
+{
+       struct slice_mask mask;
+
+       VM_BUG_ON(radix_enabled());
+
+       slice_range_to_mask(start, len, &mask);
+       slice_convert(mm, &mask, psize);
+}
+
+#ifdef CONFIG_HUGETLB_PAGE
+/*
+ * is_hugepage_only_range() is used by generic code to verify whether
+ * a normal mmap mapping (non hugetlbfs) is valid on a given area.
+ *
+ * Until the generic code provides a more generic hook and/or starts
+ * calling arch get_unmapped_area for MAP_FIXED (which our implementation
+ * here knows how to deal with), we hijack it to keep standard mappings
+ * away from us.
+ *
+ * Because of that generic code limitation, a MAP_FIXED mapping cannot
+ * "convert" back a slice with no VMAs to the standard page size, only
+ * get_unmapped_area() can. It would be possible to fix it here but I
+ * prefer working on fixing the generic code instead.
+ *
+ * WARNING: This will not work if hugetlbfs isn't enabled since the
+ * generic code will redefine that function as 0 in that case. This is ok
+ * for now as we only use slices with hugetlbfs enabled. This should
+ * be fixed as the generic code gets fixed.
+ */
+int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
+                          unsigned long len)
+{
+       const struct slice_mask *maskp;
+       unsigned int psize = mm_ctx_user_psize(&mm->context);
+
+       VM_BUG_ON(radix_enabled());
+
+       maskp = slice_mask_for_size(&mm->context, psize);
+
+       /* We need to account for 4k slices too */
+       if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
+               const struct slice_mask *compat_maskp;
+               struct slice_mask available;
+
+               compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
+               slice_or_mask(&available, maskp, compat_maskp);
+               return !slice_check_range_fits(mm, &available, addr, len);
+       }
+
+       return !slice_check_range_fits(mm, maskp, addr, len);
+}
+
+unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
+{
+       /* With radix we don't use slices, so derive it from the vma */
+       if (radix_enabled())
+               return vma_kernel_pagesize(vma);
+
+       return 1UL << mmu_psize_to_shift(get_slice_psize(vma->vm_mm, vma->vm_start));
+}
+
+static int file_to_psize(struct file *file)
+{
+       struct hstate *hstate = hstate_file(file);
+       return shift_to_mmu_psize(huge_page_shift(hstate));
+}
+
+unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+                                       unsigned long len, unsigned long pgoff,
+                                       unsigned long flags)
+{
+       if (radix_enabled())
+               return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);
+
+       return slice_get_unmapped_area(addr, len, flags, file_to_psize(file), 1);
+}
+#endif
index 22197b1..2369d1b 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/of.h>
 #include <linux/of_fdt.h>
 #include <linux/memblock.h>
-#include <asm/prom.h>
+#include <linux/slab.h>
 #include <asm/drmem.h>
 
 static int n_root_addr_cells, n_root_size_cells;
index b642a5a..b282af3 100644 (file)
@@ -542,40 +542,6 @@ retry:
        return page;
 }
 
-#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-static inline int file_to_psize(struct file *file)
-{
-       struct hstate *hstate = hstate_file(file);
-       return shift_to_mmu_psize(huge_page_shift(hstate));
-}
-
-unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
-                                       unsigned long len, unsigned long pgoff,
-                                       unsigned long flags)
-{
-#ifdef CONFIG_PPC_RADIX_MMU
-       if (radix_enabled())
-               return radix__hugetlb_get_unmapped_area(file, addr, len,
-                                                      pgoff, flags);
-#endif
-#ifdef CONFIG_PPC_MM_SLICES
-       return slice_get_unmapped_area(addr, len, flags, file_to_psize(file), 1);
-#endif
-       BUG();
-}
-#endif
-
-unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
-{
-       /* With radix we don't use slice, so derive it from vma*/
-       if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled()) {
-               unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);
-
-               return 1UL << mmu_psize_to_shift(psize);
-       }
-       return vma_kernel_pagesize(vma);
-}
-
 bool __init arch_hugetlb_valid_size(unsigned long size)
 {
        int shift = __ffs(size);
index 3d690be..693a3a7 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/slab.h>
 #include <linux/hugetlb.h>
 
-#include <asm/prom.h>
 #include <asm/io.h>
 #include <asm/mmu.h>
 #include <asm/smp.h>
index 0f26086..05b0d58 100644 (file)
@@ -111,7 +111,7 @@ static int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_m
 }
 
 /*
- * vmemmap virtual address space management does not have a traditonal page
+ * vmemmap virtual address space management does not have a traditional page
  * table to track which virtual struct pages are backed by physical mapping.
  * The virtual to physical mappings are tracked in a simple linked list
  * format. 'vmemmap_list' maintains the entire vmemmap physical mapping at
@@ -128,7 +128,7 @@ static struct vmemmap_backing *next;
 
 /*
  * The same pointer 'next' tracks individual chunks inside the allocated
- * full page during the boot time and again tracks the freeed nodes during
+ * full page during the boot time and again tracks the freed nodes during
  * runtime. It is racy but it does not happen as they are separated by the
  * boot process. Will create problem if some how we have memory hotplug
  * operation during boot !!
index 4d221d0..177fae5 100644 (file)
@@ -22,6 +22,7 @@
 #include <asm/kasan.h>
 #include <asm/svm.h>
 #include <asm/mmzone.h>
+#include <asm/code-patching.h>
 
 #include <mm/mmu_decl.h>
 
@@ -311,6 +312,7 @@ void free_initmem(void)
 {
        ppc_md.progress = ppc_printk_progress;
        mark_initmem_nx();
+       static_branch_enable(&init_mem_is_free);
        free_initmem_default(POISON_FREE_INITMEM);
 }
 
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
deleted file mode 100644 (file)
index c475cf8..0000000
+++ /dev/null
@@ -1,256 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- *  flexible mmap layout support
- *
- * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
- * All Rights Reserved.
- *
- * Started by Ingo Molnar <mingo@elte.hu>
- */
-
-#include <linux/personality.h>
-#include <linux/mm.h>
-#include <linux/random.h>
-#include <linux/sched/signal.h>
-#include <linux/sched/mm.h>
-#include <linux/elf-randomize.h>
-#include <linux/security.h>
-#include <linux/mman.h>
-
-/*
- * Top of mmap area (just below the process stack).
- *
- * Leave at least a ~128 MB hole.
- */
-#define MIN_GAP (128*1024*1024)
-#define MAX_GAP (TASK_SIZE/6*5)
-
-static inline int mmap_is_legacy(struct rlimit *rlim_stack)
-{
-       if (current->personality & ADDR_COMPAT_LAYOUT)
-               return 1;
-
-       if (rlim_stack->rlim_cur == RLIM_INFINITY)
-               return 1;
-
-       return sysctl_legacy_va_layout;
-}
-
-unsigned long arch_mmap_rnd(void)
-{
-       unsigned long shift, rnd;
-
-       shift = mmap_rnd_bits;
-#ifdef CONFIG_COMPAT
-       if (is_32bit_task())
-               shift = mmap_rnd_compat_bits;
-#endif
-       rnd = get_random_long() % (1ul << shift);
-
-       return rnd << PAGE_SHIFT;
-}
-
-static inline unsigned long stack_maxrandom_size(void)
-{
-       if (!(current->flags & PF_RANDOMIZE))
-               return 0;
-
-       /* 8MB for 32bit, 1GB for 64bit */
-       if (is_32bit_task())
-               return (1<<23);
-       else
-               return (1<<30);
-}
-
-static inline unsigned long mmap_base(unsigned long rnd,
-                                     struct rlimit *rlim_stack)
-{
-       unsigned long gap = rlim_stack->rlim_cur;
-       unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
-
-       /* Values close to RLIM_INFINITY can overflow. */
-       if (gap + pad > gap)
-               gap += pad;
-
-       if (gap < MIN_GAP)
-               gap = MIN_GAP;
-       else if (gap > MAX_GAP)
-               gap = MAX_GAP;
-
-       return PAGE_ALIGN(DEFAULT_MAP_WINDOW - gap - rnd);
-}
-
-#ifdef HAVE_ARCH_UNMAPPED_AREA
-#ifdef CONFIG_PPC_RADIX_MMU
-/*
- * Same function as generic code used only for radix, because we don't need to overload
- * the generic one. But we will have to duplicate, because hash select
- * HAVE_ARCH_UNMAPPED_AREA
- */
-static unsigned long
-radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
-                            unsigned long len, unsigned long pgoff,
-                            unsigned long flags)
-{
-       struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
-       int fixed = (flags & MAP_FIXED);
-       unsigned long high_limit;
-       struct vm_unmapped_area_info info;
-
-       high_limit = DEFAULT_MAP_WINDOW;
-       if (addr >= high_limit || (fixed && (addr + len > high_limit)))
-               high_limit = TASK_SIZE;
-
-       if (len > high_limit)
-               return -ENOMEM;
-
-       if (fixed) {
-               if (addr > high_limit - len)
-                       return -ENOMEM;
-               return addr;
-       }
-
-       if (addr) {
-               addr = PAGE_ALIGN(addr);
-               vma = find_vma(mm, addr);
-               if (high_limit - len >= addr && addr >= mmap_min_addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
-                       return addr;
-       }
-
-       info.flags = 0;
-       info.length = len;
-       info.low_limit = mm->mmap_base;
-       info.high_limit = high_limit;
-       info.align_mask = 0;
-
-       return vm_unmapped_area(&info);
-}
-
-static unsigned long
-radix__arch_get_unmapped_area_topdown(struct file *filp,
-                                    const unsigned long addr0,
-                                    const unsigned long len,
-                                    const unsigned long pgoff,
-                                    const unsigned long flags)
-{
-       struct vm_area_struct *vma;
-       struct mm_struct *mm = current->mm;
-       unsigned long addr = addr0;
-       int fixed = (flags & MAP_FIXED);
-       unsigned long high_limit;
-       struct vm_unmapped_area_info info;
-
-       high_limit = DEFAULT_MAP_WINDOW;
-       if (addr >= high_limit || (fixed && (addr + len > high_limit)))
-               high_limit = TASK_SIZE;
-
-       if (len > high_limit)
-               return -ENOMEM;
-
-       if (fixed) {
-               if (addr > high_limit - len)
-                       return -ENOMEM;
-               return addr;
-       }
-
-       if (addr) {
-               addr = PAGE_ALIGN(addr);
-               vma = find_vma(mm, addr);
-               if (high_limit - len >= addr && addr >= mmap_min_addr &&
-                   (!vma || addr + len <= vm_start_gap(vma)))
-                       return addr;
-       }
-
-       info.flags = VM_UNMAPPED_AREA_TOPDOWN;
-       info.length = len;
-       info.low_limit = max(PAGE_SIZE, mmap_min_addr);
-       info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
-       info.align_mask = 0;
-
-       addr = vm_unmapped_area(&info);
-       if (!(addr & ~PAGE_MASK))
-               return addr;
-       VM_BUG_ON(addr != -ENOMEM);
-
-       /*
-        * A failed mmap() very likely causes application failure,
-        * so fall back to the bottom-up function here. This scenario
-        * can happen with large stack limits and large mmap()
-        * allocations.
-        */
-       return radix__arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-}
-#endif
-
-unsigned long arch_get_unmapped_area(struct file *filp,
-                                    unsigned long addr,
-                                    unsigned long len,
-                                    unsigned long pgoff,
-                                    unsigned long flags)
-{
-#ifdef CONFIG_PPC_MM_SLICES
-       return slice_get_unmapped_area(addr, len, flags,
-                                      mm_ctx_user_psize(&current->mm->context), 0);
-#else
-       BUG();
-#endif
-}
-
-unsigned long arch_get_unmapped_area_topdown(struct file *filp,
-                                            const unsigned long addr0,
-                                            const unsigned long len,
-                                            const unsigned long pgoff,
-                                            const unsigned long flags)
-{
-#ifdef CONFIG_PPC_MM_SLICES
-       return slice_get_unmapped_area(addr0, len, flags,
-                                      mm_ctx_user_psize(&current->mm->context), 1);
-#else
-       BUG();
-#endif
-}
-#endif /* HAVE_ARCH_UNMAPPED_AREA */
-
-static void radix__arch_pick_mmap_layout(struct mm_struct *mm,
-                                       unsigned long random_factor,
-                                       struct rlimit *rlim_stack)
-{
-#ifdef CONFIG_PPC_RADIX_MMU
-       if (mmap_is_legacy(rlim_stack)) {
-               mm->mmap_base = TASK_UNMAPPED_BASE;
-               mm->get_unmapped_area = radix__arch_get_unmapped_area;
-       } else {
-               mm->mmap_base = mmap_base(random_factor, rlim_stack);
-               mm->get_unmapped_area = radix__arch_get_unmapped_area_topdown;
-       }
-#endif
-}
-
-/*
- * This function, called very early during the creation of a new
- * process VM image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
-{
-       unsigned long random_factor = 0UL;
-
-       if (current->flags & PF_RANDOMIZE)
-               random_factor = arch_mmap_rnd();
-
-       if (radix_enabled())
-               return radix__arch_pick_mmap_layout(mm, random_factor,
-                                                   rlim_stack);
-       /*
-        * Fall back to the standard layout if the personality
-        * bit is set, or if the expected stack growth is unlimited:
-        */
-       if (mmap_is_legacy(rlim_stack)) {
-               mm->mmap_base = TASK_UNMAPPED_BASE;
-               mm->get_unmapped_area = arch_get_unmapped_area;
-       } else {
-               mm->mmap_base = mmap_base(random_factor, rlim_stack);
-               mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-       }
-}
index 0dd4c18..63c4b1a 100644 (file)
@@ -155,6 +155,10 @@ struct tlbcam {
        u32     MAS3;
        u32     MAS7;
 };
+
+#define NUM_TLBCAMS    64
+
+extern struct tlbcam TLBCAM[NUM_TLBCAMS];
 #endif
 
 #if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_FSL_BOOKE) || defined(CONFIG_PPC_8xx)
index 95751c3..b32e465 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/highmem.h>
 #include <linux/memblock.h>
 
-#include <asm/prom.h>
 #include <asm/io.h>
 #include <asm/mmu_context.h>
 #include <asm/mmu.h>
index 8b88be9..307ca91 100644 (file)
@@ -142,7 +142,7 @@ book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
        tsize = shift - 10;
        /*
         * We can't be interrupted while we're setting up the MAS
-        * regusters or after we've confirmed that no tlb exists.
+        * registers or after we've confirmed that no tlb exists.
         */
        local_irq_save(flags);
 
index dfe715e..08a984e 100644 (file)
@@ -37,7 +37,6 @@
 #include <linux/highmem.h>
 #include <linux/memblock.h>
 
-#include <asm/prom.h>
 #include <asm/io.h>
 #include <asm/mmu_context.h>
 #include <asm/mmu.h>
 
 unsigned int tlbcam_index;
 
-#define NUM_TLBCAMS    (64)
 struct tlbcam TLBCAM[NUM_TLBCAMS];
 
-struct tlbcamrange {
+static struct {
        unsigned long start;
        unsigned long limit;
        phys_addr_t phys;
@@ -274,7 +272,7 @@ void __init adjust_total_lowmem(void)
 
        i = switch_to_as1();
        __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, false, true);
-       restore_to_as0(i, 0, 0, 1);
+       restore_to_as0(i, 0, NULL, 1);
 
        pr_info("Memory CAM mapping: ");
        for (i = 0; i < tlbcam_index - 1; i++)
index 96c38f9..5da604c 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/libfdt.h>
 #include <linux/crash_core.h>
 #include <asm/cacheflush.h>
-#include <asm/prom.h>
 #include <asm/kdump.h>
 #include <mm/mmu_decl.h>
 #include <generated/compile.h>
@@ -315,7 +314,7 @@ static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size
        ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, true);
        linear_sz = min_t(unsigned long, ram, SZ_512M);
 
-       /* If the linear size is smaller than 64M, do not randmize */
+       /* If the linear size is smaller than 64M, do not randomize */
        if (linear_sz < SZ_64M)
                return 0;
 
index 85b048f..ccd5819 100644 (file)
@@ -317,15 +317,6 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
  */
 int init_new_context(struct task_struct *t, struct mm_struct *mm)
 {
-       /*
-        * We have MMU_NO_CONTEXT set to be ~0. Hence check
-        * explicitly against context.id == 0. This ensures that we properly
-        * initialize context slice details for newly allocated mm's (which will
-        * have id == 0) and don't alter context slice inherited via fork (which
-        * will have id != 0).
-        */
-       if (mm->context.id == 0)
-               slice_init_new_context_exec(mm);
        mm->context.id = MMU_NO_CONTEXT;
        mm->context.active = 0;
        pte_frag_set(&mm->context, NULL);
index fd2c77a..5e7ccb4 100644 (file)
@@ -358,6 +358,7 @@ void __init early_init_mmu_47x(void)
 /*
  * Flush kernel TLB entries in the given range
  */
+#ifndef CONFIG_PPC_8xx
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 #ifdef CONFIG_SMP
@@ -370,6 +371,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 #endif
 }
 EXPORT_SYMBOL(flush_tlb_kernel_range);
+#endif
 
 /*
  * Currently, for range flushing, we just do a full mm flush. This should
@@ -773,9 +775,5 @@ void __init early_init_mmu(void)
 #ifdef CONFIG_PPC_47x
        early_init_mmu_47x();
 #endif
-
-#ifdef CONFIG_PPC_MM_SLICES
-       mm_ctx_set_slb_addr_limit(&init_mm.context, SLB_ADDR_LIMIT_DEFAULT);
-#endif
 }
 #endif /* CONFIG_PPC64 */
index 13022d7..4680e31 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/slab.h>
 #include <asm/cputhreads.h>
 #include <asm/sparsemem.h>
-#include <asm/prom.h>
 #include <asm/smp.h>
 #include <asm/topology.h>
 #include <asm/firmware.h>
@@ -1457,8 +1456,7 @@ int find_and_online_cpu_nid(int cpu)
 #endif
        }
 
-       pr_debug("%s:%d cpu %d nid %d\n", __FUNCTION__, __LINE__,
-               cpu, new_nid);
+       pr_debug("%s:%d cpu %d nid %d\n", __func__, __LINE__, cpu, new_nid);
        return new_nid;
 }
 
index 85753e3..6163e48 100644 (file)
@@ -31,6 +31,7 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
 {
        long action = (long)data;
 
+       addr &= PAGE_MASK;
        /* modify the PTE bits as desired */
        switch (action) {
        case SET_MEMORY_RO:
index 97ae493..20652da 100644 (file)
@@ -83,7 +83,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
        spin_lock(&mm->page_table_lock);
        /*
         * If we find pgtable_page set, we return
-        * the allocated page with single fragement
+        * the allocated page with single fragment
         * count.
         */
        if (likely(!pte_frag_get(&mm->context))) {
index 175aabf..5ac1fd3 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/hugetlb.h>
 
 #include <asm/page.h>
-#include <asm/prom.h>
 #include <asm/mmu_context.h>
 #include <asm/mmu.h>
 #include <asm/smp.h>
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
deleted file mode 100644 (file)
index f42711f..0000000
+++ /dev/null
@@ -1,762 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * address space "slices" (meta-segments) support
- *
- * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
- *
- * Based on hugetlb implementation
- *
- * Copyright (C) 2003 David Gibson, IBM Corporation.
- */
-
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/err.h>
-#include <linux/spinlock.h>
-#include <linux/export.h>
-#include <linux/hugetlb.h>
-#include <linux/sched/mm.h>
-#include <linux/security.h>
-#include <asm/mman.h>
-#include <asm/mmu.h>
-#include <asm/copro.h>
-#include <asm/hugetlb.h>
-#include <asm/mmu_context.h>
-
-static DEFINE_SPINLOCK(slice_convert_lock);
-
-#ifdef DEBUG
-int _slice_debug = 1;
-
-static void slice_print_mask(const char *label, const struct slice_mask *mask)
-{
-       if (!_slice_debug)
-               return;
-       pr_devel("%s low_slice: %*pbl\n", label,
-                       (int)SLICE_NUM_LOW, &mask->low_slices);
-       pr_devel("%s high_slice: %*pbl\n", label,
-                       (int)SLICE_NUM_HIGH, mask->high_slices);
-}
-
-#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)
-
-#else
-
-static void slice_print_mask(const char *label, const struct slice_mask *mask) {}
-#define slice_dbg(fmt...)
-
-#endif
-
-static inline notrace bool slice_addr_is_low(unsigned long addr)
-{
-       u64 tmp = (u64)addr;
-
-       return tmp < SLICE_LOW_TOP;
-}
-
-static void slice_range_to_mask(unsigned long start, unsigned long len,
-                               struct slice_mask *ret)
-{
-       unsigned long end = start + len - 1;
-
-       ret->low_slices = 0;
-       if (SLICE_NUM_HIGH)
-               bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
-
-       if (slice_addr_is_low(start)) {
-               unsigned long mend = min(end,
-                                        (unsigned long)(SLICE_LOW_TOP - 1));
-
-               ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
-                       - (1u << GET_LOW_SLICE_INDEX(start));
-       }
-
-       if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
-               unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
-               unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
-               unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
-
-               bitmap_set(ret->high_slices, start_index, count);
-       }
-}
-
-static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
-                             unsigned long len)
-{
-       struct vm_area_struct *vma;
-
-       if ((mm_ctx_slb_addr_limit(&mm->context) - len) < addr)
-               return 0;
-       vma = find_vma(mm, addr);
-       return (!vma || (addr + len) <= vm_start_gap(vma));
-}
-
-static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
-{
-       return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
-                                  1ul << SLICE_LOW_SHIFT);
-}
-
-static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
-{
-       unsigned long start = slice << SLICE_HIGH_SHIFT;
-       unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);
-
-       /* Hack, so that each addresses is controlled by exactly one
-        * of the high or low area bitmaps, the first high area starts
-        * at 4GB, not 0 */
-       if (start == 0)
-               start = (unsigned long)SLICE_LOW_TOP;
-
-       return !slice_area_is_free(mm, start, end - start);
-}
-
-static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
-                               unsigned long high_limit)
-{
-       unsigned long i;
-
-       ret->low_slices = 0;
-       if (SLICE_NUM_HIGH)
-               bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
-
-       for (i = 0; i < SLICE_NUM_LOW; i++)
-               if (!slice_low_has_vma(mm, i))
-                       ret->low_slices |= 1u << i;
-
-       if (slice_addr_is_low(high_limit - 1))
-               return;
-
-       for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
-               if (!slice_high_has_vma(mm, i))
-                       __set_bit(i, ret->high_slices);
-}
-
-static bool slice_check_range_fits(struct mm_struct *mm,
-                          const struct slice_mask *available,
-                          unsigned long start, unsigned long len)
-{
-       unsigned long end = start + len - 1;
-       u64 low_slices = 0;
-
-       if (slice_addr_is_low(start)) {
-               unsigned long mend = min(end,
-                                        (unsigned long)(SLICE_LOW_TOP - 1));
-
-               low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
-                               - (1u << GET_LOW_SLICE_INDEX(start));
-       }
-       if ((low_slices & available->low_slices) != low_slices)
-               return false;
-
-       if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
-               unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
-               unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
-               unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
-               unsigned long i;
-
-               for (i = start_index; i < start_index + count; i++) {
-                       if (!test_bit(i, available->high_slices))
-                               return false;
-               }
-       }
-
-       return true;
-}
-
-static void slice_flush_segments(void *parm)
-{
-#ifdef CONFIG_PPC64
-       struct mm_struct *mm = parm;
-       unsigned long flags;
-
-       if (mm != current->active_mm)
-               return;
-
-       copy_mm_to_paca(current->active_mm);
-
-       local_irq_save(flags);
-       slb_flush_and_restore_bolted();
-       local_irq_restore(flags);
-#endif
-}
-
-static void slice_convert(struct mm_struct *mm,
-                               const struct slice_mask *mask, int psize)
-{
-       int index, mask_index;
-       /* Write the new slice psize bits */
-       unsigned char *hpsizes, *lpsizes;
-       struct slice_mask *psize_mask, *old_mask;
-       unsigned long i, flags;
-       int old_psize;
-
-       slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
-       slice_print_mask(" mask", mask);
-
-       psize_mask = slice_mask_for_size(&mm->context, psize);
-
-       /* We need to use a spinlock here to protect against
-        * concurrent 64k -> 4k demotion ...
-        */
-       spin_lock_irqsave(&slice_convert_lock, flags);
-
-       lpsizes = mm_ctx_low_slices(&mm->context);
-       for (i = 0; i < SLICE_NUM_LOW; i++) {
-               if (!(mask->low_slices & (1u << i)))
-                       continue;
-
-               mask_index = i & 0x1;
-               index = i >> 1;
-
-               /* Update the slice_mask */
-               old_psize = (lpsizes[index] >> (mask_index * 4)) & 0xf;
-               old_mask = slice_mask_for_size(&mm->context, old_psize);
-               old_mask->low_slices &= ~(1u << i);
-               psize_mask->low_slices |= 1u << i;
-
-               /* Update the sizes array */
-               lpsizes[index] = (lpsizes[index] & ~(0xf << (mask_index * 4))) |
-                               (((unsigned long)psize) << (mask_index * 4));
-       }
-
-       hpsizes = mm_ctx_high_slices(&mm->context);
-       for (i = 0; i < GET_HIGH_SLICE_INDEX(mm_ctx_slb_addr_limit(&mm->context)); i++) {
-               if (!test_bit(i, mask->high_slices))
-                       continue;
-
-               mask_index = i & 0x1;
-               index = i >> 1;
-
-               /* Update the slice_mask */
-               old_psize = (hpsizes[index] >> (mask_index * 4)) & 0xf;
-               old_mask = slice_mask_for_size(&mm->context, old_psize);
-               __clear_bit(i, old_mask->high_slices);
-               __set_bit(i, psize_mask->high_slices);
-
-               /* Update the sizes array */
-               hpsizes[index] = (hpsizes[index] & ~(0xf << (mask_index * 4))) |
-                               (((unsigned long)psize) << (mask_index * 4));
-       }
-
-       slice_dbg(" lsps=%lx, hsps=%lx\n",
-                 (unsigned long)mm_ctx_low_slices(&mm->context),
-                 (unsigned long)mm_ctx_high_slices(&mm->context));
-
-       spin_unlock_irqrestore(&slice_convert_lock, flags);
-
-       copro_flush_all_slbs(mm);
-}
-
-/*
- * Compute which slice 'addr' is part of;
- * set *boundary_addr to the start or end boundary of that slice
- * (depending on the 'end' parameter);
- * return a boolean indicating whether the slice is marked as available in
- * the 'available' slice_mask.
- */
-static bool slice_scan_available(unsigned long addr,
-                                const struct slice_mask *available,
-                                int end, unsigned long *boundary_addr)
-{
-       unsigned long slice;
-       if (slice_addr_is_low(addr)) {
-               slice = GET_LOW_SLICE_INDEX(addr);
-               *boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
-               return !!(available->low_slices & (1u << slice));
-       } else {
-               slice = GET_HIGH_SLICE_INDEX(addr);
-               *boundary_addr = (slice + end) ?
-                       ((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
-               return !!test_bit(slice, available->high_slices);
-       }
-}
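
(A minimal model of the boundary computation above, for the low area only; the high-area case differs in the shift used and in mapping slice 0's boundary back to SLICE_LOW_TOP. SLICE_LOW_SHIFT below is an assumed illustrative value.)

/* end = 0 returns the start of the low slice containing addr,
 * end = 1 returns the first address past it. Illustrative only. */
#define SLICE_LOW_SHIFT 28

static unsigned long low_slice_boundary(unsigned long addr, int end)
{
        unsigned long slice = addr >> SLICE_LOW_SHIFT;

        return (slice + end) << SLICE_LOW_SHIFT;
}

int main(void)
{
        /* the slice containing 0x12345678 spans [0x10000000, 0x20000000) */
        return (low_slice_boundary(0x12345678UL, 0) == 0x10000000UL &&
                low_slice_boundary(0x12345678UL, 1) == 0x20000000UL) ? 0 : 1;
}
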
-
-static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
-                                             unsigned long len,
-                                             const struct slice_mask *available,
-                                             int psize, unsigned long high_limit)
-{
-       int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
-       unsigned long addr, found, next_end;
-       struct vm_unmapped_area_info info;
-
-       info.flags = 0;
-       info.length = len;
-       info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
-       info.align_offset = 0;
-
-       addr = TASK_UNMAPPED_BASE;
-       /*
-        * Check up to the allowed max value for this mmap request
-        */
-       while (addr < high_limit) {
-               info.low_limit = addr;
-               if (!slice_scan_available(addr, available, 1, &addr))
-                       continue;
-
- next_slice:
-               /*
-                * At this point [info.low_limit; addr) covers
-                * available slices only and ends at a slice boundary.
-                * Check if we need to reduce the range, or if we can
-                * extend it to cover the next available slice.
-                */
-               if (addr >= high_limit)
-                       addr = high_limit;
-               else if (slice_scan_available(addr, available, 1, &next_end)) {
-                       addr = next_end;
-                       goto next_slice;
-               }
-               info.high_limit = addr;
-
-               found = vm_unmapped_area(&info);
-               if (!(found & ~PAGE_MASK))
-                       return found;
-       }
-
-       return -ENOMEM;
-}
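
(The loop above grows [info.low_limit, addr) across consecutive available slices before each vm_unmapped_area() call. Below is a toy model of that coalescing over a plain array of per-slice availability flags, purely for illustration; the real code hands each run to vm_unmapped_area() instead of measuring its length.)

#include <stdbool.h>

/* Return the index of the first run of available slices at least 'need'
 * slices long, or -1 if there is none. */
static int first_fit_run(const bool *available, int nr_slices, int need)
{
        int i = 0;

        while (i < nr_slices) {
                int start = i;

                /* extend the window while slices stay available */
                while (i < nr_slices && available[i])
                        i++;
                if (i - start >= need)
                        return start;
                i++;    /* skip the unavailable slice and retry */
        }
        return -1;
}

int main(void)
{
        const bool avail[] = { true, false, true, true, true, false };

        return first_fit_run(avail, 6, 3) == 2 ? 0 : 1;
}
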
-
-static unsigned long slice_find_area_topdown(struct mm_struct *mm,
-                                            unsigned long len,
-                                            const struct slice_mask *available,
-                                            int psize, unsigned long high_limit)
-{
-       int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
-       unsigned long addr, found, prev;
-       struct vm_unmapped_area_info info;
-       unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);
-
-       info.flags = VM_UNMAPPED_AREA_TOPDOWN;
-       info.length = len;
-       info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
-       info.align_offset = 0;
-
-       addr = mm->mmap_base;
-       /*
-        * If we are trying to allocate above DEFAULT_MAP_WINDOW,
-        * add the difference to mmap_base.
-        * Apply this only for requests whose high_limit is above
-        * DEFAULT_MAP_WINDOW.
-        */
-       if (high_limit > DEFAULT_MAP_WINDOW)
-               addr += mm_ctx_slb_addr_limit(&mm->context) - DEFAULT_MAP_WINDOW;
-
-       while (addr > min_addr) {
-               info.high_limit = addr;
-               if (!slice_scan_available(addr - 1, available, 0, &addr))
-                       continue;
-
- prev_slice:
-               /*
-                * At this point [addr; info.high_limit) covers
-                * available slices only and starts at a slice boundary.
-                * Check if we need to reduce the range, or if we can
-                * extend it to cover the previous available slice.
-                */
-               if (addr < min_addr)
-                       addr = min_addr;
-               else if (slice_scan_available(addr - 1, available, 0, &prev)) {
-                       addr = prev;
-                       goto prev_slice;
-               }
-               info.low_limit = addr;
-
-               found = vm_unmapped_area(&info);
-               if (!(found & ~PAGE_MASK))
-                       return found;
-       }
-
-       /*
-        * A failed mmap() very likely causes application failure,
-        * so fall back to the bottom-up function here. This scenario
-        * can happen with large stack limits and large mmap()
-        * allocations.
-        */
-       return slice_find_area_bottomup(mm, len, available, psize, high_limit);
-}
-
-
-static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
-                                    const struct slice_mask *mask, int psize,
-                                    int topdown, unsigned long high_limit)
-{
-       if (topdown)
-               return slice_find_area_topdown(mm, len, mask, psize, high_limit);
-       else
-               return slice_find_area_bottomup(mm, len, mask, psize, high_limit);
-}
-
-static inline void slice_copy_mask(struct slice_mask *dst,
-                                       const struct slice_mask *src)
-{
-       dst->low_slices = src->low_slices;
-       if (!SLICE_NUM_HIGH)
-               return;
-       bitmap_copy(dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
-}
-
-static inline void slice_or_mask(struct slice_mask *dst,
-                                       const struct slice_mask *src1,
-                                       const struct slice_mask *src2)
-{
-       dst->low_slices = src1->low_slices | src2->low_slices;
-       if (!SLICE_NUM_HIGH)
-               return;
-       bitmap_or(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
-}
-
-static inline void slice_andnot_mask(struct slice_mask *dst,
-                                       const struct slice_mask *src1,
-                                       const struct slice_mask *src2)
-{
-       dst->low_slices = src1->low_slices & ~src2->low_slices;
-       if (!SLICE_NUM_HIGH)
-               return;
-       bitmap_andnot(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
-}
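
(These helpers are combined further down roughly as potential = free | good, followed by potential &= ~good, and optionally &= ~compat, to isolate only the slices that still need converting. A tiny worked example on the low-slice word, with invented values:)

#include <assert.h>

int main(void)
{
        unsigned int good        = 0x0003;              /* slices 0,1 already the right size */
        unsigned int free_slices = 0x000c;              /* slices 2,3 carry no VMAs          */
        unsigned int potential   = free_slices | good;  /* everywhere we may place the map   */
        unsigned int convert     = potential & ~good;   /* slices that would need converting */

        assert(potential == 0x000f);
        assert(convert == 0x000c);
        return 0;
}
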
-
-#ifdef CONFIG_PPC_64K_PAGES
-#define MMU_PAGE_BASE  MMU_PAGE_64K
-#else
-#define MMU_PAGE_BASE  MMU_PAGE_4K
-#endif
-
-unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
-                                     unsigned long flags, unsigned int psize,
-                                     int topdown)
-{
-       struct slice_mask good_mask;
-       struct slice_mask potential_mask;
-       const struct slice_mask *maskp;
-       const struct slice_mask *compat_maskp = NULL;
-       int fixed = (flags & MAP_FIXED);
-       int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
-       unsigned long page_size = 1UL << pshift;
-       struct mm_struct *mm = current->mm;
-       unsigned long newaddr;
-       unsigned long high_limit;
-
-       high_limit = DEFAULT_MAP_WINDOW;
-       if (addr >= high_limit || (fixed && (addr + len > high_limit)))
-               high_limit = TASK_SIZE;
-
-       if (len > high_limit)
-               return -ENOMEM;
-       if (len & (page_size - 1))
-               return -EINVAL;
-       if (fixed) {
-               if (addr & (page_size - 1))
-                       return -EINVAL;
-               if (addr > high_limit - len)
-                       return -ENOMEM;
-       }
-
-       if (high_limit > mm_ctx_slb_addr_limit(&mm->context)) {
-               /*
-                * Increasing the slb_addr_limit does not require the
-                * slice mask cache to be recalculated, because it should
-                * already be initialised beyond the old address limit.
-                */
-               mm_ctx_set_slb_addr_limit(&mm->context, high_limit);
-
-               on_each_cpu(slice_flush_segments, mm, 1);
-       }
-
-       /* Sanity checks */
-       BUG_ON(mm->task_size == 0);
-       BUG_ON(mm_ctx_slb_addr_limit(&mm->context) == 0);
-       VM_BUG_ON(radix_enabled());
-
-       slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
-       slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
-                 addr, len, flags, topdown);
-
-       /* If hint, make sure it matches our alignment restrictions */
-       if (!fixed && addr) {
-               addr = ALIGN(addr, page_size);
-               slice_dbg(" aligned addr=%lx\n", addr);
-               /* Ignore hint if it's too large or overlaps a VMA */
-               if (addr > high_limit - len || addr < mmap_min_addr ||
-                   !slice_area_is_free(mm, addr, len))
-                       addr = 0;
-       }
-
-       /* First make up a "good" mask of slices that have the right size
-        * already
-        */
-       maskp = slice_mask_for_size(&mm->context, psize);
-
-       /*
-        * Here "good" means slices that are already the right page size,
-        * "compat" means slices that have a compatible page size (i.e.
-        * 4k in a 64k pagesize kernel), and "free" means slices without
-        * any VMAs.
-        *
-        * If MAP_FIXED:
-        *      check if fits in good | compat => OK
-        *      check if fits in good | compat | free => convert free
-        *      else bad
-        * If have hint:
-        *      check if hint fits in good => OK
-        *      check if hint fits in good | free => convert free
-        * Otherwise:
-        *      search in good, found => OK
-        *      search in good | free, found => convert free
-        *      search in good | compat | free, found => convert free.
-        */
-
-       /*
-        * If we support combo pages, we can allow 64k pages in 4k slices.
-        * The mask copies could be avoided in most cases here if we had
-        * a pointer to the good mask for the following code to use.
-        */
-       if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
-               compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
-               if (fixed)
-                       slice_or_mask(&good_mask, maskp, compat_maskp);
-               else
-                       slice_copy_mask(&good_mask, maskp);
-       } else {
-               slice_copy_mask(&good_mask, maskp);
-       }
-
-       slice_print_mask(" good_mask", &good_mask);
-       if (compat_maskp)
-               slice_print_mask(" compat_mask", compat_maskp);
-
-       /* First check hint if it's valid or if we have MAP_FIXED */
-       if (addr != 0 || fixed) {
-               /* Check if we fit in the good mask. If we do, we just return,
-                * nothing else to do
-                */
-               if (slice_check_range_fits(mm, &good_mask, addr, len)) {
-                       slice_dbg(" fits good !\n");
-                       newaddr = addr;
-                       goto return_addr;
-               }
-       } else {
-               /* Now let's see if we can find something in the existing
-                * slices for that size
-                */
-               newaddr = slice_find_area(mm, len, &good_mask,
-                                         psize, topdown, high_limit);
-               if (newaddr != -ENOMEM) {
-                       /* Found within the good mask, we don't have any setup
-                        * to do, so we return directly.
-                        */
-                       slice_dbg(" found area at 0x%lx\n", newaddr);
-                       goto return_addr;
-               }
-       }
-       /*
-        * We don't fit in the good mask, check what other slices are
-        * empty and thus can be converted
-        */
-       slice_mask_for_free(mm, &potential_mask, high_limit);
-       slice_or_mask(&potential_mask, &potential_mask, &good_mask);
-       slice_print_mask(" potential", &potential_mask);
-
-       if (addr != 0 || fixed) {
-               if (slice_check_range_fits(mm, &potential_mask, addr, len)) {
-                       slice_dbg(" fits potential !\n");
-                       newaddr = addr;
-                       goto convert;
-               }
-       }
-
-       /* If we have MAP_FIXED and failed the above steps, then error out */
-       if (fixed)
-               return -EBUSY;
-
-       slice_dbg(" search...\n");
-
-       /* If we had a hint that didn't work out, see if we can fit
-        * anywhere in the good area.
-        */
-       if (addr) {
-               newaddr = slice_find_area(mm, len, &good_mask,
-                                         psize, topdown, high_limit);
-               if (newaddr != -ENOMEM) {
-                       slice_dbg(" found area at 0x%lx\n", newaddr);
-                       goto return_addr;
-               }
-       }
-
-       /* Now let's see if we can find something in the existing slices
-        * for that size plus free slices
-        */
-       newaddr = slice_find_area(mm, len, &potential_mask,
-                                 psize, topdown, high_limit);
-
-       if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && newaddr == -ENOMEM &&
-           psize == MMU_PAGE_64K) {
-               /* retry the search with 4k-page slices included */
-               slice_or_mask(&potential_mask, &potential_mask, compat_maskp);
-               newaddr = slice_find_area(mm, len, &potential_mask,
-                                         psize, topdown, high_limit);
-       }
-
-       if (newaddr == -ENOMEM)
-               return -ENOMEM;
-
-       slice_range_to_mask(newaddr, len, &potential_mask);
-       slice_dbg(" found potential area at 0x%lx\n", newaddr);
-       slice_print_mask(" mask", &potential_mask);
-
- convert:
-       /*
-        * Try to allocate the context before we do slice convert
-        * so that we handle the context allocation failure gracefully.
-        */
-       if (need_extra_context(mm, newaddr)) {
-               if (alloc_extended_context(mm, newaddr) < 0)
-                       return -ENOMEM;
-       }
-
-       slice_andnot_mask(&potential_mask, &potential_mask, &good_mask);
-       if (compat_maskp && !fixed)
-               slice_andnot_mask(&potential_mask, &potential_mask, compat_maskp);
-       if (potential_mask.low_slices ||
-               (SLICE_NUM_HIGH &&
-                !bitmap_empty(potential_mask.high_slices, SLICE_NUM_HIGH))) {
-               slice_convert(mm, &potential_mask, psize);
-               if (psize > MMU_PAGE_BASE)
-                       on_each_cpu(slice_flush_segments, mm, 1);
-       }
-       return newaddr;
-
-return_addr:
-       if (need_extra_context(mm, newaddr)) {
-               if (alloc_extended_context(mm, newaddr) < 0)
-                       return -ENOMEM;
-       }
-       return newaddr;
-}
-EXPORT_SYMBOL_GPL(slice_get_unmapped_area);
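
(The search order documented inside slice_get_unmapped_area(), good slices first, then good | free, then good | compat | free (the last step only for 64K requests on a combo 64K/4K kernel), can be condensed into a schematic. Every name below is an illustrative stand-in, not kernel API; a mask is modelled as a 16-bit word of low slices and "allocate" as finding 'need' contiguous set bits.)

#include <assert.h>

static int find_run(unsigned int mask, int need)
{
        int i, run = 0;

        for (i = 0; i < 16; i++) {
                run = (mask & (1u << i)) ? run + 1 : 0;
                if (run == need)
                        return i - need + 1;
        }
        return -1;                              /* models -ENOMEM */
}

static int pick_slices(unsigned int good, unsigned int compat,
                       unsigned int free, int need)
{
        int at;

        at = find_run(good, need);                      /* 1: right size already  */
        if (at >= 0)
                return at;
        at = find_run(good | free, need);               /* 2: convert free slices */
        if (at >= 0)
                return at;
        return find_run(good | compat | free, need);    /* 3: also allow compat   */
}

int main(void)
{
        /* good alone has no 3-slice run, but good|free does, starting at 4 */
        assert(pick_slices(0x0003, 0x0000, 0x0070, 3) == 4);
        return 0;
}
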
-
-unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr)
-{
-       unsigned char *psizes;
-       int index, mask_index;
-
-       VM_BUG_ON(radix_enabled());
-
-       if (slice_addr_is_low(addr)) {
-               psizes = mm_ctx_low_slices(&mm->context);
-               index = GET_LOW_SLICE_INDEX(addr);
-       } else {
-               psizes = mm_ctx_high_slices(&mm->context);
-               index = GET_HIGH_SLICE_INDEX(addr);
-       }
-       mask_index = index & 0x1;
-       return (psizes[index >> 1] >> (mask_index * 4)) & 0xf;
-}
-EXPORT_SYMBOL_GPL(get_slice_psize);
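
(get_slice_psize() above, and the memset() fill in slice_init_new_context_exec() below, both rely on two 4-bit page-size fields being packed into each byte of the low/high psize arrays. A stand-alone sketch of that packing, with an arbitrary array size and psize value:)

#include <assert.h>
#include <string.h>

static unsigned int get_psize(const unsigned char *psizes, unsigned long index)
{
        unsigned int shift = (index & 1) * 4;

        return (psizes[index >> 1] >> shift) & 0xf;
}

static void set_psize(unsigned char *psizes, unsigned long index, unsigned int psize)
{
        unsigned int shift = (index & 1) * 4;

        psizes[index >> 1] &= ~(0xf << shift);
        psizes[index >> 1] |= psize << shift;
}

int main(void)
{
        unsigned char psizes[8];
        unsigned int def = 5;   /* arbitrary default psize value */

        /* same idiom as the memset() fill: the default psize in both nibbles */
        memset(psizes, (def << 4) | def, sizeof(psizes));
        assert(get_psize(psizes, 3) == def);

        set_psize(psizes, 3, 2);
        assert(get_psize(psizes, 3) == 2);
        assert(get_psize(psizes, 2) == def);
        return 0;
}
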
-
-void slice_init_new_context_exec(struct mm_struct *mm)
-{
-       unsigned char *hpsizes, *lpsizes;
-       struct slice_mask *mask;
-       unsigned int psize = mmu_virtual_psize;
-
-       slice_dbg("slice_init_new_context_exec(mm=%p)\n", mm);
-
-       /*
-        * In the case of exec, use the default limit. In the
-        * case of fork it is just inherited from the mm being
-        * duplicated.
-        */
-       mm_ctx_set_slb_addr_limit(&mm->context, SLB_ADDR_LIMIT_DEFAULT);
-       mm_ctx_set_user_psize(&mm->context, psize);
-
-       /*
-        * Set all slice psizes to the default.
-        */
-       lpsizes = mm_ctx_low_slices(&mm->context);
-       memset(lpsizes, (psize << 4) | psize, SLICE_NUM_LOW >> 1);
-
-       hpsizes = mm_ctx_high_slices(&mm->context);
-       memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1);
-
-       /*
-        * Slice mask cache starts zeroed, fill the default size cache.
-        */
-       mask = slice_mask_for_size(&mm->context, psize);
-       mask->low_slices = ~0UL;
-       if (SLICE_NUM_HIGH)
-               bitmap_fill(mask->high_slices, SLICE_NUM_HIGH);
-}
-
-#ifdef CONFIG_PPC_BOOK3S_64
-void slice_setup_new_exec(void)
-{
-       struct mm_struct *mm = current->mm;
-
-       slice_dbg("slice_setup_new_exec(mm=%p)\n", mm);
-
-       if (!is_32bit_task())
-               return;
-
-       mm_ctx_set_slb_addr_limit(&mm->context, DEFAULT_MAP_WINDOW);
-}
-#endif
-
-void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
-                          unsigned long len, unsigned int psize)
-{
-       struct slice_mask mask;
-
-       VM_BUG_ON(radix_enabled());
-
-       slice_range_to_mask(start, len, &mask);
-       slice_convert(mm, &mask, psize);
-}
-
-#ifdef CONFIG_HUGETLB_PAGE
-/*
- * is_hugepage_only_range() is used by generic code to verify whether
- * a normal mmap mapping (non hugetlbfs) is valid on a given area.
- *
- * until the generic code provides a more generic hook and/or starts
- * calling arch get_unmapped_area for MAP_FIXED (which our implementation
- * here knows how to deal with), we hijack it to keep standard mappings
- * away from us.
- *
- * because of that generic code limitation, MAP_FIXED mapping cannot
- * "convert" back a slice with no VMAs to the standard page size, only
- * get_unmapped_area() can. It would be possible to fix it here but I
- * prefer working on fixing the generic code instead.
- *
- * WARNING: This will not work if hugetlbfs isn't enabled since the
- * generic code will redefine that function as 0 in that case. This is ok
- * for now as we only use slices with hugetlbfs enabled. This should
- * be fixed as the generic code gets fixed.
- */
-int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
-                          unsigned long len)
-{
-       const struct slice_mask *maskp;
-       unsigned int psize = mm_ctx_user_psize(&mm->context);
-
-       VM_BUG_ON(radix_enabled());
-
-       maskp = slice_mask_for_size(&mm->context, psize);
-
-       /* We need to account for 4k slices too */
-       if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
-               const struct slice_mask *compat_maskp;
-               struct slice_mask available;
-
-               compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
-               slice_or_mask(&available, maskp, compat_maskp);
-               return !slice_check_range_fits(mm, &available, addr, len);
-       }
-
-       return !slice_check_range_fits(mm, maskp, addr, len);
-}
-#endif
index 4738c4d..308a2e4 100644 (file)
@@ -157,7 +157,7 @@ static void mpc8xx_pmu_del(struct perf_event *event, int flags)
 
        mpc8xx_pmu_read(event);
 
-       /* If it was the last user, stop counting to avoid useles overhead */
+       /* If it was the last user, stop counting to avoid useless overhead */
        switch (event_type(event)) {
        case PERF_8xx_ID_CPU_CYCLES:
                break;
index b5b42cf..140502a 100644 (file)
@@ -1142,7 +1142,7 @@ static u64 check_and_compute_delta(u64 prev, u64 val)
        /*
         * POWER7 can roll back counter values, if the new value is smaller
         * than the previous value it will cause the delta and the counter to
-        * have bogus values unless we rolled a counter over.  If a coutner is
+        * have bogus values unless we rolled a counter over.  If a counter is
         * rolled back, it will be smaller, but within 256, which is the maximum
         * number of events to rollback at once.  If we detect a rollback
         * return 0.  This can lead to a small lack of precision in the
@@ -2057,7 +2057,7 @@ static int power_pmu_event_init(struct perf_event *event)
        /*
         * PMU config registers have fields that are
         * reserved and some specific values for bit fields are reserved.
-        * For ex., MMCRA[61:62] is Randome Sampling Mode (SM)
+        * For ex., MMCRA[61:62] is Random Sampling Mode (SM)
         * and value of 0b11 to this field is reserved.
         * Check for invalid values in attr.config.
         */
@@ -2447,7 +2447,7 @@ static void __perf_event_interrupt(struct pt_regs *regs)
        }
 
        /*
-        * During system wide profling or while specific CPU is monitored for an
+        * During system wide profiling or while specific CPU is monitored for an
         * event, some corner cases could cause PMC to overflow in idle path. This
         * will trigger a PMI after waking up from idle. Since counter values are _not_
         * saved/restored in idle path, can lead to below "Can't find PMC" message.
index 12c1777..cf5406b 100644 (file)
@@ -33,7 +33,7 @@ static bool aggregate_result_elements;
 
 static cpumask_t hv_24x7_cpumask;
 
-static bool domain_is_valid(unsigned domain)
+static bool domain_is_valid(unsigned int domain)
 {
        switch (domain) {
 #define DOMAIN(n, v, x, c)             \
@@ -47,7 +47,7 @@ static bool domain_is_valid(unsigned domain)
        }
 }
 
-static bool is_physical_domain(unsigned domain)
+static bool is_physical_domain(unsigned int domain)
 {
        switch (domain) {
 #define DOMAIN(n, v, x, c)             \
@@ -128,7 +128,7 @@ static bool domain_needs_aggregation(unsigned int domain)
                          domain <= HV_PERF_DOMAIN_VCPU_REMOTE_NODE));
 }
 
-static const char *domain_name(unsigned domain)
+static const char *domain_name(unsigned int domain)
 {
        if (!domain_is_valid(domain))
                return NULL;
@@ -146,7 +146,7 @@ static const char *domain_name(unsigned domain)
        return NULL;
 }
 
-static bool catalog_entry_domain_is_valid(unsigned domain)
+static bool catalog_entry_domain_is_valid(unsigned int domain)
 {
        /* POWER8 doesn't support virtual domains. */
        if (interface_version == 1)
@@ -258,7 +258,7 @@ static char *event_name(struct hv_24x7_event_data *ev, int *len)
 
 static char *event_desc(struct hv_24x7_event_data *ev, int *len)
 {
-       unsigned nl = be16_to_cpu(ev->event_name_len);
+       unsigned int nl = be16_to_cpu(ev->event_name_len);
        __be16 *desc_len = (__be16 *)(ev->remainder + nl - 2);
 
        *len = be16_to_cpu(*desc_len) - 2;
@@ -267,9 +267,9 @@ static char *event_desc(struct hv_24x7_event_data *ev, int *len)
 
 static char *event_long_desc(struct hv_24x7_event_data *ev, int *len)
 {
-       unsigned nl = be16_to_cpu(ev->event_name_len);
+       unsigned int nl = be16_to_cpu(ev->event_name_len);
        __be16 *desc_len_ = (__be16 *)(ev->remainder + nl - 2);
-       unsigned desc_len = be16_to_cpu(*desc_len_);
+       unsigned int desc_len = be16_to_cpu(*desc_len_);
        __be16 *long_desc_len = (__be16 *)(ev->remainder + nl + desc_len - 2);
 
        *len = be16_to_cpu(*long_desc_len) - 2;
@@ -296,8 +296,8 @@ static void *event_end(struct hv_24x7_event_data *ev, void *end)
 {
        void *start = ev;
        __be16 *dl_, *ldl_;
-       unsigned dl, ldl;
-       unsigned nl = be16_to_cpu(ev->event_name_len);
+       unsigned int dl, ldl;
+       unsigned int nl = be16_to_cpu(ev->event_name_len);
 
        if (nl < 2) {
                pr_debug("%s: name length too short: %d", __func__, nl);
@@ -398,7 +398,7 @@ static long h_get_24x7_catalog_page(char page[], u64 version, u32 index)
  *             - Specifying (i.e overriding) values for other parameters
  *               is undefined.
  */
-static char *event_fmt(struct hv_24x7_event_data *event, unsigned domain)
+static char *event_fmt(struct hv_24x7_event_data *event, unsigned int domain)
 {
        const char *sindex;
        const char *lpar;
@@ -529,9 +529,9 @@ out_s:
        return NULL;
 }
 
-static struct attribute *event_to_attr(unsigned ix,
+static struct attribute *event_to_attr(unsigned int ix,
                                       struct hv_24x7_event_data *event,
-                                      unsigned domain,
+                                      unsigned int domain,
                                       int nonce)
 {
        int event_name_len;
@@ -599,8 +599,8 @@ event_to_long_desc_attr(struct hv_24x7_event_data *event, int nonce)
        return device_str_attr_create(name, nl, nonce, desc, dl);
 }
 
-static int event_data_to_attrs(unsigned ix, struct attribute **attrs,
-                                  struct hv_24x7_event_data *event, int nonce)
+static int event_data_to_attrs(unsigned int ix, struct attribute **attrs,
+                              struct hv_24x7_event_data *event, int nonce)
 {
        *attrs = event_to_attr(ix, event, event->domain, nonce);
        if (!*attrs)
@@ -614,8 +614,8 @@ struct event_uniq {
        struct rb_node node;
        const char *name;
        int nl;
-       unsigned ct;
-       unsigned domain;
+       unsigned int ct;
+       unsigned int domain;
 };
 
 static int memord(const void *d1, size_t s1, const void *d2, size_t s2)
@@ -628,8 +628,8 @@ static int memord(const void *d1, size_t s1, const void *d2, size_t s2)
        return memcmp(d1, d2, s1);
 }
 
-static int ev_uniq_ord(const void *v1, size_t s1, unsigned d1, const void *v2,
-                      size_t s2, unsigned d2)
+static int ev_uniq_ord(const void *v1, size_t s1, unsigned int d1,
+                      const void *v2, size_t s2, unsigned int d2)
 {
        int r = memord(v1, s1, v2, s2);
 
@@ -643,7 +643,7 @@ static int ev_uniq_ord(const void *v1, size_t s1, unsigned d1, const void *v2,
 }
 
 static int event_uniq_add(struct rb_root *root, const char *name, int nl,
-                         unsigned domain)
+                         unsigned int domain)
 {
        struct rb_node **new = &(root->rb_node), *parent = NULL;
        struct event_uniq *data;
@@ -1398,7 +1398,7 @@ out:
 static int h_24x7_event_init(struct perf_event *event)
 {
        struct hv_perf_caps caps;
-       unsigned domain;
+       unsigned int domain;
        unsigned long hret;
        u64 ct;
 
index 526d4b7..d7976ab 100644 (file)
@@ -6,6 +6,7 @@
  *           (C) 2017 Anju T Sudhakar, IBM Corporation.
  *           (C) 2017 Hemant K Shaw, IBM Corporation.
  */
+#include <linux/of.h>
 #include <linux/perf_event.h>
 #include <linux/slab.h>
 #include <asm/opal.h>
@@ -521,7 +522,7 @@ static int nest_imc_event_init(struct perf_event *event)
 
        /*
         * Nest HW counter memory resides in a per-chip reserve-memory (HOMER).
-        * Get the base memory addresss for this cpu.
+        * Get the base memory address for this cpu.
         */
        chip_id = cpu_to_chip_id(event->cpu);
 
@@ -674,7 +675,7 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
        /*
         * Check whether core_imc is registered. We could end up here
         * if the cpuhotplug callback registration fails. i.e, callback
-        * invokes the offline path for all sucessfully registered cpus.
+        * invokes the offline path for all successfully registered cpus.
         * At this stage, core_imc pmu will not be registered and we
         * should return here.
         *
index a74d382..839cc68 100644 (file)
@@ -82,11 +82,11 @@ static unsigned long sdar_mod_val(u64 event)
 static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
 {
        /*
-        * MMCRA[SDAR_MODE] specifices how the SDAR should be updated in
-        * continous sampling mode.
+        * MMCRA[SDAR_MODE] specifies how the SDAR should be updated in
+        * continuous sampling mode.
         *
         * In case of Power8:
-        * MMCRA[SDAR_MODE] will be programmed as "0b01" for continous sampling
+        * MMCRA[SDAR_MODE] will be programmed as "0b01" for continuous sampling
         * mode and will be un-changed when setting MMCRA[63] (Marked events).
         *
         * In case of Power9/power10:
index c393e83..3ad40ff 100644 (file)
@@ -98,7 +98,7 @@ extern u64 PERF_REG_EXTENDED_MASK;
 /* PowerISA v2.07 format attribute structure*/
 extern const struct attribute_group isa207_pmu_format_group;
 
-int p9_dd21_bl_ev[] = {
+static int p9_dd21_bl_ev[] = {
        PM_MRK_ST_DONE_L2,
        PM_RADIX_PWC_L1_HIT,
        PM_FLOP_CMPL,
@@ -112,7 +112,7 @@ int p9_dd21_bl_ev[] = {
        PM_DISP_HELD_SYNC_HOLD,
 };
 
-int p9_dd22_bl_ev[] = {
+static int p9_dd22_bl_ev[] = {
        PM_DTLB_MISS_16G,
        PM_DERAT_MISS_2M,
        PM_DTLB_MISS_2M,
index e70b427..dce696c 100644 (file)
@@ -13,7 +13,6 @@
 #include <asm/machdep.h>
 #include <asm/pci-bridge.h>
 #include <asm/ppc4xx.h>
-#include <asm/prom.h>
 #include <asm/time.h>
 #include <asm/udbg.h>
 #include <asm/uic.h>
index 807968a..5b23aef 100644 (file)
@@ -12,6 +12,7 @@
 #include <asm/ppc4xx.h>
 #include <asm/udbg.h>
 #include <asm/uic.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/delay.h>
 #include "44x.h"
index af13a59..e2e4f6d 100644 (file)
  */
 
 #include <linux/init.h>
+#include <linux/of_fdt.h>
 #include <linux/of_platform.h>
 #include <linux/rtc.h>
 
 #include <asm/machdep.h>
-#include <asm/prom.h>
 #include <asm/udbg.h>
 #include <asm/time.h>
 #include <asm/uic.h>
index 3dbd8dd..2a0dcdf 100644 (file)
@@ -13,7 +13,6 @@
 #include <asm/machdep.h>
 #include <asm/pci-bridge.h>
 #include <asm/ppc4xx.h>
-#include <asm/prom.h>
 #include <asm/time.h>
 #include <asm/udbg.h>
 #include <asm/uic.h>
index fb7db5c..20cc8f8 100644 (file)
 
 #include <linux/init.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/rtc.h>
 
 #include <asm/machdep.h>
-#include <asm/prom.h>
 #include <asm/udbg.h>
 #include <asm/time.h>
 #include <asm/uic.h>
index 68ba4b0..ed854b5 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/of_platform.h>
 
 #include <asm/machdep.h>
-#include <asm/prom.h>
 #include <asm/udbg.h>
 #include <asm/time.h>
 #include <asm/uic.h>
index 665f18e..f03432e 100644 (file)
 #include <linux/i2c.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/of_gpio.h>
 #include <linux/slab.h>
 #include <linux/export.h>
 
 #include <asm/machdep.h>
-#include <asm/prom.h>
 #include <asm/udbg.h>
 #include <asm/time.h>
 #include <asm/uic.h>
index 2571841..1d3bc35 100644 (file)
@@ -327,6 +327,6 @@ late_initcall(cpm_init);
 static int __init cpm_powersave_off(char *arg)
 {
        cpm.powersave_off = 1;
-       return 0;
+       return 1;
 }
 __setup("powersave=off", cpm_powersave_off);
index fee430e..d4f7fff 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/interrupt.h>
 #include <linux/msi.h>
 #include <linux/of.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/pci.h>
 #include <linux/semaphore.h>
index 24f41e1..ca5dd7a 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/pci.h>
 #include <linux/init.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
 
index 89e2587..d667ad0 100644 (file)
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/dcr.h>
 
 #define NR_UIC_INTS    32
index 0b03d81..0652c7e 100644 (file)
@@ -663,7 +663,7 @@ static void __init mpc512x_clk_setup_mclk(struct mclk_setup_data *entry, size_t
         *   the PSC/MSCAN/SPDIF (serial drivers et al) need the MCLK
         *   for their bitrate
         * - in the absence of "aliases" for clocks we need to create
-        *   individial 'struct clk' items for whatever might get
+        *   individual 'struct clk' items for whatever might get
         *   referenced or looked up, even if several of those items are
         *   identical from the logical POV (their rate value)
         * - for easier future maintenance and for better reflection of
index 9d030c2..fc3fb99 100644 (file)
@@ -14,7 +14,6 @@
 
 #include <asm/machdep.h>
 #include <asm/ipic.h>
-#include <asm/prom.h>
 #include <asm/time.h>
 
 #include <sysdev/fsl_pci.h>
index ea46870..6f08d07 100644 (file)
@@ -14,7 +14,8 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/io.h>
-#include <asm/prom.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 
 static struct device_node *cpld_pic_node;
 static struct irq_domain *cpld_pic_host;
index 303bc30..364564c 100644 (file)
@@ -13,7 +13,6 @@
 
 #include <asm/machdep.h>
 #include <asm/ipic.h>
-#include <asm/prom.h>
 #include <asm/time.h>
 
 #include "mpc512x.h"
index e341166..5ac0ead 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/irq.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/fsl-diu-fb.h>
 #include <linux/memblock.h>
@@ -20,7 +21,6 @@
 #include <asm/cacheflush.h>
 #include <asm/machdep.h>
 #include <asm/ipic.h>
-#include <asm/prom.h>
 #include <asm/time.h>
 #include <asm/mpc5121.h>
 #include <asm/mpc52xx_psc.h>
@@ -289,7 +289,7 @@ static void __init mpc512x_setup_diu(void)
 
        /*
         * We do not allocate and configure new area for bitmap buffer
-        * because it would requere copying bitmap data (splash image)
+        * because it would require copying bitmap data (splash image)
         * and so negatively affect boot time. Instead we reserve the
         * already configured frame buffer area so that it won't be
         * destroyed. The starting address of the area to reserve and
index 3b7d70d..e064772 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/pci.h>
 #include <linux/of.h>
 #include <asm/dma.h>
-#include <asm/prom.h>
 #include <asm/time.h>
 #include <asm/machdep.h>
 #include <asm/rtas.h>
index 04cc973..7ea9b6c 100644 (file)
@@ -21,7 +21,6 @@
 #include <asm/time.h>
 #include <asm/io.h>
 #include <asm/machdep.h>
-#include <asm/prom.h>
 #include <asm/mpc52xx.h>
 
 /* ************************************************************************
index e7da22d..129313b 100644 (file)
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/init.h>
 #include <linux/suspend.h>
+#include <linux/of_address.h>
+
 #include <asm/io.h>
 #include <asm/time.h>
 #include <asm/mpc52xx.h>
index 110c444..ee367ff 100644 (file)
@@ -20,8 +20,9 @@
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <asm/time.h>
-#include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/mpc52xx.h>
 
index b9f5675..cc349d5 100644 (file)
@@ -22,8 +22,8 @@
  */
 
 #undef DEBUG
+#include <linux/of.h>
 #include <asm/time.h>
-#include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/mpc52xx.h>
 
index 565e3a8..4348506 100644 (file)
 #include <linux/gpio.h>
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/of_gpio.h>
 #include <linux/export.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/mpc52xx.h>
 
 /* MPC5200 device tree match tables */
@@ -308,7 +308,7 @@ int mpc5200_psc_ac97_gpio_reset(int psc_number)
 
        spin_lock_irqsave(&gpio_lock, flags);
 
-       /* Reconfiure pin-muxing to gpio */
+       /* Reconfigure pin-muxing to gpio */
        mux = in_be32(&simple_gpio->port_config);
        out_be32(&simple_gpio->port_config, mux & (~gpio));
 
index f862b48..60691b9 100644 (file)
@@ -55,6 +55,8 @@
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/of_gpio.h>
 #include <linux/kernel.h>
@@ -398,7 +400,7 @@ static int mpc52xx_gpt_do_start(struct mpc52xx_gpt_priv *gpt, u64 period,
                set |= MPC52xx_GPT_MODE_CONTINUOUS;
 
        /* Determine the number of clocks in the requested period.  64 bit
-        * arithmatic is done here to preserve the precision until the value
+        * arithmetic is done here to preserve the precision until the value
         * is scaled back down into the u32 range.  Period is in 'ns', bus
         * frequency is in Hz. */
        clocks = period * (u64)gpt->ipb_freq;
@@ -502,7 +504,7 @@ u64 mpc52xx_gpt_timer_period(struct mpc52xx_gpt_priv *gpt)
        if (prescale == 0)
                prescale = 0x10000;
        period = period * prescale * 1000000000ULL;
-       do_div(period, (u64)gpt->ipb_freq);
+       do_div(period, gpt->ipb_freq);
        return period;
 }
 EXPORT_SYMBOL(mpc52xx_gpt_timer_period);
index b91ebeb..48038aa 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/spinlock.h>
 #include <linux/module.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/mpc52xx.h>
 #include <asm/time.h>
 
@@ -104,7 +105,7 @@ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req)
                 *
                 * Configure the watermarks so DMA will always complete correctly.
                 * It may be worth experimenting with the ALARM value to see if
-                * there is a performance impacit.  However, if it is wrong there
+                * there is a performance impact.  However, if it is wrong there
                 * is a risk of DMA not transferring the last chunk of data
                 */
                if (write) {
index af0f799..859e281 100644 (file)
@@ -13,6 +13,7 @@
 #undef DEBUG
 
 #include <linux/pci.h>
+#include <linux/of_address.h>
 #include <asm/mpc52xx.h>
 #include <asm/delay.h>
 #include <asm/machdep.h>
@@ -242,7 +243,7 @@ mpc52xx_pci_setup(struct pci_controller *hose,
        u32 tmp;
        int iwcr0 = 0, iwcr1 = 0, iwcr2 = 0;
 
-       pr_debug("mpc52xx_pci_setup(hose=%p, pci_regs=%p)\n", hose, pci_regs);
+       pr_debug("%s(hose=%p, pci_regs=%p)\n", __func__, hose, pci_regs);
 
        /* pci_process_bridge_OF_ranges() found all our addresses for us;
         * now store them in the right places */
@@ -257,11 +258,7 @@ mpc52xx_pci_setup(struct pci_controller *hose,
        /* Memory windows */
        res = &hose->mem_resources[0];
        if (res->flags) {
-               pr_debug("mem_resource[0] = "
-                        "{.start=%llx, .end=%llx, .flags=%llx}\n",
-                        (unsigned long long)res->start,
-                        (unsigned long long)res->end,
-                        (unsigned long long)res->flags);
+               pr_debug("mem_resource[0] = %pr\n", res);
                out_be32(&pci_regs->iw0btar,
                         MPC52xx_PCI_IWBTAR_TRANSLATION(res->start, res->start,
                                                        resource_size(res)));
@@ -274,8 +271,7 @@ mpc52xx_pci_setup(struct pci_controller *hose,
 
        res = &hose->mem_resources[1];
        if (res->flags) {
-               pr_debug("mem_resource[1] = {.start=%x, .end=%x, .flags=%lx}\n",
-                        res->start, res->end, res->flags);
+               pr_debug("mem_resource[1] = %pr\n", res);
                out_be32(&pci_regs->iw1btar,
                         MPC52xx_PCI_IWBTAR_TRANSLATION(res->start, res->start,
                                                        resource_size(res)));
@@ -292,11 +288,8 @@ mpc52xx_pci_setup(struct pci_controller *hose,
                printk(KERN_ERR "%s: Didn't find IO resources\n", __FILE__);
                return;
        }
-       pr_debug(".io_resource={.start=%llx,.end=%llx,.flags=%llx} "
-                ".io_base_phys=0x%p\n",
-                (unsigned long long)res->start,
-                (unsigned long long)res->end,
-                (unsigned long long)res->flags, (void*)hose->io_base_phys);
+       pr_debug(".io_resource = %pr .io_base_phys=0x%pa\n",
+                res, &hose->io_base_phys);
        out_be32(&pci_regs->iw2btar,
                 MPC52xx_PCI_IWBTAR_TRANSLATION(hose->io_base_phys,
                                                res->start,
@@ -336,8 +329,7 @@ mpc52xx_pci_fixup_resources(struct pci_dev *dev)
 {
        int i;
 
-       pr_debug("mpc52xx_pci_fixup_resources() %.4x:%.4x\n",
-                dev->vendor, dev->device);
+       pr_debug("%s() %.4x:%.4x\n", __func__, dev->vendor, dev->device);
 
        /* We don't rely on boot loader for PCI and resets all
           devices */
index 76a8102..1e0a5e9 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/mpc52xx.h>
 
 /* HW IRQ mapping */
index b1d208d..549b362 100644 (file)
@@ -2,6 +2,8 @@
 #include <linux/init.h>
 #include <linux/suspend.h>
 #include <linux/io.h>
+#include <linux/of_address.h>
+
 #include <asm/time.h>
 #include <asm/cacheflush.h>
 #include <asm/mpc52xx.h>
index 369ebb1..28e627f 100644 (file)
@@ -20,7 +20,6 @@
 #include <asm/machdep.h>
 #include <asm/time.h>
 #include <asm/mpc8260.h>
-#include <asm/prom.h>
 
 #include <sysdev/fsl_soc.h>
 #include <sysdev/cpm2_pic.h>
index 745ed61..1c8bbf4 100644 (file)
@@ -20,7 +20,6 @@
 #include <asm/machdep.h>
 #include <linux/time.h>
 #include <asm/mpc8260.h>
-#include <asm/prom.h>
 
 #include <sysdev/fsl_soc.h>
 #include <sysdev/cpm2_pic.h>
index 285bfe1..cf32100 100644 (file)
@@ -14,9 +14,9 @@
 #include <linux/irq.h>
 #include <linux/types.h>
 #include <linux/slab.h>
+#include <linux/of_irq.h>
 
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/cpm2.h>
 
 #include "pq2.h"
index d9eed0d..907acde 100644 (file)
@@ -29,7 +29,6 @@
 #include <asm/machdep.h>
 #include <asm/ipic.h>
 #include <asm/irq.h>
-#include <asm/prom.h>
 #include <asm/udbg.h>
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
index a38372f..abb62fa 100644 (file)
@@ -8,17 +8,16 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/mutex.h>
 #include <linux/i2c.h>
 #include <linux/gpio/driver.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
 #include <linux/slab.h>
 #include <linux/kthread.h>
+#include <linux/property.h>
 #include <linux/reboot.h>
-#include <asm/prom.h>
 #include <asm/machdep.h>
 
 /*
@@ -116,21 +115,17 @@ static int mcu_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 
 static int mcu_gpiochip_add(struct mcu *mcu)
 {
-       struct device_node *np;
+       struct device *dev = &mcu->client->dev;
        struct gpio_chip *gc = &mcu->gc;
 
-       np = of_find_compatible_node(NULL, NULL, "fsl,mcu-mpc8349emitx");
-       if (!np)
-               return -ENODEV;
-
        gc->owner = THIS_MODULE;
-       gc->label = kasprintf(GFP_KERNEL, "%pOF", np);
+       gc->label = kasprintf(GFP_KERNEL, "%pfw", dev_fwnode(dev));
        gc->can_sleep = 1;
        gc->ngpio = MCU_NUM_GPIO;
        gc->base = -1;
        gc->set = mcu_gpio_set;
        gc->direction_output = mcu_gpio_dir_out;
-       gc->of_node = np;
+       gc->parent = dev;
 
        return gpiochip_add_data(gc, mcu);
 }
index 850d566..4353444 100644 (file)
@@ -28,7 +28,6 @@
 #include <asm/machdep.h>
 #include <asm/ipic.h>
 #include <asm/irq.h>
-#include <asm/prom.h>
 #include <asm/udbg.h>
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
index b6133a2..bb8caa5 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/mmc_spi.h>
 #include <linux/mmc/host.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/fsl_devices.h>
 
index 9630f3a..6a110f2 100644 (file)
@@ -27,7 +27,6 @@
 #include <asm/machdep.h>
 #include <asm/ipic.h>
 #include <asm/irq.h>
-#include <asm/prom.h>
 #include <asm/udbg.h>
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
index 0713def..7dde5a7 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/delay.h>
 #include <linux/seq_file.h>
 #include <linux/root_dev.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 
 #include <linux/atomic.h>
@@ -27,7 +28,6 @@
 #include <asm/machdep.h>
 #include <asm/ipic.h>
 #include <asm/irq.h>
-#include <asm/prom.h>
 #include <asm/udbg.h>
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
index da4cf52..b1e6665 100644 (file)
@@ -35,7 +35,6 @@
 #include <asm/machdep.h>
 #include <asm/ipic.h>
 #include <asm/irq.h>
-#include <asm/prom.h>
 #include <asm/udbg.h>
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
index 3427ad0..731bc5c 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/pci.h>
 #include <linux/of_platform.h>
 #include <linux/io.h>
-#include <asm/prom.h>
 #include <asm/time.h>
 #include <asm/ipic.h>
 #include <asm/udbg.h>
index fc88ab9..fa35388 100644 (file)
@@ -9,12 +9,12 @@
 
 #include <linux/pci.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 
 #include <asm/time.h>
 #include <asm/ipic.h>
 #include <asm/udbg.h>
-#include <asm/prom.h>
 #include <sysdev/fsl_pci.h>
 
 #include "mpc83xx.h"
index b0bda20..e2a13a0 100644 (file)
@@ -11,9 +11,9 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <sysdev/fsl_soc.h>
 
 #include "mpc83xx.h"
index 17ae75d..28d6b36 100644 (file)
@@ -19,7 +19,6 @@
 #include <asm/pci-bridge.h>
 #include <asm/ppc-pci.h>
 #include <mm/mmu_decl.h>
-#include <asm/prom.h>
 #include <asm/udbg.h>
 #include <asm/mpic.h>
 #include <asm/ehv_pic.h>
index 743c65e..8e82737 100644 (file)
 #include <linux/delay.h>
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 
 #include <asm/time.h>
 #include <asm/machdep.h>
 #include <asm/pci-bridge.h>
 #include <mm/mmu_decl.h>
-#include <asm/prom.h>
 #include <asm/udbg.h>
 #include <asm/mpic.h>
 #include <asm/swiotlb.h>
index 6ef8580..bdf9d42 100644 (file)
@@ -26,7 +26,6 @@
 #include <asm/mpic.h>
 #include <mm/mmu_decl.h>
 #include <asm/udbg.h>
-#include <asm/prom.h>
 
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
index 53bccb8..e5d7386 100644 (file)
@@ -18,7 +18,6 @@
 #include <asm/machdep.h>
 #include <asm/pci-bridge.h>
 #include <mm/mmu_decl.h>
-#include <asm/prom.h>
 #include <asm/udbg.h>
 #include <asm/mpic.h>
 #include <asm/swiotlb.h>
index 5bd4870..48f3acf 100644 (file)
@@ -21,6 +21,8 @@
 #include <linux/initrd.h>
 #include <linux/interrupt.h>
 #include <linux/fsl_devices.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/pgtable.h>
 
@@ -33,7 +35,6 @@
 #include <asm/pci-bridge.h>
 #include <asm/irq.h>
 #include <mm/mmu_decl.h>
-#include <asm/prom.h>
 #include <asm/udbg.h>
 #include <asm/mpic.h>
 #include <asm/i8259.h>
@@ -151,7 +152,7 @@ static void __init mpc85xx_cds_pci_irq_fixup(struct pci_dev *dev)
                 */
                case PCI_DEVICE_ID_VIA_82C586_2:
                /* There are two USB controllers.
-                * Identify them by functon number
+                * Identify them by function number
                 */
                        if (PCI_FUNC(dev->devfn) == 3)
                                dev->irq = 11;
index 2157a80..f8d2c97 100644 (file)
 #include <linux/delay.h>
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 
 #include <asm/time.h>
 #include <asm/machdep.h>
 #include <asm/pci-bridge.h>
 #include <mm/mmu_decl.h>
-#include <asm/prom.h>
 #include <asm/udbg.h>
 #include <asm/mpic.h>
 #include <asm/i8259.h>
index 7759eca..3a2ac41 100644 (file)
@@ -39,7 +39,6 @@
 #include <asm/pci-bridge.h>
 #include <asm/irq.h>
 #include <mm/mmu_decl.h>
-#include <asm/prom.h>
 #include <asm/udbg.h>
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
index 80a8017..d99aba1 100644 (file)
@@ -19,7 +19,6 @@
 #include <asm/machdep.h>
 #include <asm/pci-bridge.h>
 #include <mm/mmu_decl.h>
-#include <asm/prom.h>
 #include <asm/udbg.h>
 #include <asm/mpic.h>
 #include <soc/fsl/qe/qe.h>
index 2485528..8ba9306 100644 (file)
@@ -16,7 +16,6 @@
 #include <asm/machdep.h>
 #include <asm/pci-bridge.h>
 #include <mm/mmu_decl.h>
-#include <asm/prom.h>
 #include <asm/udbg.h>
 #include <asm/mpic.h>
 
index 1f1af05..5375999 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <linux/fsl/guts.h>
 #include <linux/pci.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <asm/div64.h>
 #include <asm/mpic.h>
index fd9e3e7..bc58a99 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <linux/fsl/guts.h>
 #include <linux/pci.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <asm/div64.h>
 #include <asm/mpic.h>
index 3b9cc49..c04868e 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/fsl_devices.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/of_device.h>
 
@@ -22,7 +23,6 @@
 #include <asm/machdep.h>
 #include <asm/pci-bridge.h>
 #include <mm/mmu_decl.h>
-#include <asm/prom.h>
 #include <asm/udbg.h>
 #include <asm/mpic.h>
 #include "smp.h"
index 4c4d577..64109ad 100644 (file)
@@ -12,6 +12,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/of.h>
 #include <linux/of_fdt.h>
 #include <linux/pgtable.h>
 #include <asm/machdep.h>
index a1c6a78..9c43cf3 100644 (file)
@@ -208,7 +208,7 @@ static int smp_85xx_start_cpu(int cpu)
         * The bootpage and highmem can be accessed via ioremap(), but
         * we need to directly access the spinloop if its in lowmem.
         */
-       ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);
+       ioremappable = *cpu_rel_addr > virt_to_phys(high_memory - 1);
 
        /* Map the spin table */
        if (ioremappable)
index 166b351..09f6447 100644 (file)
@@ -29,7 +29,6 @@
 #include <asm/machdep.h>
 #include <asm/pci-bridge.h>
 #include <asm/mpic.h>
-#include <asm/prom.h>
 #include <mm/mmu_decl.h>
 #include <asm/udbg.h>
 
index 69e917e..6b1fe7b 100644 (file)
@@ -28,7 +28,6 @@
 #include <asm/machdep.h>
 #include <asm/pci-bridge.h>
 #include <asm/mpic.h>
-#include <asm/prom.h>
 #include <mm/mmu_decl.h>
 #include <asm/udbg.h>
 
index 95a1a11..d187f4b 100644 (file)
@@ -26,7 +26,6 @@
 #include <asm/machdep.h>
 #include <asm/pci-bridge.h>
 #include <asm/mpic.h>
-#include <asm/prom.h>
 #include <mm/mmu_decl.h>
 #include <asm/udbg.h>
 
index 397e158..5836e4e 100644 (file)
 #include <linux/delay.h>
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 
 #include <asm/time.h>
 #include <asm/machdep.h>
 #include <asm/pci-bridge.h>
 #include <mm/mmu_decl.h>
-#include <asm/prom.h>
 #include <asm/udbg.h>
 #include <asm/mpic.h>
 
index 44bbbc5..8e358fa 100644 (file)
 #include <linux/kdev_t.h>
 #include <linux/delay.h>
 #include <linux/seq_file.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 
 #include <asm/time.h>
 #include <asm/machdep.h>
 #include <asm/pci-bridge.h>
-#include <asm/prom.h>
 #include <mm/mmu_decl.h>
 #include <asm/udbg.h>
 
@@ -180,7 +180,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
  *
  * This function is called to determine whether the BSP is compatible with the
  * supplied device-tree, which is assumed to be the correct one for the actual
- * board. It is expected thati, in the future, a kernel may support multiple
+ * board. It is expected that, in the future, a kernel may support multiple
  * boards.
  */
 static int __init gef_ppc9a_probe(void)
index 46d6d3d..b5b2733 100644 (file)
 #include <linux/kdev_t.h>
 #include <linux/delay.h>
 #include <linux/seq_file.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 
 #include <asm/time.h>
 #include <asm/machdep.h>
 #include <asm/pci-bridge.h>
-#include <asm/prom.h>
 #include <mm/mmu_decl.h>
 #include <asm/udbg.h>
 
@@ -167,7 +167,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
  *
  * This function is called to determine whether the BSP is compatible with the
  * supplied device-tree, which is assumed to be the correct one for the actual
- * board. It is expected thati, in the future, a kernel may support multiple
+ * board. It is expected that, in the future, a kernel may support multiple
  * boards.
  */
 static int __init gef_sbc310_probe(void)
index acf2c6c..bb4c8e6 100644 (file)
 #include <linux/kdev_t.h>
 #include <linux/delay.h>
 #include <linux/seq_file.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 
 #include <asm/time.h>
 #include <asm/machdep.h>
 #include <asm/pci-bridge.h>
-#include <asm/prom.h>
 #include <mm/mmu_decl.h>
 #include <asm/udbg.h>
 
@@ -157,7 +157,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
  *
  * This function is called to determine whether the BSP is compatible with the
  * supplied device-tree, which is assumed to be the correct one for the actual
- * board. It is expected thati, in the future, a kernel may support multiple
+ * board. It is expected that, in the future, a kernel may support multiple
  * boards.
  */
 static int __init gef_sbc610_probe(void)
index 7733d06..b593b9a 100644 (file)
 #include <linux/delay.h>
 #include <linux/seq_file.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/fsl/guts.h>
 
 #include <asm/time.h>
 #include <asm/machdep.h>
 #include <asm/pci-bridge.h>
-#include <asm/prom.h>
 #include <mm/mmu_decl.h>
 #include <asm/udbg.h>
 
index a6b8ffc..5294394 100644 (file)
@@ -19,7 +19,6 @@
 #include <asm/time.h>
 #include <asm/machdep.h>
 #include <asm/pci-bridge.h>
-#include <asm/prom.h>
 #include <mm/mmu_decl.h>
 #include <asm/udbg.h>
 #include <asm/swiotlb.h>
index ee98361..b2cc32a 100644 (file)
@@ -19,6 +19,7 @@
 
 #include <linux/pci.h>
 #include <linux/of.h>
+#include <linux/of_fdt.h>
 #include <linux/of_platform.h>
 #include <linux/of_address.h>
 #include <asm/udbg.h>
index 27a7c6f..5a098f7 100644 (file)
@@ -3,7 +3,7 @@
 # Makefile for the PowerPC 8xx linux kernel.
 #
 obj-y                  += m8xx_setup.o machine_check.o pic.o
-obj-$(CONFIG_CPM1)             += cpm1.o
+obj-$(CONFIG_CPM1)             += cpm1.o cpm1-ic.o
 obj-$(CONFIG_UCODE_PATCH)      += micropatch.o
 obj-$(CONFIG_MPC885ADS)   += mpc885ads_setup.o
 obj-$(CONFIG_MPC86XADS)   += mpc86xads_setup.o
index 651486a..10e6e4f 100644 (file)
@@ -15,9 +15,9 @@
 #include <asm/cpm1.h>
 #include <asm/fs_pd.h>
 #include <asm/udbg.h>
-#include <asm/prom.h>
 
 #include "mpc8xx.h"
+#include "pic.h"
 
 struct cpm_pin {
        int port, pin, flags;
@@ -104,7 +104,7 @@ define_machine(adder875) {
        .name = "Adder MPC875",
        .probe = adder875_probe,
        .setup_arch = adder875_setup,
-       .init_IRQ = mpc8xx_pics_init,
+       .init_IRQ = mpc8xx_pic_init,
        .get_irq = mpc8xx_get_irq,
        .restart = mpc8xx_restart,
        .calibrate_decr = generic_calibrate_decr,
diff --git a/arch/powerpc/platforms/8xx/cpm1-ic.c b/arch/powerpc/platforms/8xx/cpm1-ic.c
new file mode 100644 (file)
index 0000000..a18fc7c
--- /dev/null
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interrupt controller for the
+ * Communication Processor Module.
+ * Copyright (c) 1997 Dan error_act (dmalek@jlc.net)
+ */
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/platform_device.h>
+#include <asm/cpm1.h>
+
+struct cpm_pic_data {
+       cpic8xx_t __iomem *reg;
+       struct irq_domain *host;
+};
+
+static void cpm_mask_irq(struct irq_data *d)
+{
+       struct cpm_pic_data *data = irq_data_get_irq_chip_data(d);
+       unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
+
+       clrbits32(&data->reg->cpic_cimr, (1 << cpm_vec));
+}
+
+static void cpm_unmask_irq(struct irq_data *d)
+{
+       struct cpm_pic_data *data = irq_data_get_irq_chip_data(d);
+       unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
+
+       setbits32(&data->reg->cpic_cimr, (1 << cpm_vec));
+}
+
+static void cpm_end_irq(struct irq_data *d)
+{
+       struct cpm_pic_data *data = irq_data_get_irq_chip_data(d);
+       unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
+
+       out_be32(&data->reg->cpic_cisr, (1 << cpm_vec));
+}
+
+static struct irq_chip cpm_pic = {
+       .name = "CPM PIC",
+       .irq_mask = cpm_mask_irq,
+       .irq_unmask = cpm_unmask_irq,
+       .irq_eoi = cpm_end_irq,
+};
+
+static int cpm_get_irq(struct irq_desc *desc)
+{
+       struct cpm_pic_data *data = irq_desc_get_handler_data(desc);
+       int cpm_vec;
+
+       /*
+        * Get the vector by setting the ACK bit and then reading
+        * the register.
+        */
+       out_be16(&data->reg->cpic_civr, 1);
+       cpm_vec = in_be16(&data->reg->cpic_civr);
+       cpm_vec >>= 11;
+
+       return irq_linear_revmap(data->host, cpm_vec);
+}
+
+static void cpm_cascade(struct irq_desc *desc)
+{
+       generic_handle_irq(cpm_get_irq(desc));
+}
+
+static int cpm_pic_host_map(struct irq_domain *h, unsigned int virq,
+                           irq_hw_number_t hw)
+{
+       irq_set_chip_data(virq, h->host_data);
+       irq_set_status_flags(virq, IRQ_LEVEL);
+       irq_set_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq);
+       return 0;
+}
+
+static const struct irq_domain_ops cpm_pic_host_ops = {
+       .map = cpm_pic_host_map,
+};
+
+static int cpm_pic_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct resource *res;
+       int irq;
+       struct cpm_pic_data *data;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res)
+               return -ENODEV;
+
+       data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       data->reg = devm_ioremap(dev, res->start, resource_size(res));
+       if (!data->reg)
+               return -ENODEV;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0)
+               return irq;
+
+       /* Initialize the CPM interrupt controller. */
+       out_be32(&data->reg->cpic_cicr,
+                (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) |
+                ((virq_to_hw(irq) / 2) << 13) | CICR_HP_MASK);
+
+       out_be32(&data->reg->cpic_cimr, 0);
+
+       data->host = irq_domain_add_linear(dev->of_node, 64, &cpm_pic_host_ops, data);
+       if (!data->host)
+               return -ENODEV;
+
+       irq_set_handler_data(irq, data);
+       irq_set_chained_handler(irq, cpm_cascade);
+
+       setbits32(&data->reg->cpic_cicr, CICR_IEN);
+
+       return 0;
+}
+
+static const struct of_device_id cpm_pic_match[] = {
+       {
+               .compatible = "fsl,cpm1-pic",
+       }, {
+               .type = "cpm-pic",
+               .compatible = "CPM",
+       }, {},
+};
+
+static struct platform_driver cpm_pic_driver = {
+       .driver = {
+               .name           = "cpm-pic",
+               .of_match_table = cpm_pic_match,
+       },
+       .probe  = cpm_pic_probe,
+};
+
+static int __init cpm_pic_init(void)
+{
+       return platform_driver_register(&cpm_pic_driver);
+}
+arch_initcall(cpm_pic_init);
+
+/*
+ * The CPM can generate the error interrupt when there is a race condition
+ * between generating and masking interrupts.  All we have to do is ACK it
+ * and return.  This is a no-op function so we don't need any special
+ * tests in the interrupt handler.
+ */
+static irqreturn_t cpm_error_interrupt(int irq, void *dev)
+{
+       return IRQ_HANDLED;
+}
+
+static int cpm_error_probe(struct platform_device *pdev)
+{
+       int irq;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0)
+               return irq;
+
+       return request_irq(irq, cpm_error_interrupt, IRQF_NO_THREAD, "error", NULL);
+}
+
+static const struct of_device_id cpm_error_ids[] = {
+       { .compatible = "fsl,cpm1" },
+       { .type = "cpm" },
+       {},
+};
+
+static struct platform_driver cpm_error_driver = {
+       .driver = {
+               .name           = "cpm-error",
+               .of_match_table = cpm_error_ids,
+       },
+       .probe  = cpm_error_probe,
+};
+
+static int __init cpm_error_init(void)
+{
+       return platform_driver_register(&cpm_error_driver);
+}
+subsys_initcall(cpm_error_init);
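With the CPM PIC now probed as a platform driver in the new cpm1-ic.c above, the old mpc8xx_pics_init() wrapper goes away and board ports point .init_IRQ straight at mpc8xx_pic_init(), as the board-file hunks below show. A minimal sketch of the resulting machine description, using hypothetical board names, would look roughly like this (not part of the patch):

/* Hypothetical board description; only the .init_IRQ hookup reflects this series. */
define_machine(example_8xx_board) {
        .name           = "Example MPC8xx board",   /* hypothetical board name */
        .probe          = example_probe,            /* hypothetical probe helper */
        .setup_arch     = example_setup_arch,       /* hypothetical setup helper */
        .init_IRQ       = mpc8xx_pic_init,          /* was mpc8xx_pics_init before this change */
        .get_irq        = mpc8xx_get_irq,
        .restart        = mpc8xx_restart,
        .calibrate_decr = mpc8xx_calibrate_decr,
};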
index c58b6f1..bb38c8d 100644 (file)
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
+#include <linux/of_irq.h>
 #include <asm/page.h>
 #include <asm/8xx_immap.h>
 #include <asm/cpm1.h>
 #include <asm/io.h>
 #include <asm/rheap.h>
-#include <asm/prom.h>
 #include <asm/cpm.h>
 
 #include <asm/fs_pd.h>
 
 cpm8xx_t __iomem *cpmp;  /* Pointer to comm processor space */
 immap_t __iomem *mpc8xx_immr = (void __iomem *)VIRT_IMMR_BASE;
-static cpic8xx_t __iomem *cpic_reg;
-
-static struct irq_domain *cpm_pic_host;
-
-static void cpm_mask_irq(struct irq_data *d)
-{
-       unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
-
-       clrbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec));
-}
-
-static void cpm_unmask_irq(struct irq_data *d)
-{
-       unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
-
-       setbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec));
-}
-
-static void cpm_end_irq(struct irq_data *d)
-{
-       unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
-
-       out_be32(&cpic_reg->cpic_cisr, (1 << cpm_vec));
-}
-
-static struct irq_chip cpm_pic = {
-       .name = "CPM PIC",
-       .irq_mask = cpm_mask_irq,
-       .irq_unmask = cpm_unmask_irq,
-       .irq_eoi = cpm_end_irq,
-};
-
-int cpm_get_irq(void)
-{
-       int cpm_vec;
-
-       /*
-        * Get the vector by setting the ACK bit and then reading
-        * the register.
-        */
-       out_be16(&cpic_reg->cpic_civr, 1);
-       cpm_vec = in_be16(&cpic_reg->cpic_civr);
-       cpm_vec >>= 11;
-
-       return irq_linear_revmap(cpm_pic_host, cpm_vec);
-}
-
-static int cpm_pic_host_map(struct irq_domain *h, unsigned int virq,
-                         irq_hw_number_t hw)
-{
-       pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw);
-
-       irq_set_status_flags(virq, IRQ_LEVEL);
-       irq_set_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq);
-       return 0;
-}
-
-/*
- * The CPM can generate the error interrupt when there is a race condition
- * between generating and masking interrupts.  All we have to do is ACK it
- * and return.  This is a no-op function so we don't need any special
- * tests in the interrupt handler.
- */
-static irqreturn_t cpm_error_interrupt(int irq, void *dev)
-{
-       return IRQ_HANDLED;
-}
-
-static const struct irq_domain_ops cpm_pic_host_ops = {
-       .map = cpm_pic_host_map,
-};
-
-unsigned int __init cpm_pic_init(void)
-{
-       struct device_node *np = NULL;
-       struct resource res;
-       unsigned int sirq = 0, hwirq, eirq;
-       int ret;
-
-       pr_debug("cpm_pic_init\n");
-
-       np = of_find_compatible_node(NULL, NULL, "fsl,cpm1-pic");
-       if (np == NULL)
-               np = of_find_compatible_node(NULL, "cpm-pic", "CPM");
-       if (np == NULL) {
-               printk(KERN_ERR "CPM PIC init: can not find cpm-pic node\n");
-               return sirq;
-       }
-
-       ret = of_address_to_resource(np, 0, &res);
-       if (ret)
-               goto end;
-
-       cpic_reg = ioremap(res.start, resource_size(&res));
-       if (cpic_reg == NULL)
-               goto end;
-
-       sirq = irq_of_parse_and_map(np, 0);
-       if (!sirq)
-               goto end;
-
-       /* Initialize the CPM interrupt controller. */
-       hwirq = (unsigned int)virq_to_hw(sirq);
-       out_be32(&cpic_reg->cpic_cicr,
-           (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) |
-               ((hwirq/2) << 13) | CICR_HP_MASK);
-
-       out_be32(&cpic_reg->cpic_cimr, 0);
-
-       cpm_pic_host = irq_domain_add_linear(np, 64, &cpm_pic_host_ops, NULL);
-       if (cpm_pic_host == NULL) {
-               printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n");
-               sirq = 0;
-               goto end;
-       }
-
-       /* Install our own error handler. */
-       np = of_find_compatible_node(NULL, NULL, "fsl,cpm1");
-       if (np == NULL)
-               np = of_find_node_by_type(NULL, "cpm");
-       if (np == NULL) {
-               printk(KERN_ERR "CPM PIC init: can not find cpm node\n");
-               goto end;
-       }
-
-       eirq = irq_of_parse_and_map(np, 0);
-       if (!eirq)
-               goto end;
-
-       if (request_irq(eirq, cpm_error_interrupt, IRQF_NO_THREAD, "error",
-                       NULL))
-               printk(KERN_ERR "Could not allocate CPM error IRQ!");
-
-       setbits32(&cpic_reg->cpic_cicr, CICR_IEN);
-
-end:
-       of_node_put(np);
-       return sirq;
-}
 
 void __init cpm_reset(void)
 {
@@ -280,6 +141,7 @@ cpm_setbrg(uint brg, uint rate)
                out_be32(bp, (((BRG_UART_CLK_DIV16 / rate) - 1) << 1) |
                              CPM_BRG_EN | CPM_BRG_DIV16);
 }
+EXPORT_SYMBOL(cpm_setbrg);
 
 struct cpm_ioport16 {
        __be16 dir, par, odr_sor, dat, intr;
index ebcf34a..b3b2252 100644 (file)
@@ -20,6 +20,7 @@
 #include <asm/cpm1.h>
 
 #include "mpc8xx.h"
+#include "pic.h"
 
 struct cpm_pin {
        int port, pin, flags;
@@ -166,7 +167,7 @@ define_machine(ep88xc) {
        .name = "Embedded Planet EP88xC",
        .probe = ep88xc_probe,
        .setup_arch = ep88xc_setup_arch,
-       .init_IRQ = mpc8xx_pics_init,
+       .init_IRQ = mpc8xx_pic_init,
        .get_irq        = mpc8xx_get_irq,
        .restart = mpc8xx_restart,
        .calibrate_decr = mpc8xx_calibrate_decr,
index df4d57d..24f358f 100644 (file)
 #include <linux/time.h>
 #include <linux/rtc.h>
 #include <linux/fsl_devices.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
 
 #include <asm/io.h>
 #include <asm/8xx_immap.h>
-#include <asm/prom.h>
 #include <asm/fs_pd.h>
 #include <mm/mmu_decl.h>
 
@@ -28,9 +29,6 @@
 
 #include "mpc8xx.h"
 
-extern int cpm_pic_init(void);
-extern int cpm_get_irq(void);
-
 /* A place holder for time base interrupts, if they are ever enabled. */
 static irqreturn_t timebase_interrupt(int irq, void *dev)
 {
@@ -207,28 +205,3 @@ void __noreturn mpc8xx_restart(char *cmd)
        in_8(&clk_r->res[0]);
        panic("Restart failed\n");
 }
-
-static void cpm_cascade(struct irq_desc *desc)
-{
-       generic_handle_irq(cpm_get_irq());
-}
-
-/* Initialize the internal interrupt controllers.  The number of
- * interrupts supported can vary with the processor type, and the
- * 82xx family can have up to 64.
- * External interrupts can be either edge or level triggered, and
- * need to be initialized by the appropriate driver.
- */
-void __init mpc8xx_pics_init(void)
-{
-       int irq;
-
-       if (mpc8xx_pic_init()) {
-               printk(KERN_ERR "Failed interrupt 8xx controller  initialization\n");
-               return;
-       }
-
-       irq = cpm_pic_init();
-       if (irq)
-               irq_set_chained_handler(irq, cpm_cascade);
-}
index 8d02f5f..03267e4 100644 (file)
@@ -29,6 +29,7 @@
 
 #include "mpc86xads.h"
 #include "mpc8xx.h"
+#include "pic.h"
 
 struct cpm_pin {
        int port, pin, flags;
@@ -140,7 +141,7 @@ define_machine(mpc86x_ads) {
        .name                   = "MPC86x ADS",
        .probe                  = mpc86xads_probe,
        .setup_arch             = mpc86xads_setup_arch,
-       .init_IRQ               = mpc8xx_pics_init,
+       .init_IRQ               = mpc8xx_pic_init,
        .get_irq                = mpc8xx_get_irq,
        .restart                = mpc8xx_restart,
        .calibrate_decr         = mpc8xx_calibrate_decr,
index a0c83c1..b1e39f9 100644 (file)
@@ -42,6 +42,7 @@
 
 #include "mpc885ads.h"
 #include "mpc8xx.h"
+#include "pic.h"
 
 static u32 __iomem *bcsr, *bcsr5;
 
@@ -216,7 +217,7 @@ define_machine(mpc885_ads) {
        .name                   = "Freescale MPC885 ADS",
        .probe                  = mpc885ads_probe,
        .setup_arch             = mpc885ads_setup_arch,
-       .init_IRQ               = mpc8xx_pics_init,
+       .init_IRQ               = mpc8xx_pic_init,
        .get_irq                = mpc8xx_get_irq,
        .restart                = mpc8xx_restart,
        .calibrate_decr         = mpc8xx_calibrate_decr,
index 31cc2ec..79fae33 100644 (file)
@@ -15,7 +15,6 @@ extern void __noreturn mpc8xx_restart(char *cmd);
 extern void mpc8xx_calibrate_decr(void);
 extern int mpc8xx_set_rtc_time(struct rtc_time *tm);
 extern void mpc8xx_get_rtc_time(struct rtc_time *tm);
-extern void mpc8xx_pics_init(void);
 extern unsigned int mpc8xx_get_irq(void);
 
 #endif /* __MPC8xx_H */
index 04a6abf..ea6b0e5 100644 (file)
@@ -4,7 +4,8 @@
 #include <linux/signal.h>
 #include <linux/irq.h>
 #include <linux/dma-mapping.h>
-#include <asm/prom.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <asm/irq.h>
 #include <asm/io.h>
 #include <asm/8xx_immap.h>
@@ -14,8 +15,6 @@
 
 #define PIC_VEC_SPURRIOUS      15
 
-extern int cpm_get_irq(struct pt_regs *regs);
-
 static struct irq_domain *mpc8xx_pic_host;
 static unsigned long mpc8xx_cached_irq_mask;
 static sysconf8xx_t __iomem *siu_reg;
@@ -125,7 +124,7 @@ static const struct irq_domain_ops mpc8xx_pic_host_ops = {
        .xlate = mpc8xx_pic_host_xlate,
 };
 
-int __init mpc8xx_pic_init(void)
+void __init mpc8xx_pic_init(void)
 {
        struct resource res;
        struct device_node *np;
@@ -136,7 +135,7 @@ int __init mpc8xx_pic_init(void)
                np = of_find_node_by_type(NULL, "mpc8xx-pic");
        if (np == NULL) {
                printk(KERN_ERR "Could not find fsl,pq1-pic node\n");
-               return -ENOMEM;
+               return;
        }
 
        ret = of_address_to_resource(np, 0, &res);
@@ -144,20 +143,13 @@ int __init mpc8xx_pic_init(void)
                goto out;
 
        siu_reg = ioremap(res.start, resource_size(&res));
-       if (siu_reg == NULL) {
-               ret = -EINVAL;
+       if (!siu_reg)
                goto out;
-       }
 
        mpc8xx_pic_host = irq_domain_add_linear(np, 64, &mpc8xx_pic_host_ops, NULL);
-       if (mpc8xx_pic_host == NULL) {
+       if (!mpc8xx_pic_host)
                printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n");
-               ret = -ENOMEM;
-               goto out;
-       }
 
-       ret = 0;
 out:
        of_node_put(np);
-       return ret;
 }
index 9fe00ee..c70f1b4 100644 (file)
@@ -4,7 +4,7 @@
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 
-int mpc8xx_pic_init(void);
+void mpc8xx_pic_init(void);
 unsigned int mpc8xx_get_irq(void);
 
 /*
index 4cea8b1..3725d51 100644 (file)
@@ -43,6 +43,7 @@
 #include <asm/udbg.h>
 
 #include "mpc8xx.h"
+#include "pic.h"
 
 struct cpm_pin {
        int port, pin, flags;
@@ -142,7 +143,7 @@ define_machine(tqm8xx) {
        .name                   = "TQM8xx",
        .probe                  = tqm8xx_probe,
        .setup_arch             = tqm8xx_setup_arch,
-       .init_IRQ               = mpc8xx_pics_init,
+       .init_IRQ               = mpc8xx_pic_init,
        .get_irq                = mpc8xx_get_irq,
        .restart                = mpc8xx_restart,
        .calibrate_decr         = mpc8xx_calibrate_decr,
index e2e1fec..9d099dc 100644 (file)
@@ -377,7 +377,6 @@ config SPE
 config PPC_64S_HASH_MMU
        bool "Hash MMU Support"
        depends on PPC_BOOK3S_64
-       select PPC_MM_SLICES
        default y
        help
          Enable support for the Power ISA Hash style MMU. This is implemented
@@ -451,9 +450,6 @@ config PPC_BOOK3E_MMU
        def_bool y
        depends on FSL_BOOKE || PPC_BOOK3E
 
-config PPC_MM_SLICES
-       bool
-
 config PPC_HAVE_PMU_SUPPORT
        bool
 
index 9d252c5..397ce6a 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright 2003 by Hans-Joerg Frieden and Thomas Frieden
  */
 
+#include <linux/irqdomain.h>
 #include <linux/kernel.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
index f9a1615..c0799fb 100644 (file)
@@ -30,7 +30,7 @@
  *
  * where "vas_copy" and "vas_paste" are defined in copy-paste.h.
  * copy/paste returns to the user space directly. So refer NX hardware
- * documententation for exact copy/paste usage and completion / error
+ * documentation for exact copy/paste usage and completion / error
  * conditions.
  */
 
index 354a58c..f3291e9 100644 (file)
 #include <linux/of_platform.h>
 #include <linux/slab.h>
 #include <linux/debugfs.h>
+#include <linux/of_irq.h>
 
 #include <asm/dcr.h>
 #include <asm/machdep.h>
-#include <asm/prom.h>
 
 #include "cell.h"
 
index bda589d..a3ee397 100644 (file)
@@ -9,9 +9,9 @@
 
 #include <linux/input.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <asm/pmi.h>
-#include <asm/prom.h>
 
 static struct input_dev *button_dev;
 static struct platform_device *button_pdev;
index 1c4c53b..316e533 100644 (file)
 #include <linux/percpu.h>
 #include <linux/types.h>
 #include <linux/export.h>
+#include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
 #include <linux/pgtable.h>
 
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/ptrace.h>
 #include <asm/cell-regs.h>
 
@@ -23,7 +23,7 @@
  * Current implementation uses "cpu" nodes. We build our own mapping
  * array of cpu numbers to cpu nodes locally for now to allow interrupt
  * time code to have a fast path rather than call of_get_cpu_node(). If
- * we implement cpu hotplug, we'll have to install an appropriate norifier
+ * we implement cpu hotplug, we'll have to install an appropriate notifier
  * in order to release references to the cpu going away
  */
 static struct cbe_regs_map
index abb5e52..2f45428 100644 (file)
@@ -39,7 +39,6 @@
 #include <linux/stringify.h>
 #include <asm/spu.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/cell-regs.h>
 
 #include "spu_priv1_mmio.h"
index 0873a7a..03ee815 100644 (file)
 
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/export.h>
 #include <linux/percpu.h>
 #include <linux/types.h>
 #include <linux/ioport.h>
 #include <linux/kernel_stat.h>
 #include <linux/pgtable.h>
+#include <linux/of_address.h>
 
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/ptrace.h>
 #include <asm/machdep.h>
 #include <asm/cell-regs.h>
index 25e726b..0ca3efe 100644 (file)
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/irqdomain.h>
 #include <linux/notifier.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/slab.h>
 #include <linux/memblock.h>
@@ -582,7 +584,7 @@ static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
 {
        struct device *dev = data;
 
-       /* We are only intereted in device addition */
+       /* We are only interested in device addition */
        if (action != BUS_NOTIFY_ADD_DEVICE)
                return 0;
 
index dff8d5e..58d967e 100644 (file)
@@ -19,7 +19,6 @@
 
 #include <asm/io.h>
 #include <asm/machdep.h>
-#include <asm/prom.h>
 #include <asm/reg.h>
 #include <asm/cell-regs.h>
 #include <asm/cpu_has_feature.h>
index 4325c05..8d934ea 100644 (file)
 #include <linux/reboot.h>
 #include <linux/kexec.h>
 #include <linux/crash_dump.h>
+#include <linux/of.h>
 
 #include <asm/kexec.h>
 #include <asm/reg.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/rtas.h>
 #include <asm/cell-regs.h>
index edefa78..52de014 100644 (file)
@@ -31,7 +31,6 @@
 #include <asm/mmu.h>
 #include <asm/processor.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/rtas.h>
 #include <asm/pci-bridge.h>
 #include <asm/iommu.h>
index d7ab868..31ce00b 100644 (file)
@@ -28,7 +28,6 @@
 #include <asm/irq.h>
 #include <asm/page.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/smp.h>
 #include <asm/paca.h>
 #include <asm/machdep.h>
index a1c293f..e36ebd8 100644 (file)
@@ -8,6 +8,7 @@
 #undef DEBUG
 
 #include <linux/kernel.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/slab.h>
 #include <linux/io.h>
@@ -81,7 +82,7 @@ static int __init spiderpci_pci_setup_chip(struct pci_controller *phb,
        /*
         * On CellBlade, we can't know that which XDR memory is used by
         * kmalloc() to allocate dummy_page_va.
-        * In order to imporve the performance, the XDR which is used to
+        * In order to improve the performance, the XDR which is used to
         * allocate dummy_page_va is the nearest the spider-pci.
         * We have to select the CBE which is the nearest the spider-pci
         * to allocate memory from the best XDR, but I don't know that
index 8af7586..11df737 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/ioport.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/pgtable.h>
 
-#include <asm/prom.h>
 #include <asm/io.h>
 
 #include "interrupt.h"
index 2eecba3..7bd0b56 100644 (file)
@@ -24,7 +24,6 @@
 #include <asm/spu_priv1.h>
 #include <asm/spu_csa.h>
 #include <asm/xmon.h>
-#include <asm/prom.h>
 #include <asm/kexec.h>
 
 const struct spu_management_ops *spu_management_ops;
index ddf8742..ae09c5a 100644 (file)
 #include <linux/io.h>
 #include <linux/mutex.h>
 #include <linux/device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 
 #include <asm/spu.h>
 #include <asm/spu_priv1.h>
 #include <asm/firmware.h>
-#include <asm/prom.h>
 
 #include "spufs/spufs.h"
 #include "interrupt.h"
@@ -457,7 +458,7 @@ static void __init init_affinity_node(int cbe)
 
                /*
                 * Walk through each phandle in vicinity property of the spu
-                * (tipically two vicinity phandles per spe node)
+                * (typically two vicinity phandles per spe node)
                 */
                for (i = 0; i < (lenp / sizeof(phandle)); i++) {
                        if (vic_handles[i] == avoid_ph)
index 0c2e6bb..d150e39 100644 (file)
@@ -19,7 +19,6 @@
 #include <asm/spu.h>
 #include <asm/spu_priv1.h>
 #include <asm/firmware.h>
-#include <asm/prom.h>
 
 #include "interrupt.h"
 #include "spu_priv1_mmio.h"
index 4c70219..34334c3 100644 (file)
 #include <linux/namei.h>
 #include <linux/pagemap.h>
 #include <linux/poll.h>
+#include <linux/of.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 
-#include <asm/prom.h>
 #include <asm/spu.h>
 #include <asm/spu_priv1.h>
 #include <linux/uaccess.h>
index e820332..dab7807 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/uaccess.h>
-#include <asm/prom.h>
+#include <linux/of.h>
 #include <asm/machdep.h>
 #include <asm/rtas.h>
 #include "chrp.h"
index 76e6256..6f6598e 100644 (file)
@@ -9,11 +9,11 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/pgtable.h>
+#include <linux/of_address.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/hydra.h>
-#include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/sections.h>
 #include <asm/pci-bridge.h>
index 3cfc382..ef4c2b1 100644 (file)
 #include <linux/root_dev.h>
 #include <linux/initrd.h>
 #include <linux/timer.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
+#include <linux/of_irq.h>
 
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/pci-bridge.h>
 #include <asm/dma.h>
 #include <asm/machdep.h>
index e30cd29..ab95155 100644 (file)
@@ -24,7 +24,6 @@
 #include <asm/page.h>
 #include <asm/sections.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/smp.h>
 #include <asm/machdep.h>
 #include <asm/mpic.h>
index acde7bb..d46417e 100644 (file)
 #include <linux/init.h>
 #include <linux/bcd.h>
 #include <linux/ioport.h>
+#include <linux/of_address.h>
 
 #include <asm/io.h>
 #include <asm/nvram.h>
-#include <asm/prom.h>
 #include <asm/sections.h>
 #include <asm/time.h>
 
 #include <platforms/chrp/chrp.h>
 
-extern spinlock_t rtc_lock;
-
 #define NVRAM_AS0  0x74
 #define NVRAM_AS1  0x75
 #define NVRAM_DATA 0x77
index ade928f..5c2575a 100644 (file)
@@ -16,7 +16,6 @@
 
 #include <asm/io.h>
 #include <asm/machdep.h>
-#include <asm/prom.h>
 #include <asm/time.h>
 #include <asm/udbg.h>
 
index 07e71ba..78f2378 100644 (file)
 #include <linux/serial.h>
 #include <linux/tty.h>
 #include <linux/serial_core.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/extable.h>
 
 #include <asm/time.h>
 #include <asm/machdep.h>
-#include <asm/prom.h>
 #include <asm/udbg.h>
 #include <asm/tsi108.h>
 #include <asm/pci-bridge.h>
index eb8342e..1830e1a 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/of_platform.h>
 
 #include <asm/time.h>
-#include <asm/prom.h>
 #include <asm/mpic.h>
 #include <asm/pci-bridge.h>
 
index 9d891bd..0133e17 100644 (file)
@@ -14,8 +14,8 @@
 #include <linux/delay.h>
 #include <linux/serial_reg.h>
 #include <linux/serial_8250.h>
+#include <linux/of.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/termbits.h>
 
 #include "mpc10x.h"
index 9eb9abb..8b2b422 100644 (file)
 #include <linux/serial.h>
 #include <linux/tty.h>
 #include <linux/serial_core.h>
+#include <linux/of_irq.h>
 
 #include <asm/time.h>
 #include <asm/machdep.h>
-#include <asm/prom.h>
 #include <asm/udbg.h>
 #include <asm/tsi108.h>
 #include <asm/pci-bridge.h>
index c06a049..4854cc5 100644 (file)
  * Author: Stephen Chivers <schivers@csc.com>
  */
 
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 
 #include <asm/i8259.h>
 #include <asm/pci-bridge.h>
 #include <asm/mpic.h>
-#include <asm/prom.h>
 #include <mm/mmu_decl.h>
 #include <asm/udbg.h>
 
index e188b90..5f16e80 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/of_platform.h>
 
 #include <asm/time.h>
-#include <asm/prom.h>
 #include <asm/mpic.h>
 #include <asm/pci-bridge.h>
 
index 5aea465..e02bdab 100644 (file)
@@ -7,10 +7,11 @@
  * Copyright (C) 2008,2009 Albert Herranz
  */
 
+#include <linux/of_address.h>
+
 #include <mm/mmu_decl.h>
 
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/udbg.h>
 #include <asm/fixmap.h>
 
index f60ade5..9e03ff8 100644 (file)
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/seq_file.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/memblock.h>
 #include <mm/mmu_decl.h>
 
 #include <asm/io.h>
 #include <asm/machdep.h>
-#include <asm/prom.h>
 #include <asm/time.h>
 #include <asm/udbg.h>
 
index 044a20c..84afae7 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/mc146818rtc.h>
+#include <linux/of_irq.h>
 
 #include <asm/pci-bridge.h>
 
index 37875e4..b911b31 100644 (file)
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/irq.h>
+#include <linux/of_irq.h>
 
 #include <asm/sections.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/pci-bridge.h>
 #include <asm/machdep.h>
 #include <asm/iommu.h>
index 4e9ad5b..c26c379 100644 (file)
 #include <linux/serial.h>
 #include <linux/smp.h>
 #include <linux/bitops.h>
+#include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/memblock.h>
 
 #include <asm/processor.h>
 #include <asm/sections.h>
-#include <asm/prom.h>
 #include <asm/io.h>
 #include <asm/pci-bridge.h>
 #include <asm/iommu.h>
index 78209bb..823e219 100644 (file)
@@ -19,9 +19,9 @@
 #include <linux/interrupt.h>
 #include <linux/mc146818rtc.h>
 #include <linux/bcd.h>
+#include <linux/of_address.h>
 
 #include <asm/sections.h>
-#include <asm/prom.h>
 #include <asm/io.h>
 #include <asm/machdep.h>
 #include <asm/time.h>
index 2642731..1be1f18 100644 (file)
@@ -10,6 +10,8 @@
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/sched.h>
 
 #include <asm/pasemi_dma.h>
index 5be7242..0a38663 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/types.h>
 #include <linux/spinlock.h>
 #include <linux/pci.h>
+#include <linux/of.h>
 #include <asm/iommu.h>
 #include <asm/machdep.h>
 #include <asm/firmware.h>
index 1bf65d0..f859ada 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/of.h>
+#include <linux/of_irq.h>
 #include <linux/i2c.h>
 
 #ifdef CONFIG_I2C_BOARDINFO
index ea1e414..dc18466 100644 (file)
@@ -9,9 +9,9 @@
  */
 
 #include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/msi.h>
 #include <asm/mpic.h>
-#include <asm/prom.h>
 #include <asm/hw_irq.h>
 #include <asm/ppc-pci.h>
 #include <asm/msi_bitmap.h>
index d4b9227..55f0160 100644 (file)
@@ -12,6 +12,7 @@
 
 
 #include <linux/kernel.h>
+#include <linux/of_address.h>
 #include <linux/pci.h>
 
 #include <asm/pci-bridge.h>
index f974bfe..2aef49e 100644 (file)
@@ -18,8 +18,8 @@
 #include <linux/pci.h>
 #include <linux/of_platform.h>
 #include <linux/gfp.h>
+#include <linux/irqdomain.h>
 
-#include <asm/prom.h>
 #include <asm/iommu.h>
 #include <asm/machdep.h>
 #include <asm/i8259.h>
index 32224cb..aeb79a8 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/pmu.h>
 #include <linux/atomic.h>
 #include <linux/export.h>
-#include <asm/prom.h>
 #include <asm/backlight.h>
 
 #define OLD_BACKLIGHT_MAX 15
index d20ef35..72eb99a 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/init.h>
+#include <linux/of_fdt.h>
 #include <generated/utsrelease.h>
 #include <asm/sections.h>
 #include <asm/prom.h>
@@ -243,7 +244,7 @@ static void __init bootx_scan_dt_build_strings(unsigned long base,
                DBG(" detected display ! adding properties names !\n");
                bootx_dt_add_string("linux,boot-display", mem_end);
                bootx_dt_add_string("linux,opened", mem_end);
-               strlcpy(bootx_disp_path, namep, sizeof(bootx_disp_path));
+               strscpy(bootx_disp_path, namep, sizeof(bootx_disp_path));
        }
 
        /* get and store all property names */
index e67c624..5cc958a 100644 (file)
@@ -31,7 +31,6 @@
 #include <asm/keylargo.h>
 #include <asm/uninorth.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
 #include <asm/dbdma.h>
index df89d91..c1c430c 100644 (file)
 #include <linux/mutex.h>
 #include <linux/i2c.h>
 #include <linux/slab.h>
+#include <linux/of_irq.h>
 #include <asm/keylargo.h>
 #include <asm/uninorth.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/smu.h>
 #include <asm/pmac_pfunc.h>
@@ -1472,7 +1472,7 @@ int __init pmac_i2c_init(void)
        smu_i2c_probe();
 #endif
 
-       /* Now add plaform functions for some known devices */
+       /* Now add platform functions for some known devices */
        pmac_i2c_devscan(pmac_i2c_dev_create);
 
        return 0;
index de8fcb6..fe2e024 100644 (file)
@@ -17,9 +17,9 @@
 #include <linux/memblock.h>
 #include <linux/completion.h>
 #include <linux/spinlock.h>
+#include <linux/of_address.h>
 #include <asm/sections.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/nvram.h>
 
@@ -71,7 +71,7 @@ struct core99_header {
 static int nvram_naddrs;
 static volatile unsigned char __iomem *nvram_data;
 static int is_core_99;
-static int core99_bank = 0;
+static int core99_bank;
 static int nvram_partitions[3];
 // XXX Turn that into a sem
 static DEFINE_RAW_SPINLOCK(nv_lock);
index e9abe0f..d71359b 100644 (file)
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/of_pci.h>
 
 #include <asm/sections.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/pci-bridge.h>
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
index 94df0a9..22741dd 100644 (file)
@@ -12,8 +12,8 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/of.h>
 
-#include <asm/prom.h>
 #include <asm/pmac_pfunc.h>
 
 /* Debug */
@@ -685,7 +685,7 @@ static int pmf_add_functions(struct pmf_device *dev, void *driverdata)
        const int plen = strlen(PP_PREFIX);
        int count = 0;
 
-       for (pp = dev->node->properties; pp != 0; pp = pp->next) {
+       for_each_property_of_node(dev->node, pp) {
                const char *name;
                if (strncmp(pp->name, PP_PREFIX, plen) != 0)
                        continue;
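The hunk above swaps an open-coded walk of the node's property list for the for_each_property_of_node() iterator from <linux/of.h>. A small stand-alone sketch of the same pattern, with a hypothetical node pointer and property prefix, might read:

#include <linux/of.h>

/* Hypothetical helper; "example-" is an illustrative prefix, not from the patch. */
static void example_list_props(struct device_node *np)
{
        struct property *pp;

        for_each_property_of_node(np, pp) {
                /* skip properties that do not carry the illustrative prefix */
                if (strncmp(pp->name, "example-", strlen("example-")) != 0)
                        continue;
                pr_debug("matched property: %s\n", pp->name);
        }
}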
index bb05666..71a3f62 100644 (file)
 #include <linux/adb.h>
 #include <linux/minmax.h>
 #include <linux/pmu.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 
 #include <asm/sections.h>
 #include <asm/io.h>
 #include <asm/smp.h>
-#include <asm/prom.h>
 #include <asm/pci-bridge.h>
 #include <asm/time.h>
 #include <asm/pmac_feature.h>
index 974d4b4..f71735e 100644 (file)
@@ -50,7 +50,6 @@
 
 #include <asm/reg.h>
 #include <asm/sections.h>
-#include <asm/prom.h>
 #include <asm/io.h>
 #include <asm/pci-bridge.h>
 #include <asm/ohare.h>
@@ -81,10 +80,6 @@ static int current_root_goodness = -1;
 
 #define DEFAULT_ROOT_DEVICE Root_SDA1  /* sda1 - slightly silly choice */
 
-#ifdef CONFIG_PPC64
-int sccdbg;
-#endif
-
 sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN;
 EXPORT_SYMBOL(sys_ctrler);
 
index da1efdc..0798ece 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/sched/hotplug.h>
 #include <linux/smp.h>
 #include <linux/interrupt.h>
+#include <linux/irqdomain.h>
 #include <linux/kernel_stat.h>
 #include <linux/delay.h>
 #include <linux/init.h>
@@ -39,7 +40,6 @@
 #include <asm/page.h>
 #include <asm/sections.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/smp.h>
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
index 31d6213..4c5790a 100644 (file)
@@ -24,9 +24,9 @@
 #include <linux/interrupt.h>
 #include <linux/hardirq.h>
 #include <linux/rtc.h>
+#include <linux/of_address.h>
 
 #include <asm/sections.h>
-#include <asm/prom.h>
 #include <asm/io.h>
 #include <asm/machdep.h>
 #include <asm/time.h>
index 12158bb..b4756de 100644 (file)
@@ -7,11 +7,11 @@
 #include <linux/adb.h>
 #include <linux/pmu.h>
 #include <linux/cuda.h>
+#include <linux/of.h>
 #include <asm/machdep.h>
 #include <asm/io.h>
 #include <asm/page.h>
 #include <asm/xmon.h>
-#include <asm/prom.h>
 #include <asm/bootx.h>
 #include <asm/errno.h>
 #include <asm/pmac_feature.h>
index 965827a..734df5a 100644 (file)
@@ -5,10 +5,10 @@
  * Copyright (C) 2001-2005 PPC 64 Team, IBM Corp
  */
 #include <linux/types.h>
+#include <linux/of.h>
 #include <asm/udbg.h>
 #include <asm/processor.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/pmac_feature.h>
 
 extern u8 real_readb(volatile u8 __iomem  *addr);
index 89e22c4..7c91318 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/irqdomain.h>
 #include <linux/list.h>
 #include <linux/msi.h>
 #include <linux/of.h>
@@ -390,7 +391,7 @@ static struct eeh_dev *pnv_eeh_probe(struct pci_dev *pdev)
         * should be blocked until PE reset. MMIO access is dropped
         * by hardware certainly. In order to drop PCI config requests,
         * one more flag (EEH_PE_CFG_RESTRICTED) is introduced, which
-        * will be checked in the backend for PE state retrival. If
+        * will be checked in the backend for PE state retrieval. If
         * the PE becomes frozen for the first time and the flag has
         * been set for the PE, we will set EEH_PE_CFG_BLOCKED for
         * that PE to block its config space.
@@ -981,7 +982,7 @@ static int pnv_eeh_do_af_flr(struct pci_dn *pdn, int option)
        case EEH_RESET_FUNDAMENTAL:
                /*
                 * Wait for Transaction Pending bit to clear. A word-aligned
-                * test is used, so we use the conrol offset rather than status
+                * test is used, so we use the control offset rather than status
                 * and shift the test bit to match.
                 */
                pnv_eeh_wait_for_pending(pdn, "AF",
@@ -1048,7 +1049,7 @@ static int pnv_eeh_reset(struct eeh_pe *pe, int option)
         * frozen state during PE reset. However, the good idea here from
         * benh is to keep frozen state before we get PE reset done completely
         * (until BAR restore). With the frozen state, HW drops illegal IO
-        * or MMIO access, which can incur recrusive frozen PE during PE
+        * or MMIO access, which can incur recursive frozen PE during PE
         * reset. The side effect is that EEH core has to clear the frozen
         * state explicitly after BAR restore.
         */
@@ -1095,8 +1096,8 @@ static int pnv_eeh_reset(struct eeh_pe *pe, int option)
         * bus is behind a hotplug slot and it will use the slot provided
         * reset methods to prevent spurious hotplug events during the reset.
         *
-        * Fundemental resets need to be handled internally to EEH since the
-        * PCI core doesn't really have a concept of a fundemental reset,
+        * Fundamental resets need to be handled internally to EEH since the
+        * PCI core doesn't really have a concept of a fundamental reset,
         * mainly because there's no standard way to generate one. Only a
         * few devices require an FRESET so it should be fine.
         */
index a6677a1..6f94b80 100644 (file)
@@ -112,7 +112,7 @@ static int __init pnv_save_sprs_for_deep_states(void)
                        if (rc != 0)
                                return rc;
 
-                       /* Only p8 needs to set extra HID regiters */
+                       /* Only p8 needs to set extra HID registers */
                        if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
                                uint64_t hid1_val = mfspr(SPRN_HID1);
                                uint64_t hid4_val = mfspr(SPRN_HID4);
@@ -1204,7 +1204,7 @@ static void __init pnv_arch300_idle_init(void)
                 * The idle code does not deal with TB loss occurring
                 * in a shallower state than SPR loss, so force it to
                 * behave like SPRs are lost if TB is lost. POWER9 would
-                * never encouter this, but a POWER8 core would if it
+                * never encounter this, but a POWER8 core would if it
                 * implemented the stop instruction. So this is for forward
                 * compatibility.
                 */
index 28b009b..27c9360 100644 (file)
@@ -289,7 +289,7 @@ int pnv_ocxl_get_pasid_count(struct pci_dev *dev, int *count)
         * be used by a function depends on how many functions exist
         * on the device. The NPU needs to be configured to know how
         * many bits are available to PASIDs and how many are to be
-        * used by the function BDF indentifier.
+        * used by the function BDF identifier.
         *
         * We only support one AFU-carrying function for now.
         */
index c8ad057..964f464 100644 (file)
@@ -60,7 +60,7 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
        addr = be64_to_cpu(addr);
        pr_debug("Kernel metadata addr: %llx\n", addr);
        opal_fdm_active = (void *)addr;
-       if (opal_fdm_active->registered_regions == 0)
+       if (be16_to_cpu(opal_fdm_active->registered_regions) == 0)
                return;
 
        ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_BOOT_MEM, &addr);
@@ -95,17 +95,17 @@ static int opal_fadump_unregister(struct fw_dump *fadump_conf);
 static void opal_fadump_update_config(struct fw_dump *fadump_conf,
                                      const struct opal_fadump_mem_struct *fdm)
 {
-       pr_debug("Boot memory regions count: %d\n", fdm->region_cnt);
+       pr_debug("Boot memory regions count: %d\n", be16_to_cpu(fdm->region_cnt));
 
        /*
         * The destination address of the first boot memory region is the
         * destination address of boot memory regions.
         */
-       fadump_conf->boot_mem_dest_addr = fdm->rgn[0].dest;
+       fadump_conf->boot_mem_dest_addr = be64_to_cpu(fdm->rgn[0].dest);
        pr_debug("Destination address of boot memory regions: %#016llx\n",
                 fadump_conf->boot_mem_dest_addr);
 
-       fadump_conf->fadumphdr_addr = fdm->fadumphdr_addr;
+       fadump_conf->fadumphdr_addr = be64_to_cpu(fdm->fadumphdr_addr);
 }
 
 /*
@@ -126,9 +126,9 @@ static void __init opal_fadump_get_config(struct fw_dump *fadump_conf,
        fadump_conf->boot_memory_size = 0;
 
        pr_debug("Boot memory regions:\n");
-       for (i = 0; i < fdm->region_cnt; i++) {
-               base = fdm->rgn[i].src;
-               size = fdm->rgn[i].size;
+       for (i = 0; i < be16_to_cpu(fdm->region_cnt); i++) {
+               base = be64_to_cpu(fdm->rgn[i].src);
+               size = be64_to_cpu(fdm->rgn[i].size);
                pr_debug("\t[%03d] base: 0x%lx, size: 0x%lx\n", i, base, size);
 
                fadump_conf->boot_mem_addr[i] = base;
@@ -143,7 +143,7 @@ static void __init opal_fadump_get_config(struct fw_dump *fadump_conf,
         * Start address of reserve dump area (permanent reservation) for
         * re-registering FADump after dump capture.
         */
-       fadump_conf->reserve_dump_area_start = fdm->rgn[0].dest;
+       fadump_conf->reserve_dump_area_start = be64_to_cpu(fdm->rgn[0].dest);
 
        /*
         * Rarely, but it can so happen that system crashes before all
@@ -155,13 +155,14 @@ static void __init opal_fadump_get_config(struct fw_dump *fadump_conf,
         * Hope the memory that could not be preserved only has pages
         * that are usually filtered out while saving the vmcore.
         */
-       if (fdm->region_cnt > fdm->registered_regions) {
+       if (be16_to_cpu(fdm->region_cnt) > be16_to_cpu(fdm->registered_regions)) {
                pr_warn("Not all memory regions were saved!!!\n");
                pr_warn("  Unsaved memory regions:\n");
-               i = fdm->registered_regions;
-               while (i < fdm->region_cnt) {
+               i = be16_to_cpu(fdm->registered_regions);
+               while (i < be16_to_cpu(fdm->region_cnt)) {
                        pr_warn("\t[%03d] base: 0x%llx, size: 0x%llx\n",
-                               i, fdm->rgn[i].src, fdm->rgn[i].size);
+                               i, be64_to_cpu(fdm->rgn[i].src),
+                               be64_to_cpu(fdm->rgn[i].size));
                        i++;
                }
 
@@ -170,7 +171,7 @@ static void __init opal_fadump_get_config(struct fw_dump *fadump_conf,
        }
 
        fadump_conf->boot_mem_top = (fadump_conf->boot_memory_size + hole_size);
-       fadump_conf->boot_mem_regs_cnt = fdm->region_cnt;
+       fadump_conf->boot_mem_regs_cnt = be16_to_cpu(fdm->region_cnt);
        opal_fadump_update_config(fadump_conf, fdm);
 }
 
@@ -178,35 +179,38 @@ static void __init opal_fadump_get_config(struct fw_dump *fadump_conf,
 static void opal_fadump_init_metadata(struct opal_fadump_mem_struct *fdm)
 {
        fdm->version = OPAL_FADUMP_VERSION;
-       fdm->region_cnt = 0;
-       fdm->registered_regions = 0;
-       fdm->fadumphdr_addr = 0;
+       fdm->region_cnt = cpu_to_be16(0);
+       fdm->registered_regions = cpu_to_be16(0);
+       fdm->fadumphdr_addr = cpu_to_be64(0);
 }
 
 static u64 opal_fadump_init_mem_struct(struct fw_dump *fadump_conf)
 {
        u64 addr = fadump_conf->reserve_dump_area_start;
+       u16 reg_cnt;
        int i;
 
        opal_fdm = __va(fadump_conf->kernel_metadata);
        opal_fadump_init_metadata(opal_fdm);
 
        /* Boot memory regions */
+       reg_cnt = be16_to_cpu(opal_fdm->region_cnt);
        for (i = 0; i < fadump_conf->boot_mem_regs_cnt; i++) {
-               opal_fdm->rgn[i].src    = fadump_conf->boot_mem_addr[i];
-               opal_fdm->rgn[i].dest   = addr;
-               opal_fdm->rgn[i].size   = fadump_conf->boot_mem_sz[i];
+               opal_fdm->rgn[i].src    = cpu_to_be64(fadump_conf->boot_mem_addr[i]);
+               opal_fdm->rgn[i].dest   = cpu_to_be64(addr);
+               opal_fdm->rgn[i].size   = cpu_to_be64(fadump_conf->boot_mem_sz[i]);
 
-               opal_fdm->region_cnt++;
+               reg_cnt++;
                addr += fadump_conf->boot_mem_sz[i];
        }
+       opal_fdm->region_cnt = cpu_to_be16(reg_cnt);
 
        /*
-        * Kernel metadata is passed to f/w and retrieved in capture kerenl.
+        * Kernel metadata is passed to f/w and retrieved in capture kernel.
         * So, use it to save fadump header address instead of calculating it.
         */
-       opal_fdm->fadumphdr_addr = (opal_fdm->rgn[0].dest +
-                                   fadump_conf->boot_memory_size);
+       opal_fdm->fadumphdr_addr = cpu_to_be64(be64_to_cpu(opal_fdm->rgn[0].dest) +
+                                              fadump_conf->boot_memory_size);
 
        opal_fadump_update_config(fadump_conf, opal_fdm);
 
@@ -269,18 +273,21 @@ static u64 opal_fadump_get_bootmem_min(void)
 static int opal_fadump_register(struct fw_dump *fadump_conf)
 {
        s64 rc = OPAL_PARAMETER;
+       u16 registered_regs;
        int i, err = -EIO;
 
-       for (i = 0; i < opal_fdm->region_cnt; i++) {
+       registered_regs = be16_to_cpu(opal_fdm->registered_regions);
+       for (i = 0; i < be16_to_cpu(opal_fdm->region_cnt); i++) {
                rc = opal_mpipl_update(OPAL_MPIPL_ADD_RANGE,
-                                      opal_fdm->rgn[i].src,
-                                      opal_fdm->rgn[i].dest,
-                                      opal_fdm->rgn[i].size);
+                                      be64_to_cpu(opal_fdm->rgn[i].src),
+                                      be64_to_cpu(opal_fdm->rgn[i].dest),
+                                      be64_to_cpu(opal_fdm->rgn[i].size));
                if (rc != OPAL_SUCCESS)
                        break;
 
-               opal_fdm->registered_regions++;
+               registered_regs++;
        }
+       opal_fdm->registered_regions = cpu_to_be16(registered_regs);
 
        switch (rc) {
        case OPAL_SUCCESS:
@@ -291,7 +298,8 @@ static int opal_fadump_register(struct fw_dump *fadump_conf)
        case OPAL_RESOURCE:
                /* If MAX regions limit in f/w is hit, warn and proceed. */
                pr_warn("%d regions could not be registered for MPIPL as MAX limit is reached!\n",
-                       (opal_fdm->region_cnt - opal_fdm->registered_regions));
+                       (be16_to_cpu(opal_fdm->region_cnt) -
+                        be16_to_cpu(opal_fdm->registered_regions)));
                fadump_conf->dump_registered = 1;
                err = 0;
                break;
@@ -312,7 +320,7 @@ static int opal_fadump_register(struct fw_dump *fadump_conf)
         * If some regions were registered before OPAL_MPIPL_ADD_RANGE
         * OPAL call failed, unregister all regions.
         */
-       if ((err < 0) && (opal_fdm->registered_regions > 0))
+       if ((err < 0) && (be16_to_cpu(opal_fdm->registered_regions) > 0))
                opal_fadump_unregister(fadump_conf);
 
        return err;
@@ -328,7 +336,7 @@ static int opal_fadump_unregister(struct fw_dump *fadump_conf)
                return -EIO;
        }
 
-       opal_fdm->registered_regions = 0;
+       opal_fdm->registered_regions = cpu_to_be16(0);
        fadump_conf->dump_registered = 0;
        return 0;
 }
@@ -563,25 +571,26 @@ static void opal_fadump_region_show(struct fw_dump *fadump_conf,
        else
                fdm_ptr = opal_fdm;
 
-       for (i = 0; i < fdm_ptr->region_cnt; i++) {
+       for (i = 0; i < be16_to_cpu(fdm_ptr->region_cnt); i++) {
                /*
                 * Only regions that are registered for MPIPL
                 * would have dump data.
                 */
                if ((fadump_conf->dump_active) &&
-                   (i < fdm_ptr->registered_regions))
-                       dumped_bytes = fdm_ptr->rgn[i].size;
+                   (i < be16_to_cpu(fdm_ptr->registered_regions)))
+                       dumped_bytes = be64_to_cpu(fdm_ptr->rgn[i].size);
 
                seq_printf(m, "DUMP: Src: %#016llx, Dest: %#016llx, ",
-                          fdm_ptr->rgn[i].src, fdm_ptr->rgn[i].dest);
+                          be64_to_cpu(fdm_ptr->rgn[i].src),
+                          be64_to_cpu(fdm_ptr->rgn[i].dest));
                seq_printf(m, "Size: %#llx, Dumped: %#llx bytes\n",
-                          fdm_ptr->rgn[i].size, dumped_bytes);
+                          be64_to_cpu(fdm_ptr->rgn[i].size), dumped_bytes);
        }
 
-       /* Dump is active. Show reserved area start address. */
+       /* Dump is active. Show preserved area start address. */
        if (fadump_conf->dump_active) {
-               seq_printf(m, "\nMemory above %#016lx is reserved for saving crash dump\n",
-                          fadump_conf->reserve_dump_area_start);
+               seq_printf(m, "\nMemory above %#016llx is reserved for saving crash dump\n",
+                          fadump_conf->boot_mem_top);
        }
 }
 
@@ -624,6 +633,7 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
 {
        const __be32 *prop;
        unsigned long dn;
+       __be64 be_addr;
        u64 addr = 0;
        int i, len;
        s64 ret;
@@ -680,13 +690,13 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
        if (!prop)
                return;
 
-       ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_KERNEL, &addr);
-       if ((ret != OPAL_SUCCESS) || !addr) {
+       ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_KERNEL, &be_addr);
+       if ((ret != OPAL_SUCCESS) || !be_addr) {
                pr_err("Failed to get Kernel metadata (%lld)\n", ret);
                return;
        }
 
-       addr = be64_to_cpu(addr);
+       addr = be64_to_cpu(be_addr);
        pr_debug("Kernel metadata addr: %llx\n", addr);
 
        opal_fdm_active = __va(addr);
@@ -697,14 +707,14 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
        }
 
        /* Kernel regions not registered with f/w for MPIPL */
-       if (opal_fdm_active->registered_regions == 0) {
+       if (be16_to_cpu(opal_fdm_active->registered_regions) == 0) {
                opal_fdm_active = NULL;
                return;
        }
 
-       ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_CPU, &addr);
-       if (addr) {
-               addr = be64_to_cpu(addr);
+       ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_CPU, &be_addr);
+       if (be_addr) {
+               addr = be64_to_cpu(be_addr);
                pr_debug("CPU metadata addr: %llx\n", addr);
                opal_cpu_metadata = __va(addr);
        }
index f1e9ecf..3f715ef 100644 (file)
  * OPAL FADump kernel metadata
  *
  * The address of this structure will be registered with f/w for retrieving
- * and processing during crash dump.
+ * in the capture kernel to process the crash dump.
  */
 struct opal_fadump_mem_struct {
        u8      version;
        u8      reserved[3];
-       u16     region_cnt;             /* number of regions */
-       u16     registered_regions;     /* Regions registered for MPIPL */
-       u64     fadumphdr_addr;
+       __be16  region_cnt;             /* number of regions */
+       __be16  registered_regions;     /* Regions registered for MPIPL */
+       __be64  fadumphdr_addr;
        struct opal_mpipl_region        rgn[FADUMP_MAX_MEM_REGS];
 } __packed;
 
@@ -135,7 +135,7 @@ static inline void opal_fadump_read_regs(char *bufp, unsigned int regs_cnt,
        for (i = 0; i < regs_cnt; i++, bufp += reg_entry_size) {
                reg_entry = (struct hdat_fadump_reg_entry *)bufp;
                val = (cpu_endian ? be64_to_cpu(reg_entry->reg_val) :
-                      reg_entry->reg_val);
+                      (u64)(reg_entry->reg_val));
                opal_fadump_set_regval_regnum(regs,
                                              be32_to_cpu(reg_entry->reg_type),
                                              be32_to_cpu(reg_entry->reg_num),
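The opal-fadump changes above store the kernel metadata fields in big-endian form (__be16/__be64) and convert on every access with cpu_to_be*()/be*_to_cpu(). A minimal sketch of that accessor pattern, assuming a hypothetical caller that only needs the region count and the first destination address, is:

/* Hypothetical helper; not part of the patch. */
static u64 example_first_dest(const struct opal_fadump_mem_struct *fdm)
{
        u16 cnt = be16_to_cpu(fdm->region_cnt);         /* __be16 field, firmware byte order */

        if (!cnt)
                return 0;

        return be64_to_cpu(fdm->rgn[0].dest);           /* __be64 field -> CPU-endian value */
}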
index 3fea5da..348a8cd 100644 (file)
@@ -211,7 +211,7 @@ static void disable_core_pmu_counters(void)
                                            get_hard_smp_processor_id(cpu));
                if (rc)
                        pr_err("%s: Failed to stop Core (cpu = %d)\n",
-                               __FUNCTION__, cpu);
+                               __func__, cpu);
        }
        cpus_read_unlock();
 }
index 5390c88..d129d6d 100644 (file)
@@ -197,7 +197,7 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf,
 
                /*
                 * Select access size based on count and alignment and
-                * access type. IO and MEM only support byte acceses,
+                * access type. IO and MEM only support byte accesses,
                 * FW supports all 3.
                 */
                len = 1;
index 1e8e17d..a1754a2 100644 (file)
@@ -82,7 +82,7 @@ static DECLARE_WORK(mem_error_work, mem_error_handler);
 
 /*
  * opal_memory_err_event - notifier handler that queues up the opal message
- * to be preocessed later.
+ * to be processed later.
  */
 static int opal_memory_err_event(struct notifier_block *nb,
                          unsigned long msg_type, void *msg)
index 5317286..7e419de 100644 (file)
@@ -4,6 +4,7 @@
  */
 
 #include <linux/module.h>
+#include <misc/cxl-base.h>
 #include <asm/pnv-pci.h>
 #include <asm/opal.h>
 
index aef22ee..6923f64 100644 (file)
 #include <linux/rculist.h>
 #include <linux/sizes.h>
 #include <linux/debugfs.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 
 #include <asm/sections.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/pci-bridge.h>
 #include <asm/machdep.h>
 #include <asm/msi_bitmap.h>
index 04155aa..fe3d111 100644 (file)
@@ -699,7 +699,7 @@ static int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
                return -ENOSPC;
        }
 
-       /* allocate a contigious block of PEs for our VFs */
+       /* allocate a contiguous block of PEs for our VFs */
        base_pe = pnv_ioda_alloc_pe(phb, num_vfs);
        if (!base_pe) {
                pci_err(pdev, "Unable to allocate PEs for %d VFs\n", num_vfs);
index f705487..233a50e 100644 (file)
@@ -18,7 +18,6 @@
 
 #include <asm/sections.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/pci-bridge.h>
 #include <asm/machdep.h>
 #include <asm/msi_bitmap.h>
index a4048b8..610682c 100644 (file)
@@ -90,7 +90,7 @@ config PS3_VERBOSE_RESULT
        bool "PS3 Verbose LV1 hypercall results" if PS3_ADVANCED
        depends on PPC_PS3
        help
-         Enables more verbose log mesages for LV1 hypercall results.
+         Enables more verbose log messages for LV1 hypercall results.
 
          If in doubt, say N here and reduce the size of the kernel by a
          small amount.
index ef710a7..c27e6cf 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/memblock.h>
 
 #include <asm/machdep.h>
-#include <asm/prom.h>
 #include <asm/udbg.h>
 #include <asm/lv1call.h>
 #include <asm/ps3fb.h>
index 5ce9246..1326de5 100644 (file)
@@ -14,7 +14,6 @@
 
 #include <asm/cell-regs.h>
 #include <asm/firmware.h>
-#include <asm/prom.h>
 #include <asm/udbg.h>
 #include <asm/lv1call.h>
 #include <asm/setup.h>
@@ -364,7 +363,7 @@ static void  __maybe_unused _dma_dump_region(const struct ps3_dma_region *r,
  * @bus_addr: Starting ioc bus address of the area to map.
  * @len: Length in bytes of the area to map.
  * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the
- * list of all chuncks owned by the region.
+ * list of all chunks owned by the region.
  *
  * This implementation uses a very simple dma page manager
  * based on the dma_chunk structure.  This scheme assumes
index cb844e0..b384cd2 100644 (file)
@@ -17,8 +17,6 @@
 #include <linux/of.h>
 #include <linux/slab.h>
 
-#include <asm/prom.h>
-
 #include "platform.h"
 
 enum {
index 3de9145..d749578 100644 (file)
 #include <linux/console.h>
 #include <linux/export.h>
 #include <linux/memblock.h>
+#include <linux/of.h>
 
 #include <asm/machdep.h>
 #include <asm/firmware.h>
 #include <asm/time.h>
 #include <asm/iommu.h>
 #include <asm/udbg.h>
-#include <asm/prom.h>
 #include <asm/lv1call.h>
 #include <asm/ps3gpu.h>
 
index b637bf2..2502e9b 100644 (file)
@@ -601,7 +601,7 @@ static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page,
                iopte_flag |= CBE_IOPTE_PP_W | CBE_IOPTE_SO_RW;
                break;
        default:
-               /* not happned */
+               /* not happened */
                BUG();
        }
        result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size,
index 45a3a30..15ed820 100644 (file)
@@ -475,8 +475,6 @@ static struct notifier_block cmm_reboot_nb = {
 static int cmm_memory_cb(struct notifier_block *self,
                        unsigned long action, void *arg)
 {
-       int ret = 0;
-
        switch (action) {
        case MEM_GOING_OFFLINE:
                mutex_lock(&hotplug_mutex);
@@ -493,7 +491,7 @@ static int cmm_memory_cb(struct notifier_block *self,
                break;
        }
 
-       return notifier_from_errno(ret);
+       return NOTIFY_OK;
 }
 
 static struct notifier_block cmm_mem_nb = {
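
The callback only ever passed ret == 0, and notifier_from_errno(0) maps to NOTIFY_OK, so returning NOTIFY_OK directly is equivalent. A stand-alone sketch of that mapping (constant values copied from include/linux/notifier.h as remembered; verify against the header):

#include <assert.h>

/* Assumed values and definition, mirroring include/linux/notifier.h. */
#define NOTIFY_OK		0x0001
#define NOTIFY_STOP_MASK	0x8000
#define NOTIFY_BAD		(NOTIFY_STOP_MASK | 0x0002)

static int notifier_from_errno(int err)
{
	if (err)
		return NOTIFY_STOP_MASK | (NOTIFY_BAD - err);
	return NOTIFY_OK;
}

int main(void)
{
	/* The old code always passed ret == 0, so it always returned NOTIFY_OK. */
	assert(notifier_from_errno(0) == NOTIFY_OK);
	return 0;
}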
index b1f01ac..498d6ef 100644 (file)
@@ -19,7 +19,6 @@
 #include "of_helpers.h"
 #include "pseries.h"
 
-#include <asm/prom.h>
 #include <asm/machdep.h>
 #include <linux/uaccess.h>
 #include <asm/rtas.h>
@@ -389,7 +388,7 @@ static void pseries_hp_work_fn(struct work_struct *work)
        handle_dlpar_errorlog(hp_work->errlog);
 
        kfree(hp_work->errlog);
-       kfree((void *)work);
+       kfree(work);
 }
 
 void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog)
index 09fafcf..73ae912 100644 (file)
@@ -43,6 +43,8 @@ static int ibm_get_config_addr_info;
 static int ibm_get_config_addr_info2;
 static int ibm_configure_pe;
 
+static void pseries_eeh_init_edev(struct pci_dn *pdn);
+
 static void pseries_pcibios_bus_add_device(struct pci_dev *pdev)
 {
        struct pci_dn *pdn = pci_get_pdn(pdev);
@@ -359,7 +361,7 @@ static struct eeh_pe *pseries_eeh_pe_get_parent(struct eeh_dev *edev)
  * This function takes care of the initialisation and inserts the eeh_dev
  * into the correct eeh_pe. If no eeh_pe exists we'll allocate one.
  */
-void pseries_eeh_init_edev(struct pci_dn *pdn)
+static void pseries_eeh_init_edev(struct pci_dn *pdn)
 {
        struct eeh_pe pe, *parent;
        struct eeh_dev *edev;
@@ -510,7 +512,7 @@ static int pseries_eeh_set_option(struct eeh_pe *pe, int option)
        int ret = 0;
 
        /*
-        * When we're enabling or disabling EEH functioality on
+        * When we're enabling or disabling EEH functionality on
         * the particular PE, the PE config address is possibly
         * unavailable. Therefore, we have to figure it out from
         * the FDT node.
index 91cf234..2e3a317 100644 (file)
@@ -16,7 +16,6 @@
 
 #include <asm/firmware.h>
 #include <asm/machdep.h>
-#include <asm/prom.h>
 #include <asm/sparsemem.h>
 #include <asm/fadump.h>
 #include <asm/drmem.h>
index 309952a..fba6430 100644 (file)
@@ -1429,7 +1429,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
 
                pci->table_group->tables[1] = newtbl;
 
-               /* Keep default DMA window stuct if removed */
+               /* Keep default DMA window struct if removed */
                if (default_win_removed) {
                        tbl->it_size = 0;
                        vfree(tbl->it_map);
index 760581c..937f9c0 100644 (file)
@@ -31,7 +31,6 @@
 #include <asm/mmu_context.h>
 #include <asm/iommu.h>
 #include <asm/tlb.h>
-#include <asm/prom.h>
 #include <asm/cputable.h>
 #include <asm/udbg.h>
 #include <asm/smp.h>
index 2119c00..507dc0b 100644 (file)
@@ -28,7 +28,6 @@
 #include <asm/firmware.h>
 #include <asm/rtas.h>
 #include <asm/time.h>
-#include <asm/prom.h>
 #include <asm/vdso_datapage.h>
 #include <asm/vio.h>
 #include <asm/mmu.h>
index fb2919f..a3a71d3 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/crash_dump.h>
 #include <linux/device.h>
 #include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/msi.h>
 
 #include <asm/rtas.h>
index 69db2ec..cbf1720 100644 (file)
@@ -13,9 +13,9 @@
 #include <linux/slab.h>
 #include <linux/ctype.h>
 #include <linux/uaccess.h>
+#include <linux/of.h>
 #include <asm/nvram.h>
 #include <asm/rtas.h>
-#include <asm/prom.h>
 #include <asm/machdep.h>
 
 /* Max bytes to read/write in one go */
index 3b6800f..6e671c3 100644 (file)
@@ -14,7 +14,6 @@
 
 #include <asm/eeh.h>
 #include <asm/pci-bridge.h>
-#include <asm/prom.h>
 #include <asm/ppc-pci.h>
 #include <asm/pci.h>
 #include "pseries.h"
index 439ac72..3c290b9 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/slab.h>
-#include <asm/prom.h>
 #include <asm/rtas.h>
 #include <asm/firmware.h>
 #include <asm/machdep.h>
index 7f7369f..cad7a0c 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/slab.h>
 #include <linux/of.h>
 
-#include <asm/prom.h>
 #include <asm/machdep.h>
 #include <linux/uaccess.h>
 #include <asm/mmu.h>
index 35f9cb6..b5853e9 100644 (file)
 #include <linux/delay.h>
 #include <linux/seq_file.h>
 #include <linux/crash_dump.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
 
 #include <asm/page.h>
-#include <asm/prom.h>
 #include <asm/rtas.h>
 #include <asm/fadump.h>
 #include <asm/fadump-internal.h>
@@ -108,6 +109,12 @@ static u64 rtas_fadump_init_mem_struct(struct fw_dump *fadump_conf)
        fdm.hpte_region.destination_address = cpu_to_be64(addr);
        addr += fadump_conf->hpte_region_size;
 
+       /*
+        * Align boot memory area destination address to page boundary to
+        * be able to mmap read this area in the vmcore.
+        */
+       addr = PAGE_ALIGN(addr);
+
        /* RMA region section */
        fdm.rmr_region.request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG);
        fdm.rmr_region.source_data_type =
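
PAGE_ALIGN() rounds the running destination address up to the next page boundary, which is what allows the boot memory area to be mmap-read from the vmcore later. A stand-alone sketch of the rounding (a 64K page size is assumed here only because it is common on ppc64; the real macro lives in include/linux/mm.h):

#include <assert.h>
#include <stdint.h>

/* Illustrative only: 64K page size assumed for the example. */
#define EX_PAGE_SIZE		0x10000ULL
#define EX_PAGE_ALIGN(a)	(((a) + EX_PAGE_SIZE - 1) & ~(EX_PAGE_SIZE - 1))

int main(void)
{
	uint64_t addr = 0x12345678ULL;	/* e.g. end of the CPU + HPTE regions */

	addr = EX_PAGE_ALIGN(addr);
	assert(addr == 0x12350000ULL);			/* next 64K boundary */
	assert((addr & (EX_PAGE_SIZE - 1)) == 0);	/* page aligned */
	return 0;
}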
@@ -351,7 +358,7 @@ static int __init rtas_fadump_build_cpu_notes(struct fw_dump *fadump_conf)
                /* Lower 4 bytes of reg_value contains logical cpu id */
                cpu = (be64_to_cpu(reg_entry->reg_value) &
                       RTAS_FADUMP_CPU_ID_MASK);
-               if (fdh && !cpumask_test_cpu(cpu, &fdh->online_mask)) {
+               if (fdh && !cpumask_test_cpu(cpu, &fdh->cpu_mask)) {
                        RTAS_FADUMP_SKIP_TO_NEXT_CPU(reg_entry);
                        continue;
                }
@@ -462,10 +469,10 @@ static void rtas_fadump_region_show(struct fw_dump *fadump_conf,
                   be64_to_cpu(fdm_ptr->rmr_region.source_len),
                   be64_to_cpu(fdm_ptr->rmr_region.bytes_dumped));
 
-       /* Dump is active. Show reserved area start address. */
+       /* Dump is active. Show preserved area start address. */
        if (fdm_active) {
-               seq_printf(m, "\nMemory above %#016lx is reserved for saving crash dump\n",
-                          fadump_conf->reserve_dump_area_start);
+               seq_printf(m, "\nMemory above %#016llx is reserved for saving crash dump\n",
+                          fadump_conf->boot_mem_top);
        }
 }
 
index 955ff8a..a3dab15 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/seq_file.h>
 #include <linux/root_dev.h>
 #include <linux/of.h>
+#include <linux/of_irq.h>
 #include <linux/of_pci.h>
 #include <linux/memblock.h>
 #include <linux/swiotlb.h>
@@ -43,7 +44,6 @@
 #include <asm/mmu.h>
 #include <asm/processor.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/rtas.h>
 #include <asm/pci-bridge.h>
 #include <asm/iommu.h>
@@ -658,7 +658,7 @@ static resource_size_t pseries_get_iov_fw_value(struct pci_dev *dev, int resno,
         */
        num_res = of_read_number(&indexes[NUM_RES_PROPERTY], 1);
        if (resno >= num_res)
-               return 0; /* or an errror */
+               return 0; /* or an error */
 
        i = START_OF_ENTRIES + NEXT_ENTRY * resno;
        switch (value) {
@@ -762,7 +762,7 @@ static void pseries_pci_fixup_iov_resources(struct pci_dev *pdev)
 
        if (!pdev->is_physfn)
                return;
-       /*Firmware must support open sriov otherwise dont configure*/
+       /*Firmware must support open sriov otherwise don't configure*/
        indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
        if (indexes)
                of_pci_parse_iov_addrs(pdev, indexes);
index f474293..fd2174e 100644 (file)
@@ -27,7 +27,6 @@
 #include <asm/irq.h>
 #include <asm/page.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/smp.h>
 #include <asm/paca.h>
 #include <asm/machdep.h>
index ec65586..241c843 100644 (file)
@@ -76,7 +76,7 @@ struct vas_sysfs_entry {
  * Create sysfs interface:
  * /sys/devices/vas/vas0/gzip/default_capabilities
  *     This directory contains the following VAS GZIP capabilities
- *     for the defaule credit type.
+ *     for the default credit type.
  * /sys/devices/vas/vas0/gzip/default_capabilities/nr_total_credits
  *     Total number of default credits assigned to the LPAR which
  *     can be changed with DLPAR operation.
index ec643bb..500a1fc 100644 (file)
@@ -801,7 +801,7 @@ int vas_reconfig_capabilties(u8 type, int new_nr_creds)
        atomic_set(&caps->nr_total_credits, new_nr_creds);
        /*
         * The total number of available credits may be decreased or
-        * inceased with DLPAR operation. Means some windows have to be
+        * increased with DLPAR operation. Means some windows have to be
         * closed / reopened. Hold the vas_pseries_mutex so that the
         * the user space can not open new windows.
         */
index c9f9be4..00ecac2 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/dma-map-ops.h>
 #include <linux/kobject.h>
 #include <linux/kexec.h>
+#include <linux/of_irq.h>
 
 #include <asm/iommu.h>
 #include <asm/dma.h>
index 9e86074..cb9ba4e 100644 (file)
 #include <linux/sched.h>
 #include <linux/signal.h>
 #include <linux/irq.h>
+#include <linux/irqdomain.h>
 
 #include <asm/immap_cpm2.h>
 #include <asm/mpc8260.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/fs_pd.h>
 
 #include "cpm2_pic.h"
index be6b99b..610ca70 100644 (file)
@@ -25,8 +25,8 @@
 #include <linux/memblock.h>
 #include <linux/gfp.h>
 #include <linux/kmemleak.h>
+#include <linux/of_address.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/iommu.h>
 #include <asm/pci-bridge.h>
 #include <asm/machdep.h>
index 22991e1..3093f14 100644 (file)
@@ -8,7 +8,7 @@
 
 #include <linux/kernel.h>
 #include <linux/export.h>
-#include <asm/prom.h>
+#include <linux/of_address.h>
 #include <asm/dcr.h>
 
 #ifdef CONFIG_PPC_DCR_MMIO
index 1985e06..217cea1 100644 (file)
 #include <linux/types.h>
 #include <linux/io.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/mod_devicetable.h>
 #include <linux/syscore_ops.h>
-#include <asm/prom.h>
 #include <asm/fsl_lbc.h>
 
 static DEFINE_SPINLOCK(fsl_lbc_lock);
@@ -37,7 +38,7 @@ EXPORT_SYMBOL(fsl_lbc_ctrl_dev);
  *
  * This function converts a base address of lbc into the right format for the
  * BR register. If the SOC has eLBC then it returns 32bit physical address
- * else it convers a 34bit local bus physical address to correct format of
+ * else it converts a 34bit local bus physical address to correct format of
  * 32bit address for BR register (Example: MPC8641).
  */
 u32 fsl_lbc_addr(phys_addr_t addr_base)
index b3475ae..ef9a599 100644 (file)
 #include <linux/msi.h>
 #include <linux/pci.h>
 #include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/interrupt.h>
+#include <linux/irqdomain.h>
 #include <linux/seq_file.h>
 #include <sysdev/fsl_soc.h>
-#include <asm/prom.h>
 #include <asm/hw_irq.h>
 #include <asm/ppc-pci.h>
 #include <asm/mpic.h>
index a97ce60..1011cfe 100644 (file)
@@ -22,6 +22,8 @@
 #include <linux/interrupt.h>
 #include <linux/memblock.h>
 #include <linux/log2.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/suspend.h>
@@ -29,7 +31,6 @@
 #include <linux/uaccess.h>
 
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/pci-bridge.h>
 #include <asm/ppc-pci.h>
 #include <asm/machdep.h>
@@ -218,7 +219,7 @@ static void setup_pci_atmu(struct pci_controller *hose)
                 * windows have implemented the default target value as 0xf
                 * for CCSR space.In all Freescale legacy devices the target
                 * of 0xf is reserved for local memory space. 9132 Rev1.0
-                * now has local mempry space mapped to target 0x0 instead of
+                * now has local memory space mapped to target 0x0 instead of
                 * 0xf. Hence adding a workaround to remove the target 0xf
                 * defined for memory space from Inbound window attributes.
                 */
index 90ad161..78118c1 100644 (file)
@@ -31,7 +31,6 @@
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/time.h>
-#include <asm/prom.h>
 #include <asm/machdep.h>
 #include <sysdev/fsl_soc.h>
 #include <mm/mmu_decl.h>
index 02553a8..a6c4246 100644 (file)
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/spinlock.h>
 
 #include <asm/byteorder.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/irq.h>
 
 #include "ge_pic.h"
@@ -150,7 +152,7 @@ static struct irq_chip gef_pic_chip = {
 };
 
 
-/* When an interrupt is being configured, this call allows some flexibilty
+/* When an interrupt is being configured, this call allows some flexibility
  * in deciding which irq_chip structure is used
  */
 static int gef_pic_host_map(struct irq_domain *h, unsigned int virq,
index aaba0b8..fd2f94a 100644 (file)
@@ -9,9 +9,9 @@
 #include <linux/kernel.h>
 #include <linux/pci.h>
 #include <linux/init.h>
+#include <linux/of.h>
 
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/pci-bridge.h>
 #include <asm/grackle.h>
 
index 3b1ae98..06e3914 100644 (file)
@@ -6,11 +6,11 @@
 
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
+#include <linux/irqdomain.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
 #include <asm/io.h>
 #include <asm/i8259.h>
-#include <asm/prom.h>
 
 static volatile void __iomem *pci_intack; /* RO, gives us the irq vector */
 
index 09b3661..1aacb40 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/init.h>
 
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/pci-bridge.h>
 #include <asm/machdep.h>
 
index 3f10c9f..5f69e2d 100644 (file)
 #include <linux/device.h>
 #include <linux/spinlock.h>
 #include <linux/fsl_devices.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/ipic.h>
 
 #include "ipic.h"
index 628f9b7..eb48210 100644 (file)
 #include <linux/fs.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/of_address.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
 
 #include <asm/machdep.h>
 #include <asm/nvram.h>
-#include <asm/prom.h>
 
 static void __iomem *mmio_nvram_start;
 static long mmio_nvram_len;
index dbcbaa4..9a9381f 100644 (file)
@@ -30,6 +30,8 @@
 #include <linux/syscore_ops.h>
 #include <linux/ratelimit.h>
 #include <linux/pgtable.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 
 #include <asm/ptrace.h>
 #include <asm/signal.h>
index 36ec0bd..698fefa 100644 (file)
@@ -7,12 +7,13 @@
  */
 
 #include <linux/list.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/export.h>
 #include <linux/slab.h>
-#include <asm/prom.h>
 #include <asm/hw_irq.h>
 #include <asm/ppc-pci.h>
 #include <asm/mpic_msgr.h>
@@ -99,7 +100,7 @@ void mpic_msgr_disable(struct mpic_msgr *msgr)
 EXPORT_SYMBOL_GPL(mpic_msgr_disable);
 
 /* The following three functions are used to compute the order and number of
- * the message register blocks.  They are clearly very inefficent.  However,
+ * the message register blocks.  They are clearly very inefficient.  However,
  * they are called *only* a few times during device initialization.
  */
 static unsigned int mpic_msgr_number_of_blocks(void)
index f412d6a..34246c8 100644 (file)
@@ -4,10 +4,11 @@
  */
 
 #include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
 #include <linux/bitmap.h>
 #include <linux/msi.h>
 #include <asm/mpic.h>
-#include <asm/prom.h>
 #include <asm/hw_irq.h>
 #include <asm/ppc-pci.h>
 #include <asm/msi_bitmap.h>
@@ -37,7 +38,7 @@ static int __init mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
        /* Reserve source numbers we know are reserved in the HW.
         *
         * This is a bit of a mix of U3 and U4 reserves but that's going
-        * to work fine, we have plenty enugh numbers left so let's just
+        * to work fine, we have plenty enough numbers left so let's just
         * mark anything we don't like reserved.
         */
        for (i = 0;   i < 8;   i++)
index 444e9ce..b2f0a73 100644 (file)
@@ -255,7 +255,7 @@ EXPORT_SYMBOL(mpic_start_timer);
 
 /**
  * mpic_stop_timer - stop hardware timer
- * @handle: the timer to be stoped
+ * @handle: the timer to be stopped
  *
  * The timer periodically generates an interrupt. Unless user stops the timer.
  */
index 3f4841d..1d8cfdf 100644 (file)
@@ -5,9 +5,9 @@
  */
 
 #include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/msi.h>
 #include <asm/mpic.h>
-#include <asm/prom.h>
 #include <asm/hw_irq.h>
 #include <asm/ppc-pci.h>
 #include <asm/msi_bitmap.h>
@@ -78,7 +78,7 @@ static u64 find_u4_magic_addr(struct pci_dev *pdev, unsigned int hwirq)
 
        /* U4 PCIe MSIs need to write to the special register in
         * the bridge that generates interrupts. There should be
-        * theorically a register at 0xf8005000 where you just write
+        * theoretically a register at 0xf8005000 where you just write
         * the MSI number and that triggers the right interrupt, but
         * unfortunately, this is busted in HW, the bridge endian swaps
         * the value and hits the wrong nibble in the register.
index fdd3e17..0b6e37f 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/kmemleak.h>
 #include <linux/bitmap.h>
 #include <linux/memblock.h>
+#include <linux/of.h>
 #include <asm/msi_bitmap.h>
 #include <asm/setup.h>
 
index 9c8744e..9dabb50 100644 (file)
 #include <linux/spinlock.h>
 #include <linux/module.h>
 #include <linux/workqueue.h>
+#include <linux/of_address.h>
 #include <linux/of_device.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 
 #include <asm/io.h>
 #include <asm/pmi.h>
-#include <asm/prom.h>
 
 struct pmi_data {
        struct list_head        handler;
index af0f9be..47cc87b 100644 (file)
@@ -14,8 +14,8 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mc146818rtc.h>
+#include <linux/of_address.h>
 
-#include <asm/prom.h>
 
 static int  __init add_rtc(void)
 {
index 9e13fb3..3005139 100644 (file)
 #include <linux/device.h>
 #include <linux/etherdevice.h>
 #include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/of_net.h>
 #include <asm/tsi108.h>
 
 #include <linux/atomic.h>
 #include <asm/io.h>
 #include <asm/irq.h>
-#include <asm/prom.h>
 #include <mm/mmu_decl.h>
 
 #undef DEBUG
index 1070220..5af4c35 100644 (file)
@@ -12,7 +12,9 @@
 #include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/interrupt.h>
+#include <linux/of_address.h>
 
 #include <asm/byteorder.h>
 #include <asm/io.h>
@@ -23,7 +25,6 @@
 #include <asm/tsi108.h>
 #include <asm/tsi108_pci.h>
 #include <asm/tsi108_irq.h>
-#include <asm/prom.h>
 
 #undef DEBUG
 #ifdef DEBUG
index 7d13d2e..edc17b6 100644 (file)
@@ -6,15 +6,16 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/smp.h>
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/cpu.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/spinlock.h>
 #include <linux/module.h>
 
-#include <asm/prom.h>
 #include <asm/io.h>
 #include <asm/smp.h>
 #include <asm/irq.h>
index dec7d93..e33b77d 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/msi.h>
 #include <linux/list.h>
 
-#include <asm/prom.h>
 #include <asm/smp.h>
 #include <asm/machdep.h>
 #include <asm/irq.h>
index c4d95d8..6cfbb4f 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/spinlock.h>
 #include <linux/msi.h>
 
-#include <asm/prom.h>
 #include <asm/smp.h>
 #include <asm/machdep.h>
 #include <asm/irq.h>
index b9da317..9e7007f 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/spinlock.h>
 #include <linux/msi.h>
 
-#include <asm/prom.h>
 #include <asm/smp.h>
 #include <asm/machdep.h>
 #include <asm/irq.h>
index f3fb2a1..ce76f97 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/threads.h>
 #include <linux/kernel.h>
 #include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/debugfs.h>
 #include <linux/smp.h>
 #include <linux/interrupt.h>
@@ -17,7 +18,6 @@
 #include <linux/spinlock.h>
 #include <linux/delay.h>
 
-#include <asm/prom.h>
 #include <asm/io.h>
 #include <asm/smp.h>
 #include <asm/machdep.h>
index bb5bda6..e06d015 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/threads.h>
 #include <linux/kernel.h>
 #include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/debugfs.h>
 #include <linux/smp.h>
 #include <linux/interrupt.h>
@@ -21,7 +22,6 @@
 #include <linux/msi.h>
 #include <linux/vmalloc.h>
 
-#include <asm/prom.h>
 #include <asm/io.h>
 #include <asm/smp.h>
 #include <asm/machdep.h>
index f940428..d25d8c6 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/seq_file.h>
 #include <linux/init.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/delay.h>
@@ -21,7 +22,6 @@
 #include <linux/kmemleak.h>
 
 #include <asm/machdep.h>
-#include <asm/prom.h>
 #include <asm/io.h>
 #include <asm/smp.h>
 #include <asm/irq.h>
@@ -617,7 +617,7 @@ bool __init xive_native_init(void)
 
        xive_tima_os = r.start;
 
-       /* Grab size of provisionning pages */
+       /* Grab size of provisioning pages */
        xive_parse_provisioning(np);
 
        /* Switch the XIVE to exploitation mode */
index 29456c2..1669ddb 100644 (file)
@@ -11,6 +11,8 @@
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/cpumask.h>
index dfb8081..0774d71 100644 (file)
@@ -408,7 +408,7 @@ const struct powerpc_operand powerpc_operands[] =
 #define FXM4 FXM + 1
   { 0xff, 12, insert_fxm, extract_fxm,
     PPC_OPERAND_OPTIONAL | PPC_OPERAND_OPTIONAL_VALUE},
-  /* If the FXM4 operand is ommitted, use the sentinel value -1.  */
+  /* If the FXM4 operand is omitted, use the sentinel value -1.  */
   { -1, -1, NULL, NULL, 0},
 
   /* The IMM20 field in an LI instruction.  */
index fd72753..2b7e996 100644 (file)
@@ -31,7 +31,6 @@
 #include <asm/ptrace.h>
 #include <asm/smp.h>
 #include <asm/string.h>
-#include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/xmon.h>
 #include <asm/processor.h>
@@ -921,9 +920,9 @@ static void insert_bpts(void)
                        bp->enabled = 0;
                        continue;
                }
-               if (IS_MTMSRD(instr) || IS_RFID(instr)) {
-                       printf("Breakpoint at %lx is on an mtmsrd or rfid "
-                              "instruction, disabling it\n", bp->address);
+               if (!can_single_step(ppc_inst_val(instr))) {
+                       printf("Breakpoint at %lx is on an instruction that can't be single stepped, disabling it\n",
+                                       bp->address);
                        bp->enabled = 0;
                        continue;
                }
@@ -1470,9 +1469,8 @@ static long check_bp_loc(unsigned long addr)
                printf("Can't read instruction at address %lx\n", addr);
                return 0;
        }
-       if (IS_MTMSRD(instr) || IS_RFID(instr)) {
-               printf("Breakpoints may not be placed on mtmsrd or rfid "
-                      "instructions\n");
+       if (!can_single_step(ppc_inst_val(instr))) {
+               printf("Breakpoints may not be placed on instructions that can't be single stepped\n");
                return 0;
        }
        return 1;
@@ -2024,7 +2022,7 @@ static void dump_206_sprs(void)
        if (!cpu_has_feature(CPU_FTR_ARCH_206))
                return;
 
-       /* Actually some of these pre-date 2.06, but whatevs */
+       /* Actually some of these pre-date 2.06, but whatever */
 
        printf("srr0   = %.16lx  srr1  = %.16lx dsisr  = %.8lx\n",
                mfspr(SPRN_SRR0), mfspr(SPRN_SRR1), mfspr(SPRN_DSISR));
index 34592d0..f6ef358 100644 (file)
@@ -38,7 +38,7 @@ config SOC_VIRT
        select SIFIVE_PLIC
        select PM_GENERIC_DOMAINS if PM
        select PM_GENERIC_DOMAINS_OF if PM && OF
-       select RISCV_SBI_CPUIDLE if CPU_IDLE
+       select RISCV_SBI_CPUIDLE if CPU_IDLE && RISCV_SBI
        help
          This enables support for QEMU Virt Machine.
 
index 78da839..cd4bbce 100644 (file)
@@ -193,9 +193,6 @@ struct kvm_vcpu_arch {
 
        /* Don't run the VCPU (blocked) */
        bool pause;
-
-       /* SRCU lock index for in-kernel run loop */
-       int srcu_idx;
 };
 
 static inline void kvm_arch_hardware_unsetup(void) {}
index 6241660..7461f96 100644 (file)
@@ -38,14 +38,16 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
                       sizeof(kvm_vcpu_stats_desc),
 };
 
-#define KVM_RISCV_ISA_ALLOWED  (riscv_isa_extension_mask(a) | \
-                                riscv_isa_extension_mask(c) | \
-                                riscv_isa_extension_mask(d) | \
-                                riscv_isa_extension_mask(f) | \
-                                riscv_isa_extension_mask(i) | \
-                                riscv_isa_extension_mask(m) | \
-                                riscv_isa_extension_mask(s) | \
-                                riscv_isa_extension_mask(u))
+#define KVM_RISCV_ISA_DISABLE_ALLOWED  (riscv_isa_extension_mask(d) | \
+                                       riscv_isa_extension_mask(f))
+
+#define KVM_RISCV_ISA_DISABLE_NOT_ALLOWED      (riscv_isa_extension_mask(a) | \
+                                               riscv_isa_extension_mask(c) | \
+                                               riscv_isa_extension_mask(i) | \
+                                               riscv_isa_extension_mask(m))
+
+#define KVM_RISCV_ISA_ALLOWED (KVM_RISCV_ISA_DISABLE_ALLOWED | \
+                              KVM_RISCV_ISA_DISABLE_NOT_ALLOWED)
 
 static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
 {
@@ -219,7 +221,8 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
        switch (reg_num) {
        case KVM_REG_RISCV_CONFIG_REG(isa):
                if (!vcpu->arch.ran_atleast_once) {
-                       vcpu->arch.isa = reg_val;
+                       /* Ignore the disable request for these extensions */
+                       vcpu->arch.isa = reg_val | KVM_RISCV_ISA_DISABLE_NOT_ALLOWED;
                        vcpu->arch.isa &= riscv_isa_extension_base(NULL);
                        vcpu->arch.isa &= KVM_RISCV_ISA_ALLOWED;
                        kvm_riscv_vcpu_fp_reset(vcpu);
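
Splitting the single allowed mask lets userspace disable only the FP extensions (d, f), while requests to disable a, c, i or m are silently ignored: OR-ing KVM_RISCV_ISA_DISABLE_NOT_ALLOWED back into the requested value forces those bits on before the host and allowed masks are applied. A stand-alone sketch of the bit arithmetic (the extension-to-bit mapping is simplified for illustration):

#include <assert.h>
#include <stdint.h>

/* Illustrative single-letter extension bits (bit = letter - 'a'),
 * standing in for riscv_isa_extension_mask(). */
#define EXT(x)			(1UL << ((x) - 'a'))
#define DISABLE_ALLOWED		(EXT('d') | EXT('f'))
#define DISABLE_NOT_ALLOWED	(EXT('a') | EXT('c') | EXT('i') | EXT('m'))
#define ISA_ALLOWED		(DISABLE_ALLOWED | DISABLE_NOT_ALLOWED)

int main(void)
{
	uint64_t host_isa = ISA_ALLOWED;		/* what the host supports */
	uint64_t reg_val  = EXT('i') | EXT('m');	/* guest tries to drop a, c, d, f */

	/* Disable requests for a/c/i/m are ignored; d/f stay cleared. */
	uint64_t isa = (reg_val | DISABLE_NOT_ALLOWED) & host_isa & ISA_ALLOWED;

	assert(isa == (EXT('a') | EXT('c') | EXT('i') | EXT('m')));
	return 0;
}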
@@ -653,8 +656,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
                                     vcpu->arch.isa);
        kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);
 
-       csr_write(CSR_HGATP, 0);
-
        csr->vsstatus = csr_read(CSR_VSSTATUS);
        csr->vsie = csr_read(CSR_VSIE);
        csr->vstvec = csr_read(CSR_VSTVEC);
@@ -726,13 +727,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
        /* Mark this VCPU ran at least once */
        vcpu->arch.ran_atleast_once = true;
 
-       vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+       kvm_vcpu_srcu_read_lock(vcpu);
 
        /* Process MMIO value returned from user-space */
        if (run->exit_reason == KVM_EXIT_MMIO) {
                ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
                if (ret) {
-                       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+                       kvm_vcpu_srcu_read_unlock(vcpu);
                        return ret;
                }
        }
@@ -741,13 +742,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
        if (run->exit_reason == KVM_EXIT_RISCV_SBI) {
                ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
                if (ret) {
-                       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+                       kvm_vcpu_srcu_read_unlock(vcpu);
                        return ret;
                }
        }
 
        if (run->immediate_exit) {
-               srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+               kvm_vcpu_srcu_read_unlock(vcpu);
                return -EINTR;
        }
 
@@ -786,7 +787,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                 */
                vcpu->mode = IN_GUEST_MODE;
 
-               srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+               kvm_vcpu_srcu_read_unlock(vcpu);
                smp_mb__after_srcu_read_unlock();
 
                /*
@@ -804,7 +805,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                        vcpu->mode = OUTSIDE_GUEST_MODE;
                        local_irq_enable();
                        preempt_enable();
-                       vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+                       kvm_vcpu_srcu_read_lock(vcpu);
                        continue;
                }
 
@@ -848,7 +849,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 
                preempt_enable();
 
-               vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+               kvm_vcpu_srcu_read_lock(vcpu);
 
                ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
        }
@@ -857,7 +858,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 
        vcpu_put(vcpu);
 
-       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+       kvm_vcpu_srcu_read_unlock(vcpu);
 
        return ret;
 }
index aa8af12..a72c15d 100644 (file)
@@ -456,9 +456,9 @@ static int stage2_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
 void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
 {
        if (!kvm_arch_vcpu_runnable(vcpu)) {
-               srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+               kvm_vcpu_srcu_read_unlock(vcpu);
                kvm_vcpu_halt(vcpu);
-               vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+               kvm_vcpu_srcu_read_lock(vcpu);
                kvm_clear_request(KVM_REQ_UNHALT, vcpu);
        }
 }
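
These hunks swap open-coded srcu_read_lock()/srcu_read_unlock() pairs, each architecture carrying its own srcu_idx, for common helpers that keep the index inside struct kvm_vcpu. Roughly, the wrappers amount to the following sketch (the field name and the lock-balance checks in the real include/linux/kvm_host.h differ slightly):

/* Sketch only; see include/linux/kvm_host.h for the authoritative helpers. */
static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu)
{
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
}

static inline void kvm_vcpu_srcu_read_unlock(struct kvm_vcpu *vcpu)
{
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
}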
index 4449a97..d4308c5 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/err.h>
 #include <linux/kvm_host.h>
 #include <linux/uaccess.h>
+#include <asm/hwcap.h>
 
 #ifdef CONFIG_FPU
 void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
index 9535bea..b0793dc 100644 (file)
@@ -718,6 +718,7 @@ retry:
                if (!check_l4) {
                        disable_pgtable_l5();
                        check_l4 = true;
+                       memset(early_pg_dir, 0, PAGE_SIZE);
                        goto retry;
                }
                disable_pgtable_l4();
index 77b5a03..e084c72 100644 (file)
@@ -255,6 +255,10 @@ config HAVE_MARCH_Z15_FEATURES
        def_bool n
        select HAVE_MARCH_Z14_FEATURES
 
+config HAVE_MARCH_Z16_FEATURES
+       def_bool n
+       select HAVE_MARCH_Z15_FEATURES
+
 choice
        prompt "Processor type"
        default MARCH_Z196
@@ -312,6 +316,14 @@ config MARCH_Z15
          and 8561 series). The kernel will be slightly faster but will not
          work on older machines.
 
+config MARCH_Z16
+       bool "IBM z16"
+       select HAVE_MARCH_Z16_FEATURES
+       depends on $(cc-option,-march=z16)
+       help
+         Select this to enable optimizations for IBM z16 (3931 and
+         3932 series).
+
 endchoice
 
 config MARCH_Z10_TUNE
@@ -332,6 +344,9 @@ config MARCH_Z14_TUNE
 config MARCH_Z15_TUNE
        def_bool TUNE_Z15 || MARCH_Z15 && TUNE_DEFAULT
 
+config MARCH_Z16_TUNE
+       def_bool TUNE_Z16 || MARCH_Z16 && TUNE_DEFAULT
+
 choice
        prompt "Tune code generation"
        default TUNE_DEFAULT
@@ -372,6 +387,10 @@ config TUNE_Z15
        bool "IBM z15"
        depends on $(cc-option,-mtune=z15)
 
+config TUNE_Z16
+       bool "IBM z16"
+       depends on $(cc-option,-mtune=z16)
+
 endchoice
 
 config 64BIT
index 7a65bca..e441b60 100644 (file)
@@ -42,6 +42,7 @@ mflags-$(CONFIG_MARCH_ZEC12)  := -march=zEC12
 mflags-$(CONFIG_MARCH_Z13)    := -march=z13
 mflags-$(CONFIG_MARCH_Z14)    := -march=z14
 mflags-$(CONFIG_MARCH_Z15)    := -march=z15
+mflags-$(CONFIG_MARCH_Z16)    := -march=z16
 
 export CC_FLAGS_MARCH := $(mflags-y)
 
@@ -54,6 +55,7 @@ cflags-$(CONFIG_MARCH_ZEC12_TUNE)     += -mtune=zEC12
 cflags-$(CONFIG_MARCH_Z13_TUNE)                += -mtune=z13
 cflags-$(CONFIG_MARCH_Z14_TUNE)                += -mtune=z14
 cflags-$(CONFIG_MARCH_Z15_TUNE)                += -mtune=z15
+cflags-$(CONFIG_MARCH_Z16_TUNE)                += -mtune=z16
 
 cflags-y += -Wa,-I$(srctree)/arch/$(ARCH)/include
 
index 498bed9..f6dfde5 100644 (file)
@@ -499,11 +499,13 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_CHELSIO is not set
 # CONFIG_NET_VENDOR_CISCO is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_DAVICOM is not set
 # CONFIG_NET_VENDOR_DEC is not set
 # CONFIG_NET_VENDOR_DLINK is not set
 # CONFIG_NET_VENDOR_EMULEX is not set
 # CONFIG_NET_VENDOR_ENGLEDER is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_FUNGIBLE is not set
 # CONFIG_NET_VENDOR_GOOGLE is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
@@ -588,13 +590,13 @@ CONFIG_MLX5_INFINIBAND=m
 CONFIG_SYNC_FILE=y
 CONFIG_VFIO=m
 CONFIG_VFIO_PCI=m
+CONFIG_MLX5_VFIO_PCI=m
 CONFIG_VFIO_MDEV=m
 CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=y
 CONFIG_VHOST_NET=m
 CONFIG_VHOST_VSOCK=m
-# CONFIG_SURFACE_PLATFORMS is not set
 CONFIG_S390_CCW_IOMMU=y
 CONFIG_S390_AP_IOMMU=y
 CONFIG_EXT4_FS=y
@@ -690,6 +692,7 @@ CONFIG_ENCRYPTED_KEYS=m
 CONFIG_KEY_NOTIFICATIONS=y
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_FORTIFY_SOURCE=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
@@ -733,6 +736,7 @@ CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
@@ -786,7 +790,6 @@ CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
-CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_INFO_DWARF4=y
 CONFIG_DEBUG_INFO_BTF=y
 CONFIG_GDB_SCRIPTS=y
@@ -814,6 +817,7 @@ CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
 CONFIG_DEBUG_PER_CPU_MAPS=y
 CONFIG_KFENCE=y
+CONFIG_KFENCE_DEFERRABLE=y
 CONFIG_KFENCE_STATIC_KEYS=y
 CONFIG_DEBUG_SHIRQ=y
 CONFIG_PANIC_ON_OOPS=y
index 61e36b9..706df3a 100644 (file)
@@ -490,11 +490,13 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_CHELSIO is not set
 # CONFIG_NET_VENDOR_CISCO is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_DAVICOM is not set
 # CONFIG_NET_VENDOR_DEC is not set
 # CONFIG_NET_VENDOR_DLINK is not set
 # CONFIG_NET_VENDOR_EMULEX is not set
 # CONFIG_NET_VENDOR_ENGLEDER is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_FUNGIBLE is not set
 # CONFIG_NET_VENDOR_GOOGLE is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
@@ -578,13 +580,13 @@ CONFIG_MLX5_INFINIBAND=m
 CONFIG_SYNC_FILE=y
 CONFIG_VFIO=m
 CONFIG_VFIO_PCI=m
+CONFIG_MLX5_VFIO_PCI=m
 CONFIG_VFIO_MDEV=m
 CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=y
 CONFIG_VHOST_NET=m
 CONFIG_VHOST_VSOCK=m
-# CONFIG_SURFACE_PLATFORMS is not set
 CONFIG_S390_CCW_IOMMU=y
 CONFIG_S390_AP_IOMMU=y
 CONFIG_EXT4_FS=y
@@ -720,6 +722,7 @@ CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
@@ -772,7 +775,6 @@ CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
-CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_INFO_DWARF4=y
 CONFIG_DEBUG_INFO_BTF=y
 CONFIG_GDB_SCRIPTS=y
index c55c668..a87fcc4 100644 (file)
@@ -26,6 +26,7 @@ CONFIG_CRASH_DUMP=y
 # CONFIG_S390_GUEST is not set
 # CONFIG_SECCOMP is not set
 # CONFIG_GCC_PLUGINS is not set
+# CONFIG_BLOCK_LEGACY_AUTOLOAD is not set
 CONFIG_PARTITION_ADVANCED=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 # CONFIG_COMPACTION is not set
@@ -60,7 +61,6 @@ CONFIG_ZFCP=y
 # CONFIG_HID is not set
 # CONFIG_VIRTIO_MENU is not set
 # CONFIG_VHOST_MENU is not set
-# CONFIG_SURFACE_PLATFORMS is not set
 # CONFIG_IOMMU_SUPPORT is not set
 # CONFIG_DNOTIFY is not set
 # CONFIG_INOTIFY_USER is not set
@@ -71,10 +71,10 @@ CONFIG_LSM="yama,loadpin,safesetid,integrity"
 CONFIG_XZ_DEC_MICROLZMA=y
 CONFIG_PRINTK_TIME=y
 # CONFIG_SYMBOLIC_ERRNAME is not set
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_INFO_DWARF4=y
 CONFIG_DEBUG_INFO_BTF=y
 CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
 CONFIG_PANIC_ON_OOPS=y
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
index eabab24..2f0a1ca 100644 (file)
@@ -58,7 +58,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
 
 static inline bool on_thread_stack(void)
 {
-       return !(((unsigned long)(current->stack) ^ current_stack_pointer()) & ~(THREAD_SIZE - 1));
+       return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
 }
 
 #endif
index eee8d96..ff1e25d 100644 (file)
@@ -200,13 +200,7 @@ unsigned long __get_wchan(struct task_struct *p);
 /* Has task runtime instrumentation enabled ? */
 #define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
 
-static __always_inline unsigned long current_stack_pointer(void)
-{
-       unsigned long sp;
-
-       asm volatile("la %0,0(15)" : "=a" (sp));
-       return sp;
-}
+register unsigned long current_stack_pointer asm("r15");
 
 static __always_inline unsigned short stap(void)
 {
index 275f425..f850019 100644 (file)
@@ -46,7 +46,7 @@ struct stack_frame {
 };
 
 /*
- * Unlike current_stack_pointer() which simply returns current value of %r15
+ * Unlike current_stack_pointer which simply contains the current value of %r15
  * current_frame_address() returns function stack frame address, which matches
  * %r15 upon function invocation. It may differ from %r15 later if function
  * allocates stack for local variables or new stack frame to call other
index b2ef014..6ebf02e 100644 (file)
@@ -54,7 +54,7 @@ static void __do_machine_kdump(void *image)
         * This need to be done *after* s390_reset_system set the
         * prefix register of this CPU to zero
         */
-       memcpy((void *) __LC_FPREGS_SAVE_AREA,
+       memcpy(absolute_pointer(__LC_FPREGS_SAVE_AREA),
               (void *)(prefix + __LC_FPREGS_SAVE_AREA), 512);
 
        __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
index 7a74ea5..aa0e0e7 100644 (file)
@@ -283,6 +283,10 @@ static int __init setup_elf_platform(void)
        case 0x8562:
                strcpy(elf_platform, "z15");
                break;
+       case 0x3931:
+       case 0x3932:
+               strcpy(elf_platform, "z16");
+               break;
        }
        return 0;
 }
index 9b30bea..af96dc0 100644 (file)
@@ -1334,11 +1334,11 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
        hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
        VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
 no_timer:
-       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+       kvm_vcpu_srcu_read_unlock(vcpu);
        kvm_vcpu_halt(vcpu);
        vcpu->valid_wakeup = false;
        __unset_cpu_idle(vcpu);
-       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+       kvm_vcpu_srcu_read_lock(vcpu);
 
        hrtimer_cancel(&vcpu->arch.ckc_timer);
        return 0;
index 156d1c2..da3dabd 100644 (file)
@@ -4237,14 +4237,14 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
         * We try to hold kvm->srcu during most of vcpu_run (except when run-
         * ning the guest), so that memslots (and other stuff) are protected
         */
-       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+       kvm_vcpu_srcu_read_lock(vcpu);
 
        do {
                rc = vcpu_pre_run(vcpu);
                if (rc)
                        break;
 
-               srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+               kvm_vcpu_srcu_read_unlock(vcpu);
                /*
                 * As PF_VCPU will be used in fault handler, between
                 * guest_enter and guest_exit should be no uaccess.
@@ -4281,12 +4281,12 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
                __enable_cpu_timer_accounting(vcpu);
                guest_exit_irqoff();
                local_irq_enable();
-               vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+               kvm_vcpu_srcu_read_lock(vcpu);
 
                rc = vcpu_post_run(vcpu, exit_reason);
        } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
 
-       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+       kvm_vcpu_srcu_read_unlock(vcpu);
        return rc;
 }
 
index 7f7c0d6..cc7c959 100644 (file)
@@ -137,12 +137,7 @@ static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
        /* Allocate variable storage */
        vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
        vlen += uv_info.guest_virt_base_stor_len;
-       /*
-        * The Create Secure Configuration Ultravisor Call does not support
-        * using large pages for the virtual memory area.
-        * This is a hardware limitation.
-        */
-       kvm->arch.pv.stor_var = vmalloc_no_huge(vlen);
+       kvm->arch.pv.stor_var = vzalloc(vlen);
        if (!kvm->arch.pv.stor_var)
                goto out_err;
        return 0;
index acda4b6..dada78b 100644 (file)
@@ -1091,7 +1091,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 
        handle_last_fault(vcpu, vsie_page);
 
-       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+       kvm_vcpu_srcu_read_unlock(vcpu);
 
        /* save current guest state of bp isolation override */
        guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);
@@ -1133,7 +1133,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
        if (!guest_bp_isolation)
                clear_thread_flag(TIF_ISOLATE_BP_GUEST);
 
-       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+       kvm_vcpu_srcu_read_lock(vcpu);
 
        if (rc == -EINTR) {
                VCPU_EVENT(vcpu, 3, "%s", "machine check");
index 9bb0673..5a053b3 100644 (file)
@@ -147,7 +147,7 @@ static __always_inline struct pt_regs fake_pt_regs(void)
        struct pt_regs regs;
 
        memset(&regs, 0, sizeof(regs));
-       regs.gprs[15] = current_stack_pointer();
+       regs.gprs[15] = current_stack_pointer;
 
        asm volatile(
                "basr   %[psw_addr],0\n"
index 41c6d73..adb6991 100644 (file)
@@ -35,6 +35,7 @@
 #define flush_page_for_dma(addr) \
        sparc32_cachetlb_ops->page_for_dma(addr)
 
+struct page;
 void sparc_flush_page_to_ram(struct page *page);
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
index 5d77622..48e5db2 100644 (file)
@@ -51,7 +51,7 @@
  *                            perf code: 0x02
  *                            Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
  *                                             SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
- *                                             TGL,TNT,RKL,ADL,RPL
+ *                                             TGL,TNT,RKL,ADL,RPL,SPR
  *                            Scope: Core
  *     MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
  *                            perf code: 0x03
@@ -62,7 +62,7 @@
  *                            perf code: 0x00
  *                            Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
  *                                             KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL,
- *                                             RPL
+ *                                             RPL,SPR
  *                            Scope: Package (physical package)
  *     MSR_PKG_C3_RESIDENCY:  Package C3 Residency Counter.
  *                            perf code: 0x01
@@ -74,7 +74,7 @@
  *                            perf code: 0x02
  *                            Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
  *                                             SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
- *                                             TGL,TNT,RKL,ADL,RPL
+ *                                             TGL,TNT,RKL,ADL,RPL,SPR
  *                            Scope: Package (physical package)
  *     MSR_PKG_C7_RESIDENCY:  Package C7 Residency Counter.
  *                            perf code: 0x03
@@ -675,6 +675,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE,             &icl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,           &icx_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,           &icx_cstates),
+       X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,    &icx_cstates),
 
        X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,         &icl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,           &icl_cstates),
index 7516e41..20fd0ac 100644 (file)
@@ -28,15 +28,13 @@ typedef u16         compat_ipc_pid_t;
 typedef __kernel_fsid_t        compat_fsid_t;
 
 struct compat_stat {
-       compat_dev_t    st_dev;
-       u16             __pad1;
+       u32             st_dev;
        compat_ino_t    st_ino;
        compat_mode_t   st_mode;
        compat_nlink_t  st_nlink;
        __compat_uid_t  st_uid;
        __compat_gid_t  st_gid;
-       compat_dev_t    st_rdev;
-       u16             __pad2;
+       u32             st_rdev;
        u32             st_size;
        u32             st_blksize;
        u32             st_blocks;
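
compat_dev_t is 16 bits wide on x86, so the old compat_dev_t-plus-u16-padding pairs and the new bare u32 fields occupy the same four bytes: the structure layout (and therefore the compat ABI) is unchanged, but a full 32-bit encoded device number no longer gets truncated. A stand-alone sketch of the equivalence (field types simplified for illustration):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified stand-ins for the old and new layouts of the leading fields. */
struct old_head {
	uint16_t st_dev;	/* compat_dev_t */
	uint16_t __pad1;
	uint32_t st_ino;	/* compat_ino_t, simplified */
};

struct new_head {
	uint32_t st_dev;
	uint32_t st_ino;
};

int main(void)
{
	/* Same size, same offset of the following field: ABI-compatible. */
	assert(sizeof(struct old_head) == sizeof(struct new_head));
	assert(offsetof(struct old_head, st_ino) == offsetof(struct new_head, st_ino));
	return 0;
}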
index f6d91ec..e9736af 100644 (file)
@@ -210,8 +210,6 @@ void __iomem *ioremap(resource_size_t offset, unsigned long size);
 extern void iounmap(volatile void __iomem *addr);
 #define iounmap iounmap
 
-extern void set_iounmap_nonlazy(void);
-
 #ifdef __KERNEL__
 
 void memcpy_fromio(void *, const volatile void __iomem *, size_t);
index 3c368b6..1a6d7e3 100644 (file)
@@ -118,6 +118,7 @@ KVM_X86_OP_OPTIONAL(mem_enc_register_region)
 KVM_X86_OP_OPTIONAL(mem_enc_unregister_region)
 KVM_X86_OP_OPTIONAL(vm_copy_enc_context_from)
 KVM_X86_OP_OPTIONAL(vm_move_enc_context_from)
+KVM_X86_OP_OPTIONAL(guest_memory_reclaimed)
 KVM_X86_OP(get_msr_feature)
 KVM_X86_OP(can_emulate_instruction)
 KVM_X86_OP(apic_init_signal_blocked)
index d23e80a..4ff3661 100644 (file)
@@ -974,12 +974,10 @@ enum hv_tsc_page_status {
        HV_TSC_PAGE_UNSET = 0,
        /* TSC page MSR was written by the guest, update pending */
        HV_TSC_PAGE_GUEST_CHANGED,
-       /* TSC page MSR was written by KVM userspace, update pending */
+       /* TSC page update was triggered from the host side */
        HV_TSC_PAGE_HOST_CHANGED,
        /* TSC page was properly set up and is currently active  */
        HV_TSC_PAGE_SET,
-       /* TSC page is currently being updated and therefore is inactive */
-       HV_TSC_PAGE_UPDATING,
        /* TSC page was set up with an inaccessible GPA */
        HV_TSC_PAGE_BROKEN,
 };
@@ -1052,6 +1050,7 @@ enum kvm_apicv_inhibit {
        APICV_INHIBIT_REASON_X2APIC,
        APICV_INHIBIT_REASON_BLOCKIRQ,
        APICV_INHIBIT_REASON_ABSENT,
+       APICV_INHIBIT_REASON_SEV,
 };
 
 struct kvm_arch {
@@ -1485,6 +1484,7 @@ struct kvm_x86_ops {
        int (*mem_enc_unregister_region)(struct kvm *kvm, struct kvm_enc_region *argp);
        int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
        int (*vm_move_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
+       void (*guest_memory_reclaimed)(struct kvm *kvm);
 
        int (*get_msr_feature)(struct kvm_msr_entry *entry);
 
@@ -1585,8 +1585,9 @@ static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
 #define kvm_arch_pmi_in_guest(vcpu) \
        ((vcpu) && (vcpu)->arch.handling_intr_from_guest)
 
-int kvm_mmu_module_init(void);
-void kvm_mmu_module_exit(void);
+void kvm_mmu_x86_module_init(void);
+int kvm_mmu_vendor_module_init(void);
+void kvm_mmu_vendor_module_exit(void);
 
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
index 0eb90d2..ee15311 100644 (file)
 #define TSX_CTRL_RTM_DISABLE           BIT(0)  /* Disable RTM feature */
 #define TSX_CTRL_CPUID_CLEAR           BIT(1)  /* Disable TSX enumeration */
 
-/* SRBDS support */
 #define MSR_IA32_MCU_OPT_CTRL          0x00000123
-#define RNGDS_MITG_DIS                 BIT(0)
+#define RNGDS_MITG_DIS                 BIT(0)  /* SRBDS support */
+#define RTM_ALLOW                      BIT(1)  /* TSX development mode */
 
 #define MSR_IA32_SYSENTER_CS           0x00000174
 #define MSR_IA32_SYSENTER_ESP          0x00000175
index ed44175..e342ae4 100644 (file)
@@ -1855,6 +1855,8 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
        validate_apic_and_package_id(c);
        x86_spec_ctrl_setup_ap();
        update_srbds_msr();
+
+       tsx_ap_init();
 }
 
 static __init int setup_noclflush(char *arg)
index ee6f23f..2a8e584 100644 (file)
@@ -55,11 +55,10 @@ enum tsx_ctrl_states {
 extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state;
 
 extern void __init tsx_init(void);
-extern void tsx_enable(void);
-extern void tsx_disable(void);
-extern void tsx_clear_cpuid(void);
+void tsx_ap_init(void);
 #else
 static inline void tsx_init(void) { }
+static inline void tsx_ap_init(void) { }
 #endif /* CONFIG_CPU_SUP_INTEL */
 
 extern void get_cpu_cap(struct cpuinfo_x86 *c);
index 8321c43..f7a5370 100644 (file)
@@ -717,13 +717,6 @@ static void init_intel(struct cpuinfo_x86 *c)
 
        init_intel_misc_features(c);
 
-       if (tsx_ctrl_state == TSX_CTRL_ENABLE)
-               tsx_enable();
-       else if (tsx_ctrl_state == TSX_CTRL_DISABLE)
-               tsx_disable();
-       else if (tsx_ctrl_state == TSX_CTRL_RTM_ALWAYS_ABORT)
-               tsx_clear_cpuid();
-
        split_lock_init();
        bus_lock_init();
 
index 9c7a5f0..ec7bbac 100644 (file)
@@ -19,7 +19,7 @@
 
 enum tsx_ctrl_states tsx_ctrl_state __ro_after_init = TSX_CTRL_NOT_SUPPORTED;
 
-void tsx_disable(void)
+static void tsx_disable(void)
 {
        u64 tsx;
 
@@ -39,7 +39,7 @@ void tsx_disable(void)
        wrmsrl(MSR_IA32_TSX_CTRL, tsx);
 }
 
-void tsx_enable(void)
+static void tsx_enable(void)
 {
        u64 tsx;
 
@@ -58,7 +58,7 @@ void tsx_enable(void)
        wrmsrl(MSR_IA32_TSX_CTRL, tsx);
 }
 
-static bool __init tsx_ctrl_is_supported(void)
+static bool tsx_ctrl_is_supported(void)
 {
        u64 ia32_cap = x86_read_arch_cap_msr();
 
@@ -84,7 +84,45 @@ static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
        return TSX_CTRL_ENABLE;
 }
 
-void tsx_clear_cpuid(void)
+/*
+ * Disabling TSX is not a trivial business.
+ *
+ * First of all, there's a CPUID bit: X86_FEATURE_RTM_ALWAYS_ABORT
+ * which says that TSX is practically disabled (all transactions are
+ * aborted by default). When that bit is set, the kernel unconditionally
+ * disables TSX.
+ *
+ * In order to do that, however, it needs to dance a bit:
+ *
+ * 1. The first method to disable it is through MSR_TSX_FORCE_ABORT and
+ * the MSR is present only when *two* CPUID bits are set:
+ *
+ * - X86_FEATURE_RTM_ALWAYS_ABORT
+ * - X86_FEATURE_TSX_FORCE_ABORT
+ *
+ * 2. The second method is for CPUs which do not have the above-mentioned
+ * MSR: those use a different MSR - MSR_IA32_TSX_CTRL and disable TSX
+ * through that one. Those CPUs can also have the initially mentioned
+ * CPUID bit X86_FEATURE_RTM_ALWAYS_ABORT set and for those the same strategy
+ * applies: TSX gets disabled unconditionally.
+ *
+ * When either of the two methods are present, the kernel disables TSX and
+ * clears the respective RTM and HLE feature flags.
+ *
+ * An additional twist in the whole thing presents late microcode loading
+ * which, when done, may cause for the X86_FEATURE_RTM_ALWAYS_ABORT CPUID
+ * bit to be set after the update.
+ *
+ * A subsequent hotplug operation on any logical CPU except the BSP will
+ * cause for the supported CPUID feature bits to get re-detected and, if
+ * RTM and HLE get cleared all of a sudden, but, userspace did consult
+ * them before the update, then funny explosions will happen. Long story
+ * short: the kernel doesn't modify CPUID feature bits after booting.
+ *
+ * That's why, this function's call in init_intel() doesn't clear the
+ * feature flags.
+ */
+static void tsx_clear_cpuid(void)
 {
        u64 msr;
 
@@ -97,6 +135,39 @@ void tsx_clear_cpuid(void)
                rdmsrl(MSR_TSX_FORCE_ABORT, msr);
                msr |= MSR_TFA_TSX_CPUID_CLEAR;
                wrmsrl(MSR_TSX_FORCE_ABORT, msr);
+       } else if (tsx_ctrl_is_supported()) {
+               rdmsrl(MSR_IA32_TSX_CTRL, msr);
+               msr |= TSX_CTRL_CPUID_CLEAR;
+               wrmsrl(MSR_IA32_TSX_CTRL, msr);
+       }
+}
+
+/*
+ * Disable TSX development mode
+ *
+ * When the microcode released in Feb 2022 is applied, TSX will be disabled by
+ * default on some processors. MSR 0x122 (TSX_CTRL) and MSR 0x123
+ * (IA32_MCU_OPT_CTRL) can be used to re-enable TSX for development, doing so is
+ * not recommended for production deployments. In particular, applying MD_CLEAR
+ * flows for mitigation of the Intel TSX Asynchronous Abort (TAA) transient
+ * execution attack may not be effective on these processors when Intel TSX is
+ * enabled with updated microcode.
+ */
+static void tsx_dev_mode_disable(void)
+{
+       u64 mcu_opt_ctrl;
+
+       /* Check if RTM_ALLOW exists */
+       if (!boot_cpu_has_bug(X86_BUG_TAA) || !tsx_ctrl_is_supported() ||
+           !cpu_feature_enabled(X86_FEATURE_SRBDS_CTRL))
+               return;
+
+       rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl);
+
+       if (mcu_opt_ctrl & RTM_ALLOW) {
+               mcu_opt_ctrl &= ~RTM_ALLOW;
+               wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl);
+               setup_force_cpu_cap(X86_FEATURE_RTM_ALWAYS_ABORT);
        }
 }
 
@@ -105,14 +176,14 @@ void __init tsx_init(void)
        char arg[5] = {};
        int ret;
 
+       tsx_dev_mode_disable();
+
        /*
-        * Hardware will always abort a TSX transaction if both CPUID bits
-        * RTM_ALWAYS_ABORT and TSX_FORCE_ABORT are set. In this case, it is
-        * better not to enumerate CPUID.RTM and CPUID.HLE bits. Clear them
-        * here.
+        * Hardware will always abort a TSX transaction when the CPUID bit
+        * RTM_ALWAYS_ABORT is set. In this case, it is better not to enumerate
+        * CPUID.RTM and CPUID.HLE bits. Clear them here.
         */
-       if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT) &&
-           boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
+       if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) {
                tsx_ctrl_state = TSX_CTRL_RTM_ALWAYS_ABORT;
                tsx_clear_cpuid();
                setup_clear_cpu_cap(X86_FEATURE_RTM);
@@ -175,3 +246,16 @@ void __init tsx_init(void)
                setup_force_cpu_cap(X86_FEATURE_HLE);
        }
 }
+
+void tsx_ap_init(void)
+{
+       tsx_dev_mode_disable();
+
+       if (tsx_ctrl_state == TSX_CTRL_ENABLE)
+               tsx_enable();
+       else if (tsx_ctrl_state == TSX_CTRL_DISABLE)
+               tsx_disable();
+       else if (tsx_ctrl_state == TSX_CTRL_RTM_ALWAYS_ABORT)
+               /* See the comment above tsx_clear_cpuid() for more details. */
+               tsx_clear_cpuid();
+}
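For reference only (not part of the hunks above): whether TSX development mode is still allowed on a given CPU can be checked from userspace by reading MSR 0x123 (IA32_MCU_OPT_CTRL), the register named in the comment above, through the msr driver. The following is a minimal sketch, assuming RTM_ALLOW is bit 1 of that MSR and that the msr module is loaded; it needs root.

/*
 * Hypothetical userspace check: read IA32_MCU_OPT_CTRL (MSR 0x123) on CPU 0
 * via /dev/cpu/0/msr and report whether TSX development mode (RTM_ALLOW,
 * assumed here to be bit 1) is enabled.
 * Build with: gcc -O2 -o rtm_allow rtm_allow.c
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        uint64_t val;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/cpu/0/msr");
                return 1;
        }
        /* the msr driver maps the pread offset to the MSR number */
        if (pread(fd, &val, sizeof(val), 0x123) != sizeof(val)) {
                perror("rdmsr 0x123");
                close(fd);
                return 1;
        }
        close(fd);
        printf("IA32_MCU_OPT_CTRL = %#llx, RTM_ALLOW (bit 1) %s\n",
               (unsigned long long)val, (val & (1ULL << 1)) ? "set" : "clear");
        return 0;
}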
index a7f617a..9752955 100644 (file)
@@ -37,7 +37,6 @@ static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
        } else
                memcpy(buf, vaddr + offset, csize);
 
-       set_iounmap_nonlazy();
        iounmap((void __iomem *)vaddr);
        return csize;
 }
index a22deb5..8b1c45c 100644 (file)
@@ -69,6 +69,7 @@ static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __align
 DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
 static int has_steal_clock = 0;
 
+static int has_guest_poll = 0;
 /*
  * No need for any "IO delay" on KVM
  */
@@ -706,14 +707,26 @@ static int kvm_cpu_down_prepare(unsigned int cpu)
 
 static int kvm_suspend(void)
 {
+       u64 val = 0;
+
        kvm_guest_cpu_offline(false);
 
+#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
+       if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
+               rdmsrl(MSR_KVM_POLL_CONTROL, val);
+       has_guest_poll = !(val & 1);
+#endif
        return 0;
 }
 
 static void kvm_resume(void)
 {
        kvm_cpu_online(raw_smp_processor_id());
+
+#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
+       if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL) && has_guest_poll)
+               wrmsrl(MSR_KVM_POLL_CONTROL, 0);
+#endif
 }
 
 static struct syscore_ops kvm_syscore_ops = {
index 123b677..46f9dfb 100644 (file)
@@ -1135,11 +1135,13 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
        BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
        BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);
 
+       mutex_lock(&hv->hv_lock);
+
        if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
+           hv->hv_tsc_page_status == HV_TSC_PAGE_SET ||
            hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
-               return;
+               goto out_unlock;
 
-       mutex_lock(&hv->hv_lock);
        if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                goto out_unlock;
 
@@ -1201,45 +1203,19 @@ out_unlock:
        mutex_unlock(&hv->hv_lock);
 }
 
-void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
+void kvm_hv_request_tsc_page_update(struct kvm *kvm)
 {
        struct kvm_hv *hv = to_kvm_hv(kvm);
-       u64 gfn;
-       int idx;
-
-       if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
-           hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET ||
-           tsc_page_update_unsafe(hv))
-               return;
 
        mutex_lock(&hv->hv_lock);
 
-       if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
-               goto out_unlock;
-
-       /* Preserve HV_TSC_PAGE_GUEST_CHANGED/HV_TSC_PAGE_HOST_CHANGED states */
-       if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET)
-               hv->hv_tsc_page_status = HV_TSC_PAGE_UPDATING;
+       if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET &&
+           !tsc_page_update_unsafe(hv))
+               hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
 
-       gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
-
-       hv->tsc_ref.tsc_sequence = 0;
-
-       /*
-        * Take the srcu lock as memslots will be accessed to check the gfn
-        * cache generation against the memslots generation.
-        */
-       idx = srcu_read_lock(&kvm->srcu);
-       if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
-                           &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
-               hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
-       srcu_read_unlock(&kvm->srcu, idx);
-
-out_unlock:
        mutex_unlock(&hv->hv_lock);
 }
 
-
 static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
 {
        if (!hv_vcpu->enforce_cpuid)
index e19c00e..da2737f 100644 (file)
@@ -137,7 +137,7 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu);
 
 void kvm_hv_setup_tsc_page(struct kvm *kvm,
                           struct pvclock_vcpu_time_info *hv_clock);
-void kvm_hv_invalidate_tsc_page(struct kvm *kvm);
+void kvm_hv_request_tsc_page_update(struct kvm *kvm);
 
 void kvm_hv_init_vm(struct kvm *kvm);
 void kvm_hv_destroy_vm(struct kvm *kvm);
index 8f19ea7..f9080ee 100644 (file)
@@ -6237,12 +6237,24 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
        return 0;
 }
 
-int kvm_mmu_module_init(void)
+/*
+ * nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as
+ * its default value of -1 is technically undefined behavior for a boolean.
+ */
+void kvm_mmu_x86_module_init(void)
 {
-       int ret = -ENOMEM;
-
        if (nx_huge_pages == -1)
                __set_nx_huge_pages(get_nx_auto_mode());
+}
+
+/*
+ * The bulk of the MMU initialization is deferred until the vendor module is
+ * loaded as many of the masks/values may be modified by VMX or SVM, i.e. need
+ * to be reset when a potentially different vendor module is loaded.
+ */
+int kvm_mmu_vendor_module_init(void)
+{
+       int ret = -ENOMEM;
 
        /*
         * MMU roles use union aliasing which is, generally speaking, an
@@ -6290,7 +6302,7 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
        mmu_free_memory_caches(vcpu);
 }
 
-void kvm_mmu_module_exit(void)
+void kvm_mmu_vendor_module_exit(void)
 {
        mmu_destroy_caches();
        percpu_counter_destroy(&kvm_total_used_mmu_pages);
index d71d177..c472769 100644 (file)
@@ -51,7 +51,7 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
        if (!kvm->arch.tdp_mmu_enabled)
                return;
 
-       flush_workqueue(kvm->arch.tdp_mmu_zap_wq);
+       /* Also waits for any queued work items.  */
        destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);
 
        WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
index 9e66fba..22992b0 100644 (file)
@@ -138,6 +138,15 @@ static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
        return sample_period;
 }
 
+static inline void pmc_update_sample_period(struct kvm_pmc *pmc)
+{
+       if (!pmc->perf_event || pmc->is_paused)
+               return;
+
+       perf_event_period(pmc->perf_event,
+                         get_sample_period(pmc, pmc->counter));
+}
+
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
 void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
 void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);
index a1cf9c3..4216195 100644 (file)
@@ -837,7 +837,8 @@ bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason)
                          BIT(APICV_INHIBIT_REASON_IRQWIN) |
                          BIT(APICV_INHIBIT_REASON_PIT_REINJ) |
                          BIT(APICV_INHIBIT_REASON_X2APIC) |
-                         BIT(APICV_INHIBIT_REASON_BLOCKIRQ);
+                         BIT(APICV_INHIBIT_REASON_BLOCKIRQ) |
+                         BIT(APICV_INHIBIT_REASON_SEV);
 
        return supported & BIT(reason);
 }
index 24eb935..b148608 100644 (file)
@@ -257,6 +257,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        if (pmc) {
                pmc->counter += data - pmc_read_counter(pmc);
+               pmc_update_sample_period(pmc);
                return 0;
        }
        /* MSR_EVNTSELn */
index 75fa6dd..0ad70c1 100644 (file)
@@ -260,6 +260,8 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
        INIT_LIST_HEAD(&sev->regions_list);
        INIT_LIST_HEAD(&sev->mirror_vms);
 
+       kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_SEV);
+
        return 0;
 
 e_free:
@@ -465,6 +467,7 @@ static void sev_clflush_pages(struct page *pages[], unsigned long npages)
                page_virtual = kmap_atomic(pages[i]);
                clflush_cache_range(page_virtual, PAGE_SIZE);
                kunmap_atomic(page_virtual);
+               cond_resched();
        }
 }
 
@@ -2223,51 +2226,47 @@ int sev_cpu_init(struct svm_cpu_data *sd)
  * Pages used by hardware to hold guest encrypted state must be flushed before
  * returning them to the system.
  */
-static void sev_flush_guest_memory(struct vcpu_svm *svm, void *va,
-                                  unsigned long len)
+static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
 {
+       int asid = to_kvm_svm(vcpu->kvm)->sev_info.asid;
+
        /*
-        * If hardware enforced cache coherency for encrypted mappings of the
-        * same physical page is supported, nothing to do.
+        * Note!  The address must be a kernel address, as regular page walk
+        * checks are performed by VM_PAGE_FLUSH, i.e. operating on a user
+        * address is non-deterministic and unsafe.  This function deliberately
+        * takes a pointer to deter passing in a user address.
         */
-       if (boot_cpu_has(X86_FEATURE_SME_COHERENT))
-               return;
+       unsigned long addr = (unsigned long)va;
 
        /*
-        * If the VM Page Flush MSR is supported, use it to flush the page
-        * (using the page virtual address and the guest ASID).
+        * If CPU enforced cache coherency for encrypted mappings of the
+        * same physical page is supported, use CLFLUSHOPT instead. NOTE: cache
+        * flush is still needed in order to work properly with DMA devices.
         */
-       if (boot_cpu_has(X86_FEATURE_VM_PAGE_FLUSH)) {
-               struct kvm_sev_info *sev;
-               unsigned long va_start;
-               u64 start, stop;
-
-               /* Align start and stop to page boundaries. */
-               va_start = (unsigned long)va;
-               start = (u64)va_start & PAGE_MASK;
-               stop = PAGE_ALIGN((u64)va_start + len);
-
-               if (start < stop) {
-                       sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
+       if (boot_cpu_has(X86_FEATURE_SME_COHERENT)) {
+               clflush_cache_range(va, PAGE_SIZE);
+               return;
+       }
 
-                       while (start < stop) {
-                               wrmsrl(MSR_AMD64_VM_PAGE_FLUSH,
-                                      start | sev->asid);
+       /*
+        * VM Page Flush takes a host virtual address and a guest ASID.  Fall
+        * back to WBINVD if this faults so as not to make any problems worse
+        * by leaving stale encrypted data in the cache.
+        */
+       if (WARN_ON_ONCE(wrmsrl_safe(MSR_AMD64_VM_PAGE_FLUSH, addr | asid)))
+               goto do_wbinvd;
 
-                               start += PAGE_SIZE;
-                       }
+       return;
 
-                       return;
-               }
+do_wbinvd:
+       wbinvd_on_all_cpus();
+}
 
-               WARN(1, "Address overflow, using WBINVD\n");
-       }
+void sev_guest_memory_reclaimed(struct kvm *kvm)
+{
+       if (!sev_guest(kvm))
+               return;
 
-       /*
-        * Hardware should always have one of the above features,
-        * but if not, use WBINVD and issue a warning.
-        */
-       WARN_ONCE(1, "Using WBINVD to flush guest memory\n");
        wbinvd_on_all_cpus();
 }
 
@@ -2281,7 +2280,8 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
        svm = to_svm(vcpu);
 
        if (vcpu->arch.guest_state_protected)
-               sev_flush_guest_memory(svm, svm->sev_es.vmsa, PAGE_SIZE);
+               sev_flush_encrypted_page(vcpu, svm->sev_es.vmsa);
+
        __free_page(virt_to_page(svm->sev_es.vmsa));
 
        if (svm->sev_es.ghcb_sa_free)
index bd4c64b..7e45d03 100644 (file)
@@ -4620,6 +4620,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
        .mem_enc_ioctl = sev_mem_enc_ioctl,
        .mem_enc_register_region = sev_mem_enc_register_region,
        .mem_enc_unregister_region = sev_mem_enc_unregister_region,
+       .guest_memory_reclaimed = sev_guest_memory_reclaimed,
 
        .vm_copy_enc_context_from = sev_vm_copy_enc_context_from,
        .vm_move_enc_context_from = sev_vm_move_enc_context_from,
index f77a7d2..f76deff 100644 (file)
@@ -609,6 +609,8 @@ int sev_mem_enc_unregister_region(struct kvm *kvm,
                                  struct kvm_enc_region *range);
 int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd);
 int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
+void sev_guest_memory_reclaimed(struct kvm *kvm);
+
 void pre_sev_run(struct vcpu_svm *svm, int cpu);
 void __init sev_set_cpu_caps(void);
 void __init sev_hardware_setup(void);
index f18744f..856c875 100644 (file)
@@ -4618,6 +4618,11 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
                kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
        }
 
+       if (vmx->nested.update_vmcs01_apicv_status) {
+               vmx->nested.update_vmcs01_apicv_status = false;
+               kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
+       }
+
        if ((vm_exit_reason != -1) &&
            (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)))
                vmx->nested.need_vmcs12_to_shadow_sync = true;
index bc3f851..b82b670 100644 (file)
@@ -431,15 +431,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                            !(msr & MSR_PMC_FULL_WIDTH_BIT))
                                data = (s64)(s32)data;
                        pmc->counter += data - pmc_read_counter(pmc);
-                       if (pmc->perf_event && !pmc->is_paused)
-                               perf_event_period(pmc->perf_event,
-                                                 get_sample_period(pmc, data));
+                       pmc_update_sample_period(pmc);
                        return 0;
                } else if ((pmc = get_fixed_pmc(pmu, msr))) {
                        pmc->counter += data - pmc_read_counter(pmc);
-                       if (pmc->perf_event && !pmc->is_paused)
-                               perf_event_period(pmc->perf_event,
-                                                 get_sample_period(pmc, data));
+                       pmc_update_sample_period(pmc);
                        return 0;
                } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
                        if (data == pmc->eventsel)
index 04d170c..d58b763 100644 (file)
@@ -4174,6 +4174,11 @@ static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
+       if (is_guest_mode(vcpu)) {
+               vmx->nested.update_vmcs01_apicv_status = true;
+               return;
+       }
+
        pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
        if (cpu_has_secondary_exec_ctrls()) {
                if (kvm_vcpu_apicv_active(vcpu))
index 9c6bfcd..b98c7e9 100644 (file)
@@ -183,6 +183,7 @@ struct nested_vmx {
        bool change_vmcs01_virtual_apic_mode;
        bool reload_vmcs01_apic_access_page;
        bool update_vmcs01_cpu_dirty_logging;
+       bool update_vmcs01_apicv_status;
 
        /*
         * Enlightened VMCS has been enabled. It does not mean that L1 has to
index 0c0ca59..a6ab19a 100644 (file)
@@ -2901,7 +2901,7 @@ static void kvm_end_pvclock_update(struct kvm *kvm)
 
 static void kvm_update_masterclock(struct kvm *kvm)
 {
-       kvm_hv_invalidate_tsc_page(kvm);
+       kvm_hv_request_tsc_page_update(kvm);
        kvm_start_pvclock_update(kvm);
        pvclock_update_vm_gtod_copy(kvm);
        kvm_end_pvclock_update(kvm);
@@ -3113,8 +3113,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
                                       offsetof(struct compat_vcpu_info, time));
        if (vcpu->xen.vcpu_time_info_set)
                kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0);
-       if (!v->vcpu_idx)
-               kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
+       kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
        return 0;
 }
 
@@ -6241,7 +6240,7 @@ static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp)
        if (data.flags & ~KVM_CLOCK_VALID_FLAGS)
                return -EINVAL;
 
-       kvm_hv_invalidate_tsc_page(kvm);
+       kvm_hv_request_tsc_page_update(kvm);
        kvm_start_pvclock_update(kvm);
        pvclock_update_vm_gtod_copy(kvm);
 
@@ -8926,7 +8925,7 @@ int kvm_arch_init(void *opaque)
        }
        kvm_nr_uret_msrs = 0;
 
-       r = kvm_mmu_module_init();
+       r = kvm_mmu_vendor_module_init();
        if (r)
                goto out_free_percpu;
 
@@ -8974,7 +8973,7 @@ void kvm_arch_exit(void)
        cancel_work_sync(&pvclock_gtod_work);
 #endif
        kvm_x86_ops.hardware_enable = NULL;
-       kvm_mmu_module_exit();
+       kvm_mmu_vendor_module_exit();
        free_percpu(user_return_msrs);
        kmem_cache_destroy(x86_emulator_cache);
 #ifdef CONFIG_KVM_XEN
@@ -9112,7 +9111,7 @@ static void kvm_apicv_init(struct kvm *kvm)
 
        if (!enable_apicv)
                set_or_clear_apicv_inhibit(inhibits,
-                                          APICV_INHIBIT_REASON_ABSENT, true);
+                                          APICV_INHIBIT_REASON_DISABLE, true);
 }
 
 static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
@@ -9890,6 +9889,11 @@ void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
                kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
 }
 
+void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
+{
+       static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm);
+}
+
 static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
 {
        if (!lapic_in_kernel(vcpu))
@@ -10098,7 +10102,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        /* Store vcpu->apicv_active before vcpu->mode.  */
        smp_store_release(&vcpu->mode, IN_GUEST_MODE);
 
-       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+       kvm_vcpu_srcu_read_unlock(vcpu);
 
        /*
         * 1) We should set ->mode before checking ->requests.  Please see
@@ -10129,7 +10133,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                smp_wmb();
                local_irq_enable();
                preempt_enable();
-               vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+               kvm_vcpu_srcu_read_lock(vcpu);
                r = 1;
                goto cancel_injection;
        }
@@ -10255,7 +10259,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        local_irq_enable();
        preempt_enable();
 
-       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+       kvm_vcpu_srcu_read_lock(vcpu);
 
        /*
         * Profile KVM exit RIPs:
@@ -10285,7 +10289,7 @@ out:
 }
 
 /* Called within kvm->srcu read side.  */
-static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
+static inline int vcpu_block(struct kvm_vcpu *vcpu)
 {
        bool hv_timer;
 
@@ -10301,12 +10305,12 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
                if (hv_timer)
                        kvm_lapic_switch_to_sw_timer(vcpu);
 
-               srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+               kvm_vcpu_srcu_read_unlock(vcpu);
                if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
                        kvm_vcpu_halt(vcpu);
                else
                        kvm_vcpu_block(vcpu);
-               vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+               kvm_vcpu_srcu_read_lock(vcpu);
 
                if (hv_timer)
                        kvm_lapic_switch_to_hv_timer(vcpu);
@@ -10348,7 +10352,6 @@ static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
 static int vcpu_run(struct kvm_vcpu *vcpu)
 {
        int r;
-       struct kvm *kvm = vcpu->kvm;
 
        vcpu->arch.l1tf_flush_l1d = true;
 
@@ -10356,7 +10359,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
                if (kvm_vcpu_running(vcpu)) {
                        r = vcpu_enter_guest(vcpu);
                } else {
-                       r = vcpu_block(kvm, vcpu);
+                       r = vcpu_block(vcpu);
                }
 
                if (r <= 0)
@@ -10375,9 +10378,9 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
                }
 
                if (__xfer_to_guest_mode_work_pending()) {
-                       srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+                       kvm_vcpu_srcu_read_unlock(vcpu);
                        r = xfer_to_guest_mode_handle_work(vcpu);
-                       vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+                       kvm_vcpu_srcu_read_lock(vcpu);
                        if (r)
                                return r;
                }
@@ -10388,12 +10391,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
 
 static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
 {
-       int r;
-
-       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-       r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
-       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
-       return r;
+       return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
 }
 
 static int complete_emulated_pio(struct kvm_vcpu *vcpu)
@@ -10485,7 +10483,6 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
        struct kvm_run *kvm_run = vcpu->run;
-       struct kvm *kvm = vcpu->kvm;
        int r;
 
        vcpu_load(vcpu);
@@ -10493,7 +10490,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
        kvm_run->flags = 0;
        kvm_load_guest_fpu(vcpu);
 
-       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+       kvm_vcpu_srcu_read_lock(vcpu);
        if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
                if (kvm_run->immediate_exit) {
                        r = -EINTR;
@@ -10505,9 +10502,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                 */
                WARN_ON_ONCE(kvm_lapic_hv_timer_in_use(vcpu));
 
-               srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+               kvm_vcpu_srcu_read_unlock(vcpu);
                kvm_vcpu_block(vcpu);
-               vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+               kvm_vcpu_srcu_read_lock(vcpu);
 
                if (kvm_apic_accept_events(vcpu) < 0) {
                        r = 0;
@@ -10568,7 +10565,7 @@ out:
        if (kvm_run->kvm_valid_regs)
                store_regs(vcpu);
        post_kvm_run_save(vcpu);
-       srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+       kvm_vcpu_srcu_read_unlock(vcpu);
 
        kvm_sigset_deactivate(vcpu);
        vcpu_put(vcpu);
@@ -10986,6 +10983,9 @@ static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm)
        struct kvm_vcpu *vcpu;
        unsigned long i;
 
+       if (!enable_apicv)
+               return;
+
        down_write(&kvm->arch.apicv_update_lock);
 
        kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -11197,8 +11197,21 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
                r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
                if (r < 0)
                        goto fail_mmu_destroy;
-               if (kvm_apicv_activated(vcpu->kvm))
+
+               /*
+                * Defer evaluating inhibits until the vCPU is first run, as
+                * this vCPU will not get notified of any changes until this
+                * vCPU is visible to other vCPUs (marked online and added to
+                * the set of vCPUs).  Opportunistically mark APICv active as
+                * VMX in particular is highly unlikely to have inhibits.
+                * Ignore the current per-VM APICv state so that vCPU creation
+                * is guaranteed to run with a deterministic value; the request
+                * will ensure the vCPU gets the correct state before VM-Entry.
+                */
+               if (enable_apicv) {
                        vcpu->arch.apicv_active = true;
+                       kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
+               }
        } else
                static_branch_inc(&kvm_has_noapic_vcpu);
 
@@ -12986,3 +12999,19 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit);
+
+static int __init kvm_x86_init(void)
+{
+       kvm_mmu_x86_module_init();
+       return 0;
+}
+module_init(kvm_x86_init);
+
+static void __exit kvm_x86_exit(void)
+{
+       /*
+        * If module_init() is implemented, module_exit() must also be
+        * implemented to allow module unload.
+        */
+}
+module_exit(kvm_x86_exit);
index 0402a74..0ae6cf8 100644 (file)
@@ -119,7 +119,7 @@ void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
 
        /* cache copy and flush to align dest */
        if (!IS_ALIGNED(dest, 8)) {
-               unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest);
+               size_t len = min_t(size_t, size, ALIGN(dest, 8) - dest);
 
                memcpy((void *) dest, (void *) source, len);
                clean_cache_range((void *) dest, len);
index 45cc0ae..c7b9f12 100644 (file)
@@ -29,7 +29,7 @@
        .if XTENSA_HAVE_COPROCESSOR(x);                                 \
                .align 4;                                               \
        .Lsave_cp_regs_cp##x:                                           \
-               xchal_cp##x##_store a2 a4 a5 a6 a7;                     \
+               xchal_cp##x##_store a2 a3 a4 a5 a6;                     \
                jx      a0;                                             \
        .endif
 
@@ -46,7 +46,7 @@
        .if XTENSA_HAVE_COPROCESSOR(x);                                 \
                .align 4;                                               \
        .Lload_cp_regs_cp##x:                                           \
-               xchal_cp##x##_load a2 a4 a5 a6 a7;                      \
+               xchal_cp##x##_load a2 a3 a4 a5 a6;                      \
                jx      a0;                                             \
        .endif
 
index 0dde21e..ad1841c 100644 (file)
@@ -40,7 +40,7 @@ static int patch_text_stop_machine(void *data)
 {
        struct patch *patch = data;
 
-       if (atomic_inc_return(&patch->cpu_count) == 1) {
+       if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
                local_patch_text(patch->addr, patch->data, patch->sz);
                atomic_inc(&patch->cpu_count);
        } else {
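The one-line change above moves the text patching from the first CPU that enters the stop_machine() callback to the CPU whose arrival makes the count reach num_online_cpus(), so every other CPU is already inside the callback before any instructions are rewritten. Below is a generic userspace sketch of this "last one in does the work" rendezvous, using pthreads and C11 atomics purely to illustrate the pattern, not the kernel code itself.

/*
 * Each worker counts itself in; the one that completes the count performs
 * the critical action while the others spin until it signals completion.
 * Build with: gcc -std=c11 -pthread demo.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS 4

static atomic_int arrived;
static atomic_int done;

static void *worker(void *arg)
{
        (void)arg;
        if (atomic_fetch_add(&arrived, 1) + 1 == NTHREADS) {
                /* all other threads have arrived and are spinning below */
                puts("last thread in: patching");
                atomic_store(&done, 1);
        } else {
                while (!atomic_load(&done))
                        ;       /* wait for the patcher to finish */
        }
        return NULL;
}

int main(void)
{
        pthread_t t[NTHREADS];

        for (int i = 0; i < NTHREADS; i++)
                pthread_create(&t[i], NULL, worker, NULL);
        for (int i = 0; i < NTHREADS; i++)
                pthread_join(t[i], NULL);
        return 0;
}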
index 81d7c7e..10b79d3 100644 (file)
@@ -36,24 +36,19 @@ static void rs_poll(struct timer_list *);
 static struct tty_driver *serial_driver;
 static struct tty_port serial_port;
 static DEFINE_TIMER(serial_timer, rs_poll);
-static DEFINE_SPINLOCK(timer_lock);
 
 static int rs_open(struct tty_struct *tty, struct file * filp)
 {
-       spin_lock_bh(&timer_lock);
        if (tty->count == 1)
                mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
-       spin_unlock_bh(&timer_lock);
 
        return 0;
 }
 
 static void rs_close(struct tty_struct *tty, struct file * filp)
 {
-       spin_lock_bh(&timer_lock);
        if (tty->count == 1)
                del_timer_sync(&serial_timer);
-       spin_unlock_bh(&timer_lock);
 }
 
 
@@ -73,8 +68,6 @@ static void rs_poll(struct timer_list *unused)
        int rd = 1;
        unsigned char c;
 
-       spin_lock(&timer_lock);
-
        while (simc_poll(0)) {
                rd = simc_read(0, &c, 1);
                if (rd <= 0)
@@ -87,7 +80,6 @@ static void rs_poll(struct timer_list *unused)
                tty_flip_buffer_push(port);
        if (rd)
                mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
-       spin_unlock(&timer_lock);
 }
 
 
index cdd7b29..4259125 100644 (file)
@@ -1598,7 +1598,7 @@ EXPORT_SYMBOL(bio_split);
 void bio_trim(struct bio *bio, sector_t offset, sector_t size)
 {
        if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS ||
-                        offset + size > bio->bi_iter.bi_size))
+                        offset + size > bio_sectors(bio)))
                return;
 
        size <<= 9;
index ed3ed86..c4370d2 100644 (file)
@@ -794,7 +794,8 @@ bool blk_update_request(struct request *req, blk_status_t error,
 #endif
 
        if (unlikely(error && !blk_rq_is_passthrough(req) &&
-                    !(req->rq_flags & RQF_QUIET))) {
+                    !(req->rq_flags & RQF_QUIET)) &&
+                    !test_bit(GD_DEAD, &req->q->disk->state)) {
                blk_print_req_error(req, error);
                trace_block_rq_error(req, error, nr_bytes);
        }
index 4a86340..f8703db 100644 (file)
@@ -629,7 +629,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
                return compat_put_long(argp,
                        (bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
        case BLKGETSIZE:
-               if (bdev_nr_sectors(bdev) > ~0UL)
+               if (bdev_nr_sectors(bdev) > ~(compat_ulong_t)0)
                        return -EFBIG;
                return compat_put_ulong(argp, bdev_nr_sectors(bdev));
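The BLKGETSIZE fix above is purely about integer width: on a 64-bit kernel ~0UL is 2^64 - 1, so the old check could never fire for a 32-bit caller, while ~(compat_ulong_t)0 is the 2^32 - 1 limit the compat ioctl can actually return. A standalone sketch of the arithmetic, assuming an LP64 host and a 32-bit compat_ulong_t as on x86-64:

/*
 * Shows why the old bound never triggered: a sector count between 2^32 and
 * 2^64 - 1 passes a ~0UL check on a 64-bit kernel, yet does not fit in the
 * 32-bit value handed back to a compat caller.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t compat_ulong_t;        /* assumption: 32-bit compat ABI */

int main(void)
{
        unsigned long nr_sectors = 1UL << 33;   /* 4 TiB disk, 512-byte sectors */

        printf("old bound  ~0UL               = %#lx -> %s\n", ~0UL,
               nr_sectors > ~0UL ? "-EFBIG" : "passes (would truncate!)");
        printf("new bound  ~(compat_ulong_t)0 = %#x -> %s\n",
               ~(compat_ulong_t)0,
               nr_sectors > ~(compat_ulong_t)0 ? "-EFBIG" : "passes");
        return 0;
}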
 
index 0c5a519..014ccb0 100644 (file)
@@ -77,6 +77,8 @@ static int marvell_cable_detect(struct ata_port *ap)
        switch(ap->port_no)
        {
        case 0:
+               if (!ap->ioaddr.bmdma_addr)
+                       return ATA_CBL_PATA_UNK;
                if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1)
                        return ATA_CBL_PATA40;
                return ATA_CBL_PATA80;
index af6bea5..3fc3b59 100644 (file)
@@ -296,6 +296,7 @@ int driver_deferred_probe_check_state(struct device *dev)
 
        return -EPROBE_DEFER;
 }
+EXPORT_SYMBOL_GPL(driver_deferred_probe_check_state);
 
 static void deferred_probe_timeout_work_func(struct work_struct *work)
 {
index 05b1120..c441a49 100644 (file)
@@ -1600,7 +1600,7 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
         * Only fake timeouts need to execute blk_mq_complete_request() here.
         */
        cmd->error = BLK_STS_TIMEOUT;
-       if (cmd->fake_timeout)
+       if (cmd->fake_timeout || hctx->type == HCTX_TYPE_POLL)
                blk_mq_complete_request(rq);
        return BLK_EH_DONE;
 }
index e15063d..3a293f9 100644 (file)
@@ -333,7 +333,7 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
        chacha20_block(chacha_state, first_block);
 
        memcpy(key, first_block, CHACHA_KEY_SIZE);
-       memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
+       memmove(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
        memzero_explicit(first_block, sizeof(first_block));
 }
 
@@ -523,8 +523,7 @@ EXPORT_SYMBOL(get_random_bytes);
 
 static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
 {
-       ssize_t ret = 0;
-       size_t len;
+       size_t len, left, ret = 0;
        u32 chacha_state[CHACHA_STATE_WORDS];
        u8 output[CHACHA_BLOCK_SIZE];
 
@@ -543,37 +542,40 @@ static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
         * the user directly.
         */
        if (nbytes <= CHACHA_KEY_SIZE) {
-               ret = copy_to_user(buf, &chacha_state[4], nbytes) ? -EFAULT : nbytes;
+               ret = nbytes - copy_to_user(buf, &chacha_state[4], nbytes);
                goto out_zero_chacha;
        }
 
-       do {
+       for (;;) {
                chacha20_block(chacha_state, output);
                if (unlikely(chacha_state[12] == 0))
                        ++chacha_state[13];
 
                len = min_t(size_t, nbytes, CHACHA_BLOCK_SIZE);
-               if (copy_to_user(buf, output, len)) {
-                       ret = -EFAULT;
+               left = copy_to_user(buf, output, len);
+               if (left) {
+                       ret += len - left;
                        break;
                }
 
-               nbytes -= len;
                buf += len;
                ret += len;
+               nbytes -= len;
+               if (!nbytes)
+                       break;
 
                BUILD_BUG_ON(PAGE_SIZE % CHACHA_BLOCK_SIZE != 0);
-               if (!(ret % PAGE_SIZE) && nbytes) {
+               if (ret % PAGE_SIZE == 0) {
                        if (signal_pending(current))
                                break;
                        cond_resched();
                }
-       } while (nbytes);
+       }
 
        memzero_explicit(output, sizeof(output));
 out_zero_chacha:
        memzero_explicit(chacha_state, sizeof(chacha_state));
-       return ret;
+       return ret ? ret : -EFAULT;
 }
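With the rewritten loop above, a fault partway through the copy makes the read return the number of bytes already delivered instead of a blanket -EFAULT, matching the usual short-read contract of read(2). Independent of this patch, callers of getrandom(2) should already cope with short counts on large requests; a generic retry wrapper, not taken from this diff, might look like:

/*
 * Fill buf with len random bytes, retrying on short reads and EINTR.
 * Uses the getrandom(2) wrapper available in glibc >= 2.25.
 */
#include <errno.h>
#include <stddef.h>
#include <sys/random.h>
#include <sys/types.h>

int fill_random(void *buf, size_t len)
{
        unsigned char *p = buf;

        while (len) {
                ssize_t n = getrandom(p, len, 0);

                if (n < 0) {
                        if (errno == EINTR)
                                continue;
                        return -1;      /* caller inspects errno */
                }
                p += n;
                len -= n;
        }
        return 0;
}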
 
 /*
@@ -1016,7 +1018,7 @@ int __init rand_initialize(void)
  */
 void add_device_randomness(const void *buf, size_t size)
 {
-       cycles_t cycles = random_get_entropy();
+       unsigned long cycles = random_get_entropy();
        unsigned long flags, now = jiffies;
 
        if (crng_init == 0 && size)
@@ -1047,8 +1049,7 @@ struct timer_rand_state {
  */
 static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
 {
-       cycles_t cycles = random_get_entropy();
-       unsigned long flags, now = jiffies;
+       unsigned long cycles = random_get_entropy(), now = jiffies, flags;
        long delta, delta2, delta3;
 
        spin_lock_irqsave(&input_pool.lock, flags);
@@ -1337,8 +1338,7 @@ static void mix_interrupt_randomness(struct work_struct *work)
 void add_interrupt_randomness(int irq)
 {
        enum { MIX_INFLIGHT = 1U << 31 };
-       cycles_t cycles = random_get_entropy();
-       unsigned long now = jiffies;
+       unsigned long cycles = random_get_entropy(), now = jiffies;
        struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
        struct pt_regs *regs = get_irq_regs();
        unsigned int new_count;
@@ -1351,16 +1351,12 @@ void add_interrupt_randomness(int irq)
        if (cycles == 0)
                cycles = get_reg(fast_pool, regs);
 
-       if (sizeof(cycles) == 8)
+       if (sizeof(unsigned long) == 8) {
                irq_data.u64[0] = cycles ^ rol64(now, 32) ^ irq;
-       else {
+               irq_data.u64[1] = regs ? instruction_pointer(regs) : _RET_IP_;
+       } else {
                irq_data.u32[0] = cycles ^ irq;
                irq_data.u32[1] = now;
-       }
-
-       if (sizeof(unsigned long) == 8)
-               irq_data.u64[1] = regs ? instruction_pointer(regs) : _RET_IP_;
-       else {
                irq_data.u32[2] = regs ? instruction_pointer(regs) : _RET_IP_;
                irq_data.u32[3] = get_reg(fast_pool, regs);
        }
@@ -1407,7 +1403,7 @@ static void entropy_timer(struct timer_list *t)
 static void try_to_generate_entropy(void)
 {
        struct {
-               cycles_t cycles;
+               unsigned long cycles;
                struct timer_list timer;
        } stack;
 
index b459eda..5c852e6 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/pm_runtime.h>
 #include <asm/cpuidle.h>
 #include <asm/sbi.h>
+#include <asm/smp.h>
 #include <asm/suspend.h>
 
 #include "dt_idle_states.h"
index 1476156..def564d 100644 (file)
@@ -1453,7 +1453,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 {
        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
        struct at_xdmac         *atxdmac = to_at_xdmac(atchan->chan.device);
-       struct at_xdmac_desc    *desc, *_desc;
+       struct at_xdmac_desc    *desc, *_desc, *iter;
        struct list_head        *descs_list;
        enum dma_status         ret;
        int                     residue, retry;
@@ -1568,11 +1568,13 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
         * microblock.
         */
        descs_list = &desc->descs_list;
-       list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
-               dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
-               residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
-               if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
+       list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
+               dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
+               residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
+               if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
+                       desc = iter;
                        break;
+               }
        }
        residue += cur_ubc << dwidth;
 
index 329fc2e..33bc1e6 100644 (file)
@@ -414,14 +414,18 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
                SET_CH_32(dw, chan->dir, chan->id, ch_control1,
                          (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
                /* Linked list */
+
                #ifdef CONFIG_64BIT
-                       SET_CH_64(dw, chan->dir, chan->id, llp.reg,
-                                 chunk->ll_region.paddr);
+               /* llp is not aligned on 64bit -> keep 32bit accesses */
+               SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
+                         lower_32_bits(chunk->ll_region.paddr));
+               SET_CH_32(dw, chan->dir, chan->id, llp.msb,
+                         upper_32_bits(chunk->ll_region.paddr));
                #else /* CONFIG_64BIT */
-                       SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
-                                 lower_32_bits(chunk->ll_region.paddr));
-                       SET_CH_32(dw, chan->dir, chan->id, llp.msb,
-                                 upper_32_bits(chunk->ll_region.paddr));
+               SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
+                         lower_32_bits(chunk->ll_region.paddr));
+               SET_CH_32(dw, chan->dir, chan->id, llp.msb,
+                         upper_32_bits(chunk->ll_region.paddr));
                #endif /* CONFIG_64BIT */
        }
        /* Doorbell */
index 3061fe8..f652da6 100644 (file)
@@ -373,7 +373,6 @@ static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
 {
        lockdep_assert_held(&wq->wq_lock);
 
-       idxd_wq_disable_cleanup(wq);
        wq->size = 0;
        wq->group = NULL;
 }
@@ -701,14 +700,17 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
 
                if (wq->state == IDXD_WQ_ENABLED) {
                        idxd_wq_disable_cleanup(wq);
-                       idxd_wq_device_reset_cleanup(wq);
                        wq->state = IDXD_WQ_DISABLED;
                }
+               idxd_wq_device_reset_cleanup(wq);
        }
 }
 
 void idxd_device_clear_state(struct idxd_device *idxd)
 {
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return;
+
        idxd_groups_clear_state(idxd);
        idxd_engines_clear_state(idxd);
        idxd_device_wqs_clear_state(idxd);
index e289fd4..c01db23 100644 (file)
@@ -150,14 +150,15 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
  */
 int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc)
 {
-       int rc, retries = 0;
+       unsigned int retries = wq->enqcmds_retries;
+       int rc;
 
        do {
                rc = enqcmds(portal, desc);
                if (rc == 0)
                        break;
                cpu_relax();
-       } while (retries++ < wq->enqcmds_retries);
+       } while (retries--);
 
        return rc;
 }
index 7e19ab9..dfd5496 100644 (file)
@@ -905,6 +905,9 @@ static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attr
        u64 xfer_size;
        int rc;
 
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+
        if (wq->state != IDXD_WQ_DISABLED)
                return -EPERM;
 
@@ -939,6 +942,9 @@ static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribu
        u64 batch_size;
        int rc;
 
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+
        if (wq->state != IDXD_WQ_DISABLED)
                return -EPERM;
 
index 70c0aa9..6196a7b 100644 (file)
@@ -198,12 +198,12 @@ struct sdma_script_start_addrs {
        s32 per_2_firi_addr;
        s32 mcu_2_firi_addr;
        s32 uart_2_per_addr;
-       s32 uart_2_mcu_ram_addr;
+       s32 uart_2_mcu_addr;
        s32 per_2_app_addr;
        s32 mcu_2_app_addr;
        s32 per_2_per_addr;
        s32 uartsh_2_per_addr;
-       s32 uartsh_2_mcu_ram_addr;
+       s32 uartsh_2_mcu_addr;
        s32 per_2_shp_addr;
        s32 mcu_2_shp_addr;
        s32 ata_2_mcu_addr;
@@ -232,8 +232,8 @@ struct sdma_script_start_addrs {
        s32 mcu_2_ecspi_addr;
        s32 mcu_2_sai_addr;
        s32 sai_2_mcu_addr;
-       s32 uart_2_mcu_addr;
-       s32 uartsh_2_mcu_addr;
+       s32 uart_2_mcu_rom_addr;
+       s32 uartsh_2_mcu_rom_addr;
        /* End of v3 array */
        s32 mcu_2_zqspi_addr;
        /* End of v4 array */
@@ -1796,17 +1796,17 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
                        saddr_arr[i] = addr_arr[i];
 
        /*
-        * get uart_2_mcu_addr/uartsh_2_mcu_addr rom script specially because
-        * they are now replaced by uart_2_mcu_ram_addr/uartsh_2_mcu_ram_addr
-        * to be compatible with legacy freescale/nxp sdma firmware, and they
-        * are located in the bottom part of sdma_script_start_addrs which are
-        * beyond the SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1.
+        * For compatibility with the NXP internal legacy kernel before 4.19,
+        * which uses the uart ram script, and the mainline kernel, which uses
+        * the uart rom script, both uart ram/rom scripts are present in newer
+        * sdma firmware. Use the rom versions if they are present (V3 or newer).
         */
-       if (addr->uart_2_mcu_addr)
-               sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_addr;
-       if (addr->uartsh_2_mcu_addr)
-               sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_addr;
-
+       if (sdma->script_number >= SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3) {
+               if (addr->uart_2_mcu_rom_addr)
+                       sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_rom_addr;
+               if (addr->uartsh_2_mcu_rom_addr)
+                       sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_rom_addr;
+       }
 }
 
 static void sdma_load_firmware(const struct firmware *fw, void *context)
@@ -1885,7 +1885,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
        u32 reg, val, shift, num_map, i;
        int ret = 0;
 
-       if (IS_ERR(np) || IS_ERR(gpr_np))
+       if (IS_ERR(np) || !gpr_np)
                goto out;
 
        event_remap = of_find_property(np, propname, NULL);
@@ -1933,7 +1933,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
        }
 
 out:
-       if (!IS_ERR(gpr_np))
+       if (gpr_np)
                of_node_put(gpr_np);
 
        return ret;
index 375e7e6..a1517ef 100644 (file)
@@ -274,7 +274,7 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
        unsigned int status;
        int ret;
 
-       ret = pm_runtime_get_sync(mtkd->ddev.dev);
+       ret = pm_runtime_resume_and_get(mtkd->ddev.dev);
        if (ret < 0) {
                pm_runtime_put_noidle(chan->device->dev);
                return ret;
@@ -288,18 +288,21 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
        ret = readx_poll_timeout(readl, c->base + VFF_EN,
                          status, !status, 10, 100);
        if (ret)
-               return ret;
+               goto err_pm;
 
        ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
                          IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
        if (ret < 0) {
                dev_err(chan->device->dev, "Can't request dma IRQ\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_pm;
        }
 
        if (mtkd->support_33bits)
                mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);
 
+err_pm:
+       pm_runtime_put_noidle(mtkd->ddev.dev);
        return ret;
 }
 
index f05ff02..40b1abe 100644 (file)
 #define ECC_STAT_CECNT_SHIFT           8
 #define ECC_STAT_BITNUM_MASK           0x7F
 
+/* ECC error count register definitions */
+#define ECC_ERRCNT_UECNT_MASK          0xFFFF0000
+#define ECC_ERRCNT_UECNT_SHIFT         16
+#define ECC_ERRCNT_CECNT_MASK          0xFFFF
+
 /* DDR QOS Interrupt register definitions */
 #define DDR_QOS_IRQ_STAT_OFST          0x20200
 #define DDR_QOSUE_MASK                 0x4
@@ -423,15 +428,16 @@ static int zynqmp_get_error_info(struct synps_edac_priv *priv)
        base = priv->baseaddr;
        p = &priv->stat;
 
+       regval = readl(base + ECC_ERRCNT_OFST);
+       p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
+       p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
+       if (!p->ce_cnt)
+               goto ue_err;
+
        regval = readl(base + ECC_STAT_OFST);
        if (!regval)
                return 1;
 
-       p->ce_cnt = (regval & ECC_STAT_CECNT_MASK) >> ECC_STAT_CECNT_SHIFT;
-       p->ue_cnt = (regval & ECC_STAT_UECNT_MASK) >> ECC_STAT_UECNT_SHIFT;
-       if (!p->ce_cnt)
-               goto ue_err;
-
        p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);
 
        regval = readl(base + ECC_CEADDR0_OFST);
index cf6fed6..45600ac 100644 (file)
@@ -49,7 +49,7 @@ struct scmi_msg_resp_clock_describe_rates {
        struct {
                __le32 value_low;
                __le32 value_high;
-       } rate[0];
+       } rate[];
 #define RATE_TO_U64(X)         \
 ({                             \
        typeof(X) x = (X);      \
@@ -210,7 +210,8 @@ scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
 
        if (rate_discrete && rate) {
                clk->list.num_rates = tot_rate_cnt;
-               sort(rate, tot_rate_cnt, sizeof(*rate), rate_cmp_func, NULL);
+               sort(clk->list.rates, tot_rate_cnt, sizeof(*rate),
+                    rate_cmp_func, NULL);
        }
 
        clk->rate_discrete = rate_discrete;
index 4611830..e17c656 100644 (file)
@@ -679,7 +679,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
 
        xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
        if (IS_ERR(xfer)) {
-               scmi_clear_channel(info, cinfo);
+               if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
+                       scmi_clear_channel(info, cinfo);
                return;
        }
 
index 734f1ee..8302a2b 100644 (file)
@@ -405,8 +405,8 @@ static int scmi_optee_chan_free(int id, void *p, void *data)
        return 0;
 }
 
-static struct scmi_shared_mem *get_channel_shm(struct scmi_optee_channel *chan,
-                                              struct scmi_xfer *xfer)
+static struct scmi_shared_mem __iomem *
+get_channel_shm(struct scmi_optee_channel *chan, struct scmi_xfer *xfer)
 {
        if (!chan)
                return NULL;
@@ -419,7 +419,7 @@ static int scmi_optee_send_message(struct scmi_chan_info *cinfo,
                                   struct scmi_xfer *xfer)
 {
        struct scmi_optee_channel *channel = cinfo->transport_info;
-       struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer);
+       struct scmi_shared_mem __iomem *shmem = get_channel_shm(channel, xfer);
        int ret;
 
        mutex_lock(&channel->mu);
@@ -436,7 +436,7 @@ static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo,
                                      struct scmi_xfer *xfer)
 {
        struct scmi_optee_channel *channel = cinfo->transport_info;
-       struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer);
+       struct scmi_shared_mem __iomem *shmem = get_channel_shm(channel, xfer);
 
        shmem_fetch_response(shmem, xfer);
 }
index e48108e..7dad6f5 100644 (file)
@@ -955,8 +955,7 @@ static int cs_dsp_create_control(struct cs_dsp *dsp,
        ctl->alg_region = *alg_region;
        if (subname && dsp->fw_ver >= 2) {
                ctl->subname_len = subname_len;
-               ctl->subname = kmemdup(subname,
-                                      strlen(subname) + 1, GFP_KERNEL);
+               ctl->subname = kasprintf(GFP_KERNEL, "%.*s", subname_len, subname);
                if (!ctl->subname) {
                        ret = -ENOMEM;
                        goto err_ctl;
index 8e5d879..41c31b1 100644 (file)
@@ -134,7 +134,7 @@ static int gpio_sim_get_multiple(struct gpio_chip *gc,
        struct gpio_sim_chip *chip = gpiochip_get_data(gc);
 
        mutex_lock(&chip->lock);
-       bitmap_copy(bits, chip->value_map, gc->ngpio);
+       bitmap_replace(bits, bits, chip->value_map, mask, gc->ngpio);
        mutex_unlock(&chip->lock);
 
        return 0;
@@ -146,7 +146,7 @@ static void gpio_sim_set_multiple(struct gpio_chip *gc,
        struct gpio_sim_chip *chip = gpiochip_get_data(gc);
 
        mutex_lock(&chip->lock);
-       bitmap_copy(chip->value_map, bits, gc->ngpio);
+       bitmap_replace(chip->value_map, chip->value_map, bits, mask, gc->ngpio);
        mutex_unlock(&chip->lock);
 }
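Switching from bitmap_copy() to bitmap_replace() matters because the get/set_multiple callbacks receive a mask that may cover only some of the chip's lines, and bits outside that mask must be preserved. bitmap_replace(dst, old, new, mask, nbits) computes dst = (old & ~mask) | (new & mask); a word-sized, plain C illustration of that identity (not kernel code) follows.

/*
 * Only bits selected by the mask are taken from the new value; the rest
 * are kept from the old one, which is exactly what the simulated GPIO
 * chip needs so that unaddressed lines keep their state.
 */
#include <stdio.h>

static unsigned long replace_word(unsigned long old, unsigned long new,
                                  unsigned long mask)
{
        return (old & ~mask) | (new & mask);
}

int main(void)
{
        unsigned long value_map = 0xA;  /* current line values:   1010 */
        unsigned long bits      = 0x5;  /* caller-requested bits: 0101 */
        unsigned long mask      = 0x3;  /* only lines 0 and 1 addressed */

        /*
         * A plain copy would yield 0101 and clobber lines 2-3; the replace
         * keeps them: (1010 & ~0011) | (0101 & 0011) = 1001, i.e. 0x9.
         */
        printf("result = %#lx\n", replace_word(value_map, bits, mask));
        return 0;
}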
 
index a5495ad..c2523ac 100644 (file)
@@ -108,7 +108,7 @@ static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
  * controller does not have GPIO chip registered at the moment. This is to
  * support probe deferral.
  */
-static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
+static struct gpio_desc *acpi_get_gpiod(char *path, unsigned int pin)
 {
        struct gpio_chip *chip;
        acpi_handle handle;
@@ -136,7 +136,7 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
  * as it is intended for use outside of the GPIO layer (in a similar fashion to
  * gpiod_get_index() for example) it also holds a reference to the GPIO device.
  */
-struct gpio_desc *acpi_get_and_request_gpiod(char *path, int pin, char *label)
+struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin, char *label)
 {
        struct gpio_desc *gpio;
        int ret;
@@ -317,11 +317,12 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
        return desc;
 }
 
-static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in)
+static bool acpi_gpio_in_ignore_list(const char *controller_in, unsigned int pin_in)
 {
        const char *controller, *pin_str;
-       int len, pin;
+       unsigned int pin;
        char *endp;
+       int len;
 
        controller = ignore_wake;
        while (controller) {
@@ -354,13 +355,13 @@ err:
 static bool acpi_gpio_irq_is_wake(struct device *parent,
                                  struct acpi_resource_gpio *agpio)
 {
-       int pin = agpio->pin_table[0];
+       unsigned int pin = agpio->pin_table[0];
 
        if (agpio->wake_capable != ACPI_WAKE_CAPABLE)
                return false;
 
        if (acpi_gpio_in_ignore_list(dev_name(parent), pin)) {
-               dev_info(parent, "Ignoring wakeup on pin %d\n", pin);
+               dev_info(parent, "Ignoring wakeup on pin %u\n", pin);
                return false;
        }
 
@@ -378,7 +379,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
        struct acpi_gpio_event *event;
        irq_handler_t handler = NULL;
        struct gpio_desc *desc;
-       int ret, pin, irq;
+       unsigned int pin;
+       int ret, irq;
 
        if (!acpi_gpio_get_irq_resource(ares, &agpio))
                return AE_OK;
@@ -387,8 +389,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
        pin = agpio->pin_table[0];
 
        if (pin <= 255) {
-               char ev_name[5];
-               sprintf(ev_name, "_%c%02hhX",
+               char ev_name[8];
+               sprintf(ev_name, "_%c%02X",
                        agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L',
                        pin);
                if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
@@ -1098,7 +1100,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
 
        length = min_t(u16, agpio->pin_table_length, pin_index + bits);
        for (i = pin_index; i < length; ++i) {
-               int pin = agpio->pin_table[i];
+               unsigned int pin = agpio->pin_table[i];
                struct acpi_gpio_connection *conn;
                struct gpio_desc *desc;
                bool found;
index 085348e..b769417 100644 (file)
@@ -1601,8 +1601,6 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
 
        gpiochip_set_irq_hooks(gc);
 
-       acpi_gpiochip_request_interrupts(gc);
-
        /*
         * Using barrier() here to prevent compiler from reordering
         * gc->irq.initialized before initialization of above
@@ -1612,6 +1610,8 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
 
        gc->irq.initialized = true;
 
+       acpi_gpiochip_request_interrupts(gc);
+
        return 0;
 }
 
index 970b065..d0d0ea5 100644 (file)
@@ -128,6 +128,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
                goto free_chunk;
        }
 
+       mutex_lock(&p->ctx->lock);
+
        /* skip guilty context job */
        if (atomic_read(&p->ctx->guilty) == 1) {
                ret = -ECANCELED;
@@ -709,6 +711,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
        dma_fence_put(parser->fence);
 
        if (parser->ctx) {
+               mutex_unlock(&parser->ctx->lock);
                amdgpu_ctx_put(parser->ctx);
        }
        if (parser->bo_list)
@@ -1157,6 +1160,9 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 {
        int i, r;
 
+       /* TODO: Investigate why we still need the context lock */
+       mutex_unlock(&p->ctx->lock);
+
        for (i = 0; i < p->nchunks; ++i) {
                struct amdgpu_cs_chunk *chunk;
 
@@ -1167,32 +1173,34 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
                case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
                        r = amdgpu_cs_process_fence_dep(p, chunk);
                        if (r)
-                               return r;
+                               goto out;
                        break;
                case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
                        r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
                        if (r)
-                               return r;
+                               goto out;
                        break;
                case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
                        r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
                        if (r)
-                               return r;
+                               goto out;
                        break;
                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
                        r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
                        if (r)
-                               return r;
+                               goto out;
                        break;
                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
                        r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
                        if (r)
-                               return r;
+                               goto out;
                        break;
                }
        }
 
-       return 0;
+out:
+       mutex_lock(&p->ctx->lock);
+       return r;
 }
 
 static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
@@ -1368,6 +1376,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                goto out;
 
        r = amdgpu_cs_submit(&parser, cs);
+
 out:
        amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
 
index 5981c7d..8f0e6d9 100644 (file)
@@ -237,6 +237,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 
        kref_init(&ctx->refcount);
        spin_lock_init(&ctx->ring_lock);
+       mutex_init(&ctx->lock);
 
        ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
        ctx->reset_counter_query = ctx->reset_counter;
@@ -357,6 +358,7 @@ static void amdgpu_ctx_fini(struct kref *ref)
                drm_dev_exit(idx);
        }
 
+       mutex_destroy(&ctx->lock);
        kfree(ctx);
 }
 
index d0cbfce..142f2f8 100644 (file)
@@ -49,6 +49,7 @@ struct amdgpu_ctx {
        bool                            preamble_presented;
        int32_t                         init_priority;
        int32_t                         override_priority;
+       struct mutex                    lock;
        atomic_t                        guilty;
        unsigned long                   ras_counter_ce;
        unsigned long                   ras_counter_ue;
index b03663f..29e9419 100644 (file)
@@ -2323,18 +2323,23 @@ static int amdgpu_pmops_suspend(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(drm_dev);
-       int r;
 
        if (amdgpu_acpi_is_s0ix_active(adev))
                adev->in_s0ix = true;
        else
                adev->in_s3 = true;
-       r = amdgpu_device_suspend(drm_dev, true);
-       if (r)
-               return r;
+       return amdgpu_device_suspend(drm_dev, true);
+}
+
+static int amdgpu_pmops_suspend_noirq(struct device *dev)
+{
+       struct drm_device *drm_dev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
        if (!adev->in_s0ix)
-               r = amdgpu_asic_reset(adev);
-       return r;
+               return amdgpu_asic_reset(adev);
+
+       return 0;
 }
 
 static int amdgpu_pmops_resume(struct device *dev)
@@ -2575,6 +2580,7 @@ static const struct dev_pm_ops amdgpu_pm_ops = {
        .prepare = amdgpu_pmops_prepare,
        .complete = amdgpu_pmops_complete,
        .suspend = amdgpu_pmops_suspend,
+       .suspend_noirq = amdgpu_pmops_suspend_noirq,
        .resume = amdgpu_pmops_resume,
        .freeze = amdgpu_pmops_freeze,
        .thaw = amdgpu_pmops_thaw,
index f99093f..a0ee828 100644 (file)
@@ -52,7 +52,7 @@
 #define FIRMWARE_ALDEBARAN     "amdgpu/aldebaran_vcn.bin"
 #define FIRMWARE_BEIGE_GOBY    "amdgpu/beige_goby_vcn.bin"
 #define FIRMWARE_YELLOW_CARP   "amdgpu/yellow_carp_vcn.bin"
-#define FIRMWARE_VCN_3_1_2     "amdgpu/vcn_3_1_2_vcn.bin"
+#define FIRMWARE_VCN_3_1_2     "amdgpu/vcn_3_1_2.bin"
 
 MODULE_FIRMWARE(FIRMWARE_RAVEN);
 MODULE_FIRMWARE(FIRMWARE_PICASSO);
index 46d4bf2..b8cfcc6 100644 (file)
@@ -1205,6 +1205,8 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
        { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
        /* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
        { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
+       /* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */
+       { 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 },
        { 0, 0, 0, 0, 0 },
 };
 
index dfba613..26feefb 100644 (file)
@@ -374,7 +374,7 @@ void dce_clock_read_ss_info(struct clk_mgr_internal *clk_mgr_dce)
                                clk_mgr_dce->dprefclk_ss_percentage =
                                                info.spread_spectrum_percentage;
                        }
-                       if (clk_mgr_dce->base.ctx->dc->debug.ignore_dpref_ss)
+                       if (clk_mgr_dce->base.ctx->dc->config.ignore_dpref_ss)
                                clk_mgr_dce->dprefclk_ss_percentage = 0;
                }
        }
index 702d00c..3121dd2 100644 (file)
@@ -686,8 +686,8 @@ void dcn316_clk_mgr_construct(
        clk_mgr->base.base.dprefclk_khz = dcn316_smu_get_dpref_clk(&clk_mgr->base);
        clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz;
        dce_clock_read_ss_info(&clk_mgr->base);
-       clk_mgr->base.dccg->ref_dtbclk_khz =
-       dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);
+       /*clk_mgr->base.dccg->ref_dtbclk_khz =
+       dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);*/
 
        clk_mgr->base.base.bw_params = &dcn316_bw_params;
 
index 77ef9d1..9e79f60 100644 (file)
@@ -340,6 +340,7 @@ struct dc_config {
        bool is_asymmetric_memory;
        bool is_single_rank_dimm;
        bool use_pipe_ctx_sync_logic;
+       bool ignore_dpref_ss;
 };
 
 enum visual_confirm {
@@ -729,7 +730,6 @@ struct dc_debug_options {
        bool apply_vendor_specific_lttpr_wa;
        bool extended_blank_optimization;
        union aux_wake_wa_options aux_wake_wa;
-       bool ignore_dpref_ss;
        uint8_t psr_power_use_phy_fsm;
 };
 
index 781334b..83fbea2 100644 (file)
@@ -2522,14 +2522,18 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
        struct mpc *mpc = dc->res_pool->mpc;
        struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
 
-       if (per_pixel_alpha)
-               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
-       else
-               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
-
        blnd_cfg.overlap_only = false;
        blnd_cfg.global_gain = 0xff;
 
+       if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
+               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+               blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+       } else if (per_pixel_alpha) {
+               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+       } else {
+               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+       }
+
        if (pipe_ctx->plane_state->global_alpha)
                blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
        else
index 4290eaf..b627c41 100644 (file)
@@ -2344,14 +2344,18 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
        struct mpc *mpc = dc->res_pool->mpc;
        struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
 
-       if (per_pixel_alpha)
-               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
-       else
-               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
-
        blnd_cfg.overlap_only = false;
        blnd_cfg.global_gain = 0xff;
 
+       if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
+               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+               blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+       } else if (per_pixel_alpha) {
+               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+       } else {
+               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+       }
+
        if (pipe_ctx->plane_state->global_alpha)
                blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
        else
index f4df344..9a2cfab 100644 (file)
@@ -214,29 +214,6 @@ int drm_of_encoder_active_endpoint(struct device_node *node,
 }
 EXPORT_SYMBOL_GPL(drm_of_encoder_active_endpoint);
 
-static int find_panel_or_bridge(struct device_node *node,
-                               struct drm_panel **panel,
-                               struct drm_bridge **bridge)
-{
-       if (panel) {
-               *panel = of_drm_find_panel(node);
-               if (!IS_ERR(*panel))
-                       return 0;
-
-               /* Clear the panel pointer in case of error. */
-               *panel = NULL;
-       }
-
-       /* No panel found yet, check for a bridge next. */
-       if (bridge) {
-               *bridge = of_drm_find_bridge(node);
-               if (*bridge)
-                       return 0;
-       }
-
-       return -EPROBE_DEFER;
-}
-
 /**
  * drm_of_find_panel_or_bridge - return connected panel or bridge device
  * @np: device tree node containing encoder output ports
@@ -259,44 +236,49 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
                                struct drm_panel **panel,
                                struct drm_bridge **bridge)
 {
-       struct device_node *node;
-       int ret;
+       int ret = -EPROBE_DEFER;
+       struct device_node *remote;
 
        if (!panel && !bridge)
                return -EINVAL;
-
        if (panel)
                *panel = NULL;
-       if (bridge)
-               *bridge = NULL;
-
-       /* Check for a graph on the device node first. */
-       if (of_graph_is_present(np)) {
-               node = of_graph_get_remote_node(np, port, endpoint);
-               if (node) {
-                       ret = find_panel_or_bridge(node, panel, bridge);
-                       of_node_put(node);
-
-                       if (!ret)
-                               return 0;
-               }
-       }
 
-       /* Otherwise check for any child node other than port/ports. */
-       for_each_available_child_of_node(np, node) {
-               if (of_node_name_eq(node, "port") ||
-                   of_node_name_eq(node, "ports"))
-                       continue;
+       /*
+        * of_graph_get_remote_node() produces a noisy error message if the
+        * port node isn't found, and the absence of the port is a legitimate
+        * case here, so first silently check whether a graph is present in
+        * the device-tree node.
+        */
+       if (!of_graph_is_present(np))
+               return -ENODEV;
 
-               ret = find_panel_or_bridge(node, panel, bridge);
-               of_node_put(node);
+       remote = of_graph_get_remote_node(np, port, endpoint);
+       if (!remote)
+               return -ENODEV;
+
+       if (panel) {
+               *panel = of_drm_find_panel(remote);
+               if (!IS_ERR(*panel))
+                       ret = 0;
+               else
+                       *panel = NULL;
+       }
+
+       /* No panel found yet, check for a bridge next. */
+       if (bridge) {
+               if (ret) {
+                       *bridge = of_drm_find_bridge(remote);
+                       if (*bridge)
+                               ret = 0;
+               } else {
+                       *bridge = NULL;
+               }
 
-               /* Stop at the first found occurrence. */
-               if (!ret)
-                       return 0;
        }
 
-       return -EPROBE_DEFER;
+       of_node_put(remote);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(drm_of_find_panel_or_bridge);
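For context, a minimal caller sketch showing how a driver typically consumes drm_of_find_panel_or_bridge(); the function and port/endpoint numbers here are invented for illustration and assume the usual <drm/drm_of.h>, <drm/drm_panel.h> and <drm/drm_bridge.h> includes:

	/* Hypothetical probe-time lookup; my_encoder_attach_output is an invented name. */
	static int my_encoder_attach_output(struct device *dev,
					    struct device_node *np,
					    struct drm_panel **panel,
					    struct drm_bridge **bridge)
	{
		int ret;

		/* Port 1, endpoint 0 are assumptions for this example. */
		ret = drm_of_find_panel_or_bridge(np, 1, 0, panel, bridge);
		if (ret == -ENODEV)
			dev_dbg(dev, "no panel or bridge described for this port\n");
		else if (ret == -EPROBE_DEFER)
			dev_dbg(dev, "remote device not bound yet, deferring probe\n");

		return ret;
	}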
 
index d667657..f868db8 100644 (file)
@@ -4383,13 +4383,20 @@ intel_dp_update_420(struct intel_dp *intel_dp)
 static void
 intel_dp_set_edid(struct intel_dp *intel_dp)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        struct intel_connector *connector = intel_dp->attached_connector;
        struct edid *edid;
+       bool vrr_capable;
 
        intel_dp_unset_edid(intel_dp);
        edid = intel_dp_get_edid(intel_dp);
        connector->detect_edid = edid;
 
+       vrr_capable = intel_vrr_is_capable(&connector->base);
+       drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n",
+                   connector->base.base.id, connector->base.name, str_yes_no(vrr_capable));
+       drm_connector_set_vrr_capable_property(&connector->base, vrr_capable);
+
        intel_dp_update_dfp(intel_dp, edid);
        intel_dp_update_420(intel_dp);
 
@@ -4422,6 +4429,9 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
 
        intel_dp->dfp.ycbcr_444_to_420 = false;
        connector->base.ycbcr_420_allowed = false;
+
+       drm_connector_set_vrr_capable_property(&connector->base,
+                                              false);
 }
 
 static int
@@ -4572,14 +4582,9 @@ static int intel_dp_get_modes(struct drm_connector *connector)
        int num_modes = 0;
 
        edid = intel_connector->detect_edid;
-       if (edid) {
+       if (edid)
                num_modes = intel_connector_update_modes(connector, edid);
 
-               if (intel_vrr_is_capable(connector))
-                       drm_connector_set_vrr_capable_property(connector,
-                                                              true);
-       }
-
        /* Also add fixed mode, which may or may not be present in EDID */
        if (intel_dp_is_edp(intel_attached_dp(intel_connector)) &&
            intel_connector->panel.fixed_mode) {
index bff8c2d..6c9e6e7 100644 (file)
@@ -887,6 +887,20 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
                return false;
        }
 
+       /* Wa_16011303918:adl-p */
+       if (crtc_state->vrr.enable &&
+           IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
+               drm_dbg_kms(&dev_priv->drm,
+                           "PSR2 not enabled, not compatible with HW stepping + VRR\n");
+               return false;
+       }
+
+       if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
+               drm_dbg_kms(&dev_priv->drm,
+                           "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
+               return false;
+       }
+
        if (HAS_PSR2_SEL_FETCH(dev_priv)) {
                if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
                    !HAS_PSR_HW_TRACKING(dev_priv)) {
@@ -900,12 +914,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
        if (!crtc_state->enable_psr2_sel_fetch &&
            IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
                drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported this Display stepping\n");
-               return false;
+               goto unsupported;
        }
 
        if (!psr2_granularity_check(intel_dp, crtc_state)) {
                drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
-               return false;
+               goto unsupported;
        }
 
        if (!crtc_state->enable_psr2_sel_fetch &&
@@ -914,25 +928,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
                            "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
                            crtc_hdisplay, crtc_vdisplay,
                            psr_max_h, psr_max_v);
-               return false;
-       }
-
-       if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
-               drm_dbg_kms(&dev_priv->drm,
-                           "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
-               return false;
-       }
-
-       /* Wa_16011303918:adl-p */
-       if (crtc_state->vrr.enable &&
-           IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
-               drm_dbg_kms(&dev_priv->drm,
-                           "PSR2 not enabled, not compatible with HW stepping + VRR\n");
-               return false;
+               goto unsupported;
        }
 
        tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
        return true;
+
+unsupported:
+       crtc_state->enable_psr2_sel_fetch = false;
+       return false;
 }
 
 void intel_psr_compute_config(struct intel_dp *intel_dp,
index c3ea243..0c5c438 100644 (file)
@@ -70,7 +70,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
         * mmap ioctl is disallowed for all discrete platforms,
         * and for all platforms with GRAPHICS_VER > 12.
         */
-       if (IS_DGFX(i915) || GRAPHICS_VER(i915) > 12)
+       if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) > IP_VER(12, 0))
                return -EOPNOTSUPP;
 
        if (args->flags & ~(I915_MMAP_WC))
index 83c31b2..ccc4fcf 100644 (file)
@@ -1742,7 +1742,7 @@ a6xx_create_private_address_space(struct msm_gpu *gpu)
                return ERR_CAST(mmu);
 
        return msm_gem_address_space_create(mmu,
-               "gpu", 0x100000000ULL, 0x1ffffffffULL);
+               "gpu", 0x100000000ULL, SZ_4G);
 }
 
 static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
index 89cfd84..8706bcd 100644 (file)
@@ -599,43 +599,91 @@ static const struct of_device_id dt_match[] = {
        {}
 };
 
-#ifdef CONFIG_PM
-static int adreno_resume(struct device *dev)
+static int adreno_runtime_resume(struct device *dev)
 {
        struct msm_gpu *gpu = dev_to_gpu(dev);
 
        return gpu->funcs->pm_resume(gpu);
 }
 
-static int active_submits(struct msm_gpu *gpu)
+static int adreno_runtime_suspend(struct device *dev)
 {
-       int active_submits;
-       mutex_lock(&gpu->active_lock);
-       active_submits = gpu->active_submits;
-       mutex_unlock(&gpu->active_lock);
-       return active_submits;
+       struct msm_gpu *gpu = dev_to_gpu(dev);
+
+       /*
+        * We should be holding a runpm ref, which will prevent
+        * runtime suspend.  In the system suspend path, we've
+        * already waited for active jobs to complete.
+        */
+       WARN_ON_ONCE(gpu->active_submits);
+
+       return gpu->funcs->pm_suspend(gpu);
+}
+
+static void suspend_scheduler(struct msm_gpu *gpu)
+{
+       int i;
+
+       /*
+        * Shut down the scheduler before we force suspend, so that
+        * suspend isn't racing with scheduler kthread feeding us
+        * more work.
+        *
+        * Note, we just want to park the thread, and let any jobs
+        * that are already on the hw queue complete normally, as
+        * opposed to the drm_sched_stop() path used for handling
+        * faulting/timed-out jobs.  We can't really cancel any jobs
+        * already on the hw queue without racing with the GPU.
+        */
+       for (i = 0; i < gpu->nr_rings; i++) {
+               struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
+               kthread_park(sched->thread);
+       }
 }
 
-static int adreno_suspend(struct device *dev)
+static void resume_scheduler(struct msm_gpu *gpu)
+{
+       int i;
+
+       for (i = 0; i < gpu->nr_rings; i++) {
+               struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
+               kthread_unpark(sched->thread);
+       }
+}
+
+static int adreno_system_suspend(struct device *dev)
 {
        struct msm_gpu *gpu = dev_to_gpu(dev);
-       int remaining;
+       int remaining, ret;
+
+       suspend_scheduler(gpu);
 
        remaining = wait_event_timeout(gpu->retire_event,
-                                      active_submits(gpu) == 0,
+                                      gpu->active_submits == 0,
                                       msecs_to_jiffies(1000));
        if (remaining == 0) {
                dev_err(dev, "Timeout waiting for GPU to suspend\n");
-               return -EBUSY;
+               ret = -EBUSY;
+               goto out;
        }
 
-       return gpu->funcs->pm_suspend(gpu);
+       ret = pm_runtime_force_suspend(dev);
+out:
+       if (ret)
+               resume_scheduler(gpu);
+
+       return ret;
+}
+
+static int adreno_system_resume(struct device *dev)
+{
+       resume_scheduler(dev_to_gpu(dev));
+       return pm_runtime_force_resume(dev);
 }
-#endif
 
 static const struct dev_pm_ops adreno_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
-       SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL)
+       SYSTEM_SLEEP_PM_OPS(adreno_system_suspend, adreno_system_resume)
+       RUNTIME_PM_OPS(adreno_runtime_suspend, adreno_runtime_resume, NULL)
 };
 
 static struct platform_driver adreno_driver = {
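Condensed as a pattern, the system-suspend flow added above is: park the scheduler kthreads, wait for in-flight submits to retire, force runtime suspend, and unpark again if anything fails. A minimal sketch of that ordering, reusing the helpers from the hunk above (not a drop-in replacement):

	/* Sketch only; mirrors the ordering of adreno_system_suspend() above. */
	static int example_system_suspend(struct device *dev)
	{
		struct msm_gpu *gpu = dev_to_gpu(dev);
		int ret;

		suspend_scheduler(gpu);			/* park scheduler kthreads first */

		if (!wait_event_timeout(gpu->retire_event, gpu->active_submits == 0,
					msecs_to_jiffies(1000)))
			ret = -EBUSY;			/* GPU still busy, give up */
		else
			ret = pm_runtime_force_suspend(dev);

		if (ret)
			resume_scheduler(gpu);		/* undo the park on any failure */
		return ret;
	}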
index c515b7c..c61b5b2 100644 (file)
@@ -54,87 +54,87 @@ struct dpu_intr_reg {
  * When making changes be sure to sync with dpu_hw_intr_reg
  */
 static const struct dpu_intr_reg dpu_intr_set[] = {
-       {
+       [MDP_SSPP_TOP0_INTR] = {
                MDP_SSPP_TOP0_OFF+INTR_CLEAR,
                MDP_SSPP_TOP0_OFF+INTR_EN,
                MDP_SSPP_TOP0_OFF+INTR_STATUS
        },
-       {
+       [MDP_SSPP_TOP0_INTR2] = {
                MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
                MDP_SSPP_TOP0_OFF+INTR2_EN,
                MDP_SSPP_TOP0_OFF+INTR2_STATUS
        },
-       {
+       [MDP_SSPP_TOP0_HIST_INTR] = {
                MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
                MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
                MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
        },
-       {
+       [MDP_INTF0_INTR] = {
                MDP_INTF_0_OFF+INTF_INTR_CLEAR,
                MDP_INTF_0_OFF+INTF_INTR_EN,
                MDP_INTF_0_OFF+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF1_INTR] = {
                MDP_INTF_1_OFF+INTF_INTR_CLEAR,
                MDP_INTF_1_OFF+INTF_INTR_EN,
                MDP_INTF_1_OFF+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF2_INTR] = {
                MDP_INTF_2_OFF+INTF_INTR_CLEAR,
                MDP_INTF_2_OFF+INTF_INTR_EN,
                MDP_INTF_2_OFF+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF3_INTR] = {
                MDP_INTF_3_OFF+INTF_INTR_CLEAR,
                MDP_INTF_3_OFF+INTF_INTR_EN,
                MDP_INTF_3_OFF+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF4_INTR] = {
                MDP_INTF_4_OFF+INTF_INTR_CLEAR,
                MDP_INTF_4_OFF+INTF_INTR_EN,
                MDP_INTF_4_OFF+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF5_INTR] = {
                MDP_INTF_5_OFF+INTF_INTR_CLEAR,
                MDP_INTF_5_OFF+INTF_INTR_EN,
                MDP_INTF_5_OFF+INTF_INTR_STATUS
        },
-       {
+       [MDP_AD4_0_INTR] = {
                MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
                MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
                MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
        },
-       {
+       [MDP_AD4_1_INTR] = {
                MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
                MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
                MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
        },
-       {
+       [MDP_INTF0_7xxx_INTR] = {
                MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_CLEAR,
                MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_EN,
                MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF1_7xxx_INTR] = {
                MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_CLEAR,
                MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_EN,
                MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF2_7xxx_INTR] = {
                MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_CLEAR,
                MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_EN,
                MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF3_7xxx_INTR] = {
                MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_CLEAR,
                MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_EN,
                MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF4_7xxx_INTR] = {
                MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_CLEAR,
                MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_EN,
                MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF5_7xxx_INTR] = {
                MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_CLEAR,
                MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_EN,
                MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_STATUS
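The switch to designated initializers above ties each table entry to its enum index, so reordering or extending the enum can no longer silently misalign the register table. A small generic sketch of the idiom, with invented enum and struct names:

	/* Designated array initializers keep entries bound to their enum index. */
	enum demo_irq_block { DEMO_TOP, DEMO_INTF0, DEMO_INTF1, DEMO_MAX };

	struct demo_intr_reg {
		unsigned int clr_off;
		unsigned int en_off;
		unsigned int status_off;
	};

	static const struct demo_intr_reg demo_intr_set[DEMO_MAX] = {
		[DEMO_TOP]   = { 0x0000, 0x0004, 0x0008 },
		[DEMO_INTF0] = { 0x6a00, 0x6a04, 0x6a08 },
		[DEMO_INTF1] = { 0x6b00, 0x6b04, 0x6b08 },
	};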
index 1ee8246..c478d25 100644 (file)
@@ -98,7 +98,10 @@ static void mdp5_plane_reset(struct drm_plane *plane)
                __drm_atomic_helper_plane_destroy_state(plane->state);
 
        kfree(to_mdp5_plane_state(plane->state));
+       plane->state = NULL;
        mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
+       if (!mdp5_state)
+               return;
        __drm_atomic_helper_plane_reset(plane, &mdp5_state->base);
 }
 
index 5d2ff67..acfe1b3 100644 (file)
@@ -176,6 +176,8 @@ void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len,
        va_list va;
 
        new_blk = kzalloc(sizeof(struct msm_disp_state_block), GFP_KERNEL);
+       if (!new_blk)
+               return;
 
        va_start(va, fmt);
 
index 178b774..a42732b 100644 (file)
@@ -580,6 +580,12 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
                        dp->dp_display.connector_type, state);
        mutex_unlock(&dp->event_mutex);
 
+       /*
+        * Add the fail-safe mode outside the event_mutex scope to avoid a
+        * potential circular lock with the DRM thread.
+        */
+       dp_panel_add_fail_safe_mode(dp->dp_display.connector);
+
        /* uevent will complete connection part */
        return 0;
 };
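The comment above describes a lock-ordering concern: mode_config.mutex (taken inside dp_panel_add_fail_safe_mode()) must not be acquired while event_mutex is held if another thread can take the two in the opposite order. A generic AB-BA sketch of the hazard and the fix, using two hypothetical locks rather than the driver's own:

	/* Hypothetical illustration of the inversion the hunk above avoids. */
	static DEFINE_MUTEX(lock_a);
	static DEFINE_MUTEX(lock_b);

	static void thread_one_bad(void)
	{
		mutex_lock(&lock_a);
		mutex_lock(&lock_b);	/* another thread takes b then a: deadlock risk */
		mutex_unlock(&lock_b);
		mutex_unlock(&lock_a);
	}

	static void thread_one_fixed(void)
	{
		mutex_lock(&lock_a);
		/* ... work that only needs lock_a ... */
		mutex_unlock(&lock_a);

		mutex_lock(&lock_b);	/* taken only after lock_a is dropped */
		/* ... work that only needs lock_b ... */
		mutex_unlock(&lock_b);
	}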
index f141872..26c3653 100644 (file)
@@ -151,6 +151,15 @@ static int dp_panel_update_modes(struct drm_connector *connector,
        return rc;
 }
 
+void dp_panel_add_fail_safe_mode(struct drm_connector *connector)
+{
+       /* fail safe edid */
+       mutex_lock(&connector->dev->mode_config.mutex);
+       if (drm_add_modes_noedid(connector, 640, 480))
+               drm_set_preferred_mode(connector, 640, 480);
+       mutex_unlock(&connector->dev->mode_config.mutex);
+}
+
 int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
        struct drm_connector *connector)
 {
@@ -207,16 +216,7 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
                        goto end;
                }
 
-               /* fail safe edid */
-               mutex_lock(&connector->dev->mode_config.mutex);
-               if (drm_add_modes_noedid(connector, 640, 480))
-                       drm_set_preferred_mode(connector, 640, 480);
-               mutex_unlock(&connector->dev->mode_config.mutex);
-       } else {
-               /* always add fail-safe mode as backup mode */
-               mutex_lock(&connector->dev->mode_config.mutex);
-               drm_add_modes_noedid(connector, 640, 480);
-               mutex_unlock(&connector->dev->mode_config.mutex);
+               dp_panel_add_fail_safe_mode(connector);
        }
 
        if (panel->aux_cfg_update_done) {
index 9023e5b..99739ea 100644 (file)
@@ -59,6 +59,7 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel);
 int dp_panel_deinit(struct dp_panel *dp_panel);
 int dp_panel_timing_cfg(struct dp_panel *dp_panel);
 void dp_panel_dump_regs(struct dp_panel *dp_panel);
+void dp_panel_add_fail_safe_mode(struct drm_connector *connector);
 int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
                struct drm_connector *connector);
 u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp,
index 0c1b7dd..9f6af0f 100644 (file)
@@ -638,7 +638,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
        return connector;
 
 fail:
-       connector->funcs->destroy(msm_dsi->connector);
+       connector->funcs->destroy(connector);
        return ERR_PTR(ret);
 }
 
index 02b9ae6..a4f6197 100644 (file)
@@ -926,6 +926,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
                                        get_pid_task(aspace->pid, PIDTYPE_PID);
                                if (task) {
                                        comm = kstrdup(task->comm, GFP_KERNEL);
+                                       put_task_struct(task);
                                } else {
                                        comm = NULL;
                                }
index 46029c5..145047e 100644 (file)
@@ -229,7 +229,7 @@ static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts,
 
        ret = i2c_smbus_write_byte_data(ts->i2c, reg, val);
        if (ret)
-               dev_err(&ts->dsi->dev, "I2C write failed: %d\n", ret);
+               dev_err(&ts->i2c->dev, "I2C write failed: %d\n", ret);
 }
 
 static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
@@ -265,7 +265,7 @@ static int rpi_touchscreen_noop(struct drm_panel *panel)
        return 0;
 }
 
-static int rpi_touchscreen_enable(struct drm_panel *panel)
+static int rpi_touchscreen_prepare(struct drm_panel *panel)
 {
        struct rpi_touchscreen *ts = panel_to_ts(panel);
        int i;
@@ -295,6 +295,13 @@ static int rpi_touchscreen_enable(struct drm_panel *panel)
        rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01);
        msleep(100);
 
+       return 0;
+}
+
+static int rpi_touchscreen_enable(struct drm_panel *panel)
+{
+       struct rpi_touchscreen *ts = panel_to_ts(panel);
+
        /* Turn on the backlight. */
        rpi_touchscreen_i2c_write(ts, REG_PWM, 255);
 
@@ -349,7 +356,7 @@ static int rpi_touchscreen_get_modes(struct drm_panel *panel,
 static const struct drm_panel_funcs rpi_touchscreen_funcs = {
        .disable = rpi_touchscreen_disable,
        .unprepare = rpi_touchscreen_noop,
-       .prepare = rpi_touchscreen_noop,
+       .prepare = rpi_touchscreen_prepare,
        .enable = rpi_touchscreen_enable,
        .get_modes = rpi_touchscreen_get_modes,
 };
index b991ba1..f63efd8 100644 (file)
@@ -96,7 +96,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
        struct dma_fence *f;
        int r = 0;
 
-       dma_resv_for_each_fence(&cursor, resv, shared, f) {
+       dma_resv_for_each_fence(&cursor, resv, !shared, f) {
                fence = to_radeon_fence(f);
                if (fence && fence->rdev == rdev)
                        radeon_sync_fence(sync, fence);
index de3424f..6cf2621 100644 (file)
@@ -2,6 +2,9 @@
 config DRM_VC4
        tristate "Broadcom VC4 Graphics"
        depends on ARCH_BCM || ARCH_BCM2835 || COMPILE_TEST
+       # Make sure this is not 'y' when RASPBERRYPI_FIRMWARE is 'm'. This can only
+       # happen when COMPILE_TEST=y, hence the added !RASPBERRYPI_FIRMWARE.
+       depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE)
        depends on DRM
        depends on SND && SND_SOC
        depends on COMMON_CLK
index 752f921..98308a1 100644 (file)
@@ -846,7 +846,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
        unsigned long phy_clock;
        int ret;
 
-       ret = pm_runtime_get_sync(dev);
+       ret = pm_runtime_resume_and_get(dev);
        if (ret) {
                DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->variant->port);
                return;
index 31aecc4..04c8a37 100644 (file)
@@ -46,6 +46,21 @@ vmw_buffer_object(struct ttm_buffer_object *bo)
        return container_of(bo, struct vmw_buffer_object, base);
 }
 
+/**
+ * bo_is_vmw - check if the buffer object is a &vmw_buffer_object
+ * @bo: ttm buffer object to be checked
+ *
+ * Uses destroy function associated with the object to determine if this is
+ * a &vmw_buffer_object.
+ *
+ * Returns:
+ * true if the object is of &vmw_buffer_object type, false if not.
+ */
+static bool bo_is_vmw(struct ttm_buffer_object *bo)
+{
+       return bo->destroy == &vmw_bo_bo_free ||
+              bo->destroy == &vmw_gem_destroy;
+}
 
 /**
  * vmw_bo_pin_in_placement - Validate a buffer to placement.
@@ -615,8 +630,9 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
 
                ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
                vmw_bo_unreference(&vbo);
-               if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
-                            ret != -EBUSY)) {
+               if (unlikely(ret != 0)) {
+                       if (ret == -ERESTARTSYS || ret == -EBUSY)
+                               return -EBUSY;
                        DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
@@ -798,7 +814,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
 void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
 {
        /* Is @bo embedded in a struct vmw_buffer_object? */
-       if (vmw_bo_is_vmw_bo(bo))
+       if (!bo_is_vmw(bo))
                return;
 
        /* Kill any cached kernel maps before swapout */
@@ -822,7 +838,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
        struct vmw_buffer_object *vbo;
 
        /* Make sure @bo is embedded in a struct vmw_buffer_object? */
-       if (vmw_bo_is_vmw_bo(bo))
+       if (!bo_is_vmw(bo))
                return;
 
        vbo = container_of(bo, struct vmw_buffer_object, base);
@@ -843,22 +859,3 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
        if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
                vmw_resource_unbind_list(vbo);
 }
-
-/**
- * vmw_bo_is_vmw_bo - check if the buffer object is a &vmw_buffer_object
- * @bo: buffer object to be checked
- *
- * Uses destroy function associated with the object to determine if this is
- * a &vmw_buffer_object.
- *
- * Returns:
- * true if the object is of &vmw_buffer_object type, false if not.
- */
-bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo)
-{
-       if (bo->destroy == &vmw_bo_bo_free ||
-           bo->destroy == &vmw_gem_destroy)
-               return true;
-
-       return false;
-}
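Identifying a wrapper type by its destroy callback, as bo_is_vmw() does above, is a lightweight alternative to storing an explicit type tag. A generic sketch of the idiom with invented names:

	/* Hypothetical illustration: recognise our wrapper by its destroy hook. */
	struct demo_base_object {
		void (*destroy)(struct demo_base_object *obj);
	};

	struct demo_wrapped_object {
		struct demo_base_object base;
		int extra_state;
	};

	static void demo_wrapped_destroy(struct demo_base_object *obj) { }

	static bool demo_is_wrapped(struct demo_base_object *obj)
	{
		return obj->destroy == demo_wrapped_destroy;
	}

	/* Only after the check is container_of() safe to use. */
	static struct demo_wrapped_object *demo_to_wrapped(struct demo_base_object *obj)
	{
		return demo_is_wrapped(obj) ?
			container_of(obj, struct demo_wrapped_object, base) : NULL;
	}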
index 26eb547..163c007 100644 (file)
@@ -998,13 +998,10 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
                goto out_no_fman;
        }
 
-       drm_vma_offset_manager_init(&dev_priv->vma_manager,
-                                   DRM_FILE_PAGE_OFFSET_START,
-                                   DRM_FILE_PAGE_OFFSET_SIZE);
        ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
                              dev_priv->drm.dev,
                              dev_priv->drm.anon_inode->i_mapping,
-                             &dev_priv->vma_manager,
+                             dev_priv->drm.vma_offset_manager,
                              dev_priv->map_mode == vmw_dma_alloc_coherent,
                              false);
        if (unlikely(ret != 0)) {
@@ -1174,7 +1171,6 @@ static void vmw_driver_unload(struct drm_device *dev)
        vmw_devcaps_destroy(dev_priv);
        vmw_vram_manager_fini(dev_priv);
        ttm_device_fini(&dev_priv->bdev);
-       drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
        vmw_release_device_late(dev_priv);
        vmw_fence_manager_takedown(dev_priv->fman);
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@@ -1398,7 +1394,7 @@ vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
        struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
 
        return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
-                                    &dev_priv->vma_manager);
+                                    dev_priv->drm.vma_offset_manager);
 }
 
 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
index 00e8e27..ace7ca1 100644 (file)
@@ -683,6 +683,9 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
            container_of(base, struct vmw_user_surface, prime.base);
        struct vmw_resource *res = &user_srf->srf.res;
 
+       if (base->shareable && res && res->backup)
+               drm_gem_object_put(&res->backup->base.base);
+
        *p_base = NULL;
        vmw_resource_unreference(&res);
 }
@@ -857,6 +860,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                        goto out_unlock;
                }
                vmw_bo_reference(res->backup);
+               drm_gem_object_get(&res->backup->base.base);
        }
 
        tmp = vmw_resource_reference(&srf->res);
@@ -1513,7 +1517,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
                                                        &res->backup);
                if (ret == 0)
                        vmw_bo_reference(res->backup);
-
        }
 
        if (unlikely(ret != 0)) {
@@ -1561,6 +1564,8 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
                        drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
                rep->buffer_size = res->backup->base.base.size;
                rep->buffer_handle = backup_handle;
+               if (user_srf->prime.base.shareable)
+                       drm_gem_object_get(&res->backup->base.base);
        } else {
                rep->buffer_map_handle = 0;
                rep->buffer_size = 0;
index 27f969b..e9e2db6 100644 (file)
@@ -179,6 +179,12 @@ struct imx_i2c_hwdata {
        unsigned int            ndivs;
        unsigned int            i2sr_clr_opcode;
        unsigned int            i2cr_ien_opcode;
+       /*
+        * Errata ERR007805 or e7805:
+        * I2C: When the I2C clock speed is configured for 400 kHz,
+        * the SCL low period violates the I2C spec of 1.3 uS min.
+        */
+       bool                    has_err007805;
 };
 
 struct imx_i2c_dma {
@@ -240,6 +246,16 @@ static const struct imx_i2c_hwdata imx21_i2c_hwdata = {
 
 };
 
+static const struct imx_i2c_hwdata imx6_i2c_hwdata = {
+       .devtype                = IMX21_I2C,
+       .regshift               = IMX_I2C_REGSHIFT,
+       .clk_div                = imx_i2c_clk_div,
+       .ndivs                  = ARRAY_SIZE(imx_i2c_clk_div),
+       .i2sr_clr_opcode        = I2SR_CLR_OPCODE_W0C,
+       .i2cr_ien_opcode        = I2CR_IEN_OPCODE_1,
+       .has_err007805          = true,
+};
+
 static struct imx_i2c_hwdata vf610_i2c_hwdata = {
        .devtype                = VF610_I2C,
        .regshift               = VF610_I2C_REGSHIFT,
@@ -266,6 +282,16 @@ MODULE_DEVICE_TABLE(platform, imx_i2c_devtype);
 static const struct of_device_id i2c_imx_dt_ids[] = {
        { .compatible = "fsl,imx1-i2c", .data = &imx1_i2c_hwdata, },
        { .compatible = "fsl,imx21-i2c", .data = &imx21_i2c_hwdata, },
+       { .compatible = "fsl,imx6q-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx6sl-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx6sll-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx6sx-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx6ul-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx7s-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx8mm-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx8mn-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx8mp-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx8mq-i2c", .data = &imx6_i2c_hwdata, },
        { .compatible = "fsl,vf610-i2c", .data = &vf610_i2c_hwdata, },
        { /* sentinel */ }
 };
@@ -551,6 +577,13 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
        unsigned int div;
        int i;
 
+       if (i2c_imx->hwdata->has_err007805 && i2c_imx->bitrate > 384000) {
+               dev_dbg(&i2c_imx->adapter.dev,
+                       "SoC errata ERR007805 or e7805 applies, bus frequency limited from %d Hz to 384000 Hz.\n",
+                       i2c_imx->bitrate);
+               i2c_imx->bitrate = 384000;
+       }
+
        /* Divider value calculation */
        if (i2c_imx->cur_clk == i2c_clk_rate)
                return;
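For reference, a minimal sketch (invented function name) of how per-compatible data such as the has_err007805 flag is usually retrieved at probe time via the of_device_id .data pointers added above:

	/* Hypothetical probe fragment; demo_probe is an invented name. */
	static int demo_probe(struct platform_device *pdev)
	{
		const struct imx_i2c_hwdata *hwdata;

		hwdata = of_device_get_match_data(&pdev->dev);
		if (!hwdata)
			return -EINVAL;	/* no matching compatible entry */

		if (hwdata->has_err007805)
			dev_dbg(&pdev->dev, "ERR007805 workaround limits the bus to 384 kHz\n");

		return 0;
	}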
index f4820fd..c036431 100644 (file)
 #define ISMT_SPGT_SPD_MASK     0xc0000000      /* SMBus Speed mask */
 #define ISMT_SPGT_SPD_80K      0x00            /* 80 kHz */
 #define ISMT_SPGT_SPD_100K     (0x1 << 30)     /* 100 kHz */
-#define ISMT_SPGT_SPD_400K     (0x2 << 30)     /* 400 kHz */
-#define ISMT_SPGT_SPD_1M       (0x3 << 30)     /* 1 MHz */
+#define ISMT_SPGT_SPD_400K     (0x2U << 30)    /* 400 kHz */
+#define ISMT_SPGT_SPD_1M       (0x3U << 30)    /* 1 MHz */
 
 
 /* MSI Control Register (MSICTL) bit definitions */
index 7728c84..9028ffb 100644 (file)
@@ -137,6 +137,12 @@ static int pasemi_i2c_xfer_msg(struct i2c_adapter *adapter,
 
                TXFIFO_WR(smbus, msg->buf[msg->len-1] |
                          (stop ? MTXFIFO_STOP : 0));
+
+               if (stop) {
+                       err = pasemi_smb_waitready(smbus);
+                       if (err)
+                               goto reset_out;
+               }
        }
 
        return 0;
index fc1dcc1..5b920f0 100644 (file)
@@ -843,10 +843,8 @@ static int geni_i2c_probe(struct platform_device *pdev)
                /* FIFO is disabled, so we can only use GPI DMA */
                gi2c->gpi_mode = true;
                ret = setup_gpi_dma(gi2c);
-               if (ret) {
-                       dev_err(dev, "Failed to setup GPI DMA mode:%d ret\n", ret);
-                       return ret;
-               }
+               if (ret)
+                       return dev_err_probe(dev, ret, "Failed to setup GPI DMA mode\n");
 
                dev_dbg(dev, "Using GPI DMA mode for I2C\n");
        } else {
index cf5d049..ab0adaa 100644 (file)
@@ -557,7 +557,7 @@ static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned lo
                                .addr = umsg.addr,
                                .flags = umsg.flags,
                                .len = umsg.len,
-                               .buf = compat_ptr(umsg.buf)
+                               .buf = (__force __u8 *)compat_ptr(umsg.buf),
                        };
                }
 
@@ -668,16 +668,21 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
        i2c_dev->dev.class = i2c_dev_class;
        i2c_dev->dev.parent = &adap->dev;
        i2c_dev->dev.release = i2cdev_dev_release;
-       dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr);
+
+       res = dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr);
+       if (res)
+               goto err_put_i2c_dev;
 
        res = cdev_device_add(&i2c_dev->cdev, &i2c_dev->dev);
-       if (res) {
-               put_i2c_dev(i2c_dev, false);
-               return res;
-       }
+       if (res)
+               goto err_put_i2c_dev;
 
        pr_debug("adapter [%s] registered as minor %d\n", adap->name, adap->nr);
        return 0;
+
+err_put_i2c_dev:
+       put_i2c_dev(i2c_dev, false);
+       return res;
 }
 
 static int i2cdev_detach_adapter(struct device *dev, void *dummy)
index c289960..9a23eed 100644 (file)
@@ -61,6 +61,14 @@ static irqreturn_t cypress_sf_irq_handler(int irq, void *devid)
        return IRQ_HANDLED;
 }
 
+static void cypress_sf_disable_regulators(void *arg)
+{
+       struct cypress_sf_data *touchkey = arg;
+
+       regulator_bulk_disable(ARRAY_SIZE(touchkey->regulators),
+                              touchkey->regulators);
+}
+
 static int cypress_sf_probe(struct i2c_client *client)
 {
        struct cypress_sf_data *touchkey;
@@ -121,6 +129,12 @@ static int cypress_sf_probe(struct i2c_client *client)
                return error;
        }
 
+       error = devm_add_action_or_reset(&client->dev,
+                                        cypress_sf_disable_regulators,
+                                        touchkey);
+       if (error)
+               return error;
+
        touchkey->input_dev = devm_input_allocate_device(&client->dev);
        if (!touchkey->input_dev) {
                dev_err(&client->dev, "Failed to allocate input device\n");
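The devm_add_action_or_reset() call above registers a teardown callback that runs automatically on probe failure and on driver unbind, so the regulators need no explicit error-path or remove() handling. A minimal generic sketch of the pattern, using an invented clock example rather than the touchkey regulators:

	/* Hypothetical illustration of pairing an enable with a devm teardown. */
	static void demo_disable_clock(void *data)
	{
		struct clk *clk = data;

		clk_disable_unprepare(clk);
	}

	static int demo_enable_clock(struct device *dev, struct clk *clk)
	{
		int ret;

		ret = clk_prepare_enable(clk);
		if (ret)
			return ret;

		/* Disable again on probe failure or driver unbind. */
		return devm_add_action_or_reset(dev, demo_disable_clock, clk);
	}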
index 43375b3..8a7ce41 100644 (file)
@@ -393,7 +393,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
         * revision register.
         */
        error = pm_runtime_get_sync(dev);
-       if (error) {
+       if (error < 0) {
                dev_err(dev, "pm_runtime_get_sync() failed\n");
                pm_runtime_put_noidle(dev);
                return error;
index 73b3961..439fab4 100644 (file)
 #include <linux/kthread.h>
 #include <linux/platform_device.h>
 #include <linux/mutex.h>
+#include <linux/of.h>
 
 #include <linux/uaccess.h>
 #ifdef CONFIG_PPC
-#include <asm/prom.h>
 #include <asm/machdep.h>
 #endif
 
index 994ba5c..b2fe7a3 100644 (file)
@@ -789,7 +789,8 @@ adbhid_input_register(int id, int default_id, int original_handler_id,
 
        switch (default_id) {
        case ADB_KEYBOARD:
-               hid->keycode = kmalloc(sizeof(adb_to_linux_keycodes), GFP_KERNEL);
+               hid->keycode = kmemdup(adb_to_linux_keycodes,
+                                      sizeof(adb_to_linux_keycodes), GFP_KERNEL);
                if (!hid->keycode) {
                        err = -ENOMEM;
                        goto fail;
@@ -797,8 +798,6 @@ adbhid_input_register(int id, int default_id, int original_handler_id,
 
                sprintf(hid->name, "ADB keyboard");
 
-               memcpy(hid->keycode, adb_to_linux_keycodes, sizeof(adb_to_linux_keycodes));
-
                switch (original_handler_id) {
                default:
                        keyboard_type = "<unknown>";
@@ -817,9 +816,7 @@ adbhid_input_register(int id, int default_id, int original_handler_id,
                case 0xC4: case 0xC7:
                        keyboard_type = "ISO, swapping keys";
                        input_dev->id.version = ADB_KEYBOARD_ISO;
-                       i = hid->keycode[10];
-                       hid->keycode[10] = hid->keycode[50];
-                       hid->keycode[50] = i;
+                       swap(hid->keycode[10], hid->keycode[50]);
                        break;
 
                case 0x12: case 0x15: case 0x16: case 0x17: case 0x1A:
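Two small idioms appear in the hunks above: kmemdup() collapses a kmalloc()+memcpy() pair, and swap() replaces the three-line temporary-variable exchange. A tiny sketch with invented names:

	/* Sketch only; 'table' and 'copy' are illustrative. */
	static const u16 table[] = { 1, 2, 3, 4 };

	static u16 *demo_dup_and_swap(void)
	{
		u16 *copy = kmemdup(table, sizeof(table), GFP_KERNEL);

		if (!copy)
			return NULL;

		swap(copy[0], copy[1]);	/* exchange two entries without a temporary */
		return copy;
	}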
index 01eeb23..877e8cb 100644 (file)
@@ -50,7 +50,7 @@ static ssize_t ams_show_current(struct device *dev,
        ams_sensors(&x, &y, &z);
        mutex_unlock(&ams_info.lock);
 
-       return snprintf(buf, PAGE_SIZE, "%d %d %d\n", x, y, z);
+       return sysfs_emit(buf, "%d %d %d\n", x, y, z);
 }
 
 static DEVICE_ATTR(current, S_IRUGO, ams_show_current, NULL);
index 21271b2..d2f0cde 100644 (file)
@@ -256,8 +256,6 @@ static void ams_i2c_exit(void)
 
 int __init ams_i2c_init(struct device_node *np)
 {
-       int result;
-
        /* Set implementation stuff */
        ams_info.of_node = np;
        ams_info.exit = ams_i2c_exit;
@@ -266,7 +264,5 @@ int __init ams_i2c_init(struct device_node *np)
        ams_info.clear_irq = ams_i2c_clear_irq;
        ams_info.bustype = BUS_I2C;
 
-       result = i2c_add_driver(&ams_i2c_driver);
-
-       return result;
+       return i2c_add_driver(&ams_i2c_driver);
 }
index b4821c7..fa904b2 100644 (file)
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/fs.h>
+#include <linux/of.h>
 
 #include <linux/uaccess.h>
 #include <asm/sections.h>
-#include <asm/prom.h>
 #include <asm/io.h>
 
 #include "ans-lcd.h"
index dc634c2..9b63bd2 100644 (file)
@@ -9,8 +9,11 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/pgtable.h>
-#include <asm/prom.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/adb.h>
+
 #include <asm/io.h>
 #include <asm/hydra.h>
 #include <asm/irq.h>
index 1943a00..1ec1e59 100644 (file)
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/of_device.h>
 #include <linux/of_irq.h>
 
 #include <asm/machdep.h>
 #include <asm/macio.h>
 #include <asm/pmac_feature.h>
-#include <asm/prom.h>
 
 #undef DEBUG
 
@@ -472,7 +473,7 @@ static void macio_pci_add_devices(struct macio_chip *chip)
        root_res = &rdev->resource[0];
 
        /* First scan 1st level */
-       for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) {
+       for_each_child_of_node(pnode, np) {
                if (macio_skip_device(np))
                        continue;
                of_node_get(np);
@@ -489,7 +490,7 @@ static void macio_pci_add_devices(struct macio_chip *chip)
        /* Add media bay devices if any */
        if (mbdev) {
                pnode = mbdev->ofdev.dev.of_node;
-               for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) {
+               for_each_child_of_node(pnode, np) {
                        if (macio_skip_device(np))
                                continue;
                        of_node_get(np);
@@ -502,7 +503,7 @@ static void macio_pci_add_devices(struct macio_chip *chip)
        /* Add serial ports if any */
        if (sdev) {
                pnode = sdev->ofdev.dev.of_node;
-               for (np = NULL; (np = of_get_next_child(pnode, np)) != NULL;) {
+               for_each_child_of_node(pnode, np) {
                        if (macio_skip_device(np))
                                continue;
                        of_node_get(np);
index 27f5eef..2bbe359 100644 (file)
@@ -1,5 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/stat.h>
 #include <asm/macio.h>
 
index b17660c..36070c6 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/kthread.h>
 #include <linux/mutex.h>
 #include <linux/pgtable.h>
-#include <asm/prom.h>
+
 #include <asm/io.h>
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
index 60311e8..c28893e 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/of_irq.h>
 
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
 #include <asm/dbdma.h>
index a4fbc3f..b495bfa 100644 (file)
@@ -41,7 +41,6 @@
 
 #include <asm/byteorder.h>
 #include <asm/io.h>
-#include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
 #include <asm/smu.h>
@@ -1087,7 +1086,7 @@ static int smu_open(struct inode *inode, struct file *file)
        unsigned long flags;
 
        pp = kzalloc(sizeof(struct smu_private), GFP_KERNEL);
-       if (pp == 0)
+       if (!pp)
                return -ENOMEM;
        spin_lock_init(&pp->lock);
        pp->mode = smu_file_commands;
@@ -1254,7 +1253,7 @@ static __poll_t smu_fpoll(struct file *file, poll_table *wait)
        __poll_t mask = 0;
        unsigned long flags;
 
-       if (pp == 0)
+       if (!pp)
                return 0;
 
        if (pp->mode == smu_file_commands) {
@@ -1277,7 +1276,7 @@ static int smu_release(struct inode *inode, struct file *file)
        unsigned long flags;
        unsigned int busy;
 
-       if (pp == 0)
+       if (!pp)
                return 0;
 
        file->private_data = NULL;
index 7e21843..e604cbc 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/freezer.h>
 #include <linux/of_platform.h>
 
-#include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/io.h>
 #include <asm/sections.h>
index f55f6ad..9226b74 100644 (file)
@@ -38,7 +38,6 @@
 #include <linux/kthread.h>
 #include <linux/of_platform.h>
 
-#include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/io.h>
 #include <asm/sections.h>
index 3d0d0b9..5071289 100644 (file)
 #include <linux/cuda.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
 #ifdef CONFIG_PPC
-#include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
 #else
@@ -237,10 +239,10 @@ int __init find_via_cuda(void)
     const u32 *reg;
     int err;
 
-    if (vias != 0)
+    if (vias)
        return 1;
     vias = of_find_node_by_name(NULL, "via-cuda");
-    if (vias == 0)
+    if (!vias)
        return 0;
 
     reg = of_get_property(vias, "reg", NULL);
@@ -518,7 +520,7 @@ cuda_write(struct adb_request *req)
     req->reply_len = 0;
 
     spin_lock_irqsave(&cuda_lock, flags);
-    if (current_req != 0) {
+    if (current_req) {
        last_req->next = req;
        last_req = req;
     } else {
index 50ada02..2194016 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/adb.h>
 #include <linux/pmu.h>
 #include <asm/backlight.h>
-#include <asm/prom.h>
 
 #define MAX_PMU_LEVEL 0xFF
 
index ae067ab..a4fb16d 100644 (file)
@@ -25,7 +25,7 @@
 #include <linux/leds.h>
 #include <linux/adb.h>
 #include <linux/pmu.h>
-#include <asm/prom.h>
+#include <linux/of.h>
 
 static spinlock_t pmu_blink_lock;
 static struct adb_request pmu_blink_req;
index 4b98bc2..308fcce 100644 (file)
@@ -59,7 +59,6 @@
 #include <asm/pmac_feature.h>
 #include <asm/pmac_pfunc.h>
 #include <asm/pmac_low_i2c.h>
-#include <asm/prom.h>
 #include <asm/mmu_context.h>
 #include <asm/cputable.h>
 #include <asm/time.h>
@@ -161,7 +160,7 @@ static unsigned char __iomem *gpio_reg;
 static int gpio_irq = 0;
 static int gpio_irq_enabled = -1;
 static volatile int pmu_suspended;
-static spinlock_t pmu_lock;
+static DEFINE_SPINLOCK(pmu_lock);
 static u8 pmu_intr_mask;
 static int pmu_version;
 static int drop_interrupts;
@@ -305,8 +304,6 @@ int __init find_via_pmu(void)
                goto fail;
        }
 
-       spin_lock_init(&pmu_lock);
-
        pmu_has_adb = 1;
 
        pmu_intr_mask = PMU_INT_PCEJECT |
@@ -388,8 +385,6 @@ int __init find_via_pmu(void)
 
        pmu_kind = PMU_UNKNOWN;
 
-       spin_lock_init(&pmu_lock);
-
        pmu_has_adb = 1;
 
        pmu_intr_mask = PMU_INT_PCEJECT |
index e7dec32..6ad6441 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/init.h>
 #include <linux/wait.h>
 #include <linux/i2c.h>
-#include <asm/prom.h>
+
 #include <asm/machdep.h>
 #include <asm/io.h>
 #include <asm/sections.h>
index 07f91ec..5307b1e 100644 (file)
@@ -35,8 +35,6 @@
 #include <linux/mutex.h>
 #include <linux/freezer.h>
 
-#include <asm/prom.h>
-
 #include "windfarm.h"
 
 #define VERSION "0.2"
index 7b726f0..28d18ef 100644 (file)
@@ -10,8 +10,6 @@
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
 
-#include <asm/prom.h>
-
 #include "windfarm.h"
 
 #define VERSION "0.3"
index 2470e5a..82e7b20 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/init.h>
 #include <linux/wait.h>
 #include <linux/i2c.h>
-#include <asm/prom.h>
+
 #include <asm/machdep.h>
 #include <asm/io.h>
 #include <asm/sections.h>
index 29f48c2..eb7e7f0 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/wait.h>
 #include <linux/i2c.h>
 #include <linux/of_device.h>
-#include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/io.h>
 #include <asm/sections.h>
index 9fab0b4..807efdd 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/init.h>
 #include <linux/wait.h>
 #include <linux/i2c.h>
-#include <asm/prom.h>
+
 #include <asm/machdep.h>
 #include <asm/io.h>
 #include <asm/sections.h>
index 1e7b03d..55ee417 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
-#include <asm/prom.h>
+
 #include <asm/pmac_low_i2c.h>
 
 #include "windfarm.h"
index 157ce6e..b5ce347 100644 (file)
@@ -8,6 +8,8 @@
 #ifndef __WINDFARM_MPU_H
 #define __WINDFARM_MPU_H
 
+#include <linux/of.h>
+
 typedef unsigned short fu16;
 typedef int fs32;
 typedef short fs16;
index e8377ce..d1dec31 100644 (file)
@@ -12,7 +12,9 @@
 #include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/reboot.h>
-#include <asm/prom.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
 #include <asm/smu.h>
 
 #include "windfarm.h"
index ba1ec6f..36312f1 100644 (file)
 #include <linux/kmod.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
-#include <asm/prom.h>
+#include <linux/of.h>
+
 #include <asm/machdep.h>
 #include <asm/io.h>
 #include <asm/sections.h>
index e81746b..e21f973 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/reboot.h>
-#include <asm/prom.h>
+
 #include <asm/smu.h>
 
 #include "windfarm.h"
index 82c67a4..e0f4743 100644 (file)
 #include <linux/kmod.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
-#include <asm/prom.h>
+#include <linux/of.h>
+
 #include <asm/machdep.h>
 #include <asm/io.h>
 #include <asm/sections.h>
index 3f346af..c853585 100644 (file)
@@ -37,7 +37,8 @@
 #include <linux/kmod.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
-#include <asm/prom.h>
+#include <linux/of.h>
+
 #include <asm/machdep.h>
 #include <asm/io.h>
 #include <asm/sections.h>
index 7acd168..e9eb7fd 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/device.h>
 #include <linux/platform_device.h>
 #include <linux/reboot.h>
-#include <asm/prom.h>
+
 #include <asm/smu.h>
 
 #include "windfarm.h"
index 7596605..e9957ad 100644 (file)
@@ -14,7 +14,8 @@
 #include <linux/init.h>
 #include <linux/wait.h>
 #include <linux/completion.h>
-#include <asm/prom.h>
+#include <linux/of.h>
+
 #include <asm/machdep.h>
 #include <asm/io.h>
 #include <asm/sections.h>
index e46e115..5ade627 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/wait.h>
 #include <linux/i2c.h>
 #include <linux/mutex.h>
-#include <asm/prom.h>
+
 #include <asm/smu.h>
 #include <asm/pmac_low_i2c.h>
 
index c8706cf..00c6fe2 100644 (file)
@@ -14,7 +14,8 @@
 #include <linux/init.h>
 #include <linux/wait.h>
 #include <linux/completion.h>
-#include <asm/prom.h>
+#include <linux/of.h>
+
 #include <asm/machdep.h>
 #include <asm/io.h>
 #include <asm/sections.h>
index 7c2ca52..df5347e 100644 (file)
@@ -771,12 +771,12 @@ static void journal_write_unlocked(struct closure *cl)
 
                bio_reset(bio, ca->bdev, REQ_OP_WRITE | 
                          REQ_SYNC | REQ_META | REQ_PREFLUSH | REQ_FUA);
-               bch_bio_map(bio, w->data);
                bio->bi_iter.bi_sector  = PTR_OFFSET(k, i);
                bio->bi_iter.bi_size = sectors << 9;
 
                bio->bi_end_io  = journal_write_endio;
                bio->bi_private = w;
+               bch_bio_map(bio, w->data);
 
                trace_bcache_journal_write(bio, w->data->keys);
                bio_list_add(&list, bio);
index fdd0194..320fcdf 100644 (file)
@@ -685,7 +685,7 @@ static void do_bio_hook(struct search *s,
 {
        struct bio *bio = &s->bio.bio;
 
-       bio_init_clone(bio->bi_bdev, bio, orig_bio, GFP_NOIO);
+       bio_init_clone(orig_bio->bi_bdev, bio, orig_bio, GFP_NOIO);
        /*
         * bi_end_io can be set separately somewhere else, e.g. the
         * variants in,
index ad2d5fa..36ae30b 100644 (file)
@@ -4399,6 +4399,7 @@ try_smaller_buffer:
        }
 
        if (ic->internal_hash) {
+               size_t recalc_tags_size;
                ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
                if (!ic->recalc_wq ) {
                        ti->error = "Cannot allocate workqueue";
@@ -4412,8 +4413,10 @@ try_smaller_buffer:
                        r = -ENOMEM;
                        goto bad;
                }
-               ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
-                                                ic->tag_size, GFP_KERNEL);
+               recalc_tags_size = (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size;
+               if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size)
+                       recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size;
+               ic->recalc_tags = kvmalloc(recalc_tags_size, GFP_KERNEL);
                if (!ic->recalc_tags) {
                        ti->error = "Cannot allocate tags for recalculating";
                        r = -ENOMEM;
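
A small arithmetic sketch of the new allocation size (the constants below are illustrative, not dm-integrity's real ones): the buffer needs one tag per block plus, when the hash digest is wider than the stored tag, enough slack for the last digest to be written in full before truncation.

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical numbers, chosen only to show the shape of the computation. */
    #define RECALC_SECTORS          2048u
    #define LOG2_SECTORS_PER_BLOCK  3u      /* 4 KiB blocks on 512-byte sectors */

    static size_t recalc_tags_size(size_t tag_size, size_t digest_size)
    {
        size_t size = (RECALC_SECTORS >> LOG2_SECTORS_PER_BLOCK) * tag_size;

        /* the last digest is written whole and only then truncated to tag_size */
        if (digest_size > tag_size)
            size += digest_size - tag_size;
        return size;
    }

    int main(void)
    {
        printf("tag 4, digest 32  -> %zu bytes\n", recalc_tags_size(4, 32));
        printf("tag 64, digest 32 -> %zu bytes\n", recalc_tags_size(64, 32));
        return 0;
    }
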
index 875bca3..82f2a06 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/blkdev.h>
 #include <linux/slab.h>
 #include <linux/module.h>
-#include <linux/sched/clock.h>
 
 
 #define DM_MSG_PREFIX  "multipath historical-service-time"
@@ -433,7 +432,7 @@ static struct dm_path *hst_select_path(struct path_selector *ps,
 {
        struct selector *s = ps->context;
        struct path_info *pi = NULL, *best = NULL;
-       u64 time_now = sched_clock();
+       u64 time_now = ktime_get_ns();
        struct dm_path *ret = NULL;
        unsigned long flags;
 
@@ -474,7 +473,7 @@ static int hst_start_io(struct path_selector *ps, struct dm_path *path,
 
 static u64 path_service_time(struct path_info *pi, u64 start_time)
 {
-       u64 sched_now = ktime_get_ns();
+       u64 now = ktime_get_ns();
 
        /* if a previous disk request has finished after this IO was
         * sent to the hardware, pretend the submission happened
@@ -483,11 +482,11 @@ static u64 path_service_time(struct path_info *pi, u64 start_time)
        if (time_after64(pi->last_finish, start_time))
                start_time = pi->last_finish;
 
-       pi->last_finish = sched_now;
-       if (time_before64(sched_now, start_time))
+       pi->last_finish = now;
+       if (time_before64(now, start_time))
                return 0;
 
-       return sched_now - start_time;
+       return now - start_time;
 }
 
 static int hst_end_io(struct path_selector *ps, struct dm_path *path,
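
The clock switch above keeps the same interval bookkeeping; a compact sketch with plain C stand-ins for the u64 time helpers shows it: a start time earlier than the previous completion is pushed forward, and an apparently negative interval is clamped to zero.

    #include <stdio.h>
    #include <stdint.h>

    static int time_after64(uint64_t a, uint64_t b)  { return (int64_t)(b - a) < 0; }
    static int time_before64(uint64_t a, uint64_t b) { return (int64_t)(a - b) < 0; }

    struct path_info { uint64_t last_finish; };

    static uint64_t service_time(struct path_info *pi, uint64_t start, uint64_t now)
    {
        if (time_after64(pi->last_finish, start))
            start = pi->last_finish;   /* overlapped with an earlier completion */
        pi->last_finish = now;
        if (time_before64(now, start))
            return 0;                  /* never report a negative interval */
        return now - start;
    }

    int main(void)
    {
        struct path_info pi = { .last_finish = 150 };

        /* issued at t=100, a previous request finished at t=150, done at t=200 */
        printf("%llu\n", (unsigned long long)service_time(&pi, 100, 200)); /* 50 */
        return 0;
    }
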
index c1ca9be..57daa86 100644 (file)
@@ -360,16 +360,20 @@ static int dm_update_zone_wp_offset(struct mapped_device *md, unsigned int zno,
        return 0;
 }
 
+struct orig_bio_details {
+       unsigned int op;
+       unsigned int nr_sectors;
+};
+
 /*
  * First phase of BIO mapping for targets with zone append emulation:
  * check all BIO that change a zone writer pointer and change zone
  * append operations into regular write operations.
  */
 static bool dm_zone_map_bio_begin(struct mapped_device *md,
-                                 struct bio *orig_bio, struct bio *clone)
+                                 unsigned int zno, struct bio *clone)
 {
        sector_t zsectors = blk_queue_zone_sectors(md->queue);
-       unsigned int zno = bio_zone_no(orig_bio);
        unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]);
 
        /*
@@ -384,7 +388,7 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md,
                WRITE_ONCE(md->zwp_offset[zno], zwp_offset);
        }
 
-       switch (bio_op(orig_bio)) {
+       switch (bio_op(clone)) {
        case REQ_OP_ZONE_RESET:
        case REQ_OP_ZONE_FINISH:
                return true;
@@ -401,9 +405,8 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md,
                 * target zone.
                 */
                clone->bi_opf = REQ_OP_WRITE | REQ_NOMERGE |
-                       (orig_bio->bi_opf & (~REQ_OP_MASK));
-               clone->bi_iter.bi_sector =
-                       orig_bio->bi_iter.bi_sector + zwp_offset;
+                       (clone->bi_opf & (~REQ_OP_MASK));
+               clone->bi_iter.bi_sector += zwp_offset;
                break;
        default:
                DMWARN_LIMIT("Invalid BIO operation");
@@ -423,11 +426,10 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md,
  * data written to a zone. Note that at this point, the remapped clone BIO
  * may already have completed, so we do not touch it.
  */
-static blk_status_t dm_zone_map_bio_end(struct mapped_device *md,
-                                       struct bio *orig_bio,
+static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, unsigned int zno,
+                                       struct orig_bio_details *orig_bio_details,
                                        unsigned int nr_sectors)
 {
-       unsigned int zno = bio_zone_no(orig_bio);
        unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]);
 
        /* The clone BIO may already have been completed and failed */
@@ -435,7 +437,7 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md,
                return BLK_STS_IOERR;
 
        /* Update the zone wp offset */
-       switch (bio_op(orig_bio)) {
+       switch (orig_bio_details->op) {
        case REQ_OP_ZONE_RESET:
                WRITE_ONCE(md->zwp_offset[zno], 0);
                return BLK_STS_OK;
@@ -452,7 +454,7 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md,
                 * Check that the target did not truncate the write operation
                 * emulating a zone append.
                 */
-               if (nr_sectors != bio_sectors(orig_bio)) {
+               if (nr_sectors != orig_bio_details->nr_sectors) {
                        DMWARN_LIMIT("Truncated write for zone append");
                        return BLK_STS_IOERR;
                }
@@ -488,7 +490,7 @@ static inline void dm_zone_unlock(struct request_queue *q,
        bio_clear_flag(clone, BIO_ZONE_WRITE_LOCKED);
 }
 
-static bool dm_need_zone_wp_tracking(struct bio *orig_bio)
+static bool dm_need_zone_wp_tracking(struct bio *bio)
 {
        /*
         * Special processing is not needed for operations that do not need the
@@ -496,15 +498,15 @@ static bool dm_need_zone_wp_tracking(struct bio *orig_bio)
         * zones and all operations that do not modify directly a sequential
         * zone write pointer.
         */
-       if (op_is_flush(orig_bio->bi_opf) && !bio_sectors(orig_bio))
+       if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
                return false;
-       switch (bio_op(orig_bio)) {
+       switch (bio_op(bio)) {
        case REQ_OP_WRITE_ZEROES:
        case REQ_OP_WRITE:
        case REQ_OP_ZONE_RESET:
        case REQ_OP_ZONE_FINISH:
        case REQ_OP_ZONE_APPEND:
-               return bio_zone_is_seq(orig_bio);
+               return bio_zone_is_seq(bio);
        default:
                return false;
        }
@@ -519,8 +521,8 @@ int dm_zone_map_bio(struct dm_target_io *tio)
        struct dm_target *ti = tio->ti;
        struct mapped_device *md = io->md;
        struct request_queue *q = md->queue;
-       struct bio *orig_bio = io->orig_bio;
        struct bio *clone = &tio->clone;
+       struct orig_bio_details orig_bio_details;
        unsigned int zno;
        blk_status_t sts;
        int r;
@@ -529,18 +531,21 @@ int dm_zone_map_bio(struct dm_target_io *tio)
         * IOs that do not change a zone write pointer do not need
         * any additional special processing.
         */
-       if (!dm_need_zone_wp_tracking(orig_bio))
+       if (!dm_need_zone_wp_tracking(clone))
                return ti->type->map(ti, clone);
 
        /* Lock the target zone */
-       zno = bio_zone_no(orig_bio);
+       zno = bio_zone_no(clone);
        dm_zone_lock(q, zno, clone);
 
+       orig_bio_details.nr_sectors = bio_sectors(clone);
+       orig_bio_details.op = bio_op(clone);
+
        /*
         * Check that the bio and the target zone write pointer offset are
         * both valid, and if the bio is a zone append, remap it to a write.
         */
-       if (!dm_zone_map_bio_begin(md, orig_bio, clone)) {
+       if (!dm_zone_map_bio_begin(md, zno, clone)) {
                dm_zone_unlock(q, zno, clone);
                return DM_MAPIO_KILL;
        }
@@ -560,7 +565,8 @@ int dm_zone_map_bio(struct dm_target_io *tio)
                 * The target submitted the clone BIO. The target zone will
                 * be unlocked on completion of the clone.
                 */
-               sts = dm_zone_map_bio_end(md, orig_bio, *tio->len_ptr);
+               sts = dm_zone_map_bio_end(md, zno, &orig_bio_details,
+                                         *tio->len_ptr);
                break;
        case DM_MAPIO_REMAPPED:
                /*
@@ -568,7 +574,8 @@ int dm_zone_map_bio(struct dm_target_io *tio)
                 * unlock the target zone here as the clone will not be
                 * submitted.
                 */
-               sts = dm_zone_map_bio_end(md, orig_bio, *tio->len_ptr);
+               sts = dm_zone_map_bio_end(md, zno, &orig_bio_details,
+                                         *tio->len_ptr);
                if (sts != BLK_STS_OK)
                        dm_zone_unlock(q, zno, clone);
                break;
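
A generic sketch (hypothetical names, not the dm API) of the pattern the zone hunks switch to: record the fields you will need from a request before handing it to a callee that may rewrite or complete it, then post-process from that snapshot instead of the original pointer.

    #include <stdio.h>

    struct request { unsigned int op; unsigned int nr_sectors; };
    struct req_details { unsigned int op; unsigned int nr_sectors; };

    /* A callee that, like the zone-append emulation, rewrites the request in place. */
    static void map_request(struct request *rq)
    {
        rq->op = 1;             /* e.g. a zone append turned into a plain write */
        rq->nr_sectors = 0;     /* and it may even complete before we look again */
    }

    int main(void)
    {
        struct request rq = { .op = 13, .nr_sectors = 8 };
        struct req_details saved = { .op = rq.op, .nr_sectors = rq.nr_sectors };

        map_request(&rq);

        /* Post-processing works from the snapshot, not the rewritten request. */
        printf("submitted op %u with %u sectors\n", saved.op, saved.nr_sectors);
        return 0;
    }
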
index 3c5fad7..82957bd 100644 (file)
@@ -1323,8 +1323,7 @@ static void __map_bio(struct bio *clone)
 }
 
 static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
-                               struct dm_target *ti, unsigned num_bios,
-                               unsigned *len)
+                               struct dm_target *ti, unsigned num_bios)
 {
        struct bio *bio;
        int try;
@@ -1335,7 +1334,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
                if (try)
                        mutex_lock(&ci->io->md->table_devices_lock);
                for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
-                       bio = alloc_tio(ci, ti, bio_nr, len,
+                       bio = alloc_tio(ci, ti, bio_nr, NULL,
                                        try ? GFP_NOIO : GFP_NOWAIT);
                        if (!bio)
                                break;
@@ -1363,11 +1362,11 @@ static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
                break;
        case 1:
                clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
-               dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
                __map_bio(clone);
                break;
        default:
-               alloc_multiple_bios(&blist, ci, ti, num_bios, len);
+               /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
+               alloc_multiple_bios(&blist, ci, ti, num_bios);
                while ((clone = bio_list_pop(&blist))) {
                        dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
                        __map_bio(clone);
@@ -1392,6 +1391,7 @@ static void __send_empty_flush(struct clone_info *ci)
 
        ci->bio = &flush_bio;
        ci->sector_count = 0;
+       ci->io->tio.clone.bi_iter.bi_size = 0;
 
        while ((ti = dm_table_get_target(ci->map, target_nr++)))
                __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
@@ -1407,14 +1407,10 @@ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target
        len = min_t(sector_t, ci->sector_count,
                    max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
 
-       /*
-        * dm_accept_partial_bio cannot be used with duplicate bios,
-        * so update clone_info cursor before __send_duplicate_bios().
-        */
+       __send_duplicate_bios(ci, ti, num_bios, &len);
+
        ci->sector += len;
        ci->sector_count -= len;
-
-       __send_duplicate_bios(ci, ti, num_bios, &len);
 }
 
 static bool is_abnormal_io(struct bio *bio)
index 28f2baf..5afa373 100644 (file)
@@ -7,6 +7,7 @@ comment "NXP media platform drivers"
 config VIDEO_IMX_MIPI_CSIS
        tristate "NXP MIPI CSI-2 CSIS receiver found on i.MX7 and i.MX8 models"
        depends on ARCH_MXC || COMPILE_TEST
+       depends on VIDEO_DEV
        select MEDIA_CONTROLLER
        select V4L2_FWNODE
        select VIDEO_V4L2_SUBDEV_API
index 4de5e8d..3d3d106 100644 (file)
@@ -892,7 +892,7 @@ static int rga_probe(struct platform_device *pdev)
        }
        rga->dst_mmu_pages =
                (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
-       if (rga->dst_mmu_pages) {
+       if (!rga->dst_mmu_pages) {
                ret = -ENOMEM;
                goto free_src_pages;
        }
index 4702974..0de587b 100644 (file)
@@ -77,16 +77,16 @@ err_mutex_unlock:
 }
 
 static const struct si2157_tuner_info si2157_tuners[] = {
-       { SI2141, false, 0x60, SI2141_60_FIRMWARE, SI2141_A10_FIRMWARE },
-       { SI2141, false, 0x61, SI2141_61_FIRMWARE, SI2141_A10_FIRMWARE },
-       { SI2146, false, 0x11, SI2146_11_FIRMWARE, NULL },
-       { SI2147, false, 0x50, SI2147_50_FIRMWARE, NULL },
-       { SI2148, true,  0x32, SI2148_32_FIRMWARE, SI2158_A20_FIRMWARE },
-       { SI2148, true,  0x33, SI2148_33_FIRMWARE, SI2158_A20_FIRMWARE },
-       { SI2157, false, 0x50, SI2157_50_FIRMWARE, SI2157_A30_FIRMWARE },
-       { SI2158, false, 0x50, SI2158_50_FIRMWARE, SI2158_A20_FIRMWARE },
-       { SI2158, false, 0x51, SI2158_51_FIRMWARE, SI2158_A20_FIRMWARE },
-       { SI2177, false, 0x50, SI2177_50_FIRMWARE, SI2157_A30_FIRMWARE },
+       { SI2141, 0x60, false, SI2141_60_FIRMWARE, SI2141_A10_FIRMWARE },
+       { SI2141, 0x61, false, SI2141_61_FIRMWARE, SI2141_A10_FIRMWARE },
+       { SI2146, 0x11, false, SI2146_11_FIRMWARE, NULL },
+       { SI2147, 0x50, false, SI2147_50_FIRMWARE, NULL },
+       { SI2148, 0x32, true,  SI2148_32_FIRMWARE, SI2158_A20_FIRMWARE },
+       { SI2148, 0x33, true,  SI2148_33_FIRMWARE, SI2158_A20_FIRMWARE },
+       { SI2157, 0x50, false, SI2157_50_FIRMWARE, SI2157_A30_FIRMWARE },
+       { SI2158, 0x50, false, SI2158_50_FIRMWARE, SI2158_A20_FIRMWARE },
+       { SI2158, 0x51, false, SI2158_51_FIRMWARE, SI2158_A20_FIRMWARE },
+       { SI2177, 0x50, false, SI2177_50_FIRMWARE, SI2157_A30_FIRMWARE },
 };
 
 static int si2157_load_firmware(struct dvb_frontend *fe,
@@ -178,7 +178,7 @@ static int si2157_find_and_load_firmware(struct dvb_frontend *fe)
                }
        }
 
-       if (!fw_name && !fw_alt_name) {
+       if (required && !fw_name && !fw_alt_name) {
                dev_err(&client->dev,
                        "unknown chip version Si21%d-%c%c%c ROM 0x%02x\n",
                        part_id, cmd.args[1], cmd.args[3], cmd.args[4], rom_id);
index c267283..e749dcb 100644 (file)
@@ -544,20 +544,27 @@ static int atmel_ebi_probe(struct platform_device *pdev)
        smc_np = of_parse_phandle(dev->of_node, "atmel,smc", 0);
 
        ebi->smc.regmap = syscon_node_to_regmap(smc_np);
-       if (IS_ERR(ebi->smc.regmap))
-               return PTR_ERR(ebi->smc.regmap);
+       if (IS_ERR(ebi->smc.regmap)) {
+               ret = PTR_ERR(ebi->smc.regmap);
+               goto put_node;
+       }
 
        ebi->smc.layout = atmel_hsmc_get_reg_layout(smc_np);
-       if (IS_ERR(ebi->smc.layout))
-               return PTR_ERR(ebi->smc.layout);
+       if (IS_ERR(ebi->smc.layout)) {
+               ret = PTR_ERR(ebi->smc.layout);
+               goto put_node;
+       }
 
        ebi->smc.clk = of_clk_get(smc_np, 0);
        if (IS_ERR(ebi->smc.clk)) {
-               if (PTR_ERR(ebi->smc.clk) != -ENOENT)
-                       return PTR_ERR(ebi->smc.clk);
+               if (PTR_ERR(ebi->smc.clk) != -ENOENT) {
+                       ret = PTR_ERR(ebi->smc.clk);
+                       goto put_node;
+               }
 
                ebi->smc.clk = NULL;
        }
+       of_node_put(smc_np);
        ret = clk_prepare_enable(ebi->smc.clk);
        if (ret)
                return ret;
@@ -608,6 +615,10 @@ static int atmel_ebi_probe(struct platform_device *pdev)
        }
 
        return of_platform_populate(np, NULL, NULL, dev);
+
+put_node:
+       of_node_put(smc_np);
+       return ret;
 }
 
 static __maybe_unused int atmel_ebi_resume(struct device *dev)
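
The error paths added above all funnel through one put_node label so the phandle reference is dropped exactly once; a small user-space sketch of that shape, with a plain counter standing in for the OF node refcount:

    #include <stdio.h>

    struct node { int refcount; };

    static void node_get(struct node *n) { n->refcount++; }
    static void node_put(struct node *n) { n->refcount--; }

    /* Hypothetical fallible steps standing in for the regmap/layout/clock lookups. */
    static int step(int fail) { return fail ? -1 : 0; }

    static int probe_like(struct node *n, int fail_at)
    {
        int ret;

        node_get(n);                    /* of_parse_phandle() takes a reference */

        ret = step(fail_at == 1);
        if (ret)
            goto put_node;
        ret = step(fail_at == 2);
        if (ret)
            goto put_node;

        node_put(n);                    /* success path drops it too */
        return 0;

    put_node:
        node_put(n);                    /* single exit keeps the count balanced */
        return ret;
    }

    int main(void)
    {
        struct node n = { 0 };

        probe_like(&n, 2);
        printf("refcount after failed probe: %d\n", n.refcount);   /* 0 */
        return 0;
    }
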
index 2f6939d..e83b61c 100644 (file)
@@ -287,8 +287,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
        }
 
        /* legacy dts may still use "simple-bus" compatible */
-       ret = of_platform_populate(dev->dev.of_node, NULL, NULL,
-                                       &dev->dev);
+       ret = of_platform_default_populate(dev->dev.of_node, NULL, &dev->dev);
        if (ret)
                goto err_free_nandirq;
 
index e4cc64f..2e545f4 100644 (file)
@@ -651,6 +651,7 @@ static int rpcif_probe(struct platform_device *pdev)
        struct platform_device *vdev;
        struct device_node *flash;
        const char *name;
+       int ret;
 
        flash = of_get_next_child(pdev->dev.of_node, NULL);
        if (!flash) {
@@ -674,7 +675,14 @@ static int rpcif_probe(struct platform_device *pdev)
                return -ENOMEM;
        vdev->dev.parent = &pdev->dev;
        platform_set_drvdata(pdev, vdev);
-       return platform_device_add(vdev);
+
+       ret = platform_device_add(vdev);
+       if (ret) {
+               platform_device_put(vdev);
+               return ret;
+       }
+
+       return 0;
 }
 
 static int rpcif_remove(struct platform_device *pdev)
index b493de9..d85c565 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/pseudo_fs.h>
 #include <linux/sched/mm.h>
 #include <linux/mmu_context.h>
+#include <linux/irqdomain.h>
 
 #include "cxl.h"
 
index 5dc0f60..7a6dd91 100644 (file)
@@ -25,6 +25,8 @@
 
 extern uint cxl_verbose;
 
+struct property;
+
 #define CXL_TIMEOUT 5
 
 /*
index 53b9198..e5fe0a1 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <linux/hugetlb.h>
 #include <linux/sched/mm.h>
+#include <asm/opal-api.h>
 #include <asm/pnv-pci.h>
 #include <misc/cxllib.h>
 
index 5b93ff5..eee9dec 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/semaphore.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/of.h>
 #include <asm/rtas.h>
 
 #include "cxl.h"
index 9d485c9..3321c01 100644 (file)
@@ -6,6 +6,8 @@
 #include <linux/spinlock.h>
 #include <linux/uaccess.h>
 #include <linux/delay.h>
+#include <linux/irqdomain.h>
+#include <linux/platform_device.h>
 
 #include "cxl.h"
 #include "hcalls.h"
index 4cb829d..5f0e2dc 100644 (file)
@@ -4,6 +4,7 @@
  */
 
 #include <linux/interrupt.h>
+#include <linux/irqdomain.h>
 #include <linux/workqueue.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
index 43b312d..c1fbf6f 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/slab.h>
 #include <linux/idr.h>
 #include <linux/pci.h>
+#include <linux/platform_device.h>
 #include <linux/sched/task.h>
 
 #include <asm/cputable.h>
index 1a7f228..50b0c44 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/mm.h>
 #include <linux/uaccess.h>
 #include <linux/delay.h>
+#include <linux/irqdomain.h>
 #include <asm/synch.h>
 #include <asm/switch_to.h>
 #include <misc/cxl-base.h>
index ecdcfae..a06920b 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0+
 // Copyright 2017 IBM Corp.
 #include <linux/interrupt.h>
+#include <linux/irqdomain.h>
 #include <asm/pnv-ocxl.h>
 #include <asm/xive.h>
 #include "ocxl_internal.h"
index d881f5e..6777c41 100644 (file)
@@ -556,7 +556,9 @@ int ocxl_file_register_afu(struct ocxl_afu *afu)
 
 err_unregister:
        ocxl_sysfs_unregister_afu(info); // safe to call even if register failed
+       free_minor(info);
        device_unregister(&info->dev);
+       return rc;
 err_put:
        ocxl_afu_put(afu);
        free_minor(info);
index 9670d02..4cf4c55 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/mm_types.h>
 #include <linux/mmu_context.h>
 #include <linux/mmu_notifier.h>
+#include <linux/irqdomain.h>
 #include <asm/copro.h>
 #include <asm/pnv-ocxl.h>
 #include <asm/xive.h>
index 15eddca..38e1525 100644 (file)
@@ -4027,14 +4027,19 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const v
        return true;
 }
 
-static u32 bond_ip_hash(u32 hash, struct flow_keys *flow)
+static u32 bond_ip_hash(u32 hash, struct flow_keys *flow, int xmit_policy)
 {
        hash ^= (__force u32)flow_get_u32_dst(flow) ^
                (__force u32)flow_get_u32_src(flow);
        hash ^= (hash >> 16);
        hash ^= (hash >> 8);
+
        /* discard lowest hash bit to deal with the common even ports pattern */
-       return hash >> 1;
+       if (xmit_policy == BOND_XMIT_POLICY_LAYER34 ||
+               xmit_policy == BOND_XMIT_POLICY_ENCAP34)
+               return hash >> 1;
+
+       return hash;
 }
 
 /* Generate hash based on xmit policy. If @skb is given it is used to linearize
@@ -4064,7 +4069,7 @@ static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const voi
                        memcpy(&hash, &flow.ports.ports, sizeof(hash));
        }
 
-       return bond_ip_hash(hash, &flow);
+       return bond_ip_hash(hash, &flow, bond->params.xmit_policy);
 }
 
 /**
@@ -5259,7 +5264,7 @@ static u32 bond_sk_hash_l34(struct sock *sk)
        /* L4 */
        memcpy(&hash, &flow.ports.ports, sizeof(hash));
        /* L3 */
-       return bond_ip_hash(hash, &flow);
+       return bond_ip_hash(hash, &flow, BOND_XMIT_POLICY_LAYER34);
 }
 
 static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond,
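
A user-space rendering of the folding logic above (the policy names are simplified stand-ins): the lowest-bit discard that hides the even/odd port pattern is applied only for the layer-3+4 policies, since for the others no ports were mixed in and dropping the bit would just throw away entropy.

    #include <stdio.h>
    #include <stdint.h>

    enum { POLICY_LAYER23, POLICY_LAYER34, POLICY_ENCAP23, POLICY_ENCAP34 };

    static uint32_t fold_ip_hash(uint32_t hash, uint32_t src, uint32_t dst, int policy)
    {
        hash ^= dst ^ src;
        hash ^= hash >> 16;
        hash ^= hash >> 8;

        /* Only useful when L4 ports were folded into the seed. */
        if (policy == POLICY_LAYER34 || policy == POLICY_ENCAP34)
            return hash >> 1;
        return hash;
    }

    int main(void)
    {
        uint32_t ports = 0x1f900050;    /* a pair of ports folded into the seed */

        printf("l34: %08x\n", fold_ip_hash(ports, 0x0a000001, 0x0a000002, POLICY_LAYER34));
        printf("l23: %08x\n", fold_ip_hash(0,     0x0a000001, 0x0a000002, POLICY_LAYER23));
        return 0;
    }
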
index 413b000..9e28219 100644 (file)
@@ -670,6 +670,8 @@ static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
        struct ocelot *ocelot = ds->priv;
        struct felix *felix = ocelot_to_felix(ocelot);
        enum dsa_tag_protocol old_proto = felix->tag_proto;
+       bool cpu_port_active = false;
+       struct dsa_port *dp;
        int err;
 
        if (proto != DSA_TAG_PROTO_SEVILLE &&
@@ -677,6 +679,27 @@ static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
            proto != DSA_TAG_PROTO_OCELOT_8021Q)
                return -EPROTONOSUPPORT;
 
+       /* We don't support multiple CPU ports, yet the DT blob may have
+        * multiple CPU ports defined. The first CPU port is the active one,
+        * the others are inactive. In this case, DSA will call
+        * ->change_tag_protocol() multiple times, once per CPU port.
+        * Since we implement the tagging protocol change towards "ocelot" or
+        * "seville" as effectively initializing the NPI port, what we are
+        * doing is effectively changing who the NPI port is to the last @cpu
+        * argument passed, which is an unused DSA CPU port and not the one
+        * that should actively pass traffic.
+        * Suppress DSA's calls on CPU ports that are inactive.
+        */
+       dsa_switch_for_each_user_port(dp, ds) {
+               if (dp->cpu_dp->index == cpu) {
+                       cpu_port_active = true;
+                       break;
+               }
+       }
+
+       if (!cpu_port_active)
+               return 0;
+
        felix_del_tag_protocol(ds, cpu, old_proto);
 
        err = felix_set_tag_protocol(ds, cpu, proto);
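
A small sketch of the "active CPU port" test described in the comment above, with a plain array standing in for the DSA port list: a CPU port counts as active only if at least one user port is attached to it, and calls for the other CPU ports are ignored.

    #include <stdbool.h>
    #include <stdio.h>

    struct port { bool is_user; int cpu_port; };

    static bool cpu_port_active(const struct port *ports, int n, int cpu)
    {
        for (int i = 0; i < n; i++)
            if (ports[i].is_user && ports[i].cpu_port == cpu)
                return true;
        return false;
    }

    int main(void)
    {
        /* Two CPU ports described in DT; only port 4 is referenced by user ports. */
        struct port ports[] = {
            { .is_user = true,  .cpu_port = 4 },
            { .is_user = true,  .cpu_port = 4 },
            { .is_user = false, .cpu_port = -1 },   /* CPU port 4 itself */
            { .is_user = false, .cpu_port = -1 },   /* unused CPU port 5 */
        };

        printf("cpu 4 active: %d\n", cpu_port_active(ports, 4, 4));  /* 1 */
        printf("cpu 5 active: %d\n", cpu_port_active(ports, 4, 5));  /* 0 */
        return 0;
    }
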
index 8d382b2..52a8566 100644 (file)
@@ -2316,7 +2316,7 @@ static int felix_pci_probe(struct pci_dev *pdev,
 
        err = dsa_register_switch(ds);
        if (err) {
-               dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err);
+               dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n");
                goto err_register_ds;
        }
 
index 1aa7973..060165a 100644 (file)
@@ -9,34 +9,46 @@ menuconfig NET_DSA_REALTEK
        help
          Select to enable support for Realtek Ethernet switch chips.
 
+         Note that at least one interface driver must be enabled for the
+         subdrivers to be loaded. Moreover, an interface driver cannot achieve
+         anything without at least one subdriver enabled.
+
+if NET_DSA_REALTEK
+
 config NET_DSA_REALTEK_MDIO
-       tristate "Realtek MDIO connected switch driver"
-       depends on NET_DSA_REALTEK
+       tristate "Realtek MDIO interface driver"
        depends on OF
+       depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
+       depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
+       depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
        help
          Select to enable support for registering switches configured
          through MDIO.
 
 config NET_DSA_REALTEK_SMI
-       tristate "Realtek SMI connected switch driver"
-       depends on NET_DSA_REALTEK
+       tristate "Realtek SMI interface driver"
        depends on OF
+       depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
+       depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
+       depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
        help
          Select to enable support for registering switches connected
          through SMI.
 
 config NET_DSA_REALTEK_RTL8365MB
        tristate "Realtek RTL8365MB switch subdriver"
-       depends on NET_DSA_REALTEK
-       depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
+       imply NET_DSA_REALTEK_SMI
+       imply NET_DSA_REALTEK_MDIO
        select NET_DSA_TAG_RTL8_4
        help
          Select to enable support for Realtek RTL8365MB-VC and RTL8367S.
 
 config NET_DSA_REALTEK_RTL8366RB
        tristate "Realtek RTL8366RB switch subdriver"
-       depends on NET_DSA_REALTEK
-       depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
+       imply NET_DSA_REALTEK_SMI
+       imply NET_DSA_REALTEK_MDIO
        select NET_DSA_TAG_RTL4_A
        help
-         Select to enable support for Realtek RTL8366RB
+         Select to enable support for Realtek RTL8366RB.
+
+endif
index 2243d3d..6cec559 100644 (file)
@@ -546,11 +546,6 @@ static const struct of_device_id realtek_smi_of_match[] = {
                .data = &rtl8366rb_variant,
        },
 #endif
-       {
-               /* FIXME: add support for RTL8366S and more */
-               .compatible = "realtek,rtl8366s",
-               .data = NULL,
-       },
 #if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
        {
                .compatible = "realtek,rtl8365mb",
index bd4cb9d..8279930 100644 (file)
@@ -35,15 +35,6 @@ source "drivers/net/ethernet/aquantia/Kconfig"
 source "drivers/net/ethernet/arc/Kconfig"
 source "drivers/net/ethernet/asix/Kconfig"
 source "drivers/net/ethernet/atheros/Kconfig"
-source "drivers/net/ethernet/broadcom/Kconfig"
-source "drivers/net/ethernet/brocade/Kconfig"
-source "drivers/net/ethernet/cadence/Kconfig"
-source "drivers/net/ethernet/calxeda/Kconfig"
-source "drivers/net/ethernet/cavium/Kconfig"
-source "drivers/net/ethernet/chelsio/Kconfig"
-source "drivers/net/ethernet/cirrus/Kconfig"
-source "drivers/net/ethernet/cisco/Kconfig"
-source "drivers/net/ethernet/cortina/Kconfig"
 
 config CX_ECAT
        tristate "Beckhoff CX5020 EtherCAT master support"
@@ -57,6 +48,14 @@ config CX_ECAT
          To compile this driver as a module, choose M here. The module
          will be called ec_bhf.
 
+source "drivers/net/ethernet/broadcom/Kconfig"
+source "drivers/net/ethernet/cadence/Kconfig"
+source "drivers/net/ethernet/calxeda/Kconfig"
+source "drivers/net/ethernet/cavium/Kconfig"
+source "drivers/net/ethernet/chelsio/Kconfig"
+source "drivers/net/ethernet/cirrus/Kconfig"
+source "drivers/net/ethernet/cisco/Kconfig"
+source "drivers/net/ethernet/cortina/Kconfig"
 source "drivers/net/ethernet/davicom/Kconfig"
 
 config DNET
@@ -85,7 +84,6 @@ source "drivers/net/ethernet/huawei/Kconfig"
 source "drivers/net/ethernet/i825xx/Kconfig"
 source "drivers/net/ethernet/ibm/Kconfig"
 source "drivers/net/ethernet/intel/Kconfig"
-source "drivers/net/ethernet/microsoft/Kconfig"
 source "drivers/net/ethernet/xscale/Kconfig"
 
 config JME
@@ -128,8 +126,9 @@ source "drivers/net/ethernet/mediatek/Kconfig"
 source "drivers/net/ethernet/mellanox/Kconfig"
 source "drivers/net/ethernet/micrel/Kconfig"
 source "drivers/net/ethernet/microchip/Kconfig"
-source "drivers/net/ethernet/moxa/Kconfig"
 source "drivers/net/ethernet/mscc/Kconfig"
+source "drivers/net/ethernet/microsoft/Kconfig"
+source "drivers/net/ethernet/moxa/Kconfig"
 source "drivers/net/ethernet/myricom/Kconfig"
 
 config FEALNX
@@ -141,10 +140,10 @@ config FEALNX
          Say Y here to support the Myson MTD-800 family of PCI-based Ethernet
          cards. <http://www.myson.com.tw/>
 
+source "drivers/net/ethernet/ni/Kconfig"
 source "drivers/net/ethernet/natsemi/Kconfig"
 source "drivers/net/ethernet/neterion/Kconfig"
 source "drivers/net/ethernet/netronome/Kconfig"
-source "drivers/net/ethernet/ni/Kconfig"
 source "drivers/net/ethernet/8390/Kconfig"
 source "drivers/net/ethernet/nvidia/Kconfig"
 source "drivers/net/ethernet/nxp/Kconfig"
@@ -164,6 +163,7 @@ source "drivers/net/ethernet/packetengines/Kconfig"
 source "drivers/net/ethernet/pasemi/Kconfig"
 source "drivers/net/ethernet/pensando/Kconfig"
 source "drivers/net/ethernet/qlogic/Kconfig"
+source "drivers/net/ethernet/brocade/Kconfig"
 source "drivers/net/ethernet/qualcomm/Kconfig"
 source "drivers/net/ethernet/rdc/Kconfig"
 source "drivers/net/ethernet/realtek/Kconfig"
@@ -171,10 +171,10 @@ source "drivers/net/ethernet/renesas/Kconfig"
 source "drivers/net/ethernet/rocker/Kconfig"
 source "drivers/net/ethernet/samsung/Kconfig"
 source "drivers/net/ethernet/seeq/Kconfig"
-source "drivers/net/ethernet/sfc/Kconfig"
 source "drivers/net/ethernet/sgi/Kconfig"
 source "drivers/net/ethernet/silan/Kconfig"
 source "drivers/net/ethernet/sis/Kconfig"
+source "drivers/net/ethernet/sfc/Kconfig"
 source "drivers/net/ethernet/smsc/Kconfig"
 source "drivers/net/ethernet/socionext/Kconfig"
 source "drivers/net/ethernet/stmicro/Kconfig"
index 33f1a13..24d715c 100644 (file)
@@ -486,8 +486,8 @@ int aq_nic_start(struct aq_nic_s *self)
        if (err < 0)
                goto err_exit;
 
-       for (i = 0U, aq_vec = self->aq_vec[0];
-               self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+       for (i = 0U; self->aq_vecs > i; ++i) {
+               aq_vec = self->aq_vec[i];
                err = aq_vec_start(aq_vec);
                if (err < 0)
                        goto err_exit;
@@ -517,8 +517,8 @@ int aq_nic_start(struct aq_nic_s *self)
                mod_timer(&self->polling_timer, jiffies +
                          AQ_CFG_POLLING_TIMER_INTERVAL);
        } else {
-               for (i = 0U, aq_vec = self->aq_vec[0];
-                       self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+               for (i = 0U; self->aq_vecs > i; ++i) {
+                       aq_vec = self->aq_vec[i];
                        err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
                                                    aq_vec_isr, aq_vec,
                                                    aq_vec_get_affinity_mask(aq_vec));
index 797a951..3a529ee 100644 (file)
@@ -444,22 +444,22 @@ err_exit:
 
 static int aq_pm_freeze(struct device *dev)
 {
-       return aq_suspend_common(dev, false);
+       return aq_suspend_common(dev, true);
 }
 
 static int aq_pm_suspend_poweroff(struct device *dev)
 {
-       return aq_suspend_common(dev, true);
+       return aq_suspend_common(dev, false);
 }
 
 static int aq_pm_thaw(struct device *dev)
 {
-       return atl_resume_common(dev, false);
+       return atl_resume_common(dev, true);
 }
 
 static int aq_pm_resume_restore(struct device *dev)
 {
-       return atl_resume_common(dev, true);
+       return atl_resume_common(dev, false);
 }
 
 static const struct dev_pm_ops aq_pm_ops = {
index f4774cf..6ab1f32 100644 (file)
@@ -43,8 +43,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
        if (!self) {
                err = -EINVAL;
        } else {
-               for (i = 0U, ring = self->ring[0];
-                       self->tx_rings > i; ++i, ring = self->ring[i]) {
+               for (i = 0U; self->tx_rings > i; ++i) {
+                       ring = self->ring[i];
                        u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
                        ring[AQ_VEC_RX_ID].stats.rx.polls++;
                        u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
@@ -182,8 +182,8 @@ int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
        self->aq_hw_ops = aq_hw_ops;
        self->aq_hw = aq_hw;
 
-       for (i = 0U, ring = self->ring[0];
-               self->tx_rings > i; ++i, ring = self->ring[i]) {
+       for (i = 0U; self->tx_rings > i; ++i) {
+               ring = self->ring[i];
                err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX);
                if (err < 0)
                        goto err_exit;
@@ -224,8 +224,8 @@ int aq_vec_start(struct aq_vec_s *self)
        unsigned int i = 0U;
        int err = 0;
 
-       for (i = 0U, ring = self->ring[0];
-               self->tx_rings > i; ++i, ring = self->ring[i]) {
+       for (i = 0U; self->tx_rings > i; ++i) {
+               ring = self->ring[i];
                err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
                                                        &ring[AQ_VEC_TX_ID]);
                if (err < 0)
@@ -248,8 +248,8 @@ void aq_vec_stop(struct aq_vec_s *self)
        struct aq_ring_s *ring = NULL;
        unsigned int i = 0U;
 
-       for (i = 0U, ring = self->ring[0];
-               self->tx_rings > i; ++i, ring = self->ring[i]) {
+       for (i = 0U; self->tx_rings > i; ++i) {
+               ring = self->ring[i];
                self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
                                                 &ring[AQ_VEC_TX_ID]);
 
@@ -268,8 +268,8 @@ void aq_vec_deinit(struct aq_vec_s *self)
        if (!self)
                goto err_exit;
 
-       for (i = 0U, ring = self->ring[0];
-               self->tx_rings > i; ++i, ring = self->ring[i]) {
+       for (i = 0U; self->tx_rings > i; ++i) {
+               ring = self->ring[i];
                aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
                aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
        }
@@ -297,8 +297,8 @@ void aq_vec_ring_free(struct aq_vec_s *self)
        if (!self)
                goto err_exit;
 
-       for (i = 0U, ring = self->ring[0];
-               self->tx_rings > i; ++i, ring = self->ring[i]) {
+       for (i = 0U; self->tx_rings > i; ++i) {
+               ring = self->ring[i];
                aq_ring_free(&ring[AQ_VEC_TX_ID]);
                if (i < self->rx_rings)
                        aq_ring_free(&ring[AQ_VEC_RX_ID]);
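
The aquantia hunks above all retire the same comma-expression loop; a short sketch of why: the old increment computed the next element's index before the exit test ran, so the final pass indexed one slot past the declared array, while assigning inside the body keeps the index in range.

    #include <stdio.h>

    #define N 4

    int main(void)
    {
        int ring[N] = { 10, 20, 30, 40 };
        int *r;
        unsigned int i;

        /*
         * The old form evaluated ring[i] in the loop increment, i.e. ring[N]
         * is indexed once before the condition finally fails:
         *
         *   for (i = 0, r = &ring[0]; i < N; ++i, r = &ring[i])
         *
         * Doing the assignment in the body only ever touches valid indexes.
         */
        for (i = 0; i < N; ++i) {
            r = &ring[i];
            printf("%u: %d\n", i, *r);
        }
        return 0;
    }
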
index 2dd79af..9a41145 100644 (file)
@@ -76,7 +76,7 @@ static inline void bcmgenet_writel(u32 value, void __iomem *offset)
        if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                __raw_writel(value, offset);
        else
-               writel(value, offset);
+               writel_relaxed(value, offset);
 }
 
 static inline u32 bcmgenet_readl(void __iomem *offset)
@@ -84,7 +84,7 @@ static inline u32 bcmgenet_readl(void __iomem *offset)
        if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                return __raw_readl(offset);
        else
-               return readl(offset);
+               return readl_relaxed(offset);
 }
 
 static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
index 800d5ce..e475be2 100644 (file)
@@ -1658,6 +1658,7 @@ static void macb_tx_restart(struct macb_queue *queue)
        unsigned int head = queue->tx_head;
        unsigned int tail = queue->tx_tail;
        struct macb *bp = queue->bp;
+       unsigned int head_idx, tbqp;
 
        if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
                queue_writel(queue, ISR, MACB_BIT(TXUBR));
@@ -1665,6 +1666,13 @@ static void macb_tx_restart(struct macb_queue *queue)
        if (head == tail)
                return;
 
+       tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
+       tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
+       head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head));
+
+       if (tbqp == head_idx)
+               return;
+
        macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
 }
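
A simplified model of the new TBQP check (the per-descriptor index adjustment the driver does is omitted): when the hardware's descriptor pointer has already caught up with the head there is nothing left for it to fetch, so the transmit restart is skipped.

    #include <stdbool.h>
    #include <stdio.h>

    #define RING_SIZE 8     /* power of two, as the driver's rings are */

    static unsigned int ring_wrap(unsigned int idx) { return idx & (RING_SIZE - 1); }

    /* Restart only when the hardware pointer has not already reached the head. */
    static bool needs_restart(unsigned int hw_ptr, unsigned int head, unsigned int tail)
    {
        if (head == tail)
            return false;                           /* nothing queued at all */
        return ring_wrap(hw_ptr) != ring_wrap(head);
    }

    int main(void)
    {
        printf("%d\n", needs_restart(5, 5, 3));     /* 0: already at head */
        printf("%d\n", needs_restart(3, 5, 3));     /* 1: descriptors still ahead */
        return 0;
    }
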
 
index d5356db..caf4802 100644 (file)
@@ -1835,11 +1835,6 @@ static int ftgmac100_probe(struct platform_device *pdev)
                priv->rxdes0_edorr_mask = BIT(30);
                priv->txdes0_edotr_mask = BIT(30);
                priv->is_aspeed = true;
-               /* Disable ast2600 problematic HW arbitration */
-               if (of_device_is_compatible(np, "aspeed,ast2600-mac")) {
-                       iowrite32(FTGMAC100_TM_DEFAULT,
-                                 priv->base + FTGMAC100_OFFSET_TM);
-               }
        } else {
                priv->rxdes0_edorr_mask = BIT(15);
                priv->txdes0_edotr_mask = BIT(15);
@@ -1911,6 +1906,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
                err = ftgmac100_setup_clk(priv);
                if (err)
                        goto err_phy_connect;
+
+               /* Disable ast2600 problematic HW arbitration */
+               if (of_device_is_compatible(np, "aspeed,ast2600-mac"))
+                       iowrite32(FTGMAC100_TM_DEFAULT,
+                                 priv->base + FTGMAC100_OFFSET_TM);
        }
 
        /* Default ring sizes */
index 763d2c7..5750f9a 100644 (file)
@@ -489,11 +489,15 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
        info->phc_index = -1;
 
        fman_node = of_get_parent(mac_node);
-       if (fman_node)
+       if (fman_node) {
                ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
+               of_node_put(fman_node);
+       }
 
-       if (ptp_node)
+       if (ptp_node) {
                ptp_dev = of_find_device_by_node(ptp_node);
+               of_node_put(ptp_node);
+       }
 
        if (ptp_dev)
                ptp = platform_get_drvdata(ptp_dev);
index d60e201..e6c8e6d 100644 (file)
@@ -1009,8 +1009,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
 {
        u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
            link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
-       u16 max_ltr_enc_d = 0;  /* maximum LTR decoded by platform */
-       u16 lat_enc_d = 0;      /* latency decoded */
+       u32 max_ltr_enc_d = 0;  /* maximum LTR decoded by platform */
+       u32 lat_enc_d = 0;      /* latency decoded */
        u16 lat_enc = 0;        /* latency encoded */
 
        if (link) {
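
A small sketch of the truncation the u16-to-u32 change above avoids (field widths here are illustrative): an LTR-style encoding decodes to value * 2^(5*scale) nanoseconds, which easily exceeds 16 bits once the scale grows, so comparing decoded values in a u16 silently wraps.

    #include <stdio.h>
    #include <stdint.h>

    /* Decode an LTR-style encoding: 10-bit value scaled by 2^(5*scale). */
    static uint32_t ltr_decode(uint16_t enc)
    {
        uint16_t value = enc & 0x3ff;
        uint16_t scale = (enc >> 10) & 0x7;

        return (uint32_t)value << (5 * scale);
    }

    int main(void)
    {
        uint16_t enc = (2 << 10) | 300;         /* 300 * 1024 ns = 307200 ns */
        uint32_t decoded = ltr_decode(enc);
        uint16_t truncated = (uint16_t)decoded; /* what a u16 holder would keep */

        printf("decoded=%u truncated=%u\n", decoded, truncated);
        return 0;
    }
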
index 190590d..7dfcf78 100644 (file)
@@ -2871,7 +2871,6 @@ continue_reset:
        running = adapter->state == __IAVF_RUNNING;
 
        if (running) {
-               netdev->flags &= ~IFF_UP;
                netif_carrier_off(netdev);
                netif_tx_stop_all_queues(netdev);
                adapter->link_up = false;
@@ -2988,7 +2987,7 @@ continue_reset:
                 * to __IAVF_RUNNING
                 */
                iavf_up_complete(adapter);
-               netdev->flags |= IFF_UP;
+
                iavf_irq_enable(adapter, true);
        } else {
                iavf_change_state(adapter, __IAVF_DOWN);
@@ -3004,10 +3003,8 @@ continue_reset:
 reset_err:
        mutex_unlock(&adapter->client_lock);
        mutex_unlock(&adapter->crit_lock);
-       if (running) {
+       if (running)
                iavf_change_state(adapter, __IAVF_RUNNING);
-               netdev->flags |= IFF_UP;
-       }
        dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
        iavf_close(netdev);
 }
index 5daade3..fba178e 100644 (file)
@@ -577,7 +577,7 @@ void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
 {
        struct net_device *netdev;
 
-       if (!vsi || vsi->type != ICE_VSI_PF || !vsi->arfs_fltr_list)
+       if (!vsi || vsi->type != ICE_VSI_PF)
                return;
 
        netdev = vsi->netdev;
@@ -599,7 +599,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
        int base_idx, i;
 
        if (!vsi || vsi->type != ICE_VSI_PF)
-               return -EINVAL;
+               return 0;
 
        pf = vsi->back;
        netdev = vsi->netdev;
@@ -636,7 +636,6 @@ void ice_remove_arfs(struct ice_pf *pf)
        if (!pf_vsi)
                return;
 
-       ice_free_cpu_rx_rmap(pf_vsi);
        ice_clear_arfs(pf_vsi);
 }
 
@@ -653,9 +652,5 @@ void ice_rebuild_arfs(struct ice_pf *pf)
                return;
 
        ice_remove_arfs(pf);
-       if (ice_set_cpu_rx_rmap(pf_vsi)) {
-               dev_err(ice_pf_to_dev(pf), "Failed to rebuild aRFS\n");
-               return;
-       }
        ice_init_arfs(pf_vsi);
 }
index 9a84d74..6a463b2 100644 (file)
@@ -361,7 +361,8 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
        np = netdev_priv(netdev);
        vsi = np->vsi;
 
-       if (ice_is_reset_in_progress(vsi->back->state))
+       if (ice_is_reset_in_progress(vsi->back->state) ||
+           test_bit(ICE_VF_DIS, vsi->back->state))
                return NETDEV_TX_BUSY;
 
        repr = ice_netdev_to_repr(netdev);
index bd58d9d..6a41333 100644 (file)
@@ -52,7 +52,7 @@ static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { }
 
 static inline int ice_eswitch_configure(struct ice_pf *pf)
 {
-       return -EOPNOTSUPP;
+       return 0;
 }
 
 static inline int ice_eswitch_rebuild(struct ice_pf *pf)
index 2774cbd..6d19c58 100644 (file)
@@ -2689,6 +2689,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
                return;
 
        vsi->irqs_ready = false;
+       ice_free_cpu_rx_rmap(vsi);
+
        ice_for_each_q_vector(vsi, i) {
                u16 vector = i + base;
                int irq_num;
@@ -2702,7 +2704,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
                        continue;
 
                /* clear the affinity notifier in the IRQ descriptor */
-               irq_set_affinity_notifier(irq_num, NULL);
+               if (!IS_ENABLED(CONFIG_RFS_ACCEL))
+                       irq_set_affinity_notifier(irq_num, NULL);
 
                /* clear the affinity_mask in the IRQ descriptor */
                irq_set_affinity_hint(irq_num, NULL);
index d768925..5b11988 100644 (file)
@@ -2510,6 +2510,13 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
                irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
        }
 
+       err = ice_set_cpu_rx_rmap(vsi);
+       if (err) {
+               netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
+                          vsi->vsi_num, ERR_PTR(err));
+               goto free_q_irqs;
+       }
+
        vsi->irqs_ready = true;
        return 0;
 
@@ -3692,20 +3699,12 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
         */
        ice_napi_add(vsi);
 
-       status = ice_set_cpu_rx_rmap(vsi);
-       if (status) {
-               dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n",
-                       vsi->vsi_num, status);
-               goto unroll_napi_add;
-       }
        status = ice_init_mac_fltr(pf);
        if (status)
-               goto free_cpu_rx_map;
+               goto unroll_napi_add;
 
        return 0;
 
-free_cpu_rx_map:
-       ice_free_cpu_rx_rmap(vsi);
 unroll_napi_add:
        ice_tc_indir_block_unregister(vsi);
 unroll_cfg_netdev:
@@ -5167,7 +5166,6 @@ static int __maybe_unused ice_suspend(struct device *dev)
                        continue;
                ice_vsi_free_q_vectors(pf->vsi[v]);
        }
-       ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
        ice_clear_interrupt_scheme(pf);
 
        pci_save_state(pdev);
index 4eb0599..13cdb5e 100644 (file)
@@ -641,6 +641,7 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
        status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, 0,
                                       orom_data, hw->flash.banks.orom_size);
        if (status) {
+               vfree(orom_data);
                ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM data\n");
                return status;
        }
index 866ee4d..9dd38f6 100644 (file)
@@ -415,8 +415,8 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
  */
 static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
 {
+       u32 nb_buffs_extra = 0, nb_buffs = 0;
        union ice_32b_rx_flex_desc *rx_desc;
-       u32 nb_buffs_extra = 0, nb_buffs;
        u16 ntu = rx_ring->next_to_use;
        u16 total_count = count;
        struct xdp_buff **xdp;
@@ -428,6 +428,10 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
                nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,
                                                   rx_desc,
                                                   rx_ring->count - ntu);
+               if (nb_buffs_extra != rx_ring->count - ntu) {
+                       ntu += nb_buffs_extra;
+                       goto exit;
+               }
                rx_desc = ICE_RX_DESC(rx_ring, 0);
                xdp = ice_xdp_buf(rx_ring, 0);
                ntu = 0;
@@ -441,6 +445,7 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
        if (ntu == rx_ring->count)
                ntu = 0;
 
+exit:
        if (rx_ring->next_to_use != ntu)
                ice_release_rx_desc(rx_ring, ntu);
 
index 66ea566..59d5c46 100644 (file)
@@ -156,8 +156,15 @@ void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
 {
        u32 swfw_sync;
 
-       while (igc_get_hw_semaphore_i225(hw))
-               ; /* Empty */
+       /* Releasing the resource requires first getting the HW semaphore.
+        * If we fail to get the semaphore, there is nothing we can do,
+        * except log an error and quit. We are not allowed to hang here
+        * indefinitely, as it may cause denial of service or system crash.
+        */
+       if (igc_get_hw_semaphore_i225(hw)) {
+               hw_dbg("Failed to release SW_FW_SYNC.\n");
+               return;
+       }
 
        swfw_sync = rd32(IGC_SW_FW_SYNC);
        swfw_sync &= ~mask;
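
A sketch of the bounded-release behaviour described in the new comment above, with a stand-in for the hardware semaphore: if the semaphore cannot be taken, log and bail out instead of spinning forever on a resource another agent may never release.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for a hardware semaphore that may be stuck held by another agent. */
    static bool try_get_semaphore(void) { return false; }

    static void release_resource(unsigned int mask, unsigned int *sync_reg)
    {
        /*
         * Spinning "while (!try_get_semaphore());" could hang this CPU forever;
         * failing loudly and returning is the lesser evil.
         */
        if (!try_get_semaphore()) {
            fprintf(stderr, "failed to release resource 0x%x\n", mask);
            return;
        }
        *sync_reg &= ~mask;
    }

    int main(void)
    {
        unsigned int sync_reg = 0xff;

        release_resource(0x0f, &sync_reg);
        printf("sync=0x%x\n", sync_reg);
        return 0;
    }
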
index 40dbf4b..6961f65 100644 (file)
@@ -581,7 +581,7 @@ static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data)
         * the lower time out
         */
        for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
-               usleep_range(500, 1000);
+               udelay(50);
                mdic = rd32(IGC_MDIC);
                if (mdic & IGC_MDIC_READY)
                        break;
@@ -638,7 +638,7 @@ static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data)
         * the lower time out
         */
        for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
-               usleep_range(500, 1000);
+               udelay(50);
                mdic = rd32(IGC_MDIC);
                if (mdic & IGC_MDIC_READY)
                        break;
index 0d6e321..653e9f1 100644 (file)
@@ -992,6 +992,17 @@ static void igc_ptp_time_restore(struct igc_adapter *adapter)
        igc_ptp_write_i225(adapter, &ts);
 }
 
+static void igc_ptm_stop(struct igc_adapter *adapter)
+{
+       struct igc_hw *hw = &adapter->hw;
+       u32 ctrl;
+
+       ctrl = rd32(IGC_PTM_CTRL);
+       ctrl &= ~IGC_PTM_CTRL_EN;
+
+       wr32(IGC_PTM_CTRL, ctrl);
+}
+
 /**
  * igc_ptp_suspend - Disable PTP work items and prepare for suspend
  * @adapter: Board private structure
@@ -1009,8 +1020,10 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
        adapter->ptp_tx_skb = NULL;
        clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
 
-       if (pci_device_is_present(adapter->pdev))
+       if (pci_device_is_present(adapter->pdev)) {
                igc_ptp_time_save(adapter);
+               igc_ptm_stop(adapter);
+       }
 }
 
 /**
index 939b692..ce843ea 100644 (file)
@@ -650,6 +650,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
        return 0;
 
 errout:
+       mutex_destroy(&mlxsw_i2c->cmd.lock);
        i2c_set_clientdata(client, NULL);
 
        return err;
index b734664..fe663b0 100644 (file)
@@ -423,7 +423,7 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
 
        parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
        ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
-                           0, 0, parms.link, tun->fwmark, 0);
+                           0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0);
 
        rt = ip_route_output_key(tun->net, &fl4);
        if (IS_ERR(rt))
index ce5970b..2679111 100644 (file)
@@ -346,7 +346,8 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
 
                        lan966x_mac_process_raw_entry(&raw_entries[column],
                                                      mac, &vid, &dest_idx);
-                       WARN_ON(dest_idx > lan966x->num_phys_ports);
+                       if (WARN_ON(dest_idx > lan966x->num_phys_ports))
+                               continue;
 
                        /* If the entry in SW is found, then there is nothing
                         * to do
@@ -392,7 +393,8 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
 
                lan966x_mac_process_raw_entry(&raw_entries[column],
                                              mac, &vid, &dest_idx);
-               WARN_ON(dest_idx > lan966x->num_phys_ports);
+               if (WARN_ON(dest_idx > lan966x->num_phys_ports))
+                       continue;
 
                mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx);
                if (!mac_entry)
index 1f8c67f..95830e3 100644 (file)
@@ -446,6 +446,12 @@ static bool lan966x_hw_offload(struct lan966x *lan966x, u32 port,
                     ANA_CPU_FWD_CFG_MLD_REDIR_ENA)))
                return true;
 
+       if (eth_type_vlan(skb->protocol)) {
+               skb = skb_vlan_untag(skb);
+               if (unlikely(!skb))
+                       return false;
+       }
+
        if (skb->protocol == htons(ETH_P_IP) &&
            ip_hdr(skb)->protocol == IPPROTO_IGMP)
                return false;
@@ -665,6 +671,9 @@ static void lan966x_cleanup_ports(struct lan966x *lan966x)
                disable_irq(lan966x->ana_irq);
                lan966x->ana_irq = -ENXIO;
        }
+
+       if (lan966x->ptp_irq)
+               devm_free_irq(lan966x->dev, lan966x->ptp_irq, lan966x);
 }
 
 static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
index ae78277..0a1041d 100644 (file)
@@ -29,10 +29,10 @@ enum {
 
 static u64 lan966x_ptp_get_nominal_value(void)
 {
-       u64 res = 0x304d2df1;
-
-       res <<= 32;
-       return res;
+       /* This is the default value that for each system clock, the time of day
+        * is increased. It has the format 5.59 nanosecond.
+        */
+       return 0x304d4873ecade305;
 }
 
 int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr)
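
The nominal-increment change above is easiest to read back in nanoseconds; per the new comment the register holds a 5.59 fixed-point value, so the conversion is just a divide by 2^59 (both constants taken straight from the hunk).

    #include <stdio.h>
    #include <stdint.h>

    /* Convert a 5.59 fixed-point nanosecond increment back to nanoseconds. */
    static double fixed_5_59_to_ns(uint64_t v)
    {
        return (double)v / (double)(1ULL << 59);
    }

    int main(void)
    {
        uint64_t old_val = 0x304d2df1ULL << 32;     /* low 32 bits were truncated */
        uint64_t new_val = 0x304d4873ecade305ULL;

        printf("old: %.9f ns per clock\n", fixed_5_59_to_ns(old_val));
        printf("new: %.9f ns per clock\n", fixed_5_59_to_ns(new_val));
        return 0;
    }
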
index e3555c9..df2bee6 100644 (file)
@@ -322,8 +322,7 @@ static int lan966x_port_prechangeupper(struct net_device *dev,
 
        if (netif_is_bridge_master(info->upper_dev) && !info->linking)
                switchdev_bridge_port_unoffload(port->dev, port,
-                                               &lan966x_switchdev_nb,
-                                               &lan966x_switchdev_blocking_nb);
+                                               NULL, NULL);
 
        return NOTIFY_DONE;
 }
index e443bd8..ee9c607 100644 (file)
@@ -2859,6 +2859,8 @@ static void ocelot_port_set_mcast_flood(struct ocelot *ocelot, int port,
                val = BIT(port);
 
        ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MC);
+       ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV4);
+       ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MCIPV6);
 }
 
 static void ocelot_port_set_bcast_flood(struct ocelot *ocelot, int port,
index cd478d2..00f6d34 100644 (file)
 #define TSE_PCS_USE_SGMII_ENA                          BIT(0)
 #define TSE_PCS_IF_USE_SGMII                           0x03
 
-#define SGMII_ADAPTER_CTRL_REG                         0x00
-#define SGMII_ADAPTER_DISABLE                          0x0001
-#define SGMII_ADAPTER_ENABLE                           0x0000
-
 #define AUTONEGO_LINK_TIMER                            20
 
 static int tse_pcs_reset(void __iomem *base, struct tse_pcs *pcs)
@@ -202,12 +198,8 @@ void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev,
                           unsigned int speed)
 {
        void __iomem *tse_pcs_base = pcs->tse_pcs_base;
-       void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base;
        u32 val;
 
-       writew(SGMII_ADAPTER_ENABLE,
-              sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
-
        pcs->autoneg = phy_dev->autoneg;
 
        if (phy_dev->autoneg == AUTONEG_ENABLE) {
index 442812c..694ac25 100644 (file)
 #include <linux/phy.h>
 #include <linux/timer.h>
 
+#define SGMII_ADAPTER_CTRL_REG         0x00
+#define SGMII_ADAPTER_ENABLE           0x0000
+#define SGMII_ADAPTER_DISABLE          0x0001
+
 struct tse_pcs {
        struct device *dev;
        void __iomem *tse_pcs_base;
index b7c2579..ac9e6c7 100644 (file)
@@ -18,9 +18,6 @@
 
 #include "altr_tse_pcs.h"
 
-#define SGMII_ADAPTER_CTRL_REG                          0x00
-#define SGMII_ADAPTER_DISABLE                           0x0001
-
 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0
 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1
 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2
@@ -62,16 +59,14 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
 {
        struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
        void __iomem *splitter_base = dwmac->splitter_base;
-       void __iomem *tse_pcs_base = dwmac->pcs.tse_pcs_base;
        void __iomem *sgmii_adapter_base = dwmac->pcs.sgmii_adapter_base;
        struct device *dev = dwmac->dev;
        struct net_device *ndev = dev_get_drvdata(dev);
        struct phy_device *phy_dev = ndev->phydev;
        u32 val;
 
-       if ((tse_pcs_base) && (sgmii_adapter_base))
-               writew(SGMII_ADAPTER_DISABLE,
-                      sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+       writew(SGMII_ADAPTER_DISABLE,
+              sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
 
        if (splitter_base) {
                val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG);
@@ -93,7 +88,9 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
                writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
        }
 
-       if (tse_pcs_base && sgmii_adapter_base)
+       writew(SGMII_ADAPTER_ENABLE,
+              sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+       if (phy_dev)
                tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed);
 }
 
index 22fea0f..92d3294 100644 (file)
@@ -71,9 +71,9 @@ static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
        writel(value, ioaddr + PTP_TCR);
 
        /* wait for present system time initialize to complete */
-       return readl_poll_timeout(ioaddr + PTP_TCR, value,
+       return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value,
                                 !(value & PTP_TCR_TSINIT),
-                                10000, 100000);
+                                10, 100000);
 }
 
 static int config_addend(void __iomem *ioaddr, u32 addend)
index 1610529..74e845f 100644 (file)
@@ -1355,7 +1355,9 @@ static int rr_close(struct net_device *dev)
 
        rrpriv->fw_running = 0;
 
+       spin_unlock_irqrestore(&rrpriv->lock, flags);
        del_timer_sync(&rrpriv->timer);
+       spin_lock_irqsave(&rrpriv->lock, flags);
 
        writel(0, &regs->TxPi);
        writel(0, &regs->IpRxPi);
index 069e882..b00bc81 100644 (file)
@@ -460,8 +460,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
                        return RX_HANDLER_CONSUMED;
                *pskb = skb;
                eth = eth_hdr(skb);
-               if (macvlan_forward_source(skb, port, eth->h_source))
+               if (macvlan_forward_source(skb, port, eth->h_source)) {
+                       kfree_skb(skb);
                        return RX_HANDLER_CONSUMED;
+               }
                src = macvlan_hash_lookup(port, eth->h_source);
                if (src && src->mode != MACVLAN_MODE_VEPA &&
                    src->mode != MACVLAN_MODE_BRIDGE) {
@@ -480,8 +482,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
                return RX_HANDLER_PASS;
        }
 
-       if (macvlan_forward_source(skb, port, eth->h_source))
+       if (macvlan_forward_source(skb, port, eth->h_source)) {
+               kfree_skb(skb);
                return RX_HANDLER_CONSUMED;
+       }
        if (macvlan_passthru(port))
                vlan = list_first_or_null_rcu(&port->vlans,
                                              struct macvlan_dev, list);
index 1becb1a..1c1584f 100644 (file)
@@ -43,6 +43,11 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
        int rc;
 
        rc = fwnode_irq_get(child, 0);
+       /* Don't wait forever if the IRQ provider doesn't become available,
+        * just fall back to poll mode
+        */
+       if (rc == -EPROBE_DEFER)
+               rc = driver_deferred_probe_check_state(&phy->mdio.dev);
        if (rc == -EPROBE_DEFER)
                return rc;
 
index 389df3f..c2c0e36 100644 (file)
@@ -706,7 +706,6 @@ static int lan87xx_read_status(struct phy_device *phydev)
 static int lan87xx_config_aneg(struct phy_device *phydev)
 {
        u16 ctl = 0;
-       int rc;
 
        switch (phydev->master_slave_set) {
        case MASTER_SLAVE_CFG_MASTER_FORCE:
@@ -722,11 +721,7 @@ static int lan87xx_config_aneg(struct phy_device *phydev)
                return -EOPNOTSUPP;
        }
 
-       rc = phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl);
-       if (rc == 1)
-               rc = genphy_soft_reset(phydev);
-
-       return rc;
+       return phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl);
 }
 
 static struct phy_driver microchip_t1_phy_driver[] = {
@@ -748,6 +743,7 @@ static struct phy_driver microchip_t1_phy_driver[] = {
        {
                PHY_ID_MATCH_MODEL(PHY_ID_LAN937X),
                .name           = "Microchip LAN937x T1",
+               .flags          = PHY_POLL_CABLE_TEST,
                .features       = PHY_BASIC_T1_FEATURES,
                .config_init    = lan87xx_config_init,
                .suspend        = genphy_suspend,
index 276a0e4..dbe4c0a 100644 (file)
@@ -1124,7 +1124,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 
        /* NETIF_F_LLTX requires to do our own update of trans_start */
        queue = netdev_get_tx_queue(dev, txq);
-       queue->trans_start = jiffies;
+       txq_trans_cond_update(queue);
 
        /* Notify and wake up reader process */
        if (tfile->flags & TUN_FASYNC)
index 1b57149..eb0121a 100644 (file)
@@ -320,7 +320,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
 
        rcu_read_lock();
        rcv = rcu_dereference(priv->peer);
-       if (unlikely(!rcv)) {
+       if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) {
                kfree_skb(skb);
                goto drop;
        }
index de97ff9..8a5e3a6 100644 (file)
@@ -651,11 +651,11 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
 
        rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
        if (rd == NULL)
-               return -ENOBUFS;
+               return -ENOMEM;
 
        if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
                kfree(rd);
-               return -ENOBUFS;
+               return -ENOMEM;
        }
 
        rd->remote_ip = *ip;
index d5b83f9..e6b34b0 100644 (file)
@@ -3136,6 +3136,20 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
                        arvif->do_not_send_tmpl = true;
                else
                        arvif->do_not_send_tmpl = false;
+
+               if (vif->bss_conf.he_support) {
+                       ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+                                                           WMI_VDEV_PARAM_BA_MODE,
+                                                           WMI_BA_MODE_BUFFER_SIZE_256);
+                       if (ret)
+                               ath11k_warn(ar->ab,
+                                           "failed to set BA BUFFER SIZE 256 for vdev: %d\n",
+                                           arvif->vdev_id);
+                       else
+                               ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+                                          "Set BA BUFFER SIZE 256 for VDEV: %d\n",
+                                          arvif->vdev_id);
+               }
        }
 
        if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
@@ -3171,14 +3185,6 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
 
                if (arvif->is_up && vif->bss_conf.he_support &&
                    vif->bss_conf.he_oper.params) {
-                       ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
-                                                           WMI_VDEV_PARAM_BA_MODE,
-                                                           WMI_BA_MODE_BUFFER_SIZE_256);
-                       if (ret)
-                               ath11k_warn(ar->ab,
-                                           "failed to set BA BUFFER SIZE 256 for vdev: %d\n",
-                                           arvif->vdev_id);
-
                        param_id = WMI_VDEV_PARAM_HEOPS_0_31;
                        param_value = vif->bss_conf.he_oper.params;
                        ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
index 98090e4..e2791d4 100644 (file)
@@ -839,7 +839,7 @@ static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix)
                        continue;
 
                txinfo = IEEE80211_SKB_CB(bf->bf_mpdu);
-               fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0];
+               fi = (struct ath_frame_info *)&txinfo->status.status_driver_data[0];
                if (fi->keyix == keyix)
                        return true;
        }
index d0caf1d..db83cc4 100644 (file)
@@ -141,8 +141,8 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
 {
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        BUILD_BUG_ON(sizeof(struct ath_frame_info) >
-                    sizeof(tx_info->rate_driver_data));
-       return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
+                    sizeof(tx_info->status.status_driver_data));
+       return (struct ath_frame_info *) &tx_info->status.status_driver_data[0];
 }
 
 static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
@@ -2542,6 +2542,16 @@ skip_tx_complete:
        spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
 }
 
+static void ath_clear_tx_status(struct ieee80211_tx_info *tx_info)
+{
+       void *ptr = &tx_info->status;
+
+       memset(ptr + sizeof(tx_info->status.rates), 0,
+              sizeof(tx_info->status) -
+              sizeof(tx_info->status.rates) -
+              sizeof(tx_info->status.status_driver_data));
+}
+
 static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_tx_status *ts, int nframes, int nbad,
                             int txok)
@@ -2553,6 +2563,8 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
        struct ath_hw *ah = sc->sc_ah;
        u8 i, tx_rateindex;
 
+       ath_clear_tx_status(tx_info);
+
        if (txok)
                tx_info->status.ack_signal = ts->ts_rssi;
 
@@ -2567,6 +2579,13 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
        tx_info->status.ampdu_len = nframes;
        tx_info->status.ampdu_ack_len = nframes - nbad;
 
+       tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
+
+       for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
+               tx_info->status.rates[i].count = 0;
+               tx_info->status.rates[i].idx = -1;
+       }
+
        if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
            (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
                /*
@@ -2588,16 +2607,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
                        tx_info->status.rates[tx_rateindex].count =
                                hw->max_rate_tries;
        }
-
-       for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
-               tx_info->status.rates[i].count = 0;
-               tx_info->status.rates[i].idx = -1;
-       }
-
-       tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
-
-       /* we report airtime in ath_tx_count_airtime(), don't report twice */
-       tx_info->status.tx_time = 0;
 }
 
 static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
index 55285ca..212fbbe 100644 (file)
@@ -557,7 +557,7 @@ enum brcmf_sdio_frmtype {
        BRCMF_SDIO_FT_SUB,
 };
 
-#define SDIOD_DRVSTR_KEY(chip, pmu)     (((chip) << 16) | (pmu))
+#define SDIOD_DRVSTR_KEY(chip, pmu)     (((unsigned int)(chip) << 16) | (pmu))
 
 /* SDIO Pad drive strength to select value mappings */
 struct sdiod_drive_str {
index 8a22ee5..df85ebc 100644 (file)
@@ -80,7 +80,7 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
 
        /* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
-       mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);
+       mt76_rmw_field(dev, 0x15a0c, 0xfU << 28, 0xf);
 
        /* RG_SSUSB_CDR_BR_PE1D = 0x3 */
        mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
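Both this change and the preceding SDIOD_DRVSTR_KEY() change make the shifted operand unsigned (0xfU, (unsigned int)(chip)). The likely reason, stated here as an assumption: shifting a plain signed int constant into bit 31, or shifting set high bits past it, is undefined behaviour that tools such as UBSAN flag, while the same shift on an unsigned value is well defined. A one-line illustration:

    #include <linux/types.h>

    static inline u32 field_mask(void)
    {
            /* 0xf << 28 would set bit 31 of a signed int (UB in C);
             * 0xfU << 28 performs the shift in unsigned arithmetic.
             */
            return 0xfU << 28;      /* 0xF0000000, well defined */
    }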
index efb85c6..e1846d0 100644 (file)
@@ -366,7 +366,7 @@ static inline void nvme_end_req(struct request *req)
 {
        blk_status_t status = nvme_error_status(nvme_req(req)->status);
 
-       if (unlikely(nvme_req(req)->status != NVME_SC_SUCCESS))
+       if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET)))
                nvme_log_error(req);
        nvme_end_req_zoned(req);
        nvme_trace_bio_complete(req);
@@ -1015,6 +1015,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                        goto out;
        }
 
+       req->rq_flags |= RQF_QUIET;
        ret = nvme_execute_rq(req, at_head);
        if (result && ret >= 0)
                *result = nvme_req(req)->result;
@@ -1287,6 +1288,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
                                 warn_str, cur->nidl);
                        return -1;
                }
+               if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+                       return NVME_NIDT_EUI64_LEN;
                memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
                return NVME_NIDT_EUI64_LEN;
        case NVME_NIDT_NGUID:
@@ -1295,6 +1298,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
                                 warn_str, cur->nidl);
                        return -1;
                }
+               if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+                       return NVME_NIDT_NGUID_LEN;
                memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
                return NVME_NIDT_NGUID_LEN;
        case NVME_NIDT_UUID:
@@ -1303,6 +1308,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
                                 warn_str, cur->nidl);
                        return -1;
                }
+               if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+                       return NVME_NIDT_UUID_LEN;
                uuid_copy(&ids->uuid, data + sizeof(*cur));
                return NVME_NIDT_UUID_LEN;
        case NVME_NIDT_CSI:
@@ -1399,12 +1406,18 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
        if ((*id)->ncap == 0) /* namespace not allocated or attached */
                goto out_free_id;
 
-       if (ctrl->vs >= NVME_VS(1, 1, 0) &&
-           !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
-               memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
-       if (ctrl->vs >= NVME_VS(1, 2, 0) &&
-           !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
-               memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+
+       if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
+               dev_info(ctrl->device,
+                        "Ignoring bogus Namespace Identifiers\n");
+       } else {
+               if (ctrl->vs >= NVME_VS(1, 1, 0) &&
+                   !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
+                       memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
+               if (ctrl->vs >= NVME_VS(1, 2, 0) &&
+                   !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
+                       memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+       }
 
        return 0;
 
index 1393bbf..a2b53ca 100644 (file)
@@ -144,6 +144,11 @@ enum nvme_quirks {
         * encoding the generation sequence number.
         */
        NVME_QUIRK_SKIP_CID_GEN                 = (1 << 17),
+
+       /*
+        * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
+        */
+       NVME_QUIRK_BOGUS_NID                    = (1 << 18),
 };
 
 /*
index d817ca1..3aacf1c 100644 (file)
@@ -3409,7 +3409,10 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
        { PCI_VDEVICE(INTEL, 0x5845),   /* Qemu emulated controller */
                .driver_data = NVME_QUIRK_IDENTIFY_CNS |
-                               NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+                               NVME_QUIRK_DISABLE_WRITE_ZEROES |
+                               NVME_QUIRK_BOGUS_NID, },
+       { PCI_VDEVICE(REDHAT, 0x0010),  /* Qemu emulated controller */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x126f, 0x2263),   /* Silicon Motion unidentified */
                .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
        { PCI_DEVICE(0x1bb1, 0x0100),   /* Seagate Nytro Flash Storage */
@@ -3447,6 +3450,10 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
        { PCI_DEVICE(0x2646, 0x2263),   /* KINGSTON A2000 NVMe SSD  */
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+       { PCI_DEVICE(0x1e4B, 0x1002),   /* MAXIO MAP1002 */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
+       { PCI_DEVICE(0x1e4B, 0x1202),   /* MAXIO MAP1202 */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
                .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
index 9694370..59d3980 100644 (file)
@@ -400,6 +400,9 @@ validate_group(struct perf_event *event)
        if (!validate_event(event->pmu, &fake_pmu, leader))
                return -EINVAL;
 
+       if (event == leader)
+               return 0;
+
        for_each_sibling_event(sibling, leader) {
                if (!validate_event(event->pmu, &fake_pmu, sibling))
                        return -EINVAL;
@@ -489,12 +492,7 @@ __hw_perf_event_init(struct perf_event *event)
                local64_set(&hwc->period_left, hwc->sample_period);
        }
 
-       if (event->group_leader != event) {
-               if (validate_group(event) != 0)
-                       return -EINVAL;
-       }
-
-       return 0;
+       return validate_group(event);
 }
 
 static int armpmu_event_init(struct perf_event *event)
index 6b8b3ab..3463629 100644 (file)
@@ -584,21 +584,6 @@ static struct platform_driver acerhdf_driver = {
        .remove = acerhdf_remove,
 };
 
-/* checks if str begins with start */
-static int str_starts_with(const char *str, const char *start)
-{
-       unsigned long str_len = 0, start_len = 0;
-
-       str_len = strlen(str);
-       start_len = strlen(start);
-
-       if (str_len >= start_len &&
-                       !strncmp(str, start, start_len))
-               return 1;
-
-       return 0;
-}
-
 /* check hardware */
 static int __init acerhdf_check_hardware(void)
 {
@@ -651,9 +636,9 @@ static int __init acerhdf_check_hardware(void)
                 * check if actual hardware BIOS vendor, product and version
                 * IDs start with the strings of BIOS table entry
                 */
-               if (str_starts_with(vendor, bt->vendor) &&
-                               str_starts_with(product, bt->product) &&
-                               str_starts_with(version, bt->version)) {
+               if (strstarts(vendor, bt->vendor) &&
+                   strstarts(product, bt->product) &&
+                   strstarts(version, bt->version)) {
                        found = 1;
                        break;
                }
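The hunks above drop the driver-local str_starts_with() in favour of the generic strstarts() helper from <linux/string.h>, which returns true when a string begins with the given prefix. Minimal usage sketch (the "Acer" literal is only an example):

    #include <linux/string.h>
    #include <linux/types.h>

    static bool vendor_matches(const char *vendor)
    {
            /* true when 'vendor' starts with the prefix */
            return strstarts(vendor, "Acer");
    }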
index e9d0dbb..fa4123d 100644 (file)
@@ -160,8 +160,10 @@ MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism");
 
 static struct amd_pmc_dev pmc;
 static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret);
-static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data);
 static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf);
+#ifdef CONFIG_SUSPEND
+static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data);
+#endif
 
 static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
 {
@@ -325,6 +327,7 @@ static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table
        return 0;
 }
 
+#ifdef CONFIG_SUSPEND
 static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
 {
        struct smu_metrics table;
@@ -338,6 +341,7 @@ static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
                dev_dbg(pdev->dev, "Last suspend in deepest state for %lluus\n",
                         table.timein_s0i3_lastcapture);
 }
+#endif
 
 #ifdef CONFIG_DEBUG_FS
 static int smu_fw_info_show(struct seq_file *s, void *unused)
@@ -569,6 +573,7 @@ out_unlock:
        return rc;
 }
 
+#ifdef CONFIG_SUSPEND
 static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
 {
        switch (dev->cpu_id) {
@@ -694,6 +699,7 @@ static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = {
        .prepare = amd_pmc_s2idle_prepare,
        .restore = amd_pmc_s2idle_restore,
 };
+#endif
 
 static const struct pci_device_id pmc_pci_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
@@ -733,6 +739,7 @@ static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
        return 0;
 }
 
+#ifdef CONFIG_SUSPEND
 static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
 {
        int err;
@@ -753,6 +760,7 @@ static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
 
        return 0;
 }
+#endif
 
 static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf)
 {
@@ -859,9 +867,11 @@ static int amd_pmc_probe(struct platform_device *pdev)
 
        amd_pmc_get_smu_version(dev);
        platform_set_drvdata(pdev, dev);
+#ifdef CONFIG_SUSPEND
        err = acpi_register_lps0_dev(&amd_pmc_s2idle_dev_ops);
        if (err)
                dev_warn(dev->dev, "failed to register LPS0 sleep handler, expect increased power consumption\n");
+#endif
 
        amd_pmc_dbgfs_register(dev);
        return 0;
@@ -875,7 +885,9 @@ static int amd_pmc_remove(struct platform_device *pdev)
 {
        struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
 
+#ifdef CONFIG_SUSPEND
        acpi_unregister_lps0_dev(&amd_pmc_s2idle_dev_ops);
+#endif
        amd_pmc_dbgfs_unregister(dev);
        pci_dev_put(dev->rdev);
        mutex_destroy(&dev->lock);
index f5c72e3..0553428 100644 (file)
@@ -10,7 +10,6 @@
 
 #define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
 
-#include <linux/io.h>
 #include <linux/delay.h>
 #include <linux/dmi.h>
 #include <linux/err.h>
index c1d9ed9..19f6b45 100644 (file)
@@ -1121,8 +1121,6 @@ static void kbd_led_set(struct led_classdev *led_cdev,
 
        if (value > samsung->kbd_led.max_brightness)
                value = samsung->kbd_led.max_brightness;
-       else if (value < 0)
-               value = 0;
 
        samsung->kbd_led_wk = value;
        queue_work(samsung->led_workqueue, &samsung->kbd_led_work);
index bce17ca..a01a927 100644 (file)
@@ -740,16 +740,8 @@ static ssize_t certificate_store(struct kobject *kobj,
        if (!tlmi_priv.certificate_support)
                return -EOPNOTSUPP;
 
-       new_cert = kstrdup(buf, GFP_KERNEL);
-       if (!new_cert)
-               return -ENOMEM;
-       /* Strip out CR if one is present */
-       strip_cr(new_cert);
-
        /* If empty then clear installed certificate */
-       if (new_cert[0] == '\0') { /* Clear installed certificate */
-               kfree(new_cert);
-
+       if ((buf[0] == '\0') || (buf[0] == '\n')) { /* Clear installed certificate */
                /* Check that signature is set */
                if (!setting->signature || !setting->signature[0])
                        return -EACCES;
@@ -763,14 +755,16 @@ static ssize_t certificate_store(struct kobject *kobj,
 
                ret = tlmi_simple_call(LENOVO_CLEAR_BIOS_CERT_GUID, auth_str);
                kfree(auth_str);
-               if (ret)
-                       return ret;
 
-               kfree(setting->certificate);
-               setting->certificate = NULL;
-               return count;
+               return ret ?: count;
        }
 
+       new_cert = kstrdup(buf, GFP_KERNEL);
+       if (!new_cert)
+               return -ENOMEM;
+       /* Strip out CR if one is present */
+       strip_cr(new_cert);
+
        if (setting->cert_installed) {
                /* Certificate is installed so this is an update */
                if (!setting->signature || !setting->signature[0]) {
@@ -792,21 +786,14 @@ static ssize_t certificate_store(struct kobject *kobj,
                auth_str = kasprintf(GFP_KERNEL, "%s,%s",
                                new_cert, setting->password);
        }
-       if (!auth_str) {
-               kfree(new_cert);
+       kfree(new_cert);
+       if (!auth_str)
                return -ENOMEM;
-       }
 
        ret = tlmi_simple_call(guid, auth_str);
        kfree(auth_str);
-       if (ret) {
-               kfree(new_cert);
-               return ret;
-       }
 
-       kfree(setting->certificate);
-       setting->certificate = new_cert;
-       return count;
+       return ret ?: count;
 }
 
 static struct kobj_attribute auth_certificate = __ATTR_WO(certificate);
@@ -1194,6 +1181,10 @@ static void tlmi_release_attr(void)
 
        kset_unregister(tlmi_priv.attribute_kset);
 
+       /* Free up any saved signatures */
+       kfree(tlmi_priv.pwd_admin->signature);
+       kfree(tlmi_priv.pwd_admin->save_signature);
+
        /* Authentication structures */
        sysfs_remove_group(&tlmi_priv.pwd_admin->kobj, &auth_attr_group);
        kobject_put(&tlmi_priv.pwd_admin->kobj);
@@ -1210,11 +1201,6 @@ static void tlmi_release_attr(void)
        }
 
        kset_unregister(tlmi_priv.authentication_kset);
-
-       /* Free up any saved certificates/signatures */
-       kfree(tlmi_priv.pwd_admin->certificate);
-       kfree(tlmi_priv.pwd_admin->signature);
-       kfree(tlmi_priv.pwd_admin->save_signature);
 }
 
 static int tlmi_sysfs_init(void)
index 4f69df6..4daba61 100644 (file)
@@ -63,7 +63,6 @@ struct tlmi_pwd_setting {
        int index; /*Used for HDD and NVME auth */
        enum level_option level;
        bool cert_installed;
-       char *certificate;
        char *signature;
        char *save_signature;
 };
index ea02c8d..d925cb1 100644 (file)
@@ -604,6 +604,12 @@ int power_supply_get_battery_info(struct power_supply *psy,
        err = samsung_sdi_battery_get_info(&psy->dev, value, &info);
        if (!err)
                goto out_ret_pointer;
+       else if (err == -ENODEV)
+               /*
+                * Device does not have a static battery.
+                * Proceed to look for a simple battery.
+                */
+               err = 0;
 
        if (strcmp("simple-battery", value)) {
                err = -ENODEV;
index 9d59f27..b33daab 100644 (file)
@@ -824,6 +824,7 @@ static struct samsung_sdi_battery samsung_sdi_batteries[] = {
                        .constant_charge_current_max_ua = 900000,
                        .constant_charge_voltage_max_uv = 4200000,
                        .charge_term_current_ua = 200000,
+                       .charge_restart_voltage_uv = 4170000,
                        .maintenance_charge = samsung_maint_charge_table,
                        .maintenance_charge_size = ARRAY_SIZE(samsung_maint_charge_table),
                        .alert_low_temp_charge_current_ua = 300000,
@@ -867,6 +868,7 @@ static struct samsung_sdi_battery samsung_sdi_batteries[] = {
                        .constant_charge_current_max_ua = 1500000,
                        .constant_charge_voltage_max_uv = 4350000,
                        .charge_term_current_ua = 120000,
+                       .charge_restart_voltage_uv = 4300000,
                        .maintenance_charge = samsung_maint_charge_table,
                        .maintenance_charge_size = ARRAY_SIZE(samsung_maint_charge_table),
                        .alert_low_temp_charge_current_ua = 300000,
index 1e83150..a8dde46 100644 (file)
@@ -121,7 +121,9 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev)
                return dev_err_probe(dev, PTR_ERR(priv->rstc),
                                     "failed to get reset\n");
 
-       reset_control_deassert(priv->rstc);
+       error = reset_control_deassert(priv->rstc);
+       if (error)
+               return error;
 
        priv->rcdev.ops = &rzg2l_usbphy_ctrl_reset_ops;
        priv->rcdev.of_reset_n_cells = 1;
index 24d3395..4c5bba5 100644 (file)
@@ -20,6 +20,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
        struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
        struct mrq_reset_request request;
        struct tegra_bpmp_message msg;
+       int err;
 
        memset(&request, 0, sizeof(request));
        request.cmd = command;
@@ -30,7 +31,13 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
        msg.tx.data = &request;
        msg.tx.size = sizeof(request);
 
-       return tegra_bpmp_transfer(bpmp, &msg);
+       err = tegra_bpmp_transfer(bpmp, &msg);
+       if (err)
+               return err;
+       if (msg.rx.ret)
+               return -EINVAL;
+
+       return 0;
 }
 
 static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
index 7fe7f53..6c864b0 100644 (file)
@@ -1977,7 +1977,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
                if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
                        break;
 
-               if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx))) {
+               if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
                        if (nopin->op_code == ISCSI_OP_NOOP_IN &&
                            nopin->itt == (u16) RESERVED_ITT) {
                                printk(KERN_ALERT "bnx2i: Unsolicited "
index fe86fd6..15fbd09 100644 (file)
@@ -1721,7 +1721,7 @@ static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
                        struct iscsi_conn *conn = ep->conn->cls_conn->dd_data;
 
                        /* Must suspend all rx queue activity for this ep */
-                       set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+                       set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
                }
                /* CONN_DISCONNECT timeout may or may not be an issue depending
                 * on what transcribed in TCP layer, different targets behave
index 8c7d4dd..4365d52 100644 (file)
@@ -1634,11 +1634,11 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
        log_debug(1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p, conn 0x%p.\n", csk, conn);
 
-       if (unlikely(!conn || conn->suspend_rx)) {
+       if (unlikely(!conn || test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
                log_debug(1 << CXGBI_DBG_PDU_RX,
-                       "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
+                       "csk 0x%p, conn 0x%p, id %d, conn flags 0x%lx!\n",
                        csk, conn, conn ? conn->id : 0xFF,
-                       conn ? conn->suspend_rx : 0xFF);
+                       conn ? conn->flags : 0xFF);
                return;
        }
 
index cf4211c..797abf4 100644 (file)
@@ -678,7 +678,8 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
        struct iscsi_task *task;
        itt_t itt;
 
-       if (session->state == ISCSI_STATE_TERMINATE)
+       if (session->state == ISCSI_STATE_TERMINATE ||
+           !test_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags))
                return NULL;
 
        if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) {
@@ -1392,8 +1393,8 @@ static bool iscsi_set_conn_failed(struct iscsi_conn *conn)
        if (conn->stop_stage == 0)
                session->state = ISCSI_STATE_FAILED;
 
-       set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
-       set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+       set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
+       set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
        return true;
 }
 
@@ -1454,7 +1455,7 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
         * Do this after dropping the extra ref because if this was a requeue
         * it's removed from that list and cleanup_queued_task would miss it.
         */
-       if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+       if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
                /*
                 * Save the task and ref in case we weren't cleaning up this
                 * task and get woken up again.
@@ -1532,7 +1533,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
        int rc = 0;
 
        spin_lock_bh(&conn->session->frwd_lock);
-       if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+       if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
                ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
                spin_unlock_bh(&conn->session->frwd_lock);
                return -ENODATA;
@@ -1746,7 +1747,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
                goto fault;
        }
 
-       if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+       if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
                reason = FAILURE_SESSION_IN_RECOVERY;
                sc->result = DID_REQUEUE << 16;
                goto fault;
@@ -1935,7 +1936,7 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
 void iscsi_suspend_queue(struct iscsi_conn *conn)
 {
        spin_lock_bh(&conn->session->frwd_lock);
-       set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+       set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
        spin_unlock_bh(&conn->session->frwd_lock);
 }
 EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
@@ -1953,7 +1954,7 @@ void iscsi_suspend_tx(struct iscsi_conn *conn)
        struct Scsi_Host *shost = conn->session->host;
        struct iscsi_host *ihost = shost_priv(shost);
 
-       set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+       set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
        if (ihost->workq)
                flush_workqueue(ihost->workq);
 }
@@ -1961,7 +1962,7 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
 
 static void iscsi_start_tx(struct iscsi_conn *conn)
 {
-       clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+       clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
        iscsi_conn_queue_work(conn);
 }
 
@@ -2214,6 +2215,8 @@ void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active)
        iscsi_suspend_tx(conn);
 
        spin_lock_bh(&session->frwd_lock);
+       clear_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
+
        if (!is_active) {
                /*
                 * if logout timed out before userspace could even send a PDU
@@ -3317,6 +3320,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
        spin_lock_bh(&session->frwd_lock);
        if (is_leading)
                session->leadconn = conn;
+
+       set_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
        spin_unlock_bh(&session->frwd_lock);
 
        /*
@@ -3329,8 +3334,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
        /*
         * Unblock xmitworker(), Login Phase will pass through.
         */
-       clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
-       clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+       clear_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
+       clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
        return 0;
 }
 EXPORT_SYMBOL_GPL(iscsi_conn_bind);
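The libiscsi hunks replace the separate suspend_rx/suspend_tx words with a single conn->flags bitmap driven by the atomic bitops, and add a BOUND bit that __iscsi_conn_send_pdu() checks before queueing PDUs. A stripped-down sketch of the bitmap idiom (the foo_* names and bit numbers are placeholders; the real flags live in the iSCSI headers):

    #include <linux/bitops.h>
    #include <linux/types.h>

    enum {
            FOO_FLAG_SUSPEND_TX,
            FOO_FLAG_SUSPEND_RX,
            FOO_FLAG_BOUND,
    };

    struct foo_conn {
            unsigned long flags;    /* one word, updated with atomic bitops */
    };

    static void foo_suspend_tx(struct foo_conn *conn)
    {
            set_bit(FOO_FLAG_SUSPEND_TX, &conn->flags);
    }

    static bool foo_tx_suspended(struct foo_conn *conn)
    {
            return test_bit(FOO_FLAG_SUSPEND_TX, &conn->flags);
    }

    static void foo_resume_tx(struct foo_conn *conn)
    {
            clear_bit(FOO_FLAG_SUSPEND_TX, &conn->flags);
    }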
index 2e9ffe3..8830057 100644 (file)
@@ -927,7 +927,7 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
         */
        conn->last_recv = jiffies;
 
-       if (unlikely(conn->suspend_rx)) {
+       if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
                ISCSI_DBG_TCP(conn, "Rx suspended!\n");
                *status = ISCSI_TCP_SUSPENDED;
                return 0;
index f90b707..01c5e8f 100644 (file)
@@ -766,6 +766,10 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
        pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity       = 0x01;
        pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt          = 0x01;
 
+       /* Enable higher IQs and OQs, 32 to 63, bit 16 */
+       if (pm8001_ha->max_q_num > 32)
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |=
+                                                       1 << 16;
        /* Disable end to end CRC checking */
        pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16);
 
@@ -1027,6 +1031,13 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
        if (0x0000 != gst_len_mpistate)
                return -EBUSY;
 
+       /*
+        *  As per controller datasheet, after successful MPI
+        *  initialization minimum 500ms delay is required before
+        *  issuing commands.
+        */
+       msleep(500);
+
        return 0;
 }
 
@@ -1727,10 +1738,11 @@ static void
 pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
 #ifdef PM8001_USE_MSIX
-       u32 mask;
-       mask = (u32)(1 << vec);
-
-       pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF));
+       if (vec < 32)
+               pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, 1U << vec);
+       else
+               pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR_U,
+                           1U << (vec - 32));
        return;
 #endif
        pm80xx_chip_intx_interrupt_enable(pm8001_ha);
@@ -1746,12 +1758,15 @@ static void
 pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
 #ifdef PM8001_USE_MSIX
-       u32 mask;
-       if (vec == 0xFF)
-               mask = 0xFFFFFFFF;
+       if (vec == 0xFF) {
+               /* disable all vectors 0-31, 32-63 */
+               pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 0xFFFFFFFF);
+               pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, 0xFFFFFFFF);
+       } else if (vec < 32)
+               pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 1U << vec);
        else
-               mask = (u32)(1 << vec);
-       pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF));
+               pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U,
+                           1U << (vec - 32));
        return;
 #endif
        pm80xx_chip_intx_interrupt_disable(pm8001_ha);
index 8196f89..31ec429 100644 (file)
@@ -860,6 +860,37 @@ static int qedi_task_xmit(struct iscsi_task *task)
        return qedi_iscsi_send_ioreq(task);
 }
 
+static void qedi_offload_work(struct work_struct *work)
+{
+       struct qedi_endpoint *qedi_ep =
+               container_of(work, struct qedi_endpoint, offload_work);
+       struct qedi_ctx *qedi;
+       int wait_delay = 5 * HZ;
+       int ret;
+
+       qedi = qedi_ep->qedi;
+
+       ret = qedi_iscsi_offload_conn(qedi_ep);
+       if (ret) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
+                        qedi_ep->iscsi_cid, qedi_ep, ret);
+               qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+               return;
+       }
+
+       ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
+                                              (qedi_ep->state ==
+                                              EP_STATE_OFLDCONN_COMPL),
+                                              wait_delay);
+       if (ret <= 0 || qedi_ep->state != EP_STATE_OFLDCONN_COMPL) {
+               qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
+                        qedi_ep->iscsi_cid, qedi_ep);
+       }
+}
+
 static struct iscsi_endpoint *
 qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
                int non_blocking)
@@ -908,6 +939,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
        }
        qedi_ep = ep->dd_data;
        memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
+       INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
        qedi_ep->state = EP_STATE_IDLE;
        qedi_ep->iscsi_cid = (u32)-1;
        qedi_ep->qedi = qedi;
@@ -1056,12 +1088,11 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
        qedi_ep = ep->dd_data;
        qedi = qedi_ep->qedi;
 
+       flush_work(&qedi_ep->offload_work);
+
        if (qedi_ep->state == EP_STATE_OFLDCONN_START)
                goto ep_exit_recover;
 
-       if (qedi_ep->state != EP_STATE_OFLDCONN_NONE)
-               flush_work(&qedi_ep->offload_work);
-
        if (qedi_ep->conn) {
                qedi_conn = qedi_ep->conn;
                abrt_conn = qedi_conn->abrt_conn;
@@ -1235,37 +1266,6 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
        return rc;
 }
 
-static void qedi_offload_work(struct work_struct *work)
-{
-       struct qedi_endpoint *qedi_ep =
-               container_of(work, struct qedi_endpoint, offload_work);
-       struct qedi_ctx *qedi;
-       int wait_delay = 5 * HZ;
-       int ret;
-
-       qedi = qedi_ep->qedi;
-
-       ret = qedi_iscsi_offload_conn(qedi_ep);
-       if (ret) {
-               QEDI_ERR(&qedi->dbg_ctx,
-                        "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
-                        qedi_ep->iscsi_cid, qedi_ep, ret);
-               qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
-               return;
-       }
-
-       ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
-                                              (qedi_ep->state ==
-                                              EP_STATE_OFLDCONN_COMPL),
-                                              wait_delay);
-       if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) {
-               qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
-               QEDI_ERR(&qedi->dbg_ctx,
-                        "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
-                        qedi_ep->iscsi_cid, qedi_ep);
-       }
-}
-
 static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
 {
        struct qedi_ctx *qedi;
@@ -1381,7 +1381,6 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
                          qedi_ep->dst_addr, qedi_ep->dst_port);
        }
 
-       INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
        queue_work(qedi->offload_thread, &qedi_ep->offload_work);
 
        ret = 0;
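The qedi change moves the work handler and the INIT_WORK() call so the work is initialised as soon as the endpoint is allocated in qedi_ep_connect(); qedi_ep_disconnect() can then call flush_work() unconditionally instead of guessing from the endpoint state, since flushing a work_struct that was never initialised is not allowed. A generic sketch of that ordering (names are illustrative):

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct foo_ep {
            struct work_struct offload_work;
            /* ... */
    };

    static void foo_offload_work(struct work_struct *work)
    {
            struct foo_ep *ep = container_of(work, struct foo_ep, offload_work);

            /* ... perform the offload ... */
    }

    static struct foo_ep *foo_ep_create(void)
    {
            struct foo_ep *ep = kzalloc(sizeof(*ep), GFP_KERNEL);

            if (!ep)
                    return NULL;
            /* Initialise the work as soon as the object exists ... */
            INIT_WORK(&ep->offload_work, foo_offload_work);
            return ep;
    }

    static void foo_ep_destroy(struct foo_ep *ep)
    {
            /* ... so a later flush is always safe, queued or not. */
            flush_work(&ep->offload_work);
            kfree(ep);
    }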
index ff78ef7..592a290 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/blkdev.h>
 #include <linux/crc-t10dif.h>
 #include <linux/spinlock.h>
-#include <linux/mutex.h>
 #include <linux/interrupt.h>
 #include <linux/atomic.h>
 #include <linux/hrtimer.h>
@@ -732,9 +731,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
            {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 };
 
-static atomic_t sdebug_num_hosts;
-static DEFINE_MUTEX(add_host_mutex);
-
+static int sdebug_num_hosts;
 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
 static int sdebug_ato = DEF_ATO;
 static int sdebug_cdb_len = DEF_CDB_LEN;
@@ -781,7 +778,6 @@ static int sdebug_uuid_ctl = DEF_UUID_CTL;
 static bool sdebug_random = DEF_RANDOM;
 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
 static bool sdebug_removable = DEF_REMOVABLE;
-static bool sdebug_deflect_incoming;
 static bool sdebug_clustering;
 static bool sdebug_host_lock = DEF_HOST_LOCK;
 static bool sdebug_strict = DEF_STRICT;
@@ -5122,10 +5118,6 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
                       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
        if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
                sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
-       if (smp_load_acquire(&sdebug_deflect_incoming)) {
-               pr_info("Exit early due to deflect_incoming\n");
-               return 1;
-       }
        if (devip == NULL) {
                devip = find_build_dev_info(sdp);
                if (devip == NULL)
@@ -5211,7 +5203,7 @@ static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
 }
 
 /* Deletes (stops) timers or work queues of all queued commands */
-static void stop_all_queued(bool done_with_no_conn)
+static void stop_all_queued(void)
 {
        unsigned long iflags;
        int j, k;
@@ -5220,15 +5212,13 @@ static void stop_all_queued(bool done_with_no_conn)
        struct sdebug_queued_cmd *sqcp;
        struct sdebug_dev_info *devip;
        struct sdebug_defer *sd_dp;
-       struct scsi_cmnd *scp;
 
        for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
                spin_lock_irqsave(&sqp->qc_lock, iflags);
                for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
                        if (test_bit(k, sqp->in_use_bm)) {
                                sqcp = &sqp->qc_arr[k];
-                               scp = sqcp->a_cmnd;
-                               if (!scp)
+                               if (sqcp->a_cmnd == NULL)
                                        continue;
                                devip = (struct sdebug_dev_info *)
                                        sqcp->a_cmnd->device->hostdata;
@@ -5243,10 +5233,6 @@ static void stop_all_queued(bool done_with_no_conn)
                                        l_defer_t = SDEB_DEFER_NONE;
                                spin_unlock_irqrestore(&sqp->qc_lock, iflags);
                                stop_qc_helper(sd_dp, l_defer_t);
-                               if (done_with_no_conn && l_defer_t != SDEB_DEFER_NONE) {
-                                       scp->result = DID_NO_CONNECT << 16;
-                                       scsi_done(scp);
-                               }
                                clear_bit(k, sqp->in_use_bm);
                                spin_lock_irqsave(&sqp->qc_lock, iflags);
                        }
@@ -5389,7 +5375,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
                }
        }
        spin_unlock(&sdebug_host_list_lock);
-       stop_all_queued(false);
+       stop_all_queued();
        if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
                sdev_printk(KERN_INFO, SCpnt->device,
                            "%s: %d device(s) found\n", __func__, k);
@@ -5449,50 +5435,13 @@ static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
        }
 }
 
-static void sdeb_block_all_queues(void)
-{
-       int j;
-       struct sdebug_queue *sqp;
-
-       for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
-               atomic_set(&sqp->blocked, (int)true);
-}
-
-static void sdeb_unblock_all_queues(void)
+static void block_unblock_all_queues(bool block)
 {
        int j;
        struct sdebug_queue *sqp;
 
        for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
-               atomic_set(&sqp->blocked, (int)false);
-}
-
-static void
-sdeb_add_n_hosts(int num_hosts)
-{
-       if (num_hosts < 1)
-               return;
-       do {
-               bool found;
-               unsigned long idx;
-               struct sdeb_store_info *sip;
-               bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
-
-               found = false;
-               if (want_phs) {
-                       xa_for_each_marked(per_store_ap, idx, sip, SDEB_XA_NOT_IN_USE) {
-                               sdeb_most_recent_idx = (int)idx;
-                               found = true;
-                               break;
-                       }
-                       if (found)      /* re-use case */
-                               sdebug_add_host_helper((int)idx);
-                       else
-                               sdebug_do_add_host(true /* make new store */);
-               } else {
-                       sdebug_do_add_host(false);
-               }
-       } while (--num_hosts);
+               atomic_set(&sqp->blocked, (int)block);
 }
 
 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
@@ -5505,10 +5454,10 @@ static void tweak_cmnd_count(void)
        modulo = abs(sdebug_every_nth);
        if (modulo < 2)
                return;
-       sdeb_block_all_queues();
+       block_unblock_all_queues(true);
        count = atomic_read(&sdebug_cmnd_count);
        atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
-       sdeb_unblock_all_queues();
+       block_unblock_all_queues(false);
 }
 
 static void clear_queue_stats(void)
@@ -5526,15 +5475,6 @@ static bool inject_on_this_cmd(void)
        return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
 }
 
-static int process_deflect_incoming(struct scsi_cmnd *scp)
-{
-       u8 opcode = scp->cmnd[0];
-
-       if (opcode == SYNCHRONIZE_CACHE || opcode == SYNCHRONIZE_CACHE_16)
-               return 0;
-       return DID_NO_CONNECT << 16;
-}
-
 #define INCLUSIVE_TIMING_MAX_NS 1000000                /* 1 millisecond */
 
 /* Complete the processing of the thread that queued a SCSI command to this
@@ -5544,7 +5484,8 @@ static int process_deflect_incoming(struct scsi_cmnd *scp)
  */
 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
                         int scsi_result,
-                        int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *),
+                        int (*pfp)(struct scsi_cmnd *,
+                                   struct sdebug_dev_info *),
                         int delta_jiff, int ndelay)
 {
        bool new_sd_dp;
@@ -5565,27 +5506,13 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
        }
        sdp = cmnd->device;
 
-       if (delta_jiff == 0) {
-               sqp = get_queue(cmnd);
-               if (atomic_read(&sqp->blocked)) {
-                       if (smp_load_acquire(&sdebug_deflect_incoming))
-                               return process_deflect_incoming(cmnd);
-                       else
-                               return SCSI_MLQUEUE_HOST_BUSY;
-               }
+       if (delta_jiff == 0)
                goto respond_in_thread;
-       }
 
        sqp = get_queue(cmnd);
        spin_lock_irqsave(&sqp->qc_lock, iflags);
        if (unlikely(atomic_read(&sqp->blocked))) {
                spin_unlock_irqrestore(&sqp->qc_lock, iflags);
-               if (smp_load_acquire(&sdebug_deflect_incoming)) {
-                       scsi_result = process_deflect_incoming(cmnd);
-                       goto respond_in_thread;
-               }
-               if (sdebug_verbose)
-                       pr_info("blocked --> SCSI_MLQUEUE_HOST_BUSY\n");
                return SCSI_MLQUEUE_HOST_BUSY;
        }
        num_in_q = atomic_read(&devip->num_in_q);
@@ -5774,12 +5701,8 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 respond_in_thread:     /* call back to mid-layer using invocation thread */
        cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
        cmnd->result &= ~SDEG_RES_IMMED_MASK;
-       if (cmnd->result == 0 && scsi_result != 0) {
+       if (cmnd->result == 0 && scsi_result != 0)
                cmnd->result = scsi_result;
-               if (sdebug_verbose)
-                       pr_info("respond_in_thread: tag=0x%x, scp->result=0x%x\n",
-                               blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)), scsi_result);
-       }
        scsi_done(cmnd);
        return 0;
 }
@@ -6064,7 +5987,7 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
                        int j, k;
                        struct sdebug_queue *sqp;
 
-                       sdeb_block_all_queues();
+                       block_unblock_all_queues(true);
                        for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
                             ++j, ++sqp) {
                                k = find_first_bit(sqp->in_use_bm,
@@ -6078,7 +6001,7 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
                                sdebug_jdelay = jdelay;
                                sdebug_ndelay = 0;
                        }
-                       sdeb_unblock_all_queues();
+                       block_unblock_all_queues(false);
                }
                return res;
        }
@@ -6104,7 +6027,7 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
                        int j, k;
                        struct sdebug_queue *sqp;
 
-                       sdeb_block_all_queues();
+                       block_unblock_all_queues(true);
                        for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
                             ++j, ++sqp) {
                                k = find_first_bit(sqp->in_use_bm,
@@ -6119,7 +6042,7 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
                                sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
                                                        : DEF_JDELAY;
                        }
-                       sdeb_unblock_all_queues();
+                       block_unblock_all_queues(false);
                }
                return res;
        }
@@ -6433,7 +6356,7 @@ static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
            (n <= SDEBUG_CANQUEUE) &&
            (sdebug_host_max_queue == 0)) {
-               sdeb_block_all_queues();
+               block_unblock_all_queues(true);
                k = 0;
                for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
                     ++j, ++sqp) {
@@ -6448,7 +6371,7 @@ static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
                        atomic_set(&retired_max_queue, k + 1);
                else
                        atomic_set(&retired_max_queue, 0);
-               sdeb_unblock_all_queues();
+               block_unblock_all_queues(false);
                return count;
        }
        return -EINVAL;
@@ -6537,48 +6460,43 @@ static DRIVER_ATTR_RW(virtual_gb);
 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
 {
        /* absolute number of hosts currently active is what is shown */
-       return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&sdebug_num_hosts));
+       return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
 }
 
-/*
- * Accept positive and negative values. Hex values (only positive) may be prefixed by '0x'.
- * To remove all hosts use a large negative number (e.g. -9999). The value 0 does nothing.
- * Returns -EBUSY if another add_host sysfs invocation is active.
- */
 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
                              size_t count)
 {
+       bool found;
+       unsigned long idx;
+       struct sdeb_store_info *sip;
+       bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
        int delta_hosts;
 
-       if (count == 0 || kstrtoint(buf, 0, &delta_hosts))
+       if (sscanf(buf, "%d", &delta_hosts) != 1)
                return -EINVAL;
-       if (sdebug_verbose)
-               pr_info("prior num_hosts=%d, num_to_add=%d\n",
-                       atomic_read(&sdebug_num_hosts), delta_hosts);
-       if (delta_hosts == 0)
-               return count;
-       if (mutex_trylock(&add_host_mutex) == 0)
-               return -EBUSY;
        if (delta_hosts > 0) {
-               sdeb_add_n_hosts(delta_hosts);
-       } else if (delta_hosts < 0) {
-               smp_store_release(&sdebug_deflect_incoming, true);
-               sdeb_block_all_queues();
-               if (delta_hosts >= atomic_read(&sdebug_num_hosts))
-                       stop_all_queued(true);
                do {
-                       if (atomic_read(&sdebug_num_hosts) < 1) {
-                               free_all_queued();
-                               break;
+                       found = false;
+                       if (want_phs) {
+                               xa_for_each_marked(per_store_ap, idx, sip,
+                                                  SDEB_XA_NOT_IN_USE) {
+                                       sdeb_most_recent_idx = (int)idx;
+                                       found = true;
+                                       break;
+                               }
+                               if (found)      /* re-use case */
+                                       sdebug_add_host_helper((int)idx);
+                               else
+                                       sdebug_do_add_host(true);
+                       } else {
+                               sdebug_do_add_host(false);
                        }
+               } while (--delta_hosts);
+       } else if (delta_hosts < 0) {
+               do {
                        sdebug_do_remove_host(false);
                } while (++delta_hosts);
-               sdeb_unblock_all_queues();
-               smp_store_release(&sdebug_deflect_incoming, false);
        }
-       mutex_unlock(&add_host_mutex);
-       if (sdebug_verbose)
-               pr_info("post num_hosts=%d\n", atomic_read(&sdebug_num_hosts));
        return count;
 }
 static DRIVER_ATTR_RW(add_host);
@@ -7089,10 +7007,6 @@ static int __init scsi_debug_init(void)
        sdebug_add_host = 0;
 
        for (k = 0; k < hosts_to_add; k++) {
-               if (smp_load_acquire(&sdebug_deflect_incoming)) {
-                       pr_info("exit early as sdebug_deflect_incoming is set\n");
-                       return 0;
-               }
                if (want_store && k == 0) {
                        ret = sdebug_add_host_helper(idx);
                        if (ret < 0) {
@@ -7110,12 +7024,8 @@ static int __init scsi_debug_init(void)
                }
        }
        if (sdebug_verbose)
-               pr_info("built %d host(s)\n", atomic_read(&sdebug_num_hosts));
+               pr_info("built %d host(s)\n", sdebug_num_hosts);
 
-       /*
-        * Even though all the hosts have been established, due to async device (LU) scanning
-        * by the scsi mid-level, there may still be devices (LUs) being set up.
-        */
        return 0;
 
 bus_unreg:
@@ -7131,17 +7041,12 @@ free_q_arr:
 
 static void __exit scsi_debug_exit(void)
 {
-       int k;
+       int k = sdebug_num_hosts;
 
-       /* Possible race with LUs still being set up; stop them asap */
-       sdeb_block_all_queues();
-       smp_store_release(&sdebug_deflect_incoming, true);
-       stop_all_queued(false);
-       for (k = 0; atomic_read(&sdebug_num_hosts) > 0; k++)
+       stop_all_queued();
+       for (; k; k--)
                sdebug_do_remove_host(true);
        free_all_queued();
-       if (sdebug_verbose)
-               pr_info("removed %d hosts\n", k);
        driver_unregister(&sdebug_driverfs_driver);
        bus_unregister(&pseudo_lld_bus);
        root_device_unregister(pseudo_primary);
@@ -7311,13 +7216,13 @@ static int sdebug_add_host_helper(int per_host_idx)
        sdbg_host->dev.bus = &pseudo_lld_bus;
        sdbg_host->dev.parent = pseudo_primary;
        sdbg_host->dev.release = &sdebug_release_adapter;
-       dev_set_name(&sdbg_host->dev, "adapter%d", atomic_read(&sdebug_num_hosts));
+       dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
 
        error = device_register(&sdbg_host->dev);
        if (error)
                goto clean;
 
-       atomic_inc(&sdebug_num_hosts);
+       ++sdebug_num_hosts;
        return 0;
 
 clean:
@@ -7381,7 +7286,7 @@ static void sdebug_do_remove_host(bool the_end)
                return;
 
        device_unregister(&sdbg_host->dev);
-       atomic_dec(&sdebug_num_hosts);
+       --sdebug_num_hosts;
 }
 
 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
@@ -7389,10 +7294,10 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
        int num_in_q = 0;
        struct sdebug_dev_info *devip;
 
-       sdeb_block_all_queues();
+       block_unblock_all_queues(true);
        devip = (struct sdebug_dev_info *)sdev->hostdata;
        if (NULL == devip) {
-               sdeb_unblock_all_queues();
+               block_unblock_all_queues(false);
                return  -ENODEV;
        }
        num_in_q = atomic_read(&devip->num_in_q);
@@ -7411,7 +7316,7 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
                sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
                            __func__, qdepth, num_in_q);
        }
-       sdeb_unblock_all_queues();
+       block_unblock_all_queues(false);
        return sdev->queue_depth;
 }
 
index 27951ea..2c0dd64 100644 (file)
@@ -86,6 +86,9 @@ struct iscsi_internal {
        struct transport_container session_cont;
 };
 
+static DEFINE_IDR(iscsi_ep_idr);
+static DEFINE_MUTEX(iscsi_ep_idr_mutex);
+
 static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
 
 static struct workqueue_struct *iscsi_conn_cleanup_workq;
@@ -168,6 +171,11 @@ struct device_attribute dev_attr_##_prefix##_##_name =     \
 static void iscsi_endpoint_release(struct device *dev)
 {
        struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+
+       mutex_lock(&iscsi_ep_idr_mutex);
+       idr_remove(&iscsi_ep_idr, ep->id);
+       mutex_unlock(&iscsi_ep_idr_mutex);
+
        kfree(ep);
 }
 
@@ -180,7 +188,7 @@ static ssize_t
 show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
-       return sysfs_emit(buf, "%llu\n", (unsigned long long) ep->id);
+       return sysfs_emit(buf, "%d\n", ep->id);
 }
 static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
 
@@ -193,48 +201,32 @@ static struct attribute_group iscsi_endpoint_group = {
        .attrs = iscsi_endpoint_attrs,
 };
 
-#define ISCSI_MAX_EPID -1
-
-static int iscsi_match_epid(struct device *dev, const void *data)
-{
-       struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
-       const uint64_t *epid = data;
-
-       return *epid == ep->id;
-}
-
 struct iscsi_endpoint *
 iscsi_create_endpoint(int dd_size)
 {
-       struct device *dev;
        struct iscsi_endpoint *ep;
-       uint64_t id;
-       int err;
-
-       for (id = 1; id < ISCSI_MAX_EPID; id++) {
-               dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
-                                       iscsi_match_epid);
-               if (!dev)
-                       break;
-               else
-                       put_device(dev);
-       }
-       if (id == ISCSI_MAX_EPID) {
-               printk(KERN_ERR "Too many connections. Max supported %u\n",
-                      ISCSI_MAX_EPID - 1);
-               return NULL;
-       }
+       int err, id;
 
        ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
        if (!ep)
                return NULL;
 
+       mutex_lock(&iscsi_ep_idr_mutex);
+       id = idr_alloc(&iscsi_ep_idr, ep, 0, -1, GFP_NOIO);
+       if (id < 0) {
+               mutex_unlock(&iscsi_ep_idr_mutex);
+               printk(KERN_ERR "Could not allocate endpoint ID. Error %d.\n",
+                      id);
+               goto free_ep;
+       }
+       mutex_unlock(&iscsi_ep_idr_mutex);
+
        ep->id = id;
        ep->dev.class = &iscsi_endpoint_class;
-       dev_set_name(&ep->dev, "ep-%llu", (unsigned long long) id);
+       dev_set_name(&ep->dev, "ep-%d", id);
        err = device_register(&ep->dev);
         if (err)
-                goto free_ep;
+               goto free_id;
 
        err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
        if (err)
@@ -248,6 +240,10 @@ unregister_dev:
        device_unregister(&ep->dev);
        return NULL;
 
+free_id:
+       mutex_lock(&iscsi_ep_idr_mutex);
+       idr_remove(&iscsi_ep_idr, id);
+       mutex_unlock(&iscsi_ep_idr_mutex);
 free_ep:
        kfree(ep);
        return NULL;
@@ -275,14 +271,17 @@ EXPORT_SYMBOL_GPL(iscsi_put_endpoint);
  */
 struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
 {
-       struct device *dev;
+       struct iscsi_endpoint *ep;
 
-       dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
-                               iscsi_match_epid);
-       if (!dev)
-               return NULL;
+       mutex_lock(&iscsi_ep_idr_mutex);
+       ep = idr_find(&iscsi_ep_idr, handle);
+       if (!ep)
+               goto unlock;
 
-       return iscsi_dev_to_endpoint(dev);
+       get_device(&ep->dev);
+unlock:
+       mutex_unlock(&iscsi_ep_idr_mutex);
+       return ep;
 }
 EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
 
@@ -2202,10 +2201,10 @@ static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
 
        switch (flag) {
        case STOP_CONN_RECOVER:
-               conn->state = ISCSI_CONN_FAILED;
+               WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
                break;
        case STOP_CONN_TERM:
-               conn->state = ISCSI_CONN_DOWN;
+               WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
                break;
        default:
                iscsi_cls_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n",
@@ -2217,6 +2216,49 @@ static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
        ISCSI_DBG_TRANS_CONN(conn, "Stopping conn done.\n");
 }
 
+static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
+{
+       struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
+       struct iscsi_endpoint *ep;
+
+       ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
+       WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
+
+       if (!conn->ep || !session->transport->ep_disconnect)
+               return;
+
+       ep = conn->ep;
+       conn->ep = NULL;
+
+       session->transport->unbind_conn(conn, is_active);
+       session->transport->ep_disconnect(ep);
+       ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
+}
+
+static void iscsi_if_disconnect_bound_ep(struct iscsi_cls_conn *conn,
+                                        struct iscsi_endpoint *ep,
+                                        bool is_active)
+{
+       /* Check if this was a conn error and the kernel took ownership */
+       spin_lock_irq(&conn->lock);
+       if (!test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+               spin_unlock_irq(&conn->lock);
+               iscsi_ep_disconnect(conn, is_active);
+       } else {
+               spin_unlock_irq(&conn->lock);
+               ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
+               mutex_unlock(&conn->ep_mutex);
+
+               flush_work(&conn->cleanup_work);
+               /*
+                * Userspace is now done with the EP so we can release the ref
+                * iscsi_cleanup_conn_work_fn took.
+                */
+               iscsi_put_endpoint(ep);
+               mutex_lock(&conn->ep_mutex);
+       }
+}
+
 static int iscsi_if_stop_conn(struct iscsi_transport *transport,
                              struct iscsi_uevent *ev)
 {
@@ -2237,12 +2279,25 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport,
                cancel_work_sync(&conn->cleanup_work);
                iscsi_stop_conn(conn, flag);
        } else {
+               /*
+                * For offload, when iscsid is restarted it won't know about
+                * existing endpoints so it can't do an ep_disconnect. We clean
+                * it up here for userspace.
+                */
+               mutex_lock(&conn->ep_mutex);
+               if (conn->ep)
+                       iscsi_if_disconnect_bound_ep(conn, conn->ep, true);
+               mutex_unlock(&conn->ep_mutex);
+
                /*
                 * Figure out if it was the kernel or userspace initiating this.
                 */
+               spin_lock_irq(&conn->lock);
                if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+                       spin_unlock_irq(&conn->lock);
                        iscsi_stop_conn(conn, flag);
                } else {
+                       spin_unlock_irq(&conn->lock);
                        ISCSI_DBG_TRANS_CONN(conn,
                                             "flush kernel conn cleanup.\n");
                        flush_work(&conn->cleanup_work);
@@ -2251,31 +2306,14 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport,
                 * Only clear for recovery to avoid extra cleanup runs during
                 * termination.
                 */
+               spin_lock_irq(&conn->lock);
                clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
+               spin_unlock_irq(&conn->lock);
        }
        ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop done.\n");
        return 0;
 }
 
-static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
-{
-       struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
-       struct iscsi_endpoint *ep;
-
-       ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
-       conn->state = ISCSI_CONN_FAILED;
-
-       if (!conn->ep || !session->transport->ep_disconnect)
-               return;
-
-       ep = conn->ep;
-       conn->ep = NULL;
-
-       session->transport->unbind_conn(conn, is_active);
-       session->transport->ep_disconnect(ep);
-       ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
-}
-
 static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
 {
        struct iscsi_cls_conn *conn = container_of(work, struct iscsi_cls_conn,
@@ -2284,18 +2322,11 @@ static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
 
        mutex_lock(&conn->ep_mutex);
        /*
-        * If we are not at least bound there is nothing for us to do. Userspace
-        * will do a ep_disconnect call if offload is used, but will not be
-        * doing a stop since there is nothing to clean up, so we have to clear
-        * the cleanup bit here.
+        * Get a ref to the ep, so we don't release its ID until after
+        * userspace is done referencing it in iscsi_if_disconnect_bound_ep.
         */
-       if (conn->state != ISCSI_CONN_BOUND && conn->state != ISCSI_CONN_UP) {
-               ISCSI_DBG_TRANS_CONN(conn, "Got error while conn is already failed. Ignoring.\n");
-               clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
-               mutex_unlock(&conn->ep_mutex);
-               return;
-       }
-
+       if (conn->ep)
+               get_device(&conn->ep->dev);
        iscsi_ep_disconnect(conn, false);
 
        if (system_state != SYSTEM_RUNNING) {
@@ -2340,11 +2371,12 @@ iscsi_alloc_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
                conn->dd_data = &conn[1];
 
        mutex_init(&conn->ep_mutex);
+       spin_lock_init(&conn->lock);
        INIT_LIST_HEAD(&conn->conn_list);
        INIT_WORK(&conn->cleanup_work, iscsi_cleanup_conn_work_fn);
        conn->transport = transport;
        conn->cid = cid;
-       conn->state = ISCSI_CONN_DOWN;
+       WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
 
        /* this is released in the dev's release function */
        if (!get_device(&session->dev))
@@ -2542,9 +2574,32 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
        struct iscsi_uevent *ev;
        struct iscsi_internal *priv;
        int len = nlmsg_total_size(sizeof(*ev));
+       unsigned long flags;
+       int state;
 
-       if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags))
-               queue_work(iscsi_conn_cleanup_workq, &conn->cleanup_work);
+       spin_lock_irqsave(&conn->lock, flags);
+       /*
+        * Userspace will only do a stop call if we are at least bound. And, we
+        * only need to do the in kernel cleanup if in the UP state so cmds can
+        * be released to upper layers. If in other states just wait for
+        * userspace to avoid races that can leave the cleanup_work queued.
+        */
+       state = READ_ONCE(conn->state);
+       switch (state) {
+       case ISCSI_CONN_BOUND:
+       case ISCSI_CONN_UP:
+               if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP,
+                                     &conn->flags)) {
+                       queue_work(iscsi_conn_cleanup_workq,
+                                  &conn->cleanup_work);
+               }
+               break;
+       default:
+               ISCSI_DBG_TRANS_CONN(conn, "Got conn error in state %d\n",
+                                    state);
+               break;
+       }
+       spin_unlock_irqrestore(&conn->lock, flags);
 
        priv = iscsi_if_transport_lookup(conn->transport);
        if (!priv)
@@ -2894,7 +2949,7 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
        char *data = (char*)ev + sizeof(*ev);
        struct iscsi_cls_conn *conn;
        struct iscsi_cls_session *session;
-       int err = 0, value = 0;
+       int err = 0, value = 0, state;
 
        if (ev->u.set_param.len > PAGE_SIZE)
                return -EINVAL;
@@ -2911,8 +2966,8 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
                        session->recovery_tmo = value;
                break;
        default:
-               if ((conn->state == ISCSI_CONN_BOUND) ||
-                       (conn->state == ISCSI_CONN_UP)) {
+               state = READ_ONCE(conn->state);
+               if (state == ISCSI_CONN_BOUND || state == ISCSI_CONN_UP) {
                        err = transport->set_param(conn, ev->u.set_param.param,
                                        data, ev->u.set_param.len);
                } else {
@@ -2984,16 +3039,7 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
        }
 
        mutex_lock(&conn->ep_mutex);
-       /* Check if this was a conn error and the kernel took ownership */
-       if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
-               ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
-               mutex_unlock(&conn->ep_mutex);
-
-               flush_work(&conn->cleanup_work);
-               goto put_ep;
-       }
-
-       iscsi_ep_disconnect(conn, false);
+       iscsi_if_disconnect_bound_ep(conn, ep, false);
        mutex_unlock(&conn->ep_mutex);
 put_ep:
        iscsi_put_endpoint(ep);
@@ -3696,24 +3742,17 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
                return -EINVAL;
 
        mutex_lock(&conn->ep_mutex);
+       spin_lock_irq(&conn->lock);
        if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+               spin_unlock_irq(&conn->lock);
                mutex_unlock(&conn->ep_mutex);
                ev->r.retcode = -ENOTCONN;
                return 0;
        }
+       spin_unlock_irq(&conn->lock);
 
        switch (nlh->nlmsg_type) {
        case ISCSI_UEVENT_BIND_CONN:
-               if (conn->ep) {
-                       /*
-                        * For offload boot support where iscsid is restarted
-                        * during the pivot root stage, the ep will be intact
-                        * here when the new iscsid instance starts up and
-                        * reconnects.
-                        */
-                       iscsi_ep_disconnect(conn, true);
-               }
-
                session = iscsi_session_lookup(ev->u.b_conn.sid);
                if (!session) {
                        err = -EINVAL;
@@ -3724,7 +3763,7 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
                                                ev->u.b_conn.transport_eph,
                                                ev->u.b_conn.is_leading);
                if (!ev->r.retcode)
-                       conn->state = ISCSI_CONN_BOUND;
+                       WRITE_ONCE(conn->state, ISCSI_CONN_BOUND);
 
                if (ev->r.retcode || !transport->ep_connect)
                        break;
@@ -3743,7 +3782,8 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
        case ISCSI_UEVENT_START_CONN:
                ev->r.retcode = transport->start_conn(conn);
                if (!ev->r.retcode)
-                       conn->state = ISCSI_CONN_UP;
+                       WRITE_ONCE(conn->state, ISCSI_CONN_UP);
+
                break;
        case ISCSI_UEVENT_SEND_PDU:
                pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
@@ -4050,10 +4090,11 @@ static ssize_t show_conn_state(struct device *dev,
 {
        struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent);
        const char *state = "unknown";
+       int conn_state = READ_ONCE(conn->state);
 
-       if (conn->state >= 0 &&
-           conn->state < ARRAY_SIZE(connection_state_names))
-               state = connection_state_names[conn->state];
+       if (conn_state >= 0 &&
+           conn_state < ARRAY_SIZE(connection_state_names))
+               state = connection_state_names[conn_state];
 
        return sysfs_emit(buf, "%s\n", state);
 }
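
The endpoint hunks above replace the linear class_find_device() scan with an IDR keyed by a small integer ID. Below is a minimal standalone sketch of the same allocate/lookup/remove pattern; the names (example_ep, ep_idr) are illustrative, not the driver's own:

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/slab.h>

static DEFINE_IDR(ep_idr);
static DEFINE_MUTEX(ep_idr_mutex);

struct example_ep {
	int id;
};

static struct example_ep *example_ep_create(void)
{
	struct example_ep *ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	int id;

	if (!ep)
		return NULL;

	mutex_lock(&ep_idr_mutex);
	/* Hand out the lowest free ID and remember the ep pointer under it. */
	id = idr_alloc(&ep_idr, ep, 0, 0, GFP_KERNEL);
	mutex_unlock(&ep_idr_mutex);
	if (id < 0) {
		kfree(ep);
		return NULL;
	}
	ep->id = id;
	return ep;
}

static struct example_ep *example_ep_lookup(int id)
{
	struct example_ep *ep;

	mutex_lock(&ep_idr_mutex);
	ep = idr_find(&ep_idr, id);	/* direct lookup, no device-class walk */
	mutex_unlock(&ep_idr_mutex);
	return ep;
}

static void example_ep_destroy(struct example_ep *ep)
{
	mutex_lock(&ep_idr_mutex);
	idr_remove(&ep_idr, ep->id);
	mutex_unlock(&ep_idr_mutex);
	kfree(ep);
}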
index ddd00ef..fbdb512 100644
@@ -41,7 +41,7 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi,
        int result;
        unsigned char *buffer;
 
-       buffer = kmalloc(32, GFP_KERNEL);
+       buffer = kzalloc(32, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;
 
@@ -55,10 +55,13 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi,
        cgc.data_direction = DMA_FROM_DEVICE;
 
        result = sr_do_ioctl(cd, &cgc);
+       if (result)
+               goto err;
 
        tochdr->cdth_trk0 = buffer[2];
        tochdr->cdth_trk1 = buffer[3];
 
+err:
        kfree(buffer);
        return result;
 }
@@ -71,7 +74,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
        int result;
        unsigned char *buffer;
 
-       buffer = kmalloc(32, GFP_KERNEL);
+       buffer = kzalloc(32, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;
 
@@ -86,6 +89,8 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
        cgc.data_direction = DMA_FROM_DEVICE;
 
        result = sr_do_ioctl(cd, &cgc);
+       if (result)
+               goto err;
 
        tocentry->cdte_ctrl = buffer[5] & 0xf;
        tocentry->cdte_adr = buffer[5] >> 4;
@@ -98,6 +103,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
                tocentry->cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8)
                        + buffer[10]) << 8) + buffer[11];
 
+err:
        kfree(buffer);
        return result;
 }
@@ -384,7 +390,7 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
 {
        Scsi_CD *cd = cdi->handle;
        struct packet_command cgc;
-       char *buffer = kmalloc(32, GFP_KERNEL);
+       char *buffer = kzalloc(32, GFP_KERNEL);
        int result;
 
        if (!buffer)
@@ -400,10 +406,13 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
        cgc.data_direction = DMA_FROM_DEVICE;
        cgc.timeout = IOCTL_TIMEOUT;
        result = sr_do_ioctl(cd, &cgc);
+       if (result)
+               goto err;
 
        memcpy(mcn->medium_catalog_number, buffer + 9, 13);
        mcn->medium_catalog_number[13] = 0;
 
+err:
        kfree(buffer);
        return result;
 }
index 92d9610..938017a 100644
@@ -277,6 +277,9 @@ static int atmel_qspi_find_mode(const struct spi_mem_op *op)
 static bool atmel_qspi_supports_op(struct spi_mem *mem,
                                   const struct spi_mem_op *op)
 {
+       if (!spi_mem_default_supports_op(mem, op))
+               return false;
+
        if (atmel_qspi_find_mode(op) < 0)
                return false;
 
index 616ada8..19686fb 100644
@@ -1415,9 +1415,24 @@ static bool cqspi_supports_mem_op(struct spi_mem *mem,
        all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
                    !op->data.dtr;
 
-       /* Mixed DTR modes not supported. */
-       if (!(all_true || all_false))
+       if (all_true) {
+               /* Right now we only support 8-8-8 DTR mode. */
+               if (op->cmd.nbytes && op->cmd.buswidth != 8)
+                       return false;
+               if (op->addr.nbytes && op->addr.buswidth != 8)
+                       return false;
+               if (op->data.nbytes && op->data.buswidth != 8)
+                       return false;
+       } else if (all_false) {
+               /* Only 1-1-X ops are supported without DTR */
+               if (op->cmd.nbytes && op->cmd.buswidth > 1)
+                       return false;
+               if (op->addr.nbytes && op->addr.buswidth > 1)
+                       return false;
+       } else {
+               /* Mixed DTR modes are not supported. */
                return false;
+       }
 
        return spi_mem_default_supports_op(mem, op);
 }
index a5ef7a5..f6eec7a 100644
@@ -72,6 +72,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
        { PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info },
        { PCI_VDEVICE(INTEL, 0x51a4), (unsigned long)&cnl_info },
        { PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info },
+       { PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info },
        { PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
        { PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info },
        { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
index 94fb096..d167699 100644
@@ -960,7 +960,17 @@ static int __maybe_unused mtk_nor_suspend(struct device *dev)
 
 static int __maybe_unused mtk_nor_resume(struct device *dev)
 {
-       return pm_runtime_force_resume(dev);
+       struct spi_controller *ctlr = dev_get_drvdata(dev);
+       struct mtk_nor *sp = spi_controller_get_devdata(ctlr);
+       int ret;
+
+       ret = pm_runtime_force_resume(dev);
+       if (ret)
+               return ret;
+
+       mtk_nor_init(sp);
+
+       return 0;
 }
 
 static const struct dev_pm_ops mtk_nor_pm_ops = {
index b7bb16f..06b6f35 100644
@@ -36,6 +36,10 @@ static bool nointxmask;
 static bool disable_vga;
 static bool disable_idle_d3;
 
+/* List of PF's that vfio_pci_core_sriov_configure() has been called on */
+static DEFINE_MUTEX(vfio_pci_sriov_pfs_mutex);
+static LIST_HEAD(vfio_pci_sriov_pfs);
+
 static inline bool vfio_vga_disabled(void)
 {
 #ifdef CONFIG_VFIO_PCI_VGA
@@ -434,47 +438,17 @@ out:
 }
 EXPORT_SYMBOL_GPL(vfio_pci_core_disable);
 
-static struct vfio_pci_core_device *get_pf_vdev(struct vfio_pci_core_device *vdev)
-{
-       struct pci_dev *physfn = pci_physfn(vdev->pdev);
-       struct vfio_device *pf_dev;
-
-       if (!vdev->pdev->is_virtfn)
-               return NULL;
-
-       pf_dev = vfio_device_get_from_dev(&physfn->dev);
-       if (!pf_dev)
-               return NULL;
-
-       if (pci_dev_driver(physfn) != pci_dev_driver(vdev->pdev)) {
-               vfio_device_put(pf_dev);
-               return NULL;
-       }
-
-       return container_of(pf_dev, struct vfio_pci_core_device, vdev);
-}
-
-static void vfio_pci_vf_token_user_add(struct vfio_pci_core_device *vdev, int val)
-{
-       struct vfio_pci_core_device *pf_vdev = get_pf_vdev(vdev);
-
-       if (!pf_vdev)
-               return;
-
-       mutex_lock(&pf_vdev->vf_token->lock);
-       pf_vdev->vf_token->users += val;
-       WARN_ON(pf_vdev->vf_token->users < 0);
-       mutex_unlock(&pf_vdev->vf_token->lock);
-
-       vfio_device_put(&pf_vdev->vdev);
-}
-
 void vfio_pci_core_close_device(struct vfio_device *core_vdev)
 {
        struct vfio_pci_core_device *vdev =
                container_of(core_vdev, struct vfio_pci_core_device, vdev);
 
-       vfio_pci_vf_token_user_add(vdev, -1);
+       if (vdev->sriov_pf_core_dev) {
+               mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
+               WARN_ON(!vdev->sriov_pf_core_dev->vf_token->users);
+               vdev->sriov_pf_core_dev->vf_token->users--;
+               mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock);
+       }
        vfio_spapr_pci_eeh_release(vdev->pdev);
        vfio_pci_core_disable(vdev);
 
@@ -495,7 +469,12 @@ void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev)
 {
        vfio_pci_probe_mmaps(vdev);
        vfio_spapr_pci_eeh_open(vdev->pdev);
-       vfio_pci_vf_token_user_add(vdev, 1);
+
+       if (vdev->sriov_pf_core_dev) {
+               mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
+               vdev->sriov_pf_core_dev->vf_token->users++;
+               mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock);
+       }
 }
 EXPORT_SYMBOL_GPL(vfio_pci_core_finish_enable);
 
@@ -1583,11 +1562,8 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
         *
         * If the VF token is provided but unused, an error is generated.
         */
-       if (!vdev->pdev->is_virtfn && !vdev->vf_token && !vf_token)
-               return 0; /* No VF token provided or required */
-
        if (vdev->pdev->is_virtfn) {
-               struct vfio_pci_core_device *pf_vdev = get_pf_vdev(vdev);
+               struct vfio_pci_core_device *pf_vdev = vdev->sriov_pf_core_dev;
                bool match;
 
                if (!pf_vdev) {
@@ -1600,7 +1576,6 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
                }
 
                if (!vf_token) {
-                       vfio_device_put(&pf_vdev->vdev);
                        pci_info_ratelimited(vdev->pdev,
                                "VF token required to access device\n");
                        return -EACCES;
@@ -1610,8 +1585,6 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
                match = uuid_equal(uuid, &pf_vdev->vf_token->uuid);
                mutex_unlock(&pf_vdev->vf_token->lock);
 
-               vfio_device_put(&pf_vdev->vdev);
-
                if (!match) {
                        pci_info_ratelimited(vdev->pdev,
                                "Incorrect VF token provided for device\n");
@@ -1732,8 +1705,30 @@ static int vfio_pci_bus_notifier(struct notifier_block *nb,
 static int vfio_pci_vf_init(struct vfio_pci_core_device *vdev)
 {
        struct pci_dev *pdev = vdev->pdev;
+       struct vfio_pci_core_device *cur;
+       struct pci_dev *physfn;
        int ret;
 
+       if (pdev->is_virtfn) {
+               /*
+                * If this VF was created by our vfio_pci_core_sriov_configure()
+                * then we can find the PF vfio_pci_core_device now, and due to
+                * the locking in pci_disable_sriov() it cannot change until
+                * this VF device driver is removed.
+                */
+               physfn = pci_physfn(vdev->pdev);
+               mutex_lock(&vfio_pci_sriov_pfs_mutex);
+               list_for_each_entry(cur, &vfio_pci_sriov_pfs, sriov_pfs_item) {
+                       if (cur->pdev == physfn) {
+                               vdev->sriov_pf_core_dev = cur;
+                               break;
+                       }
+               }
+               mutex_unlock(&vfio_pci_sriov_pfs_mutex);
+               return 0;
+       }
+
+       /* Not a SRIOV PF */
        if (!pdev->is_physfn)
                return 0;
 
@@ -1805,6 +1800,7 @@ void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev,
        INIT_LIST_HEAD(&vdev->ioeventfds_list);
        mutex_init(&vdev->vma_lock);
        INIT_LIST_HEAD(&vdev->vma_list);
+       INIT_LIST_HEAD(&vdev->sriov_pfs_item);
        init_rwsem(&vdev->memory_lock);
 }
 EXPORT_SYMBOL_GPL(vfio_pci_core_init_device);
@@ -1896,7 +1892,7 @@ void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev)
 {
        struct pci_dev *pdev = vdev->pdev;
 
-       pci_disable_sriov(pdev);
+       vfio_pci_core_sriov_configure(pdev, 0);
 
        vfio_unregister_group_dev(&vdev->vdev);
 
@@ -1935,21 +1931,49 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_aer_err_detected);
 
 int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
 {
+       struct vfio_pci_core_device *vdev;
        struct vfio_device *device;
        int ret = 0;
 
+       device_lock_assert(&pdev->dev);
+
        device = vfio_device_get_from_dev(&pdev->dev);
        if (!device)
                return -ENODEV;
 
-       if (nr_virtfn == 0)
-               pci_disable_sriov(pdev);
-       else
+       vdev = container_of(device, struct vfio_pci_core_device, vdev);
+
+       if (nr_virtfn) {
+               mutex_lock(&vfio_pci_sriov_pfs_mutex);
+               /*
+                * The thread that adds the vdev to the list is the only thread
+                * that gets to call pci_enable_sriov() and we will only allow
+                * it to be called once without going through
+                * pci_disable_sriov()
+                */
+               if (!list_empty(&vdev->sriov_pfs_item)) {
+                       ret = -EINVAL;
+                       goto out_unlock;
+               }
+               list_add_tail(&vdev->sriov_pfs_item, &vfio_pci_sriov_pfs);
+               mutex_unlock(&vfio_pci_sriov_pfs_mutex);
                ret = pci_enable_sriov(pdev, nr_virtfn);
+               if (ret)
+                       goto out_del;
+               ret = nr_virtfn;
+               goto out_put;
+       }
 
-       vfio_device_put(device);
+       pci_disable_sriov(pdev);
 
-       return ret < 0 ? ret : nr_virtfn;
+out_del:
+       mutex_lock(&vfio_pci_sriov_pfs_mutex);
+       list_del_init(&vdev->sriov_pfs_item);
+out_unlock:
+       mutex_unlock(&vfio_pci_sriov_pfs_mutex);
+out_put:
+       vfio_device_put(device);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(vfio_pci_core_sriov_configure);
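
The SR-IOV rework above tracks configured PFs on a global list and uses an empty (self-linked) list_head as the "not enrolled" state, with list_del_init() restoring that state on removal. A generic sketch of that idiom under the same locking assumptions, with illustrative names:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>

static LIST_HEAD(enrolled);		/* all currently enrolled devices */
static DEFINE_MUTEX(enrolled_lock);

struct example_dev {
	struct list_head item;	/* INIT_LIST_HEAD() at allocation; empty while not enrolled */
};

static int example_enroll(struct example_dev *d)
{
	int ret = 0;

	mutex_lock(&enrolled_lock);
	if (!list_empty(&d->item))	/* already on the list */
		ret = -EINVAL;
	else
		list_add_tail(&d->item, &enrolled);
	mutex_unlock(&enrolled_lock);
	return ret;
}

static void example_unenroll(struct example_dev *d)
{
	mutex_lock(&enrolled_lock);
	list_del_init(&d->item);	/* re-init so list_empty() is true again */
	mutex_unlock(&enrolled_lock);
}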
 
index dfe26fa..617a7f4 100644
@@ -689,29 +689,34 @@ void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages)
 }
 EXPORT_SYMBOL(xen_free_ballooned_pages);
 
-#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
-static void __init balloon_add_region(unsigned long start_pfn,
-                                     unsigned long pages)
+static void __init balloon_add_regions(void)
 {
+#if defined(CONFIG_XEN_PV)
+       unsigned long start_pfn, pages;
        unsigned long pfn, extra_pfn_end;
+       unsigned int i;
 
-       /*
-        * If the amount of usable memory has been limited (e.g., with
-        * the 'mem' command line parameter), don't add pages beyond
-        * this limit.
-        */
-       extra_pfn_end = min(max_pfn, start_pfn + pages);
+       for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+               pages = xen_extra_mem[i].n_pfns;
+               if (!pages)
+                       continue;
 
-       for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
-               /* totalram_pages and totalhigh_pages do not
-                  include the boot-time balloon extension, so
-                  don't subtract from it. */
-               balloon_append(pfn_to_page(pfn));
-       }
+               start_pfn = xen_extra_mem[i].start_pfn;
 
-       balloon_stats.total_pages += extra_pfn_end - start_pfn;
-}
+               /*
+                * If the amount of usable memory has been limited (e.g., with
+                * the 'mem' command line parameter), don't add pages beyond
+                * this limit.
+                */
+               extra_pfn_end = min(max_pfn, start_pfn + pages);
+
+               for (pfn = start_pfn; pfn < extra_pfn_end; pfn++)
+                       balloon_append(pfn_to_page(pfn));
+
+               balloon_stats.total_pages += extra_pfn_end - start_pfn;
+       }
 #endif
+}
 
 static int __init balloon_init(void)
 {
@@ -745,20 +750,7 @@ static int __init balloon_init(void)
        register_sysctl_table(xen_root);
 #endif
 
-#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
-       {
-               int i;
-
-               /*
-                * Initialize the balloon with pages from the extra memory
-                * regions (see arch/x86/xen/setup.c).
-                */
-               for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
-                       if (xen_extra_mem[i].n_pfns)
-                               balloon_add_region(xen_extra_mem[i].start_pfn,
-                                                  xen_extra_mem[i].n_pfns);
-       }
-#endif
+       balloon_add_regions();
 
        task = kthread_run(balloon_thread, NULL, "xen-balloon");
        if (IS_ERR(task)) {
index 4849f94..55acb32 100644
@@ -178,9 +178,9 @@ static void __del_gref(struct gntalloc_gref *gref)
        unsigned long addr;
 
        if (gref->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
-               uint8_t *tmp = kmap(gref->page);
+               uint8_t *tmp = kmap_local_page(gref->page);
                tmp[gref->notify.pgoff] = 0;
-               kunmap(gref->page);
+               kunmap_local(tmp);
        }
        if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
                notify_remote_via_evtchn(gref->notify.event);
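
The gntalloc hunk above moves from kmap()/kunmap() to the local mapping API; a short sketch of that pattern follows (the helper name is made up for illustration):

#include <linux/highmem.h>
#include <linux/types.h>

/* Clear one byte of a possibly-highmem page via a short-lived local mapping. */
static void example_clear_byte(struct page *page, unsigned int offset)
{
	u8 *addr = kmap_local_page(page);

	addr[offset] = 0;
	kunmap_local(addr);	/* release while still in the same context */
}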
index a8b4105..a39f2d3 100644
@@ -230,39 +230,6 @@ void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
 }
 EXPORT_SYMBOL(xen_free_unpopulated_pages);
 
-#ifdef CONFIG_XEN_PV
-static int __init init(void)
-{
-       unsigned int i;
-
-       if (!xen_domain())
-               return -ENODEV;
-
-       if (!xen_pv_domain())
-               return 0;
-
-       /*
-        * Initialize with pages from the extra memory regions (see
-        * arch/x86/xen/setup.c).
-        */
-       for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
-               unsigned int j;
-
-               for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
-                       struct page *pg =
-                               pfn_to_page(xen_extra_mem[i].start_pfn + j);
-
-                       pg->zone_device_data = page_list;
-                       page_list = pg;
-                       list_count++;
-               }
-       }
-
-       return 0;
-}
-subsys_initcall(init);
-#endif
-
 static int __init unpopulated_init(void)
 {
        int ret;
index 6bcf147..4763132 100644
@@ -616,8 +616,7 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
                _debug("write discard %x @%llx [%llx]", len, start, i_size);
 
                /* The dirty region was entirely beyond the EOF. */
-               fscache_clear_page_bits(afs_vnode_cache(vnode),
-                                       mapping, start, len, caching);
+               fscache_clear_page_bits(mapping, start, len, caching);
                afs_pages_written_back(vnode, start, len);
                ret = 0;
        }
index 6556e13..63c7ebb 100644
@@ -1117,11 +1117,11 @@ out_free_interp:
                         * independently randomized mmap region (0 load_bias
                         * without MAP_FIXED nor MAP_FIXED_NOREPLACE).
                         */
-                       alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
-                       if (interpreter || alignment > ELF_MIN_ALIGN) {
+                       if (interpreter) {
                                load_bias = ELF_ET_DYN_BASE;
                                if (current->flags & PF_RANDOMIZE)
                                        load_bias += arch_mmap_rnd();
+                               alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
                                if (alignment)
                                        load_bias &= ~(alignment - 1);
                                elf_flags |= MAP_FIXED_NOREPLACE;
index c22d287..0dd6de9 100644
@@ -2503,12 +2503,6 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
                return ERR_PTR(ret);
        }
 
-       /*
-        * New block group is likely to be used soon. Try to activate it now.
-        * Failure is OK for now.
-        */
-       btrfs_zone_activate(cache);
-
        ret = exclude_super_stripes(cache);
        if (ret) {
                /* We may have excluded something, so call this just in case */
@@ -2946,7 +2940,6 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
        struct btrfs_path *path = NULL;
        LIST_HEAD(dirty);
        struct list_head *io = &cur_trans->io_bgs;
-       int num_started = 0;
        int loops = 0;
 
        spin_lock(&cur_trans->dirty_bgs_lock);
@@ -3012,7 +3005,6 @@ again:
                        cache->io_ctl.inode = NULL;
                        ret = btrfs_write_out_cache(trans, cache, path);
                        if (ret == 0 && cache->io_ctl.inode) {
-                               num_started++;
                                should_put = 0;
 
                                /*
@@ -3113,7 +3105,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
        int should_put;
        struct btrfs_path *path;
        struct list_head *io = &cur_trans->io_bgs;
-       int num_started = 0;
 
        path = btrfs_alloc_path();
        if (!path)
@@ -3171,7 +3162,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
                        cache->io_ctl.inode = NULL;
                        ret = btrfs_write_out_cache(trans, cache, path);
                        if (ret == 0 && cache->io_ctl.inode) {
-                               num_started++;
                                should_put = 0;
                                list_add_tail(&cache->io_list, io);
                        } else {
@@ -3455,7 +3445,7 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
        return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
 }
 
-static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
+static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
 {
        struct btrfs_block_group *bg;
        int ret;
@@ -3542,7 +3532,11 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
 out:
        btrfs_trans_release_chunk_metadata(trans);
 
-       return ret;
+       if (ret)
+               return ERR_PTR(ret);
+
+       btrfs_get_block_group(bg);
+       return bg;
 }
 
 /*
@@ -3657,10 +3651,17 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
 {
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_space_info *space_info;
+       struct btrfs_block_group *ret_bg;
        bool wait_for_alloc = false;
        bool should_alloc = false;
+       bool from_extent_allocation = false;
        int ret = 0;
 
+       if (force == CHUNK_ALLOC_FORCE_FOR_EXTENT) {
+               from_extent_allocation = true;
+               force = CHUNK_ALLOC_FORCE;
+       }
+
        /* Don't re-enter if we're already allocating a chunk */
        if (trans->allocating_chunk)
                return -ENOSPC;
@@ -3750,9 +3751,22 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
                        force_metadata_allocation(fs_info);
        }
 
-       ret = do_chunk_alloc(trans, flags);
+       ret_bg = do_chunk_alloc(trans, flags);
        trans->allocating_chunk = false;
 
+       if (IS_ERR(ret_bg)) {
+               ret = PTR_ERR(ret_bg);
+       } else if (from_extent_allocation) {
+               /*
+                * New block group is likely to be used soon. Try to activate
+                * it now. Failure is OK for now.
+                */
+               btrfs_zone_activate(ret_bg);
+       }
+
+       if (!ret)
+               btrfs_put_block_group(ret_bg);
+
        spin_lock(&space_info->lock);
        if (ret < 0) {
                if (ret == -ENOSPC)
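
do_chunk_alloc() above now hands back the block group itself, encoding failures with ERR_PTR() instead of a plain int. A generic sketch of that return convention, with illustrative types and names:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct example_bg {
	int id;
};

/* Return the object on success or an errno encoded in the pointer. */
static struct example_bg *example_alloc(int id)
{
	struct example_bg *bg = kzalloc(sizeof(*bg), GFP_KERNEL);

	if (!bg)
		return ERR_PTR(-ENOMEM);
	bg->id = id;
	return bg;
}

static int example_caller(void)
{
	struct example_bg *bg = example_alloc(1);

	if (IS_ERR(bg))
		return PTR_ERR(bg);	/* decode the errno back out */
	kfree(bg);
	return 0;
}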
index 93aabc6..e8308f2 100644
@@ -35,11 +35,15 @@ enum btrfs_discard_state {
  * the FS with empty chunks
  *
  * CHUNK_ALLOC_FORCE means it must try to allocate one
+ *
+ * CHUNK_ALLOC_FORCE_FOR_EXTENT is like CHUNK_ALLOC_FORCE but is called from
+ * find_free_extent() which also activates the zone
  */
 enum btrfs_chunk_alloc_enum {
        CHUNK_ALLOC_NO_FORCE,
        CHUNK_ALLOC_LIMITED,
        CHUNK_ALLOC_FORCE,
+       CHUNK_ALLOC_FORCE_FOR_EXTENT,
 };
 
 struct btrfs_caching_control {
index be476f0..19bf36d 100644
@@ -537,6 +537,9 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
        cb->orig_bio = NULL;
        cb->nr_pages = nr_pages;
 
+       if (blkcg_css)
+               kthread_associate_blkcg(blkcg_css);
+
        while (cur_disk_bytenr < disk_start + compressed_len) {
                u64 offset = cur_disk_bytenr - disk_start;
                unsigned int index = offset >> PAGE_SHIFT;
@@ -555,6 +558,8 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
                                bio = NULL;
                                goto finish_cb;
                        }
+                       if (blkcg_css)
+                               bio->bi_opf |= REQ_CGROUP_PUNT;
                }
                /*
                 * We should never reach next_stripe_start start as we will
@@ -612,6 +617,9 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
        return 0;
 
 finish_cb:
+       if (blkcg_css)
+               kthread_associate_blkcg(NULL);
+
        if (bio) {
                bio->bi_status = ret;
                bio_endio(bio);
index b30309f..126f244 100644
@@ -1850,9 +1850,10 @@ again:
 
        ret = btrfs_insert_fs_root(fs_info, root);
        if (ret) {
-               btrfs_put_root(root);
-               if (ret == -EEXIST)
+               if (ret == -EEXIST) {
+                       btrfs_put_root(root);
                        goto again;
+               }
                goto fail;
        }
        return root;
index f477035..6aa92f8 100644
@@ -4082,7 +4082,7 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
                        }
 
                        ret = btrfs_chunk_alloc(trans, ffe_ctl->flags,
-                                               CHUNK_ALLOC_FORCE);
+                                               CHUNK_ALLOC_FORCE_FOR_EXTENT);
 
                        /* Do not bail out on ENOSPC since we can do more. */
                        if (ret == -ENOSPC)
index 17d5557..5082b9c 100644
@@ -2016,8 +2016,7 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page
                 * to use run_delalloc_nocow() here, like for  regular
                 * preallocated inodes.
                 */
-               ASSERT(!zoned ||
-                      (zoned && btrfs_is_data_reloc_root(inode->root)));
+               ASSERT(!zoned || btrfs_is_data_reloc_root(inode->root));
                ret = run_delalloc_nocow(inode, locked_page, start, end,
                                         page_started, nr_written);
        } else if (!inode_can_compress(inode) ||
@@ -7444,6 +7443,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
        u64 block_start, orig_start, orig_block_len, ram_bytes;
        bool can_nocow = false;
        bool space_reserved = false;
+       u64 prev_len;
        int ret = 0;
 
        /*
@@ -7471,6 +7471,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
                        can_nocow = true;
        }
 
+       prev_len = len;
        if (can_nocow) {
                struct extent_map *em2;
 
@@ -7500,8 +7501,6 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
                        goto out;
                }
        } else {
-               const u64 prev_len = len;
-
                /* Our caller expects us to free the input extent map. */
                free_extent_map(em);
                *map = NULL;
@@ -7532,7 +7531,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
         * We have created our ordered extent, so we can now release our reservation
         * for an outstanding extent.
         */
-       btrfs_delalloc_release_extents(BTRFS_I(inode), len);
+       btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len);
 
        /*
         * Need to update the i_size under the extent lock so buffered
index f46e710..be6c245 100644
@@ -5456,8 +5456,6 @@ long btrfs_ioctl(struct file *file, unsigned int
                return btrfs_ioctl_fs_info(fs_info, argp);
        case BTRFS_IOC_DEV_INFO:
                return btrfs_ioctl_dev_info(fs_info, argp);
-       case BTRFS_IOC_BALANCE:
-               return btrfs_ioctl_balance(file, NULL);
        case BTRFS_IOC_TREE_SEARCH:
                return btrfs_ioctl_tree_search(inode, argp);
        case BTRFS_IOC_TREE_SEARCH_V2:
index 2cfbc74..a8cc736 100644
@@ -4430,10 +4430,12 @@ static int balance_kthread(void *data)
        struct btrfs_fs_info *fs_info = data;
        int ret = 0;
 
+       sb_start_write(fs_info->sb);
        mutex_lock(&fs_info->balance_mutex);
        if (fs_info->balance_ctl)
                ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
        mutex_unlock(&fs_info->balance_mutex);
+       sb_end_write(fs_info->sb);
 
        return ret;
 }
index f256c8a..ca9f3e4 100644
@@ -57,6 +57,16 @@ static void __cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
        trace_cachefiles_mark_inactive(object, inode);
 }
 
+static void cachefiles_do_unmark_inode_in_use(struct cachefiles_object *object,
+                                             struct dentry *dentry)
+{
+       struct inode *inode = d_backing_inode(dentry);
+
+       inode_lock(inode);
+       __cachefiles_unmark_inode_in_use(object, dentry);
+       inode_unlock(inode);
+}
+
 /*
  * Unmark a backing inode and tell cachefilesd that there's something that can
  * be culled.
@@ -68,9 +78,7 @@ void cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
        struct inode *inode = file_inode(file);
 
        if (inode) {
-               inode_lock(inode);
-               __cachefiles_unmark_inode_in_use(object, file->f_path.dentry);
-               inode_unlock(inode);
+               cachefiles_do_unmark_inode_in_use(object, file->f_path.dentry);
 
                if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) {
                        atomic_long_add(inode->i_blocks, &cache->b_released);
@@ -484,7 +492,7 @@ struct file *cachefiles_create_tmpfile(struct cachefiles_object *object)
                                object, d_backing_inode(path.dentry), ret,
                                cachefiles_trace_trunc_error);
                        file = ERR_PTR(ret);
-                       goto out_dput;
+                       goto out_unuse;
                }
        }
 
@@ -494,15 +502,20 @@ struct file *cachefiles_create_tmpfile(struct cachefiles_object *object)
                trace_cachefiles_vfs_error(object, d_backing_inode(path.dentry),
                                           PTR_ERR(file),
                                           cachefiles_trace_open_error);
-               goto out_dput;
+               goto out_unuse;
        }
        if (unlikely(!file->f_op->read_iter) ||
            unlikely(!file->f_op->write_iter)) {
                fput(file);
                pr_notice("Cache does not support read_iter and write_iter\n");
                file = ERR_PTR(-EINVAL);
+               goto out_unuse;
        }
 
+       goto out_dput;
+
+out_unuse:
+       cachefiles_do_unmark_inode_in_use(object, path.dentry);
 out_dput:
        dput(path.dentry);
 out:
@@ -590,14 +603,16 @@ static bool cachefiles_open_file(struct cachefiles_object *object,
 check_failed:
        fscache_cookie_lookup_negative(object->cookie);
        cachefiles_unmark_inode_in_use(object, file);
-       if (ret == -ESTALE) {
-               fput(file);
-               dput(dentry);
+       fput(file);
+       dput(dentry);
+       if (ret == -ESTALE)
                return cachefiles_create_file(object);
-       }
+       return false;
+
 error_fput:
        fput(file);
 error:
+       cachefiles_do_unmark_inode_in_use(object, dentry);
        dput(dentry);
        return false;
 }
index 3546510..00b087c 100644
@@ -203,7 +203,7 @@ bool cachefiles_set_volume_xattr(struct cachefiles_volume *volume)
        if (!buf)
                return false;
        buf->reserved = cpu_to_be32(0);
-       memcpy(buf->data, p, len);
+       memcpy(buf->data, p, volume->vcookie->coherency_len);
 
        ret = cachefiles_inject_write_error();
        if (ret == 0)
index a47fa44..2b1a1c0 100644
@@ -266,22 +266,24 @@ static void cifs_kill_sb(struct super_block *sb)
         * before we kill the sb.
         */
        if (cifs_sb->root) {
+               for (node = rb_first(root); node; node = rb_next(node)) {
+                       tlink = rb_entry(node, struct tcon_link, tl_rbnode);
+                       tcon = tlink_tcon(tlink);
+                       if (IS_ERR(tcon))
+                               continue;
+                       cfid = &tcon->crfid;
+                       mutex_lock(&cfid->fid_mutex);
+                       if (cfid->dentry) {
+                               dput(cfid->dentry);
+                               cfid->dentry = NULL;
+                       }
+                       mutex_unlock(&cfid->fid_mutex);
+               }
+
+               /* finally release root dentry */
                dput(cifs_sb->root);
                cifs_sb->root = NULL;
        }
-       node = rb_first(root);
-       while (node != NULL) {
-               tlink = rb_entry(node, struct tcon_link, tl_rbnode);
-               tcon = tlink_tcon(tlink);
-               cfid = &tcon->crfid;
-               mutex_lock(&cfid->fid_mutex);
-               if (cfid->dentry) {
-                       dput(cfid->dentry);
-                       cfid->dentry = NULL;
-               }
-               mutex_unlock(&cfid->fid_mutex);
-               node = rb_next(node);
-       }
 
        kill_anon_super(sb);
        cifs_umount(cifs_sb);
@@ -944,7 +946,7 @@ cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
        ssize_t rc;
        struct inode *inode = file_inode(iocb->ki_filp);
 
-       if (iocb->ki_filp->f_flags & O_DIRECT)
+       if (iocb->ki_flags & IOCB_DIRECT)
                return cifs_user_readv(iocb, iter);
 
        rc = cifs_revalidate_mapping(inode);
index 54155eb..42e14f4 100644
@@ -534,12 +534,19 @@ int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
 {
        /* If tcp session is not an dfs connection, then reconnect to last target server */
        spin_lock(&cifs_tcp_ses_lock);
-       if (!server->is_dfs_conn || !server->origin_fullpath || !server->leaf_fullpath) {
+       if (!server->is_dfs_conn) {
                spin_unlock(&cifs_tcp_ses_lock);
                return __cifs_reconnect(server, mark_smb_session);
        }
        spin_unlock(&cifs_tcp_ses_lock);
 
+       mutex_lock(&server->refpath_lock);
+       if (!server->origin_fullpath || !server->leaf_fullpath) {
+               mutex_unlock(&server->refpath_lock);
+               return __cifs_reconnect(server, mark_smb_session);
+       }
+       mutex_unlock(&server->refpath_lock);
+
        return reconnect_dfs_server(server);
 }
 #else
@@ -1049,7 +1056,7 @@ smb2_add_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
                spin_unlock(&server->req_lock);
                wake_up(&server->request_q);
 
-               trace_smb3_add_credits(server->CurrentMid,
+               trace_smb3_hdr_credits(server->CurrentMid,
                                server->conn_id, server->hostname, scredits,
                                le16_to_cpu(shdr->CreditRequest), in_flight);
                cifs_server_dbg(FYI, "%s: added %u credits total=%d\n",
@@ -3675,9 +3682,11 @@ static void setup_server_referral_paths(struct mount_ctx *mnt_ctx)
 {
        struct TCP_Server_Info *server = mnt_ctx->server;
 
+       mutex_lock(&server->refpath_lock);
        server->origin_fullpath = mnt_ctx->origin_fullpath;
        server->leaf_fullpath = mnt_ctx->leaf_fullpath;
        server->current_fullpath = mnt_ctx->leaf_fullpath;
+       mutex_unlock(&server->refpath_lock);
        mnt_ctx->origin_fullpath = mnt_ctx->leaf_fullpath = NULL;
 }
 
index 30e040d..956f8e5 100644
@@ -1422,12 +1422,14 @@ static int refresh_tcon(struct cifs_ses **sessions, struct cifs_tcon *tcon, bool
        struct TCP_Server_Info *server = tcon->ses->server;
 
        mutex_lock(&server->refpath_lock);
-       if (strcasecmp(server->leaf_fullpath, server->origin_fullpath))
-               __refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, force_refresh);
+       if (server->origin_fullpath) {
+               if (server->leaf_fullpath && strcasecmp(server->leaf_fullpath,
+                                                       server->origin_fullpath))
+                       __refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, force_refresh);
+               __refresh_tcon(server->origin_fullpath + 1, sessions, tcon, force_refresh);
+       }
        mutex_unlock(&server->refpath_lock);
 
-       __refresh_tcon(server->origin_fullpath + 1, sessions, tcon, force_refresh);
-
        return 0;
 }
 
@@ -1530,11 +1532,14 @@ static void refresh_mounts(struct cifs_ses **sessions)
                list_del_init(&tcon->ulist);
 
                mutex_lock(&server->refpath_lock);
-               if (strcasecmp(server->leaf_fullpath, server->origin_fullpath))
-                       __refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, false);
+               if (server->origin_fullpath) {
+                       if (server->leaf_fullpath && strcasecmp(server->leaf_fullpath,
+                                                               server->origin_fullpath))
+                               __refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, false);
+                       __refresh_tcon(server->origin_fullpath + 1, sessions, tcon, false);
+               }
                mutex_unlock(&server->refpath_lock);
 
-               __refresh_tcon(server->origin_fullpath + 1, sessions, tcon, false);
                cifs_put_tcon(tcon);
        }
 }
index 852e54e..bbdf328 100644
@@ -85,6 +85,9 @@ parse_mf_symlink(const u8 *buf, unsigned int buf_len, unsigned int *_link_len,
        if (rc != 1)
                return -EINVAL;
 
+       if (link_len > CIFS_MF_SYMLINK_LINK_MAXLEN)
+               return -EINVAL;
+
        rc = symlink_hash(link_len, link_str, md5_hash);
        if (rc) {
                cifs_dbg(FYI, "%s: MD5 hash failure: %d\n", __func__, rc);
index db23f5b..d6aaeff 100644
@@ -86,6 +86,9 @@ smb2_add_credits(struct TCP_Server_Info *server,
        if (*val > 65000) {
                *val = 65000; /* Don't get near 64K credits, avoid srv bugs */
                pr_warn_once("server overflowed SMB3 credits\n");
+               trace_smb3_overflow_credits(server->CurrentMid,
+                                           server->conn_id, server->hostname, *val,
+                                           add, server->in_flight);
        }
        server->in_flight--;
        if (server->in_flight == 0 &&
@@ -251,7 +254,7 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
        in_flight = server->in_flight;
        spin_unlock(&server->req_lock);
 
-       trace_smb3_add_credits(server->CurrentMid,
+       trace_smb3_wait_credits(server->CurrentMid,
                        server->conn_id, server->hostname, scredits, -(credits->value), in_flight);
        cifs_dbg(FYI, "%s: removed %u credits total=%d\n",
                        __func__, credits->value, scredits);
@@ -300,7 +303,7 @@ smb2_adjust_credits(struct TCP_Server_Info *server,
        spin_unlock(&server->req_lock);
        wake_up(&server->request_q);
 
-       trace_smb3_add_credits(server->CurrentMid,
+       trace_smb3_adj_credits(server->CurrentMid,
                        server->conn_id, server->hostname, scredits,
                        credits->value - new_val, in_flight);
        cifs_dbg(FYI, "%s: adjust added %u credits total=%d\n",
@@ -1855,9 +1858,17 @@ smb2_copychunk_range(const unsigned int xid,
        int chunks_copied = 0;
        bool chunk_sizes_updated = false;
        ssize_t bytes_written, total_bytes_written = 0;
+       struct inode *inode;
 
        pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
 
+       /*
+        * We need to flush all unwritten data before we can send the
+        * copychunk ioctl to the server.
+        */
+       inode = d_inode(trgtfile->dentry);
+       filemap_write_and_wait(inode->i_mapping);
+
        if (pcchunk == NULL)
                return -ENOMEM;
 
@@ -2492,7 +2503,7 @@ smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
                spin_unlock(&server->req_lock);
                wake_up(&server->request_q);
 
-               trace_smb3_add_credits(server->CurrentMid,
+               trace_smb3_pend_credits(server->CurrentMid,
                                server->conn_id, server->hostname, scredits,
                                le16_to_cpu(shdr->CreditRequest), in_flight);
                cifs_dbg(FYI, "%s: status pending add %u credits total=%d\n",
index 6cecf30..bc27961 100644
@@ -1006,6 +1006,13 @@ DEFINE_SMB3_CREDIT_EVENT(credit_timeout);
 DEFINE_SMB3_CREDIT_EVENT(insufficient_credits);
 DEFINE_SMB3_CREDIT_EVENT(too_many_credits);
 DEFINE_SMB3_CREDIT_EVENT(add_credits);
+DEFINE_SMB3_CREDIT_EVENT(adj_credits);
+DEFINE_SMB3_CREDIT_EVENT(hdr_credits);
+DEFINE_SMB3_CREDIT_EVENT(nblk_credits);
+DEFINE_SMB3_CREDIT_EVENT(pend_credits);
+DEFINE_SMB3_CREDIT_EVENT(wait_credits);
+DEFINE_SMB3_CREDIT_EVENT(waitff_credits);
+DEFINE_SMB3_CREDIT_EVENT(overflow_credits);
 DEFINE_SMB3_CREDIT_EVENT(set_credits);
 
 #endif /* _CIFS_TRACE_H */
index eeb1a69..c667e6d 100644
@@ -464,13 +464,12 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
                return -EIO;
        }
 
-       tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
+       tr_hdr = kzalloc(sizeof(*tr_hdr), GFP_NOFS);
        if (!tr_hdr)
                return -ENOMEM;
 
        memset(&cur_rqst[0], 0, sizeof(cur_rqst));
        memset(&iov, 0, sizeof(iov));
-       memset(tr_hdr, 0, sizeof(*tr_hdr));
 
        iov.iov_base = tr_hdr;
        iov.iov_len = sizeof(*tr_hdr);
@@ -542,7 +541,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                in_flight = server->in_flight;
                spin_unlock(&server->req_lock);
 
-               trace_smb3_add_credits(server->CurrentMid,
+               trace_smb3_nblk_credits(server->CurrentMid,
                                server->conn_id, server->hostname, scredits, -1, in_flight);
                cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
                                __func__, 1, scredits);
@@ -648,7 +647,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                        in_flight = server->in_flight;
                        spin_unlock(&server->req_lock);
 
-                       trace_smb3_add_credits(server->CurrentMid,
+                       trace_smb3_waitff_credits(server->CurrentMid,
                                        server->conn_id, server->hostname, scredits,
                                        -(num_credits), in_flight);
                        cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
index 0ed880f..e6dea6d 100644 (file)
@@ -1066,12 +1066,9 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
 
        /* wake up the caller thread for sync decompression */
        if (sync) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&io->u.wait.lock, flags);
                if (!atomic_add_return(bios, &io->pending_bios))
-                       wake_up_locked(&io->u.wait);
-               spin_unlock_irqrestore(&io->u.wait.lock, flags);
+                       complete(&io->u.done);
+
                return;
        }
 
@@ -1217,7 +1214,7 @@ jobqueue_init(struct super_block *sb,
        } else {
 fg_out:
                q = fgq;
-               init_waitqueue_head(&fgq->u.wait);
+               init_completion(&fgq->u.done);
                atomic_set(&fgq->pending_bios, 0);
        }
        q->sb = sb;
@@ -1419,8 +1416,7 @@ static void z_erofs_runqueue(struct super_block *sb,
                return;
 
        /* wait until all bios are completed */
-       io_wait_event(io[JQ_SUBMIT].u.wait,
-                     !atomic_read(&io[JQ_SUBMIT].pending_bios));
+       wait_for_completion_io(&io[JQ_SUBMIT].u.done);
 
        /* handle synchronous decompress queue in the caller context */
        z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool);
index e043216..800b11c 100644 (file)
@@ -97,7 +97,7 @@ struct z_erofs_decompressqueue {
        z_erofs_next_pcluster_t head;
 
        union {
-               wait_queue_head_t wait;
+               struct completion done;
                struct work_struct work;
        } u;
 };
index 3f87cca..a743b1e 100644 (file)
@@ -2273,6 +2273,10 @@ static inline int ext4_forced_shutdown(struct ext4_sb_info *sbi)
  * Structure of a directory entry
  */
 #define EXT4_NAME_LEN 255
+/*
+ * Base length of the ext4 directory entry excluding the name length
+ */
+#define EXT4_BASE_DIR_LEN (sizeof(struct ext4_dir_entry_2) - EXT4_NAME_LEN)
 
 struct ext4_dir_entry {
        __le32  inode;                  /* Inode number */
@@ -3032,7 +3036,7 @@ extern int ext4_inode_attach_jinode(struct inode *inode);
 extern int ext4_can_truncate(struct inode *inode);
 extern int ext4_truncate(struct inode *);
 extern int ext4_break_layouts(struct inode *);
-extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
+extern int ext4_punch_hole(struct file *file, loff_t offset, loff_t length);
 extern void ext4_set_inode_flags(struct inode *, bool init);
 extern int ext4_alloc_da_blocks(struct inode *inode);
 extern void ext4_set_aops(struct inode *inode);
@@ -3064,6 +3068,7 @@ int ext4_fileattr_set(struct user_namespace *mnt_userns,
                      struct dentry *dentry, struct fileattr *fa);
 int ext4_fileattr_get(struct dentry *dentry, struct fileattr *fa);
 extern void ext4_reset_inode_seed(struct inode *inode);
+int ext4_update_overhead(struct super_block *sb);
 
 /* migrate.c */
 extern int ext4_ext_migrate(struct inode *);
index 0d98cf4..e473fde 100644 (file)
@@ -4500,9 +4500,9 @@ retry:
        return ret > 0 ? ret2 : ret;
 }
 
-static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len);
+static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len);
 
-static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len);
+static int ext4_insert_range(struct file *file, loff_t offset, loff_t len);
 
 static long ext4_zero_range(struct file *file, loff_t offset,
                            loff_t len, int mode)
@@ -4574,6 +4574,10 @@ static long ext4_zero_range(struct file *file, loff_t offset,
        /* Wait all existing dio workers, newcomers will block on i_rwsem */
        inode_dio_wait(inode);
 
+       ret = file_modified(file);
+       if (ret)
+               goto out_mutex;
+
        /* Preallocate the range including the unaligned edges */
        if (partial_begin || partial_end) {
                ret = ext4_alloc_file_blocks(file,
@@ -4690,7 +4694,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
                return -EOPNOTSUPP;
 
        if (mode & FALLOC_FL_PUNCH_HOLE) {
-               ret = ext4_punch_hole(inode, offset, len);
+               ret = ext4_punch_hole(file, offset, len);
                goto exit;
        }
 
@@ -4699,12 +4703,12 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
                goto exit;
 
        if (mode & FALLOC_FL_COLLAPSE_RANGE) {
-               ret = ext4_collapse_range(inode, offset, len);
+               ret = ext4_collapse_range(file, offset, len);
                goto exit;
        }
 
        if (mode & FALLOC_FL_INSERT_RANGE) {
-               ret = ext4_insert_range(inode, offset, len);
+               ret = ext4_insert_range(file, offset, len);
                goto exit;
        }
 
@@ -4740,6 +4744,10 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
        /* Wait all existing dio workers, newcomers will block on i_rwsem */
        inode_dio_wait(inode);
 
+       ret = file_modified(file);
+       if (ret)
+               goto out;
+
        ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
        if (ret)
                goto out;
@@ -5241,8 +5249,9 @@ out:
  * This implements fallocate's collapse range functionality for ext4.
  * Returns: 0 on success and non-zero on error.
  */
-static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
+static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
 {
+       struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        struct address_space *mapping = inode->i_mapping;
        ext4_lblk_t punch_start, punch_stop;
@@ -5294,6 +5303,10 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
        /* Wait for existing dio to complete */
        inode_dio_wait(inode);
 
+       ret = file_modified(file);
+       if (ret)
+               goto out_mutex;
+
        /*
         * Prevent page faults from reinstantiating pages we have released from
         * page cache.
@@ -5387,8 +5400,9 @@ out_mutex:
  * by len bytes.
  * Returns 0 on success, error otherwise.
  */
-static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
+static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
 {
+       struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        struct address_space *mapping = inode->i_mapping;
        handle_t *handle;
@@ -5445,6 +5459,10 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
        /* Wait for existing dio to complete */
        inode_dio_wait(inode);
 
+       ret = file_modified(file);
+       if (ret)
+               goto out_mutex;
+
        /*
         * Prevent page faults from reinstantiating pages we have released from
         * page cache.
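
The ext4_fallocate() hunks above route FALLOC_FL_PUNCH_HOLE, FALLOC_FL_COLLAPSE_RANGE and FALLOC_FL_INSERT_RANGE to helpers that now take a struct file and call file_modified(). A hedged userspace sketch of two of those modes follows (illustrative only; it assumes a 4 KiB filesystem block size, since collapse offsets and lengths must be block aligned).

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Preallocate 1 MiB; without KEEP_SIZE this also extends i_size. */
	if (fallocate(fd, 0, 0, 1 << 20) < 0)
		perror("preallocate");
	/* Punch a 4 KiB hole; PUNCH_HOLE must be paired with KEEP_SIZE. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4096, 4096) < 0)
		perror("punch hole");
	/* Drop the first block and shift the rest of the file down. */
	if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 0, 4096) < 0)
		perror("collapse range");

	close(fd);
	return 0;
}
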
index 13740f2..646ece9 100644 (file)
@@ -3953,12 +3953,14 @@ int ext4_break_layouts(struct inode *inode)
  * Returns: 0 on success or negative on failure
  */
 
-int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
+int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
 {
+       struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        ext4_lblk_t first_block, stop_block;
        struct address_space *mapping = inode->i_mapping;
-       loff_t first_block_offset, last_block_offset;
+       loff_t first_block_offset, last_block_offset, max_length;
+       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        handle_t *handle;
        unsigned int credits;
        int ret = 0, ret2 = 0;
@@ -4001,6 +4003,14 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
                   offset;
        }
 
+       /*
+        * For punch hole, offset + length must stay at least one block below
+        * s_bitmap_maxbytes. Clamp the length if it exceeds that limit.
+        */
+       max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize;
+       if (offset + length > max_length)
+               length = max_length - offset;
+
        if (offset & (sb->s_blocksize - 1) ||
            (offset + length) & (sb->s_blocksize - 1)) {
                /*
@@ -4016,6 +4026,10 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
        /* Wait all existing dio workers, newcomers will block on i_rwsem */
        inode_dio_wait(inode);
 
+       ret = file_modified(file);
+       if (ret)
+               goto out_mutex;
+
        /*
         * Prevent page faults from reinstantiating pages we have released from
         * page cache.
index 992229c..ba44fa1 100644 (file)
@@ -1652,3 +1652,19 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        return ext4_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
 }
 #endif
+
+static void set_overhead(struct ext4_super_block *es, const void *arg)
+{
+       es->s_overhead_clusters = cpu_to_le32(*((unsigned long *) arg));
+}
+
+int ext4_update_overhead(struct super_block *sb)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+       if (sb_rdonly(sb) || sbi->s_overhead == 0 ||
+           sbi->s_overhead == le32_to_cpu(sbi->s_es->s_overhead_clusters))
+               return 0;
+
+       return ext4_update_superblocks_fn(sb, set_overhead, &sbi->s_overhead);
+}
index e37da8d..767b4bf 100644 (file)
@@ -1466,10 +1466,10 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
 
        de = (struct ext4_dir_entry_2 *)search_buf;
        dlimit = search_buf + buf_size;
-       while ((char *) de < dlimit) {
+       while ((char *) de < dlimit - EXT4_BASE_DIR_LEN) {
                /* this code is executed quadratically often */
                /* do minimal checking `by hand' */
-               if ((char *) de + de->name_len <= dlimit &&
+               if (de->name + de->name_len <= dlimit &&
                    ext4_match(dir, fname, de)) {
                        /* found a match - just to be sure, do
                         * a full check */
index 495ce59..14695e2 100644 (file)
@@ -134,8 +134,10 @@ static void ext4_finish_bio(struct bio *bio)
                                continue;
                        }
                        clear_buffer_async_write(bh);
-                       if (bio->bi_status)
+                       if (bio->bi_status) {
+                               set_buffer_write_io_error(bh);
                                buffer_io_error(bh);
+                       }
                } while ((bh = bh->b_this_page) != head);
                spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
                if (!under_io) {
index 81749ea..1466fbd 100644 (file)
@@ -1199,20 +1199,25 @@ static void ext4_put_super(struct super_block *sb)
        int aborted = 0;
        int i, err;
 
-       ext4_unregister_li_request(sb);
-       ext4_quota_off_umount(sb);
-
-       flush_work(&sbi->s_error_work);
-       destroy_workqueue(sbi->rsv_conversion_wq);
-       ext4_release_orphan_info(sb);
-
        /*
         * Unregister sysfs before destroying jbd2 journal.
         * Since we could still access attr_journal_task attribute via sysfs
         * path which could have sbi->s_journal->j_task as NULL
+        * Also unregister sysfs before flushing sbi->s_error_work: a user may
+        * read /proc/fs/ext4/xx/mb_groups during umount, and a failed metadata
+        * read verification queues the error work;
+        * flush_stashed_error_work would then call start_this_handle and may
+        * trigger a BUG_ON.
         */
        ext4_unregister_sysfs(sb);
 
+       ext4_unregister_li_request(sb);
+       ext4_quota_off_umount(sb);
+
+       flush_work(&sbi->s_error_work);
+       destroy_workqueue(sbi->rsv_conversion_wq);
+       ext4_release_orphan_info(sb);
+
        if (sbi->s_journal) {
                aborted = is_journal_aborted(sbi->s_journal);
                err = jbd2_journal_destroy(sbi->s_journal);
@@ -4172,9 +4177,11 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
        ext4_fsblk_t            first_block, last_block, b;
        ext4_group_t            i, ngroups = ext4_get_groups_count(sb);
        int                     s, j, count = 0;
+       int                     has_super = ext4_bg_has_super(sb, grp);
 
        if (!ext4_has_feature_bigalloc(sb))
-               return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
+               return (has_super + ext4_bg_num_gdb(sb, grp) +
+                       (has_super ? le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0) +
                        sbi->s_itb_per_group + 2);
 
        first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
@@ -5282,9 +5289,18 @@ no_journal:
         * Get the # of file system overhead blocks from the
         * superblock if present.
         */
-       if (es->s_overhead_clusters)
-               sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
-       else {
+       sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
+       /* ignore the precalculated value if it is ridiculous */
+       if (sbi->s_overhead > ext4_blocks_count(es))
+               sbi->s_overhead = 0;
+       /*
+        * If the bigalloc feature is not enabled, recalculating the
+        * overhead doesn't take long, so we might as well just redo
+        * it to make sure we are using the correct value.
+        */
+       if (!ext4_has_feature_bigalloc(sb))
+               sbi->s_overhead = 0;
+       if (sbi->s_overhead == 0) {
                err = ext4_calculate_overhead(sb);
                if (err)
                        goto failed_mount_wq;
@@ -5602,6 +5618,8 @@ static int ext4_fill_super(struct super_block *sb, struct fs_context *fc)
                ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
                         "Quota mode: %s.", descr, ext4_quota_mode(sb));
 
+       /* Update the s_overhead_clusters if necessary */
+       ext4_update_overhead(sb);
        return 0;
 
 free_sbi:
index 76316c4..b313a97 100644 (file)
@@ -38,6 +38,3 @@ config FSCACHE_DEBUG
           enabled by setting bits in /sys/module/fscache/parameters/debug.
 
          See Documentation/filesystems/caching/fscache.rst for more information.
-
-config FSCACHE_OLD_API
-       bool
index 2749933..d645f8b 100644 (file)
@@ -214,7 +214,7 @@ void fscache_relinquish_cache(struct fscache_cache *cache)
 
        cache->ops = NULL;
        cache->cache_priv = NULL;
-       smp_store_release(&cache->state, FSCACHE_CACHE_IS_NOT_PRESENT);
+       fscache_set_cache_state(cache, FSCACHE_CACHE_IS_NOT_PRESENT);
        fscache_put_cache(cache, where);
 }
 EXPORT_SYMBOL(fscache_relinquish_cache);
index 9bb1ab5..9d3cf01 100644 (file)
@@ -30,7 +30,7 @@ static DEFINE_SPINLOCK(fscache_cookie_lru_lock);
 DEFINE_TIMER(fscache_cookie_lru_timer, fscache_cookie_lru_timed_out);
 static DECLARE_WORK(fscache_cookie_lru_work, fscache_cookie_lru_worker);
 static const char fscache_cookie_states[FSCACHE_COOKIE_STATE__NR] = "-LCAIFUWRD";
-unsigned int fscache_lru_cookie_timeout = 10 * HZ;
+static unsigned int fscache_lru_cookie_timeout = 10 * HZ;
 
 void fscache_print_cookie(struct fscache_cookie *cookie, char prefix)
 {
@@ -1069,6 +1069,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie,
 }
 EXPORT_SYMBOL(__fscache_invalidate);
 
+#ifdef CONFIG_PROC_FS
 /*
  * Generate a list of extant cookies in /proc/fs/fscache/cookies
  */
@@ -1145,3 +1146,4 @@ const struct seq_operations fscache_cookies_seq_ops = {
        .stop   = fscache_cookies_seq_stop,
        .show   = fscache_cookies_seq_show,
 };
+#endif
index ed1c9ed..1336f51 100644 (file)
@@ -56,7 +56,9 @@ static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
  * cookie.c
  */
 extern struct kmem_cache *fscache_cookie_jar;
+#ifdef CONFIG_PROC_FS
 extern const struct seq_operations fscache_cookies_seq_ops;
+#endif
 extern struct timer_list fscache_cookie_lru_timer;
 
 extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
@@ -137,7 +139,9 @@ int fscache_stats_show(struct seq_file *m, void *v);
 /*
  * volume.c
  */
+#ifdef CONFIG_PROC_FS
 extern const struct seq_operations fscache_volumes_seq_ops;
+#endif
 
 struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
                                          enum fscache_volume_trace where);
index c8c7fe9..3af3b08 100644 (file)
@@ -235,8 +235,7 @@ static void fscache_wreq_done(void *priv, ssize_t transferred_or_error,
 {
        struct fscache_write_request *wreq = priv;
 
-       fscache_clear_page_bits(fscache_cres_cookie(&wreq->cache_resources),
-                               wreq->mapping, wreq->start, wreq->len,
+       fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len,
                                wreq->set_bits);
 
        if (wreq->term_func)
@@ -296,7 +295,7 @@ abandon_end:
 abandon_free:
        kfree(wreq);
 abandon:
-       fscache_clear_page_bits(cookie, mapping, start, len, cond);
+       fscache_clear_page_bits(mapping, start, len, cond);
        if (term_func)
                term_func(term_func_priv, ret, false);
 }
index 99c7477..6f86349 100644 (file)
@@ -195,7 +195,6 @@ out:
  * Called under mmap_write_lock(mm).
  */
 
-#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 static unsigned long
 hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
@@ -206,7 +205,7 @@ hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
        info.flags = 0;
        info.length = len;
        info.low_limit = current->mm->mmap_base;
-       info.high_limit = TASK_SIZE;
+       info.high_limit = arch_get_mmap_end(addr, len, flags);
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        return vm_unmapped_area(&info);
@@ -222,7 +221,7 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = max(PAGE_SIZE, mmap_min_addr);
-       info.high_limit = current->mm->mmap_base;
+       info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);
@@ -237,20 +236,22 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = current->mm->mmap_base;
-               info.high_limit = TASK_SIZE;
+               info.high_limit = arch_get_mmap_end(addr, len, flags);
                addr = vm_unmapped_area(&info);
        }
 
        return addr;
 }
 
-static unsigned long
-hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
-               unsigned long len, unsigned long pgoff, unsigned long flags)
+unsigned long
+generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+                                 unsigned long len, unsigned long pgoff,
+                                 unsigned long flags)
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct hstate *h = hstate_file(file);
+       const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
 
        if (len & ~huge_page_mask(h))
                return -EINVAL;
@@ -266,7 +267,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
-               if (TASK_SIZE - len >= addr &&
+               if (mmap_end - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
@@ -282,6 +283,15 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
        return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                        pgoff, flags);
 }
+
+#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+static unsigned long
+hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+                         unsigned long len, unsigned long pgoff,
+                         unsigned long flags)
+{
+       return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);
+}
 #endif
 
 static size_t
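
The hugetlbfs hunks above clamp hugetlb_get_unmapped_area() with arch_get_mmap_end()/arch_get_mmap_base() and export a generic helper for architectures to reuse. A hedged userspace sketch of a mapping whose placement goes through that path (illustrative only; it assumes a 2 MiB default huge page size and that huge pages have been reserved, e.g. via /proc/sys/vm/nr_hugepages, otherwise mmap() fails with ENOMEM).

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2UL * 1024 * 1024;    /* one 2 MiB huge page */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}
	printf("huge page mapped at %p\n", p);
	munmap(p, len);
	return 0;
}
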
index 04d374e..dbecd27 100644 (file)
@@ -155,7 +155,6 @@ struct io_wq_work_node *wq_stack_extract(struct io_wq_work_node *stack)
 struct io_wq_work {
        struct io_wq_work_node list;
        unsigned flags;
-       int fd;
 };
 
 static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
index 659f8ec..7625b29 100644 (file)
@@ -907,7 +907,11 @@ struct io_kiocb {
 
        u64                             user_data;
        u32                             result;
-       u32                             cflags;
+       /* fd initially, then cflags for completion */
+       union {
+               u32                     cflags;
+               int                     fd;
+       };
 
        struct io_ring_ctx              *ctx;
        struct task_struct              *task;
@@ -916,8 +920,12 @@ struct io_kiocb {
        /* store used ubuf, so we can prevent reloading */
        struct io_mapped_ubuf           *imu;
 
-       /* used by request caches, completion batching and iopoll */
-       struct io_wq_work_node          comp_list;
+       union {
+               /* used by request caches, completion batching and iopoll */
+               struct io_wq_work_node  comp_list;
+               /* cache ->apoll->events */
+               int apoll_events;
+       };
        atomic_t                        refs;
        atomic_t                        poll_refs;
        struct io_task_work             io_task_work;
@@ -2789,11 +2797,10 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
                /* order with io_complete_rw_iopoll(), e.g. ->result updates */
                if (!smp_load_acquire(&req->iopoll_completed))
                        break;
+               nr_events++;
                if (unlikely(req->flags & REQ_F_CQE_SKIP))
                        continue;
-
                __io_fill_cqe_req(req, req->result, io_put_kbuf(req, 0));
-               nr_events++;
        }
 
        if (unlikely(!nr_events))
@@ -3183,19 +3190,18 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
 static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
 {
        struct kiocb *kiocb = &req->rw.kiocb;
-       bool is_stream = req->file->f_mode & FMODE_STREAM;
 
-       if (kiocb->ki_pos == -1) {
-               if (!is_stream) {
-                       req->flags |= REQ_F_CUR_POS;
-                       kiocb->ki_pos = req->file->f_pos;
-                       return &kiocb->ki_pos;
-               } else {
-                       kiocb->ki_pos = 0;
-                       return NULL;
-               }
+       if (kiocb->ki_pos != -1)
+               return &kiocb->ki_pos;
+
+       if (!(req->file->f_mode & FMODE_STREAM)) {
+               req->flags |= REQ_F_CUR_POS;
+               kiocb->ki_pos = req->file->f_pos;
+               return &kiocb->ki_pos;
        }
-       return is_stream ? NULL : &kiocb->ki_pos;
+
+       kiocb->ki_pos = 0;
+       return NULL;
 }
 
 static void kiocb_done(struct io_kiocb *req, ssize_t ret,
@@ -3825,8 +3831,10 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
                iovec = NULL;
        }
        ret = io_rw_init_file(req, FMODE_READ);
-       if (unlikely(ret))
+       if (unlikely(ret)) {
+               kfree(iovec);
                return ret;
+       }
        req->result = iov_iter_count(&s->iter);
 
        if (force_nonblock) {
@@ -3951,8 +3959,10 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
                iovec = NULL;
        }
        ret = io_rw_init_file(req, FMODE_WRITE);
-       if (unlikely(ret))
+       if (unlikely(ret)) {
+               kfree(iovec);
                return ret;
+       }
        req->result = iov_iter_count(&s->iter);
 
        if (force_nonblock) {
@@ -4351,7 +4361,7 @@ static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
                return -EAGAIN;
 
        if (sp->flags & SPLICE_F_FD_IN_FIXED)
-               in = io_file_get_fixed(req, sp->splice_fd_in, IO_URING_F_UNLOCKED);
+               in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
        else
                in = io_file_get_normal(req, sp->splice_fd_in);
        if (!in) {
@@ -4393,7 +4403,7 @@ static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
                return -EAGAIN;
 
        if (sp->flags & SPLICE_F_FD_IN_FIXED)
-               in = io_file_get_fixed(req, sp->splice_fd_in, IO_URING_F_UNLOCKED);
+               in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
        else
                in = io_file_get_normal(req, sp->splice_fd_in);
        if (!in) {
@@ -5834,7 +5844,6 @@ static void io_poll_remove_entries(struct io_kiocb *req)
 static int io_poll_check_events(struct io_kiocb *req, bool locked)
 {
        struct io_ring_ctx *ctx = req->ctx;
-       struct io_poll_iocb *poll = io_poll_get_single(req);
        int v;
 
        /* req->task == current here, checking PF_EXITING is safe */
@@ -5851,17 +5860,17 @@ static int io_poll_check_events(struct io_kiocb *req, bool locked)
                        return -ECANCELED;
 
                if (!req->result) {
-                       struct poll_table_struct pt = { ._key = req->cflags };
+                       struct poll_table_struct pt = { ._key = req->apoll_events };
+                       unsigned flags = locked ? 0 : IO_URING_F_UNLOCKED;
 
-                       if (unlikely(!io_assign_file(req, IO_URING_F_UNLOCKED)))
-                               req->result = -EBADF;
-                       else
-                               req->result = vfs_poll(req->file, &pt) & req->cflags;
+                       if (unlikely(!io_assign_file(req, flags)))
+                               return -EBADF;
+                       req->result = vfs_poll(req->file, &pt) & req->apoll_events;
                }
 
                /* multishot, just fill a CQE and proceed */
-               if (req->result && !(req->cflags & EPOLLONESHOT)) {
-                       __poll_t mask = mangle_poll(req->result & poll->events);
+               if (req->result && !(req->apoll_events & EPOLLONESHOT)) {
+                       __poll_t mask = mangle_poll(req->result & req->apoll_events);
                        bool filled;
 
                        spin_lock(&ctx->completion_lock);
@@ -5939,7 +5948,7 @@ static void __io_poll_execute(struct io_kiocb *req, int mask, int events)
         * CPU. We want to avoid pulling in req->apoll->events for that
         * case.
         */
-       req->cflags = events;
+       req->apoll_events = events;
        if (req->opcode == IORING_OP_POLL_ADD)
                req->io_task_work.func = io_poll_task_func;
        else
@@ -6331,7 +6340,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
                return -EINVAL;
 
        io_req_set_refcount(req);
-       req->cflags = poll->events = io_poll_parse_events(sqe, flags);
+       req->apoll_events = poll->events = io_poll_parse_events(sqe, flags);
        return 0;
 }
 
@@ -6833,6 +6842,7 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
        up.nr = 0;
        up.tags = 0;
        up.resv = 0;
+       up.resv2 = 0;
 
        io_ring_submit_lock(ctx, needs_lock);
        ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
@@ -7088,9 +7098,9 @@ static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags)
                return true;
 
        if (req->flags & REQ_F_FIXED_FILE)
-               req->file = io_file_get_fixed(req, req->work.fd, issue_flags);
+               req->file = io_file_get_fixed(req, req->fd, issue_flags);
        else
-               req->file = io_file_get_normal(req, req->work.fd);
+               req->file = io_file_get_normal(req, req->fd);
        if (req->file)
                return true;
 
@@ -7104,13 +7114,14 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
        const struct cred *creds = NULL;
        int ret;
 
+       if (unlikely(!io_assign_file(req, issue_flags)))
+               return -EBADF;
+
        if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
                creds = override_creds(req->creds);
 
        if (!io_op_defs[req->opcode].audit_skip)
                audit_uring_entry(req->opcode);
-       if (unlikely(!io_assign_file(req, issue_flags)))
-               return -EBADF;
 
        switch (req->opcode) {
        case IORING_OP_NOP:
@@ -7271,16 +7282,18 @@ static void io_wq_submit_work(struct io_wq_work *work)
        if (timeout)
                io_queue_linked_timeout(timeout);
 
-       if (!io_assign_file(req, issue_flags)) {
-               err = -EBADF;
-               work->flags |= IO_WQ_WORK_CANCEL;
-       }
 
        /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
        if (work->flags & IO_WQ_WORK_CANCEL) {
+fail:
                io_req_task_queue_fail(req, err);
                return;
        }
+       if (!io_assign_file(req, issue_flags)) {
+               err = -EBADF;
+               work->flags |= IO_WQ_WORK_CANCEL;
+               goto fail;
+       }
 
        if (req->flags & REQ_F_FORCE_ASYNC) {
                bool opcode_poll = def->pollin || def->pollout;
@@ -7628,7 +7641,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
        if (io_op_defs[opcode].needs_file) {
                struct io_submit_state *state = &ctx->submit_state;
 
-               req->work.fd = READ_ONCE(sqe->fd);
+               req->fd = READ_ONCE(sqe->fd);
 
                /*
                 * Plug now if we have more than 2 IO left after this, and the
@@ -10524,6 +10537,11 @@ static int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
                        break;
                }
 
+               if (reg.resv) {
+                       ret = -EINVAL;
+                       break;
+               }
+
                if (reg.offset == -1U) {
                        start = 0;
                        end = IO_RINGFD_REG_MAX;
@@ -10570,7 +10588,7 @@ static int io_ringfd_unregister(struct io_ring_ctx *ctx, void __user *__arg,
                        ret = -EFAULT;
                        break;
                }
-               if (reg.offset >= IO_RINGFD_REG_MAX) {
+               if (reg.resv || reg.offset >= IO_RINGFD_REG_MAX) {
                        ret = -EINVAL;
                        break;
                }
@@ -10697,6 +10715,8 @@ static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz
                return -EINVAL;
        if (copy_from_user(&arg, argp, sizeof(arg)))
                return -EFAULT;
+       if (arg.pad)
+               return -EINVAL;
        *sig = u64_to_user_ptr(arg.sigmask);
        *argsz = arg.sigmask_sz;
        *ts = u64_to_user_ptr(arg.ts);
@@ -11178,7 +11198,8 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
                        IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
                        IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
                        IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
-                       IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP;
+                       IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
+                       IORING_FEAT_LINKED_FILE;
 
        if (copy_to_user(params, p, sizeof(*p))) {
                ret = -EFAULT;
@@ -11389,8 +11410,6 @@ static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
        __u32 tmp;
        int err;
 
-       if (up->resv)
-               return -EINVAL;
        if (check_add_overflow(up->offset, nr_args, &tmp))
                return -EOVERFLOW;
        err = io_rsrc_node_switch_start(ctx);
@@ -11416,6 +11435,8 @@ static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
        memset(&up, 0, sizeof(up));
        if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
                return -EFAULT;
+       if (up.resv || up.resv2)
+               return -EINVAL;
        return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
 }
 
@@ -11428,7 +11449,7 @@ static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
                return -EINVAL;
        if (copy_from_user(&up, arg, sizeof(up)))
                return -EFAULT;
-       if (!up.nr || up.resv)
+       if (!up.nr || up.resv || up.resv2)
                return -EINVAL;
        return __io_register_rsrc_update(ctx, type, &up, up.nr);
 }
index 5b9408e..ac7f067 100644 (file)
@@ -488,7 +488,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
        jbd2_journal_wait_updates(journal);
 
        commit_transaction->t_state = T_SWITCH;
-       write_unlock(&journal->j_state_lock);
 
        J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
                        journal->j_max_transaction_buffers);
@@ -508,6 +507,8 @@ void jbd2_journal_commit_transaction(journal_t *journal)
         * has reserved.  This is consistent with the existing behaviour
         * that multiple jbd2_journal_get_write_access() calls to the same
         * buffer are perfectly permissible.
+        * We use journal->j_state_lock here to serialize processing of
+        * t_reserved_list with eviction of buffers from journal_unmap_buffer().
         */
        while (commit_transaction->t_reserved_list) {
                jh = commit_transaction->t_reserved_list;
@@ -527,6 +528,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
                jbd2_journal_refile_buffer(journal, jh);
        }
 
+       write_unlock(&journal->j_state_lock);
        /*
         * Now try to drop any written-back buffers from the journal's
         * checkpoint lists.  We do this *before* commit because it potentially
index 60e7ac6..1e2076a 100644 (file)
@@ -158,19 +158,41 @@ out:
  * Return : windows path string or error
  */
 
-char *convert_to_nt_pathname(char *filename)
+char *convert_to_nt_pathname(struct ksmbd_share_config *share,
+                            struct path *path)
 {
-       char *ab_pathname;
+       char *pathname, *ab_pathname, *nt_pathname;
+       int share_path_len = share->path_sz;
 
-       if (strlen(filename) == 0)
-               filename = "\\";
+       pathname = kmalloc(PATH_MAX, GFP_KERNEL);
+       if (!pathname)
+               return ERR_PTR(-EACCES);
 
-       ab_pathname = kstrdup(filename, GFP_KERNEL);
-       if (!ab_pathname)
-               return NULL;
+       ab_pathname = d_path(path, pathname, PATH_MAX);
+       if (IS_ERR(ab_pathname)) {
+               nt_pathname = ERR_PTR(-EACCES);
+               goto free_pathname;
+       }
+
+       if (strncmp(ab_pathname, share->path, share_path_len)) {
+               nt_pathname = ERR_PTR(-EACCES);
+               goto free_pathname;
+       }
+
+       nt_pathname = kzalloc(strlen(&ab_pathname[share_path_len]) + 2, GFP_KERNEL);
+       if (!nt_pathname) {
+               nt_pathname = ERR_PTR(-ENOMEM);
+               goto free_pathname;
+       }
+       if (ab_pathname[share_path_len] == '\0')
+               strcpy(nt_pathname, "/");
+       strcat(nt_pathname, &ab_pathname[share_path_len]);
+
+       ksmbd_conv_path_to_windows(nt_pathname);
 
-       ksmbd_conv_path_to_windows(ab_pathname);
-       return ab_pathname;
+free_pathname:
+       kfree(pathname);
+       return nt_pathname;
 }
 
 int get_nlink(struct kstat *st)
index 253366b..aae2a25 100644 (file)
@@ -14,7 +14,8 @@ struct ksmbd_file;
 int match_pattern(const char *str, size_t len, const char *pattern);
 int ksmbd_validate_filename(char *filename);
 int parse_stream_name(char *filename, char **stream_name, int *s_type);
-char *convert_to_nt_pathname(char *filename);
+char *convert_to_nt_pathname(struct ksmbd_share_config *share,
+                            struct path *path);
 int get_nlink(struct kstat *st);
 void ksmbd_conv_path_to_unix(char *path);
 void ksmbd_strip_last_slash(char *path);
index 23871b1..8b55605 100644 (file)
@@ -1694,33 +1694,3 @@ out:
        read_unlock(&lease_list_lock);
        return ret_op;
 }
-
-int smb2_check_durable_oplock(struct ksmbd_file *fp,
-                             struct lease_ctx_info *lctx, char *name)
-{
-       struct oplock_info *opinfo = opinfo_get(fp);
-       int ret = 0;
-
-       if (opinfo && opinfo->is_lease) {
-               if (!lctx) {
-                       pr_err("open does not include lease\n");
-                       ret = -EBADF;
-                       goto out;
-               }
-               if (memcmp(opinfo->o_lease->lease_key, lctx->lease_key,
-                          SMB2_LEASE_KEY_SIZE)) {
-                       pr_err("invalid lease key\n");
-                       ret = -EBADF;
-                       goto out;
-               }
-               if (name && strcmp(fp->filename, name)) {
-                       pr_err("invalid name reconnect %s\n", name);
-                       ret = -EINVAL;
-                       goto out;
-               }
-       }
-out:
-       if (opinfo)
-               opinfo_put(opinfo);
-       return ret;
-}
index 0cf7a2b..0975344 100644 (file)
@@ -124,6 +124,4 @@ struct oplock_info *lookup_lease_in_table(struct ksmbd_conn *conn,
 int find_same_lease_key(struct ksmbd_session *sess, struct ksmbd_inode *ci,
                        struct lease_ctx_info *lctx);
 void destroy_lease_table(struct ksmbd_conn *conn);
-int smb2_check_durable_oplock(struct ksmbd_file *fp,
-                             struct lease_ctx_info *lctx, char *name);
 #endif /* __KSMBD_OPLOCK_H */
index 3bf6c56..16c803a 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/statfs.h>
 #include <linux/ethtool.h>
 #include <linux/falloc.h>
+#include <linux/mount.h>
 
 #include "glob.h"
 #include "smbfsctl.h"
@@ -2918,7 +2919,6 @@ int smb2_open(struct ksmbd_work *work)
                goto err_out;
        }
 
-       fp->filename = name;
        fp->cdoption = req->CreateDisposition;
        fp->daccess = daccess;
        fp->saccess = req->ShareAccess;
@@ -3270,14 +3270,13 @@ err_out1:
                if (!rsp->hdr.Status)
                        rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR;
 
-               if (!fp || !fp->filename)
-                       kfree(name);
                if (fp)
                        ksmbd_fd_put(work, fp);
                smb2_set_err_rsp(work);
                ksmbd_debug(SMB, "Error response: %x\n", rsp->hdr.Status);
        }
 
+       kfree(name);
        kfree(lc);
 
        return 0;
@@ -3895,8 +3894,6 @@ int smb2_query_dir(struct ksmbd_work *work)
                ksmbd_debug(SMB, "Search pattern is %s\n", srch_ptr);
        }
 
-       ksmbd_debug(SMB, "Directory name is %s\n", dir_fp->filename);
-
        if (srch_flag & SMB2_REOPEN || srch_flag & SMB2_RESTART_SCANS) {
                ksmbd_debug(SMB, "Restart directory scan\n");
                generic_file_llseek(dir_fp->filp, 0, SEEK_SET);
@@ -4390,9 +4387,9 @@ static int get_file_all_info(struct ksmbd_work *work,
                return -EACCES;
        }
 
-       filename = convert_to_nt_pathname(fp->filename);
-       if (!filename)
-               return -ENOMEM;
+       filename = convert_to_nt_pathname(work->tcon->share_conf, &fp->filp->f_path);
+       if (IS_ERR(filename))
+               return PTR_ERR(filename);
 
        inode = file_inode(fp->filp);
        generic_fillattr(file_mnt_user_ns(fp->filp), inode, &stat);
@@ -4999,15 +4996,17 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
        case FS_SECTOR_SIZE_INFORMATION:
        {
                struct smb3_fs_ss_info *info;
+               unsigned int sector_size =
+                       min_t(unsigned int, path.mnt->mnt_sb->s_blocksize, 4096);
 
                info = (struct smb3_fs_ss_info *)(rsp->Buffer);
 
-               info->LogicalBytesPerSector = cpu_to_le32(stfs.f_bsize);
+               info->LogicalBytesPerSector = cpu_to_le32(sector_size);
                info->PhysicalBytesPerSectorForAtomicity =
-                               cpu_to_le32(stfs.f_bsize);
-               info->PhysicalBytesPerSectorForPerf = cpu_to_le32(stfs.f_bsize);
+                               cpu_to_le32(sector_size);
+               info->PhysicalBytesPerSectorForPerf = cpu_to_le32(sector_size);
                info->FSEffPhysicalBytesPerSectorForAtomicity =
-                               cpu_to_le32(stfs.f_bsize);
+                               cpu_to_le32(sector_size);
                info->Flags = cpu_to_le32(SSINFO_FLAGS_ALIGNED_DEVICE |
                                    SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE);
                info->ByteOffsetForSectorAlignment = 0;
@@ -5683,8 +5682,7 @@ static int set_file_allocation_info(struct ksmbd_work *work,
                size = i_size_read(inode);
                rc = ksmbd_vfs_truncate(work, fp, alloc_blks * 512);
                if (rc) {
-                       pr_err("truncate failed! filename : %s, err %d\n",
-                              fp->filename, rc);
+                       pr_err("truncate failed!, err %d\n", rc);
                        return rc;
                }
                if (size < alloc_blks * 512)
@@ -5714,12 +5712,10 @@ static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
         * truncated range.
         */
        if (inode->i_sb->s_magic != MSDOS_SUPER_MAGIC) {
-               ksmbd_debug(SMB, "filename : %s truncated to newsize %lld\n",
-                           fp->filename, newsize);
+               ksmbd_debug(SMB, "truncated to newsize %lld\n", newsize);
                rc = ksmbd_vfs_truncate(work, fp, newsize);
                if (rc) {
-                       ksmbd_debug(SMB, "truncate failed! filename : %s err %d\n",
-                                   fp->filename, rc);
+                       ksmbd_debug(SMB, "truncate failed!, err %d\n", rc);
                        if (rc != -EAGAIN)
                                rc = -EBADF;
                        return rc;
@@ -5765,8 +5761,10 @@ static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,
        if (parent_fp) {
                if (parent_fp->daccess & FILE_DELETE_LE) {
                        pr_err("parent dir is opened with delete access\n");
+                       ksmbd_fd_put(work, parent_fp);
                        return -ESHARE;
                }
+               ksmbd_fd_put(work, parent_fp);
        }
 next:
        return smb2_rename(work, fp, user_ns, rename_info,
index 9cebb6b..dcdd07c 100644 (file)
@@ -398,8 +398,7 @@ int ksmbd_vfs_read(struct ksmbd_work *work, struct ksmbd_file *fp, size_t count,
 
        nbytes = kernel_read(filp, rbuf, count, pos);
        if (nbytes < 0) {
-               pr_err("smb read failed for (%s), err = %zd\n",
-                      fp->filename, nbytes);
+               pr_err("smb read failed, err = %zd\n", nbytes);
                return nbytes;
        }
 
@@ -875,8 +874,7 @@ int ksmbd_vfs_truncate(struct ksmbd_work *work,
 
        err = vfs_truncate(&filp->f_path, size);
        if (err)
-               pr_err("truncate failed for filename : %s err %d\n",
-                      fp->filename, err);
+               pr_err("truncate failed, err %d\n", err);
        return err;
 }
 
index 29c1db6..c4d59d2 100644 (file)
@@ -328,7 +328,6 @@ static void __ksmbd_close_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
                kfree(smb_lock);
        }
 
-       kfree(fp->filename);
        if (ksmbd_stream_fd(fp))
                kfree(fp->stream.name);
        kmem_cache_free(filp_cache, fp);
@@ -497,6 +496,7 @@ struct ksmbd_file *ksmbd_lookup_fd_inode(struct inode *inode)
        list_for_each_entry(lfp, &ci->m_fp_list, node) {
                if (inode == file_inode(lfp->filp)) {
                        atomic_dec(&ci->m_count);
+                       lfp = ksmbd_fp_get(lfp);
                        read_unlock(&ci->m_lock);
                        return lfp;
                }
index 36239ce..fcb1341 100644 (file)
@@ -62,7 +62,6 @@ struct ksmbd_inode {
 
 struct ksmbd_file {
        struct file                     *filp;
-       char                            *filename;
        u64                             persistent_id;
        u64                             volatile_id;
 
index 3f1829b..509657f 100644 (file)
@@ -3673,18 +3673,14 @@ static struct dentry *filename_create(int dfd, struct filename *name,
 {
        struct dentry *dentry = ERR_PTR(-EEXIST);
        struct qstr last;
+       bool want_dir = lookup_flags & LOOKUP_DIRECTORY;
+       unsigned int reval_flag = lookup_flags & LOOKUP_REVAL;
+       unsigned int create_flags = LOOKUP_CREATE | LOOKUP_EXCL;
        int type;
        int err2;
        int error;
-       bool is_dir = (lookup_flags & LOOKUP_DIRECTORY);
 
-       /*
-        * Note that only LOOKUP_REVAL and LOOKUP_DIRECTORY matter here. Any
-        * other flags passed in are ignored!
-        */
-       lookup_flags &= LOOKUP_REVAL;
-
-       error = filename_parentat(dfd, name, lookup_flags, path, &last, &type);
+       error = filename_parentat(dfd, name, reval_flag, path, &last, &type);
        if (error)
                return ERR_PTR(error);
 
@@ -3698,11 +3694,13 @@ static struct dentry *filename_create(int dfd, struct filename *name,
        /* don't fail immediately if it's r/o, at least try to report other errors */
        err2 = mnt_want_write(path->mnt);
        /*
-        * Do the final lookup.
+        * Do the final lookup.  Suppress 'create' if there is a trailing
+        * '/', and a directory wasn't requested.
         */
-       lookup_flags |= LOOKUP_CREATE | LOOKUP_EXCL;
+       if (last.name[last.len] && !want_dir)
+               create_flags = 0;
        inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
-       dentry = __lookup_hash(&last, path->dentry, lookup_flags);
+       dentry = __lookup_hash(&last, path->dentry, reval_flag | create_flags);
        if (IS_ERR(dentry))
                goto unlock;
 
@@ -3716,7 +3714,7 @@ static struct dentry *filename_create(int dfd, struct filename *name,
         * all is fine. Let's be bastards - you had / on the end, you've
         * been asking for (non-existent) directory. -ENOENT for you.
         */
-       if (unlikely(!is_dir && last.name[last.len])) {
+       if (unlikely(!create_flags)) {
                error = -ENOENT;
                goto fail;
        }
index a0a36bf..afe2b64 100644 (file)
@@ -4058,10 +4058,22 @@ static int mount_setattr_prepare(struct mount_kattr *kattr, struct mount *mnt)
        if (err) {
                struct mount *p;
 
-               for (p = mnt; p != m; p = next_mnt(p, mnt)) {
+               /*
+                * If we had to call mnt_hold_writers() MNT_WRITE_HOLD will
+                * be set in @mnt_flags. The loop unsets MNT_WRITE_HOLD for all
+                * mounts and needs to take care to include the first mount.
+                */
+               for (p = mnt; p; p = next_mnt(p, mnt)) {
                        /* If we had to hold writers unblock them. */
                        if (p->mnt.mnt_flags & MNT_WRITE_HOLD)
                                mnt_unhold_writers(p);
+
+                       /*
+                        * We're done once the first mount we changed got
+                        * MNT_WRITE_HOLD unset.
+                        */
+                       if (p == m)
+                               break;
                }
        }
        return err;
index c08882f..2c1b027 100644 (file)
@@ -236,6 +236,13 @@ nfsd_file_check_write_error(struct nfsd_file *nf)
        return filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err));
 }
 
+static void
+nfsd_file_flush(struct nfsd_file *nf)
+{
+       if (nf->nf_file && vfs_fsync(nf->nf_file, 1) != 0)
+               nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
+}
+
 static void
 nfsd_file_do_unhash(struct nfsd_file *nf)
 {
@@ -295,19 +302,15 @@ nfsd_file_put_noref(struct nfsd_file *nf)
 void
 nfsd_file_put(struct nfsd_file *nf)
 {
-       bool is_hashed;
-
        set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
-       if (refcount_read(&nf->nf_ref) > 2 || !nf->nf_file) {
+       if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags) == 0) {
+               nfsd_file_flush(nf);
                nfsd_file_put_noref(nf);
-               return;
+       } else {
+               nfsd_file_put_noref(nf);
+               if (nf->nf_file)
+                       nfsd_file_schedule_laundrette();
        }
-
-       filemap_flush(nf->nf_file->f_mapping);
-       is_hashed = test_bit(NFSD_FILE_HASHED, &nf->nf_flags) != 0;
-       nfsd_file_put_noref(nf);
-       if (is_hashed)
-               nfsd_file_schedule_laundrette();
        if (atomic_long_read(&nfsd_filecache_count) >= NFSD_FILE_LRU_LIMIT)
                nfsd_file_gc();
 }
@@ -328,6 +331,7 @@ nfsd_file_dispose_list(struct list_head *dispose)
        while(!list_empty(dispose)) {
                nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
                list_del(&nf->nf_lru);
+               nfsd_file_flush(nf);
                nfsd_file_put_noref(nf);
        }
 }
@@ -341,6 +345,7 @@ nfsd_file_dispose_list_sync(struct list_head *dispose)
        while(!list_empty(dispose)) {
                nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
                list_del(&nf->nf_lru);
+               nfsd_file_flush(nf);
                if (!refcount_dec_and_test(&nf->nf_ref))
                        continue;
                if (nfsd_file_free(nf))
index 367551b..b576080 100644 (file)
@@ -249,34 +249,34 @@ nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
        int w;
 
        if (!svcxdr_encode_stat(xdr, resp->status))
-               return 0;
+               return false;
 
        if (dentry == NULL || d_really_is_negative(dentry))
-               return 1;
+               return true;
        inode = d_inode(dentry);
 
        if (!svcxdr_encode_fattr(rqstp, xdr, &resp->fh, &resp->stat))
-               return 0;
+               return false;
        if (xdr_stream_encode_u32(xdr, resp->mask) < 0)
-               return 0;
+               return false;
 
        rqstp->rq_res.page_len = w = nfsacl_size(
                (resp->mask & NFS_ACL)   ? resp->acl_access  : NULL,
                (resp->mask & NFS_DFACL) ? resp->acl_default : NULL);
        while (w > 0) {
                if (!*(rqstp->rq_next_page++))
-                       return 1;
+                       return true;
                w -= PAGE_SIZE;
        }
 
        if (!nfs_stream_encode_acl(xdr, inode, resp->acl_access,
                                   resp->mask & NFS_ACL, 0))
-               return 0;
+               return false;
        if (!nfs_stream_encode_acl(xdr, inode, resp->acl_default,
                                   resp->mask & NFS_DFACL, NFS_ACL_DEFAULT))
-               return 0;
+               return false;
 
-       return 1;
+       return true;
 }
 
 /* ACCESS */
@@ -286,17 +286,17 @@ nfsaclsvc_encode_accessres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
        struct nfsd3_accessres *resp = rqstp->rq_resp;
 
        if (!svcxdr_encode_stat(xdr, resp->status))
-               return 0;
+               return false;
        switch (resp->status) {
        case nfs_ok:
                if (!svcxdr_encode_fattr(rqstp, xdr, &resp->fh, &resp->stat))
-                       return 0;
+                       return false;
                if (xdr_stream_encode_u32(xdr, resp->access) < 0)
-                       return 0;
+                       return false;
                break;
        }
 
-       return 1;
+       return true;
 }
 
 /*
index 9648ac1..e140ea1 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -804,7 +804,7 @@ struct pipe_inode_info *alloc_pipe_info(void)
        if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
                goto out_revert_acct;
 
-       pipe->bufs = kvcalloc(pipe_bufs, sizeof(struct pipe_buffer),
+       pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
                             GFP_KERNEL_ACCOUNT);
 
        if (pipe->bufs) {
@@ -849,7 +849,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
 #endif
        if (pipe->tmp_page)
                __free_page(pipe->tmp_page);
-       kvfree(pipe->bufs);
+       kfree(pipe->bufs);
        kfree(pipe);
 }
 
@@ -1264,7 +1264,8 @@ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
        if (nr_slots < n)
                return -EBUSY;
 
-       bufs = kvcalloc(nr_slots, sizeof(*bufs), GFP_KERNEL_ACCOUNT);
+       bufs = kcalloc(nr_slots, sizeof(*bufs),
+                      GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
        if (unlikely(!bufs))
                return -ENOMEM;
 
@@ -1291,7 +1292,7 @@ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
        head = n;
        tail = 0;
 
-       kvfree(pipe->bufs);
+       kfree(pipe->bufs);
        pipe->bufs = bufs;
        pipe->ring_size = nr_slots;
        if (pipe->max_usage > nr_slots)
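
The pipe.c hunks above switch the buffer-ring array back to kcalloc(), both at pipe creation and in pipe_resize_ring(). A hedged userspace sketch of the call that reaches the resize path (illustrative only).

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];

	if (pipe(fds) < 0) {
		perror("pipe");
		return 1;
	}
	printf("default capacity: %d bytes\n", fcntl(fds[0], F_GETPIPE_SZ));

	/* Ask for 1 MiB; the kernel rounds up to a power-of-two page count. */
	int newsz = fcntl(fds[0], F_SETPIPE_SZ, 1 << 20);
	if (newsz < 0)
		perror("F_SETPIPE_SZ");
	else
		printf("resized capacity: %d bytes\n", newsz);

	close(fds[0]);
	close(fds[1]);
	return 0;
}
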
index 80acb68..962d324 100644 (file)
@@ -759,9 +759,14 @@ static void posix_acl_fix_xattr_userns(
 }
 
 void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
+                                  struct inode *inode,
                                   void *value, size_t size)
 {
        struct user_namespace *user_ns = current_user_ns();
+
+       /* Leave ids untouched on non-idmapped mounts. */
+       if (no_idmapping(mnt_userns, i_user_ns(inode)))
+               mnt_userns = &init_user_ns;
        if ((user_ns == &init_user_ns) && (mnt_userns == &init_user_ns))
                return;
        posix_acl_fix_xattr_userns(&init_user_ns, user_ns, mnt_userns, value,
@@ -769,9 +774,14 @@ void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
 }
 
 void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns,
+                                struct inode *inode,
                                 void *value, size_t size)
 {
        struct user_namespace *user_ns = current_user_ns();
+
+       /* Leave ids untouched on non-idmapped mounts. */
+       if (no_idmapping(mnt_userns, i_user_ns(inode)))
+               mnt_userns = &init_user_ns;
        if ((user_ns == &init_user_ns) && (mnt_userns == &init_user_ns))
                return;
        posix_acl_fix_xattr_userns(user_ns, &init_user_ns, mnt_userns, value,
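
The posix_acl_fix_xattr_*() changes above skip uid/gid translation when the mount is not idmapped, affecting the raw ACL blob as it crosses the user/kernel boundary. A hedged userspace sketch of fetching that blob (illustrative only; the file name is hypothetical).

#include <stdio.h>
#include <sys/xattr.h>

int main(void)
{
	char buf[1024];
	ssize_t n = getxattr("somefile", "system.posix_acl_access",
			     buf, sizeof(buf));

	if (n < 0) {
		perror("getxattr");    /* ENODATA if no ACL is set */
		return 1;
	}
	printf("ACL xattr blob is %zd bytes\n", n);
	return 0;
}
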
index 7f734be..5c2c944 100644 (file)
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -348,9 +348,6 @@ SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, stat
 #  define choose_32_64(a,b) b
 #endif
 
-#define valid_dev(x)  choose_32_64(old_valid_dev(x),true)
-#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)
-
 #ifndef INIT_STRUCT_STAT_PADDING
 #  define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
 #endif
@@ -359,7 +356,9 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
 {
        struct stat tmp;
 
-       if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
+       if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
+               return -EOVERFLOW;
+       if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
                return -EOVERFLOW;
 #if BITS_PER_LONG == 32
        if (stat->size > MAX_NON_LFS)
@@ -367,7 +366,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
 #endif
 
        INIT_STRUCT_STAT_PADDING(tmp);
-       tmp.st_dev = encode_dev(stat->dev);
+       tmp.st_dev = new_encode_dev(stat->dev);
        tmp.st_ino = stat->ino;
        if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
                return -EOVERFLOW;
@@ -377,7 +376,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
                return -EOVERFLOW;
        SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
        SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
-       tmp.st_rdev = encode_dev(stat->rdev);
+       tmp.st_rdev = new_encode_dev(stat->rdev);
        tmp.st_size = stat->size;
        tmp.st_atime = stat->atime.tv_sec;
        tmp.st_mtime = stat->mtime.tv_sec;
@@ -665,11 +664,13 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
 {
        struct compat_stat tmp;
 
-       if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
+       if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
+               return -EOVERFLOW;
+       if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
                return -EOVERFLOW;
 
        memset(&tmp, 0, sizeof(tmp));
-       tmp.st_dev = old_encode_dev(stat->dev);
+       tmp.st_dev = new_encode_dev(stat->dev);
        tmp.st_ino = stat->ino;
        if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
                return -EOVERFLOW;
@@ -679,7 +680,7 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
                return -EOVERFLOW;
        SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
        SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
-       tmp.st_rdev = old_encode_dev(stat->rdev);
+       tmp.st_rdev = new_encode_dev(stat->rdev);
        if ((u64) stat->size > MAX_NON_LFS)
                return -EOVERFLOW;
        tmp.st_size = stat->size;
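For context on the two hunks above: st_dev/st_rdev are now range-checked with old_valid_dev() only when the destination field is narrower than 32 bits, and are always packed with new_encode_dev(). A rough sketch of those helpers as commonly defined in include/linux/kdev_t.h, shown with hypothetical "_sketch" names purely for illustration (check the header for the authoritative definitions):

        static inline bool old_valid_dev_sketch(dev_t dev)
        {
                return MAJOR(dev) < 256 && MINOR(dev) < 256;    /* fits the legacy 16-bit format */
        }

        static inline u32 new_encode_dev_sketch(dev_t dev)
        {
                /* low byte of minor, 12 bits of major, remaining minor bits above */
                return (MINOR(dev) & 0xff) | (MAJOR(dev) << 8) | ((MINOR(dev) & ~0xff) << 12);
        }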
index 5c8c517..9980451 100644 (file)
@@ -569,7 +569,8 @@ setxattr(struct user_namespace *mnt_userns, struct dentry *d,
                }
                if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
                    (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
-                       posix_acl_fix_xattr_from_user(mnt_userns, kvalue, size);
+                       posix_acl_fix_xattr_from_user(mnt_userns, d_inode(d),
+                                                     kvalue, size);
        }
 
        error = vfs_setxattr(mnt_userns, d, kname, kvalue, size, flags);
@@ -667,7 +668,8 @@ getxattr(struct user_namespace *mnt_userns, struct dentry *d,
        if (error > 0) {
                if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
                    (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
-                       posix_acl_fix_xattr_to_user(mnt_userns, kvalue, error);
+                       posix_acl_fix_xattr_to_user(mnt_userns, d_inode(d),
+                                                   kvalue, error);
                if (size && copy_to_user(value, kvalue, error))
                        error = -EFAULT;
        } else if (error == -ERANGE && size >= XATTR_SIZE_MAX) {
index 8fc6373..df30f11 100644 (file)
@@ -143,7 +143,7 @@ static inline void put_unaligned_be48(const u64 val, void *p)
 
 static inline u64 __get_unaligned_be48(const u8 *p)
 {
-       return (u64)p[0] << 40 | (u64)p[1] << 32 | p[2] << 24 |
+       return (u64)p[0] << 40 | (u64)p[1] << 32 | (u64)p[2] << 24 |
                p[3] << 16 | p[4] << 8 | p[5];
 }
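The (u64) cast added to p[2] above matters because p[2] is promoted to int before the shift: for byte values >= 0x80 the result of << 24 has the sign bit set, and widening that to u64 sign-extends it, corrupting bits 32-63. A minimal illustration:

        u8 b = 0x80;
        u64 bad  = b << 24;        /* int 0x80000000 sign-extends to 0xffffffff80000000 */
        u64 good = (u64)b << 24;   /* stays 0x0000000080000000 as intended */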
 
diff --git a/include/linux/dma-buf-map.h b/include/linux/dma-buf-map.h
deleted file mode 100644 (file)
index 19fa0b5..0000000
+++ /dev/null
@@ -1,266 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Pointer to dma-buf-mapped memory, plus helpers.
- */
-
-#ifndef __DMA_BUF_MAP_H__
-#define __DMA_BUF_MAP_H__
-
-#include <linux/io.h>
-#include <linux/string.h>
-
-/**
- * DOC: overview
- *
- * Calling dma-buf's vmap operation returns a pointer to the buffer's memory.
- * Depending on the location of the buffer, users may have to access it with
- * I/O operations or memory load/store operations. For example, copying to
- * system memory could be done with memcpy(), copying to I/O memory would be
- * done with memcpy_toio().
- *
- * .. code-block:: c
- *
- *     void *vaddr = ...; // pointer to system memory
- *     memcpy(vaddr, src, len);
- *
- *     void *vaddr_iomem = ...; // pointer to I/O memory
- *     memcpy_toio(vaddr_iomem, src, len);
- *
- * When using dma-buf's vmap operation, the returned pointer is encoded as
- * :c:type:`struct dma_buf_map <dma_buf_map>`.
- * :c:type:`struct dma_buf_map <dma_buf_map>` stores the buffer's address in
- * system or I/O memory and a flag that signals the required method of
- * accessing the buffer. Use the returned instance and the helper functions
- * to access the buffer's memory in the correct way.
- *
- * The type :c:type:`struct dma_buf_map <dma_buf_map>` and its helpers are
- * actually independent from the dma-buf infrastructure. When sharing buffers
- * among devices, drivers have to know the location of the memory to access
- * the buffers in a safe way. :c:type:`struct dma_buf_map <dma_buf_map>`
- * solves this problem for dma-buf and its users. If other drivers or
- * sub-systems require similar functionality, the type could be generalized
- * and moved to a more prominent header file.
- *
- * Open-coding access to :c:type:`struct dma_buf_map <dma_buf_map>` is
- * considered bad style. Rather than accessing its fields directly, use one
- * of the provided helper functions, or implement your own. For example,
- * instances of :c:type:`struct dma_buf_map <dma_buf_map>` can be initialized
- * statically with DMA_BUF_MAP_INIT_VADDR(), or at runtime with
- * dma_buf_map_set_vaddr(). These helpers will set an address in system memory.
- *
- * .. code-block:: c
- *
- *     struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(0xdeadbeaf);
- *
- *     dma_buf_map_set_vaddr(&map, 0xdeadbeaf);
- *
- * To set an address in I/O memory, use dma_buf_map_set_vaddr_iomem().
- *
- * .. code-block:: c
- *
- *     dma_buf_map_set_vaddr_iomem(&map, 0xdeadbeaf);
- *
- * Instances of struct dma_buf_map do not have to be cleaned up, but
- * can be cleared to NULL with dma_buf_map_clear(). Cleared mappings
- * always refer to system memory.
- *
- * .. code-block:: c
- *
- *     dma_buf_map_clear(&map);
- *
- * Test if a mapping is valid with either dma_buf_map_is_set() or
- * dma_buf_map_is_null().
- *
- * .. code-block:: c
- *
- *     if (dma_buf_map_is_set(&map) != dma_buf_map_is_null(&map))
- *             // always true
- *
- * Instances of :c:type:`struct dma_buf_map <dma_buf_map>` can be compared
- * for equality with dma_buf_map_is_equal(). Mappings that point to different
- * memory spaces, system or I/O, are never equal. That's even true if both
- * spaces are located in the same address space, both mappings contain the
- * same address value, or both mappings refer to NULL.
- *
- * .. code-block:: c
- *
- *     struct dma_buf_map sys_map; // refers to system memory
- *     struct dma_buf_map io_map; // refers to I/O memory
- *
- *     if (dma_buf_map_is_equal(&sys_map, &io_map))
- *             // always false
- *
- * A set up instance of struct dma_buf_map can be used to access or manipulate
- * the buffer memory. Depending on the location of the memory, the provided
- * helpers will pick the correct operations. Data can be copied into the memory
- * with dma_buf_map_memcpy_to(). The address can be manipulated with
- * dma_buf_map_incr().
- *
- * .. code-block:: c
- *
- *     const void *src = ...; // source buffer
- *     size_t len = ...; // length of src
- *
- *     dma_buf_map_memcpy_to(&map, src, len);
- *     dma_buf_map_incr(&map, len); // go to first byte after the memcpy
- */
-
-/**
- * struct dma_buf_map - Pointer to vmap'ed dma-buf memory.
- * @vaddr_iomem:       The buffer's address if in I/O memory
- * @vaddr:             The buffer's address if in system memory
- * @is_iomem:          True if the dma-buf memory is located in I/O
- *                     memory, or false otherwise.
- */
-struct dma_buf_map {
-       union {
-               void __iomem *vaddr_iomem;
-               void *vaddr;
-       };
-       bool is_iomem;
-};
-
-/**
- * DMA_BUF_MAP_INIT_VADDR - Initializes struct dma_buf_map to an address in system memory
- * @vaddr_:    A system-memory address
- */
-#define DMA_BUF_MAP_INIT_VADDR(vaddr_) \
-       { \
-               .vaddr = (vaddr_), \
-               .is_iomem = false, \
-       }
-
-/**
- * dma_buf_map_set_vaddr - Sets a dma-buf mapping structure to an address in system memory
- * @map:       The dma-buf mapping structure
- * @vaddr:     A system-memory address
- *
- * Sets the address and clears the I/O-memory flag.
- */
-static inline void dma_buf_map_set_vaddr(struct dma_buf_map *map, void *vaddr)
-{
-       map->vaddr = vaddr;
-       map->is_iomem = false;
-}
-
-/**
- * dma_buf_map_set_vaddr_iomem - Sets a dma-buf mapping structure to an address in I/O memory
- * @map:               The dma-buf mapping structure
- * @vaddr_iomem:       An I/O-memory address
- *
- * Sets the address and the I/O-memory flag.
- */
-static inline void dma_buf_map_set_vaddr_iomem(struct dma_buf_map *map,
-                                              void __iomem *vaddr_iomem)
-{
-       map->vaddr_iomem = vaddr_iomem;
-       map->is_iomem = true;
-}
-
-/**
- * dma_buf_map_is_equal - Compares two dma-buf mapping structures for equality
- * @lhs:       The dma-buf mapping structure
- * @rhs:       A dma-buf mapping structure to compare with
- *
- * Two dma-buf mapping structures are equal if they both refer to the same type of memory
- * and to the same address within that memory.
- *
- * Returns:
- * True if both structures are equal, or false otherwise.
- */
-static inline bool dma_buf_map_is_equal(const struct dma_buf_map *lhs,
-                                       const struct dma_buf_map *rhs)
-{
-       if (lhs->is_iomem != rhs->is_iomem)
-               return false;
-       else if (lhs->is_iomem)
-               return lhs->vaddr_iomem == rhs->vaddr_iomem;
-       else
-               return lhs->vaddr == rhs->vaddr;
-}
-
-/**
- * dma_buf_map_is_null - Tests for a dma-buf mapping to be NULL
- * @map:       The dma-buf mapping structure
- *
- * Depending on the state of struct dma_buf_map.is_iomem, tests if the
- * mapping is NULL.
- *
- * Returns:
- * True if the mapping is NULL, or false otherwise.
- */
-static inline bool dma_buf_map_is_null(const struct dma_buf_map *map)
-{
-       if (map->is_iomem)
-               return !map->vaddr_iomem;
-       return !map->vaddr;
-}
-
-/**
- * dma_buf_map_is_set - Tests if the dma-buf mapping has been set
- * @map:       The dma-buf mapping structure
- *
- * Depending on the state of struct dma_buf_map.is_iomem, tests if the
- * mapping has been set.
- *
- * Returns:
- * True if the mapping has been set, or false otherwise.
- */
-static inline bool dma_buf_map_is_set(const struct dma_buf_map *map)
-{
-       return !dma_buf_map_is_null(map);
-}
-
-/**
- * dma_buf_map_clear - Clears a dma-buf mapping structure
- * @map:       The dma-buf mapping structure
- *
- * Clears all fields to zero, including struct dma_buf_map.is_iomem. So
- * mapping structures that were set to point to I/O memory are reset for
- * system memory. Pointers are cleared to NULL. This is the default.
- */
-static inline void dma_buf_map_clear(struct dma_buf_map *map)
-{
-       if (map->is_iomem) {
-               map->vaddr_iomem = NULL;
-               map->is_iomem = false;
-       } else {
-               map->vaddr = NULL;
-       }
-}
-
-/**
- * dma_buf_map_memcpy_to - Memcpy into dma-buf mapping
- * @dst:       The dma-buf mapping structure
- * @src:       The source buffer
- * @len:       The number of bytes in src
- *
- * Copies data into a dma-buf mapping. The source buffer is in system
- * memory. Depending on the buffer's location, the helper picks the correct
- * method of accessing the memory.
- */
-static inline void dma_buf_map_memcpy_to(struct dma_buf_map *dst, const void *src, size_t len)
-{
-       if (dst->is_iomem)
-               memcpy_toio(dst->vaddr_iomem, src, len);
-       else
-               memcpy(dst->vaddr, src, len);
-}
-
-/**
- * dma_buf_map_incr - Increments the address stored in a dma-buf mapping
- * @map:       The dma-buf mapping structure
- * @incr:      The number of bytes to increment
- *
- * Increments the address stored in a dma-buf mapping. Depending on the
- * buffer's location, the correct value will be updated.
- */
-static inline void dma_buf_map_incr(struct dma_buf_map *map, size_t incr)
-{
-       if (map->is_iomem)
-               map->vaddr_iomem += incr;
-       else
-               map->vaddr += incr;
-}
-
-#endif /* __DMA_BUF_MAP_H__ */
index 6727fb0..e255390 100644 (file)
@@ -573,7 +573,6 @@ int fscache_write(struct netfs_cache_resources *cres,
 
 /**
  * fscache_clear_page_bits - Clear the PG_fscache bits from a set of pages
- * @cookie: The cookie representing the cache object
  * @mapping: The netfs inode to use as the source
  * @start: The start position in @mapping
  * @len: The amount of data to unlock
@@ -582,8 +581,7 @@ int fscache_write(struct netfs_cache_resources *cres,
  * Clear the PG_fscache flag from a sequence of pages and wake up anyone who's
  * waiting.
  */
-static inline void fscache_clear_page_bits(struct fscache_cookie *cookie,
-                                          struct address_space *mapping,
+static inline void fscache_clear_page_bits(struct address_space *mapping,
                                           loff_t start, size_t len,
                                           bool caching)
 {
index c3aa8b3..e71f6e1 100644 (file)
@@ -688,7 +688,7 @@ void acpi_dev_remove_driver_gpios(struct acpi_device *adev);
 int devm_acpi_dev_add_driver_gpios(struct device *dev,
                                   const struct acpi_gpio_mapping *gpios);
 
-struct gpio_desc *acpi_get_and_request_gpiod(char *path, int pin, char *label);
+struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin, char *label);
 
 #else  /* CONFIG_GPIOLIB && CONFIG_ACPI */
 
@@ -705,6 +705,12 @@ static inline int devm_acpi_dev_add_driver_gpios(struct device *dev,
        return -ENXIO;
 }
 
+static inline struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin,
+                                                          char *label)
+{
+       return ERR_PTR(-ENOSYS);
+}
+
 #endif /* CONFIG_GPIOLIB && CONFIG_ACPI */
 
 
index 53c1b60..bc97c84 100644 (file)
@@ -169,6 +169,7 @@ long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
                                                long freed);
 bool isolate_huge_page(struct page *page, struct list_head *list);
 int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
+int get_huge_page_for_hwpoison(unsigned long pfn, int flags);
 void putback_active_hugepage(struct page *page);
 void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
 void free_huge_page(struct page *page);
@@ -378,6 +379,11 @@ static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
        return 0;
 }
 
+static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags)
+{
+       return 0;
+}
+
 static inline void putback_active_hugepage(struct page *page)
 {
 }
@@ -513,6 +519,11 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long flags);
 #endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
 
+unsigned long
+generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+                                 unsigned long len, unsigned long pgoff,
+                                 unsigned long flags);
+
 /*
  * hugetlb page specific state flags.  These flags are located in page.private
  * of the hugetlb head page.  Functions created via the below macros should be
index 08ba599..a890428 100644 (file)
 }                                      \
 )
 
-/**
- * lower_48_bits() - return bits 0-47 of a number
- * @n: the number we're accessing
- */
-static inline u64 lower_48_bits(u64 n)
-{
-       return n & ((1ull << 48) - 1);
-}
-
 /**
  * upper_32_bits - return bits 32-63 of a number
  * @n: the number we're accessing
index f49e642..726857a 100644 (file)
@@ -204,6 +204,22 @@ static __always_inline __must_check bool kfence_free(void *addr)
  */
 bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs);
 
+#ifdef CONFIG_PRINTK
+struct kmem_obj_info;
+/**
+ * __kfence_obj_info() - fill kmem_obj_info struct
+ * @kpp: kmem_obj_info to be filled
+ * @object: the object
+ *
+ * Return:
+ * * false - not a KFENCE object
+ * * true - a KFENCE object, filled @kpp
+ *
+ * Copies information to @kpp for KFENCE objects.
+ */
+bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
+#endif
+
 #else /* CONFIG_KFENCE */
 
 static inline bool is_kfence_address(const void *addr) { return false; }
@@ -221,6 +237,14 @@ static inline bool __must_check kfence_handle_page_fault(unsigned long addr, boo
        return false;
 }
 
+#ifdef CONFIG_PRINTK
+struct kmem_obj_info;
+static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+       return false;
+}
+#endif
+
 #endif
 
 #endif /* _LINUX_KFENCE_H */
index 3f9b22c..34eed5f 100644 (file)
@@ -315,7 +315,10 @@ struct kvm_vcpu {
        int cpu;
        int vcpu_id; /* id given by userspace at creation */
        int vcpu_idx; /* index in kvm->vcpus array */
-       int srcu_idx;
+       int ____srcu_idx; /* Don't use this directly.  You've been warned. */
+#ifdef CONFIG_PROVE_RCU
+       int srcu_depth;
+#endif
        int mode;
        u64 requests;
        unsigned long guest_debug;
@@ -840,6 +843,25 @@ static inline void kvm_vm_bugged(struct kvm *kvm)
        unlikely(__ret);                                        \
 })
 
+static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PROVE_RCU
+       WARN_ONCE(vcpu->srcu_depth++,
+                 "KVM: Illegal vCPU srcu_idx LOCK, depth=%d", vcpu->srcu_depth - 1);
+#endif
+       vcpu->____srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+}
+
+static inline void kvm_vcpu_srcu_read_unlock(struct kvm_vcpu *vcpu)
+{
+       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->____srcu_idx);
+
+#ifdef CONFIG_PROVE_RCU
+       WARN_ONCE(--vcpu->srcu_depth,
+                 "KVM: Illegal vCPU srcu_idx UNLOCK, depth=%d", vcpu->srcu_depth);
+#endif
+}
+
 static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
 {
        return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
@@ -2197,6 +2219,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
 void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
                                            unsigned long start, unsigned long end);
 
+void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
+
 #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
 int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
 #else
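A hedged usage sketch of the kvm_vcpu_srcu_read_lock()/kvm_vcpu_srcu_read_unlock() helpers added above; the caller below is hypothetical and only shows the intended pairing (the SRCU index is stashed in the vCPU instead of being held by the caller):

        static void vcpu_touch_memslots(struct kvm_vcpu *vcpu)         /* hypothetical caller */
        {
                kvm_vcpu_srcu_read_lock(vcpu);          /* takes kvm->srcu, records ____srcu_idx */
                /* ... read memslots or other SRCU-protected KVM state ... */
                kvm_vcpu_srcu_read_unlock(vcpu);        /* releases using the stored index */
        }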
index a68dce3..89b1472 100644 (file)
@@ -1012,6 +1012,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
 }
 
 void mem_cgroup_flush_stats(void);
+void mem_cgroup_flush_stats_delayed(void);
 
 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                              int val);
@@ -1455,6 +1456,10 @@ static inline void mem_cgroup_flush_stats(void)
 {
 }
 
+static inline void mem_cgroup_flush_stats_delayed(void)
+{
+}
+
 static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
                                            enum node_stat_item idx, int val)
 {
index e34edb7..9f44254 100644 (file)
@@ -3197,6 +3197,14 @@ extern int sysctl_memory_failure_recovery;
 extern void shake_page(struct page *p);
 extern atomic_long_t num_poisoned_pages __read_mostly;
 extern int soft_offline_page(unsigned long pfn, int flags);
+#ifdef CONFIG_MEMORY_FAILURE
+extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags);
+#else
+static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags)
+{
+       return 0;
+}
+#endif
 
 #ifndef arch_memory_failure
 static inline int arch_memory_failure(unsigned long pfn, int flags)
index 060e8d2..1766e1d 100644 (file)
@@ -34,15 +34,19 @@ posix_acl_xattr_count(size_t size)
 
 #ifdef CONFIG_FS_POSIX_ACL
 void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
+                                  struct inode *inode,
                                   void *value, size_t size);
 void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns,
+                                  struct inode *inode,
                                 void *value, size_t size);
 #else
 static inline void posix_acl_fix_xattr_from_user(struct user_namespace *mnt_userns,
+                                                struct inode *inode,
                                                 void *value, size_t size)
 {
 }
 static inline void posix_acl_fix_xattr_to_user(struct user_namespace *mnt_userns,
+                                              struct inode *inode,
                                               void *value, size_t size)
 {
 }
index d5e3c00..a8911b1 100644 (file)
@@ -1443,6 +1443,7 @@ struct task_struct {
        int                             pagefault_disabled;
 #ifdef CONFIG_MMU
        struct task_struct              *oom_reaper_list;
+       struct timer_list               oom_reaper_timer;
 #endif
 #ifdef CONFIG_VMAP_STACK
        struct vm_struct                *stack_vm_area;
index a80356e..8cd975a 100644 (file)
@@ -136,6 +136,14 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
 #endif /* CONFIG_MEMCG */
 
 #ifdef CONFIG_MMU
+#ifndef arch_get_mmap_end
+#define arch_get_mmap_end(addr, len, flags)    (TASK_SIZE)
+#endif
+
+#ifndef arch_get_mmap_base
+#define arch_get_mmap_base(addr, base) (base)
+#endif
+
 extern void arch_pick_mmap_layout(struct mm_struct *mm,
                                  struct rlimit *rlim_stack);
 extern unsigned long
@@ -145,6 +153,15 @@ extern unsigned long
 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
                          unsigned long len, unsigned long pgoff,
                          unsigned long flags);
+
+unsigned long
+generic_get_unmapped_area(struct file *filp, unsigned long addr,
+                         unsigned long len, unsigned long pgoff,
+                         unsigned long flags);
+unsigned long
+generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
+                                 unsigned long len, unsigned long pgoff,
+                                 unsigned long flags);
 #else
 static inline void arch_pick_mmap_layout(struct mm_struct *mm,
                                         struct rlimit *rlim_stack) {}
index a5dda49..217711f 100644 (file)
@@ -395,6 +395,7 @@ struct svc_deferred_req {
        size_t                  addrlen;
        struct sockaddr_storage daddr;  /* where reply must come from */
        size_t                  daddrlen;
+       void                    *xprt_ctxt;
        struct cache_deferred_req handle;
        size_t                  xprt_hlen;
        int                     argslen;
index a4b1af5..248f4ac 100644 (file)
@@ -59,6 +59,15 @@ struct crc64_pi_tuple {
        __u8   ref_tag[6];
 };
 
+/**
+ * lower_48_bits() - return bits 0-47 of a number
+ * @n: the number we're accessing
+ */
+static inline u64 lower_48_bits(u64 n)
+{
+       return n & ((1ull << 48) - 1);
+}
+
 static inline u64 ext_pi_ref_tag(struct request *rq)
 {
        unsigned int shift = ilog2(queue_logical_block_size(rq->q));
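lower_48_bits() above masks with (1ull << 48) - 1, i.e. 0x0000ffffffffffff, matching the 6-byte ref_tag field. A quick worked example:

        u64 v  = 0x0123456789abcdefULL;
        u64 lo = lower_48_bits(v);      /* 0x0000456789abcdef: the top 16 bits are cleared */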
index 059b18e..5745c90 100644 (file)
@@ -75,7 +75,7 @@
  * By default we use get_cycles() for this purpose, but individual
  * architectures may override this in their asm/timex.h header file.
  */
-#define random_get_entropy()   get_cycles()
+#define random_get_entropy()   ((unsigned long)get_cycles())
 #endif
 
 /*
index 74a4a0f..48f2dd3 100644 (file)
@@ -133,6 +133,8 @@ struct vfio_pci_core_device {
        struct mutex            ioeventfds_lock;
        struct list_head        ioeventfds_list;
        struct vfio_pci_vf_token        *vf_token;
+       struct list_head                sriov_pfs_item;
+       struct vfio_pci_core_device     *sriov_pf_core_dev;
        struct notifier_block   nb;
        struct mutex            vma_lock;
        struct list_head        vma_list;
index 3b1df7d..b159c27 100644 (file)
@@ -26,7 +26,7 @@ struct notifier_block;                /* in notifier.h */
 #define VM_KASAN               0x00000080      /* has allocated kasan shadow memory */
 #define VM_FLUSH_RESET_PERMS   0x00000100      /* reset direct map and flush TLB on unmap, can't be freed in atomic context */
 #define VM_MAP_PUT_PAGES       0x00000200      /* put pages and free array in vfree */
-#define VM_NO_HUGE_VMAP                0x00000400      /* force PAGE_SIZE pte mapping */
+#define VM_ALLOW_HUGE_VMAP     0x00000400      /* Allow for huge pages on archs with HAVE_ARCH_HUGE_VMALLOC */
 
 #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
        !defined(CONFIG_KASAN_VMALLOC)
@@ -153,7 +153,7 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
                        const void *caller) __alloc_size(1);
 void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
                int node, const void *caller) __alloc_size(1);
-void *vmalloc_no_huge(unsigned long size) __alloc_size(1);
+void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
 
 extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
 extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2);
index 90cd02f..9c5637d 100644 (file)
@@ -4,8 +4,6 @@
 
 #include <linux/skbuff.h>
 
-#define ESP_SKB_FRAG_MAXSIZE (PAGE_SIZE << SKB_FRAG_PAGE_ORDER)
-
 struct ip_esp_hdr;
 
 static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb)
index aa33e10..9f65f1b 100644 (file)
@@ -59,6 +59,8 @@ struct flow_dissector_key_vlan {
                __be16  vlan_tci;
        };
        __be16  vlan_tpid;
+       __be16  vlan_eth_type;
+       u16     padding;
 };
 
 struct flow_dissector_mpls_lse {
index 0219fe9..88dee57 100644 (file)
@@ -243,11 +243,18 @@ static inline __be32 tunnel_id_to_key32(__be64 tun_id)
 static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
                                       int proto,
                                       __be32 daddr, __be32 saddr,
-                                      __be32 key, __u8 tos, int oif,
+                                      __be32 key, __u8 tos,
+                                      struct net *net, int oif,
                                       __u32 mark, __u32 tun_inner_hash)
 {
        memset(fl4, 0, sizeof(*fl4));
-       fl4->flowi4_oif = oif;
+
+       if (oif) {
+               fl4->flowi4_l3mdev = l3mdev_master_upper_ifindex_by_index_rcu(net, oif);
+               /* Legacy VRF/l3mdev use case */
+               fl4->flowi4_oif = fl4->flowi4_l3mdev ? 0 : oif;
+       }
+
        fl4->daddr = daddr;
        fl4->saddr = saddr;
        fl4->flowi4_tos = tos;
index 3d83b64..b4af483 100644 (file)
@@ -75,8 +75,8 @@ struct netns_ipv6 {
        struct list_head        fib6_walkers;
        rwlock_t                fib6_walker_lock;
        spinlock_t              fib6_gc_lock;
-       unsigned int             ip6_rt_gc_expire;
-       unsigned long            ip6_rt_last_gc;
+       atomic_t                ip6_rt_gc_expire;
+       unsigned long           ip6_rt_last_gc;
        unsigned char           flowlabel_has_excl;
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
        bool                    fib6_has_custom_rules;
index e76c946..d0a2477 100644 (file)
@@ -53,8 +53,10 @@ enum {
 
 #define ISID_SIZE                      6
 
-/* Connection suspend "bit" */
-#define ISCSI_SUSPEND_BIT              1
+/* Connection flags */
+#define ISCSI_CONN_FLAG_SUSPEND_TX     BIT(0)
+#define ISCSI_CONN_FLAG_SUSPEND_RX     BIT(1)
+#define ISCSI_CONN_FLAG_BOUND          BIT(2)
 
 #define ISCSI_ITT_MASK                 0x1fff
 #define ISCSI_TOTAL_CMDS_MAX           4096
@@ -211,8 +213,7 @@ struct iscsi_conn {
        struct list_head        cmdqueue;       /* data-path cmd queue */
        struct list_head        requeue;        /* tasks needing another run */
        struct work_struct      xmitwork;       /* per-conn. xmit workqueue */
-       unsigned long           suspend_tx;     /* suspend Tx */
-       unsigned long           suspend_rx;     /* suspend Rx */
+       unsigned long           flags;          /* ISCSI_CONN_FLAGs */
 
        /* negotiated params */
        unsigned                max_recv_dlength; /* initiator_max_recv_dsl*/
index 38e4a67..9acb842 100644 (file)
@@ -211,6 +211,8 @@ struct iscsi_cls_conn {
        struct mutex ep_mutex;
        struct iscsi_endpoint *ep;
 
+       /* Used when accessing flags and queueing work. */
+       spinlock_t lock;
        unsigned long flags;
        struct work_struct cleanup_work;
 
@@ -295,7 +297,7 @@ extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
 struct iscsi_endpoint {
        void *dd_data;                  /* LLD private data */
        struct device dev;
-       uint64_t id;
+       int id;
        struct iscsi_cls_conn *conn;
 };
 
index b7e9b58..6d4cc49 100644 (file)
@@ -284,6 +284,7 @@ int snd_card_disconnect(struct snd_card *card);
 void snd_card_disconnect_sync(struct snd_card *card);
 int snd_card_free(struct snd_card *card);
 int snd_card_free_when_closed(struct snd_card *card);
+int snd_card_free_on_error(struct device *dev, int ret);
 void snd_card_set_id(struct snd_card *card, const char *id);
 int snd_card_register(struct snd_card *card);
 int snd_card_info_init(void);
index 653dfff..8d79ceb 100644 (file)
@@ -51,6 +51,11 @@ struct snd_dma_device {
 #define SNDRV_DMA_TYPE_DEV_SG  SNDRV_DMA_TYPE_DEV /* no SG-buf support */
 #define SNDRV_DMA_TYPE_DEV_WC_SG       SNDRV_DMA_TYPE_DEV_WC
 #endif
+/* fallback types, don't use those directly */
+#ifdef CONFIG_SND_DMA_SGBUF
+#define SNDRV_DMA_TYPE_DEV_SG_FALLBACK         10
+#define SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK      11
+#endif
 
 /*
  * info for buffer allocation
index a520804..766dc6f 100644 (file)
@@ -179,6 +179,10 @@ struct snd_soc_component_driver {
                                  struct snd_pcm_hw_params *params);
        bool use_dai_pcm_id;    /* use DAI link PCM ID as PCM device number */
        int be_pcm_base;        /* base device ID for all BE PCMs */
+
+#ifdef CONFIG_DEBUG_FS
+       const char *debugfs_prefix;
+#endif
 };
 
 struct snd_soc_component {
index 5337acf..3995c58 100644 (file)
@@ -2015,17 +2015,18 @@ DECLARE_EVENT_CLASS(svc_deferred_event,
        TP_STRUCT__entry(
                __field(const void *, dr)
                __field(u32, xid)
-               __string(addr, dr->xprt->xpt_remotebuf)
+               __array(__u8, addr, INET6_ADDRSTRLEN + 10)
        ),
 
        TP_fast_assign(
                __entry->dr = dr;
                __entry->xid = be32_to_cpu(*(__be32 *)(dr->args +
                                                       (dr->xprt_hlen>>2)));
-               __assign_str(addr, dr->xprt->xpt_remotebuf);
+               snprintf(__entry->addr, sizeof(__entry->addr) - 1,
+                        "%pISpc", (struct sockaddr *)&dr->addr);
        ),
 
-       TP_printk("addr=%s dr=%p xid=0x%08x", __get_str(addr), __entry->dr,
+       TP_printk("addr=%s dr=%p xid=0x%08x", __entry->addr, __entry->dr,
                __entry->xid)
 );
 
index 7989d94..dff8e7f 100644 (file)
 /* Select an area of screen to be copied */
 #define KEY_SELECTIVE_SCREENSHOT       0x27a
 
+/* Move the focus to the next or previous user controllable element within a UI container */
+#define KEY_NEXT_ELEMENT               0x27b
+#define KEY_PREVIOUS_ELEMENT           0x27c
+
+/* Toggle Autopilot engagement */
+#define KEY_AUTOPILOT_ENGAGE_TOGGLE    0x27d
+
+/* Shortcut Keys */
+#define KEY_MARK_WAYPOINT              0x27e
+#define KEY_SOS                                0x27f
+#define KEY_NAV_CHART                  0x280
+#define KEY_FISHING_CHART              0x281
+#define KEY_SINGLE_RANGE_RADAR         0x282
+#define KEY_DUAL_RANGE_RADAR           0x283
+#define KEY_RADAR_OVERLAY              0x284
+#define KEY_TRADITIONAL_SONAR          0x285
+#define KEY_CLEARVU_SONAR              0x286
+#define KEY_SIDEVU_SONAR               0x287
+#define KEY_NAV_INFO                   0x288
+#define KEY_BRIGHTNESS_MENU            0x289
+
 /*
  * Some keyboards have keys which do not have a defined meaning, these keys
  * are intended to be programmed / bound to macros by the user. For most
index 784adc6..1845cf7 100644 (file)
@@ -296,6 +296,7 @@ struct io_uring_params {
 #define IORING_FEAT_NATIVE_WORKERS     (1U << 9)
 #define IORING_FEAT_RSRC_TAGS          (1U << 10)
 #define IORING_FEAT_CQE_SKIP           (1U << 11)
+#define IORING_FEAT_LINKED_FILE                (1U << 12)
 
 /*
  * io_uring_register(2) opcodes and arguments
index 3021ea2..7837ba4 100644 (file)
@@ -1,4 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_STDDEF_H
+#define _UAPI_LINUX_STDDEF_H
+
 #include <linux/compiler_types.h>
 
 #ifndef __always_inline
@@ -41,3 +44,4 @@
                struct { } __empty_ ## NAME; \
                TYPE NAME[]; \
        }
+#endif
index 5797c2a..d0a9aa0 100644 (file)
@@ -71,7 +71,6 @@ struct cpuhp_cpu_state {
        bool                    rollback;
        bool                    single;
        bool                    bringup;
-       int                     cpu;
        struct hlist_node       *node;
        struct hlist_node       *last;
        enum cpuhp_state        cb_state;
@@ -475,7 +474,7 @@ static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
 #endif
 
 static inline enum cpuhp_state
-cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
+cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
 {
        enum cpuhp_state prev_state = st->state;
        bool bringup = st->state < target;
@@ -486,14 +485,15 @@ cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
        st->target = target;
        st->single = false;
        st->bringup = bringup;
-       if (cpu_dying(st->cpu) != !bringup)
-               set_cpu_dying(st->cpu, !bringup);
+       if (cpu_dying(cpu) != !bringup)
+               set_cpu_dying(cpu, !bringup);
 
        return prev_state;
 }
 
 static inline void
-cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
+cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st,
+                 enum cpuhp_state prev_state)
 {
        bool bringup = !st->bringup;
 
@@ -520,8 +520,8 @@ cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
        }
 
        st->bringup = bringup;
-       if (cpu_dying(st->cpu) != !bringup)
-               set_cpu_dying(st->cpu, !bringup);
+       if (cpu_dying(cpu) != !bringup)
+               set_cpu_dying(cpu, !bringup);
 }
 
 /* Regular hotplug invocation of the AP hotplug thread */
@@ -541,15 +541,16 @@ static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
        wait_for_ap_thread(st, st->bringup);
 }
 
-static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
+static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
+                        enum cpuhp_state target)
 {
        enum cpuhp_state prev_state;
        int ret;
 
-       prev_state = cpuhp_set_state(st, target);
+       prev_state = cpuhp_set_state(cpu, st, target);
        __cpuhp_kick_ap(st);
        if ((ret = st->result)) {
-               cpuhp_reset_state(st, prev_state);
+               cpuhp_reset_state(cpu, st, prev_state);
                __cpuhp_kick_ap(st);
        }
 
@@ -581,7 +582,7 @@ static int bringup_wait_for_ap(unsigned int cpu)
        if (st->target <= CPUHP_AP_ONLINE_IDLE)
                return 0;
 
-       return cpuhp_kick_ap(st, st->target);
+       return cpuhp_kick_ap(cpu, st, st->target);
 }
 
 static int bringup_cpu(unsigned int cpu)
@@ -704,7 +705,7 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                         ret, cpu, cpuhp_get_step(st->state)->name,
                         st->state);
 
-               cpuhp_reset_state(st, prev_state);
+               cpuhp_reset_state(cpu, st, prev_state);
                if (can_rollback_cpu(st))
                        WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
                                                            prev_state));
@@ -721,7 +722,6 @@ static void cpuhp_create(unsigned int cpu)
 
        init_completion(&st->done_up);
        init_completion(&st->done_down);
-       st->cpu = cpu;
 }
 
 static int cpuhp_should_run(unsigned int cpu)
@@ -875,7 +875,7 @@ static int cpuhp_kick_ap_work(unsigned int cpu)
        cpuhp_lock_release(true);
 
        trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
-       ret = cpuhp_kick_ap(st, st->target);
+       ret = cpuhp_kick_ap(cpu, st, st->target);
        trace_cpuhp_exit(cpu, st->state, prev_state, ret);
 
        return ret;
@@ -1107,7 +1107,7 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                         ret, cpu, cpuhp_get_step(st->state)->name,
                         st->state);
 
-               cpuhp_reset_state(st, prev_state);
+               cpuhp_reset_state(cpu, st, prev_state);
 
                if (st->state < prev_state)
                        WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
@@ -1134,7 +1134,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
 
        cpuhp_tasks_frozen = tasks_frozen;
 
-       prev_state = cpuhp_set_state(st, target);
+       prev_state = cpuhp_set_state(cpu, st, target);
        /*
         * If the current CPU state is in the range of the AP hotplug thread,
         * then we need to kick the thread.
@@ -1165,7 +1165,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
        ret = cpuhp_down_callbacks(cpu, st, target);
        if (ret && st->state < prev_state) {
                if (st->state == CPUHP_TEARDOWN_CPU) {
-                       cpuhp_reset_state(st, prev_state);
+                       cpuhp_reset_state(cpu, st, prev_state);
                        __cpuhp_kick_ap(st);
                } else {
                        WARN(1, "DEAD callback error for CPU%d", cpu);
@@ -1352,7 +1352,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
 
        cpuhp_tasks_frozen = tasks_frozen;
 
-       cpuhp_set_state(st, target);
+       cpuhp_set_state(cpu, st, target);
        /*
         * If the current CPU state is in the range of the AP hotplug thread,
         * then we need to kick the thread once more.
index 4632b0f..8a6cd53 100644 (file)
@@ -114,6 +114,7 @@ static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
                dma_direct_sync_single_for_cpu(dev, addr, size, dir);
 
        if (unlikely(is_swiotlb_buffer(dev, phys)))
-               swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
+               swiotlb_tbl_unmap_single(dev, phys, size, dir,
+                                        attrs | DMA_ATTR_SKIP_CPU_SYNC);
 }
 #endif /* _KERNEL_DMA_DIRECT_H */
index 23bb197..7858baf 100644 (file)
@@ -6247,7 +6247,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 again:
        mutex_lock(&event->mmap_mutex);
        if (event->rb) {
-               if (event->rb->nr_pages != nr_pages) {
+               if (data_page_nr(event->rb) != nr_pages) {
                        ret = -EINVAL;
                        goto unlock;
                }
index 0828327..5150d5f 100644 (file)
@@ -116,6 +116,11 @@ static inline int page_order(struct perf_buffer *rb)
 }
 #endif
 
+static inline int data_page_nr(struct perf_buffer *rb)
+{
+       return rb->nr_pages << page_order(rb);
+}
+
 static inline unsigned long perf_data_size(struct perf_buffer *rb)
 {
        return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
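data_page_nr() and perf_data_size() above agree by construction, since nr_pages << (PAGE_SHIFT + order) equals (nr_pages << order) * PAGE_SIZE; a small sketch of that identity, illustrative only:

        static inline bool data_size_matches(struct perf_buffer *rb)   /* sketch */
        {
                return perf_data_size(rb) == (unsigned long)data_page_nr(rb) * PAGE_SIZE;
        }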
index 5286871..fb35b92 100644 (file)
@@ -859,11 +859,6 @@ void rb_free(struct perf_buffer *rb)
 }
 
 #else
-static int data_page_nr(struct perf_buffer *rb)
-{
-       return rb->nr_pages << page_order(rb);
-}
-
 static struct page *
 __perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
 {
index f7ff891..fdf1704 100644 (file)
@@ -269,8 +269,9 @@ static int __irq_build_affinity_masks(unsigned int startvec,
         */
        if (numvecs <= nodes) {
                for_each_node_mask(n, nodemsk) {
-                       cpumask_or(&masks[curvec].mask, &masks[curvec].mask,
-                                  node_to_cpumask[n]);
+                       /* Ensure that only CPUs which are in both masks are set */
+                       cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
+                       cpumask_or(&masks[curvec].mask, &masks[curvec].mask, nmsk);
                        if (++curvec == last_affv)
                                curvec = firstvec;
                }
index f7df715..7afa40f 100644 (file)
@@ -137,7 +137,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
        if (!irq_work_claim(work))
                return false;
 
-       kasan_record_aux_stack(work);
+       kasan_record_aux_stack_noalloc(work);
 
        preempt_disable();
        if (cpu != smp_processor_id()) {
index 475524b..b3732b2 100644 (file)
@@ -475,8 +475,11 @@ static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
        vma->vm_flags |= VM_DONTEXPAND;
        for (off = 0; off < size; off += PAGE_SIZE) {
                page = vmalloc_to_page(kcov->area + off);
-               if (vm_insert_page(vma, vma->vm_start + off, page))
-                       WARN_ONCE(1, "vm_insert_page() failed");
+               res = vm_insert_page(vma, vma->vm_start + off, page);
+               if (res) {
+                       pr_warn_once("kcov: vm_insert_page() failed\n");
+                       return res;
+               }
        }
        return 0;
 exit:
index d4bd299..a68482d 100644 (file)
@@ -3829,11 +3829,11 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 
        se->avg.runnable_sum = se->avg.runnable_avg * divider;
 
-       se->avg.load_sum = divider;
-       if (se_weight(se)) {
-               se->avg.load_sum =
-                       div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se));
-       }
+       se->avg.load_sum = se->avg.load_avg * divider;
+       if (se_weight(se) < se->avg.load_sum)
+               se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se));
+       else
+               se->avg.load_sum = 1;
 
        enqueue_load_avg(cfs_rq, se);
        cfs_rq->avg.util_avg += se->avg.util_avg;
index 01a7c17..65a630f 100644 (file)
@@ -579,7 +579,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 
        /* There shouldn't be any pending callbacks on an offline CPU. */
        if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
-                    !warned && !llist_empty(head))) {
+                    !warned && entry != NULL)) {
                warned = true;
                WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
 
index 2d76c91..d257721 100644 (file)
@@ -188,7 +188,7 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
         */
        if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) {
 #ifdef CONFIG_NO_HZ_FULL
-               WARN_ON(tick_nohz_full_running);
+               WARN_ON_ONCE(tick_nohz_full_running);
 #endif
                tick_do_timer_cpu = cpu;
        }
@@ -1538,7 +1538,7 @@ void tick_cancel_sched_timer(int cpu)
 }
 #endif
 
-/**
+/*
  * Async notification about clocksource changes
  */
 void tick_clock_notify(void)
@@ -1559,7 +1559,7 @@ void tick_oneshot_notify(void)
        set_bit(0, &ts->check_clocks);
 }
 
-/**
+/*
  * Check, if a change happened, which makes oneshot possible.
  *
  * Called cyclic from the hrtimer softirq (driven by the timer
index 85f1021..9dd2a39 100644 (file)
@@ -1722,11 +1722,14 @@ static inline void __run_timers(struct timer_base *base)
               time_after_eq(jiffies, base->next_expiry)) {
                levels = collect_expired_timers(base, heads);
                /*
-                * The only possible reason for not finding any expired
-                * timer at this clk is that all matching timers have been
-                * dequeued.
+                * The two possible reasons for not finding any expired
+                * timer at this clk are that all matching timers have been
+                * dequeued or no timer has been queued since
+                * base::next_expiry was set to base::clk +
+                * NEXT_TIMER_MAX_DELTA.
                 */
-               WARN_ON_ONCE(!levels && !base->next_expiry_recalc);
+               WARN_ON_ONCE(!levels && !base->next_expiry_recalc
+                            && base->timers_pending);
                base->clk++;
                base->next_expiry = __next_timer_interrupt(base);
 
index 4acc88e..54e646e 100644 (file)
@@ -207,6 +207,8 @@ static void *xas_descend(struct xa_state *xas, struct xa_node *node)
        if (xa_is_sibling(entry)) {
                offset = xa_to_sibling(entry);
                entry = xa_entry(xas->xa, node, offset);
+               if (node->shift && xa_is_node(entry))
+                       entry = XA_RETRY_ENTRY;
        }
 
        xas->xa_offset = offset;
index c3e37aa..fe915db 100644 (file)
 #include "internal.h"
 
 #ifdef CONFIG_COMPACTION
+/*
+ * Fragmentation score check interval for proactive compaction purposes.
+ */
+#define HPAGE_FRAG_CHECK_INTERVAL_MSEC (500)
+
 static inline void count_compact_event(enum vm_event_item item)
 {
        count_vm_event(item);
@@ -50,11 +55,6 @@ static inline void count_compact_events(enum vm_event_item item, long delta)
 #define pageblock_start_pfn(pfn)       block_start_pfn(pfn, pageblock_order)
 #define pageblock_end_pfn(pfn)         block_end_pfn(pfn, pageblock_order)
 
-/*
- * Fragmentation score check interval for proactive compaction purposes.
- */
-static const unsigned int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500;
-
 /*
  * Page order with-respect-to which proactive compaction
  * calculates external fragmentation, which is used as
index 3a5ffb5..9a1eef6 100644 (file)
@@ -1063,12 +1063,6 @@ void __init pagecache_init(void)
                init_waitqueue_head(&folio_wait_table[i]);
 
        page_writeback_init();
-
-       /*
-        * tmpfs uses the ZERO_PAGE for reading holes: it is up-to-date,
-        * and splice's page_cache_pipe_buf_confirm() needs to see that.
-        */
-       SetPageUptodate(ZERO_PAGE(0));
 }
 
 /*
index b34f501..3fc7217 100644 (file)
@@ -3475,7 +3475,6 @@ static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
 {
        int nr_nodes, node;
        struct page *page;
-       int rc = 0;
 
        lockdep_assert_held(&hugetlb_lock);
 
@@ -3486,15 +3485,19 @@ static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
        }
 
        for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
-               if (!list_empty(&h->hugepage_freelists[node])) {
-                       page = list_entry(h->hugepage_freelists[node].next,
-                                       struct page, lru);
-                       rc = demote_free_huge_page(h, page);
-                       break;
+               list_for_each_entry(page, &h->hugepage_freelists[node], lru) {
+                       if (PageHWPoison(page))
+                               continue;
+
+                       return demote_free_huge_page(h, page);
                }
        }
 
-       return rc;
+       /*
+        * Only way to get here is if all pages on free lists are poisoned.
+        * Return -EBUSY so that caller will not retry.
+        */
+       return -EBUSY;
 }
 
 #define HSTATE_ATTR_RO(_name) \
@@ -6782,6 +6785,16 @@ int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
        return ret;
 }
 
+int get_huge_page_for_hwpoison(unsigned long pfn, int flags)
+{
+       int ret;
+
+       spin_lock_irq(&hugetlb_lock);
+       ret = __get_huge_page_for_hwpoison(pfn, flags);
+       spin_unlock_irq(&hugetlb_lock);
+       return ret;
+}
+
 void putback_active_hugepage(struct page *page)
 {
        spin_lock_irq(&hugetlb_lock);
index 07a76c4..9e1b654 100644 (file)
@@ -336,8 +336,6 @@ void __kasan_poison_vmalloc(const void *start, unsigned long size)
 
 #endif
 
-#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
-
 void kasan_enable_tagging(void)
 {
        if (kasan_arg_mode == KASAN_ARG_MODE_ASYNC)
@@ -347,6 +345,9 @@ void kasan_enable_tagging(void)
        else
                hw_enable_tagging_sync();
 }
+
+#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
+
 EXPORT_SYMBOL_GPL(kasan_enable_tagging);
 
 void kasan_force_async_fault(void)
index d79b83d..b01b4bb 100644 (file)
@@ -355,25 +355,27 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
 #define hw_set_mem_tag_range(addr, size, tag, init) \
                        arch_set_mem_tag_range((addr), (size), (tag), (init))
 
+void kasan_enable_tagging(void);
+
 #else /* CONFIG_KASAN_HW_TAGS */
 
 #define hw_enable_tagging_sync()
 #define hw_enable_tagging_async()
 #define hw_enable_tagging_asymm()
 
+static inline void kasan_enable_tagging(void) { }
+
 #endif /* CONFIG_KASAN_HW_TAGS */
 
 #if defined(CONFIG_KASAN_HW_TAGS) && IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
 
-void kasan_enable_tagging(void);
 void kasan_force_async_fault(void);
 
-#else /* CONFIG_KASAN_HW_TAGS || CONFIG_KASAN_KUNIT_TEST */
+#else /* CONFIG_KASAN_HW_TAGS && CONFIG_KASAN_KUNIT_TEST */
 
-static inline void kasan_enable_tagging(void) { }
 static inline void kasan_force_async_fault(void) { }
 
-#endif /* CONFIG_KASAN_HW_TAGS || CONFIG_KASAN_KUNIT_TEST */
+#endif /* CONFIG_KASAN_HW_TAGS && CONFIG_KASAN_KUNIT_TEST */
 
 #ifdef CONFIG_KASAN_SW_TAGS
 u8 kasan_random_tag(void);
index a203747..9b2b5f5 100644 (file)
@@ -231,27 +231,6 @@ static bool kfence_unprotect(unsigned long addr)
        return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
 }
 
-static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
-{
-       long index;
-
-       /* The checks do not affect performance; only called from slow-paths. */
-
-       if (!is_kfence_address((void *)addr))
-               return NULL;
-
-       /*
-        * May be an invalid index if called with an address at the edge of
-        * __kfence_pool, in which case we would report an "invalid access"
-        * error.
-        */
-       index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
-       if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
-               return NULL;
-
-       return &kfence_metadata[index];
-}
-
 static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
 {
        unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
index 9a6c4b1..600f2e2 100644 (file)
@@ -96,6 +96,27 @@ struct kfence_metadata {
 
 extern struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];
 
+static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
+{
+       long index;
+
+       /* The checks do not affect performance; only called from slow-paths. */
+
+       if (!is_kfence_address((void *)addr))
+               return NULL;
+
+       /*
+        * May be an invalid index if called with an address at the edge of
+        * __kfence_pool, in which case we would report an "invalid access"
+        * error.
+        */
+       index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
+       if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
+               return NULL;
+
+       return &kfence_metadata[index];
+}
+
 /* KFENCE error types for report generation. */
 enum kfence_error_type {
        KFENCE_ERROR_OOB,               /* Detected an out-of-bounds access. */
index f93a7b2..f5a6d8b 100644 (file)
@@ -273,3 +273,50 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r
        /* We encountered a memory safety error, taint the kernel! */
        add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK);
 }
+
+#ifdef CONFIG_PRINTK
+static void kfence_to_kp_stack(const struct kfence_track *track, void **kp_stack)
+{
+       int i, j;
+
+       i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);
+       for (j = 0; i < track->num_stack_entries && j < KS_ADDRS_COUNT; ++i, ++j)
+               kp_stack[j] = (void *)track->stack_entries[i];
+       if (j < KS_ADDRS_COUNT)
+               kp_stack[j] = NULL;
+}
+
+bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+       struct kfence_metadata *meta = addr_to_metadata((unsigned long)object);
+       unsigned long flags;
+
+       if (!meta)
+               return false;
+
+       /*
+        * If state is UNUSED at least show the pointer requested; the rest
+        * would be garbage data.
+        */
+       kpp->kp_ptr = object;
+
+       /* Requesting info on a never-used object is almost certainly a bug. */
+       if (WARN_ON(meta->state == KFENCE_OBJECT_UNUSED))
+               return true;
+
+       raw_spin_lock_irqsave(&meta->lock, flags);
+
+       kpp->kp_slab = slab;
+       kpp->kp_slab_cache = meta->cache;
+       kpp->kp_objp = (void *)meta->addr;
+       kfence_to_kp_stack(&meta->alloc_track, kpp->kp_stack);
+       if (meta->state == KFENCE_OBJECT_FREED)
+               kfence_to_kp_stack(&meta->free_track, kpp->kp_free_stack);
+       /* get_stack_skipnr() ensures the first entry is outside allocator. */
+       kpp->kp_ret = kpp->kp_stack[0];
+
+       raw_spin_unlock_irqrestore(&meta->lock, flags);
+
+       return true;
+}
+#endif
index acd7cbb..a182f5d 100644 (file)
@@ -1132,7 +1132,7 @@ EXPORT_SYMBOL(kmemleak_no_scan);
 void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
                               gfp_t gfp)
 {
-       if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+       if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
                kmemleak_alloc(__va(phys), size, min_count, gfp);
 }
 EXPORT_SYMBOL(kmemleak_alloc_phys);
@@ -1146,7 +1146,7 @@ EXPORT_SYMBOL(kmemleak_alloc_phys);
  */
 void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
 {
-       if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+       if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
                kmemleak_free_part(__va(phys), size);
 }
 EXPORT_SYMBOL(kmemleak_free_part_phys);
@@ -1158,7 +1158,7 @@ EXPORT_SYMBOL(kmemleak_free_part_phys);
  */
 void __ref kmemleak_not_leak_phys(phys_addr_t phys)
 {
-       if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+       if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
                kmemleak_not_leak(__va(phys));
 }
 EXPORT_SYMBOL(kmemleak_not_leak_phys);
@@ -1170,7 +1170,7 @@ EXPORT_SYMBOL(kmemleak_not_leak_phys);
  */
 void __ref kmemleak_ignore_phys(phys_addr_t phys)
 {
-       if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+       if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
                kmemleak_ignore(__va(phys));
 }
 EXPORT_SYMBOL(kmemleak_ignore_phys);
index 725f767..598fece 100644 (file)
@@ -587,6 +587,9 @@ static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
 static DEFINE_SPINLOCK(stats_flush_lock);
 static DEFINE_PER_CPU(unsigned int, stats_updates);
 static atomic_t stats_flush_threshold = ATOMIC_INIT(0);
+static u64 flush_next_time;
+
+#define FLUSH_TIME (2UL*HZ)
 
 /*
  * Accessors to ensure that preemption is disabled on PREEMPT_RT because it can
@@ -637,6 +640,7 @@ static void __mem_cgroup_flush_stats(void)
        if (!spin_trylock_irqsave(&stats_flush_lock, flag))
                return;
 
+       flush_next_time = jiffies_64 + 2*FLUSH_TIME;
        cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup);
        atomic_set(&stats_flush_threshold, 0);
        spin_unlock_irqrestore(&stats_flush_lock, flag);
@@ -648,10 +652,16 @@ void mem_cgroup_flush_stats(void)
                __mem_cgroup_flush_stats();
 }
 
+void mem_cgroup_flush_stats_delayed(void)
+{
+       if (time_after64(jiffies_64, flush_next_time))
+               mem_cgroup_flush_stats();
+}
+
 static void flush_memcg_stats_dwork(struct work_struct *w)
 {
        __mem_cgroup_flush_stats();
-       queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ);
+       queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
 }
 
 /**
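
The new mem_cgroup_flush_stats_delayed() only flushes when jiffies_64 has moved past flush_next_time, and __mem_cgroup_flush_stats() pushes that deadline 2*FLUSH_TIME into the future on every real flush, so hot callers such as workingset_refault() (patched later in this series) ratelimit themselves to at most one flush per window. A rough user-space sketch of the time-gated pattern; the clock source, window length and names are illustrative:

    #include <stdio.h>
    #include <time.h>

    #define FLUSH_TIME 2            /* seconds; the kernel uses 2*HZ jiffies */

    static time_t flush_next_time;

    static void do_flush(void)
    {
            /* Push the next deadline out before doing the expensive work. */
            flush_next_time = time(NULL) + 2 * FLUSH_TIME;
            puts("flushing stats");
    }

    /* Hot-path variant: only flush once the previous window has expired. */
    static void flush_delayed(void)
    {
            if (time(NULL) > flush_next_time)
                    do_flush();
    }

    int main(void)
    {
            flush_delayed();        /* flushes: deadline starts at 0 */
            flush_delayed();        /* skipped: still inside the window */
            return 0;
    }
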
index dcb6bb9..27760c1 100644 (file)
@@ -1498,50 +1498,113 @@ static int try_to_split_thp_page(struct page *page, const char *msg)
        return 0;
 }
 
-static int memory_failure_hugetlb(unsigned long pfn, int flags)
+/*
+ * Called from hugetlb code with hugetlb_lock held.
+ *
+ * Return values:
+ *   0             - free hugepage
+ *   1             - in-use hugepage
+ *   2             - not a hugepage
+ *   -EBUSY        - the hugepage is busy (try to retry)
+ *   -EHWPOISON    - the hugepage is already hwpoisoned
+ */
+int __get_huge_page_for_hwpoison(unsigned long pfn, int flags)
+{
+       struct page *page = pfn_to_page(pfn);
+       struct page *head = compound_head(page);
+       int ret = 2;    /* fallback to normal page handling */
+       bool count_increased = false;
+
+       if (!PageHeadHuge(head))
+               goto out;
+
+       if (flags & MF_COUNT_INCREASED) {
+               ret = 1;
+               count_increased = true;
+       } else if (HPageFreed(head) || HPageMigratable(head)) {
+               ret = get_page_unless_zero(head);
+               if (ret)
+                       count_increased = true;
+       } else {
+               ret = -EBUSY;
+               goto out;
+       }
+
+       if (TestSetPageHWPoison(head)) {
+               ret = -EHWPOISON;
+               goto out;
+       }
+
+       return ret;
+out:
+       if (count_increased)
+               put_page(head);
+       return ret;
+}
+
+#ifdef CONFIG_HUGETLB_PAGE
+/*
+ * Taking refcount of hugetlb pages needs extra care about race conditions
+ * with basic operations like hugepage allocation/free/demotion.
+ * So some of the prechecks for hwpoison (pinning, and testing/setting
+ * PageHWPoison) should be done in a single hugetlb_lock range.
+ */
+static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
 {
-       struct page *p = pfn_to_page(pfn);
-       struct page *head = compound_head(p);
        int res;
+       struct page *p = pfn_to_page(pfn);
+       struct page *head;
        unsigned long page_flags;
+       bool retry = true;
 
-       if (TestSetPageHWPoison(head)) {
-               pr_err("Memory failure: %#lx: already hardware poisoned\n",
-                      pfn);
-               res = -EHWPOISON;
-               if (flags & MF_ACTION_REQUIRED)
+       *hugetlb = 1;
+retry:
+       res = get_huge_page_for_hwpoison(pfn, flags);
+       if (res == 2) { /* fallback to normal page handling */
+               *hugetlb = 0;
+               return 0;
+       } else if (res == -EHWPOISON) {
+               pr_err("Memory failure: %#lx: already hardware poisoned\n", pfn);
+               if (flags & MF_ACTION_REQUIRED) {
+                       head = compound_head(p);
                        res = kill_accessing_process(current, page_to_pfn(head), flags);
+               }
+               return res;
+       } else if (res == -EBUSY) {
+               if (retry) {
+                       retry = false;
+                       goto retry;
+               }
+               action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
                return res;
        }
 
+       head = compound_head(p);
+       lock_page(head);
+
+       if (hwpoison_filter(p)) {
+               ClearPageHWPoison(head);
+               res = -EOPNOTSUPP;
+               goto out;
+       }
+
        num_poisoned_pages_inc();
 
-       if (!(flags & MF_COUNT_INCREASED)) {
-               res = get_hwpoison_page(p, flags);
-               if (!res) {
-                       lock_page(head);
-                       if (hwpoison_filter(p)) {
-                               if (TestClearPageHWPoison(head))
-                                       num_poisoned_pages_dec();
-                               unlock_page(head);
-                               return -EOPNOTSUPP;
-                       }
-                       unlock_page(head);
-                       res = MF_FAILED;
-                       if (__page_handle_poison(p)) {
-                               page_ref_inc(p);
-                               res = MF_RECOVERED;
-                       }
-                       action_result(pfn, MF_MSG_FREE_HUGE, res);
-                       return res == MF_RECOVERED ? 0 : -EBUSY;
-               } else if (res < 0) {
-                       action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
-                       return -EBUSY;
+       /*
+        * Handling a free hugepage.  The possible race with hugepage allocation
+        * or demotion can be prevented by the PageHWPoison flag.
+        */
+       if (res == 0) {
+               unlock_page(head);
+               res = MF_FAILED;
+               if (__page_handle_poison(p)) {
+                       page_ref_inc(p);
+                       res = MF_RECOVERED;
                }
+               action_result(pfn, MF_MSG_FREE_HUGE, res);
+               return res == MF_RECOVERED ? 0 : -EBUSY;
        }
 
-       lock_page(head);
-
        /*
         * The page could have changed compound pages due to race window.
         * If this happens just bail out.
@@ -1554,14 +1617,6 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags)
 
        page_flags = head->flags;
 
-       if (hwpoison_filter(p)) {
-               if (TestClearPageHWPoison(head))
-                       num_poisoned_pages_dec();
-               put_page(p);
-               res = -EOPNOTSUPP;
-               goto out;
-       }
-
        /*
         * TODO: hwpoison for pud-sized hugetlb doesn't work right now, so
         * simply disable it. In order to make it work properly, we need
@@ -1588,6 +1643,12 @@ out:
        unlock_page(head);
        return res;
 }
+#else
+static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
+{
+       return 0;
+}
+#endif
 
 static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
                struct dev_pagemap *pgmap)
@@ -1712,6 +1773,7 @@ int memory_failure(unsigned long pfn, int flags)
        int res = 0;
        unsigned long page_flags;
        bool retry = true;
+       int hugetlb = 0;
 
        if (!sysctl_memory_failure_recovery)
                panic("Memory failure on page %lx", pfn);
@@ -1739,10 +1801,9 @@ int memory_failure(unsigned long pfn, int flags)
        }
 
 try_again:
-       if (PageHuge(p)) {
-               res = memory_failure_hugetlb(pfn, flags);
+       res = try_memory_failure_hugetlb(pfn, flags, &hugetlb);
+       if (hugetlb)
                goto unlock_mutex;
-       }
 
        if (TestSetPageHWPoison(p)) {
                pr_err("Memory failure: %#lx: already hardware poisoned\n",
@@ -1799,6 +1860,19 @@ try_again:
        }
 
        if (PageTransHuge(hpage)) {
+               /*
+                * Bail out before SetPageHasHWPoisoned() if hpage is
+                * huge_zero_page, although PG_has_hwpoisoned is not
+                * checked in set_huge_zero_page().
+                *
+                * TODO: Handle memory failure of huge_zero_page thoroughly.
+                */
+               if (is_huge_zero_page(hpage)) {
+                       action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
+                       res = -EBUSY;
+                       goto unlock_mutex;
+               }
+
                /*
                 * The flag must be set after the refcount is bumped
                 * otherwise it may race with THP split.
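
try_memory_failure_hugetlb() dispatches on the return-code contract documented above __get_huge_page_for_hwpoison(): 0 means a free hugepage, 1 an in-use hugepage with a reference taken, 2 "fall back to normal page handling", -EBUSY "retry once", and -EHWPOISON "already poisoned". A compact stand-alone sketch of that dispatch; the stubbed get_huge_page() and the printed action are illustrative only:

    #include <errno.h>
    #include <stdio.h>

    #ifndef EHWPOISON
    #define EHWPOISON 133           /* Linux value; assumption for older headers */
    #endif

    /* Stub mirroring __get_huge_page_for_hwpoison()'s return contract. */
    static int get_huge_page(unsigned long pfn) { (void)pfn; return -EBUSY; }

    static int handle(unsigned long pfn)
    {
            int retried = 0;
            int res;
    retry:
            res = get_huge_page(pfn);
            switch (res) {
            case 2:                 /* not a hugepage: normal page path */
                    return 0;
            case -EHWPOISON:        /* already poisoned */
                    return res;
            case -EBUSY:            /* transient state: retry exactly once */
                    if (!retried++)
                            goto retry;
                    return res;
            default:                /* 0 = free, 1 = in-use hugepage */
                    printf("poison hugepage at pfn %#lx (res=%d)\n", pfn, res);
                    return 0;
            }
    }

    int main(void) { return handle(0x1234) == -EBUSY ? 0 : 1; }
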
index 3aa839f..e9b7d74 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2117,14 +2117,6 @@ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
        return addr;
 }
 
-#ifndef arch_get_mmap_end
-#define arch_get_mmap_end(addr)        (TASK_SIZE)
-#endif
-
-#ifndef arch_get_mmap_base
-#define arch_get_mmap_base(addr, base) (base)
-#endif
-
 /* Get an address range which is currently unmapped.
  * For shmat() with addr=0.
  *
@@ -2136,15 +2128,15 @@ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
  *
  * This function "knows" that -ENOMEM has the bits set.
  */
-#ifndef HAVE_ARCH_UNMAPPED_AREA
 unsigned long
-arch_get_unmapped_area(struct file *filp, unsigned long addr,
-               unsigned long len, unsigned long pgoff, unsigned long flags)
+generic_get_unmapped_area(struct file *filp, unsigned long addr,
+                         unsigned long len, unsigned long pgoff,
+                         unsigned long flags)
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev;
        struct vm_unmapped_area_info info;
-       const unsigned long mmap_end = arch_get_mmap_end(addr);
+       const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
 
        if (len > mmap_end - mmap_min_addr)
                return -ENOMEM;
@@ -2169,22 +2161,30 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
        info.align_offset = 0;
        return vm_unmapped_area(&info);
 }
+
+#ifndef HAVE_ARCH_UNMAPPED_AREA
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+                      unsigned long len, unsigned long pgoff,
+                      unsigned long flags)
+{
+       return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
+}
 #endif
 
 /*
  * This mmap-allocator allocates new areas top-down from below the
  * stack's low limit (the base):
  */
-#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 unsigned long
-arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
-                         unsigned long len, unsigned long pgoff,
-                         unsigned long flags)
+generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
+                                 unsigned long len, unsigned long pgoff,
+                                 unsigned long flags)
 {
        struct vm_area_struct *vma, *prev;
        struct mm_struct *mm = current->mm;
        struct vm_unmapped_area_info info;
-       const unsigned long mmap_end = arch_get_mmap_end(addr);
+       const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
 
        /* requested length too big for entire address space */
        if (len > mmap_end - mmap_min_addr)
@@ -2227,6 +2227,15 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 
        return addr;
 }
+
+#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
+                              unsigned long len, unsigned long pgoff,
+                              unsigned long flags)
+{
+       return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
+}
 #endif
 
 unsigned long
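
The mmap.c change turns the old arch_get_unmapped_area{,_topdown}() bodies into generic_get_unmapped_area{,_topdown}() and leaves thin default wrappers behind the HAVE_ARCH_UNMAPPED_AREA* guards, so an architecture can keep the generic VMA search while layering its own policy on top. A hedged, kernel-style sketch of such an override; arch_fixup_addr() is a purely hypothetical hook, not an existing kernel API:

    /* Illustrative only: an arch that defines HAVE_ARCH_UNMAPPED_AREA could
     * apply its own address constraint and still reuse the generic walker. */
    unsigned long
    arch_get_unmapped_area(struct file *filp, unsigned long addr,
                           unsigned long len, unsigned long pgoff,
                           unsigned long flags)
    {
            /* hypothetical arch-specific adjustment of the caller's hint */
            unsigned long hint = arch_fixup_addr(addr, len, flags);

            return generic_get_unmapped_area(filp, hint, len, pgoff, flags);
    }
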
index 459d195..f45ff1b 100644 (file)
@@ -1036,6 +1036,18 @@ int mmu_interval_notifier_insert_locked(
 }
 EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);
 
+static bool
+mmu_interval_seq_released(struct mmu_notifier_subscriptions *subscriptions,
+                         unsigned long seq)
+{
+       bool ret;
+
+       spin_lock(&subscriptions->lock);
+       ret = subscriptions->invalidate_seq != seq;
+       spin_unlock(&subscriptions->lock);
+       return ret;
+}
+
 /**
  * mmu_interval_notifier_remove - Remove an interval notifier
  * @interval_sub: Interval subscription to unregister
@@ -1083,7 +1095,7 @@ void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
        lock_map_release(&__mmu_notifier_invalidate_range_start_map);
        if (seq)
                wait_event(subscriptions->wq,
-                          READ_ONCE(subscriptions->invalidate_seq) != seq);
+                          mmu_interval_seq_released(subscriptions, seq));
 
        /* pairs with mmgrab in mmu_interval_notifier_insert() */
        mmdrop(mm);
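
mmu_interval_notifier_remove() now waits on mmu_interval_seq_released(), which samples invalidate_seq under subscriptions->lock rather than with a bare READ_ONCE(), serialising the check against the updater that modifies the sequence under the same lock before waking waiters. A small pthread-based sketch of that read-under-the-writer's-lock pattern; the names and single-threaded driver are illustrative:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long invalidate_seq;

    /* Sample the sequence under the same lock writers hold when bumping it,
     * so a concurrent update is either fully observed or not observed at all. */
    static bool seq_released(unsigned long seq)
    {
            bool ret;

            pthread_mutex_lock(&lock);
            ret = invalidate_seq != seq;
            pthread_mutex_unlock(&lock);
            return ret;
    }

    int main(void)
    {
            unsigned long seq = invalidate_seq;

            pthread_mutex_lock(&lock);
            invalidate_seq++;       /* the "writer" side */
            pthread_mutex_unlock(&lock);

            return seq_released(seq) ? 0 : 1;
    }
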
index 7ec3819..49d7df3 100644 (file)
@@ -632,7 +632,7 @@ done:
         */
        set_bit(MMF_OOM_SKIP, &mm->flags);
 
-       /* Drop a reference taken by wake_oom_reaper */
+       /* Drop a reference taken by queue_oom_reaper */
        put_task_struct(tsk);
 }
 
@@ -644,12 +644,12 @@ static int oom_reaper(void *unused)
                struct task_struct *tsk = NULL;
 
                wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
-               spin_lock(&oom_reaper_lock);
+               spin_lock_irq(&oom_reaper_lock);
                if (oom_reaper_list != NULL) {
                        tsk = oom_reaper_list;
                        oom_reaper_list = tsk->oom_reaper_list;
                }
-               spin_unlock(&oom_reaper_lock);
+               spin_unlock_irq(&oom_reaper_lock);
 
                if (tsk)
                        oom_reap_task(tsk);
@@ -658,22 +658,48 @@ static int oom_reaper(void *unused)
        return 0;
 }
 
-static void wake_oom_reaper(struct task_struct *tsk)
+static void wake_oom_reaper(struct timer_list *timer)
 {
-       /* mm is already queued? */
-       if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
-               return;
+       struct task_struct *tsk = container_of(timer, struct task_struct,
+                       oom_reaper_timer);
+       struct mm_struct *mm = tsk->signal->oom_mm;
+       unsigned long flags;
 
-       get_task_struct(tsk);
+       /* The victim managed to terminate on its own - see exit_mmap */
+       if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
+               put_task_struct(tsk);
+               return;
+       }
 
-       spin_lock(&oom_reaper_lock);
+       spin_lock_irqsave(&oom_reaper_lock, flags);
        tsk->oom_reaper_list = oom_reaper_list;
        oom_reaper_list = tsk;
-       spin_unlock(&oom_reaper_lock);
+       spin_unlock_irqrestore(&oom_reaper_lock, flags);
        trace_wake_reaper(tsk->pid);
        wake_up(&oom_reaper_wait);
 }
 
+/*
+ * Give the OOM victim time to exit naturally before invoking the oom reaper.
+ * The timer's timeout is arbitrary... the longer it is, the longer the
+ * worst case for the OOM can take. If it is too small, the oom_reaper can
+ * get in the way and release resources needed by the process exit path,
+ * e.g. the futex robust list can sit in Anon|Private memory that gets reaped
+ * before the exit path is able to wake the futex waiters.
+ */
+#define OOM_REAPER_DELAY (2*HZ)
+static void queue_oom_reaper(struct task_struct *tsk)
+{
+       /* mm is already queued? */
+       if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
+               return;
+
+       get_task_struct(tsk);
+       timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0);
+       tsk->oom_reaper_timer.expires = jiffies + OOM_REAPER_DELAY;
+       add_timer(&tsk->oom_reaper_timer);
+}
+
 static int __init oom_init(void)
 {
        oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
@@ -681,7 +707,7 @@ static int __init oom_init(void)
 }
 subsys_initcall(oom_init)
 #else
-static inline void wake_oom_reaper(struct task_struct *tsk)
+static inline void queue_oom_reaper(struct task_struct *tsk)
 {
 }
 #endif /* CONFIG_MMU */
@@ -932,7 +958,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
        rcu_read_unlock();
 
        if (can_oom_reap)
-               wake_oom_reaper(victim);
+               queue_oom_reaper(victim);
 
        mmdrop(mm);
        put_task_struct(victim);
@@ -968,7 +994,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
        task_lock(victim);
        if (task_will_free_mem(victim)) {
                mark_oom_victim(victim);
-               wake_oom_reaper(victim);
+               queue_oom_reaper(victim);
                task_unlock(victim);
                put_task_struct(victim);
                return;
@@ -1067,7 +1093,7 @@ bool out_of_memory(struct oom_control *oc)
         */
        if (task_will_free_mem(current)) {
                mark_oom_victim(current);
-               wake_oom_reaper(current);
+               queue_oom_reaper(current);
                return true;
        }
 
index 6e5b448..0e42038 100644 (file)
@@ -6131,7 +6131,7 @@ static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
        do {
                zone_type--;
                zone = pgdat->node_zones + zone_type;
-               if (managed_zone(zone)) {
+               if (populated_zone(zone)) {
                        zoneref_set_zone(zone, &zonerefs[nr_zones++]);
                        check_highest_zone(zone_type);
                }
@@ -8919,7 +8919,7 @@ void *__init alloc_large_system_hash(const char *tablename,
                                table = memblock_alloc_raw(size,
                                                           SMP_CACHE_BYTES);
                } else if (get_order(size) >= MAX_ORDER || hashdist) {
-                       table = __vmalloc(size, gfp_flags);
+                       table = vmalloc_huge(size, gfp_flags);
                        virt = true;
                        if (table)
                                huge = is_vm_area_hugepages(table);
index b417f00..89fbf3c 100644 (file)
@@ -51,54 +51,6 @@ void end_swap_bio_write(struct bio *bio)
        bio_put(bio);
 }
 
-static void swap_slot_free_notify(struct page *page)
-{
-       struct swap_info_struct *sis;
-       struct gendisk *disk;
-       swp_entry_t entry;
-
-       /*
-        * There is no guarantee that the page is in swap cache - the software
-        * suspend code (at least) uses end_swap_bio_read() against a non-
-        * swapcache page.  So we must check PG_swapcache before proceeding with
-        * this optimization.
-        */
-       if (unlikely(!PageSwapCache(page)))
-               return;
-
-       sis = page_swap_info(page);
-       if (data_race(!(sis->flags & SWP_BLKDEV)))
-               return;
-
-       /*
-        * The swap subsystem performs lazy swap slot freeing,
-        * expecting that the page will be swapped out again.
-        * So we can avoid an unnecessary write if the page
-        * isn't redirtied.
-        * This is good for real swap storage because we can
-        * reduce unnecessary I/O and enhance wear-leveling
-        * if an SSD is used as the as swap device.
-        * But if in-memory swap device (eg zram) is used,
-        * this causes a duplicated copy between uncompressed
-        * data in VM-owned memory and compressed data in
-        * zram-owned memory.  So let's free zram-owned memory
-        * and make the VM-owned decompressed page *dirty*,
-        * so the page should be swapped out somewhere again if
-        * we again wish to reclaim it.
-        */
-       disk = sis->bdev->bd_disk;
-       entry.val = page_private(page);
-       if (disk->fops->swap_slot_free_notify && __swap_count(entry) == 1) {
-               unsigned long offset;
-
-               offset = swp_offset(entry);
-
-               SetPageDirty(page);
-               disk->fops->swap_slot_free_notify(sis->bdev,
-                               offset);
-       }
-}
-
 static void end_swap_bio_read(struct bio *bio)
 {
        struct page *page = bio_first_page_all(bio);
@@ -114,7 +66,6 @@ static void end_swap_bio_read(struct bio *bio)
        }
 
        SetPageUptodate(page);
-       swap_slot_free_notify(page);
 out:
        unlock_page(page);
        WRITE_ONCE(bio->bi_private, NULL);
@@ -394,11 +345,6 @@ int swap_readpage(struct page *page, bool synchronous)
        if (sis->flags & SWP_SYNCHRONOUS_IO) {
                ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
                if (!ret) {
-                       if (trylock_page(page)) {
-                               swap_slot_free_notify(page);
-                               unlock_page(page);
-                       }
-
                        count_vm_event(PSWPIN);
                        goto out;
                }
index 098638d..3b3cf28 100644 (file)
@@ -158,6 +158,22 @@ const struct address_space_operations secretmem_aops = {
        .isolate_page   = secretmem_isolate_page,
 };
 
+static int secretmem_setattr(struct user_namespace *mnt_userns,
+                            struct dentry *dentry, struct iattr *iattr)
+{
+       struct inode *inode = d_inode(dentry);
+       unsigned int ia_valid = iattr->ia_valid;
+
+       if ((ia_valid & ATTR_SIZE) && inode->i_size)
+               return -EINVAL;
+
+       return simple_setattr(mnt_userns, dentry, iattr);
+}
+
+static const struct inode_operations secretmem_iops = {
+       .setattr = secretmem_setattr,
+};
+
 static struct vfsmount *secretmem_mnt;
 
 static struct file *secretmem_file_create(unsigned long flags)
@@ -177,6 +193,7 @@ static struct file *secretmem_file_create(unsigned long flags)
        mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
        mapping_set_unevictable(inode->i_mapping);
 
+       inode->i_op = &secretmem_iops;
        inode->i_mapping->a_ops = &secretmem_aops;
 
        /* pretend we are a normal file with zero size */
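
secretmem_setattr() rejects any size change once the inode already has a non-zero size, so a memfd_secret descriptor can be sized exactly once. A hedged user-space illustration follows; it assumes a kernel with CONFIG_SECRETMEM, and the fallback syscall number is the x86_64 value:

    #define _GNU_SOURCE
    #include <errno.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef __NR_memfd_secret
    #define __NR_memfd_secret 447   /* x86_64; assumption, check your arch */
    #endif

    int main(void)
    {
            int fd = syscall(__NR_memfd_secret, 0);

            if (fd < 0) {
                    perror("memfd_secret");
                    return 1;
            }
            if (ftruncate(fd, 4096))        /* first resize: allowed, i_size was 0 */
                    perror("first ftruncate");
            if (ftruncate(fd, 8192))        /* with this patch: expected EINVAL */
                    perror("second ftruncate");
            return 0;
    }
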
index 529c9ad..4b2fea3 100644 (file)
@@ -2513,7 +2513,6 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
                pgoff_t end_index;
                unsigned long nr, ret;
                loff_t i_size = i_size_read(inode);
-               bool got_page;
 
                end_index = i_size >> PAGE_SHIFT;
                if (index > end_index)
@@ -2570,24 +2569,34 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
                         */
                        if (!offset)
                                mark_page_accessed(page);
-                       got_page = true;
+                       /*
+                        * Ok, we have the page, and it's up-to-date, so
+                        * now we can copy it to user space...
+                        */
+                       ret = copy_page_to_iter(page, offset, nr, to);
+                       put_page(page);
+
+               } else if (iter_is_iovec(to)) {
+                       /*
+                        * Copy to user tends to be so well optimized, but
+                        * clear_user() not so much, that it is noticeably
+                        * faster to copy the zero page instead of clearing.
+                        */
+                       ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to);
                } else {
-                       page = ZERO_PAGE(0);
-                       got_page = false;
+                       /*
+                        * But submitting the same page twice in a row to
+                        * splice() - or others? - can result in confusion:
+                        * so don't attempt that optimization on pipes etc.
+                        */
+                       ret = iov_iter_zero(nr, to);
                }
 
-               /*
-                * Ok, we have the page, and it's up-to-date, so
-                * now we can copy it to user space...
-                */
-               ret = copy_page_to_iter(page, offset, nr, to);
                retval += ret;
                offset += ret;
                index += offset >> PAGE_SHIFT;
                offset &= ~PAGE_MASK;
 
-               if (got_page)
-                       put_page(page);
                if (!iov_iter_count(to))
                        break;
                if (ret < nr) {
index b04e400..0edb474 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3665,7 +3665,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_PRINTK
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
 {
        struct kmem_cache *cachep;
        unsigned int objnr;
index fd7ae20..95eb341 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -868,7 +868,7 @@ struct kmem_obj_info {
        void *kp_stack[KS_ADDRS_COUNT];
        void *kp_free_stack[KS_ADDRS_COUNT];
 };
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
 #endif
 
 #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
index 6ee64d6..2b3206a 100644 (file)
@@ -555,6 +555,13 @@ bool kmem_valid_obj(void *object)
 }
 EXPORT_SYMBOL_GPL(kmem_valid_obj);
 
+static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+       if (__kfence_obj_info(kpp, object, slab))
+               return;
+       __kmem_obj_info(kpp, object, slab);
+}
+
 /**
  * kmem_dump_obj - Print available slab provenance information
  * @object: slab object for which to find provenance information.
@@ -590,6 +597,8 @@ void kmem_dump_obj(void *object)
                pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
        else
                pr_cont(" slab%s", cp);
+       if (is_kfence_address(object))
+               pr_cont(" (kfence)");
        if (kp.kp_objp)
                pr_cont(" start %px", kp.kp_objp);
        if (kp.kp_data_offset)
index dfa6808..40ea6e2 100644 (file)
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -463,7 +463,7 @@ out:
 }
 
 #ifdef CONFIG_PRINTK
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
 {
        kpp->kp_ptr = object;
        kpp->kp_slab = slab;
index 74d92aa..ed5c2c0 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4312,7 +4312,7 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
 }
 
 #ifdef CONFIG_PRINTK
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
 {
        void *base;
        int __maybe_unused i;
index 0cb8e5e..e9bb6db 100644 (file)
@@ -72,12 +72,15 @@ int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
        _dst_pte = pte_mkdirty(_dst_pte);
        if (page_in_cache && !vm_shared)
                writable = false;
-       if (writable) {
-               if (wp_copy)
-                       _dst_pte = pte_mkuffd_wp(_dst_pte);
-               else
-                       _dst_pte = pte_mkwrite(_dst_pte);
-       }
+
+       /*
+        * Always mark a PTE as write-protected when needed, regardless of
+        * VM_WRITE, which the user might change.
+        */
+       if (wp_copy)
+               _dst_pte = pte_mkuffd_wp(_dst_pte);
+       else if (writable)
+               _dst_pte = pte_mkwrite(_dst_pte);
 
        dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
 
index 54e5e76..44fbf21 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -344,7 +344,7 @@ unsigned long randomize_stack_top(unsigned long stack_top)
 }
 
 #ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
-unsigned long arch_randomize_brk(struct mm_struct *mm)
+unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
 {
        /* Is the current task 32bit ? */
        if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
@@ -592,8 +592,15 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
                return NULL;
        }
 
-       return __vmalloc_node(size, 1, flags, node,
-                       __builtin_return_address(0));
+       /*
+        * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
+        * since the callers already cannot assume anything
+        * about the resulting pointer, and cannot play
+        * protection games.
+        */
+       return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+                       flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
+                       node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kvmalloc_node);
 
index e163372..cadfbb5 100644 (file)
@@ -1671,17 +1671,6 @@ static DEFINE_MUTEX(vmap_purge_lock);
 /* for per-CPU blocks */
 static void purge_fragmented_blocks_allcpus(void);
 
-#ifdef CONFIG_X86_64
-/*
- * called before a call to iounmap() if the caller wants vm_area_struct's
- * immediately freed.
- */
-void set_iounmap_nonlazy(void)
-{
-       atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
-}
-#endif /* CONFIG_X86_64 */
-
 /*
  * Purges all lazily-freed vmap areas.
  */
@@ -2664,15 +2653,18 @@ static void __vunmap(const void *addr, int deallocate_pages)
        vm_remove_mappings(area, deallocate_pages);
 
        if (deallocate_pages) {
-               unsigned int page_order = vm_area_page_order(area);
-               int i, step = 1U << page_order;
+               int i;
 
-               for (i = 0; i < area->nr_pages; i += step) {
+               for (i = 0; i < area->nr_pages; i++) {
                        struct page *page = area->pages[i];
 
                        BUG_ON(!page);
-                       mod_memcg_page_state(page, MEMCG_VMALLOC, -step);
-                       __free_pages(page, page_order);
+                       mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
+                       /*
+                        * High-order allocs for huge vmallocs are split, so
+                        * they can be freed as an array of order-0 allocations.
+                        */
+                       __free_pages(page, 0);
                        cond_resched();
                }
                atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
@@ -2925,12 +2917,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
                        if (nr != nr_pages_request)
                                break;
                }
-       } else
-               /*
-                * Compound pages required for remap_vmalloc_page if
-                * high-order pages.
-                */
-               gfp |= __GFP_COMP;
+       }
 
        /* High-order pages or fallback path if "bulk" fails. */
 
@@ -2944,6 +2931,15 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
                        page = alloc_pages_node(nid, gfp, order);
                if (unlikely(!page))
                        break;
+               /*
+                * Higher order allocations must be able to be treated as
+                * independent small pages by callers (as they can with
+                * small-page vmallocs). Some drivers do their own refcounting
+                * on vmalloc_to_page() pages, some use page->mapping,
+                * page->lru, etc.
+                */
+               if (order)
+                       split_page(page, order);
 
                /*
                 * Careful, we allocate and map page-order pages, but
@@ -3003,11 +2999,10 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 
        atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
        if (gfp_mask & __GFP_ACCOUNT) {
-               int i, step = 1U << page_order;
+               int i;
 
-               for (i = 0; i < area->nr_pages; i += step)
-                       mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC,
-                                            step);
+               for (i = 0; i < area->nr_pages; i++)
+                       mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1);
        }
 
        /*
@@ -3106,7 +3101,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
                return NULL;
        }
 
-       if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP)) {
+       if (vmap_allow_huge && (vm_flags & VM_ALLOW_HUGE_VMAP)) {
                unsigned long size_per_node;
 
                /*
@@ -3273,21 +3268,24 @@ void *vmalloc(unsigned long size)
 EXPORT_SYMBOL(vmalloc);
 
 /**
- * vmalloc_no_huge - allocate virtually contiguous memory using small pages
- * @size:    allocation size
+ * vmalloc_huge - allocate virtually contiguous memory, allow huge pages
+ * @size:      allocation size
+ * @gfp_mask:  flags for the page level allocator
  *
- * Allocate enough non-huge pages to cover @size from the page level
+ * Allocate enough pages to cover @size from the page level
  * allocator and map them into contiguous kernel virtual space.
+ * If @size is greater than or equal to PMD_SIZE, allow using
+ * huge pages for the memory
  *
  * Return: pointer to the allocated memory or %NULL on error
  */
-void *vmalloc_no_huge(unsigned long size)
+void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)
 {
        return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
-                                   GFP_KERNEL, PAGE_KERNEL, VM_NO_HUGE_VMAP,
+                                   gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
                                    NUMA_NO_NODE, __builtin_return_address(0));
 }
-EXPORT_SYMBOL(vmalloc_no_huge);
+EXPORT_SYMBOL_GPL(vmalloc_huge);
 
 /**
  * vzalloc - allocate virtually contiguous memory with zero fill
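
With VM_NO_HUGE_VMAP inverted into the opt-in VM_ALLOW_HUGE_VMAP, huge vmap mappings are only used by callers that request them, such as the new vmalloc_huge() and the kvmalloc_node() change earlier, while plain vmalloc() stays small-page only. A short kernel-style sketch of the opt-in caller pattern, mirroring the alloc_large_system_hash() hunk above; the function name and message are illustrative:

    /* Sketch: a caller that can tolerate huge mappings opts in explicitly. */
    static void *alloc_big_table(size_t size)
    {
            void *table = vmalloc_huge(size, GFP_KERNEL);

            if (table && is_vm_area_hugepages(table))
                    pr_info("table backed by huge pages\n");
            return table;           /* free with vfree() as usual */
    }
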
index 8a3828a..592569a 100644 (file)
@@ -355,7 +355,7 @@ void workingset_refault(struct folio *folio, void *shadow)
 
        mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr);
 
-       mem_cgroup_flush_stats();
+       mem_cgroup_flush_stats_delayed();
        /*
         * Compare the distance to the existing workingset size. We
         * don't activate pages that couldn't stay resident even if
index bafb0fb..ff5d787 100644 (file)
@@ -906,6 +906,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
        struct canfd_frame *cf;
        int ae = (so->opt.flags & CAN_ISOTP_EXTEND_ADDR) ? 1 : 0;
        int wait_tx_done = (so->opt.flags & CAN_ISOTP_WAIT_TX_DONE) ? 1 : 0;
+       s64 hrtimer_sec = 0;
        int off;
        int err;
 
@@ -1004,7 +1005,9 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
                isotp_create_fframe(cf, so, ae);
 
                /* start timeout for FC */
-               hrtimer_start(&so->txtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
+               hrtimer_sec = 1;
+               hrtimer_start(&so->txtimer, ktime_set(hrtimer_sec, 0),
+                             HRTIMER_MODE_REL_SOFT);
        }
 
        /* send the first or only CAN frame */
@@ -1017,6 +1020,11 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
        if (err) {
                pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
                               __func__, ERR_PTR(err));
+
+               /* no transmission -> no timeout monitoring */
+               if (hrtimer_sec)
+                       hrtimer_cancel(&so->txtimer);
+
                goto err_out_drop;
        }
 
index 03b6e64..6f7ec72 100644 (file)
@@ -1032,7 +1032,7 @@ bool __skb_flow_dissect(const struct net *net,
                key_eth_addrs = skb_flow_dissector_target(flow_dissector,
                                                          FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                          target_container);
-               memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
+               memcpy(key_eth_addrs, eth, sizeof(*key_eth_addrs));
        }
 
 proto_again:
@@ -1183,6 +1183,7 @@ proto_again:
                                         VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                        }
                        key_vlan->vlan_tpid = saved_vlan_tpid;
+                       key_vlan->vlan_eth_type = proto;
                }
 
                fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
index 159c9c6..d1381ea 100644 (file)
@@ -5242,6 +5242,8 @@ static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev,
                *prividx = attr_id_l3_stats;
 
                size_l3 = rtnl_offload_xstats_get_size_stats(dev, t_l3);
+               if (!size_l3)
+                       goto skip_l3_stats;
                attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3,
                                         IFLA_OFFLOAD_XSTATS_UNSPEC);
                if (!attr)
@@ -5253,6 +5255,7 @@ static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev,
                        return err;
 
                have_data = true;
+skip_l3_stats:
                *prividx = 0;
        }
 
index ca6af86..cf93322 100644 (file)
@@ -562,7 +562,6 @@ static void dsa_port_teardown(struct dsa_port *dp)
 {
        struct devlink_port *dlp = &dp->devlink_port;
        struct dsa_switch *ds = dp->ds;
-       struct net_device *slave;
 
        if (!dp->setup)
                return;
@@ -584,11 +583,9 @@ static void dsa_port_teardown(struct dsa_port *dp)
                dsa_port_link_unregister_of(dp);
                break;
        case DSA_PORT_TYPE_USER:
-               slave = dp->slave;
-
-               if (slave) {
+               if (dp->slave) {
+                       dsa_slave_destroy(dp->slave);
                        dp->slave = NULL;
-                       dsa_slave_destroy(slave);
                }
                break;
        }
@@ -1147,17 +1144,17 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
        if (err)
                goto teardown_cpu_ports;
 
-       err = dsa_tree_setup_master(dst);
+       err = dsa_tree_setup_ports(dst);
        if (err)
                goto teardown_switches;
 
-       err = dsa_tree_setup_ports(dst);
+       err = dsa_tree_setup_master(dst);
        if (err)
-               goto teardown_master;
+               goto teardown_ports;
 
        err = dsa_tree_setup_lags(dst);
        if (err)
-               goto teardown_ports;
+               goto teardown_master;
 
        dst->setup = true;
 
@@ -1165,10 +1162,10 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
 
        return 0;
 
-teardown_ports:
-       dsa_tree_teardown_ports(dst);
 teardown_master:
        dsa_tree_teardown_master(dst);
+teardown_ports:
+       dsa_tree_teardown_ports(dst);
 teardown_switches:
        dsa_tree_teardown_switches(dst);
 teardown_cpu_ports:
@@ -1186,10 +1183,10 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
 
        dsa_tree_teardown_lags(dst);
 
-       dsa_tree_teardown_ports(dst);
-
        dsa_tree_teardown_master(dst);
 
+       dsa_tree_teardown_ports(dst);
+
        dsa_tree_teardown_switches(dst);
 
        dsa_tree_teardown_cpu_ports(dst);
index f64b805..eb204ad 100644 (file)
@@ -21,6 +21,14 @@ static struct sk_buff *hellcreek_xmit(struct sk_buff *skb,
        struct dsa_port *dp = dsa_slave_to_port(dev);
        u8 *tag;
 
+       /* Calculate checksums (if required) before adding the trailer tag to
+        * avoid including it in calculations. That would lead to wrong
+        * checksums after the switch strips the tag.
+        */
+       if (skb->ip_summed == CHECKSUM_PARTIAL &&
+           skb_checksum_help(skb))
+               return NULL;
+
        /* Tag encoding */
        tag  = skb_put(skb, HELLCREEK_TAG_LEN);
        *tag = BIT(dp->index);
index 70e6c87..d747166 100644 (file)
@@ -446,7 +446,6 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
        struct page *page;
        struct sk_buff *trailer;
        int tailen = esp->tailen;
-       unsigned int allocsz;
 
        /* this is non-NULL only with TCP/UDP Encapsulation */
        if (x->encap) {
@@ -456,8 +455,8 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
                        return err;
        }
 
-       allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES);
-       if (allocsz > ESP_SKB_FRAG_MAXSIZE)
+       if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
+           ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
                goto cow;
 
        if (!skb_cloned(skb)) {
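
This hunk, and its IPv6 counterpart further down, drop the combined ESP_SKB_FRAG_MAXSIZE test and instead fall back to the copy-on-write path whenever either the trailer length or the existing paged data would exceed PAGE_SIZE after cache-line alignment. A tiny stand-alone sketch of the predicate, with ALIGN and the constants stubbed for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    #define L1_CACHE_BYTES 64
    #define PAGE_SIZE 4096
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    /* True when the fast path must be abandoned in favour of the COW path. */
    static bool needs_cow(unsigned int data_len, unsigned int tailen)
    {
            return ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
                   ALIGN(data_len, L1_CACHE_BYTES) > PAGE_SIZE;
    }

    int main(void)
    {
            printf("%d %d\n", needs_cow(100, 32), needs_cow(5000, 32));
            return 0;
    }
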
index 99db2e4..365caeb 100644 (file)
@@ -605,8 +605,8 @@ static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
        key = &info->key;
        ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
                            tunnel_id_to_key32(key->tun_id),
-                           key->tos & ~INET_ECN_MASK, 0, skb->mark,
-                           skb_get_hash(skb));
+                           key->tos & ~INET_ECN_MASK, dev_net(dev), 0,
+                           skb->mark, skb_get_hash(skb));
        rt = ip_route_output_key(dev_net(dev), &fl4);
        if (IS_ERR(rt))
                return PTR_ERR(rt);
index 5a47331..94017a8 100644 (file)
@@ -294,8 +294,8 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
 
                ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr,
                                    iph->saddr, tunnel->parms.o_key,
-                                   RT_TOS(iph->tos), tunnel->parms.link,
-                                   tunnel->fwmark, 0);
+                                   RT_TOS(iph->tos), dev_net(dev),
+                                   tunnel->parms.link, tunnel->fwmark, 0);
                rt = ip_route_output_key(tunnel->net, &fl4);
 
                if (!IS_ERR(rt)) {
@@ -570,7 +570,7 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
        }
        ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src,
                            tunnel_id_to_key32(key->tun_id), RT_TOS(tos),
-                           0, skb->mark, skb_get_hash(skb));
+                           dev_net(dev), 0, skb->mark, skb_get_hash(skb));
        if (tunnel->encap.type != TUNNEL_ENCAP_NONE)
                goto tx_error;
 
@@ -726,7 +726,8 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
        }
 
        ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
-                           tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link,
+                           tunnel->parms.o_key, RT_TOS(tos),
+                           dev_net(dev), tunnel->parms.link,
                            tunnel->fwmark, skb_get_hash(skb));
 
        if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
index 55d604c..f2120e9 100644 (file)
@@ -482,7 +482,6 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
        struct page *page;
        struct sk_buff *trailer;
        int tailen = esp->tailen;
-       unsigned int allocsz;
 
        if (x->encap) {
                int err = esp6_output_encap(x, skb, esp);
@@ -491,8 +490,8 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
                        return err;
        }
 
-       allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES);
-       if (allocsz > ESP_SKB_FRAG_MAXSIZE)
+       if (ALIGN(tailen, L1_CACHE_BYTES) > PAGE_SIZE ||
+           ALIGN(skb->data_len, L1_CACHE_BYTES) > PAGE_SIZE)
                goto cow;
 
        if (!skb_cloned(skb)) {
index 8753e9c..9762367 100644 (file)
@@ -733,9 +733,6 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
        else
                fl6->daddr = tunnel->parms.raddr;
 
-       if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
-               return -ENOMEM;
-
        /* Push GRE header. */
        protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
 
@@ -743,6 +740,7 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
                struct ip_tunnel_info *tun_info;
                const struct ip_tunnel_key *key;
                __be16 flags;
+               int tun_hlen;
 
                tun_info = skb_tunnel_info_txcheck(skb);
                if (IS_ERR(tun_info) ||
@@ -760,9 +758,12 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
                dsfield = key->tos;
                flags = key->tun_flags &
                        (TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
-               tunnel->tun_hlen = gre_calc_hlen(flags);
+               tun_hlen = gre_calc_hlen(flags);
 
-               gre_build_header(skb, tunnel->tun_hlen,
+               if (skb_cow_head(skb, dev->needed_headroom ?: tun_hlen + tunnel->encap_hlen))
+                       return -ENOMEM;
+
+               gre_build_header(skb, tun_hlen,
                                 flags, protocol,
                                 tunnel_id_to_key32(tun_info->key.tun_id),
                                 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++)
@@ -772,6 +773,9 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
                if (tunnel->parms.o_flags & TUNNEL_SEQ)
                        tunnel->o_seqno++;
 
+               if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
+                       return -ENOMEM;
+
                gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
                                 protocol, tunnel->parms.o_key,
                                 htonl(tunnel->o_seqno));
index e23f058..fa63ef2 100644 (file)
@@ -485,7 +485,7 @@ int ip6_forward(struct sk_buff *skb)
                goto drop;
 
        if (!net->ipv6.devconf_all->disable_policy &&
-           !idev->cnf.disable_policy &&
+           (!idev || !idev->cnf.disable_policy) &&
            !xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
                __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
                goto drop;
index 169e9df..c4b6ce0 100644 (file)
@@ -3292,6 +3292,7 @@ static int ip6_dst_gc(struct dst_ops *ops)
        int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
        int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
        unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
+       unsigned int val;
        int entries;
 
        entries = dst_entries_get_fast(ops);
@@ -3302,13 +3303,13 @@ static int ip6_dst_gc(struct dst_ops *ops)
            entries <= rt_max_size)
                goto out;
 
-       net->ipv6.ip6_rt_gc_expire++;
-       fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
+       fib6_run_gc(atomic_inc_return(&net->ipv6.ip6_rt_gc_expire), net, true);
        entries = dst_entries_get_slow(ops);
        if (entries < ops->gc_thresh)
-               net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
+               atomic_set(&net->ipv6.ip6_rt_gc_expire, rt_gc_timeout >> 1);
 out:
-       net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
+       val = atomic_read(&net->ipv6.ip6_rt_gc_expire);
+       atomic_set(&net->ipv6.ip6_rt_gc_expire, val - (val >> rt_elasticity));
        return entries > rt_max_size;
 }
 
@@ -6509,7 +6510,7 @@ static int __net_init ip6_route_net_init(struct net *net)
        net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
        net->ipv6.sysctl.skip_notify_on_dev_down = 0;
 
-       net->ipv6.ip6_rt_gc_expire = 30*HZ;
+       atomic_set(&net->ipv6.ip6_rt_gc_expire, 30*HZ);
 
        ret = 0;
 out:
index 4eb8892..ca10916 100644 (file)
@@ -147,7 +147,7 @@ int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex)
 
        dev = dev_get_by_index_rcu(net, ifindex);
        while (dev && !netif_is_l3_master(dev))
-               dev = netdev_master_upper_dev_get(dev);
+               dev = netdev_master_upper_dev_get_rcu(dev);
 
        return dev ? dev->ifindex : 0;
 }
index 9479f27..88d9cc9 100644 (file)
@@ -441,7 +441,7 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
 #define PRINT_HT_CAP(_cond, _str) \
        do { \
        if (_cond) \
-                       p += scnprintf(p, sizeof(buf)+buf-p, "\t" _str "\n"); \
+                       p += scnprintf(p, bufsz + buf - p, "\t" _str "\n"); \
        } while (0)
        char *buf, *p;
        int i;
index 128ee3b..16c3a39 100644 (file)
@@ -9363,7 +9363,7 @@ int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest)
 }
 EXPORT_SYMBOL_GPL(nft_parse_u32_check);
 
-static unsigned int nft_parse_register(const struct nlattr *attr, u32 *preg)
+static int nft_parse_register(const struct nlattr *attr, u32 *preg)
 {
        unsigned int reg;
 
index bd3792f..6d9e8e0 100644 (file)
@@ -37,12 +37,11 @@ static void nft_socket_wildcard(const struct nft_pktinfo *pkt,
 
 #ifdef CONFIG_SOCK_CGROUP_DATA
 static noinline bool
-nft_sock_get_eval_cgroupv2(u32 *dest, const struct nft_pktinfo *pkt, u32 level)
+nft_sock_get_eval_cgroupv2(u32 *dest, struct sock *sk, const struct nft_pktinfo *pkt, u32 level)
 {
-       struct sock *sk = skb_to_full_sk(pkt->skb);
        struct cgroup *cgrp;
 
-       if (!sk || !sk_fullsock(sk) || !net_eq(nft_net(pkt), sock_net(sk)))
+       if (!sk_fullsock(sk))
                return false;
 
        cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
@@ -109,7 +108,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
                break;
 #ifdef CONFIG_SOCK_CGROUP_DATA
        case NFT_SOCKET_CGROUPV2:
-               if (!nft_sock_get_eval_cgroupv2(dest, pkt, priv->level)) {
+               if (!nft_sock_get_eval_cgroupv2(dest, sk, pkt, priv->level)) {
                        regs->verdict.code = NFT_BREAK;
                        return;
                }
index 47a876c..05a3795 100644 (file)
@@ -2263,6 +2263,13 @@ static int netlink_dump(struct sock *sk)
         * single netdev. The outcome is MSG_TRUNC error.
         */
        skb_reserve(skb, skb_tailroom(skb) - alloc_size);
+
+       /* Make sure malicious BPF programs cannot read uninitialized memory
+        * from skb->head -> skb->data
+        */
+       skb_reset_network_header(skb);
+       skb_reset_mac_header(skb);
+
        netlink_skb_set_owner_r(skb, sk);
 
        if (nlk->dump_done_errno > 0) {
index d253738..6a193cc 100644 (file)
@@ -560,6 +560,10 @@ static int nci_close_device(struct nci_dev *ndev)
        mutex_lock(&ndev->req_lock);
 
        if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
+               /* Need to flush the cmd wq in case
+                * there is a queued/running cmd_work
+                */
+               flush_workqueue(ndev->cmd_wq);
                del_timer_sync(&ndev->cmd_timer);
                del_timer_sync(&ndev->data_timer);
                mutex_unlock(&ndev->req_lock);
index 7176156..4c09cf8 100644 (file)
@@ -2465,7 +2465,7 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
        new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
 
        if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
-               if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
+               if ((next_offset + req_size) > MAX_ACTIONS_BUFSIZE) {
                        OVS_NLERR(log, "Flow action size exceeds max %u",
                                  MAX_ACTIONS_BUFSIZE);
                        return ERR_PTR(-EMSGSIZE);
index c39c098..002d2b9 100644 (file)
@@ -2858,8 +2858,9 @@ tpacket_error:
 
                status = TP_STATUS_SEND_REQUEST;
                err = po->xmit(skb);
-               if (unlikely(err > 0)) {
-                       err = net_xmit_errno(err);
+               if (unlikely(err != 0)) {
+                       if (err > 0)
+                               err = net_xmit_errno(err);
                        if (err && __packet_get_status(po, ph) ==
                                   TP_STATUS_AVAILABLE) {
                                /* skb was destructed already */
@@ -3060,8 +3061,12 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
                skb->no_fcs = 1;
 
        err = po->xmit(skb);
-       if (err > 0 && (err = net_xmit_errno(err)) != 0)
-               goto out_unlock;
+       if (unlikely(err != 0)) {
+               if (err > 0)
+                       err = net_xmit_errno(err);
+               if (err)
+                       goto out_unlock;
+       }
 
        dev_put(dev);
 
index f15d694..cc7e307 100644 (file)
@@ -113,7 +113,9 @@ static __net_exit void rxrpc_exit_net(struct net *net)
        struct rxrpc_net *rxnet = rxrpc_net(net);
 
        rxnet->live = false;
+       del_timer_sync(&rxnet->peer_keepalive_timer);
        cancel_work_sync(&rxnet->peer_keepalive_work);
+       /* Remove the timer again as the worker may have restarted it. */
        del_timer_sync(&rxnet->peer_keepalive_timer);
        rxrpc_destroy_all_calls(rxnet);
        rxrpc_destroy_all_connections(rxnet);
index 2957f8f..f0699f3 100644 (file)
@@ -1672,10 +1672,10 @@ static int tcf_chain_tp_insert(struct tcf_chain *chain,
        if (chain->flushing)
                return -EAGAIN;
 
+       RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
        if (*chain_info->pprev == chain->filter_chain)
                tcf_chain0_head_change(chain, tp);
        tcf_proto_get(tp);
-       RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
        rcu_assign_pointer(*chain_info->pprev, tp);
 
        return 0;
index c80fc49..ed5e6f0 100644 (file)
@@ -1013,6 +1013,7 @@ static int fl_set_key_mpls(struct nlattr **tb,
 static void fl_set_key_vlan(struct nlattr **tb,
                            __be16 ethertype,
                            int vlan_id_key, int vlan_prio_key,
+                           int vlan_next_eth_type_key,
                            struct flow_dissector_key_vlan *key_val,
                            struct flow_dissector_key_vlan *key_mask)
 {
@@ -1031,6 +1032,11 @@ static void fl_set_key_vlan(struct nlattr **tb,
        }
        key_val->vlan_tpid = ethertype;
        key_mask->vlan_tpid = cpu_to_be16(~0);
+       if (tb[vlan_next_eth_type_key]) {
+               key_val->vlan_eth_type =
+                       nla_get_be16(tb[vlan_next_eth_type_key]);
+               key_mask->vlan_eth_type = cpu_to_be16(~0);
+       }
 }
 
 static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
@@ -1602,8 +1608,9 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
 
                if (eth_type_vlan(ethertype)) {
                        fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
-                                       TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
-                                       &mask->vlan);
+                                       TCA_FLOWER_KEY_VLAN_PRIO,
+                                       TCA_FLOWER_KEY_VLAN_ETH_TYPE,
+                                       &key->vlan, &mask->vlan);
 
                        if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
                                ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
@@ -1611,6 +1618,7 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
                                        fl_set_key_vlan(tb, ethertype,
                                                        TCA_FLOWER_KEY_CVLAN_ID,
                                                        TCA_FLOWER_KEY_CVLAN_PRIO,
+                                                       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
                                                        &key->cvlan, &mask->cvlan);
                                        fl_set_key_val(tb, &key->basic.n_proto,
                                                       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
@@ -3002,13 +3010,13 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net,
                goto nla_put_failure;
 
        if (mask->basic.n_proto) {
-               if (mask->cvlan.vlan_tpid) {
+               if (mask->cvlan.vlan_eth_type) {
                        if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
                                         key->basic.n_proto))
                                goto nla_put_failure;
-               } else if (mask->vlan.vlan_tpid) {
+               } else if (mask->vlan.vlan_eth_type) {
                        if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
-                                        key->basic.n_proto))
+                                        key->vlan.vlan_eth_type))
                                goto nla_put_failure;
                }
        }
index cf56492..4d27300 100644 (file)
@@ -386,14 +386,19 @@ static int u32_init(struct tcf_proto *tp)
        return 0;
 }
 
-static int u32_destroy_key(struct tc_u_knode *n, bool free_pf)
+static void __u32_destroy_key(struct tc_u_knode *n)
 {
        struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
 
        tcf_exts_destroy(&n->exts);
-       tcf_exts_put_net(&n->exts);
        if (ht && --ht->refcnt == 0)
                kfree(ht);
+       kfree(n);
+}
+
+static void u32_destroy_key(struct tc_u_knode *n, bool free_pf)
+{
+       tcf_exts_put_net(&n->exts);
 #ifdef CONFIG_CLS_U32_PERF
        if (free_pf)
                free_percpu(n->pf);
@@ -402,8 +407,7 @@ static int u32_destroy_key(struct tc_u_knode *n, bool free_pf)
        if (free_pf)
                free_percpu(n->pcpu_success);
 #endif
-       kfree(n);
-       return 0;
+       __u32_destroy_key(n);
 }
 
 /* u32_delete_key_rcu should be called when free'ing a copied
@@ -811,10 +815,6 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
        new->flags = n->flags;
        RCU_INIT_POINTER(new->ht_down, ht);
 
-       /* bump reference count as long as we hold pointer to structure */
-       if (ht)
-               ht->refcnt++;
-
 #ifdef CONFIG_CLS_U32_PERF
        /* Statistics may be incremented by readers during update
         * so we must keep them intact. When the node is later destroyed
@@ -836,6 +836,10 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
                return NULL;
        }
 
+       /* bump reference count as long as we hold pointer to structure */
+       if (ht)
+               ht->refcnt++;
+
        return new;
 }
 
@@ -900,13 +904,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
                                    extack);
 
                if (err) {
-                       u32_destroy_key(new, false);
+                       __u32_destroy_key(new);
                        return err;
                }
 
                err = u32_replace_hw_knode(tp, new, flags, extack);
                if (err) {
-                       u32_destroy_key(new, false);
+                       __u32_destroy_key(new);
                        return err;
                }
 
index 377f896..b9c71a3 100644 (file)
@@ -417,7 +417,8 @@ static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
 {
        struct taprio_sched *q = qdisc_priv(sch);
 
-       if (skb->sk && sock_flag(skb->sk, SOCK_TXTIME)) {
+       /* sk_flags are only safe to use on full sockets. */
+       if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) {
                if (!is_valid_interval(skb, sch))
                        return qdisc_drop(skb, sch, to_free);
        } else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
index 7f342bc..52edee1 100644 (file)
@@ -781,7 +781,7 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
                }
        }
 
-       if (security_sctp_assoc_request(new_asoc, chunk->skb)) {
+       if (security_sctp_assoc_request(new_asoc, chunk->head_skb ?: chunk->skb)) {
                sctp_association_free(new_asoc);
                return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
@@ -932,7 +932,7 @@ enum sctp_disposition sctp_sf_do_5_1E_ca(struct net *net,
 
        /* Set peer label for connection. */
        if (security_sctp_assoc_established((struct sctp_association *)asoc,
-                                           chunk->skb))
+                                           chunk->head_skb ?: chunk->skb))
                return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Verify that the chunk length for the COOKIE-ACK is OK.
@@ -2262,7 +2262,7 @@ enum sctp_disposition sctp_sf_do_5_2_4_dupcook(
        }
 
        /* Update socket peer label if first association. */
-       if (security_sctp_assoc_request(new_asoc, chunk->skb)) {
+       if (security_sctp_assoc_request(new_asoc, chunk->head_skb ?: chunk->skb)) {
                sctp_association_free(new_asoc);
                return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
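
The chunk->head_skb ?: chunk->skb expression above uses GCC's conditional operator with an omitted middle operand: a ?: b yields a when a is non-NULL/non-zero and b otherwise, evaluating a only once. A minimal standalone sketch of just that semantic (pick() and the strings are illustrative, not SCTP code):

/* GCC extension: x ?: y behaves like x ? x : y, with x evaluated once. */
#include <stdio.h>
#include <stddef.h>

static const char *pick(const char *preferred, const char *fallback)
{
        return preferred ?: fallback;
}

int main(void)
{
        printf("%s\n", pick(NULL, "fallback skb"));        /* prints "fallback skb" */
        printf("%s\n", pick("head skb", "fallback skb"));  /* prints "head skb" */
        return 0;
}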
index 3e1a960..7b04276 100644 (file)
@@ -5636,7 +5636,7 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
         * Set the daddr and initialize id to something more random and also
         * copy over any ip options.
         */
-       sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk);
+       sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sock->sk);
        sp->pf->copy_ip_options(sk, sock->sk);
 
        /* Populate the fields of the newsk from the oldsk and migrate the
index f0d118e..fc7b6eb 100644 (file)
@@ -121,6 +121,7 @@ static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk,
                                          bool *own_req)
 {
        struct smc_sock *smc;
+       struct sock *child;
 
        smc = smc_clcsock_user_data(sk);
 
@@ -134,8 +135,17 @@ static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk,
        }
 
        /* passthrough to original syn recv sock fct */
-       return smc->ori_af_ops->syn_recv_sock(sk, skb, req, dst, req_unhash,
-                                             own_req);
+       child = smc->ori_af_ops->syn_recv_sock(sk, skb, req, dst, req_unhash,
+                                              own_req);
+       /* child must not inherit smc or its ops */
+       if (child) {
+               rcu_assign_sk_user_data(child, NULL);
+
+               /* v4-mapped sockets don't inherit parent ops. Don't restore. */
+               if (inet_csk(child)->icsk_af_ops == inet_csk(sk)->icsk_af_ops)
+                       inet_csk(child)->icsk_af_ops = smc->ori_af_ops;
+       }
+       return child;
 
 drop:
        dst_release(dst);
@@ -2664,8 +2674,10 @@ static int smc_shutdown(struct socket *sock, int how)
        if (smc->use_fallback) {
                rc = kernel_sock_shutdown(smc->clcsock, how);
                sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
-               if (sk->sk_shutdown == SHUTDOWN_MASK)
+               if (sk->sk_shutdown == SHUTDOWN_MASK) {
                        sk->sk_state = SMC_CLOSED;
+                       sock_put(sk);
+               }
                goto out;
        }
        switch (how) {
index ce27399..f9f3f59 100644 (file)
@@ -191,7 +191,8 @@ static int smc_nl_ueid_dumpinfo(struct sk_buff *skb, u32 portid, u32 seq,
                          flags, SMC_NETLINK_DUMP_UEID);
        if (!hdr)
                return -ENOMEM;
-       snprintf(ueid_str, sizeof(ueid_str), "%s", ueid);
+       memcpy(ueid_str, ueid, SMC_MAX_EID_LEN);
+       ueid_str[SMC_MAX_EID_LEN] = 0;
        if (nla_put_string(skb, SMC_NLA_EID_TABLE_ENTRY, ueid_str)) {
                genlmsg_cancel(skb, hdr);
                return -EMSGSIZE;
@@ -252,7 +253,8 @@ int smc_nl_dump_seid(struct sk_buff *skb, struct netlink_callback *cb)
                goto end;
 
        smc_ism_get_system_eid(&seid);
-       snprintf(seid_str, sizeof(seid_str), "%s", seid);
+       memcpy(seid_str, seid, SMC_MAX_EID_LEN);
+       seid_str[SMC_MAX_EID_LEN] = 0;
        if (nla_put_string(skb, SMC_NLA_SEID_ENTRY, seid_str))
                goto err;
        read_lock(&smc_clc_eid_table.lock);
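
The UEID/SEID table entries are fixed-width fields that are not guaranteed to be NUL-terminated, so formatting them with snprintf("%s", ...) could read past the end of the source buffer; copying exactly SMC_MAX_EID_LEN bytes and terminating explicitly avoids that. A small userspace sketch of the same pattern (EID_LEN and the sample contents are stand-ins, not the real SMC definitions):

/* Copying a fixed-size, possibly non-NUL-terminated field safely. */
#include <stdio.h>
#include <string.h>

#define EID_LEN 8   /* stand-in for SMC_MAX_EID_LEN */

int main(void)
{
        /* Source field filled to its full width, with no terminating NUL. */
        char eid[EID_LEN] = { 'E', 'X', 'A', 'M', 'P', 'L', 'E', '1' };
        char eid_str[EID_LEN + 1];

        /* snprintf(eid_str, sizeof(eid_str), "%s", eid) would scan past eid[7]
         * looking for a NUL; a bounded copy plus explicit terminator does not. */
        memcpy(eid_str, eid, EID_LEN);
        eid_str[EID_LEN] = '\0';

        printf("%s\n", eid_str);
        return 0;
}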
index 7984f88..7055ed1 100644 (file)
@@ -311,8 +311,9 @@ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name)
        list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
                if (!strncmp(ibdev->ibdev->name, ib_name,
                             sizeof(ibdev->ibdev->name)) ||
-                   !strncmp(dev_name(ibdev->ibdev->dev.parent), ib_name,
-                            IB_DEVICE_NAME_MAX - 1)) {
+                   (ibdev->ibdev->dev.parent &&
+                    !strncmp(dev_name(ibdev->ibdev->dev.parent), ib_name,
+                            IB_DEVICE_NAME_MAX - 1))) {
                        goto out;
                }
        }
index 297c498..5b59e21 100644 (file)
@@ -1231,6 +1231,8 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
                dr->daddr = rqstp->rq_daddr;
                dr->argslen = rqstp->rq_arg.len >> 2;
                dr->xprt_hlen = rqstp->rq_xprt_hlen;
+               dr->xprt_ctxt = rqstp->rq_xprt_ctxt;
+               rqstp->rq_xprt_ctxt = NULL;
 
                /* back up head to the start of the buffer and copy */
                skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
@@ -1269,6 +1271,7 @@ static noinline int svc_deferred_recv(struct svc_rqst *rqstp)
        rqstp->rq_xprt_hlen   = dr->xprt_hlen;
        rqstp->rq_daddr       = dr->daddr;
        rqstp->rq_respages    = rqstp->rq_pages;
+       rqstp->rq_xprt_ctxt   = dr->xprt_ctxt;
        svc_xprt_received(rqstp->rq_xprt);
        return (dr->argslen<<2) - dr->xprt_hlen;
 }
index cf76a6a..864131a 100644 (file)
@@ -831,7 +831,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
                goto out_err;
        if (ret == 0)
                goto out_drop;
-       rqstp->rq_xprt_hlen = ret;
+       rqstp->rq_xprt_hlen = 0;
 
        if (svc_rdma_is_reverse_direction_reply(xprt, ctxt))
                goto out_backchannel;
index ee1c2b6..21e808f 100644 (file)
@@ -528,7 +528,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
                                   .len = IEEE80211_MAX_MESH_ID_LEN },
        [NL80211_ATTR_MPATH_NEXT_HOP] = NLA_POLICY_ETH_ADDR_COMPAT,
 
-       [NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 },
+       /* allow 3 for NUL-termination, we used to declare this NLA_STRING */
+       [NL80211_ATTR_REG_ALPHA2] = NLA_POLICY_RANGE(NLA_BINARY, 2, 3),
        [NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED },
 
        [NL80211_ATTR_BSS_CTS_PROT] = { .type = NLA_U8 },
index b2fdac9..4a6d864 100644 (file)
@@ -2018,11 +2018,13 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
                /* this is a nontransmitting bss, we need to add it to
                 * transmitting bss' list if it is not there
                 */
+               spin_lock_bh(&rdev->bss_lock);
                if (cfg80211_add_nontrans_list(non_tx_data->tx_bss,
                                               &res->pub)) {
                        if (__cfg80211_unlink_bss(rdev, res))
                                rdev->bss_generation++;
                }
+               spin_unlock_bh(&rdev->bss_lock);
        }
 
        trace_cfg80211_return_bss(&res->pub);
index 19aa994..00bd0ec 100644 (file)
@@ -2593,12 +2593,14 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
 
                if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
                        __u32 mark = 0;
+                       int oif;
 
                        if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
                                mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
 
                        family = xfrm[i]->props.family;
-                       dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
+                       oif = fl->flowi_oif ? : fl->flowi_l3mdev;
+                       dst = xfrm_dst_lookup(xfrm[i], tos, oif,
                                              &saddr, &daddr, family, mark);
                        err = PTR_ERR(dst);
                        if (IS_ERR(dst))
index 589454b..8425da4 100644 (file)
@@ -86,25 +86,31 @@ static struct plugin_info latent_entropy_plugin_info = {
        .help           = "disable\tturn off latent entropy instrumentation\n",
 };
 
-static unsigned HOST_WIDE_INT seed;
-/*
- * get_random_seed() (this is a GCC function) generates the seed.
- * This is a simple random generator without any cryptographic security because
- * the entropy doesn't come from here.
- */
+static unsigned HOST_WIDE_INT deterministic_seed;
+static unsigned HOST_WIDE_INT rnd_buf[32];
+static size_t rnd_idx = ARRAY_SIZE(rnd_buf);
+static int urandom_fd = -1;
+
 static unsigned HOST_WIDE_INT get_random_const(void)
 {
-       unsigned int i;
-       unsigned HOST_WIDE_INT ret = 0;
-
-       for (i = 0; i < 8 * sizeof(ret); i++) {
-               ret = (ret << 1) | (seed & 1);
-               seed >>= 1;
-               if (ret & 1)
-                       seed ^= 0xD800000000000000ULL;
+       if (deterministic_seed) {
+               unsigned HOST_WIDE_INT w = deterministic_seed;
+               w ^= w << 13;
+               w ^= w >> 7;
+               w ^= w << 17;
+               deterministic_seed = w;
+               return deterministic_seed;
        }
 
-       return ret;
+       if (urandom_fd < 0) {
+               urandom_fd = open("/dev/urandom", O_RDONLY);
+               gcc_assert(urandom_fd >= 0);
+       }
+       if (rnd_idx >= ARRAY_SIZE(rnd_buf)) {
+               gcc_assert(read(urandom_fd, rnd_buf, sizeof(rnd_buf)) == sizeof(rnd_buf));
+               rnd_idx = 0;
+       }
+       return rnd_buf[rnd_idx++];
 }
 
 static tree tree_get_random_const(tree type)
@@ -537,8 +543,6 @@ static void latent_entropy_start_unit(void *gcc_data __unused,
        tree type, id;
        int quals;
 
-       seed = get_random_seed(false);
-
        if (in_lto_p)
                return;
 
@@ -573,6 +577,12 @@ __visible int plugin_init(struct plugin_name_args *plugin_info,
        const struct plugin_argument * const argv = plugin_info->argv;
        int i;
 
+       /*
+        * Call get_random_seed() with noinit=true, so that this returns
+        * 0 in the case where no seed has been passed via -frandom-seed.
+        */
+       deterministic_seed = get_random_seed(true);
+
        static const struct ggc_root_tab gt_ggc_r_gt_latent_entropy[] = {
                {
                        .base = &latent_entropy_decl,
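
With this change the plugin derives its "random" constants from a deterministic xorshift-style sequence whenever a seed was passed via -frandom-seed (so builds are reproducible), and from /dev/urandom in 32-word batches otherwise. A standalone sketch of the deterministic step, assuming a 64-bit HOST_WIDE_INT (the starting seed below is just an example value):

/* xorshift64-style step as used for the deterministic (-frandom-seed) path. */
#include <stdint.h>
#include <stdio.h>

static uint64_t deterministic_seed = 0x123456789abcdef0ULL; /* example seed */

static uint64_t next_const(void)
{
        uint64_t w = deterministic_seed;

        w ^= w << 13;
        w ^= w >> 7;
        w ^= w << 17;
        deterministic_seed = w;
        return w;
}

int main(void)
{
        for (int i = 0; i < 4; i++)
                printf("%016llx\n", (unsigned long long)next_const());
        return 0;
}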
index 31ba702..726a835 100644 (file)
@@ -209,6 +209,12 @@ static void __snd_card_release(struct device *dev, void *data)
  * snd_card_register(), the very first devres action to call snd_card_free()
  * is added automatically.  In that way, the resource disconnection is assured
  * at first, then released in the expected order.
+ *
+ * If an error happens in the probe before snd_card_register() is called and
+ * other devres resources have already been acquired, the card must be freed
+ * manually via snd_card_free() in the error path; otherwise the devres
+ * release order may lead to a use-after-free.  The snd_card_free_on_error()
+ * helper makes this handling easier.
  */
 int snd_devm_card_new(struct device *parent, int idx, const char *xid,
                      struct module *module, size_t extra_size,
@@ -235,6 +241,28 @@ int snd_devm_card_new(struct device *parent, int idx, const char *xid,
 }
 EXPORT_SYMBOL_GPL(snd_devm_card_new);
 
+/**
+ * snd_card_free_on_error - a small helper for handling devm probe errors
+ * @dev: the managed device object
+ * @ret: the return code from the probe callback
+ *
+ * This function handles the explicit snd_card_free() call at the error from
+ * the probe callback.  It's just a small helper for simplifying the error
+ * handling with the managed devices.
+ */
+int snd_card_free_on_error(struct device *dev, int ret)
+{
+       struct snd_card *card;
+
+       if (!ret)
+               return 0;
+       card = devres_find(dev, __snd_card_release, NULL, NULL);
+       if (card)
+               snd_card_free(card);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(snd_card_free_on_error);
+
 static int snd_card_init(struct snd_card *card, struct device *parent,
                         int idx, const char *xid, struct module *module,
                         size_t extra_size)
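
snd_card_free_on_error() enables the probe-wrapper conversions seen throughout the sound/ changes below: the real probe body becomes __foo_probe(), and a thin foo_probe() passes its result through the helper so an error frees the card before other devres resources unwind. A hedged userspace analogue of that control flow (free_on_error(), __example_probe() and the global card pointer are illustrative stand-ins, not ALSA API):

/* Userspace analogue of the __probe()/probe() wrapper pattern. */
#include <stdio.h>
#include <stdlib.h>

struct card { int dummy; };

static struct card *the_card;

static int free_on_error(int ret)
{
        if (!ret)
                return 0;
        free(the_card);          /* explicit cleanup on the error path */
        the_card = NULL;
        return ret;
}

static int __example_probe(int fail)
{
        the_card = calloc(1, sizeof(*the_card));
        if (!the_card)
                return -1;
        if (fail)                /* a later setup step failed */
                return -1;
        return 0;                /* success: the card stays allocated */
}

static int example_probe(int fail)
{
        return free_on_error(__example_probe(fail));
}

int main(void)
{
        int ret = example_probe(1);   /* error path: card freed by the wrapper */
        printf("failing probe: ret=%d card=%p\n", ret, (void *)the_card);

        ret = example_probe(0);       /* success path: card kept for later use */
        printf("working probe: ret=%d card=%p\n", ret, (void *)the_card);
        free(the_card);
        return 0;
}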
index 6fd763d..15dc716 100644 (file)
@@ -499,6 +499,10 @@ static const struct snd_malloc_ops snd_dma_wc_ops = {
 };
 #endif /* CONFIG_X86 */
 
+#ifdef CONFIG_SND_DMA_SGBUF
+static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
+#endif
+
 /*
  * Non-contiguous pages allocator
  */
@@ -509,8 +513,18 @@ static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
 
        sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
                                      DEFAULT_GFP, 0);
-       if (!sgt)
+       if (!sgt) {
+#ifdef CONFIG_SND_DMA_SGBUF
+               if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
+                       dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
+               else
+                       dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
+               return snd_dma_sg_fallback_alloc(dmab, size);
+#else
                return NULL;
+#endif
+       }
+
        dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
                                            sg_dma_address(sgt->sgl));
        p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
@@ -633,6 +647,8 @@ static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
 
        if (!p)
                return NULL;
+       if (dmab->dev.type != SNDRV_DMA_TYPE_DEV_WC_SG)
+               return p;
        for_each_sgtable_page(sgt, &iter, 0)
                set_memory_wc(sg_wc_address(&iter), 1);
        return p;
@@ -665,6 +681,95 @@ static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
        .get_page = snd_dma_noncontig_get_page,
        .get_chunk_size = snd_dma_noncontig_get_chunk_size,
 };
+
+/* Fallback SG-buffer allocations for x86 */
+struct snd_dma_sg_fallback {
+       size_t count;
+       struct page **pages;
+       dma_addr_t *addrs;
+};
+
+static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
+                                      struct snd_dma_sg_fallback *sgbuf)
+{
+       size_t i;
+
+       if (sgbuf->count && dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
+               set_pages_array_wb(sgbuf->pages, sgbuf->count);
+       for (i = 0; i < sgbuf->count && sgbuf->pages[i]; i++)
+               dma_free_coherent(dmab->dev.dev, PAGE_SIZE,
+                                 page_address(sgbuf->pages[i]),
+                                 sgbuf->addrs[i]);
+       kvfree(sgbuf->pages);
+       kvfree(sgbuf->addrs);
+       kfree(sgbuf);
+}
+
+static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+       struct snd_dma_sg_fallback *sgbuf;
+       struct page **pages;
+       size_t i, count;
+       void *p;
+
+       sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
+       if (!sgbuf)
+               return NULL;
+       count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
+       if (!pages)
+               goto error;
+       sgbuf->pages = pages;
+       sgbuf->addrs = kvcalloc(count, sizeof(*sgbuf->addrs), GFP_KERNEL);
+       if (!sgbuf->addrs)
+               goto error;
+
+       for (i = 0; i < count; sgbuf->count++, i++) {
+               p = dma_alloc_coherent(dmab->dev.dev, PAGE_SIZE,
+                                      &sgbuf->addrs[i], DEFAULT_GFP);
+               if (!p)
+                       goto error;
+               sgbuf->pages[i] = virt_to_page(p);
+       }
+
+       if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
+               set_pages_array_wc(pages, count);
+       p = vmap(pages, count, VM_MAP, PAGE_KERNEL);
+       if (!p)
+               goto error;
+       dmab->private_data = sgbuf;
+       return p;
+
+ error:
+       __snd_dma_sg_fallback_free(dmab, sgbuf);
+       return NULL;
+}
+
+static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
+{
+       vunmap(dmab->area);
+       __snd_dma_sg_fallback_free(dmab, dmab->private_data);
+}
+
+static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
+                                   struct vm_area_struct *area)
+{
+       struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
+
+       if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
+               area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
+       return vm_map_pages(area, sgbuf->pages, sgbuf->count);
+}
+
+static const struct snd_malloc_ops snd_dma_sg_fallback_ops = {
+       .alloc = snd_dma_sg_fallback_alloc,
+       .free = snd_dma_sg_fallback_free,
+       .mmap = snd_dma_sg_fallback_mmap,
+       /* reuse vmalloc helpers */
+       .get_addr = snd_dma_vmalloc_get_addr,
+       .get_page = snd_dma_vmalloc_get_page,
+       .get_chunk_size = snd_dma_vmalloc_get_chunk_size,
+};
 #endif /* CONFIG_SND_DMA_SGBUF */
 
 /*
@@ -736,6 +841,10 @@ static const struct snd_malloc_ops *dma_ops[] = {
 #ifdef CONFIG_GENERIC_ALLOCATOR
        [SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
 #endif /* CONFIG_GENERIC_ALLOCATOR */
+#ifdef CONFIG_SND_DMA_SGBUF
+       [SNDRV_DMA_TYPE_DEV_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
+       [SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
+#endif
 #endif /* CONFIG_HAS_DMA */
 };
 
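
snd_dma_sg_fallback_alloc() builds the buffer one page at a time and only advances sgbuf->count after each page succeeds, so the shared error/free path releases exactly the pages that were actually allocated. A small userspace sketch of that partial-allocation cleanup pattern (plain malloc() stands in for dma_alloc_coherent(); fallback_alloc(), fallback_free() and fail_at are illustrative):

/* Partial-allocation cleanup: free exactly what was allocated so far. */
#include <stdio.h>
#include <stdlib.h>

struct fallback_buf {
        size_t count;       /* number of chunks successfully allocated */
        void **chunks;
};

static void fallback_free(struct fallback_buf *buf)
{
        for (size_t i = 0; i < buf->count; i++)
                free(buf->chunks[i]);
        free(buf->chunks);
        free(buf);
}

static struct fallback_buf *fallback_alloc(size_t want, size_t fail_at)
{
        struct fallback_buf *buf = calloc(1, sizeof(*buf));

        if (!buf)
                return NULL;
        buf->chunks = calloc(want, sizeof(*buf->chunks));
        if (!buf->chunks)
                goto error;

        for (size_t i = 0; i < want; buf->count++, i++) {
                /* fail_at simulates the allocator running out of memory */
                void *p = (i == fail_at) ? NULL : malloc(4096);

                if (!p)
                        goto error;
                buf->chunks[i] = p;
        }
        return buf;

error:
        fallback_free(buf);   /* frees only the chunks counted so far */
        return NULL;
}

int main(void)
{
        struct fallback_buf *buf = fallback_alloc(8, 5); /* fails on the 6th chunk */

        printf("allocation %s\n", buf ? "succeeded" : "failed and was cleaned up");
        if (buf)
                fallback_free(buf);
        return 0;
}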
index 4866aed..5588b6a 100644 (file)
@@ -433,7 +433,7 @@ int snd_pcm_format_set_silence(snd_pcm_format_t format, void *data, unsigned int
                return 0;
        width = pcm_formats[(INT)format].phys; /* physical width */
        pat = pcm_formats[(INT)format].silence;
-       if (! width)
+       if (!width || !pat)
                return -EINVAL;
        /* signed or 1 byte data */
        if (pcm_formats[(INT)format].signd == 1 || width <= 8) {
index 11235ba..f212f23 100644 (file)
@@ -693,8 +693,6 @@ static int snd_mtpav_probe(struct platform_device *dev)
        mtp_card->outmidihwport = 0xffffffff;
        timer_setup(&mtp_card->timer, snd_mtpav_output_timer, 0);
 
-       card->private_free = snd_mtpav_free;
-
        err = snd_mtpav_get_RAWMIDI(mtp_card);
        if (err < 0)
                return err;
@@ -716,6 +714,8 @@ static int snd_mtpav_probe(struct platform_device *dev)
        if (err < 0)
                return err;
 
+       card->private_free = snd_mtpav_free;
+
        platform_set_drvdata(dev, card);
        printk(KERN_INFO "Motu MidiTimePiece on parallel port irq: %d ioport: 0x%lx\n", irq, port);
        return 0;
index efe810a..3f35972 100644 (file)
@@ -116,16 +116,24 @@ static int i915_component_master_match(struct device *dev, int subcomponent,
        return 0;
 }
 
-/* check whether intel graphics is present */
-static bool i915_gfx_present(void)
+/* check whether Intel graphics is present and reachable */
+static int i915_gfx_present(struct pci_dev *hdac_pci)
 {
-       static const struct pci_device_id ids[] = {
-               { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_ANY_ID),
-                 .class = PCI_BASE_CLASS_DISPLAY << 16,
-                 .class_mask = 0xff << 16 },
-               {}
-       };
-       return pci_dev_present(ids);
+       unsigned int class = PCI_BASE_CLASS_DISPLAY << 16;
+       struct pci_dev *display_dev = NULL;
+       bool match = false;
+
+       do {
+               display_dev = pci_get_class(class, display_dev);
+
+               if (display_dev && display_dev->vendor == PCI_VENDOR_ID_INTEL &&
+                   connectivity_check(display_dev, hdac_pci)) {
+                       pci_dev_put(display_dev);
+                       match = true;
+               }
+       } while (!match && display_dev);
+
+       return match;
 }
 
 /**
@@ -145,7 +153,7 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
        struct drm_audio_component *acomp;
        int err;
 
-       if (!i915_gfx_present())
+       if (!i915_gfx_present(to_pci_dev(bus->dev)))
                return -ENODEV;
 
        err = snd_hdac_acomp_init(bus, NULL,
index 70fd8b1..a8fe017 100644 (file)
@@ -390,26 +390,49 @@ static const struct config_entry config_table[] = {
 
 /* Alder Lake */
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_ALDERLAKE)
+       /* Alderlake-S */
        {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0x7ad0,
        },
+       /* RaptorLake-S */
        {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
-               .device = 0x51c8,
+               .device = 0x7a50,
        },
+       /* Alderlake-P */
        {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
-               .device = 0x51cc,
+               .device = 0x51c8,
        },
        {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0x51cd,
        },
+       /* Alderlake-PS */
+       {
+               .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+               .device = 0x51c9,
+       },
+       /* Alderlake-M */
+       {
+               .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+               .device = 0x51cc,
+       },
+       /* Alderlake-N */
        {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0x54c8,
        },
+       /* RaptorLake-P */
+       {
+               .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+               .device = 0x51ca,
+       },
+       {
+               .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+               .device = 0x51cb,
+       },
 #endif
 
 };
index ea001c8..3164eb8 100644 (file)
@@ -478,7 +478,7 @@ static void snd_galaxy_free(struct snd_card *card)
                galaxy_set_config(galaxy, galaxy->config);
 }
 
-static int snd_galaxy_probe(struct device *dev, unsigned int n)
+static int __snd_galaxy_probe(struct device *dev, unsigned int n)
 {
        struct snd_galaxy *galaxy;
        struct snd_wss *chip;
@@ -598,6 +598,11 @@ static int snd_galaxy_probe(struct device *dev, unsigned int n)
        return 0;
 }
 
+static int snd_galaxy_probe(struct device *dev, unsigned int n)
+{
+       return snd_card_free_on_error(dev, __snd_galaxy_probe(dev, n));
+}
+
 static struct isa_driver snd_galaxy_driver = {
        .match          = snd_galaxy_match,
        .probe          = snd_galaxy_probe,
index 26ab7ff..60398fc 100644 (file)
@@ -537,7 +537,7 @@ static void snd_sc6000_free(struct snd_card *card)
                sc6000_setup_board(vport, 0);
 }
 
-static int snd_sc6000_probe(struct device *devptr, unsigned int dev)
+static int __snd_sc6000_probe(struct device *devptr, unsigned int dev)
 {
        static const int possible_irqs[] = { 5, 7, 9, 10, 11, -1 };
        static const int possible_dmas[] = { 1, 3, 0, -1 };
@@ -662,6 +662,11 @@ static int snd_sc6000_probe(struct device *devptr, unsigned int dev)
        return 0;
 }
 
+static int snd_sc6000_probe(struct device *devptr, unsigned int dev)
+{
+       return snd_card_free_on_error(devptr, __snd_sc6000_probe(devptr, dev));
+}
+
 static struct isa_driver snd_sc6000_driver = {
        .match          = snd_sc6000_match,
        .probe          = snd_sc6000_probe,
index c1c52b4..ad8ce6a 100644 (file)
@@ -88,11 +88,7 @@ static inline int ioctl_return(int __user *addr, int value)
      */
 
 extern int dmasound_init(void);
-#ifdef MODULE
 extern void dmasound_deinit(void);
-#else
-#define dmasound_deinit()      do { } while (0)
-#endif
 
 /* description of the set-up applies to either hard or soft settings */
 
@@ -114,9 +110,7 @@ typedef struct {
     void *(*dma_alloc)(unsigned int, gfp_t);
     void (*dma_free)(void *, unsigned int);
     int (*irqinit)(void);
-#ifdef MODULE
     void (*irqcleanup)(void);
-#endif
     void (*init)(void);
     void (*silence)(void);
     int (*setFormat)(int);
index 0c95828..164335d 100644 (file)
@@ -206,12 +206,10 @@ module_param(writeBufSize, int, 0);
 
 MODULE_LICENSE("GPL");
 
-#ifdef MODULE
 static int sq_unit = -1;
 static int mixer_unit = -1;
 static int state_unit = -1;
 static int irq_installed;
-#endif /* MODULE */
 
 /* control over who can modify resources shared between play/record */
 static fmode_t shared_resource_owner;
@@ -391,9 +389,6 @@ static const struct file_operations mixer_fops =
 
 static void mixer_init(void)
 {
-#ifndef MODULE
-       int mixer_unit;
-#endif
        mixer_unit = register_sound_mixer(&mixer_fops, -1);
        if (mixer_unit < 0)
                return;
@@ -1171,9 +1166,6 @@ static const struct file_operations sq_fops =
 static int sq_init(void)
 {
        const struct file_operations *fops = &sq_fops;
-#ifndef MODULE
-       int sq_unit;
-#endif
 
        sq_unit = register_sound_dsp(fops, -1);
        if (sq_unit < 0) {
@@ -1366,9 +1358,6 @@ static const struct file_operations state_fops = {
 
 static int state_init(void)
 {
-#ifndef MODULE
-       int state_unit;
-#endif
        state_unit = register_sound_special(&state_fops, SND_DEV_STATUS);
        if (state_unit < 0)
                return state_unit ;
@@ -1386,10 +1375,9 @@ static int state_init(void)
 int dmasound_init(void)
 {
        int res ;
-#ifdef MODULE
+
        if (irq_installed)
                return -EBUSY;
-#endif
 
        /* Set up sound queue, /dev/audio and /dev/dsp. */
 
@@ -1408,9 +1396,7 @@ int dmasound_init(void)
                printk(KERN_ERR "DMA sound driver: Interrupt initialization failed\n");
                return -ENODEV;
        }
-#ifdef MODULE
        irq_installed = 1;
-#endif
 
        printk(KERN_INFO "%s DMA sound driver rev %03d installed\n",
                dmasound.mach.name, (DMASOUND_CORE_REVISION<<4) +
@@ -1424,8 +1410,6 @@ int dmasound_init(void)
        return 0;
 }
 
-#ifdef MODULE
-
 void dmasound_deinit(void)
 {
        if (irq_installed) {
@@ -1444,9 +1428,7 @@ void dmasound_deinit(void)
                unregister_sound_dsp(sq_unit);
 }
 
-#else /* !MODULE */
-
-static int dmasound_setup(char *str)
+static int __maybe_unused dmasound_setup(char *str)
 {
        int ints[6], size;
 
@@ -1489,8 +1471,6 @@ static int dmasound_setup(char *str)
 
 __setup("dmasound=", dmasound_setup);
 
-#endif /* !MODULE */
-
     /*
      *  Conversion tables
      */
@@ -1577,9 +1557,7 @@ char dmasound_alaw2dma8[] = {
 
 EXPORT_SYMBOL(dmasound);
 EXPORT_SYMBOL(dmasound_init);
-#ifdef MODULE
 EXPORT_SYMBOL(dmasound_deinit);
-#endif
 EXPORT_SYMBOL(dmasound_write_sq);
 EXPORT_SYMBOL(dmasound_catchRadius);
 #ifdef HAS_8BIT_TABLES
index bba4dae..50e3070 100644 (file)
@@ -844,8 +844,8 @@ snd_ad1889_create(struct snd_card *card, struct pci_dev *pci)
 }
 
 static int
-snd_ad1889_probe(struct pci_dev *pci,
-                const struct pci_device_id *pci_id)
+__snd_ad1889_probe(struct pci_dev *pci,
+                  const struct pci_device_id *pci_id)
 {
        int err;
        static int devno;
@@ -904,6 +904,12 @@ snd_ad1889_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_ad1889_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_ad1889_probe(pci, pci_id));
+}
+
 static const struct pci_device_id snd_ad1889_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_ANALOG_DEVICES, PCI_DEVICE_ID_AD1889JS) },
        { 0, },
index 92eb59d..2378a39 100644 (file)
@@ -2124,8 +2124,8 @@ static int snd_ali_create(struct snd_card *card,
        return 0;
 }
 
-static int snd_ali_probe(struct pci_dev *pci,
-                        const struct pci_device_id *pci_id)
+static int __snd_ali_probe(struct pci_dev *pci,
+                          const struct pci_device_id *pci_id)
 {
        struct snd_card *card;
        struct snd_ali *codec;
@@ -2170,6 +2170,12 @@ static int snd_ali_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_ali_probe(struct pci_dev *pci,
+                        const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_ali_probe(pci, pci_id));
+}
+
 static struct pci_driver ali5451_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_ali_ids,
index b86565d..c70aff0 100644 (file)
@@ -708,7 +708,7 @@ static int snd_als300_probe(struct pci_dev *pci,
 
        err = snd_als300_create(card, pci, chip_type);
        if (err < 0)
-               return err;
+               goto error;
 
        strcpy(card->driver, "ALS300");
        if (chip->chip_type == DEVICE_ALS300_PLUS)
@@ -723,11 +723,15 @@ static int snd_als300_probe(struct pci_dev *pci,
 
        err = snd_card_register(card);
        if (err < 0)
-               return err;
+               goto error;
 
        pci_set_drvdata(pci, card);
        dev++;
        return 0;
+
+ error:
+       snd_card_free(card);
+       return err;
 }
 
 static struct pci_driver als300_driver = {
index 535eccd..f33aeb6 100644 (file)
@@ -806,8 +806,8 @@ static void snd_card_als4000_free( struct snd_card *card )
        snd_als4000_free_gameport(acard);
 }
 
-static int snd_card_als4000_probe(struct pci_dev *pci,
-                                 const struct pci_device_id *pci_id)
+static int __snd_card_als4000_probe(struct pci_dev *pci,
+                                   const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -930,6 +930,12 @@ static int snd_card_als4000_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_card_als4000_probe(struct pci_dev *pci,
+                                 const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_card_als4000_probe(pci, pci_id));
+}
+
 #ifdef CONFIG_PM_SLEEP
 static int snd_als4000_suspend(struct device *dev)
 {
index b8e035d..43d01f1 100644 (file)
@@ -1572,8 +1572,8 @@ static int snd_atiixp_init(struct snd_card *card, struct pci_dev *pci)
 }
 
 
-static int snd_atiixp_probe(struct pci_dev *pci,
-                           const struct pci_device_id *pci_id)
+static int __snd_atiixp_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
 {
        struct snd_card *card;
        struct atiixp *chip;
@@ -1623,6 +1623,12 @@ static int snd_atiixp_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_atiixp_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_atiixp_probe(pci, pci_id));
+}
+
 static struct pci_driver atiixp_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_atiixp_ids,
index 178dce8..8864c4c 100644 (file)
@@ -1201,8 +1201,8 @@ static int snd_atiixp_init(struct snd_card *card, struct pci_dev *pci)
 }
 
 
-static int snd_atiixp_probe(struct pci_dev *pci,
-                           const struct pci_device_id *pci_id)
+static int __snd_atiixp_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
 {
        struct snd_card *card;
        struct atiixp_modem *chip;
@@ -1247,6 +1247,12 @@ static int snd_atiixp_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_atiixp_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_atiixp_probe(pci, pci_id));
+}
+
 static struct pci_driver atiixp_modem_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_atiixp_ids,
index 342ef2a..eb23415 100644 (file)
@@ -193,7 +193,7 @@ snd_vortex_create(struct snd_card *card, struct pci_dev *pci)
 
 // constructor -- see "Constructor" sub-section
 static int
-snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+__snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -310,6 +310,12 @@ snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
        return 0;
 }
 
+static int
+snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_vortex_probe(pci, pci_id));
+}
+
 // pci_driver definition
 static struct pci_driver vortex_driver = {
        .name = KBUILD_MODNAME,
index d56f126..29a4bcd 100644 (file)
@@ -275,7 +275,7 @@ static int snd_aw2_probe(struct pci_dev *pci,
        /* (3) Create main component */
        err = snd_aw2_create(card, pci);
        if (err < 0)
-               return err;
+               goto error;
 
        /* initialize mutex */
        mutex_init(&chip->mtx);
@@ -294,13 +294,17 @@ static int snd_aw2_probe(struct pci_dev *pci,
        /* (6) Register card instance */
        err = snd_card_register(card);
        if (err < 0)
-               return err;
+               goto error;
 
        /* (7) Set PCI driver data */
        pci_set_drvdata(pci, card);
 
        dev++;
        return 0;
+
+ error:
+       snd_card_free(card);
+       return err;
 }
 
 /* open callback */
index 0890504..7f329df 100644 (file)
@@ -2427,7 +2427,7 @@ snd_azf3328_create(struct snd_card *card,
 }
 
 static int
-snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+__snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -2520,6 +2520,12 @@ snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
        return 0;
 }
 
+static int
+snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_azf3328_probe(pci, pci_id));
+}
+
 #ifdef CONFIG_PM_SLEEP
 static inline void
 snd_azf3328_suspend_regs(const struct snd_azf3328 *chip,
index d23f931..621985b 100644 (file)
@@ -805,8 +805,8 @@ static int snd_bt87x_detect_card(struct pci_dev *pci)
        return SND_BT87X_BOARD_UNKNOWN;
 }
 
-static int snd_bt87x_probe(struct pci_dev *pci,
-                          const struct pci_device_id *pci_id)
+static int __snd_bt87x_probe(struct pci_dev *pci,
+                            const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -889,6 +889,12 @@ static int snd_bt87x_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_bt87x_probe(struct pci_dev *pci,
+                          const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_bt87x_probe(pci, pci_id));
+}
+
 /* default entries for all Bt87x cards - it's not exported */
 /* driver_data is set to 0 to call detection */
 static const struct pci_device_id snd_bt87x_default_ids[] = {
index 8577f9f..cf1bac7 100644 (file)
@@ -1725,8 +1725,8 @@ static int snd_ca0106_midi(struct snd_ca0106 *chip, unsigned int channel)
 }
 
 
-static int snd_ca0106_probe(struct pci_dev *pci,
-                                       const struct pci_device_id *pci_id)
+static int __snd_ca0106_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -1786,6 +1786,12 @@ static int snd_ca0106_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_ca0106_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_ca0106_probe(pci, pci_id));
+}
+
 #ifdef CONFIG_PM_SLEEP
 static int snd_ca0106_suspend(struct device *dev)
 {
index dab801d..727db6d 100644 (file)
@@ -3247,15 +3247,19 @@ static int snd_cmipci_probe(struct pci_dev *pci,
 
        err = snd_cmipci_create(card, pci, dev);
        if (err < 0)
-               return err;
+               goto error;
 
        err = snd_card_register(card);
        if (err < 0)
-               return err;
+               goto error;
 
        pci_set_drvdata(pci, card);
        dev++;
        return 0;
+
+ error:
+       snd_card_free(card);
+       return err;
 }
 
 #ifdef CONFIG_PM_SLEEP
index e736740..0c9cadf 100644 (file)
@@ -1827,8 +1827,8 @@ static void snd_cs4281_opl3_command(struct snd_opl3 *opl3, unsigned short cmd,
        spin_unlock_irqrestore(&opl3->reg_lock, flags);
 }
 
-static int snd_cs4281_probe(struct pci_dev *pci,
-                           const struct pci_device_id *pci_id)
+static int __snd_cs4281_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -1888,6 +1888,12 @@ static int snd_cs4281_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_cs4281_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_cs4281_probe(pci, pci_id));
+}
+
 /*
  * Power Management
  */
index 499fa01..440b8f9 100644 (file)
@@ -281,8 +281,8 @@ static int snd_cs5535audio_create(struct snd_card *card,
        return 0;
 }
 
-static int snd_cs5535audio_probe(struct pci_dev *pci,
-                                const struct pci_device_id *pci_id)
+static int __snd_cs5535audio_probe(struct pci_dev *pci,
+                                  const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -331,6 +331,12 @@ static int snd_cs5535audio_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_cs5535audio_probe(struct pci_dev *pci,
+                                const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_cs5535audio_probe(pci, pci_id));
+}
+
 static struct pci_driver cs5535audio_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_cs5535audio_ids,
index 25b012e..c70c3ac 100644 (file)
@@ -1970,8 +1970,8 @@ static int snd_echo_create(struct snd_card *card,
 }
 
 /* constructor */
-static int snd_echo_probe(struct pci_dev *pci,
-                         const struct pci_device_id *pci_id)
+static int __snd_echo_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -2139,6 +2139,11 @@ static int snd_echo_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_echo_probe(struct pci_dev *pci,
+                         const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_echo_probe(pci, pci_id));
+}
 
 
 #if defined(CONFIG_PM_SLEEP)
index c49c44d..8904339 100644 (file)
@@ -1491,8 +1491,8 @@ static int snd_emu10k1x_midi(struct emu10k1x *emu)
        return 0;
 }
 
-static int snd_emu10k1x_probe(struct pci_dev *pci,
-                             const struct pci_device_id *pci_id)
+static int __snd_emu10k1x_probe(struct pci_dev *pci,
+                               const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -1554,6 +1554,12 @@ static int snd_emu10k1x_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_emu10k1x_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_emu10k1x_probe(pci, pci_id));
+}
+
 // PCI IDs
 static const struct pci_device_id snd_emu10k1x_ids[] = {
        { PCI_VDEVICE(CREATIVE, 0x0006), 0 },   /* Dell OEM version (EMU10K1) */
index 2651f0c..94efe34 100644 (file)
@@ -2304,8 +2304,8 @@ static irqreturn_t snd_audiopci_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static int snd_audiopci_probe(struct pci_dev *pci,
-                             const struct pci_device_id *pci_id)
+static int __snd_audiopci_probe(struct pci_dev *pci,
+                               const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -2369,6 +2369,12 @@ static int snd_audiopci_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_audiopci_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_audiopci_probe(pci, pci_id));
+}
+
 static struct pci_driver ens137x_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_audiopci_ids,
index 00b976f..e34ec6f 100644 (file)
@@ -1716,8 +1716,8 @@ static int snd_es1938_mixer(struct es1938 *chip)
 }
        
 
-static int snd_es1938_probe(struct pci_dev *pci,
-                           const struct pci_device_id *pci_id)
+static int __snd_es1938_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -1796,6 +1796,12 @@ static int snd_es1938_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_es1938_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_es1938_probe(pci, pci_id));
+}
+
 static struct pci_driver es1938_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_es1938_ids,
index 6a8a02a..4a7e20b 100644 (file)
@@ -2741,8 +2741,8 @@ static int snd_es1968_create(struct snd_card *card,
 
 /*
  */
-static int snd_es1968_probe(struct pci_dev *pci,
-                           const struct pci_device_id *pci_id)
+static int __snd_es1968_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -2848,6 +2848,12 @@ static int snd_es1968_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_es1968_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_es1968_probe(pci, pci_id));
+}
+
 static struct pci_driver es1968_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_es1968_ids,
index 9c22ff1..62b3cb1 100644 (file)
@@ -1268,8 +1268,8 @@ static int snd_fm801_create(struct snd_card *card,
        return 0;
 }
 
-static int snd_card_fm801_probe(struct pci_dev *pci,
-                               const struct pci_device_id *pci_id)
+static int __snd_card_fm801_probe(struct pci_dev *pci,
+                                 const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -1333,6 +1333,12 @@ static int snd_card_fm801_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_card_fm801_probe(struct pci_dev *pci,
+                               const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_card_fm801_probe(pci, pci_id));
+}
+
 #ifdef CONFIG_PM_SLEEP
 static const unsigned char saved_regs[] = {
        FM801_PCM_VOL, FM801_I2S_VOL, FM801_FM_VOL, FM801_REC_SRC,
index 3e086ee..31fe417 100644 (file)
@@ -1395,7 +1395,7 @@ static int hdmi_find_pcm_slot(struct hdmi_spec *spec,
 
  last_try:
        /* the last try; check the empty slots in pins */
-       for (i = 0; i < spec->num_nids; i++) {
+       for (i = 0; i < spec->pcm_used; i++) {
                if (!test_bit(i, &spec->pcm_bitmap))
                        return i;
        }
@@ -2325,7 +2325,9 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
         * dev_num is the device entry number in a pin
         */
 
-       if (codec->mst_no_extra_pcms)
+       if (spec->dyn_pcm_no_legacy && codec->mst_no_extra_pcms)
+               pcm_num = spec->num_cvts;
+       else if (codec->mst_no_extra_pcms)
                pcm_num = spec->num_nids;
        else
                pcm_num = spec->num_nids + spec->dev_num - 1;
@@ -4551,6 +4553,7 @@ HDA_CODEC_ENTRY(0x80862819, "DG2 HDMI",   patch_i915_adlp_hdmi),
 HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi),
 HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI",        patch_i915_icl_hdmi),
 HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_adlp_hdmi),
+HDA_CODEC_ENTRY(0x8086281f, "Raptorlake-P HDMI",       patch_i915_adlp_hdmi),
 HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
 HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI",        patch_i915_byt_hdmi),
 HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI",   patch_i915_byt_hdmi),
index 4e12af2..4c0c593 100644 (file)
@@ -2619,6 +2619,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x65f1, "Clevo PC50HS", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+       SND_PCI_QUIRK(0x1558, 0x65f5, "Clevo PD50PN[NRT]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
@@ -7005,6 +7006,7 @@ enum {
        ALC285_FIXUP_LEGION_Y9000X_AUTOMUTE,
        ALC287_FIXUP_LEGION_16ACHG6,
        ALC287_FIXUP_CS35L41_I2C_2,
+       ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED,
        ALC245_FIXUP_CS35L41_SPI_2,
        ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED,
        ALC245_FIXUP_CS35L41_SPI_4,
@@ -8768,6 +8770,12 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = cs35l41_fixup_i2c_two,
        },
+       [ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = cs35l41_fixup_i2c_two,
+               .chained = true,
+               .chain_id = ALC285_FIXUP_HP_MUTE_LED,
+       },
        [ALC245_FIXUP_CS35L41_SPI_2] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = cs35l41_fixup_spi_two,
@@ -9024,9 +9032,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x8981, "HP Elite Dragonfly G3", ALC245_FIXUP_CS35L41_SPI_4),
        SND_PCI_QUIRK(0x103c, 0x898e, "HP EliteBook 835 G9", ALC287_FIXUP_CS35L41_I2C_2),
        SND_PCI_QUIRK(0x103c, 0x898f, "HP EliteBook 835 G9", ALC287_FIXUP_CS35L41_I2C_2),
-       SND_PCI_QUIRK(0x103c, 0x8991, "HP EliteBook 845 G9", ALC287_FIXUP_CS35L41_I2C_2),
+       SND_PCI_QUIRK(0x103c, 0x8991, "HP EliteBook 845 G9", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8992, "HP EliteBook 845 G9", ALC287_FIXUP_CS35L41_I2C_2),
-       SND_PCI_QUIRK(0x103c, 0x8994, "HP EliteBook 855 G9", ALC287_FIXUP_CS35L41_I2C_2),
+       SND_PCI_QUIRK(0x103c, 0x8994, "HP EliteBook 855 G9", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8995, "HP EliteBook 855 G9", ALC287_FIXUP_CS35L41_I2C_2),
        SND_PCI_QUIRK(0x103c, 0x89a4, "HP ProBook 440 G9", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x89a6, "HP ProBook 450 G9", ALC236_FIXUP_HP_GPIO_LED),
@@ -9162,6 +9170,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x8562, "Clevo NH[57][0-9]RZ[Q]", ALC269_FIXUP_DMIC),
        SND_PCI_QUIRK(0x1558, 0x8668, "Clevo NP50B[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x866d, "Clevo NP5[05]PN[HJK]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0x867c, "Clevo NP7[01]PNP", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x867d, "Clevo NP7[01]PN[HJK]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x8680, "Clevo NJ50LU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME),
@@ -9264,6 +9273,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x505d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x505f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x5062, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x508b, "Thinkpad X12 Gen 1", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
        SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
index f627586..6fab2ad 100644 (file)
@@ -2519,8 +2519,8 @@ static int snd_vt1724_create(struct snd_card *card,
  *
  */
 
-static int snd_vt1724_probe(struct pci_dev *pci,
-                           const struct pci_device_id *pci_id)
+static int __snd_vt1724_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -2662,6 +2662,12 @@ static int snd_vt1724_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_vt1724_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_vt1724_probe(pci, pci_id));
+}
+
 #ifdef CONFIG_PM_SLEEP
 static int snd_vt1724_suspend(struct device *dev)
 {
index a51032b..ae285c0 100644 (file)
@@ -3109,8 +3109,8 @@ static int check_default_spdif_aclink(struct pci_dev *pci)
        return 0;
 }
 
-static int snd_intel8x0_probe(struct pci_dev *pci,
-                             const struct pci_device_id *pci_id)
+static int __snd_intel8x0_probe(struct pci_dev *pci,
+                               const struct pci_device_id *pci_id)
 {
        struct snd_card *card;
        struct intel8x0 *chip;
@@ -3189,6 +3189,12 @@ static int snd_intel8x0_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_intel8x0_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_intel8x0_probe(pci, pci_id));
+}
+
 static struct pci_driver intel8x0_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_intel8x0_ids,
index 7de3cb2..2845cc0 100644 (file)
@@ -1178,8 +1178,8 @@ static struct shortname_table {
        { 0 },
 };
 
-static int snd_intel8x0m_probe(struct pci_dev *pci,
-                              const struct pci_device_id *pci_id)
+static int __snd_intel8x0m_probe(struct pci_dev *pci,
+                                const struct pci_device_id *pci_id)
 {
        struct snd_card *card;
        struct intel8x0m *chip;
@@ -1225,6 +1225,12 @@ static int snd_intel8x0m_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_intel8x0m_probe(struct pci_dev *pci,
+                              const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_intel8x0m_probe(pci, pci_id));
+}
+
 static struct pci_driver intel8x0m_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_intel8x0m_ids,
index 5c9e240..33b4f95 100644 (file)
@@ -2355,7 +2355,7 @@ snd_korg1212_probe(struct pci_dev *pci,
 
        err = snd_korg1212_create(card, pci);
        if (err < 0)
-               return err;
+               goto error;
 
        strcpy(card->driver, "korg1212");
        strcpy(card->shortname, "korg1212");
@@ -2366,10 +2366,14 @@ snd_korg1212_probe(struct pci_dev *pci,
 
        err = snd_card_register(card);
        if (err < 0)
-               return err;
+               goto error;
        pci_set_drvdata(pci, card);
        dev++;
        return 0;
+
+ error:
+       snd_card_free(card);
+       return err;
 }
 
 static struct pci_driver korg1212_driver = {
index 5269a1d..1aa30e9 100644 (file)
@@ -637,8 +637,8 @@ static int lola_create(struct snd_card *card, struct pci_dev *pci, int dev)
        return 0;
 }
 
-static int lola_probe(struct pci_dev *pci,
-                     const struct pci_device_id *pci_id)
+static int __lola_probe(struct pci_dev *pci,
+                       const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -687,6 +687,12 @@ static int lola_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int lola_probe(struct pci_dev *pci,
+                     const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __lola_probe(pci, pci_id));
+}
+
 /* PCI IDs */
 static const struct pci_device_id lola_ids[] = {
        { PCI_VDEVICE(DIGIGRAM, 0x0001) },
index 168a108..bd9b614 100644 (file)
@@ -1019,7 +1019,7 @@ static int snd_lx6464es_probe(struct pci_dev *pci,
        err = snd_lx6464es_create(card, pci);
        if (err < 0) {
                dev_err(card->dev, "error during snd_lx6464es_create\n");
-               return err;
+               goto error;
        }
 
        strcpy(card->driver, "LX6464ES");
@@ -1036,12 +1036,16 @@ static int snd_lx6464es_probe(struct pci_dev *pci,
 
        err = snd_card_register(card);
        if (err < 0)
-               return err;
+               goto error;
 
        dev_dbg(chip->card->dev, "initialization successful\n");
        pci_set_drvdata(pci, card);
        dev++;
        return 0;
+
+ error:
+       snd_card_free(card);
+       return err;
 }
 
 static struct pci_driver lx6464es_driver = {
index 056838e..2618507 100644 (file)
@@ -2637,7 +2637,7 @@ snd_m3_create(struct snd_card *card, struct pci_dev *pci,
 /*
  */
 static int
-snd_m3_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+__snd_m3_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -2702,6 +2702,12 @@ snd_m3_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
        return 0;
 }
 
+static int
+snd_m3_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_m3_probe(pci, pci_id));
+}
+
 static struct pci_driver m3_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_m3_ids,
index c9c1785..f99a1e9 100644 (file)
@@ -1573,7 +1573,6 @@ snd_nm256_create(struct snd_card *card, struct pci_dev *pci)
        chip->coeffs_current = 0;
 
        snd_nm256_init_chip(chip);
-       card->private_free = snd_nm256_free;
 
        // pci_set_master(pci); /* needed? */
        return 0;
@@ -1680,6 +1679,7 @@ static int snd_nm256_probe(struct pci_dev *pci,
        err = snd_card_register(card);
        if (err < 0)
                return err;
+       card->private_free = snd_nm256_free;
 
        pci_set_drvdata(pci, card);
        return 0;
index 4fb3f24..92ffe9d 100644 (file)
@@ -576,7 +576,7 @@ static void oxygen_card_free(struct snd_card *card)
        mutex_destroy(&chip->mutex);
 }
 
-int oxygen_pci_probe(struct pci_dev *pci, int index, char *id,
+static int __oxygen_pci_probe(struct pci_dev *pci, int index, char *id,
                     struct module *owner,
                     const struct pci_device_id *ids,
                     int (*get_model)(struct oxygen *chip,
@@ -701,6 +701,16 @@ int oxygen_pci_probe(struct pci_dev *pci, int index, char *id,
        pci_set_drvdata(pci, card);
        return 0;
 }
+
+int oxygen_pci_probe(struct pci_dev *pci, int index, char *id,
+                    struct module *owner,
+                    const struct pci_device_id *ids,
+                    int (*get_model)(struct oxygen *chip,
+                                     const struct pci_device_id *id))
+{
+       return snd_card_free_on_error(&pci->dev,
+                                     __oxygen_pci_probe(pci, index, id, owner, ids, get_model));
+}
 EXPORT_SYMBOL(oxygen_pci_probe);
 
 #ifdef CONFIG_PM_SLEEP
index 5a987c6..b37c877 100644 (file)
@@ -2023,7 +2023,7 @@ static void snd_riptide_joystick_remove(struct pci_dev *pci)
 #endif
 
 static int
-snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+__snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -2124,6 +2124,12 @@ snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
        return 0;
 }
 
+static int
+snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_card_riptide_probe(pci, pci_id));
+}
+
 static struct pci_driver driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_riptide_ids,
index 5b6bd9f..9c0ac02 100644 (file)
@@ -1875,7 +1875,7 @@ static void snd_rme32_card_free(struct snd_card *card)
 }
 
 static int
-snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+__snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
 {
        static int dev;
        struct rme32 *rme32;
@@ -1927,6 +1927,12 @@ snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
        return 0;
 }
 
+static int
+snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_rme32_probe(pci, pci_id));
+}
+
 static struct pci_driver rme32_driver = {
        .name =         KBUILD_MODNAME,
        .id_table =     snd_rme32_ids,
index 8fc8115..bccb7e0 100644 (file)
@@ -2430,8 +2430,8 @@ static void snd_rme96_card_free(struct snd_card *card)
 }
 
 static int
-snd_rme96_probe(struct pci_dev *pci,
-               const struct pci_device_id *pci_id)
+__snd_rme96_probe(struct pci_dev *pci,
+                 const struct pci_device_id *pci_id)
 {
        static int dev;
        struct rme96 *rme96;
@@ -2498,6 +2498,12 @@ snd_rme96_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_rme96_probe(struct pci_dev *pci,
+                          const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_rme96_probe(pci, pci_id));
+}
+
 static struct pci_driver rme96_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_rme96_ids,
index 96c12df..3db6413 100644 (file)
@@ -5444,17 +5444,21 @@ static int snd_hdsp_probe(struct pci_dev *pci,
        hdsp->pci = pci;
        err = snd_hdsp_create(card, hdsp);
        if (err)
-               return err;
+               goto error;
 
        strcpy(card->shortname, "Hammerfall DSP");
        sprintf(card->longname, "%s at 0x%lx, irq %d", hdsp->card_name,
                hdsp->port, hdsp->irq);
        err = snd_card_register(card);
        if (err)
-               return err;
+               goto error;
        pci_set_drvdata(pci, card);
        dev++;
        return 0;
+
+ error:
+       snd_card_free(card);
+       return err;
 }
 
 static struct pci_driver hdsp_driver = {
index ff06ee8..fa1812e 100644 (file)
@@ -6895,7 +6895,7 @@ static int snd_hdspm_probe(struct pci_dev *pci,
 
        err = snd_hdspm_create(card, hdspm);
        if (err < 0)
-               return err;
+               goto error;
 
        if (hdspm->io_type != MADIface) {
                snprintf(card->shortname, sizeof(card->shortname), "%s_%x",
@@ -6914,12 +6914,16 @@ static int snd_hdspm_probe(struct pci_dev *pci,
 
        err = snd_card_register(card);
        if (err < 0)
-               return err;
+               goto error;
 
        pci_set_drvdata(pci, card);
 
        dev++;
        return 0;
+
+ error:
+       snd_card_free(card);
+       return err;
 }
 
 static struct pci_driver hdspm_driver = {
index 7755e19..1d614fe 100644 (file)
@@ -2572,7 +2572,7 @@ static int snd_rme9652_probe(struct pci_dev *pci,
        rme9652->pci = pci;
        err = snd_rme9652_create(card, rme9652, precise_ptr[dev]);
        if (err)
-               return err;
+               goto error;
 
        strcpy(card->shortname, rme9652->card_name);
 
@@ -2580,10 +2580,14 @@ static int snd_rme9652_probe(struct pci_dev *pci,
                card->shortname, rme9652->port, rme9652->irq);
        err = snd_card_register(card);
        if (err)
-               return err;
+               goto error;
        pci_set_drvdata(pci, card);
        dev++;
        return 0;
+
+ error:
+       snd_card_free(card);
+       return err;
 }
 
 static struct pci_driver rme9652_driver = {
index 0b722b0..fabe393 100644 (file)
@@ -1331,8 +1331,8 @@ static int sis_chip_create(struct snd_card *card,
        return 0;
 }
 
-static int snd_sis7019_probe(struct pci_dev *pci,
-                            const struct pci_device_id *pci_id)
+static int __snd_sis7019_probe(struct pci_dev *pci,
+                              const struct pci_device_id *pci_id)
 {
        struct snd_card *card;
        struct sis7019 *sis;
@@ -1352,8 +1352,8 @@ static int snd_sis7019_probe(struct pci_dev *pci,
        if (!codecs)
                codecs = SIS_PRIMARY_CODEC_PRESENT;
 
-       rc = snd_card_new(&pci->dev, index, id, THIS_MODULE,
-                         sizeof(*sis), &card);
+       rc = snd_devm_card_new(&pci->dev, index, id, THIS_MODULE,
+                              sizeof(*sis), &card);
        if (rc < 0)
                return rc;
 
@@ -1386,6 +1386,12 @@ static int snd_sis7019_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_sis7019_probe(struct pci_dev *pci,
+                            const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_sis7019_probe(pci, pci_id));
+}
+
 static struct pci_driver sis7019_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_sis7019_ids,
index c8c4988..f91cbf6 100644 (file)
@@ -1387,8 +1387,8 @@ static int snd_sonicvibes_midi(struct sonicvibes *sonic,
        return 0;
 }
 
-static int snd_sonic_probe(struct pci_dev *pci,
-                          const struct pci_device_id *pci_id)
+static int __snd_sonic_probe(struct pci_dev *pci,
+                            const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -1459,6 +1459,12 @@ static int snd_sonic_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_sonic_probe(struct pci_dev *pci,
+                          const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_sonic_probe(pci, pci_id));
+}
+
 static struct pci_driver sonicvibes_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_sonic_ids,
index 65514f7..361b83f 100644 (file)
@@ -2458,8 +2458,8 @@ static int check_dxs_list(struct pci_dev *pci, int revision)
        return VIA_DXS_48K;
 };
 
-static int snd_via82xx_probe(struct pci_dev *pci,
-                            const struct pci_device_id *pci_id)
+static int __snd_via82xx_probe(struct pci_dev *pci,
+                              const struct pci_device_id *pci_id)
 {
        struct snd_card *card;
        struct via82xx *chip;
@@ -2569,6 +2569,12 @@ static int snd_via82xx_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_via82xx_probe(struct pci_dev *pci,
+                            const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_via82xx_probe(pci, pci_id));
+}
+
 static struct pci_driver via82xx_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_via82xx_ids,
index 234f7fb..ca7f024 100644 (file)
@@ -1103,8 +1103,8 @@ static int snd_via82xx_create(struct snd_card *card,
 }
 
 
-static int snd_via82xx_probe(struct pci_dev *pci,
-                            const struct pci_device_id *pci_id)
+static int __snd_via82xx_probe(struct pci_dev *pci,
+                              const struct pci_device_id *pci_id)
 {
        struct snd_card *card;
        struct via82xx_modem *chip;
@@ -1157,6 +1157,12 @@ static int snd_via82xx_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_via82xx_probe(struct pci_dev *pci,
+                            const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_via82xx_probe(pci, pci_id));
+}
+
 static struct pci_driver via82xx_modem_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_via82xx_modem_ids,
index 33e4301..0d639a3 100644 (file)
  */
 #undef ENABLE_MIC_INPUT
 
-static struct clk *mclk;
-
-static int at91sam9g20ek_set_bias_level(struct snd_soc_card *card,
-                                       struct snd_soc_dapm_context *dapm,
-                                       enum snd_soc_bias_level level)
-{
-       static int mclk_on;
-       int ret = 0;
-
-       switch (level) {
-       case SND_SOC_BIAS_ON:
-       case SND_SOC_BIAS_PREPARE:
-               if (!mclk_on)
-                       ret = clk_enable(mclk);
-               if (ret == 0)
-                       mclk_on = 1;
-               break;
-
-       case SND_SOC_BIAS_OFF:
-       case SND_SOC_BIAS_STANDBY:
-               if (mclk_on)
-                       clk_disable(mclk);
-               mclk_on = 0;
-               break;
-       }
-
-       return ret;
-}
-
 static const struct snd_soc_dapm_widget at91sam9g20ek_dapm_widgets[] = {
        SND_SOC_DAPM_MIC("Int Mic", NULL),
        SND_SOC_DAPM_SPK("Ext Spk", NULL),
@@ -135,7 +106,6 @@ static struct snd_soc_card snd_soc_at91sam9g20ek = {
        .owner = THIS_MODULE,
        .dai_link = &at91sam9g20ek_dai,
        .num_links = 1,
-       .set_bias_level = at91sam9g20ek_set_bias_level,
 
        .dapm_widgets = at91sam9g20ek_dapm_widgets,
        .num_dapm_widgets = ARRAY_SIZE(at91sam9g20ek_dapm_widgets),
@@ -148,7 +118,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
        struct device_node *codec_np, *cpu_np;
-       struct clk *pllb;
        struct snd_soc_card *card = &snd_soc_at91sam9g20ek;
        int ret;
 
@@ -162,31 +131,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       /*
-        * Codec MCLK is supplied by PCK0 - set it up.
-        */
-       mclk = clk_get(NULL, "pck0");
-       if (IS_ERR(mclk)) {
-               dev_err(&pdev->dev, "Failed to get MCLK\n");
-               ret = PTR_ERR(mclk);
-               goto err;
-       }
-
-       pllb = clk_get(NULL, "pllb");
-       if (IS_ERR(pllb)) {
-               dev_err(&pdev->dev, "Failed to get PLLB\n");
-               ret = PTR_ERR(pllb);
-               goto err_mclk;
-       }
-       ret = clk_set_parent(mclk, pllb);
-       clk_put(pllb);
-       if (ret != 0) {
-               dev_err(&pdev->dev, "Failed to set MCLK parent\n");
-               goto err_mclk;
-       }
-
-       clk_set_rate(mclk, MCLK_RATE);
-
        card->dev = &pdev->dev;
 
        /* Parse device node info */
@@ -230,9 +174,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
 
        return ret;
 
-err_mclk:
-       clk_put(mclk);
-       mclk = NULL;
 err:
        atmel_ssc_put_audio(0);
        return ret;
@@ -242,8 +183,6 @@ static int at91sam9g20ek_audio_remove(struct platform_device *pdev)
 {
        struct snd_soc_card *card = platform_get_drvdata(pdev);
 
-       clk_disable(mclk);
-       mclk = NULL;
        snd_soc_unregister_card(card);
        atmel_ssc_put_audio(0);
 
index e5a56bc..aa6823f 100644 (file)
@@ -822,8 +822,8 @@ int cs35l41_otp_unpack(struct device *dev, struct regmap *regmap)
        word_offset = otp_map_match->word_offset;
 
        for (i = 0; i < otp_map_match->num_elements; i++) {
-               dev_dbg(dev, "bitoffset= %d, word_offset=%d, bit_sum mod 32=%d\n",
-                       bit_offset, word_offset, bit_sum % 32);
+               dev_dbg(dev, "bitoffset= %d, word_offset=%d, bit_sum mod 32=%d otp_map[i].size = %d\n",
+                       bit_offset, word_offset, bit_sum % 32, otp_map[i].size);
                if (bit_offset + otp_map[i].size - 1 >= 32) {
                        otp_val = (otp_mem[word_offset] &
                                        GENMASK(31, bit_offset)) >> bit_offset;
@@ -831,12 +831,14 @@ int cs35l41_otp_unpack(struct device *dev, struct regmap *regmap)
                                        GENMASK(bit_offset + otp_map[i].size - 33, 0)) <<
                                        (32 - bit_offset);
                        bit_offset += otp_map[i].size - 32;
-               } else {
+               } else if (bit_offset + otp_map[i].size - 1 >= 0) {
                        otp_val = (otp_mem[word_offset] &
                                   GENMASK(bit_offset + otp_map[i].size - 1, bit_offset)
                                  ) >> bit_offset;
                        bit_offset += otp_map[i].size;
-               }
+               } else /* both bit_offset and otp_map[i].size are 0 */
+                       otp_val = 0;
+
                bit_sum += otp_map[i].size;
 
                if (bit_offset == 32) {
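
The cs35l41 change above tightens the OTP field extraction, where a field of otp_map[i].size bits may straddle a 32-bit word. A standalone illustration of that two-branch extraction, using made-up data and a userspace stand-in for GENMASK():

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel GENMASK(); valid for 0 <= l <= h <= 31. */
    #define GENMASK(h, l)   ((~0u << (l)) & (~0u >> (31 - (h))))

    /* Extract a 'size'-bit field starting at 'bit' in words[word],
     * spilling into words[word + 1] when it crosses the word boundary. */
    static uint32_t get_field(const uint32_t *words, unsigned int word,
                              unsigned int bit, unsigned int size)
    {
        uint32_t val;

        if (bit + size - 1 >= 32) {
            val = (words[word] & GENMASK(31, bit)) >> bit;
            val |= (words[word + 1] & GENMASK(bit + size - 33, 0)) << (32 - bit);
        } else {
            val = (words[word] & GENMASK(bit + size - 1, bit)) >> bit;
        }
        return val;
    }

    int main(void)
    {
        uint32_t mem[2] = { 0xdeadbeef, 0x00c0ffee };

        /* an 8-bit field at bit 28 straddles mem[0] and mem[1] */
        printf("0x%x\n", get_field(mem, 0, 28, 8));
        return 0;
    }
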
index 6884ae5..3143f9c 100644 (file)
@@ -3566,12 +3566,16 @@ static int rx_macro_probe(struct platform_device *pdev)
                return PTR_ERR(rx->pds);
 
        base = devm_platform_ioremap_resource(pdev, 0);
-       if (IS_ERR(base))
-               return PTR_ERR(base);
+       if (IS_ERR(base)) {
+               ret = PTR_ERR(base);
+               goto err;
+       }
 
        rx->regmap = devm_regmap_init_mmio(dev, base, &rx_regmap_config);
-       if (IS_ERR(rx->regmap))
-               return PTR_ERR(rx->regmap);
+       if (IS_ERR(rx->regmap)) {
+               ret = PTR_ERR(rx->regmap);
+               goto err;
+       }
 
        dev_set_drvdata(dev, rx);
 
@@ -3632,6 +3636,8 @@ err_mclk:
 err_dcodec:
        clk_disable_unprepare(rx->macro);
 err:
+       lpass_macro_pds_exit(rx->pds);
+
        return ret;
 }
 
index 714a411..55503ba 100644 (file)
@@ -1828,8 +1828,10 @@ static int tx_macro_probe(struct platform_device *pdev)
                return PTR_ERR(tx->pds);
 
        base = devm_platform_ioremap_resource(pdev, 0);
-       if (IS_ERR(base))
-               return PTR_ERR(base);
+       if (IS_ERR(base)) {
+               ret = PTR_ERR(base);
+               goto err;
+       }
 
        /* Update defaults for lpass sc7280 */
        if (of_device_is_compatible(np, "qcom,sc7280-lpass-tx-macro")) {
@@ -1846,8 +1848,10 @@ static int tx_macro_probe(struct platform_device *pdev)
        }
 
        tx->regmap = devm_regmap_init_mmio(dev, base, &tx_regmap_config);
-       if (IS_ERR(tx->regmap))
-               return PTR_ERR(tx->regmap);
+       if (IS_ERR(tx->regmap)) {
+               ret = PTR_ERR(tx->regmap);
+               goto err;
+       }
 
        dev_set_drvdata(dev, tx);
 
@@ -1907,6 +1911,8 @@ err_mclk:
 err_dcodec:
        clk_disable_unprepare(tx->macro);
 err:
+       lpass_macro_pds_exit(tx->pds);
+
        return ret;
 }
 
index f3cb596..d18b56e 100644 (file)
@@ -1434,8 +1434,10 @@ static int va_macro_probe(struct platform_device *pdev)
                va->dmic_clk_div = VA_MACRO_CLK_DIV_2;
        } else {
                ret = va_macro_validate_dmic_sample_rate(sample_rate, va);
-               if (!ret)
-                       return -EINVAL;
+               if (!ret) {
+                       ret = -EINVAL;
+                       goto err;
+               }
        }
 
        base = devm_platform_ioremap_resource(pdev, 0);
@@ -1492,6 +1494,8 @@ err_mclk:
 err_dcodec:
        clk_disable_unprepare(va->macro);
 err:
+       lpass_macro_pds_exit(va->pds);
+
        return ret;
 }
 
index 9ad7fc0..20a07c9 100644 (file)
@@ -1206,9 +1206,16 @@ static int msm8916_wcd_digital_probe(struct platform_device *pdev)
 
        dev_set_drvdata(dev, priv);
 
-       return devm_snd_soc_register_component(dev, &msm8916_wcd_digital,
+       ret = devm_snd_soc_register_component(dev, &msm8916_wcd_digital,
                                      msm8916_wcd_digital_dai,
                                      ARRAY_SIZE(msm8916_wcd_digital_dai));
+       if (ret)
+               goto err_mclk;
+
+       return 0;
+
+err_mclk:
+       clk_disable_unprepare(priv->mclk);
 err_clk:
        clk_disable_unprepare(priv->ahbclk);
        return ret;
index 8fffe37..cce6f4e 100644 (file)
@@ -489,7 +489,7 @@ static int rk817_platform_probe(struct platform_device *pdev)
 
        rk817_codec_parse_dt_property(&pdev->dev, rk817_codec_data);
 
-       rk817_codec_data->mclk = clk_get(pdev->dev.parent, "mclk");
+       rk817_codec_data->mclk = devm_clk_get(pdev->dev.parent, "mclk");
        if (IS_ERR(rk817_codec_data->mclk)) {
                dev_dbg(&pdev->dev, "Unable to get mclk\n");
                ret = -ENXIO;
index be68d57..2b6c6d6 100644 (file)
@@ -1100,6 +1100,15 @@ void rt5682_jack_detect_handler(struct work_struct *work)
                return;
        }
 
+       if (rt5682->is_sdw) {
+               if (pm_runtime_status_suspended(rt5682->slave->dev.parent)) {
+                       dev_dbg(&rt5682->slave->dev,
+                               "%s: parent device is pm_runtime_status_suspended, skipping jack detection\n",
+                               __func__);
+                       return;
+               }
+       }
+
        dapm = snd_soc_component_get_dapm(rt5682->component);
 
        snd_soc_dapm_mutex_lock(dapm);
@@ -2822,14 +2831,11 @@ static int rt5682_bclk_set_rate(struct clk_hw *hw, unsigned long rate,
 
        for_each_component_dais(component, dai)
                if (dai->id == RT5682_AIF1)
-                       break;
-       if (!dai) {
-               dev_err(rt5682->i2c_dev, "dai %d not found in component\n",
-                       RT5682_AIF1);
-               return -ENODEV;
-       }
+                       return rt5682_set_bclk1_ratio(dai, factor);
 
-       return rt5682_set_bclk1_ratio(dai, factor);
+       dev_err(rt5682->i2c_dev, "dai %d not found in component\n",
+               RT5682_AIF1);
+       return -ENODEV;
 }
 
 static const struct clk_ops rt5682_dai_clk_ops[RT5682_DAI_NUM_CLKS] = {
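
The rt5682 and rt5682s hunks restructure the DAI lookup because a list_for_each_entry()-style cursor is never NULL after the loop, so the old "if (!dai)" test could not catch the not-found case. A small sketch of the safe shape, acting on the match inside the loop (hypothetical struct and helper names):

    #include <linux/list.h>

    struct dai {
        int id;
        struct list_head list;
    };

    static struct dai *find_dai(struct list_head *dais, int id)
    {
        struct dai *d;

        list_for_each_entry(d, dais, list)
            if (d->id == id)
                return d;   /* act on the match while it is certain */

        /* here 'd' is only a cursor past the end, never NULL, so the
         * not-found case has to be reported explicitly */
        return NULL;
    }
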
index 1cba8ec..b55f3ac 100644 (file)
@@ -2687,14 +2687,11 @@ static int rt5682s_bclk_set_rate(struct clk_hw *hw, unsigned long rate,
 
        for_each_component_dais(component, dai)
                if (dai->id == RT5682S_AIF1)
-                       break;
-       if (!dai) {
-               dev_err(component->dev, "dai %d not found in component\n",
-                       RT5682S_AIF1);
-               return -ENODEV;
-       }
+                       return rt5682s_set_bclk1_ratio(dai, factor);
 
-       return rt5682s_set_bclk1_ratio(dai, factor);
+       dev_err(component->dev, "dai %d not found in component\n",
+               RT5682S_AIF1);
+       return -ENODEV;
 }
 
 static const struct clk_ops rt5682s_dai_clk_ops[RT5682S_DAI_NUM_CLKS] = {
index 6770825..ea25fd5 100644 (file)
@@ -245,6 +245,13 @@ static void rt711_jack_detect_handler(struct work_struct *work)
        if (!rt711->component->card->instantiated)
                return;
 
+       if (pm_runtime_status_suspended(rt711->slave->dev.parent)) {
+               dev_dbg(&rt711->slave->dev,
+                       "%s: parent device is pm_runtime_status_suspended, skipping jack detection\n",
+                       __func__);
+               return;
+       }
+
        reg = RT711_VERB_GET_PIN_SENSE | RT711_HP_OUT;
        ret = regmap_read(rt711->regmap, reg, &jack_status);
        if (ret < 0)
index 1e75e93..6298ebe 100644 (file)
@@ -1274,29 +1274,7 @@ static int wcd934x_set_sido_input_src(struct wcd934x_codec *wcd, int sido_src)
        if (sido_src == wcd->sido_input_src)
                return 0;
 
-       if (sido_src == SIDO_SOURCE_INTERNAL) {
-               regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
-                                  WCD934X_ANA_BUCK_HI_ACCU_EN_MASK, 0);
-               usleep_range(100, 110);
-               regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
-                                  WCD934X_ANA_BUCK_HI_ACCU_PRE_ENX_MASK, 0x0);
-               usleep_range(100, 110);
-               regmap_update_bits(wcd->regmap, WCD934X_ANA_RCO,
-                                  WCD934X_ANA_RCO_BG_EN_MASK, 0);
-               usleep_range(100, 110);
-               regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
-                                  WCD934X_ANA_BUCK_PRE_EN1_MASK,
-                                  WCD934X_ANA_BUCK_PRE_EN1_ENABLE);
-               usleep_range(100, 110);
-               regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
-                                  WCD934X_ANA_BUCK_PRE_EN2_MASK,
-                                  WCD934X_ANA_BUCK_PRE_EN2_ENABLE);
-               usleep_range(100, 110);
-               regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
-                                  WCD934X_ANA_BUCK_HI_ACCU_EN_MASK,
-                                  WCD934X_ANA_BUCK_HI_ACCU_ENABLE);
-               usleep_range(100, 110);
-       } else if (sido_src == SIDO_SOURCE_RCO_BG) {
+       if (sido_src == SIDO_SOURCE_RCO_BG) {
                regmap_update_bits(wcd->regmap, WCD934X_ANA_RCO,
                                   WCD934X_ANA_RCO_BG_EN_MASK,
                                   WCD934X_ANA_RCO_BG_ENABLE);
@@ -1382,8 +1360,6 @@ static int wcd934x_disable_ana_bias_and_syclk(struct wcd934x_codec *wcd)
        regmap_update_bits(wcd->regmap, WCD934X_CLK_SYS_MCLK_PRG,
                           WCD934X_EXT_CLK_BUF_EN_MASK |
                           WCD934X_MCLK_EN_MASK, 0x0);
-       wcd934x_set_sido_input_src(wcd, SIDO_SOURCE_INTERNAL);
-
        regmap_update_bits(wcd->regmap, WCD934X_ANA_BIAS,
                           WCD934X_ANA_BIAS_EN_MASK, 0);
        regmap_update_bits(wcd->regmap, WCD934X_ANA_BIAS,
index 5d4949c..b14c6d1 100644 (file)
@@ -602,7 +602,7 @@ static int wm8731_hw_init(struct device *dev, struct wm8731_priv *wm8731)
        ret = wm8731_reset(wm8731->regmap);
        if (ret < 0) {
                dev_err(dev, "Failed to issue reset: %d\n", ret);
-               goto err_regulator_enable;
+               goto err;
        }
 
        /* Clear POWEROFF, keep everything else disabled */
@@ -619,10 +619,7 @@ static int wm8731_hw_init(struct device *dev, struct wm8731_priv *wm8731)
 
        regcache_mark_dirty(wm8731->regmap);
 
-err_regulator_enable:
-       /* Regulators will be enabled by bias management */
-       regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);
-
+err:
        return ret;
 }
 
@@ -760,21 +757,27 @@ static int wm8731_i2c_probe(struct i2c_client *i2c,
                ret = PTR_ERR(wm8731->regmap);
                dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
                        ret);
-               return ret;
+               goto err_regulator_enable;
        }
 
        ret = wm8731_hw_init(&i2c->dev, wm8731);
        if (ret != 0)
-               return ret;
+               goto err_regulator_enable;
 
        ret = devm_snd_soc_register_component(&i2c->dev,
                        &soc_component_dev_wm8731, &wm8731_dai, 1);
        if (ret != 0) {
                dev_err(&i2c->dev, "Failed to register CODEC: %d\n", ret);
-               return ret;
+               goto err_regulator_enable;
        }
 
        return 0;
+
+err_regulator_enable:
+       /* Regulators will be enabled by bias management */
+       regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);
+
+       return ret;
 }
 
 static const struct i2c_device_id wm8731_i2c_id[] = {
index 4650a69..ffc24af 100644 (file)
@@ -372,7 +372,7 @@ static int fsl_sai_set_bclk(struct snd_soc_dai *dai, bool tx, u32 freq)
                        continue;
                if (ratio == 1 && !support_1_1_ratio)
                        continue;
-               else if (ratio & 1)
+               if ((ratio & 1) && ratio > 1)
                        continue;
 
                diff = abs((long)clk_rate - ratio * freq);
index 8e03783..f215794 100644 (file)
@@ -364,13 +364,15 @@ static int asoc_simple_set_tdm(struct snd_soc_dai *dai,
                                struct snd_pcm_hw_params *params)
 {
        int sample_bits = params_width(params);
-       int slot_width = simple_dai->slot_width;
-       int slot_count = simple_dai->slots;
+       int slot_width, slot_count;
        int i, ret;
 
        if (!simple_dai || !simple_dai->tdm_width_map)
                return 0;
 
+       slot_width = simple_dai->slot_width;
+       slot_count = simple_dai->slots;
+
        if (slot_width == 0)
                slot_width = sample_bits;
 
index 5e0529a..9d61783 100644 (file)
 #define SOF_ES8336_SSP_CODEC(quirk)            ((quirk) & GENMASK(3, 0))
 #define SOF_ES8336_SSP_CODEC_MASK              (GENMASK(3, 0))
 
-#define SOF_ES8336_TGL_GPIO_QUIRK              BIT(4)
+#define SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK     BIT(4)
 #define SOF_ES8336_ENABLE_DMIC                 BIT(5)
 #define SOF_ES8336_JD_INVERTED                 BIT(6)
+#define SOF_ES8336_HEADPHONE_GPIO              BIT(7)
+#define SOC_ES8336_HEADSET_MIC1                        BIT(8)
 
 static unsigned long quirk;
 
@@ -39,7 +41,7 @@ MODULE_PARM_DESC(quirk, "Board-specific quirk override");
 
 struct sof_es8336_private {
        struct device *codec_dev;
-       struct gpio_desc *gpio_pa;
+       struct gpio_desc *gpio_speakers, *gpio_headphone;
        struct snd_soc_jack jack;
        struct list_head hdmi_pcm_list;
        bool speaker_en;
@@ -51,19 +53,31 @@ struct sof_hdmi_pcm {
        int device;
 };
 
-static const struct acpi_gpio_params pa_enable_gpio = { 0, 0, true };
-static const struct acpi_gpio_mapping acpi_es8336_gpios[] = {
-       { "pa-enable-gpios", &pa_enable_gpio, 1 },
+static const struct acpi_gpio_params enable_gpio0 = { 0, 0, true };
+static const struct acpi_gpio_params enable_gpio1 = { 1, 0, true };
+
+static const struct acpi_gpio_mapping acpi_speakers_enable_gpio0[] = {
+       { "speakers-enable-gpios", &enable_gpio0, 1 },
        { }
 };
 
-static const struct acpi_gpio_params quirk_pa_enable_gpio = { 1, 0, true };
-static const struct acpi_gpio_mapping quirk_acpi_es8336_gpios[] = {
-       { "pa-enable-gpios", &quirk_pa_enable_gpio, 1 },
+static const struct acpi_gpio_mapping acpi_speakers_enable_gpio1[] = {
+       { "speakers-enable-gpios", &enable_gpio1, 1 },
+};
+
+static const struct acpi_gpio_mapping acpi_enable_both_gpios[] = {
+       { "speakers-enable-gpios", &enable_gpio0, 1 },
+       { "headphone-enable-gpios", &enable_gpio1, 1 },
        { }
 };
 
-static const struct acpi_gpio_mapping *gpio_mapping = acpi_es8336_gpios;
+static const struct acpi_gpio_mapping acpi_enable_both_gpios_rev_order[] = {
+       { "speakers-enable-gpios", &enable_gpio1, 1 },
+       { "headphone-enable-gpios", &enable_gpio0, 1 },
+       { }
+};
+
+static const struct acpi_gpio_mapping *gpio_mapping = acpi_speakers_enable_gpio0;
 
 static void log_quirks(struct device *dev)
 {
@@ -71,10 +85,14 @@ static void log_quirks(struct device *dev)
        dev_info(dev, "quirk SSP%ld\n",  SOF_ES8336_SSP_CODEC(quirk));
        if (quirk & SOF_ES8336_ENABLE_DMIC)
                dev_info(dev, "quirk DMIC enabled\n");
-       if (quirk & SOF_ES8336_TGL_GPIO_QUIRK)
-               dev_info(dev, "quirk TGL GPIO enabled\n");
+       if (quirk & SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK)
+               dev_info(dev, "Speakers GPIO1 quirk enabled\n");
+       if (quirk & SOF_ES8336_HEADPHONE_GPIO)
+               dev_info(dev, "quirk headphone GPIO enabled\n");
        if (quirk & SOF_ES8336_JD_INVERTED)
                dev_info(dev, "quirk JD inverted enabled\n");
+       if (quirk & SOC_ES8336_HEADSET_MIC1)
+               dev_info(dev, "quirk headset at mic1 port enabled\n");
 }
 
 static int sof_es8316_speaker_power_event(struct snd_soc_dapm_widget *w,
@@ -83,12 +101,23 @@ static int sof_es8316_speaker_power_event(struct snd_soc_dapm_widget *w,
        struct snd_soc_card *card = w->dapm->card;
        struct sof_es8336_private *priv = snd_soc_card_get_drvdata(card);
 
+       if (priv->speaker_en == !SND_SOC_DAPM_EVENT_ON(event))
+               return 0;
+
+       priv->speaker_en = !SND_SOC_DAPM_EVENT_ON(event);
+
+       if (SND_SOC_DAPM_EVENT_ON(event))
+               msleep(70);
+
+       gpiod_set_value_cansleep(priv->gpio_speakers, priv->speaker_en);
+
+       if (!(quirk & SOF_ES8336_HEADPHONE_GPIO))
+               return 0;
+
        if (SND_SOC_DAPM_EVENT_ON(event))
-               priv->speaker_en = false;
-       else
-               priv->speaker_en = true;
+               msleep(70);
 
-       gpiod_set_value_cansleep(priv->gpio_pa, priv->speaker_en);
+       gpiod_set_value_cansleep(priv->gpio_headphone, priv->speaker_en);
 
        return 0;
 }
@@ -114,18 +143,23 @@ static const struct snd_soc_dapm_route sof_es8316_audio_map[] = {
 
        /*
         * There is no separate speaker output instead the speakers are muxed to
-        * the HP outputs. The mux is controlled by the "Speaker Power" supply.
+        * the HP outputs. The mux is controlled Speaker and/or headphone switch.
         */
        {"Speaker", NULL, "HPOL"},
        {"Speaker", NULL, "HPOR"},
        {"Speaker", NULL, "Speaker Power"},
 };
 
-static const struct snd_soc_dapm_route sof_es8316_intmic_in1_map[] = {
+static const struct snd_soc_dapm_route sof_es8316_headset_mic2_map[] = {
        {"MIC1", NULL, "Internal Mic"},
        {"MIC2", NULL, "Headset Mic"},
 };
 
+static const struct snd_soc_dapm_route sof_es8316_headset_mic1_map[] = {
+       {"MIC2", NULL, "Internal Mic"},
+       {"MIC1", NULL, "Headset Mic"},
+};
+
 static const struct snd_soc_dapm_route dmic_map[] = {
        /* digital mics */
        {"DMic", NULL, "SoC DMIC"},
@@ -199,8 +233,13 @@ static int sof_es8316_init(struct snd_soc_pcm_runtime *runtime)
 
        card->dapm.idle_bias_off = true;
 
-       custom_map = sof_es8316_intmic_in1_map;
-       num_routes = ARRAY_SIZE(sof_es8316_intmic_in1_map);
+       if (quirk & SOC_ES8336_HEADSET_MIC1) {
+               custom_map = sof_es8316_headset_mic1_map;
+               num_routes = ARRAY_SIZE(sof_es8316_headset_mic1_map);
+       } else {
+               custom_map = sof_es8316_headset_mic2_map;
+               num_routes = ARRAY_SIZE(sof_es8316_headset_mic2_map);
+       }
 
        ret = snd_soc_dapm_add_routes(&card->dapm, custom_map, num_routes);
        if (ret)
@@ -233,8 +272,14 @@ static int sof_es8336_quirk_cb(const struct dmi_system_id *id)
 {
        quirk = (unsigned long)id->driver_data;
 
-       if (quirk & SOF_ES8336_TGL_GPIO_QUIRK)
-               gpio_mapping = quirk_acpi_es8336_gpios;
+       if (quirk & SOF_ES8336_HEADPHONE_GPIO) {
+               if (quirk & SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK)
+                       gpio_mapping = acpi_enable_both_gpios;
+               else
+                       gpio_mapping = acpi_enable_both_gpios_rev_order;
+       } else if (quirk & SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK) {
+               gpio_mapping = acpi_speakers_enable_gpio1;
+       }
 
        return 1;
 }
@@ -257,7 +302,16 @@ static const struct dmi_system_id sof_es8336_quirk_table[] = {
                        DMI_MATCH(DMI_SYS_VENDOR, "IP3 tech"),
                        DMI_MATCH(DMI_BOARD_NAME, "WN1"),
                },
-               .driver_data = (void *)(SOF_ES8336_TGL_GPIO_QUIRK)
+               .driver_data = (void *)(SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK)
+       },
+       {
+               .callback = sof_es8336_quirk_cb,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "HUAWEI"),
+                       DMI_MATCH(DMI_BOARD_NAME, "BOHB-WAX9-PCB-B2"),
+               },
+               .driver_data = (void *)(SOF_ES8336_HEADPHONE_GPIO |
+                                       SOC_ES8336_HEADSET_MIC1)
        },
        {}
 };
@@ -585,10 +639,17 @@ static int sof_es8336_probe(struct platform_device *pdev)
        if (ret)
                dev_warn(codec_dev, "unable to add GPIO mapping table\n");
 
-       priv->gpio_pa = gpiod_get_optional(codec_dev, "pa-enable", GPIOD_OUT_LOW);
-       if (IS_ERR(priv->gpio_pa)) {
-               ret = dev_err_probe(dev, PTR_ERR(priv->gpio_pa),
-                                   "could not get pa-enable GPIO\n");
+       priv->gpio_speakers = gpiod_get_optional(codec_dev, "speakers-enable", GPIOD_OUT_LOW);
+       if (IS_ERR(priv->gpio_speakers)) {
+               ret = dev_err_probe(dev, PTR_ERR(priv->gpio_speakers),
+                                   "could not get speakers-enable GPIO\n");
+               goto err_put_codec;
+       }
+
+       priv->gpio_headphone = gpiod_get_optional(codec_dev, "headphone-enable", GPIOD_OUT_LOW);
+       if (IS_ERR(priv->gpio_headphone)) {
+               ret = dev_err_probe(dev, PTR_ERR(priv->gpio_headphone),
+                                   "could not get headphone-enable GPIO\n");
                goto err_put_codec;
        }
 
@@ -604,7 +665,7 @@ static int sof_es8336_probe(struct platform_device *pdev)
 
        ret = devm_snd_soc_register_card(dev, card);
        if (ret) {
-               gpiod_put(priv->gpio_pa);
+               gpiod_put(priv->gpio_speakers);
                dev_err(dev, "snd_soc_register_card failed: %d\n", ret);
                goto err_put_codec;
        }
@@ -622,7 +683,7 @@ static int sof_es8336_remove(struct platform_device *pdev)
        struct snd_soc_card *card = platform_get_drvdata(pdev);
        struct sof_es8336_private *priv = snd_soc_card_get_drvdata(card);
 
-       gpiod_put(priv->gpio_pa);
+       gpiod_put(priv->gpio_speakers);
        device_remove_software_node(priv->codec_dev);
        put_device(priv->codec_dev);
 
index ebec4d1..7126fcb 100644 (file)
@@ -212,6 +212,19 @@ static const struct dmi_system_id sof_rt5682_quirk_table[] = {
                                        SOF_SSP_BT_OFFLOAD_PRESENT),
 
        },
+       {
+               .callback = sof_rt5682_quirk_cb,
+               .matches = {
+                       DMI_MATCH(DMI_PRODUCT_FAMILY, "Google_Brya"),
+                       DMI_MATCH(DMI_OEM_STRING, "AUDIO-MAX98360_ALC5682I_I2S_AMP_SSP2"),
+               },
+               .driver_data = (void *)(SOF_RT5682_MCLK_EN |
+                                       SOF_RT5682_SSP_CODEC(0) |
+                                       SOF_SPEAKER_AMP_PRESENT |
+                                       SOF_MAX98360A_SPEAKER_AMP_PRESENT |
+                                       SOF_RT5682_SSP_AMP(2) |
+                                       SOF_RT5682_NUM_HDMIDEV(4)),
+       },
        {}
 };
 
index 6edc9b7..ef19150 100644 (file)
@@ -132,13 +132,13 @@ static const struct snd_soc_acpi_adr_device mx8373_1_adr[] = {
        {
                .adr = 0x000123019F837300ull,
                .num_endpoints = 1,
-               .endpoints = &spk_l_endpoint,
+               .endpoints = &spk_r_endpoint,
                .name_prefix = "Right"
        },
        {
                .adr = 0x000127019F837300ull,
                .num_endpoints = 1,
-               .endpoints = &spk_r_endpoint,
+               .endpoints = &spk_l_endpoint,
                .name_prefix = "Left"
        }
 };
index 27a6d32..22e1816 100644 (file)
@@ -193,6 +193,9 @@ static const struct snd_soc_component_driver aiu_acodec_ctrl_component = {
        .of_xlate_dai_name      = aiu_acodec_of_xlate_dai_name,
        .endianness             = 1,
        .non_legacy_dai_naming  = 1,
+#ifdef CONFIG_DEBUG_FS
+       .debugfs_prefix         = "acodec",
+#endif
 };
 
 int aiu_acodec_ctrl_register_component(struct device *dev)
index c3ea733..59ee66f 100644 (file)
@@ -140,6 +140,9 @@ static const struct snd_soc_component_driver aiu_hdmi_ctrl_component = {
        .of_xlate_dai_name      = aiu_hdmi_of_xlate_dai_name,
        .endianness             = 1,
        .non_legacy_dai_naming  = 1,
+#ifdef CONFIG_DEBUG_FS
+       .debugfs_prefix         = "hdmi",
+#endif
 };
 
 int aiu_hdmi_ctrl_register_component(struct device *dev)
index d299a70..88e611e 100644 (file)
@@ -103,6 +103,9 @@ static const struct snd_soc_component_driver aiu_cpu_component = {
        .pointer                = aiu_fifo_pointer,
        .probe                  = aiu_cpu_component_probe,
        .remove                 = aiu_cpu_component_remove,
+#ifdef CONFIG_DEBUG_FS
+       .debugfs_prefix         = "cpu",
+#endif
 };
 
 static struct snd_soc_dai_driver aiu_cpu_dai_drv[] = {
index ce153ac..8c7da82 100644 (file)
@@ -2587,6 +2587,11 @@ int snd_soc_component_initialize(struct snd_soc_component *component,
        component->dev          = dev;
        component->driver       = driver;
 
+#ifdef CONFIG_DEBUG_FS
+       if (!component->debugfs_prefix)
+               component->debugfs_prefix = driver->debugfs_prefix;
+#endif
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_initialize);
index b435b5c..ca917a8 100644 (file)
@@ -1687,8 +1687,7 @@ static void dapm_seq_run(struct snd_soc_card *card,
                switch (w->id) {
                case snd_soc_dapm_pre:
                        if (!w->event)
-                               list_for_each_entry_safe_continue(w, n, list,
-                                                                 power_list);
+                               continue;
 
                        if (event == SND_SOC_DAPM_STREAM_START)
                                ret = w->event(w,
@@ -1700,8 +1699,7 @@ static void dapm_seq_run(struct snd_soc_card *card,
 
                case snd_soc_dapm_post:
                        if (!w->event)
-                               list_for_each_entry_safe_continue(w, n, list,
-                                                                 power_list);
+                               continue;
 
                        if (event == SND_SOC_DAPM_STREAM_START)
                                ret = w->event(w,
index 9a95468..11c9853 100644 (file)
@@ -1214,7 +1214,7 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
                be_substream->pcm->nonatomic = 1;
        }
 
-       dpcm = kzalloc(sizeof(struct snd_soc_dpcm), GFP_ATOMIC);
+       dpcm = kzalloc(sizeof(struct snd_soc_dpcm), GFP_KERNEL);
        if (!dpcm)
                return -ENOMEM;
 
index 72e50df..3bb90a8 100644 (file)
@@ -1436,12 +1436,12 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
        template.num_kcontrols = le32_to_cpu(w->num_kcontrols);
        kc = devm_kcalloc(tplg->dev, le32_to_cpu(w->num_kcontrols), sizeof(*kc), GFP_KERNEL);
        if (!kc)
-               goto err;
+               goto hdr_err;
 
        kcontrol_type = devm_kcalloc(tplg->dev, le32_to_cpu(w->num_kcontrols), sizeof(unsigned int),
                                     GFP_KERNEL);
        if (!kcontrol_type)
-               goto err;
+               goto hdr_err;
 
        for (i = 0; i < le32_to_cpu(w->num_kcontrols); i++) {
                control_hdr = (struct snd_soc_tplg_ctl_hdr *)tplg->pos;
index 4c95967..12f5cff 100644 (file)
@@ -83,7 +83,14 @@ static const struct dmi_system_id sof_tplg_table[] = {
                },
                .driver_data = "sof-adl-max98357a-rt5682-2way.tplg",
        },
-
+       {
+               .callback = sof_tplg_cb,
+               .matches = {
+                       DMI_MATCH(DMI_PRODUCT_FAMILY, "Google_Brya"),
+                       DMI_MATCH(DMI_OEM_STRING, "AUDIO-MAX98360_ALC5682I_I2S_AMP_SSP2"),
+               },
+               .driver_data = "sof-adl-max98357a-rt5682.tplg",
+       },
        {}
 };
 
index 9b11e97..3e5b319 100644 (file)
@@ -904,8 +904,10 @@ static int sof_control_load(struct snd_soc_component *scomp, int index,
                return -ENOMEM;
 
        scontrol->name = kstrdup(hdr->name, GFP_KERNEL);
-       if (!scontrol->name)
+       if (!scontrol->name) {
+               kfree(scontrol);
                return -ENOMEM;
+       }
 
        scontrol->scomp = scomp;
        scontrol->access = kc->access;
@@ -941,11 +943,13 @@ static int sof_control_load(struct snd_soc_component *scomp, int index,
        default:
                dev_warn(scomp->dev, "control type not supported %d:%d:%d\n",
                         hdr->ops.get, hdr->ops.put, hdr->ops.info);
+               kfree(scontrol->name);
                kfree(scontrol);
                return 0;
        }
 
        if (ret < 0) {
+               kfree(scontrol->name);
                kfree(scontrol);
                return ret;
        }
@@ -1068,6 +1072,46 @@ static int sof_connect_dai_widget(struct snd_soc_component *scomp,
        return 0;
 }
 
+static void sof_disconnect_dai_widget(struct snd_soc_component *scomp,
+                                     struct snd_soc_dapm_widget *w)
+{
+       struct snd_soc_card *card = scomp->card;
+       struct snd_soc_pcm_runtime *rtd;
+       struct snd_soc_dai *cpu_dai;
+       int i;
+
+       if (!w->sname)
+               return;
+
+       list_for_each_entry(rtd, &card->rtd_list, list) {
+               /* does stream match DAI link ? */
+               if (!rtd->dai_link->stream_name ||
+                   strcmp(w->sname, rtd->dai_link->stream_name))
+                       continue;
+
+               switch (w->id) {
+               case snd_soc_dapm_dai_out:
+                       for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
+                               if (cpu_dai->capture_widget == w) {
+                                       cpu_dai->capture_widget = NULL;
+                                       break;
+                               }
+                       }
+                       break;
+               case snd_soc_dapm_dai_in:
+                       for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
+                               if (cpu_dai->playback_widget == w) {
+                                       cpu_dai->playback_widget = NULL;
+                                       break;
+                               }
+                       }
+                       break;
+               default:
+                       break;
+               }
+       }
+}
+
 /* bind PCM ID to host component ID */
 static int spcm_bind(struct snd_soc_component *scomp, struct snd_sof_pcm *spcm,
                     int dir)
@@ -1353,6 +1397,9 @@ static int sof_widget_unload(struct snd_soc_component *scomp,
 
                if (dai)
                        list_del(&dai->list);
+
+               sof_disconnect_dai_widget(scomp, widget);
+
                break;
        default:
                break;
@@ -1380,6 +1427,7 @@ static int sof_widget_unload(struct snd_soc_component *scomp,
                }
                kfree(scontrol->ipc_control_data);
                list_del(&scontrol->list);
+               kfree(scontrol->name);
                kfree(scontrol);
        }
 
index 2c01649..7c6ca2b 100644 (file)
@@ -1194,6 +1194,7 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream)
                } while (drain_urbs && timeout);
                finish_wait(&ep->drain_wait, &wait);
        }
+       port->active = 0;
        spin_unlock_irq(&ep->buffer_lock);
 }
 
index 64f5544..7ef7a8a 100644 (file)
@@ -599,6 +599,10 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
                .id = USB_ID(0x0db0, 0x419c),
                .map = msi_mpg_x570s_carbon_max_wifi_alc4080_map,
        },
+       {       /* MSI MAG X570S Torpedo Max */
+               .id = USB_ID(0x0db0, 0xa073),
+               .map = msi_mpg_x570s_carbon_max_wifi_alc4080_map,
+       },
        {       /* MSI TRX40 */
                .id = USB_ID(0x0db0, 0x543d),
                .map = trx40_mobo_map,
index cec6e91..6d69906 100644 (file)
@@ -669,9 +669,9 @@ static const struct snd_pcm_hardware snd_usb_hardware =
                                SNDRV_PCM_INFO_PAUSE,
        .channels_min =         1,
        .channels_max =         256,
-       .buffer_bytes_max =     1024 * 1024,
+       .buffer_bytes_max =     INT_MAX, /* limited by BUFFER_TIME later */
        .period_bytes_min =     64,
-       .period_bytes_max =     512 * 1024,
+       .period_bytes_max =     INT_MAX, /* limited by PERIOD_TIME later */
        .periods_min =          2,
        .periods_max =          1024,
 };
@@ -1064,6 +1064,18 @@ static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substre
                        return err;
        }
 
+       /* set max period and buffer sizes for 1 and 2 seconds, respectively */
+       err = snd_pcm_hw_constraint_minmax(runtime,
+                                          SNDRV_PCM_HW_PARAM_PERIOD_TIME,
+                                          0, 1000000);
+       if (err < 0)
+               return err;
+       err = snd_pcm_hw_constraint_minmax(runtime,
+                                          SNDRV_PCM_HW_PARAM_BUFFER_TIME,
+                                          0, 2000000);
+       if (err < 0)
+               return err;
+
        /* additional hw constraints for implicit fb */
        err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
                                  hw_rule_format_implicit_fb, subs,
index 1678341..b8359a0 100644 (file)
@@ -8,7 +8,7 @@
  */
 
 /* handling of USB vendor/product ID pairs as 32-bit numbers */
-#define USB_ID(vendor, product) (((vendor) << 16) | (product))
+#define USB_ID(vendor, product) (((unsigned int)(vendor) << 16) | (product))
 #define USB_ID_VENDOR(id) ((id) >> 16)
 #define USB_ID_PRODUCT(id) ((u16)(id))
 
index b006346..0d828e3 100644 (file)
@@ -1652,7 +1652,7 @@ static void hdmi_lpe_audio_free(struct snd_card *card)
  * This function is called when the i915 driver creates the
  * hdmi-lpe-audio platform device.
  */
-static int hdmi_lpe_audio_probe(struct platform_device *pdev)
+static int __hdmi_lpe_audio_probe(struct platform_device *pdev)
 {
        struct snd_card *card;
        struct snd_intelhad_card *card_ctx;
@@ -1815,6 +1815,11 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
        return 0;
 }
 
+static int hdmi_lpe_audio_probe(struct platform_device *pdev)
+{
+       return snd_card_free_on_error(&pdev->dev, __hdmi_lpe_audio_probe(pdev));
+}
+
 static const struct dev_pm_ops hdmi_lpe_audio_pm = {
        SET_SYSTEM_SLEEP_PM_OPS(hdmi_lpe_audio_suspend, hdmi_lpe_audio_resume)
 };
index 0eb90d2..ee15311 100644 (file)
 #define TSX_CTRL_RTM_DISABLE           BIT(0)  /* Disable RTM feature */
 #define TSX_CTRL_CPUID_CLEAR           BIT(1)  /* Disable TSX enumeration */
 
-/* SRBDS support */
 #define MSR_IA32_MCU_OPT_CTRL          0x00000123
-#define RNGDS_MITG_DIS                 BIT(0)
+#define RNGDS_MITG_DIS                 BIT(0)  /* SRBDS support */
+#define RTM_ALLOW                      BIT(1)  /* TSX development mode */
 
 #define MSR_IA32_SYSENTER_CS           0x00000174
 #define MSR_IA32_SYSENTER_ESP          0x00000175
index f41d8a0..0616409 100644 (file)
@@ -28,7 +28,13 @@ static inline void *kzalloc(size_t size, gfp_t gfp)
        return kmalloc(size, gfp | __GFP_ZERO);
 }
 
-void *kmem_cache_alloc(struct kmem_cache *cachep, int flags);
+struct list_lru;
+
+void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *, int flags);
+static inline void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
+{
+       return kmem_cache_alloc_lru(cachep, NULL, flags);
+}
 void kmem_cache_free(struct kmem_cache *cachep, void *objp);
 
 struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
index 1b15ba1..a093155 100644 (file)
@@ -577,7 +577,6 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
 {
        struct perf_evsel *evsel;
        const struct perf_cpu_map *cpus = evlist->user_requested_cpus;
-       const struct perf_thread_map *threads = evlist->threads;
 
        if (!ops || !ops->get || !ops->mmap)
                return -EINVAL;
@@ -589,7 +588,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
        perf_evlist__for_each_entry(evlist, evsel) {
                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                    evsel->sample_id == NULL &&
-                   perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
+                   perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0)
                        return -ENOMEM;
        }
 
index f264017..44e1f8a 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/numa.h>
 #include <linux/zalloc.h>
 
+#include "../util/header.h"
 #include <numa.h>
 #include <numaif.h>
 
@@ -54,7 +55,7 @@
 
 struct thread_data {
        int                     curr_cpu;
-       cpu_set_t               bind_cpumask;
+       cpu_set_t               *bind_cpumask;
        int                     bind_node;
        u8                      *process_data;
        int                     process_nr;
@@ -266,71 +267,115 @@ static bool node_has_cpus(int node)
        return ret;
 }
 
-static cpu_set_t bind_to_cpu(int target_cpu)
+static cpu_set_t *bind_to_cpu(int target_cpu)
 {
-       cpu_set_t orig_mask, mask;
-       int ret;
+       int nrcpus = numa_num_possible_cpus();
+       cpu_set_t *orig_mask, *mask;
+       size_t size;
 
-       ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
-       BUG_ON(ret);
+       orig_mask = CPU_ALLOC(nrcpus);
+       BUG_ON(!orig_mask);
+       size = CPU_ALLOC_SIZE(nrcpus);
+       CPU_ZERO_S(size, orig_mask);
+
+       if (sched_getaffinity(0, size, orig_mask))
+               goto err_out;
+
+       mask = CPU_ALLOC(nrcpus);
+       if (!mask)
+               goto err_out;
 
-       CPU_ZERO(&mask);
+       CPU_ZERO_S(size, mask);
 
        if (target_cpu == -1) {
                int cpu;
 
                for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
-                       CPU_SET(cpu, &mask);
+                       CPU_SET_S(cpu, size, mask);
        } else {
-               BUG_ON(target_cpu < 0 || target_cpu >= g->p.nr_cpus);
-               CPU_SET(target_cpu, &mask);
+               if (target_cpu < 0 || target_cpu >= g->p.nr_cpus)
+                       goto err;
+
+               CPU_SET_S(target_cpu, size, mask);
        }
 
-       ret = sched_setaffinity(0, sizeof(mask), &mask);
-       BUG_ON(ret);
+       if (sched_setaffinity(0, size, mask))
+               goto err;
 
        return orig_mask;
+
+err:
+       CPU_FREE(mask);
+err_out:
+       CPU_FREE(orig_mask);
+
+       /* BUG_ON due to failure in allocation of orig_mask/mask */
+       BUG_ON(-1);
 }
 
-static cpu_set_t bind_to_node(int target_node)
+static cpu_set_t *bind_to_node(int target_node)
 {
-       cpu_set_t orig_mask, mask;
+       int nrcpus = numa_num_possible_cpus();
+       size_t size;
+       cpu_set_t *orig_mask, *mask;
        int cpu;
-       int ret;
 
-       ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
-       BUG_ON(ret);
+       orig_mask = CPU_ALLOC(nrcpus);
+       BUG_ON(!orig_mask);
+       size = CPU_ALLOC_SIZE(nrcpus);
+       CPU_ZERO_S(size, orig_mask);
 
-       CPU_ZERO(&mask);
+       if (sched_getaffinity(0, size, orig_mask))
+               goto err_out;
+
+       mask = CPU_ALLOC(nrcpus);
+       if (!mask)
+               goto err_out;
+
+       CPU_ZERO_S(size, mask);
 
        if (target_node == NUMA_NO_NODE) {
                for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
-                       CPU_SET(cpu, &mask);
+                       CPU_SET_S(cpu, size, mask);
        } else {
                struct bitmask *cpumask = numa_allocate_cpumask();
 
-               BUG_ON(!cpumask);
+               if (!cpumask)
+                       goto err;
+
                if (!numa_node_to_cpus(target_node, cpumask)) {
                        for (cpu = 0; cpu < (int)cpumask->size; cpu++) {
                                if (numa_bitmask_isbitset(cpumask, cpu))
-                                       CPU_SET(cpu, &mask);
+                                       CPU_SET_S(cpu, size, mask);
                        }
                }
                numa_free_cpumask(cpumask);
        }
 
-       ret = sched_setaffinity(0, sizeof(mask), &mask);
-       BUG_ON(ret);
+       if (sched_setaffinity(0, size, mask))
+               goto err;
 
        return orig_mask;
+
+err:
+       CPU_FREE(mask);
+err_out:
+       CPU_FREE(orig_mask);
+
+       /* BUG_ON due to failure in allocation of orig_mask/mask */
+       BUG_ON(-1);
 }
 
-static void bind_to_cpumask(cpu_set_t mask)
+static void bind_to_cpumask(cpu_set_t *mask)
 {
        int ret;
+       size_t size = CPU_ALLOC_SIZE(numa_num_possible_cpus());
 
-       ret = sched_setaffinity(0, sizeof(mask), &mask);
-       BUG_ON(ret);
+       ret = sched_setaffinity(0, size, mask);
+       if (ret) {
+               CPU_FREE(mask);
+               BUG_ON(ret);
+       }
 }
 
 static void mempol_restore(void)
@@ -376,7 +421,7 @@ do {                                                        \
 static u8 *alloc_data(ssize_t bytes0, int map_flags,
                      int init_zero, int init_cpu0, int thp, int init_random)
 {
-       cpu_set_t orig_mask;
+       cpu_set_t *orig_mask = NULL;
        ssize_t bytes;
        u8 *buf;
        int ret;
@@ -434,6 +479,7 @@ static u8 *alloc_data(ssize_t bytes0, int map_flags,
        /* Restore affinity: */
        if (init_cpu0) {
                bind_to_cpumask(orig_mask);
+               CPU_FREE(orig_mask);
                mempol_restore();
        }
 
@@ -585,10 +631,16 @@ static int parse_setup_cpu_list(void)
                        return -1;
                }
 
+               if (is_cpu_online(bind_cpu_0) != 1 || is_cpu_online(bind_cpu_1) != 1) {
+                       printf("\nTest not applicable, bind_cpu_0 or bind_cpu_1 is offline\n");
+                       return -1;
+               }
+
                BUG_ON(bind_cpu_0 < 0 || bind_cpu_1 < 0);
                BUG_ON(bind_cpu_0 > bind_cpu_1);
 
                for (bind_cpu = bind_cpu_0; bind_cpu <= bind_cpu_1; bind_cpu += step) {
+                       size_t size = CPU_ALLOC_SIZE(g->p.nr_cpus);
                        int i;
 
                        for (i = 0; i < mul; i++) {
@@ -608,10 +660,15 @@ static int parse_setup_cpu_list(void)
                                        tprintf("%2d", bind_cpu);
                                }
 
-                               CPU_ZERO(&td->bind_cpumask);
+                               td->bind_cpumask = CPU_ALLOC(g->p.nr_cpus);
+                               BUG_ON(!td->bind_cpumask);
+                               CPU_ZERO_S(size, td->bind_cpumask);
                                for (cpu = bind_cpu; cpu < bind_cpu+bind_len; cpu++) {
-                                       BUG_ON(cpu < 0 || cpu >= g->p.nr_cpus);
-                                       CPU_SET(cpu, &td->bind_cpumask);
+                                       if (cpu < 0 || cpu >= g->p.nr_cpus) {
+                                               CPU_FREE(td->bind_cpumask);
+                                               BUG_ON(-1);
+                                       }
+                                       CPU_SET_S(cpu, size, td->bind_cpumask);
                                }
                                t++;
                        }
@@ -752,8 +809,6 @@ static int parse_nodes_opt(const struct option *opt __maybe_unused,
        return parse_node_list(arg);
 }
 
-#define BIT(x) (1ul << x)
-
 static inline uint32_t lfsr_32(uint32_t lfsr)
 {
        const uint32_t taps = BIT(1) | BIT(5) | BIT(6) | BIT(31);
@@ -1241,7 +1296,7 @@ static void *worker_thread(void *__tdata)
                 * by migrating to CPU#0:
                 */
                if (first_task && g->p.perturb_secs && (int)(stop.tv_sec - last_perturbance) >= g->p.perturb_secs) {
-                       cpu_set_t orig_mask;
+                       cpu_set_t *orig_mask;
                        int target_cpu;
                        int this_cpu;
 
@@ -1265,6 +1320,7 @@ static void *worker_thread(void *__tdata)
                                printf(" (injecting perturbalance, moved to CPU#%d)\n", target_cpu);
 
                        bind_to_cpumask(orig_mask);
+                       CPU_FREE(orig_mask);
                }
 
                if (details >= 3) {
@@ -1398,21 +1454,31 @@ static void init_thread_data(void)
 
        for (t = 0; t < g->p.nr_tasks; t++) {
                struct thread_data *td = g->threads + t;
+               size_t cpuset_size = CPU_ALLOC_SIZE(g->p.nr_cpus);
                int cpu;
 
                /* Allow all nodes by default: */
                td->bind_node = NUMA_NO_NODE;
 
                /* Allow all CPUs by default: */
-               CPU_ZERO(&td->bind_cpumask);
+               td->bind_cpumask = CPU_ALLOC(g->p.nr_cpus);
+               BUG_ON(!td->bind_cpumask);
+               CPU_ZERO_S(cpuset_size, td->bind_cpumask);
                for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
-                       CPU_SET(cpu, &td->bind_cpumask);
+                       CPU_SET_S(cpu, cpuset_size, td->bind_cpumask);
        }
 }
 
 static void deinit_thread_data(void)
 {
        ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;
+       int t;
+
+       /* Free the bind_cpumask allocated for thread_data */
+       for (t = 0; t < g->p.nr_tasks; t++) {
+               struct thread_data *td = g->threads + t;
+               CPU_FREE(td->bind_cpumask);
+       }
 
        free_data(g->threads, size);
 }
index ba74fab..069825c 100644 (file)
@@ -989,8 +989,11 @@ static int record__thread_data_init_maps(struct record_thread *thread_data, stru
        struct mmap *overwrite_mmap = evlist->overwrite_mmap;
        struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
 
-       thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits,
-                                             thread_data->mask->maps.nbits);
+       if (cpu_map__is_dummy(cpus))
+               thread_data->nr_mmaps = nr_mmaps;
+       else
+               thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits,
+                                                     thread_data->mask->maps.nbits);
        if (mmap) {
                thread_data->maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *));
                if (!thread_data->maps)
@@ -1007,16 +1010,17 @@ static int record__thread_data_init_maps(struct record_thread *thread_data, stru
                 thread_data->nr_mmaps, thread_data->maps, thread_data->overwrite_maps);
 
        for (m = 0, tm = 0; m < nr_mmaps && tm < thread_data->nr_mmaps; m++) {
-               if (test_bit(cpus->map[m].cpu, thread_data->mask->maps.bits)) {
+               if (cpu_map__is_dummy(cpus) ||
+                   test_bit(cpus->map[m].cpu, thread_data->mask->maps.bits)) {
                        if (thread_data->maps) {
                                thread_data->maps[tm] = &mmap[m];
                                pr_debug2("thread_data[%p]: cpu%d: maps[%d] -> mmap[%d]\n",
-                                         thread_data, cpus->map[m].cpu, tm, m);
+                                         thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m);
                        }
                        if (thread_data->overwrite_maps) {
                                thread_data->overwrite_maps[tm] = &overwrite_mmap[m];
                                pr_debug2("thread_data[%p]: cpu%d: ow_maps[%d] -> ow_mmap[%d]\n",
-                                         thread_data, cpus->map[m].cpu, tm, m);
+                                         thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m);
                        }
                        tm++;
                }
@@ -3329,6 +3333,9 @@ static void record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_c
 {
        int c;
 
+       if (cpu_map__is_dummy(cpus))
+               return;
+
        for (c = 0; c < cpus->nr; c++)
                set_bit(cpus->map[c].cpu, mask->bits);
 }
@@ -3680,6 +3687,11 @@ static int record__init_thread_masks(struct record *rec)
        if (!record__threads_enabled(rec))
                return record__init_thread_default_masks(rec, cpus);
 
+       if (cpu_map__is_dummy(cpus)) {
+               pr_err("--per-thread option is mutually exclusive with parallel streaming mode.\n");
+               return -EINVAL;
+       }
+
        switch (rec->opts.threads_spec) {
        case THREAD_SPEC__CPU:
                ret = record__init_thread_cpu_masks(rec, cpus);
index 1ad75c7..afe4a55 100644 (file)
@@ -353,6 +353,7 @@ static int report__setup_sample_type(struct report *rep)
        struct perf_session *session = rep->session;
        u64 sample_type = evlist__combined_sample_type(session->evlist);
        bool is_pipe = perf_data__is_pipe(session->data);
+       struct evsel *evsel;
 
        if (session->itrace_synth_opts->callchain ||
            session->itrace_synth_opts->add_callchain ||
@@ -407,6 +408,19 @@ static int report__setup_sample_type(struct report *rep)
        }
 
        if (sort__mode == SORT_MODE__MEMORY) {
+               /*
+                * FIXUP: prior to kernel 5.18, Arm SPE did not set the
+                * PERF_SAMPLE_DATA_SRC bit in the sample type.  For backward
+                * compatibility, set the bit if it's an old perf data file.
+                */
+               evlist__for_each_entry(session->evlist, evsel) {
+                       if (strstr(evsel->name, "arm_spe") &&
+                               !(sample_type & PERF_SAMPLE_DATA_SRC)) {
+                               evsel->core.attr.sample_type |= PERF_SAMPLE_DATA_SRC;
+                               sample_type |= PERF_SAMPLE_DATA_SRC;
+                       }
+               }
+
                if (!is_pipe && !(sample_type & PERF_SAMPLE_DATA_SRC)) {
                        ui__error("Selected --mem-mode but no mem data. "
                                  "Did you call perf record without -d?\n");
index a2f1179..cf5eab5 100644 (file)
@@ -461,7 +461,7 @@ static int evsel__check_attr(struct evsel *evsel, struct perf_session *session)
                return -EINVAL;
 
        if (PRINT_FIELD(DATA_SRC) &&
-           evsel__check_stype(evsel, PERF_SAMPLE_DATA_SRC, "DATA_SRC", PERF_OUTPUT_DATA_SRC))
+           evsel__do_check_stype(evsel, PERF_SAMPLE_DATA_SRC, "DATA_SRC", PERF_OUTPUT_DATA_SRC, allow_user_set))
                return -EINVAL;
 
        if (PRINT_FIELD(WEIGHT) &&
index cc6df49..4ad0dfb 100644 (file)
@@ -123,6 +123,10 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
                evsel->core.attr.enable_on_exec = 0;
        }
 
+       if (evlist__open(evlist) == -ENOENT) {
+               err = TEST_SKIP;
+               goto out_err;
+       }
        CHECK__(evlist__open(evlist));
 
        CHECK__(evlist__mmap(evlist, UINT_MAX));
index df7b18f..1aad7d6 100644 (file)
 #include "llvm/Option/Option.h"
 #include "llvm/Support/FileSystem.h"
 #include "llvm/Support/ManagedStatic.h"
+#if CLANG_VERSION_MAJOR >= 14
+#include "llvm/MC/TargetRegistry.h"
+#else
 #include "llvm/Support/TargetRegistry.h"
+#endif
 #include "llvm/Support/TargetSelect.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetOptions.h"
index d546ff7..a27132e 100644 (file)
@@ -983,6 +983,57 @@ static int write_dir_format(struct feat_fd *ff,
        return do_write(ff, &data->dir.version, sizeof(data->dir.version));
 }
 
+/*
+ * Check whether a CPU is online
+ *
+ * Returns:
+ *     1 -> if CPU is online
+ *     0 -> if CPU is offline
+ *    -1 -> error case
+ */
+int is_cpu_online(unsigned int cpu)
+{
+       char *str;
+       size_t strlen;
+       char buf[256];
+       int status = -1;
+       struct stat statbuf;
+
+       snprintf(buf, sizeof(buf),
+               "/sys/devices/system/cpu/cpu%d", cpu);
+       if (stat(buf, &statbuf) != 0)
+               return 0;
+
+       /*
+        * Check if the /sys/devices/system/cpu/cpuX/online file
+        * exists. In some cases cpu0 won't have an online file since
+        * it is generally not expected to be turned off.
+        * In kernels without CONFIG_HOTPLUG_CPU, this
+        * file won't exist.
+        */
+       snprintf(buf, sizeof(buf),
+               "/sys/devices/system/cpu/cpu%d/online", cpu);
+       if (stat(buf, &statbuf) != 0)
+               return 1;
+
+       /*
+        * Read the online file using sysfs__read_str.
+        * If the open or read fails, return -1.
+        * If it succeeds, return the value read from the
+        * file, which gets stored in "str".
+        */
+       snprintf(buf, sizeof(buf),
+               "devices/system/cpu/cpu%d/online", cpu);
+
+       if (sysfs__read_str(buf, &str, &strlen) < 0)
+               return status;
+
+       status = atoi(str);
+
+       free(str);
+       return status;
+}
+
 #ifdef HAVE_LIBBPF_SUPPORT
 static int write_bpf_prog_info(struct feat_fd *ff,
                               struct evlist *evlist __maybe_unused)
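
The new is_cpu_online() helper encodes a three-state result (1 online, 0 offline, -1 error), so callers need a three-way check rather than treating it as a boolean. For readers who want to try the same sysfs check outside perf, here is a standalone userspace sketch of the equivalent logic (this is not the patch's code, just the same idea; paths are the standard sysfs CPU nodes):

#include <stdio.h>
#include <sys/stat.h>

/* Returns 1 if the CPU is online, 0 if offline, -1 on error. */
static int cpu_online(unsigned int cpu)
{
	char path[256];
	struct stat sb;
	FILE *fp;
	int val;

	snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%u", cpu);
	if (stat(path, &sb) != 0)
		return 0;               /* no cpuN directory -> treat as offline */

	snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%u/online", cpu);
	fp = fopen(path, "r");
	if (!fp)
		return 1;               /* e.g. cpu0 without CONFIG_HOTPLUG_CPU */

	if (fscanf(fp, "%d", &val) != 1)
		val = -1;
	fclose(fp);
	return val;
}

int main(void)
{
	printf("cpu0 online: %d\n", cpu_online(0));
	return 0;
}
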
index c9e3265..0eb4bc2 100644 (file)
@@ -158,6 +158,7 @@ int do_write(struct feat_fd *fd, const void *buf, size_t size);
 int write_padded(struct feat_fd *fd, const void *bf,
                 size_t count, size_t count_aligned);
 
+int is_cpu_online(unsigned int cpu);
 /*
  * arch specific callback
  */
index 2499792..dd84fed 100644 (file)
@@ -1523,7 +1523,9 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
        bool use_uncore_alias;
        LIST_HEAD(config_terms);
 
-       if (verbose > 1) {
+       pmu = parse_state->fake_pmu ?: perf_pmu__find(name);
+
+       if (verbose > 1 && !(pmu && pmu->selectable)) {
                fprintf(stderr, "Attempting to add event pmu '%s' with '",
                        name);
                if (head_config) {
@@ -1536,7 +1538,6 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
                fprintf(stderr, "' that may result in non-fatal errors\n");
        }
 
-       pmu = parse_state->fake_pmu ?: perf_pmu__find(name);
        if (!pmu) {
                char *err_str;
 
index ee6f034..817a2de 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <errno.h>
+#include <linux/err.h>
 #include <inttypes.h>
 #include <math.h>
 #include <string.h>
@@ -311,7 +312,7 @@ static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals,
 
        if (!mask) {
                mask = hashmap__new(pkg_id_hash, pkg_id_equal, NULL);
-               if (!mask)
+               if (IS_ERR(mask))
                        return -ENOMEM;
 
                counter->per_pkg_mask = mask;
index 81539f5..d5c1bcb 100644 (file)
@@ -25,7 +25,8 @@ struct kmem_cache {
        void (*ctor)(void *);
 };
 
-void *kmem_cache_alloc(struct kmem_cache *cachep, int gfp)
+void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
+               int gfp)
 {
        void *p;
 
index 429f7ee..fd23c80 100755 (executable)
@@ -159,6 +159,17 @@ flooding_remotes_add()
        local lsb
        local i
 
+       # Prevent unwanted packets from entering the bridge and interfering
+       # with the test.
+       tc qdisc add dev br0 clsact
+       tc filter add dev br0 egress protocol all pref 1 handle 1 \
+               matchall skip_hw action drop
+       tc qdisc add dev $h1 clsact
+       tc filter add dev $h1 egress protocol all pref 1 handle 1 \
+               flower skip_hw dst_mac de:ad:be:ef:13:37 action pass
+       tc filter add dev $h1 egress protocol all pref 2 handle 2 \
+               matchall skip_hw action drop
+
        for i in $(eval echo {1..$num_remotes}); do
                lsb=$((i + 1))
 
@@ -195,6 +206,12 @@ flooding_filters_del()
        done
 
        tc qdisc del dev $rp2 clsact
+
+       tc filter del dev $h1 egress protocol all pref 2 handle 2 matchall
+       tc filter del dev $h1 egress protocol all pref 1 handle 1 flower
+       tc qdisc del dev $h1 clsact
+       tc filter del dev br0 egress protocol all pref 1 handle 1 matchall
+       tc qdisc del dev br0 clsact
 }
 
 flooding_check_packets()
index fedcb7b..af5ea50 100755 (executable)
@@ -172,6 +172,17 @@ flooding_filters_add()
        local lsb
        local i
 
+       # Prevent unwanted packets from entering the bridge and interfering
+       # with the test.
+       tc qdisc add dev br0 clsact
+       tc filter add dev br0 egress protocol all pref 1 handle 1 \
+               matchall skip_hw action drop
+       tc qdisc add dev $h1 clsact
+       tc filter add dev $h1 egress protocol all pref 1 handle 1 \
+               flower skip_hw dst_mac de:ad:be:ef:13:37 action pass
+       tc filter add dev $h1 egress protocol all pref 2 handle 2 \
+               matchall skip_hw action drop
+
        tc qdisc add dev $rp2 clsact
 
        for i in $(eval echo {1..$num_remotes}); do
@@ -194,6 +205,12 @@ flooding_filters_del()
        done
 
        tc qdisc del dev $rp2 clsact
+
+       tc filter del dev $h1 egress protocol all pref 2 handle 2 matchall
+       tc filter del dev $h1 egress protocol all pref 1 handle 1 flower
+       tc qdisc del dev $h1 clsact
+       tc filter del dev br0 egress protocol all pref 1 handle 1 matchall
+       tc qdisc del dev br0 clsact
 }
 
 flooding_check_packets()
index d1e8f52..0b0e440 100644 (file)
@@ -3,6 +3,7 @@
 /aarch64/debug-exceptions
 /aarch64/get-reg-list
 /aarch64/psci_cpu_on_test
+/aarch64/vcpu_width_config
 /aarch64/vgic_init
 /aarch64/vgic_irq
 /s390x/memop
@@ -33,6 +34,7 @@
 /x86_64/state_test
 /x86_64/svm_vmcall_test
 /x86_64/svm_int_ctl_test
+/x86_64/tsc_scaling_sync
 /x86_64/sync_regs_test
 /x86_64/tsc_msrs_test
 /x86_64/userspace_io_test
index 21c2dbd..681b173 100644 (file)
@@ -106,6 +106,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/arch_timer
 TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
 TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
 TEST_GEN_PROGS_aarch64 += aarch64/psci_cpu_on_test
+TEST_GEN_PROGS_aarch64 += aarch64/vcpu_width_config
 TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
 TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq
 TEST_GEN_PROGS_aarch64 += demand_paging_test
index b08d30b..3b940a1 100644 (file)
@@ -362,11 +362,12 @@ static void test_init_timer_irq(struct kvm_vm *vm)
        pr_debug("ptimer_irq: %d; vtimer_irq: %d\n", ptimer_irq, vtimer_irq);
 }
 
+static int gic_fd;
+
 static struct kvm_vm *test_vm_create(void)
 {
        struct kvm_vm *vm;
        unsigned int i;
-       int ret;
        int nr_vcpus = test_args.nr_vcpus;
 
        vm = vm_create_default_with_vcpus(nr_vcpus, 0, 0, guest_code, NULL);
@@ -383,8 +384,8 @@ static struct kvm_vm *test_vm_create(void)
 
        ucall_init(vm, NULL);
        test_init_timer_irq(vm);
-       ret = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
-       if (ret < 0) {
+       gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
+       if (gic_fd < 0) {
                print_skip("Failed to create vgic-v3");
                exit(KSFT_SKIP);
        }
@@ -395,6 +396,12 @@ static struct kvm_vm *test_vm_create(void)
        return vm;
 }
 
+static void test_vm_cleanup(struct kvm_vm *vm)
+{
+       close(gic_fd);
+       kvm_vm_free(vm);
+}
+
 static void test_print_help(char *name)
 {
        pr_info("Usage: %s [-h] [-n nr_vcpus] [-i iterations] [-p timer_period_ms]\n",
@@ -478,7 +485,7 @@ int main(int argc, char *argv[])
 
        vm = test_vm_create();
        test_run(vm);
-       kvm_vm_free(vm);
+       test_vm_cleanup(vm);
 
        return 0;
 }
index f12147c..0b571f3 100644 (file)
@@ -503,8 +503,13 @@ static void run_test(struct vcpu_config *c)
                ++missing_regs;
 
        if (new_regs || missing_regs) {
+               n = 0;
+               for_each_reg_filtered(i)
+                       ++n;
+
                printf("%s: Number blessed registers: %5lld\n", config_name(c), blessed_n);
-               printf("%s: Number registers:         %5lld\n", config_name(c), reg_list->n);
+               printf("%s: Number registers:         %5lld (includes %lld filtered registers)\n",
+                      config_name(c), reg_list->n, reg_list->n - n);
        }
 
        if (new_regs) {
@@ -683,9 +688,10 @@ static __u64 base_regs[] = {
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[4]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpsr),
        KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpcr),
-       KVM_REG_ARM_FW_REG(0),
-       KVM_REG_ARM_FW_REG(1),
-       KVM_REG_ARM_FW_REG(2),
+       KVM_REG_ARM_FW_REG(0),          /* KVM_REG_ARM_PSCI_VERSION */
+       KVM_REG_ARM_FW_REG(1),          /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 */
+       KVM_REG_ARM_FW_REG(2),          /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 */
+       KVM_REG_ARM_FW_REG(3),          /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 */
        ARM64_SYS_REG(3, 3, 14, 3, 1),  /* CNTV_CTL_EL0 */
        ARM64_SYS_REG(3, 3, 14, 3, 2),  /* CNTV_CVAL_EL0 */
        ARM64_SYS_REG(3, 3, 14, 0, 2),
diff --git a/tools/testing/selftests/kvm/aarch64/vcpu_width_config.c b/tools/testing/selftests/kvm/aarch64/vcpu_width_config.c
new file mode 100644 (file)
index 0000000..6e94026
--- /dev/null
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vcpu_width_config - Test KVM_ARM_VCPU_INIT() with KVM_ARM_VCPU_EL1_32BIT.
+ *
+ * Copyright (c) 2022 Google LLC.
+ *
+ * This is a test that ensures that non-mixed-width vCPUs (all 64bit vCPUs
+ * or all 32bit vCPUs) can be configured and mixed-width vCPUs cannot be
+ * configured.
+ */
+
+#include "kvm_util.h"
+#include "processor.h"
+#include "test_util.h"
+
+
+/*
+ * Add a vCPU, run KVM_ARM_VCPU_INIT with @init1, and then
+ * add another vCPU, and run KVM_ARM_VCPU_INIT with @init2.
+ */
+static int add_init_2vcpus(struct kvm_vcpu_init *init1,
+                          struct kvm_vcpu_init *init2)
+{
+       struct kvm_vm *vm;
+       int ret;
+
+       vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
+
+       vm_vcpu_add(vm, 0);
+       ret = _vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, init1);
+       if (ret)
+               goto free_exit;
+
+       vm_vcpu_add(vm, 1);
+       ret = _vcpu_ioctl(vm, 1, KVM_ARM_VCPU_INIT, init2);
+
+free_exit:
+       kvm_vm_free(vm);
+       return ret;
+}
+
+/*
+ * Add two vCPUs, then run KVM_ARM_VCPU_INIT for one vCPU with @init1,
+ * and run KVM_ARM_VCPU_INIT for another vCPU with @init2.
+ */
+static int add_2vcpus_init_2vcpus(struct kvm_vcpu_init *init1,
+                                 struct kvm_vcpu_init *init2)
+{
+       struct kvm_vm *vm;
+       int ret;
+
+       vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
+
+       vm_vcpu_add(vm, 0);
+       vm_vcpu_add(vm, 1);
+
+       ret = _vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, init1);
+       if (ret)
+               goto free_exit;
+
+       ret = _vcpu_ioctl(vm, 1, KVM_ARM_VCPU_INIT, init2);
+
+free_exit:
+       kvm_vm_free(vm);
+       return ret;
+}
+
+/*
+ * Tests that two 64bit vCPUs can be configured, two 32bit vCPUs can be
+ * configured, and two mixed-width vCPUs cannot be configured.
+ * Each of those three cases, configure vCPUs in two different orders.
+ * The one is running KVM_CREATE_VCPU for 2 vCPUs, and then running
+ * KVM_ARM_VCPU_INIT for them.
+ * The other is running KVM_CREATE_VCPU and KVM_ARM_VCPU_INIT for a vCPU,
+ * and then run those commands for another vCPU.
+ */
+int main(void)
+{
+       struct kvm_vcpu_init init1, init2;
+       struct kvm_vm *vm;
+       int ret;
+
+       if (!kvm_check_cap(KVM_CAP_ARM_EL1_32BIT)) {
+               print_skip("KVM_CAP_ARM_EL1_32BIT is not supported");
+               exit(KSFT_SKIP);
+       }
+
+       /* Get the preferred target type and copy that to init2 for later use */
+       vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
+       vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init1);
+       kvm_vm_free(vm);
+       init2 = init1;
+
+       /* Test with 64bit vCPUs */
+       ret = add_init_2vcpus(&init1, &init1);
+       TEST_ASSERT(ret == 0,
+                   "Configuring 64bit EL1 vCPUs failed unexpectedly");
+       ret = add_2vcpus_init_2vcpus(&init1, &init1);
+       TEST_ASSERT(ret == 0,
+                   "Configuring 64bit EL1 vCPUs failed unexpectedly");
+
+       /* Test with 32bit vCPUs */
+       init1.features[0] = (1 << KVM_ARM_VCPU_EL1_32BIT);
+       ret = add_init_2vcpus(&init1, &init1);
+       TEST_ASSERT(ret == 0,
+                   "Configuring 32bit EL1 vCPUs failed unexpectedly");
+       ret = add_2vcpus_init_2vcpus(&init1, &init1);
+       TEST_ASSERT(ret == 0,
+                   "Configuring 32bit EL1 vCPUs failed unexpectedly");
+
+       /* Test with mixed-width vCPUs  */
+       init1.features[0] = 0;
+       init2.features[0] = (1 << KVM_ARM_VCPU_EL1_32BIT);
+       ret = add_init_2vcpus(&init1, &init2);
+       TEST_ASSERT(ret != 0,
+                   "Configuring mixed-width vCPUs worked unexpectedly");
+       ret = add_2vcpus_init_2vcpus(&init1, &init2);
+       TEST_ASSERT(ret != 0,
+                   "Configuring mixed-width vCPUs worked unexpectedly");
+
+       return 0;
+}
index c9d9e51..7b47ae4 100644 (file)
 #include "test_util.h"
 #include "perf_test_util.h"
 #include "guest_modes.h"
+
 #ifdef __aarch64__
 #include "aarch64/vgic.h"
 
 #define GICD_BASE_GPA                  0x8000000ULL
 #define GICR_BASE_GPA                  0x80A0000ULL
+
+static int gic_fd;
+
+static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
+{
+       /*
+        * The test can still run even if hardware does not support GICv3, as it
+        * is only an optimization to reduce guest exits.
+        */
+       gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
+}
+
+static void arch_cleanup_vm(struct kvm_vm *vm)
+{
+       if (gic_fd > 0)
+               close(gic_fd);
+}
+
+#else /* __aarch64__ */
+
+static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
+{
+}
+
+static void arch_cleanup_vm(struct kvm_vm *vm)
+{
+}
+
 #endif
 
 /* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop)*/
@@ -206,9 +235,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
                vm_enable_cap(vm, &cap);
        }
 
-#ifdef __aarch64__
-       vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
-#endif
+       arch_setup_vm(vm, nr_vcpus);
 
        /* Start the iterations */
        iteration = 0;
@@ -302,6 +329,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
        }
 
        free_bitmaps(bitmaps, p->slots);
+       arch_cleanup_vm(vm);
        perf_test_destroy_vm(vm);
 }
 
index dc284c6..eca5c62 100644 (file)
@@ -101,7 +101,9 @@ static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id,
 #define PGTBL_PTE_WRITE_SHIFT                  2
 #define PGTBL_PTE_READ_MASK                    0x0000000000000002ULL
 #define PGTBL_PTE_READ_SHIFT                   1
-#define PGTBL_PTE_PERM_MASK                    (PGTBL_PTE_EXECUTE_MASK | \
+#define PGTBL_PTE_PERM_MASK                    (PGTBL_PTE_ACCESSED_MASK | \
+                                                PGTBL_PTE_DIRTY_MASK | \
+                                                PGTBL_PTE_EXECUTE_MASK | \
                                                 PGTBL_PTE_WRITE_MASK | \
                                                 PGTBL_PTE_READ_MASK)
 #define PGTBL_PTE_VALID_MASK                   0x0000000000000001ULL
index 37db341..d0d51ad 100644 (file)
 /* CPUID.0x8000_0001.EDX */
 #define CPUID_GBPAGES          (1ul << 26)
 
+/* Page table bitfield declarations */
+#define PTE_PRESENT_MASK        BIT_ULL(0)
+#define PTE_WRITABLE_MASK       BIT_ULL(1)
+#define PTE_USER_MASK           BIT_ULL(2)
+#define PTE_ACCESSED_MASK       BIT_ULL(5)
+#define PTE_DIRTY_MASK          BIT_ULL(6)
+#define PTE_LARGE_MASK          BIT_ULL(7)
+#define PTE_GLOBAL_MASK         BIT_ULL(8)
+#define PTE_NX_MASK             BIT_ULL(63)
+
+#define PAGE_SHIFT             12
+#define PAGE_SIZE              (1ULL << PAGE_SHIFT)
+#define PAGE_MASK              (~(PAGE_SIZE-1))
+
+#define PHYSICAL_PAGE_MASK      GENMASK_ULL(51, 12)
+#define PTE_GET_PFN(pte)        (((pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
+
 /* General Registers in 64-Bit Mode */
 struct gpr64_regs {
        u64 rax;
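
The defines above replace the packed-bitfield page table structs (removed in the processor.c hunks below) with plain 64-bit entries manipulated through masks. A standalone sketch of the idiom, using local copies of the new macros (the DEMO_ names and the physical address are invented for the demo):

#include <stdint.h>
#include <stdio.h>

#define DEMO_PTE_PRESENT      (1ULL << 0)
#define DEMO_PTE_WRITABLE     (1ULL << 1)
#define DEMO_PAGE_SHIFT       12
#define DEMO_PHYS_PAGE_MASK   (((1ULL << 52) - 1) & ~((1ULL << DEMO_PAGE_SHIFT) - 1)) /* bits 51:12 */
#define DEMO_PTE_GET_PFN(pte) (((pte) & DEMO_PHYS_PAGE_MASK) >> DEMO_PAGE_SHIFT)

int main(void)
{
	uint64_t paddr = 0x1234000;   /* arbitrary page-aligned physical address */
	uint64_t pte = DEMO_PTE_PRESENT | DEMO_PTE_WRITABLE | (paddr & DEMO_PHYS_PAGE_MASK);

	printf("present=%d writable=%d pfn=0x%llx\n",
	       !!(pte & DEMO_PTE_PRESENT), !!(pte & DEMO_PTE_WRITABLE),
	       (unsigned long long)DEMO_PTE_GET_PFN(pte));
	return 0;
}
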
index ba1fdc3..2c4a756 100644 (file)
@@ -278,7 +278,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
        else
                guest_test_phys_mem = p->phys_offset;
 #ifdef __s390x__
-       alignment = max(0x100000, alignment);
+       alignment = max(0x100000UL, alignment);
 #endif
        guest_test_phys_mem = align_down(guest_test_phys_mem, alignment);
 
index d377f26..3961487 100644 (file)
@@ -268,7 +268,7 @@ void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
                core.regs.t3, core.regs.t4, core.regs.t5, core.regs.t6);
 }
 
-static void guest_hang(void)
+static void __aligned(16) guest_hang(void)
 {
        while (1)
                ;
index 9f000df..33ea5e9 100644 (file)
 
 vm_vaddr_t exception_handlers;
 
-/* Virtual translation table structure declarations */
-struct pageUpperEntry {
-       uint64_t present:1;
-       uint64_t writable:1;
-       uint64_t user:1;
-       uint64_t write_through:1;
-       uint64_t cache_disable:1;
-       uint64_t accessed:1;
-       uint64_t ignored_06:1;
-       uint64_t page_size:1;
-       uint64_t ignored_11_08:4;
-       uint64_t pfn:40;
-       uint64_t ignored_62_52:11;
-       uint64_t execute_disable:1;
-};
-
-struct pageTableEntry {
-       uint64_t present:1;
-       uint64_t writable:1;
-       uint64_t user:1;
-       uint64_t write_through:1;
-       uint64_t cache_disable:1;
-       uint64_t accessed:1;
-       uint64_t dirty:1;
-       uint64_t reserved_07:1;
-       uint64_t global:1;
-       uint64_t ignored_11_09:3;
-       uint64_t pfn:40;
-       uint64_t ignored_62_52:11;
-       uint64_t execute_disable:1;
-};
-
 void regs_dump(FILE *stream, struct kvm_regs *regs,
               uint8_t indent)
 {
@@ -195,23 +163,21 @@ static void *virt_get_pte(struct kvm_vm *vm, uint64_t pt_pfn, uint64_t vaddr,
        return &page_table[index];
 }
 
-static struct pageUpperEntry *virt_create_upper_pte(struct kvm_vm *vm,
-                                                   uint64_t pt_pfn,
-                                                   uint64_t vaddr,
-                                                   uint64_t paddr,
-                                                   int level,
-                                                   enum x86_page_size page_size)
+static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
+                                      uint64_t pt_pfn,
+                                      uint64_t vaddr,
+                                      uint64_t paddr,
+                                      int level,
+                                      enum x86_page_size page_size)
 {
-       struct pageUpperEntry *pte = virt_get_pte(vm, pt_pfn, vaddr, level);
-
-       if (!pte->present) {
-               pte->writable = true;
-               pte->present = true;
-               pte->page_size = (level == page_size);
-               if (pte->page_size)
-                       pte->pfn = paddr >> vm->page_shift;
+       uint64_t *pte = virt_get_pte(vm, pt_pfn, vaddr, level);
+
+       if (!(*pte & PTE_PRESENT_MASK)) {
+               *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK;
+               if (level == page_size)
+                       *pte |= PTE_LARGE_MASK | (paddr & PHYSICAL_PAGE_MASK);
                else
-                       pte->pfn = vm_alloc_page_table(vm) >> vm->page_shift;
+                       *pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK;
        } else {
                /*
                 * Entry already present.  Assert that the caller doesn't want
@@ -221,7 +187,7 @@ static struct pageUpperEntry *virt_create_upper_pte(struct kvm_vm *vm,
                TEST_ASSERT(level != page_size,
                            "Cannot create hugepage at level: %u, vaddr: 0x%lx\n",
                            page_size, vaddr);
-               TEST_ASSERT(!pte->page_size,
+               TEST_ASSERT(!(*pte & PTE_LARGE_MASK),
                            "Cannot create page table at level: %u, vaddr: 0x%lx\n",
                            level, vaddr);
        }
@@ -232,8 +198,8 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
                   enum x86_page_size page_size)
 {
        const uint64_t pg_size = 1ull << ((page_size * 9) + 12);
-       struct pageUpperEntry *pml4e, *pdpe, *pde;
-       struct pageTableEntry *pte;
+       uint64_t *pml4e, *pdpe, *pde;
+       uint64_t *pte;
 
        TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K,
                    "Unknown or unsupported guest mode, mode: 0x%x", vm->mode);
@@ -257,24 +223,22 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
         */
        pml4e = virt_create_upper_pte(vm, vm->pgd >> vm->page_shift,
                                      vaddr, paddr, 3, page_size);
-       if (pml4e->page_size)
+       if (*pml4e & PTE_LARGE_MASK)
                return;
 
-       pdpe = virt_create_upper_pte(vm, pml4e->pfn, vaddr, paddr, 2, page_size);
-       if (pdpe->page_size)
+       pdpe = virt_create_upper_pte(vm, PTE_GET_PFN(*pml4e), vaddr, paddr, 2, page_size);
+       if (*pdpe & PTE_LARGE_MASK)
                return;
 
-       pde = virt_create_upper_pte(vm, pdpe->pfn, vaddr, paddr, 1, page_size);
-       if (pde->page_size)
+       pde = virt_create_upper_pte(vm, PTE_GET_PFN(*pdpe), vaddr, paddr, 1, page_size);
+       if (*pde & PTE_LARGE_MASK)
                return;
 
        /* Fill in page table entry. */
-       pte = virt_get_pte(vm, pde->pfn, vaddr, 0);
-       TEST_ASSERT(!pte->present,
+       pte = virt_get_pte(vm, PTE_GET_PFN(*pde), vaddr, 0);
+       TEST_ASSERT(!(*pte & PTE_PRESENT_MASK),
                    "PTE already present for 4k page at vaddr: 0x%lx\n", vaddr);
-       pte->pfn = paddr >> vm->page_shift;
-       pte->writable = true;
-       pte->present = 1;
+       *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);
 }
 
 void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
@@ -282,22 +246,22 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
        __virt_pg_map(vm, vaddr, paddr, X86_PAGE_SIZE_4K);
 }
 
-static struct pageTableEntry *_vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid,
+static uint64_t *_vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid,
                                                       uint64_t vaddr)
 {
        uint16_t index[4];
-       struct pageUpperEntry *pml4e, *pdpe, *pde;
-       struct pageTableEntry *pte;
+       uint64_t *pml4e, *pdpe, *pde;
+       uint64_t *pte;
        struct kvm_cpuid_entry2 *entry;
        struct kvm_sregs sregs;
        int max_phy_addr;
-       /* Set the bottom 52 bits. */
-       uint64_t rsvd_mask = 0x000fffffffffffff;
+       uint64_t rsvd_mask = 0;
 
        entry = kvm_get_supported_cpuid_index(0x80000008, 0);
        max_phy_addr = entry->eax & 0x000000ff;
-       /* Clear the bottom bits of the reserved mask. */
-       rsvd_mask = (rsvd_mask >> max_phy_addr) << max_phy_addr;
+       /* Set the high bits in the reserved mask. */
+       if (max_phy_addr < 52)
+               rsvd_mask = GENMASK_ULL(51, max_phy_addr);
 
        /*
         * SDM vol 3, fig 4-11 "Formats of CR3 and Paging-Structure Entries
@@ -307,7 +271,7 @@ static struct pageTableEntry *_vm_get_page_table_entry(struct kvm_vm *vm, int vc
         */
        vcpu_sregs_get(vm, vcpuid, &sregs);
        if ((sregs.efer & EFER_NX) == 0) {
-               rsvd_mask |= (1ull << 63);
+               rsvd_mask |= PTE_NX_MASK;
        }
 
        TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
@@ -329,30 +293,29 @@ static struct pageTableEntry *_vm_get_page_table_entry(struct kvm_vm *vm, int vc
        index[3] = (vaddr >> 39) & 0x1ffu;
 
        pml4e = addr_gpa2hva(vm, vm->pgd);
-       TEST_ASSERT(pml4e[index[3]].present,
+       TEST_ASSERT(pml4e[index[3]] & PTE_PRESENT_MASK,
                "Expected pml4e to be present for gva: 0x%08lx", vaddr);
-       TEST_ASSERT((*(uint64_t*)(&pml4e[index[3]]) &
-               (rsvd_mask | (1ull << 7))) == 0,
+       TEST_ASSERT((pml4e[index[3]] & (rsvd_mask | PTE_LARGE_MASK)) == 0,
                "Unexpected reserved bits set.");
 
-       pdpe = addr_gpa2hva(vm, pml4e[index[3]].pfn * vm->page_size);
-       TEST_ASSERT(pdpe[index[2]].present,
+       pdpe = addr_gpa2hva(vm, PTE_GET_PFN(pml4e[index[3]]) * vm->page_size);
+       TEST_ASSERT(pdpe[index[2]] & PTE_PRESENT_MASK,
                "Expected pdpe to be present for gva: 0x%08lx", vaddr);
-       TEST_ASSERT(pdpe[index[2]].page_size == 0,
+       TEST_ASSERT(!(pdpe[index[2]] & PTE_LARGE_MASK),
                "Expected pdpe to map a pde not a 1-GByte page.");
-       TEST_ASSERT((*(uint64_t*)(&pdpe[index[2]]) & rsvd_mask) == 0,
+       TEST_ASSERT((pdpe[index[2]] & rsvd_mask) == 0,
                "Unexpected reserved bits set.");
 
-       pde = addr_gpa2hva(vm, pdpe[index[2]].pfn * vm->page_size);
-       TEST_ASSERT(pde[index[1]].present,
+       pde = addr_gpa2hva(vm, PTE_GET_PFN(pdpe[index[2]]) * vm->page_size);
+       TEST_ASSERT(pde[index[1]] & PTE_PRESENT_MASK,
                "Expected pde to be present for gva: 0x%08lx", vaddr);
-       TEST_ASSERT(pde[index[1]].page_size == 0,
+       TEST_ASSERT(!(pde[index[1]] & PTE_LARGE_MASK),
                "Expected pde to map a pte not a 2-MByte page.");
-       TEST_ASSERT((*(uint64_t*)(&pde[index[1]]) & rsvd_mask) == 0,
+       TEST_ASSERT((pde[index[1]] & rsvd_mask) == 0,
                "Unexpected reserved bits set.");
 
-       pte = addr_gpa2hva(vm, pde[index[1]].pfn * vm->page_size);
-       TEST_ASSERT(pte[index[0]].present,
+       pte = addr_gpa2hva(vm, PTE_GET_PFN(pde[index[1]]) * vm->page_size);
+       TEST_ASSERT(pte[index[0]] & PTE_PRESENT_MASK,
                "Expected pte to be present for gva: 0x%08lx", vaddr);
 
        return &pte[index[0]];
@@ -360,7 +323,7 @@ static struct pageTableEntry *_vm_get_page_table_entry(struct kvm_vm *vm, int vc
 
 uint64_t vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr)
 {
-       struct pageTableEntry *pte = _vm_get_page_table_entry(vm, vcpuid, vaddr);
+       uint64_t *pte = _vm_get_page_table_entry(vm, vcpuid, vaddr);
 
        return *(uint64_t *)pte;
 }
@@ -368,18 +331,17 @@ uint64_t vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr)
 void vm_set_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr,
                             uint64_t pte)
 {
-       struct pageTableEntry *new_pte = _vm_get_page_table_entry(vm, vcpuid,
-                                                                 vaddr);
+       uint64_t *new_pte = _vm_get_page_table_entry(vm, vcpuid, vaddr);
 
        *(uint64_t *)new_pte = pte;
 }
 
 void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 {
-       struct pageUpperEntry *pml4e, *pml4e_start;
-       struct pageUpperEntry *pdpe, *pdpe_start;
-       struct pageUpperEntry *pde, *pde_start;
-       struct pageTableEntry *pte, *pte_start;
+       uint64_t *pml4e, *pml4e_start;
+       uint64_t *pdpe, *pdpe_start;
+       uint64_t *pde, *pde_start;
+       uint64_t *pte, *pte_start;
 
        if (!vm->pgd_created)
                return;
@@ -389,58 +351,58 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
        fprintf(stream, "%*s      index hvaddr         gpaddr         "
                "addr         w exec dirty\n",
                indent, "");
-       pml4e_start = (struct pageUpperEntry *) addr_gpa2hva(vm, vm->pgd);
+       pml4e_start = (uint64_t *) addr_gpa2hva(vm, vm->pgd);
        for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) {
                pml4e = &pml4e_start[n1];
-               if (!pml4e->present)
+               if (!(*pml4e & PTE_PRESENT_MASK))
                        continue;
-               fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10lx %u "
+               fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10llx %u "
                        " %u\n",
                        indent, "",
                        pml4e - pml4e_start, pml4e,
-                       addr_hva2gpa(vm, pml4e), (uint64_t) pml4e->pfn,
-                       pml4e->writable, pml4e->execute_disable);
+                       addr_hva2gpa(vm, pml4e), PTE_GET_PFN(*pml4e),
+                       !!(*pml4e & PTE_WRITABLE_MASK), !!(*pml4e & PTE_NX_MASK));
 
-               pdpe_start = addr_gpa2hva(vm, pml4e->pfn * vm->page_size);
+               pdpe_start = addr_gpa2hva(vm, *pml4e & PHYSICAL_PAGE_MASK);
                for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) {
                        pdpe = &pdpe_start[n2];
-                       if (!pdpe->present)
+                       if (!(*pdpe & PTE_PRESENT_MASK))
                                continue;
-                       fprintf(stream, "%*spdpe  0x%-3zx %p 0x%-12lx 0x%-10lx "
+                       fprintf(stream, "%*spdpe  0x%-3zx %p 0x%-12lx 0x%-10llx "
                                "%u  %u\n",
                                indent, "",
                                pdpe - pdpe_start, pdpe,
                                addr_hva2gpa(vm, pdpe),
-                               (uint64_t) pdpe->pfn, pdpe->writable,
-                               pdpe->execute_disable);
+                               PTE_GET_PFN(*pdpe), !!(*pdpe & PTE_WRITABLE_MASK),
+                               !!(*pdpe & PTE_NX_MASK));
 
-                       pde_start = addr_gpa2hva(vm, pdpe->pfn * vm->page_size);
+                       pde_start = addr_gpa2hva(vm, *pdpe & PHYSICAL_PAGE_MASK);
                        for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) {
                                pde = &pde_start[n3];
-                               if (!pde->present)
+                               if (!(*pde & PTE_PRESENT_MASK))
                                        continue;
                                fprintf(stream, "%*spde   0x%-3zx %p "
-                                       "0x%-12lx 0x%-10lx %u  %u\n",
+                                       "0x%-12lx 0x%-10llx %u  %u\n",
                                        indent, "", pde - pde_start, pde,
                                        addr_hva2gpa(vm, pde),
-                                       (uint64_t) pde->pfn, pde->writable,
-                                       pde->execute_disable);
+                                       PTE_GET_PFN(*pde), !!(*pde & PTE_WRITABLE_MASK),
+                                       !!(*pde & PTE_NX_MASK));
 
-                               pte_start = addr_gpa2hva(vm, pde->pfn * vm->page_size);
+                               pte_start = addr_gpa2hva(vm, *pde & PHYSICAL_PAGE_MASK);
                                for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) {
                                        pte = &pte_start[n4];
-                                       if (!pte->present)
+                                       if (!(*pte & PTE_PRESENT_MASK))
                                                continue;
                                        fprintf(stream, "%*spte   0x%-3zx %p "
-                                               "0x%-12lx 0x%-10lx %u  %u "
+                                               "0x%-12lx 0x%-10llx %u  %u "
                                                "    %u    0x%-10lx\n",
                                                indent, "",
                                                pte - pte_start, pte,
                                                addr_hva2gpa(vm, pte),
-                                               (uint64_t) pte->pfn,
-                                               pte->writable,
-                                               pte->execute_disable,
-                                               pte->dirty,
+                                               PTE_GET_PFN(*pte),
+                                               !!(*pte & PTE_WRITABLE_MASK),
+                                               !!(*pte & PTE_NX_MASK),
+                                               !!(*pte & PTE_DIRTY_MASK),
                                                ((uint64_t) n1 << 27)
                                                        | ((uint64_t) n2 << 18)
                                                        | ((uint64_t) n3 << 9)
@@ -558,8 +520,8 @@ static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector,
 vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
 {
        uint16_t index[4];
-       struct pageUpperEntry *pml4e, *pdpe, *pde;
-       struct pageTableEntry *pte;
+       uint64_t *pml4e, *pdpe, *pde;
+       uint64_t *pte;
 
        TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
                "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
@@ -572,22 +534,22 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
        if (!vm->pgd_created)
                goto unmapped_gva;
        pml4e = addr_gpa2hva(vm, vm->pgd);
-       if (!pml4e[index[3]].present)
+       if (!(pml4e[index[3]] & PTE_PRESENT_MASK))
                goto unmapped_gva;
 
-       pdpe = addr_gpa2hva(vm, pml4e[index[3]].pfn * vm->page_size);
-       if (!pdpe[index[2]].present)
+       pdpe = addr_gpa2hva(vm, PTE_GET_PFN(pml4e[index[3]]) * vm->page_size);
+       if (!(pdpe[index[2]] & PTE_PRESENT_MASK))
                goto unmapped_gva;
 
-       pde = addr_gpa2hva(vm, pdpe[index[2]].pfn * vm->page_size);
-       if (!pde[index[1]].present)
+       pde = addr_gpa2hva(vm, PTE_GET_PFN(pdpe[index[2]]) * vm->page_size);
+       if (!(pde[index[1]] & PTE_PRESENT_MASK))
                goto unmapped_gva;
 
-       pte = addr_gpa2hva(vm, pde[index[1]].pfn * vm->page_size);
-       if (!pte[index[0]].present)
+       pte = addr_gpa2hva(vm, PTE_GET_PFN(pde[index[1]]) * vm->page_size);
+       if (!(pte[index[0]] & PTE_PRESENT_MASK))
                goto unmapped_gva;
 
-       return (pte[index[0]].pfn * vm->page_size) + (gva & 0xfffu);
+       return (PTE_GET_PFN(pte[index[0]]) * vm->page_size) + (gva & ~PAGE_MASK);
 
 unmapped_gva:
        TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
index 52a3ef6..76f65c2 100644 (file)
@@ -29,7 +29,6 @@
 #define X86_FEATURE_XSAVE              (1 << 26)
 #define X86_FEATURE_OSXSAVE            (1 << 27)
 
-#define PAGE_SIZE                      (1 << 12)
 #define NUM_TILES                      8
 #define TILE_SIZE                      1024
 #define XSAVE_SIZE                     ((NUM_TILES * TILE_SIZE) + PAGE_SIZE)
index f070ff0..aeb3850 100644 (file)
@@ -12,7 +12,6 @@
 #include "vmx.h"
 
 #define VCPU_ID           1
-#define PAGE_SIZE  4096
 #define MAXPHYADDR 36
 
 #define MEM_REGION_GVA 0x0000123456789000
index a626d40..b4e0c86 100644 (file)
@@ -21,8 +21,6 @@
 
 #define VCPU_ID              1
 
-#define PAGE_SIZE  4096
-
 #define SMRAM_SIZE 65536
 #define SMRAM_MEMSLOT ((1 << 16) | 1)
 #define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
index e683d0a..19b35c6 100644 (file)
@@ -32,7 +32,6 @@
 #define MSR_IA32_TSC_ADJUST 0x3b
 #endif
 
-#define PAGE_SIZE      4096
 #define VCPU_ID                5
 
 #define TSC_ADJUST_VALUE (1ll << 32)
index 865e171..bcd3708 100644 (file)
@@ -23,7 +23,6 @@
 #define SHINFO_REGION_GVA      0xc0000000ULL
 #define SHINFO_REGION_GPA      0xc0000000ULL
 #define SHINFO_REGION_SLOT     10
-#define PAGE_SIZE              4096
 
 #define DUMMY_REGION_GPA       (SHINFO_REGION_GPA + (2 * PAGE_SIZE))
 #define DUMMY_REGION_SLOT      11
index adc9445..b30fe9d 100644 (file)
@@ -15,7 +15,6 @@
 
 #define HCALL_REGION_GPA       0xc0000000ULL
 #define HCALL_REGION_SLOT      10
-#define PAGE_SIZE              4096
 
 static struct kvm_vm *vm;
 
index b019e0b..84fda3b 100644 (file)
@@ -180,6 +180,9 @@ void shutdown(int exit_val, char *err_cause, int line_no)
        if (in_shutdown++)
                return;
 
+       /* Free the cpu_set allocated using CPU_ALLOC in main function */
+       CPU_FREE(cpu_set);
+
        for (i = 0; i < num_cpus_to_pin; i++)
                if (cpu_threads[i]) {
                        pthread_kill(cpu_threads[i], SIGUSR1);
@@ -551,6 +554,12 @@ int main(int argc, char *argv[])
                perror("sysconf(_SC_NPROCESSORS_ONLN)");
                exit(1);
        }
+
+       if (getuid() != 0)
+               ksft_exit_skip("Not running as root, but almost all tests "
+                       "require root in order to modify\nsystem settings.  "
+                       "Exiting.\n");
+
        cpus_online = min(MAX_CPUS, sysconf(_SC_NPROCESSORS_ONLN));
        cpu_set = CPU_ALLOC(cpus_online);
        if (cpu_set == NULL) {
@@ -589,7 +598,7 @@ int main(int argc, char *argv[])
                                                cpu_set)) {
                                        fprintf(stderr, "Any given CPU may "
                                                "only be given once.\n");
-                                       exit(1);
+                                       goto err_code;
                                } else
                                        CPU_SET_S(cpus_to_pin[cpu],
                                                  cpu_set_size, cpu_set);
@@ -607,7 +616,7 @@ int main(int argc, char *argv[])
                                queue_path = malloc(strlen(option) + 2);
                                if (!queue_path) {
                                        perror("malloc()");
-                                       exit(1);
+                                       goto err_code;
                                }
                                queue_path[0] = '/';
                                queue_path[1] = 0;
@@ -622,17 +631,12 @@ int main(int argc, char *argv[])
                fprintf(stderr, "Must pass at least one CPU to continuous "
                        "mode.\n");
                poptPrintUsage(popt_context, stderr, 0);
-               exit(1);
+               goto err_code;
        } else if (!continuous_mode) {
                num_cpus_to_pin = 1;
                cpus_to_pin[0] = cpus_online - 1;
        }
 
-       if (getuid() != 0)
-               ksft_exit_skip("Not running as root, but almost all tests "
-                       "require root in order to modify\nsystem settings.  "
-                       "Exiting.\n");
-
        max_msgs = fopen(MAX_MSGS, "r+");
        max_msgsize = fopen(MAX_MSGSIZE, "r+");
        if (!max_msgs)
@@ -740,4 +744,9 @@ int main(int argc, char *argv[])
                        sleep(1);
        }
        shutdown(0, "", 0);
+
+err_code:
+       CPU_FREE(cpu_set);
+       exit(1);
+
 }
index b7d188f..b9fa9cd 100644 (file)
@@ -135,6 +135,11 @@ do {                                                               \
 #define PPC_FEATURE2_ARCH_3_1 0x00040000
 #endif
 
+/* POWER10 features */
+#ifndef PPC_FEATURE2_MMA
+#define PPC_FEATURE2_MMA 0x00020000
+#endif
+
 #if defined(__powerpc64__)
 #define UCONTEXT_NIA(UC)       (UC)->uc_mcontext.gp_regs[PT_NIP]
 #define UCONTEXT_MSR(UC)       (UC)->uc_mcontext.gp_regs[PT_MSR]
index fcc91c2..3948f7c 100644 (file)
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
-TEST_GEN_PROGS := fpu_syscall fpu_preempt fpu_signal fpu_denormal vmx_syscall vmx_preempt vmx_signal vsx_preempt
+TEST_GEN_PROGS := fpu_syscall fpu_preempt fpu_signal fpu_denormal vmx_syscall vmx_preempt vmx_signal vsx_preempt mma
 
 top_srcdir = ../../../../..
 include ../../lib.mk
@@ -17,3 +17,5 @@ $(OUTPUT)/vmx_signal: vmx_asm.S ../utils.c
 
 $(OUTPUT)/vsx_preempt: CFLAGS += -mvsx
 $(OUTPUT)/vsx_preempt: vsx_asm.S ../utils.c
+
+$(OUTPUT)/mma: mma.c mma.S ../utils.c
diff --git a/tools/testing/selftests/powerpc/math/mma.S b/tools/testing/selftests/powerpc/math/mma.S
new file mode 100644 (file)
index 0000000..8528c98
--- /dev/null
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Test basic matrix multiply assist (MMA) functionality if available.
+ *
+ * Copyright 2020, Alistair Popple, IBM Corp.
+ */
+       .global test_mma
+test_mma:
+       /* Load accumulator via VSX registers from image passed in r3 */
+       lxvh8x  4,0,3
+       lxvh8x  5,0,4
+
+       /* Clear and prime the accumulator (xxsetaccz) */
+       .long   0x7c030162
+
+       /* Prime the accumulator with MMA VSX move to accumulator
+       * X-form (xxmtacc) (not needed due to above zeroing) */
+       //.long 0x7c010162
+
+       /* xvi16ger2s */
+       .long   0xec042958
+
+       /* Store result in image passed in r5 */
+       stxvw4x 0,0,5
+       addi    5,5,16
+       stxvw4x 1,0,5
+       addi    5,5,16
+       stxvw4x 2,0,5
+       addi    5,5,16
+       stxvw4x 3,0,5
+       addi    5,5,16
+
+       blr
diff --git a/tools/testing/selftests/powerpc/math/mma.c b/tools/testing/selftests/powerpc/math/mma.c
new file mode 100644 (file)
index 0000000..3a71808
--- /dev/null
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Test basic matrix multiply assist (MMA) functionality if available.
+ *
+ * Copyright 2020, Alistair Popple, IBM Corp.
+ */
+#include <stdio.h>
+#include <stdint.h>
+
+#include "utils.h"
+
+extern void test_mma(uint16_t (*)[8], uint16_t (*)[8], uint32_t (*)[4*4]);
+
+static int mma(void)
+{
+       int i;
+       int rc = 0;
+       uint16_t x[] = {1, 0, 2, 0, 3, 0, 4, 0};
+       uint16_t y[] = {1, 0, 2, 0, 3, 0, 4, 0};
+       uint32_t z[4*4];
+       uint32_t exp[4*4] = {1, 2, 3, 4,
+                            2, 4, 6, 8,
+                            3, 6, 9, 12,
+                            4, 8, 12, 16};
+
+       SKIP_IF_MSG(!have_hwcap2(PPC_FEATURE2_ARCH_3_1), "Need ISAv3.1");
+       SKIP_IF_MSG(!have_hwcap2(PPC_FEATURE2_MMA), "Need MMA");
+
+       test_mma(&x, &y, &z);
+
+       for (i = 0; i < 16; i++) {
+               printf("MMA[%d] = %d ", i, z[i]);
+
+               if (z[i] == exp[i]) {
+                       printf(" (Correct)\n");
+               } else {
+                       printf(" (Incorrect)\n");
+                       rc = 1;
+               }
+       }
+
+       return rc;
+}
+
+int main(int argc, char *argv[])
+{
+       return test_harness(mma, "mma");
+}
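
For reference, with the zero-interleaved inputs used above the accumulator ends up holding a plain outer product: result element [i][j] equals x[i] * y[j] of the non-zero halfwords (for instance the third row, fourth column entry is 3 * 4 = 12), which matches the exp[] array the test checks against. A host-side sketch of that reference computation (this is just a reading of the test's expected values, not an MMA implementation):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t xv[4] = {1, 2, 3, 4};   /* the non-zero halfwords of x[] above */
	uint16_t yv[4] = {1, 2, 3, 4};   /* the non-zero halfwords of y[] above */
	uint32_t exp[4][4];
	int i, j;

	for (i = 0; i < 4; i++)
		for (j = 0; j < 4; j++)
			exp[i][j] = xv[i] * yv[j];   /* outer product */

	for (i = 0; i < 4; i++)
		printf("%u %u %u %u\n", exp[i][0], exp[i][1], exp[i][2], exp[i][3]);
	return 0;
}
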
index aac4a59..4e1a294 100644 (file)
@@ -12,3 +12,4 @@ pkey_exec_prot
 pkey_siginfo
 stack_expansion_ldst
 stack_expansion_signal
+large_vm_gpr_corruption
index 40253ab..27dc09d 100644 (file)
@@ -4,7 +4,8 @@ noarg:
 
 TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr \
                  large_vm_fork_separation bad_accesses pkey_exec_prot \
-                 pkey_siginfo stack_expansion_signal stack_expansion_ldst
+                 pkey_siginfo stack_expansion_signal stack_expansion_ldst \
+                 large_vm_gpr_corruption
 TEST_PROGS := stress_code_patching.sh
 
 TEST_GEN_PROGS_EXTENDED := tlbie_test
@@ -19,6 +20,7 @@ $(OUTPUT)/prot_sao: ../utils.c
 
 $(OUTPUT)/wild_bctr: CFLAGS += -m64
 $(OUTPUT)/large_vm_fork_separation: CFLAGS += -m64
+$(OUTPUT)/large_vm_gpr_corruption: CFLAGS += -m64
 $(OUTPUT)/bad_accesses: CFLAGS += -m64
 $(OUTPUT)/pkey_exec_prot: CFLAGS += -m64
 $(OUTPUT)/pkey_siginfo: CFLAGS += -m64
diff --git a/tools/testing/selftests/powerpc/mm/large_vm_gpr_corruption.c b/tools/testing/selftests/powerpc/mm/large_vm_gpr_corruption.c
new file mode 100644 (file)
index 0000000..927bfae
--- /dev/null
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright 2022, Michael Ellerman, IBM Corp.
+//
+// Test that the 4PB address space SLB handling doesn't corrupt userspace registers
+// (r9-r13) due to a SLB fault while saving the PPR.
+//
+// The bug was introduced in f384796c4 ("powerpc/mm: Add support for handling > 512TB
+// address in SLB miss") and fixed in 4c2de74cc869 ("powerpc/64: Interrupts save PPR on
+// stack rather than thread_struct").
+//
+// To hit the bug requires the task struct and kernel stack to be in different segments.
+// Usually that requires more than 1TB of RAM, or if that's not practical, boot the kernel
+// with "disable_1tb_segments".
+//
+// The test works by creating mappings above 512TB, to trigger the large address space
+// support. It creates 64 mappings, double the size of the SLB, to cause SLB faults on
+// each access (assuming naive replacement). It then loops over those mappings touching
+// each, and checks that r9-r13 aren't corrupted.
+//
+// It then forks another child and tries again, because a new child process will get a new
+// kernel stack and thread struct allocated, which may be more optimally placed to trigger
+// the bug. It would probably be better to leave the previous child processes hanging
+// around, so that kernel stack & thread struct allocations are not reused, but that would
+// amount to a 30 second fork bomb. The current design reliably triggers the bug on
+// unpatched kernels.
+
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "utils.h"
+
+#ifndef MAP_FIXED_NOREPLACE
+#define MAP_FIXED_NOREPLACE MAP_FIXED // "Should be safe" above 512TB
+#endif
+
+#define BASE_ADDRESS (1ul << 50) // 1PB
+#define STRIDE      (2ul << 40) // 2TB
+#define SLB_SIZE     32
+#define NR_MAPPINGS  (SLB_SIZE * 2)
+
+static volatile sig_atomic_t signaled;
+
+static void signal_handler(int sig)
+{
+       signaled = 1;
+}
+
+#define CHECK_REG(_reg)                                                                \
+       if (_reg != _reg##_orig) {                                                     \
+               printf(str(_reg) " corrupted! Expected 0x%lx != 0x%lx\n", _reg##_orig, \
+                      _reg);                                                          \
+               _exit(1);                                                              \
+       }
+
+static int touch_mappings(void)
+{
+       unsigned long r9_orig, r10_orig, r11_orig, r12_orig, r13_orig;
+       unsigned long r9, r10, r11, r12, r13;
+       unsigned long addr, *p;
+       int i;
+
+       for (i = 0; i < NR_MAPPINGS; i++) {
+               addr = BASE_ADDRESS + (i * STRIDE);
+               p = (unsigned long *)addr;
+
+               asm volatile("mr   %0, %%r9     ;" // Read original GPR values
+                            "mr   %1, %%r10    ;"
+                            "mr   %2, %%r11    ;"
+                            "mr   %3, %%r12    ;"
+                            "mr   %4, %%r13    ;"
+                            "std %10, 0(%11)   ;" // Trigger SLB fault
+                            "mr   %5, %%r9     ;" // Save possibly corrupted values
+                            "mr   %6, %%r10    ;"
+                            "mr   %7, %%r11    ;"
+                            "mr   %8, %%r12    ;"
+                            "mr   %9, %%r13    ;"
+                            "mr   %%r9,  %0    ;" // Restore original values
+                            "mr   %%r10, %1    ;"
+                            "mr   %%r11, %2    ;"
+                            "mr   %%r12, %3    ;"
+                            "mr   %%r13, %4    ;"
+                            : "=&b"(r9_orig), "=&b"(r10_orig), "=&b"(r11_orig),
+                              "=&b"(r12_orig), "=&b"(r13_orig), "=&b"(r9), "=&b"(r10),
+                              "=&b"(r11), "=&b"(r12), "=&b"(r13)
+                            : "b"(i), "b"(p)
+                            : "r9", "r10", "r11", "r12", "r13");
+
+               CHECK_REG(r9);
+               CHECK_REG(r10);
+               CHECK_REG(r11);
+               CHECK_REG(r12);
+               CHECK_REG(r13);
+       }
+
+       return 0;
+}
+
+static int test(void)
+{
+       unsigned long page_size, addr, *p;
+       struct sigaction action;
+       bool hash_mmu;
+       int i, status;
+       pid_t pid;
+
+       // This tests a hash MMU specific bug.
+       FAIL_IF(using_hash_mmu(&hash_mmu));
+       SKIP_IF(!hash_mmu);
+
+       page_size = sysconf(_SC_PAGESIZE);
+
+       for (i = 0; i < NR_MAPPINGS; i++) {
+               addr = BASE_ADDRESS + (i * STRIDE);
+
+               p = mmap((void *)addr, page_size, PROT_READ | PROT_WRITE,
+                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
+               if (p == MAP_FAILED) {
+                       perror("mmap");
+                       printf("Error: couldn't mmap(), confirm kernel has 4PB support?\n");
+                       return 1;
+               }
+       }
+
+       action.sa_handler = signal_handler;
+       action.sa_flags = SA_RESTART;
+       FAIL_IF(sigaction(SIGALRM, &action, NULL) < 0);
+
+       // Seen to always crash in under ~10s on affected kernels.
+       alarm(30);
+
+       while (!signaled) {
+               // Fork new processes, to increase the chance that we hit the case where
+               // the kernel stack and task struct are in different segments.
+               pid = fork();
+               if (pid == 0)
+                       exit(touch_mappings());
+
+               FAIL_IF(waitpid(-1, &status, 0) == -1);
+               FAIL_IF(WIFSIGNALED(status));
+               FAIL_IF(!WIFEXITED(status));
+               FAIL_IF(WEXITSTATUS(status));
+       }
+
+       return 0;
+}
+
+int main(void)
+{
+       return test_harness(test, "large_vm_gpr_corruption");
+}
index fca054b..c01a31d 100644 (file)
@@ -274,7 +274,7 @@ u64 *get_intr_regs(struct event *event, void *sample_buff)
        return intr_regs;
 }
 
-static const unsigned int __perf_reg_mask(const char *register_name)
+static const int __perf_reg_mask(const char *register_name)
 {
        if (!strcmp(register_name, "R0"))
                return 0;
index d42ca8c..80dc97e 100644 (file)
@@ -207,7 +207,7 @@ int spectre_v2_test(void)
                break;
        case COUNT_CACHE_DISABLED:
                if (miss_percent < 95) {
-                       printf("Branch misses < 20%% unexpected in this configuration!\n");
+                       printf("Branch misses < 95%% unexpected in this configuration!\n");
                        printf("Possible mis-match between reported & actual mitigation\n");
                        return 1;
                }
index 7c0b061..db02701 100644 (file)
@@ -6,9 +6,11 @@
 
 #include <errno.h>
 #include <stdlib.h>
+#include <stdio.h>
 #include <string.h>
 #include <sys/mman.h>
 #include <time.h>
+#include <stdbool.h>
 
 #include "../kselftest.h"
 
@@ -63,6 +65,59 @@ enum {
        .expect_failure = should_fail                           \
 }
 
+/*
+ * Returns false if the requested remap region overlaps with an
+ * existing mapping (e.g. text, stack), else returns true.
+ */
+static bool is_remap_region_valid(void *addr, unsigned long long size)
+{
+       void *remap_addr = NULL;
+       bool ret = true;
+
+       /* Use MAP_FIXED_NOREPLACE flag to ensure region is not mapped */
+       remap_addr = mmap(addr, size, PROT_READ | PROT_WRITE,
+                                        MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED,
+                                        -1, 0);
+
+       if (remap_addr == MAP_FAILED) {
+               if (errno == EEXIST)
+                       ret = false;
+       } else {
+               munmap(remap_addr, size);
+       }
+
+       return ret;
+}
+
+/* Returns mmap_min_addr sysctl tunable from procfs */
+static unsigned long long get_mmap_min_addr(void)
+{
+       FILE *fp;
+       int n_matched;
+       static unsigned long long addr;
+
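+       /* mmap_min_addr is read from procfs only once; reuse the cached value. */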
+       if (addr)
+               return addr;
+
+       fp = fopen("/proc/sys/vm/mmap_min_addr", "r");
+       if (fp == NULL) {
+               ksft_print_msg("Failed to open /proc/sys/vm/mmap_min_addr: %s\n",
+                       strerror(errno));
+               exit(KSFT_SKIP);
+       }
+
+       n_matched = fscanf(fp, "%llu", &addr);
+       if (n_matched != 1) {
+               ksft_print_msg("Failed to read /proc/sys/vm/mmap_min_addr: %s\n",
+                       strerror(errno));
+               fclose(fp);
+               exit(KSFT_SKIP);
+       }
+
+       fclose(fp);
+       return addr;
+}
+
 /*
  * Returns the start address of the mapping on success, else returns
  * NULL on failure.
@@ -71,11 +126,18 @@ static void *get_source_mapping(struct config c)
 {
        unsigned long long addr = 0ULL;
        void *src_addr = NULL;
+       unsigned long long mmap_min_addr;
+
+       mmap_min_addr = get_mmap_min_addr();
+
 retry:
        addr += c.src_alignment;
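+       /* Addresses below mmap_min_addr cannot be mapped by an unprivileged process. */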
+       if (addr < mmap_min_addr)
+               goto retry;
+
        src_addr = mmap((void *) addr, c.region_size, PROT_READ | PROT_WRITE,
-                       MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED,
-                       -1, 0);
+                                       MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED,
+                                       -1, 0);
        if (src_addr == MAP_FAILED) {
                if (errno == EPERM || errno == EEXIST)
                        goto retry;
@@ -90,8 +152,10 @@ retry:
         * alignment in the tests.
         */
        if (((unsigned long long) src_addr & (c.src_alignment - 1)) ||
-                       !((unsigned long long) src_addr & c.src_alignment))
+                       !((unsigned long long) src_addr & c.src_alignment)) {
+               munmap(src_addr, c.region_size);
                goto retry;
+       }
 
        if (!src_addr)
                goto error;
@@ -140,9 +204,20 @@ static long long remap_region(struct config c, unsigned int threshold_mb,
        if (!((unsigned long long) addr & c.dest_alignment))
                addr = (void *) ((unsigned long long) addr | c.dest_alignment);
 
+       /* Don't destroy existing mappings unless expected to overlap */
+       while (!is_remap_region_valid(addr, c.region_size) && !c.overlapping) {
+               /* Check for unsigned overflow */
+               if (addr + c.dest_alignment < addr) {
+                       ksft_print_msg("Couldn't find a valid region to remap to\n");
+                       ret = -1;
+                       goto out;
+               }
+               addr += c.dest_alignment;
+       }
+
        clock_gettime(CLOCK_MONOTONIC, &t_start);
        dest_addr = mremap(src_addr, c.region_size, c.region_size,
-                       MREMAP_MAYMOVE|MREMAP_FIXED, (char *) addr);
+                                         MREMAP_MAYMOVE|MREMAP_FIXED, (char *) addr);
        clock_gettime(CLOCK_MONOTONIC, &t_end);
 
        if (dest_addr == MAP_FAILED) {
@@ -193,7 +268,7 @@ static void run_mremap_test_case(struct test test_case, int *failures,
 
        if (remap_time < 0) {
                if (test_case.expect_failure)
-                       ksft_test_result_pass("%s\n\tExpected mremap failure\n",
+                       ksft_test_result_xfail("%s\n\tExpected mremap failure\n",
                                              test_case.name);
                else {
                        ksft_test_result_fail("%s\n", test_case.name);
index 3b265f1..352ba00 100755 (executable)
@@ -291,11 +291,16 @@ echo "-------------------"
 echo "running mremap_test"
 echo "-------------------"
 ./mremap_test
-if [ $? -ne 0 ]; then
+ret_val=$?
+
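+# mremap_test may exit with $ksft_skip; report that as a skip, not a failure.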
+if [ $ret_val -eq 0 ]; then
+       echo "[PASS]"
+elif [ $ret_val -eq $ksft_skip ]; then
+       echo "[SKIP]"
+       exitcode=$ksft_skip
+else
        echo "[FAIL]"
        exitcode=1
-else
-       echo "[PASS]"
 fi
 
 echo "-----------------"
index 222ecc8..f4c2a6e 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * KVM dirty ring implementation
  *
index 70e05af..f30bb8c 100644 (file)
@@ -164,6 +164,10 @@ __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
 {
 }
 
+__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
+{
+}
+
 bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
 {
        /*
@@ -357,6 +361,12 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
 #endif
 
+static void kvm_flush_shadow_all(struct kvm *kvm)
+{
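+       /*
+        * All of the guest's memory is going away, so give arch code a chance
+        * to act on the reclaim (e.g. cache flushing for encrypted guests) in
+        * addition to dropping the shadow mappings.
+        */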
+       kvm_arch_flush_shadow_all(kvm);
+       kvm_arch_guest_memory_reclaimed(kvm);
+}
+
 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
 static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
                                               gfp_t gfp_flags)
@@ -434,8 +444,8 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 
 static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
-       kvm_dirty_ring_free(&vcpu->dirty_ring);
        kvm_arch_vcpu_destroy(vcpu);
+       kvm_dirty_ring_free(&vcpu->dirty_ring);
 
        /*
         * No need for rcu_read_lock as VCPU_RUN is the only place that changes
@@ -485,12 +495,15 @@ typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
 typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
                             unsigned long end);
 
+typedef void (*on_unlock_fn_t)(struct kvm *kvm);
+
 struct kvm_hva_range {
        unsigned long start;
        unsigned long end;
        pte_t pte;
        hva_handler_t handler;
        on_lock_fn_t on_lock;
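+       /* Called after mmu_lock is dropped; see __kvm_handle_hva_range(). */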
+       on_unlock_fn_t on_unlock;
        bool flush_on_ret;
        bool may_block;
 };
@@ -578,8 +591,11 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
        if (range->flush_on_ret && ret)
                kvm_flush_remote_tlbs(kvm);
 
-       if (locked)
+       if (locked) {
                KVM_MMU_UNLOCK(kvm);
+               if (!IS_KVM_NULL_FN(range->on_unlock))
+                       range->on_unlock(kvm);
+       }
 
        srcu_read_unlock(&kvm->srcu, idx);
 
@@ -600,6 +616,7 @@ static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
                .pte            = pte,
                .handler        = handler,
                .on_lock        = (void *)kvm_null_fn,
+               .on_unlock      = (void *)kvm_null_fn,
                .flush_on_ret   = true,
                .may_block      = false,
        };
@@ -619,6 +636,7 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn
                .pte            = __pte(0),
                .handler        = handler,
                .on_lock        = (void *)kvm_null_fn,
+               .on_unlock      = (void *)kvm_null_fn,
                .flush_on_ret   = false,
                .may_block      = false,
        };
@@ -662,7 +680,7 @@ void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
                kvm->mmu_notifier_range_end = end;
        } else {
                /*
-                * Fully tracking multiple concurrent ranges has dimishing
+                * Fully tracking multiple concurrent ranges has diminishing
                 * returns. Keep things simple and just find the minimal range
                 * which includes the current and new ranges. As there won't be
                 * enough information to subtract a range after its invalidate
@@ -687,6 +705,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
                .pte            = __pte(0),
                .handler        = kvm_unmap_gfn_range,
                .on_lock        = kvm_inc_notifier_count,
+               .on_unlock      = kvm_arch_guest_memory_reclaimed,
                .flush_on_ret   = true,
                .may_block      = mmu_notifier_range_blockable(range),
        };
@@ -741,6 +760,7 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
                .pte            = __pte(0),
                .handler        = (void *)kvm_null_fn,
                .on_lock        = kvm_dec_notifier_count,
+               .on_unlock      = (void *)kvm_null_fn,
                .flush_on_ret   = false,
                .may_block      = mmu_notifier_range_blockable(range),
        };
@@ -813,7 +833,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
        int idx;
 
        idx = srcu_read_lock(&kvm->srcu);
-       kvm_arch_flush_shadow_all(kvm);
+       kvm_flush_shadow_all(kvm);
        srcu_read_unlock(&kvm->srcu, idx);
 }
 
@@ -932,7 +952,7 @@ static void kvm_destroy_vm_debugfs(struct kvm *kvm)
        int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
                                      kvm_vcpu_stats_header.num_desc;
 
-       if (!kvm->debugfs_dentry)
+       if (IS_ERR(kvm->debugfs_dentry))
                return;
 
        debugfs_remove_recursive(kvm->debugfs_dentry);
@@ -1075,6 +1095,12 @@ static struct kvm *kvm_create_vm(unsigned long type)
 
        BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
 
+       /*
+        * Force subsequent debugfs file creations to fail if the VM directory
+        * is not created (by kvm_create_vm_debugfs()).
+        */
+       kvm->debugfs_dentry = ERR_PTR(-ENOENT);
+
        if (init_srcu_struct(&kvm->srcu))
                goto out_err_no_srcu;
        if (init_srcu_struct(&kvm->irq_srcu))
@@ -1219,7 +1245,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
        WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
        kvm->mn_active_invalidate_count = 0;
 #else
-       kvm_arch_flush_shadow_all(kvm);
+       kvm_flush_shadow_all(kvm);
 #endif
        kvm_arch_destroy_vm(kvm);
        kvm_destroy_devices(kvm);
@@ -1646,6 +1672,7 @@ static void kvm_invalidate_memslot(struct kvm *kvm,
         *      - kvm_is_visible_gfn (mmu_check_root)
         */
        kvm_arch_flush_shadow_memslot(kvm, old);
+       kvm_arch_guest_memory_reclaimed(kvm);
 
        /* Was released by kvm_swap_active_memslots, reacquire. */
        mutex_lock(&kvm->slots_arch_lock);
@@ -1793,7 +1820,7 @@ static int kvm_set_memslot(struct kvm *kvm,
 
        /*
         * No need to refresh new->arch, changes after dropping slots_arch_lock
-        * will directly hit the final, active memsot.  Architectures are
+        * will directly hit the final, active memslot.  Architectures are
         * responsible for knowing that new->arch may be stale.
         */
        kvm_commit_memory_region(kvm, old, new, change);
@@ -5479,7 +5506,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
        }
        add_uevent_var(env, "PID=%d", kvm->userspace_pid);
 
-       if (kvm->debugfs_dentry) {
+       if (!IS_ERR(kvm->debugfs_dentry)) {
                char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
 
                if (p) {
index 34ca408..41da467 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+/* SPDX-License-Identifier: GPL-2.0-only */
 
 #ifndef __KVM_MM_H__
 #define __KVM_MM_H__ 1