Merge branches 'acpi-pci' and 'acpi-processor'
author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Mon, 26 Apr 2021 15:03:05 +0000 (17:03 +0200)
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Mon, 26 Apr 2021 15:03:05 +0000 (17:03 +0200)
* acpi-pci:
  ACPI: PCI: Replace direct printk() invocations in pci_link.c
  ACPI: PCI: Drop ACPI_PCI_COMPONENT that is not used any more
  ACPI: PCI: Replace ACPI_DEBUG_PRINT() and ACPI_EXCEPTION()
  ACPI: PCI: IRQ: Consolidate printing diagnostic messages

* acpi-processor:
  ACPI: processor: perflib: Eliminate redundant status check
  ACPI: processor: Get rid of ACPICA message printing
  ACPI: processor: idle: Drop extra prefix from pr_notice()
  ACPI: processor: Remove initialization of static variable

650 files changed:
.mailmap
Documentation/ABI/testing/debugfs-moxtet
Documentation/ABI/testing/debugfs-turris-mox-rwtm
Documentation/ABI/testing/sysfs-bus-moxtet-devices
Documentation/ABI/testing/sysfs-class-led-driver-turris-omnia
Documentation/ABI/testing/sysfs-firmware-turris-mox-rwtm
Documentation/admin-guide/kernel-parameters.txt
Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt
Documentation/devicetree/bindings/i2c/i2c-gpio.yaml
Documentation/devicetree/bindings/i2c/i2c-imx.yaml
Documentation/devicetree/bindings/iio/adc/ingenic,adc.yaml
Documentation/devicetree/bindings/input/adc-joystick.yaml
Documentation/devicetree/bindings/input/touchscreen/resistive-adc-touch.txt
Documentation/devicetree/bindings/leds/cznic,turris-omnia-leds.yaml
Documentation/devicetree/bindings/mfd/ab8500.txt
Documentation/devicetree/bindings/mfd/motorola-cpcap.txt
Documentation/devicetree/bindings/net/brcm,bcm4908-enet.yaml
Documentation/devicetree/bindings/net/ethernet-controller.yaml
Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
Documentation/firmware-guide/acpi/debug.rst
Documentation/networking/ethtool-netlink.rst
Documentation/networking/ip-sysctl.rst
Documentation/networking/seg6-sysctl.rst
MAINTAINERS
Makefile
arch/arc/boot/dts/haps_hs.dts
arch/arc/kernel/signal.c
arch/arc/kernel/unwind.c
arch/arm/Kconfig
arch/arm/boot/dts/armada-385-turris-omnia.dts
arch/arm/boot/dts/bcm2711.dtsi
arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
arch/arm/boot/dts/omap3.dtsi
arch/arm/boot/dts/omap4.dtsi
arch/arm/boot/dts/omap44xx-clocks.dtsi
arch/arm/boot/dts/omap5.dtsi
arch/arm/mach-footbridge/cats-pci.c
arch/arm/mach-footbridge/ebsa285-pci.c
arch/arm/mach-footbridge/netwinder-pci.c
arch/arm/mach-footbridge/personal-pci.c
arch/arm/mach-keystone/keystone.c
arch/arm/mach-omap1/ams-delta-fiq-handler.S
arch/arm/mach-omap2/board-generic.c
arch/arm/mach-omap2/omap-secure.c
arch/arm/mach-omap2/omap-secure.h
arch/arm/mach-omap2/pmic-cpcap.c
arch/arm/mach-omap2/sr_device.c
arch/arm/mach-pxa/mainstone.c
arch/arm/mm/mmu.c
arch/arm/mm/pmsa-v7.c
arch/arm/mm/pmsa-v8.c
arch/arm/probes/uprobes/core.c
arch/arm64/Kconfig
arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h
arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
arch/arm64/include/asm/alternative-macros.h
arch/arm64/include/asm/kvm_arm.h
arch/arm64/include/asm/word-at-a-time.h
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/probes/kprobes.c
arch/arm64/kernel/sleep.S
arch/arm64/kvm/debug.c
arch/arm64/kvm/hyp/vgic-v3-sr.c
arch/csky/Kconfig
arch/csky/include/asm/page.h
arch/ia64/configs/generic_defconfig
arch/ia64/include/asm/ptrace.h
arch/ia64/mm/discontig.c
arch/m68k/include/asm/page_mm.h
arch/mips/kernel/setup.c
arch/nds32/mm/cacheflush.c
arch/parisc/include/asm/cmpxchg.h
arch/parisc/include/asm/processor.h
arch/parisc/math-emu/fpu.h
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/ptrace/Makefile
arch/powerpc/kernel/ptrace/ptrace-decl.h
arch/powerpc/kernel/ptrace/ptrace-fpu.c
arch/powerpc/kernel/ptrace/ptrace-novsx.c
arch/powerpc/kernel/ptrace/ptrace-view.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/platforms/pseries/lpar.c
arch/powerpc/platforms/pseries/mobility.c
arch/riscv/Kconfig
arch/riscv/include/asm/uaccess.h
arch/riscv/kernel/entry.S
arch/riscv/kernel/probes/ftrace.c
arch/riscv/kernel/stacktrace.c
arch/riscv/kernel/traps.c
arch/riscv/mm/fault.c
arch/riscv/mm/kasan_init.c
arch/s390/include/asm/stacktrace.h
arch/s390/include/asm/vdso/data.h
arch/s390/kernel/cpcmd.c
arch/s390/kernel/dumpstack.c
arch/s390/kernel/entry.S
arch/s390/kernel/irq.c
arch/s390/kernel/setup.c
arch/s390/kernel/time.c
arch/x86/include/asm/kfence.h
arch/x86/include/asm/smp.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/acpi/wakeup_64.S
arch/x86/kernel/setup.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/traps.c
arch/x86/kvm/Makefile
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/mmu/tdp_mmu.h
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/pmu.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/net/bpf_jit_comp.c
arch/x86/net/bpf_jit_comp32.c
arch/xtensa/kernel/coprocessor.S
arch/xtensa/mm/fault.c
block/bio.c
block/blk-mq-debugfs.c
drivers/acpi/acpica/acpredef.h
drivers/acpi/acpica/acresrc.h
drivers/acpi/acpica/acutils.h
drivers/acpi/acpica/amlresrc.h
drivers/acpi/acpica/rscalc.c
drivers/acpi/acpica/rsdump.c
drivers/acpi/acpica/rsdumpinfo.c
drivers/acpi/acpica/rsinfo.c
drivers/acpi/acpica/rslist.c
drivers/acpi/acpica/rsmisc.c
drivers/acpi/acpica/rsserial.c
drivers/acpi/acpica/utresdecode.c
drivers/acpi/acpica/utresrc.c
drivers/acpi/apei/einj.c
drivers/acpi/nfit/core.c
drivers/acpi/pci_irq.c
drivers/acpi/pci_link.c
drivers/acpi/processor_idle.c
drivers/acpi/scan.c
drivers/acpi/sysfs.c
drivers/acpi/tables.c
drivers/base/dd.c
drivers/base/power/runtime.c
drivers/block/null_blk/main.c
drivers/block/null_blk/null_blk.h
drivers/block/xen-blkback/blkback.c
drivers/bluetooth/btusb.c
drivers/bus/moxtet.c
drivers/bus/mvebu-mbus.c
drivers/char/agp/Kconfig
drivers/clk/clk-fixed-factor.c
drivers/clk/clk.c
drivers/clk/qcom/camcc-sc7180.c
drivers/clk/socfpga/clk-gate.c
drivers/cpufreq/freq_table.c
drivers/cxl/mem.c
drivers/dax/bus.c
drivers/dma/dmaengine.c
drivers/dma/dw/Kconfig
drivers/dma/idxd/device.c
drivers/dma/idxd/idxd.h
drivers/dma/idxd/init.c
drivers/dma/idxd/irq.c
drivers/dma/idxd/sysfs.c
drivers/dma/plx_dma.c
drivers/dma/tegra20-apb-dma.c
drivers/dma/xilinx/xilinx_dpdma.c
drivers/extcon/extcon.c
drivers/firewire/nosy.c
drivers/firmware/turris-mox-rwtm.c
drivers/gpio/gpio-moxtet.c
drivers/gpio/gpiolib-sysfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
drivers/gpu/drm/i915/display/intel_acpi.c
drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
drivers/gpu/drm/i915/display/vlv_dsi.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/imx/imx-drm-core.c
drivers/gpu/drm/imx/imx-ldb.c
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/panel/panel-dsi-cm.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/sor.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vc4/vc4_plane.c
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
drivers/gpu/drm/xen/xen_drm_front.c
drivers/gpu/drm/xen/xen_drm_front_conn.h
drivers/gpu/host1x/bus.c
drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
drivers/hid/hid-alps.c
drivers/hid/hid-asus.c
drivers/hid/hid-cp2112.c
drivers/hid/hid-google-hammer.c
drivers/hid/hid-ids.h
drivers/hid/wacom_wac.c
drivers/i2c/busses/i2c-designware-master.c
drivers/i2c/busses/i2c-exynos5.c
drivers/i2c/busses/i2c-hix5hd2.c
drivers/i2c/busses/i2c-jz4780.c
drivers/i2c/busses/i2c-mv64xxx.c
drivers/i2c/busses/i2c-stm32f4.c
drivers/i2c/i2c-core-base.c
drivers/infiniband/core/addr.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/hfi1/affinity.c
drivers/infiniband/hw/hfi1/hfi.h
drivers/infiniband/hw/hfi1/init.c
drivers/infiniband/hw/hfi1/netdev_rx.c
drivers/infiniband/hw/qedr/verbs.c
drivers/infiniband/ulp/rtrs/rtrs-clt.c
drivers/input/joystick/n64joy.c
drivers/input/keyboard/nspire-keypad.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/touchscreen/elants_i2c.c
drivers/input/touchscreen/s6sy761.c
drivers/interconnect/bulk.c
drivers/interconnect/core.c
drivers/interconnect/qcom/msm8939.c
drivers/leds/leds-turris-omnia.c
drivers/mailbox/armada-37xx-rwtm-mailbox.c
drivers/md/dm-verity-fec.c
drivers/md/dm-verity-fec.h
drivers/misc/mei/client.c
drivers/mtd/nand/raw/mtk_nand.c
drivers/net/can/spi/mcp251x.c
drivers/net/can/usb/peak_usb/pcan_usb_core.c
drivers/net/dsa/lantiq_gswip.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/ethernet/amd/pcnet32.c
drivers/net/ethernet/amd/xgbe/xgbe.h
drivers/net/ethernet/broadcom/bcm4908_enet.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
drivers/net/ethernet/davicom/dm9000.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_common.c
drivers/net/ethernet/intel/ice/ice_controlq.h
drivers/net/ethernet/intel/ice/ice_dcb.c
drivers/net/ethernet/intel/ice/ice_dcb_nl.c
drivers/net/ethernet/intel/ice/ice_ethtool.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_switch.c
drivers/net/ethernet/intel/ice/ice_type.h
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/mellanox/mlx5/core/dev.c
drivers/net/ethernet/mellanox/mlx5/core/devlink.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/port.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
drivers/net/ethernet/microchip/lan743x_main.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
drivers/net/ethernet/netronome/nfp/flower/main.h
drivers/net/ethernet/netronome/nfp/flower/metadata.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/xilinx/xilinx_axienet.h
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/geneve.c
drivers/net/ieee802154/atusb.c
drivers/net/phy/bcm-phy-lib.c
drivers/net/phy/marvell.c
drivers/net/tun.c
drivers/net/usb/hso.c
drivers/net/virtio_net.c
drivers/net/vrf.c
drivers/net/vxlan.c
drivers/net/wan/hdlc_fr.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/mediatek/mt76/mt7921/regs.h
drivers/net/wireless/virt_wifi.c
drivers/net/xen-netback/xenbus.c
drivers/nvdimm/bus.c
drivers/nvdimm/pmem.c
drivers/nvdimm/region_devs.c
drivers/of/fdt.c
drivers/of/of_private.h
drivers/of/overlay.c
drivers/of/property.c
drivers/of/unittest.c
drivers/pinctrl/intel/pinctrl-intel.c
drivers/pinctrl/pinctrl-microchip-sgpio.c
drivers/pinctrl/pinctrl-rockchip.c
drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
drivers/pinctrl/qcom/pinctrl-sc7280.c
drivers/pinctrl/qcom/pinctrl-sdx55.c
drivers/platform/x86/intel-hid.c
drivers/ras/cec.c
drivers/regulator/bd9571mwv-regulator.c
drivers/remoteproc/pru_rproc.c
drivers/remoteproc/qcom_pil_info.c
drivers/scsi/hpsa_cmd.h
drivers/scsi/libiscsi.c
drivers/scsi/libsas/sas_ata.c
drivers/scsi/pm8001/pm8001_hwi.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/scsi_transport_srp.c
drivers/scsi/ufs/ufshcd.c
drivers/soc/fsl/qbman/qman.c
drivers/soc/litex/litex_soc_ctrl.c
drivers/soc/qcom/qcom-geni-se.c
drivers/staging/rtl8192e/rtllib.h
drivers/staging/rtl8192e/rtllib_rx.c
drivers/target/iscsi/iscsi_target.c
drivers/thunderbolt/retimer.c
drivers/tty/serial/qcom_geni_serial.c
drivers/usb/cdns3/cdnsp-gadget.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/hcd.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/dwc3-qcom.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/udc/amd5536udc_pci.c
drivers/usb/host/xhci-mtk.c
drivers/usb/musb/musb_core.c
drivers/usb/usbip/stub_dev.c
drivers/usb/usbip/usbip_common.h
drivers/usb/usbip/usbip_event.c
drivers/usb/usbip/vhci_hcd.c
drivers/usb/usbip/vhci_sysfs.c
drivers/usb/usbip/vudc_dev.c
drivers/usb/usbip/vudc_sysfs.c
drivers/vdpa/mlx5/core/mlx5_vdpa.h
drivers/vdpa/mlx5/core/mr.c
drivers/vdpa/mlx5/core/resources.c
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/vfio/pci/Kconfig
drivers/vfio/pci/vfio_pci.c
drivers/vfio/vfio_iommu_type1.c
drivers/video/fbdev/core/fbcon.c
drivers/video/fbdev/hyperv_fb.c
drivers/watchdog/armada_37xx_wdt.c
drivers/xen/events/events_base.c
fs/block_dev.c
fs/btrfs/zoned.c
fs/cifs/Kconfig
fs/cifs/Makefile
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/connect.c
fs/direct-io.c
fs/file.c
fs/gfs2/super.c
fs/hostfs/hostfs_kern.c
fs/io-wq.c
fs/io_uring.c
fs/namei.c
fs/ocfs2/aops.c
fs/ocfs2/file.c
fs/readdir.c
fs/reiserfs/xattr.h
include/acpi/acoutput.h
include/acpi/acpi_drivers.h
include/acpi/acpixf.h
include/acpi/acrestyp.h
include/acpi/actbl1.h
include/acpi/actbl2.h
include/acpi/actbl3.h
include/acpi/acuuid.h
include/acpi/platform/acgcc.h
include/dt-bindings/bus/moxtet.h
include/linux/acpi.h
include/linux/armada-37xx-rwtm-mailbox.h
include/linux/avf/virtchnl.h
include/linux/blkdev.h
include/linux/bpf.h
include/linux/ethtool.h
include/linux/extcon.h
include/linux/firmware/intel/stratix10-svc-client.h
include/linux/host1x.h
include/linux/kasan.h
include/linux/marvell_phy.h
include/linux/mlx5/mlx5_ifc.h
include/linux/moxtet.h
include/linux/nd.h
include/linux/netfilter_arp/arp_tables.h
include/linux/netfilter_bridge/ebtables.h
include/linux/qcom-geni-se.h
include/linux/skmsg.h
include/linux/virtio_net.h
include/linux/xarray.h
include/net/act_api.h
include/net/netns/xfrm.h
include/net/red.h
include/net/rtnetlink.h
include/net/sock.h
include/net/xfrm.h
include/scsi/scsi_transport_iscsi.h
include/uapi/linux/blkpg.h
include/uapi/linux/can.h
include/uapi/linux/ethtool.h
include/uapi/linux/idxd.h
include/uapi/linux/rfkill.h
kernel/bpf/disasm.c
kernel/bpf/inode.c
kernel/bpf/stackmap.c
kernel/bpf/trampoline.c
kernel/bpf/verifier.c
kernel/gcov/clang.c
kernel/locking/lockdep.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace_dynevent.c
kernel/watchdog.c
kernel/workqueue.c
lib/Kconfig.debug
lib/Kconfig.kasan
lib/earlycpio.c
lib/lru_cache.c
lib/parman.c
lib/radix-tree.c
lib/test_kasan_module.c
lib/test_xarray.c
lib/xarray.c
mm/gup.c
mm/internal.h
mm/kasan/common.c
mm/kasan/kasan.h
mm/kasan/report_generic.c
mm/mapping_dirty_helpers.c
mm/memory.c
mm/mmu_gather.c
mm/oom_kill.c
mm/page_poison.c
mm/percpu-internal.h
mm/percpu-stats.c
mm/percpu.c
mm/ptdump.c
mm/shuffle.c
net/batman-adv/translation-table.c
net/bridge/netfilter/ebtable_broute.c
net/bridge/netfilter/ebtable_filter.c
net/bridge/netfilter/ebtable_nat.c
net/bridge/netfilter/ebtables.c
net/can/bcm.c
net/can/isotp.c
net/can/raw.c
net/core/dev.c
net/core/neighbour.c
net/core/rtnetlink.c
net/core/skmsg.c
net/core/sock.c
net/core/xdp.c
net/dsa/dsa2.c
net/dsa/switch.c
net/ethtool/common.c
net/ethtool/eee.c
net/ethtool/ioctl.c
net/ethtool/netlink.h
net/ethtool/pause.c
net/hsr/hsr_device.c
net/hsr/hsr_forward.c
net/ieee802154/nl-mac.c
net/ieee802154/nl802154.c
net/ipv4/ah4.c
net/ipv4/devinet.c
net/ipv4/esp4.c
net/ipv4/esp4_offload.c
net/ipv4/ip_vti.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/arptable_filter.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/ah6.c
net/ipv6/esp6.c
net/ipv6/esp6_offload.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/sit.c
net/mac80211/cfg.c
net/mac80211/mlme.c
net/mac80211/tx.c
net/mac802154/llsec.c
net/mptcp/protocol.c
net/ncsi/ncsi-manage.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_flow_table_offload.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_limit.c
net/netfilter/x_tables.c
net/netlink/af_netlink.c
net/nfc/llcp_sock.c
net/openvswitch/conntrack.c
net/qrtr/qrtr.c
net/rds/message.c
net/rds/send.c
net/rfkill/core.c
net/sched/act_api.c
net/sched/cls_api.c
net/sched/sch_htb.c
net/sched/sch_teql.c
net/sctp/ipv6.c
net/sctp/socket.c
net/tipc/bearer.h
net/tipc/crypto.c
net/tipc/net.c
net/tipc/node.c
net/tipc/socket.c
net/wireless/nl80211.c
net/wireless/scan.c
net/wireless/sme.c
net/xfrm/xfrm_compat.c
net/xfrm/xfrm_device.c
net/xfrm/xfrm_interface.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_state.c
scripts/Makefile.kasan
scripts/module.lds.S
security/Kconfig.hardening
security/selinux/ss/avtab.c
security/selinux/ss/avtab.h
security/selinux/ss/conditional.c
security/selinux/ss/services.c
security/selinux/ss/sidtab.c
security/selinux/ss/sidtab.h
security/tomoyo/network.c
sound/drivers/aloop.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/soc/bcm/cygnus-ssp.c
sound/soc/codecs/lpass-rx-macro.c
sound/soc/codecs/lpass-tx-macro.c
sound/soc/codecs/max98373-i2c.c
sound/soc/codecs/max98373-sdw.c
sound/soc/codecs/max98373.c
sound/soc/codecs/wm8960.c
sound/soc/fsl/fsl_esai.c
sound/soc/intel/atom/sst-mfld-platform-pcm.c
sound/soc/sof/core.c
sound/soc/sof/intel/apl.c
sound/soc/sof/intel/cnl.c
sound/soc/sof/intel/hda-dsp.c
sound/soc/sof/intel/hda.h
sound/soc/sof/intel/icl.c
sound/soc/sof/intel/pci-tgl.c
sound/soc/sof/intel/tgl.c
sound/soc/sunxi/sun4i-codec.c
sound/usb/quirks.c
tools/arch/ia64/include/asm/barrier.h
tools/include/uapi/asm/errno.h
tools/kvm/kvm_stat/kvm_stat.service
tools/lib/bpf/ringbuf.c
tools/lib/bpf/xsk.c
tools/perf/builtin-inject.c
tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.c
tools/perf/util/block-info.c
tools/power/acpi/common/cmfsize.c
tools/testing/radix-tree/idr-test.c
tools/testing/radix-tree/linux/compiler_types.h [deleted file]
tools/testing/radix-tree/multiorder.c
tools/testing/radix-tree/xarray.c
tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
tools/testing/selftests/bpf/progs/bpf_tcp_nogpl.c [new file with mode: 0644]
tools/testing/selftests/bpf/verifier/bounds.c
tools/testing/selftests/bpf/verifier/bounds_deduction.c
tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c
tools/testing/selftests/bpf/verifier/map_ptr.c
tools/testing/selftests/bpf/verifier/unpriv.c
tools/testing/selftests/bpf/verifier/value_ptr_arith.c
tools/testing/selftests/kvm/hardware_disable_test.c
tools/testing/selftests/kvm/x86_64/hyperv_clock.c
tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh

index 541635d..2d93232 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -168,6 +168,7 @@ Johan Hovold <johan@kernel.org> <jhovold@gmail.com>
 Johan Hovold <johan@kernel.org> <johan@hovoldconsulting.com>
 John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
 John Stultz <johnstul@us.ibm.com>
+Jordan Crouse <jordan@cosmicpenguin.net> <jcrouse@codeaurora.org>
 <josh@joshtriplett.org> <josh@freedesktop.org>
 <josh@joshtriplett.org> <josh@kernel.org>
 <josh@joshtriplett.org> <josht@linux.vnet.ibm.com>
@@ -253,8 +254,14 @@ Morten Welinder <welinder@anemone.rentec.com>
 Morten Welinder <welinder@darter.rentec.com>
 Morten Welinder <welinder@troll.com>
 Mythri P K <mythripk@ti.com>
+Nadia Yvette Chambers <nyc@holomorphy.com> William Lee Irwin III <wli@holomorphy.com>
 Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
 Nguyen Anh Quynh <aquynh@gmail.com>
+Nicholas Piggin <npiggin@gmail.com> <npiggen@suse.de>
+Nicholas Piggin <npiggin@gmail.com> <npiggin@kernel.dk>
+Nicholas Piggin <npiggin@gmail.com> <npiggin@suse.de>
+Nicholas Piggin <npiggin@gmail.com> <nickpiggin@yahoo.com.au>
+Nicholas Piggin <npiggin@gmail.com> <piggin@cyberone.com.au>
 Nicolas Ferre <nicolas.ferre@microchip.com> <nicolas.ferre@atmel.com>
 Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
 Nicolas Pitre <nico@fluxnic.net> <nico@linaro.org>
index 6eee10c..637d858 100644 (file)
@@ -1,7 +1,7 @@
 What:          /sys/kernel/debug/moxtet/input
 Date:          March 2019
 KernelVersion: 5.3
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) Read input from the shift registers, in hexadecimal.
                Returns N+1 bytes, where N is the number of Moxtet connected
                modules. The first byte is from the CPU board itself.
@@ -19,7 +19,7 @@ Description:  (Read) Read input from the shift registers, in hexadecimal.
 What:          /sys/kernel/debug/moxtet/output
 Date:          March 2019
 KernelVersion: 5.3
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (RW) Read last written value to the shift registers, in
                hexadecimal, or write values to the shift registers, also
                in hexadecimal.
index 326df1b..813987d 100644 (file)
@@ -1,7 +1,7 @@
 What:          /sys/kernel/debug/turris-mox-rwtm/do_sign
 Date:          Jun 2020
 KernelVersion: 5.8
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:
 
                ======= ===========================================================
index 4a6d61b..32dccc0 100644 (file)
@@ -1,17 +1,17 @@
 What:          /sys/bus/moxtet/devices/moxtet-<name>.<addr>/module_description
 Date:          March 2019
 KernelVersion: 5.3
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) Moxtet module description. Format: string
 
 What:          /sys/bus/moxtet/devices/moxtet-<name>.<addr>/module_id
 Date:          March 2019
 KernelVersion: 5.3
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) Moxtet module ID. Format: %x
 
 What:          /sys/bus/moxtet/devices/moxtet-<name>.<addr>/module_name
 Date:          March 2019
 KernelVersion: 5.3
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) Moxtet module name. Format: string
index 795a5de..c4d4697 100644 (file)
@@ -1,7 +1,7 @@
 What:          /sys/class/leds/<led>/device/brightness
 Date:          July 2020
 KernelVersion: 5.9
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (RW) On the front panel of the Turris Omnia router there is also
                a button which can be used to control the intensity of all the
                LEDs at once, so that if they are too bright, user can dim them.
index b8631f5..ea5e5b4 100644 (file)
@@ -1,21 +1,21 @@
 What:          /sys/firmware/turris-mox-rwtm/board_version
 Date:          August 2019
 KernelVersion: 5.4
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) Board version burned into eFuses of this Turris Mox board.
                Format: %i
 
 What:          /sys/firmware/turris-mox-rwtm/mac_address*
 Date:          August 2019
 KernelVersion: 5.4
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) MAC addresses burned into eFuses of this Turris Mox board.
                Format: %pM
 
 What:          /sys/firmware/turris-mox-rwtm/pubkey
 Date:          August 2019
 KernelVersion: 5.4
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) ECDSA public key (in pubkey hex compressed form) computed
                as pair to the ECDSA private key burned into eFuses of this
                Turris Mox Board.
@@ -24,7 +24,7 @@ Description:  (Read) ECDSA public key (in pubkey hex compressed form) computed
 What:          /sys/firmware/turris-mox-rwtm/ram_size
 Date:          August 2019
 KernelVersion: 5.4
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) RAM size in MiB of this Turris Mox board as was detected
                during manufacturing and burned into eFuses. Can be 512 or 1024.
                Format: %i
@@ -32,6 +32,6 @@ Description:  (Read) RAM size in MiB of this Turris Mox board as was detected
 What:          /sys/firmware/turris-mox-rwtm/serial_number
 Date:          August 2019
 KernelVersion: 5.4
-Contact:       Marek Behún <marek.behun@nic.cz>
+Contact:       Marek Behún <kabel@kernel.org>
 Description:   (Read) Serial number burned into eFuses of this Turris Mox device.
                Format: %016X
index 0454572..bda4e8e 100644 (file)
@@ -50,7 +50,7 @@
                        CONFIG_ACPI_DEBUG must be enabled to produce any ACPI
                        debug output.  Bits in debug_layer correspond to a
                        _COMPONENT in an ACPI source file, e.g.,
-                           #define _COMPONENT ACPI_PCI_COMPONENT
+                           #define _COMPONENT ACPI_EVENTS
                        Bits in debug_level correspond to a level in
                        ACPI_DEBUG_PRINT statements, e.g.,
                            ACPI_DEBUG_PRINT((ACPI_DB_INFO, ...
@@ -60,8 +60,6 @@
 
                        Enable processor driver info messages:
                            acpi.debug_layer=0x20000000
-                       Enable PCI/PCI interrupt routing info messages:
-                           acpi.debug_layer=0x400000
                        Enable AML "Debug" output, i.e., stores to the Debug
                        object while interpreting AML:
                            acpi.debug_layer=0xffffffff acpi.debug_level=0x2
index 37f18d6..4c5c371 100644 (file)
@@ -32,7 +32,7 @@ Optional node properties:
 - "#thermal-sensor-cells" Used to expose itself to thermal fw.
 
 Read more about iio bindings at
-       Documentation/devicetree/bindings/iio/iio-bindings.txt
+       https://github.com/devicetree-org/dt-schema/blob/master/schemas/iio/
 
 Example:
        ncp15wb473@0 {
index ff99344..fd04028 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Bindings for GPIO bitbanged I2C
 
 maintainers:
-  - Wolfram Sang <wolfram@the-dreams.de>
+  - Wolfram Sang <wsa@kernel.org>
 
 allOf:
   - $ref: /schemas/i2c/i2c-controller.yaml#
index f23966b..3592d49 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Freescale Inter IC (I2C) and High Speed Inter IC (HS-I2C) for i.MX
 
 maintainers:
-  - Wolfram Sang <wolfram@the-dreams.de>
+  - Oleksij Rempel <o.rempel@pengutronix.de>
 
 allOf:
   - $ref: /schemas/i2c/i2c-controller.yaml#
index 9f414db..433a3fb 100644 (file)
@@ -14,8 +14,9 @@ description: >
   Industrial I/O subsystem bindings for ADC controller found in
   Ingenic JZ47xx SoCs.
 
-  ADC clients must use the format described in iio-bindings.txt, giving
-  a phandle and IIO specifier pair ("io-channels") to the ADC controller.
+  ADC clients must use the format described in
+  https://github.com/devicetree-org/dt-schema/blob/master/schemas/iio/iio-consumer.yaml,
+  giving a phandle and IIO specifier pair ("io-channels") to the ADC controller.
 
 properties:
   compatible:
index 054406b..721878d 100644 (file)
@@ -24,7 +24,9 @@ properties:
     description: >
       List of phandle and IIO specifier pairs.
       Each pair defines one ADC channel to which a joystick axis is connected.
-      See Documentation/devicetree/bindings/iio/iio-bindings.txt for details.
+      See
+      https://github.com/devicetree-org/dt-schema/blob/master/schemas/iio/iio-consumer.yaml
+      for details.
 
   '#address-cells':
     const: 1
index 51456c0..af5223b 100644 (file)
@@ -5,7 +5,10 @@ Required properties:
  - compatible: must be "resistive-adc-touch"
 The device must be connected to an ADC device that provides channels for
 position measurement and optional pressure.
-Refer to ../iio/iio-bindings.txt for details
+Refer to
+https://github.com/devicetree-org/dt-schema/blob/master/schemas/iio/iio-consumer.yaml
+for details
+
  - iio-channels: must have at least two channels connected to an ADC device.
 These should correspond to the channels exposed by the ADC device and should
 have the right index as the ADC device registers them. These channels
index fe7fa25..c7ed287 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: CZ.NIC's Turris Omnia LEDs driver
 
 maintainers:
-  - Marek Behún <marek.behun@nic.cz>
+  - Marek Behún <kabel@kernel.org>
 
 description:
   This module adds support for the RGB LEDs found on the front panel of the
index d2a6e83..937b3e5 100644 (file)
@@ -72,7 +72,9 @@ Required child device properties:
                                                pwm|regulator|rtc|sysctrl|usb]";
 
   A few child devices require ADC channels from the GPADC node. Those follow the
-  standard bindings from iio/iio-bindings.txt and iio/adc/adc.txt
+  standard bindings from
+  https://github.com/devicetree-org/dt-schema/blob/master/schemas/iio/iio-consumer.yaml
+  and Documentation/devicetree/bindings/iio/adc/adc.yaml
 
   abx500-temp           : io-channels "aux1" and "aux2" for measuring external
                           temperatures.
index 5ddcc8f..b52e7a3 100644 (file)
@@ -16,14 +16,14 @@ Optional subnodes:
 The sub-functions of CPCAP get their own node with their own compatible values,
 which are described in the following files:
 
-- ../power/supply/cpcap-battery.txt
-- ../power/supply/cpcap-charger.txt
-- ../regulator/cpcap-regulator.txt
-- ../phy/phy-cpcap-usb.txt
-- ../input/cpcap-pwrbutton.txt
-- ../rtc/cpcap-rtc.txt
-- ../leds/leds-cpcap.txt
-- ../iio/adc/cpcap-adc.txt
+- Documentation/devicetree/bindings/power/supply/cpcap-battery.txt
+- Documentation/devicetree/bindings/power/supply/cpcap-charger.txt
+- Documentation/devicetree/bindings/regulator/cpcap-regulator.txt
+- Documentation/devicetree/bindings/phy/phy-cpcap-usb.txt
+- Documentation/devicetree/bindings/input/cpcap-pwrbutton.txt
+- Documentation/devicetree/bindings/rtc/cpcap-rtc.txt
+- Documentation/devicetree/bindings/leds/leds-cpcap.txt
+- Documentation/devicetree/bindings/iio/adc/motorola,cpcap-adc.yaml
 
 The only exception is the audio codec. Instead of a compatible value its
 node must be named "audio-codec".
index 79c38ea..13c26f2 100644 (file)
@@ -32,7 +32,7 @@ required:
   - interrupts
   - interrupt-names
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 4b7d1e5..e8f0468 100644 (file)
@@ -49,7 +49,7 @@ properties:
     description:
       Reference to an nvmem node for the MAC address
 
-  nvmem-cells-names:
+  nvmem-cell-names:
     const: mac-address
 
   phy-connection-type:
index b921731..df9e844 100644 (file)
@@ -65,6 +65,71 @@ KSZ9031:
   step is 60ps. The default value is the neutral setting, so setting
   rxc-skew-ps=<0> actually results in -900 picoseconds adjustment.
 
+  The KSZ9031 hardware supports a range of skew values from negative to
+  positive, where the specific range is property dependent. All values
+  specified in the devicetree are offset by the minimum value so they
+  can be represented as positive integers in the devicetree since it's
+  difficult to represent a negative number in the devictree.
+
+  The following 5-bit values table apply to rxc-skew-ps and txc-skew-ps.
+
+  Pad Skew Value       Delay (ps)      Devicetree Value
+  ------------------------------------------------------
+  0_0000               -900ps          0
+  0_0001               -840ps          60
+  0_0010               -780ps          120
+  0_0011               -720ps          180
+  0_0100               -660ps          240
+  0_0101               -600ps          300
+  0_0110               -540ps          360
+  0_0111               -480ps          420
+  0_1000               -420ps          480
+  0_1001               -360ps          540
+  0_1010               -300ps          600
+  0_1011               -240ps          660
+  0_1100               -180ps          720
+  0_1101               -120ps          780
+  0_1110               -60ps           840
+  0_1111               0ps             900
+  1_0000               60ps            960
+  1_0001               120ps           1020
+  1_0010               180ps           1080
+  1_0011               240ps           1140
+  1_0100               300ps           1200
+  1_0101               360ps           1260
+  1_0110               420ps           1320
+  1_0111               480ps           1380
+  1_1000               540ps           1440
+  1_1001               600ps           1500
+  1_1010               660ps           1560
+  1_1011               720ps           1620
+  1_1100               780ps           1680
+  1_1101               840ps           1740
+  1_1110               900ps           1800
+  1_1111               960ps           1860
+
+  The following 4-bit values table apply to the txdX-skew-ps, rxdX-skew-ps
+  data pads, and the rxdv-skew-ps, txen-skew-ps control pads.
+
+  Pad Skew Value       Delay (ps)      Devicetree Value
+  ------------------------------------------------------
+  0000                 -420ps          0
+  0001                 -360ps          60
+  0010                 -300ps          120
+  0011                 -240ps          180
+  0100                 -180ps          240
+  0101                 -120ps          300
+  0110                 -60ps           360
+  0111                 0ps             420
+  1000                 60ps            480
+  1001                 120ps           540
+  1010                 180ps           600
+  1011                 240ps           660
+  1100                 300ps           720
+  1101                 360ps           780
+  1110                 420ps           840
+  1111                 480ps           900
+
   Optional properties:
 
     Maximum value of 1860, default value 900:
@@ -120,11 +185,21 @@ KSZ9131:
 
 Examples:
 
+       /* Attach to an Ethernet device with autodetected PHY */
+       &enet {
+               rxc-skew-ps = <1800>;
+               rxdv-skew-ps = <0>;
+               txc-skew-ps = <1800>;
+               txen-skew-ps = <0>;
+               status = "okay";
+       };
+
+       /* Attach to an explicitly-specified PHY */
        mdio {
                phy0: ethernet-phy@0 {
-                       rxc-skew-ps = <3000>;
+                       rxc-skew-ps = <1800>;
                        rxdv-skew-ps = <0>;
-                       txc-skew-ps = <3000>;
+                       txc-skew-ps = <1800>;
                        txen-skew-ps = <0>;
                        reg = <0>;
                };
@@ -133,3 +208,20 @@ Examples:
                phy = <&phy0>;
                phy-mode = "rgmii-id";
        };
+
+References
+
+  Micrel ksz9021rl/rn Data Sheet, Revision 1.2. Dated 2/13/2014.
+  http://www.micrel.com/_PDF/Ethernet/datasheets/ksz9021rl-rn_ds.pdf
+
+  Micrel ksz9031rnx Data Sheet, Revision 2.1. Dated 11/20/2014.
+  http://www.micrel.com/_PDF/Ethernet/datasheets/KSZ9031RNX.pdf
+
+Notes:
+
+  Note that a previous version of the Micrel ksz9021rl/rn Data Sheet
+  was missing extended register 106 (transmit data pad skews), and
+  incorrectly specified the ps per step as 200ps/step instead of
+  120ps/step. The latest update to this document reflects the latest
+  revision of the Micrel specification even though usage in the kernel
+  still reflects that incorrect document.
index 54b36ac..0c979d8 100644 (file)
@@ -54,7 +54,6 @@ shows the supported mask values, currently these::
     ACPI_TOOLS                      0x00002000
     ACPI_SBS_COMPONENT              0x00100000
     ACPI_FAN_COMPONENT              0x00200000
-    ACPI_PCI_COMPONENT              0x00400000
     ACPI_CONTAINER_COMPONENT        0x01000000
     ACPI_SYSTEM_COMPONENT           0x02000000
     ACPI_MEMORY_DEVICE_COMPONENT    0x08000000
@@ -126,10 +125,6 @@ AML) during boot::
 
     acpi.debug_layer=0xffffffff acpi.debug_level=0x2
 
-Enable PCI and PCI interrupt routing debug messages::
-
-    acpi.debug_layer=0x400000 acpi.debug_level=0x4
-
 Enable all ACPI hardware-related messages::
 
     acpi.debug_layer=0x2 acpi.debug_level=0xffffffff
index 0507348..dc03ff8 100644 (file)
@@ -976,9 +976,9 @@ constraints on coalescing parameters and their values.
 
 
 PAUSE_GET
-============
+=========
 
-Gets channel counts like ``ETHTOOL_GPAUSE`` ioctl request.
+Gets pause frame settings like ``ETHTOOL_GPAUSEPARAM`` ioctl request.
 
 Request contents:
 
@@ -1007,7 +1007,7 @@ the statistics in the following structure:
 Each member has a corresponding attribute defined.
 
 PAUSE_SET
-============
+=========
 
 Sets pause parameters like ``ETHTOOL_GPAUSEPARAM`` ioctl request.
 
@@ -1024,7 +1024,7 @@ Request contents:
 EEE_GET
 =======
 
-Gets channel counts like ``ETHTOOL_GEEE`` ioctl request.
+Gets Energy Efficient Ethernet settings like ``ETHTOOL_GEEE`` ioctl request.
 
 Request contents:
 
@@ -1054,7 +1054,7 @@ first 32 are provided by the ``ethtool_ops`` callback.
 EEE_SET
 =======
 
-Sets pause parameters like ``ETHTOOL_GEEEPARAM`` ioctl request.
+Sets Energy Efficient Ethernet parameters like ``ETHTOOL_SEEE`` ioctl request.
 
 Request contents:
 
index c7952ac..3feb5e5 100644 (file)
@@ -1849,21 +1849,6 @@ ip6frag_low_thresh - INTEGER
 ip6frag_time - INTEGER
        Time in seconds to keep an IPv6 fragment in memory.
 
-IPv6 Segment Routing:
-
-seg6_flowlabel - INTEGER
-       Controls the behaviour of computing the flowlabel of outer
-       IPv6 header in case of SR T.encaps
-
-        == =======================================================
-        -1  set flowlabel to zero.
-         0  copy flowlabel from Inner packet in case of Inner IPv6
-            (Set flowlabel to 0 in case IPv4/L2)
-         1  Compute the flowlabel using seg6_make_flowlabel()
-        == =======================================================
-
-       Default is 0.
-
 ``conf/default/*``:
        Change the interface-specific default settings.
 
index ec73e14..07c20e4 100644 (file)
@@ -24,3 +24,16 @@ seg6_require_hmac - INTEGER
        * 1 - Drop SR packets without HMAC, validate SR packets with HMAC
 
        Default is 0.
+
+seg6_flowlabel - INTEGER
+       Controls the behaviour of computing the flowlabel of outer
+       IPv6 header in case of SR T.encaps
+
+        == =======================================================
+        -1  set flowlabel to zero.
+         0  copy flowlabel from Inner packet in case of Inner IPv6
+            (Set flowlabel to 0 in case IPv4/L2)
+         1  Compute the flowlabel using seg6_make_flowlabel()
+        == =======================================================
+
+       Default is 0.
index fb2a363..9450e05 100644 (file)
@@ -1576,11 +1576,13 @@ R:      Jernej Skrabec <jernej.skrabec@siol.net>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/sunxi/linux.git
+L:     linux-sunxi@lists.linux.dev
 F:     arch/arm/mach-sunxi/
 F:     arch/arm64/boot/dts/allwinner/
 F:     drivers/clk/sunxi-ng/
 F:     drivers/pinctrl/sunxi/
 F:     drivers/soc/sunxi/
+N:     allwinner
 N:     sun[x456789]i
 N:     sun50i
 
@@ -1790,19 +1792,26 @@ F:      drivers/net/ethernet/cortina/
 F:     drivers/pinctrl/pinctrl-gemini.c
 F:     drivers/rtc/rtc-ftrtc010.c
 
-ARM/CZ.NIC TURRIS MOX SUPPORT
-M:     Marek Behun <marek.behun@nic.cz>
+ARM/CZ.NIC TURRIS SUPPORT
+M:     Marek Behun <kabel@kernel.org>
 S:     Maintained
-W:     http://mox.turris.cz
+W:     https://www.turris.cz/
 F:     Documentation/ABI/testing/debugfs-moxtet
 F:     Documentation/ABI/testing/sysfs-bus-moxtet-devices
 F:     Documentation/ABI/testing/sysfs-firmware-turris-mox-rwtm
 F:     Documentation/devicetree/bindings/bus/moxtet.txt
 F:     Documentation/devicetree/bindings/firmware/cznic,turris-mox-rwtm.txt
 F:     Documentation/devicetree/bindings/gpio/gpio-moxtet.txt
+F:     Documentation/devicetree/bindings/leds/cznic,turris-omnia-leds.yaml
+F:     Documentation/devicetree/bindings/watchdog/armada-37xx-wdt.txt
 F:     drivers/bus/moxtet.c
 F:     drivers/firmware/turris-mox-rwtm.c
+F:     drivers/leds/leds-turris-omnia.c
+F:     drivers/mailbox/armada-37xx-rwtm-mailbox.c
 F:     drivers/gpio/gpio-moxtet.c
+F:     drivers/watchdog/armada_37xx_wdt.c
+F:     include/dt-bindings/bus/moxtet.h
+F:     include/linux/armada-37xx-rwtm-mailbox.h
 F:     include/linux/moxtet.h
 
 ARM/EZX SMARTPHONES (A780, A910, A1200, E680, ROKR E2 and ROKR E6)
@@ -7089,7 +7098,7 @@ S:        Maintained
 F:     drivers/i2c/busses/i2c-cpm.c
 
 FREESCALE IMX / MXC FEC DRIVER
-M:     Fugang Duan <fugang.duan@nxp.com>
+M:     Joakim Zhang <qiangqing.zhang@nxp.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/fsl-fec.txt
@@ -7474,8 +7483,9 @@ F:        include/uapi/asm-generic/
 GENERIC PHY FRAMEWORK
 M:     Kishon Vijay Abraham I <kishon@ti.com>
 M:     Vinod Koul <vkoul@kernel.org>
-L:     linux-kernel@vger.kernel.org
+L:     linux-phy@lists.infradead.org
 S:     Supported
+Q:     https://patchwork.kernel.org/project/linux-phy/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/phy/linux-phy.git
 F:     Documentation/devicetree/bindings/phy/
 F:     drivers/phy/
@@ -8516,9 +8526,9 @@ F:        drivers/pci/hotplug/rpaphp*
 
 IBM Power SRIOV Virtual NIC Device Driver
 M:     Dany Madden <drt@linux.ibm.com>
-M:     Lijun Pan <ljp@linux.ibm.com>
 M:     Sukadev Bhattiprolu <sukadev@linux.ibm.com>
 R:     Thomas Falcon <tlfalcon@linux.ibm.com>
+R:     Lijun Pan <lijunp213@gmail.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/ibm/ibmvnic.*
@@ -14849,6 +14859,14 @@ L:     linux-arm-msm@vger.kernel.org
 S:     Maintained
 F:     drivers/iommu/arm/arm-smmu/qcom_iommu.c
 
+QUALCOMM IPC ROUTER (QRTR) DRIVER
+M:     Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+L:     linux-arm-msm@vger.kernel.org
+S:     Maintained
+F:     include/trace/events/qrtr.h
+F:     include/uapi/linux/qrtr.h
+F:     net/qrtr/
+
 QUALCOMM IPCC MAILBOX DRIVER
 M:     Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 L:     linux-arm-msm@vger.kernel.org
@@ -15198,6 +15216,7 @@ F:      fs/reiserfs/
 REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM
 M:     Ohad Ben-Cohen <ohad@wizery.com>
 M:     Bjorn Andersson <bjorn.andersson@linaro.org>
+M:     Mathieu Poirier <mathieu.poirier@linaro.org>
 L:     linux-remoteproc@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/andersson/remoteproc.git rproc-next
@@ -15211,6 +15230,7 @@ F:      include/linux/remoteproc/
 REMOTE PROCESSOR MESSAGING (RPMSG) SUBSYSTEM
 M:     Ohad Ben-Cohen <ohad@wizery.com>
 M:     Bjorn Andersson <bjorn.andersson@linaro.org>
+M:     Mathieu Poirier <mathieu.poirier@linaro.org>
 L:     linux-remoteproc@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/andersson/remoteproc.git rpmsg-next
@@ -15627,8 +15647,8 @@ F:      Documentation/s390/pci.rst
 
 S390 VFIO AP DRIVER
 M:     Tony Krowiak <akrowiak@linux.ibm.com>
-M:     Pierre Morel <pmorel@linux.ibm.com>
 M:     Halil Pasic <pasic@linux.ibm.com>
+M:     Jason Herne <jjherne@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
 W:     http://www.ibm.com/developerworks/linux/linux390/
@@ -15640,6 +15660,7 @@ F:      drivers/s390/crypto/vfio_ap_private.h
 S390 VFIO-CCW DRIVER
 M:     Cornelia Huck <cohuck@redhat.com>
 M:     Eric Farman <farman@linux.ibm.com>
+M:     Matthew Rosato <mjrosato@linux.ibm.com>
 R:     Halil Pasic <pasic@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 L:     kvm@vger.kernel.org
@@ -15650,6 +15671,7 @@ F:      include/uapi/linux/vfio_ccw.h
 
 S390 VFIO-PCI DRIVER
 M:     Matthew Rosato <mjrosato@linux.ibm.com>
+M:     Eric Farman <farman@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 L:     kvm@vger.kernel.org
 S:     Supported
index 73add16..bc19584 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc8
 NAME = Frozen Wasteland
 
 # *DOCUMENTATION*
index 60d578e..76ad527 100644 (file)
@@ -16,7 +16,7 @@
        memory {
                device_type = "memory";
                /* CONFIG_LINUX_RAM_BASE needs to match low mem start */
-               reg = <0x0 0x80000000 0x0 0x20000000    /* 512 MB low mem */
+               reg = <0x0 0x80000000 0x0 0x40000000    /* 1 GB low mem */
                       0x1 0x00000000 0x0 0x40000000>;  /* 1 GB highmem */
        };
 
index a78d8f7..fdbe06c 100644 (file)
@@ -96,7 +96,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
                             sizeof(sf->uc.uc_mcontext.regs.scratch));
        err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
 
-       return err;
+       return err ? -EFAULT : 0;
 }
 
 static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
@@ -110,7 +110,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
                                &(sf->uc.uc_mcontext.regs.scratch),
                                sizeof(sf->uc.uc_mcontext.regs.scratch));
        if (err)
-               return err;
+               return -EFAULT;
 
        set_current_blocked(&set);
        regs->bta       = uregs.scratch.bta;
index 74ad425..47bab67 100644 (file)
@@ -187,25 +187,26 @@ static void init_unwind_table(struct unwind_table *table, const char *name,
                              const void *table_start, unsigned long table_size,
                              const u8 *header_start, unsigned long header_size)
 {
-       const u8 *ptr = header_start + 4;
-       const u8 *end = header_start + header_size;
-
        table->core.pc = (unsigned long)core_start;
        table->core.range = core_size;
        table->init.pc = (unsigned long)init_start;
        table->init.range = init_size;
        table->address = table_start;
        table->size = table_size;
-
-       /* See if the linker provided table looks valid. */
-       if (header_size <= 4
-           || header_start[0] != 1
-           || (void *)read_pointer(&ptr, end, header_start[1]) != table_start
-           || header_start[2] == DW_EH_PE_omit
-           || read_pointer(&ptr, end, header_start[2]) <= 0
-           || header_start[3] == DW_EH_PE_omit)
-               header_start = NULL;
-
+       /* To avoid the pointer addition with NULL pointer.*/
+       if (header_start != NULL) {
+               const u8 *ptr = header_start + 4;
+               const u8 *end = header_start + header_size;
+               /* See if the linker provided table looks valid. */
+               if (header_size <= 4
+               || header_start[0] != 1
+               || (void *)read_pointer(&ptr, end, header_start[1])
+                               != table_start
+               || header_start[2] == DW_EH_PE_omit
+               || read_pointer(&ptr, end, header_start[2]) <= 0
+               || header_start[3] == DW_EH_PE_omit)
+                       header_start = NULL;
+       }
        table->hdrsz = header_size;
        smp_wmb();
        table->header = header_start;
index 5da96f5..2fae148 100644 (file)
@@ -1293,9 +1293,15 @@ config KASAN_SHADOW_OFFSET
 
 config NR_CPUS
        int "Maximum number of CPUs (2-32)"
-       range 2 32
+       range 2 16 if DEBUG_KMAP_LOCAL
+       range 2 32 if !DEBUG_KMAP_LOCAL
        depends on SMP
        default "4"
+       help
+         The maximum number of CPUs that the kernel can support.
+         Up to 32 CPUs can be supported, or up to 16 if kmap_local()
+         debugging is enabled, which uses half of the per-CPU fixmap
+         slots as guard regions.
 
 config HOTPLUG_CPU
        bool "Support for hot-pluggable CPUs"
index 646a064..5bd6a66 100644 (file)
@@ -32,7 +32,8 @@
                ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
                          MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
                          MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000
-                         MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;
+                         MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000
+                         MBUS_ID(0x0c, 0x04) 0 0xf1200000 0x100000>;
 
                internal-regs {
 
        phy1: ethernet-phy@1 {
                compatible = "ethernet-phy-ieee802.3-c22";
                reg = <1>;
+               marvell,reg-init = <3 18 0 0x4985>;
 
                /* irq is connected to &pcawan pin 7 */
        };
index 462b1df..720beec 100644 (file)
                        #reset-cells = <1>;
                };
 
-               bsc_intr: interrupt-controller@7ef00040 {
-                       compatible = "brcm,bcm2711-l2-intc", "brcm,l2-intc";
-                       reg = <0x7ef00040 0x30>;
-                       interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupt-controller;
-                       #interrupt-cells = <1>;
-               };
-
                aon_intr: interrupt-controller@7ef00100 {
                        compatible = "brcm,bcm2711-l2-intc", "brcm,l2-intc";
                        reg = <0x7ef00100 0x30>;
                        reg = <0x7ef04500 0x100>, <0x7ef00b00 0x300>;
                        reg-names = "bsc", "auto-i2c";
                        clock-frequency = <97500>;
-                       interrupt-parent = <&bsc_intr>;
-                       interrupts = <0>;
                        status = "disabled";
                };
 
                        reg = <0x7ef09500 0x100>, <0x7ef05b00 0x300>;
                        reg-names = "bsc", "auto-i2c";
                        clock-frequency = <97500>;
-                       interrupt-parent = <&bsc_intr>;
-                       interrupts = <1>;
                        status = "disabled";
                };
        };
index 7a1e531..f28a96f 100644 (file)
        pinctrl-0 = <&pinctrl_usdhc2>;
        cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+       vmmc-supply = <&vdd_sd1_reg>;
        status = "disabled";
 };
 
                     &pinctrl_usdhc3_cdwp>;
        cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
+       vmmc-supply = <&vdd_sd0_reg>;
        status = "disabled";
 };
index 9dcae1f..c5b9da0 100644 (file)
@@ -24,6 +24,9 @@
                i2c0 = &i2c1;
                i2c1 = &i2c2;
                i2c2 = &i2c3;
+               mmc0 = &mmc1;
+               mmc1 = &mmc2;
+               mmc2 = &mmc3;
                serial0 = &uart1;
                serial1 = &uart2;
                serial2 = &uart3;
index 72e4f64..4a9f949 100644 (file)
                i2c1 = &i2c2;
                i2c2 = &i2c3;
                i2c3 = &i2c4;
+               mmc0 = &mmc1;
+               mmc1 = &mmc2;
+               mmc2 = &mmc3;
+               mmc3 = &mmc4;
+               mmc4 = &mmc5;
                serial0 = &uart1;
                serial1 = &uart2;
                serial2 = &uart3;
index 5328685..1f1c04d 100644 (file)
                ti,max-div = <2>;
        };
 
-       sha2md5_fck: sha2md5_fck@15c8 {
-               #clock-cells = <0>;
-               compatible = "ti,gate-clock";
-               clocks = <&l3_div_ck>;
-               ti,bit-shift = <1>;
-               reg = <0x15c8>;
-       };
-
        usb_phy_cm_clk32k: usb_phy_cm_clk32k@640 {
                #clock-cells = <0>;
                compatible = "ti,gate-clock";
index e025b7c..ee821d0 100644 (file)
                i2c2 = &i2c3;
                i2c3 = &i2c4;
                i2c4 = &i2c5;
+               mmc0 = &mmc1;
+               mmc1 = &mmc2;
+               mmc2 = &mmc3;
+               mmc3 = &mmc4;
+               mmc4 = &mmc5;
                serial0 = &uart1;
                serial1 = &uart2;
                serial2 = &uart3;
index 0b2fd7e..90b1e9b 100644 (file)
 #include <asm/mach-types.h>
 
 /* cats host-specific stuff */
-static int irqmap_cats[] __initdata = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 };
+static int irqmap_cats[] = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 };
 
 static u8 cats_no_swizzle(struct pci_dev *dev, u8 *pin)
 {
        return 0;
 }
 
-static int __init cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        if (dev->irq >= 255)
                return -1;      /* not a valid interrupt. */
index 6f28aaa..c3f280d 100644 (file)
@@ -14,9 +14,9 @@
 #include <asm/mach/pci.h>
 #include <asm/mach-types.h>
 
-static int irqmap_ebsa285[] __initdata = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI };
+static int irqmap_ebsa285[] = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI };
 
-static int __init ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        if (dev->vendor == PCI_VENDOR_ID_CONTAQ &&
            dev->device == PCI_DEVICE_ID_CONTAQ_82C693)
index 9473aa0..e830439 100644 (file)
@@ -18,7 +18,7 @@
  * We now use the slot ID instead of the device identifiers to select
  * which interrupt is routed where.
  */
-static int __init netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        switch (slot) {
        case 0:  /* host bridge */
index 4391e43..9d19aa9 100644 (file)
 #include <asm/mach/pci.h>
 #include <asm/mach-types.h>
 
-static int irqmap_personal_server[] __initdata = {
+static int irqmap_personal_server[] = {
        IRQ_IN0, IRQ_IN1, IRQ_IN2, IRQ_IN3, 0, 0, 0,
        IRQ_DOORBELLHOST, IRQ_DMA1, IRQ_DMA2, IRQ_PCI
 };
 
-static int __init personal_server_map_irq(const struct pci_dev *dev, u8 slot,
-       u8 pin)
+static int personal_server_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        unsigned char line;
 
index cd711bf..2c647bd 100644 (file)
@@ -65,7 +65,7 @@ static void __init keystone_init(void)
 static long long __init keystone_pv_fixup(void)
 {
        long long offset;
-       phys_addr_t mem_start, mem_end;
+       u64 mem_start, mem_end;
 
        mem_start = memblock_start_of_DRAM();
        mem_end = memblock_end_of_DRAM();
@@ -78,7 +78,7 @@ static long long __init keystone_pv_fixup(void)
        if (mem_start < KEYSTONE_HIGH_PHYS_START ||
            mem_end   > KEYSTONE_HIGH_PHYS_END) {
                pr_crit("Invalid address space for memory (%08llx-%08llx)\n",
-                       (u64)mem_start, (u64)mem_end);
+                       mem_start, mem_end);
                return 0;
        }
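
The hunk above widens mem_start/mem_end from phys_addr_t to u64, presumably because a 32-bit phys_addr_t cannot hold the above-4GiB KEYSTONE_HIGH_PHYS_* values, which makes the range check tautological and lets the compiler warn about it (it also makes the old (u64) printk casts redundant). A standalone sketch of that kind of impossible comparison, with made-up addresses rather than the real Keystone constants:

/*
 * Sketch only: HIGH_PHYS_END stands in for KEYSTONE_HIGH_PHYS_END.
 * With a 32-bit operand the "end above range?" test can never be true,
 * and compilers such as clang flag it; with a 64-bit operand it is
 * meaningful.
 */
#include <stdint.h>
#include <stdio.h>

#define HIGH_PHYS_END 0x87fffffffULL      /* > 4 GiB, illustrative */

int main(void)
{
	uint32_t end32 = 0xffffffffu;     /* the most a 32-bit phys_addr_t can express */
	uint64_t end64 = 0x880000000ULL;  /* an end address the 32-bit type cannot hold */

	printf("32-bit: end above high range? %d\n", end32 > HIGH_PHYS_END);
	printf("64-bit: end above high range? %d\n", end64 > HIGH_PHYS_END);
	return 0;
}
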
 
index 14a6c3e..f745a65 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/platform_data/gpio-omap.h>
 
 #include <asm/assembler.h>
+#include <asm/irq.h>
 
 #include "ams-delta-fiq.h"
 #include "board-ams-delta.h"
index 7290f03..1610c56 100644 (file)
@@ -33,7 +33,7 @@ static void __init __maybe_unused omap_generic_init(void)
 }
 
 /* Clocks are needed early, see drivers/clocksource for the rest */
-void __init __maybe_unused omap_init_time_of(void)
+static void __init __maybe_unused omap_init_time_of(void)
 {
        omap_clk_init();
        timer_probe();
index f70d561..0659ab4 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/arm-smccc.h>
+#include <linux/cpu_pm.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/io.h>
@@ -20,6 +21,7 @@
 
 #include "common.h"
 #include "omap-secure.h"
+#include "soc.h"
 
 static phys_addr_t omap_secure_memblock_base;
 
@@ -213,3 +215,40 @@ void __init omap_secure_init(void)
 {
        omap_optee_init_check();
 }
+
+/*
+ * Dummy dispatcher call after core OSWR and MPU off. Updates the ROM return
+ * address after MMU has been re-enabled after CPU1 has been woken up again.
+ * Otherwise the ROM code will attempt to use the earlier physical return
+ * address that got set with MMU off when waking up CPU1. Only used on secure
+ * devices.
+ */
+static int cpu_notifier(struct notifier_block *nb, unsigned long cmd, void *v)
+{
+       switch (cmd) {
+       case CPU_CLUSTER_PM_EXIT:
+               omap_secure_dispatcher(OMAP4_PPA_SERVICE_0,
+                                      FLAG_START_CRITICAL,
+                                      0, 0, 0, 0, 0);
+               break;
+       default:
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block secure_notifier_block = {
+       .notifier_call = cpu_notifier,
+};
+
+static int __init secure_pm_init(void)
+{
+       if (omap_type() == OMAP2_DEVICE_TYPE_GP || !soc_is_omap44xx())
+               return 0;
+
+       cpu_pm_register_notifier(&secure_notifier_block);
+
+       return 0;
+}
+omap_arch_initcall(secure_pm_init);
index 4aaa957..172069f 100644 (file)
@@ -50,6 +50,7 @@
 #define OMAP5_DRA7_MON_SET_ACR_INDEX   0x107
 
 /* Secure PPA(Primary Protected Application) APIs */
+#define OMAP4_PPA_SERVICE_0            0x21
 #define OMAP4_PPA_L2_POR_INDEX         0x23
 #define OMAP4_PPA_CPU_ACTRL_SMP_INDEX  0x25
 
index 09076ad..668dc84 100644 (file)
@@ -246,10 +246,10 @@ int __init omap4_cpcap_init(void)
        omap_voltage_register_pmic(voltdm, &omap443x_max8952_mpu);
 
        if (of_machine_is_compatible("motorola,droid-bionic")) {
-               voltdm = voltdm_lookup("mpu");
+               voltdm = voltdm_lookup("core");
                omap_voltage_register_pmic(voltdm, &omap_cpcap_core);
 
-               voltdm = voltdm_lookup("mpu");
+               voltdm = voltdm_lookup("iva");
                omap_voltage_register_pmic(voltdm, &omap_cpcap_iva);
        } else {
                voltdm = voltdm_lookup("core");
index 17b66f0..6059256 100644 (file)
@@ -188,7 +188,7 @@ static const char * const dra7_sr_instances[] = {
 
 int __init omap_devinit_smartreflex(void)
 {
-       const char * const *sr_inst;
+       const char * const *sr_inst = NULL;
        int i, nr_sr = 0;
 
        if (soc_is_omap44xx()) {
index d1010ec..d237bd0 100644 (file)
@@ -502,16 +502,20 @@ static inline void mainstone_init_keypad(void) {}
 #endif
 
 static int mst_pcmcia0_irqs[11] = {
-       [0 ... 10] = -1,
+       [0 ... 4] = -1,
        [5] = MAINSTONE_S0_CD_IRQ,
+       [6 ... 7] = -1,
        [8] = MAINSTONE_S0_STSCHG_IRQ,
+       [9] = -1,
        [10] = MAINSTONE_S0_IRQ,
 };
 
 static int mst_pcmcia1_irqs[11] = {
-       [0 ... 10] = -1,
+       [0 ... 4] = -1,
        [5] = MAINSTONE_S1_CD_IRQ,
+       [6 ... 7] = -1,
        [8] = MAINSTONE_S1_STSCHG_IRQ,
+       [9] = -1,
        [10] = MAINSTONE_S1_IRQ,
 };
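
The rewrite above splits the catch-all [0 ... 10] = -1 into disjoint ranges so each array slot is initialized exactly once; with the old form the explicit [5]/[8]/[10] designators override entries the range already initialized, which -Woverride-init (enabled by -Wextra) complains about. A standalone C sketch of the two shapes, using GNU range designators and placeholder IRQ numbers instead of the real MAINSTONE_* constants:

/* Build with: gcc -Wall -Wextra demo.c  (the "old" array triggers -Woverride-init) */
#include <stdio.h>

#define S0_CD_IRQ      100   /* placeholders, not the real Mainstone IRQs */
#define S0_STSCHG_IRQ  101
#define S0_IRQ         102

/* Old shape: the range covers every slot, later designators override it. */
static int irqs_old[11] = {
	[0 ... 10] = -1,
	[5]  = S0_CD_IRQ,
	[8]  = S0_STSCHG_IRQ,
	[10] = S0_IRQ,
};

/* New shape: disjoint ranges, no overrides, same contents. */
static int irqs_new[11] = {
	[0 ... 4] = -1,
	[5]       = S0_CD_IRQ,
	[6 ... 7] = -1,
	[8]       = S0_STSCHG_IRQ,
	[9]       = -1,
	[10]      = S0_IRQ,
};

int main(void)
{
	for (int i = 0; i < 11; i++)
		printf("%2d: old=%d new=%d\n", i, irqs_old[i], irqs_new[i]);
	return 0;
}
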
 
index a25b660..c1e12aa 100644 (file)
@@ -387,8 +387,7 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
        pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
 
        /* Make sure fixmap region does not exceed available allocation. */
-       BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
-                    FIXADDR_END);
+       BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) < FIXADDR_START);
        BUG_ON(idx >= __end_of_fixed_addresses);
 
        /* we only support device mappings until pgprot_kernel has been set */
index 88950e4..59d916c 100644 (file)
@@ -235,6 +235,7 @@ void __init pmsav7_adjust_lowmem_bounds(void)
        phys_addr_t mem_end;
        phys_addr_t reg_start, reg_end;
        unsigned int mem_max_regions;
+       bool first = true;
        int num;
        u64 i;
 
@@ -263,7 +264,7 @@ void __init pmsav7_adjust_lowmem_bounds(void)
 #endif
 
        for_each_mem_range(i, &reg_start, &reg_end) {
-               if (i == 0) {
+               if (first) {
                        phys_addr_t phys_offset = PHYS_OFFSET;
 
                        /*
@@ -275,6 +276,7 @@ void __init pmsav7_adjust_lowmem_bounds(void)
                        mem_start = reg_start;
                        mem_end = reg_end;
                        specified_mem_size = mem_end - mem_start;
+                       first = false;
                } else {
                        /*
                         * memblock auto merges contiguous blocks, remove
index 2de019f..8359748 100644 (file)
@@ -95,10 +95,11 @@ void __init pmsav8_adjust_lowmem_bounds(void)
 {
        phys_addr_t mem_end;
        phys_addr_t reg_start, reg_end;
+       bool first = true;
        u64 i;
 
        for_each_mem_range(i, &reg_start, &reg_end) {
-               if (i == 0) {
+               if (first) {
                        phys_addr_t phys_offset = PHYS_OFFSET;
 
                        /*
@@ -107,6 +108,7 @@ void __init pmsav8_adjust_lowmem_bounds(void)
                        if (reg_start != phys_offset)
                                panic("First memory bank must be contiguous from PHYS_OFFSET");
                        mem_end = reg_end;
+                       first = false;
                } else {
                        /*
                         * memblock auto merges contiguous blocks, remove
index c4b49b3..f5f790c 100644 (file)
@@ -204,7 +204,7 @@ unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
 static struct undef_hook uprobes_arm_break_hook = {
        .instr_mask     = 0x0fffffff,
        .instr_val      = (UPROBE_SWBP_ARM_INSN & 0x0fffffff),
-       .cpsr_mask      = MODE_MASK,
+       .cpsr_mask      = (PSR_T_BIT | MODE_MASK),
        .cpsr_val       = USR_MODE,
        .fn             = uprobe_trap_handler,
 };
@@ -212,7 +212,7 @@ static struct undef_hook uprobes_arm_break_hook = {
 static struct undef_hook uprobes_arm_ss_hook = {
        .instr_mask     = 0x0fffffff,
        .instr_val      = (UPROBE_SS_ARM_INSN & 0x0fffffff),
-       .cpsr_mask      = MODE_MASK,
+       .cpsr_mask      = (PSR_T_BIT | MODE_MASK),
        .cpsr_val       = USR_MODE,
        .fn             = uprobe_trap_handler,
 };
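
Widening cpsr_mask to include PSR_T_BIT means the uprobe undef hooks only match ARM-state user mode, so the ARM breakpoint/single-step encodings are not hooked while the CPU is executing Thumb code. A standalone sketch of the (cpsr & mask) == val matching and of why a wider mask makes the match stricter; the constants mirror the CPSR layout (mode field in bits 4:0, T bit at bit 5) but are spelled out here only for illustration:

#include <stdbool.h>
#include <stdio.h>

#define MODE_MASK 0x1fu        /* processor mode field */
#define USR_MODE  0x10u
#define PSR_T_BIT (1u << 5)    /* Thumb execution state */

static bool hook_matches(unsigned int cpsr, unsigned int mask, unsigned int val)
{
	return (cpsr & mask) == val;
}

int main(void)
{
	unsigned int arm_user   = USR_MODE;              /* ARM state   */
	unsigned int thumb_user = USR_MODE | PSR_T_BIT;  /* Thumb state */

	/* Old mask: both ARM and Thumb user mode match the ARM-encoding hook. */
	printf("old mask: arm=%d thumb=%d\n",
	       hook_matches(arm_user, MODE_MASK, USR_MODE),
	       hook_matches(thumb_user, MODE_MASK, USR_MODE));

	/* New mask includes the T bit, so only ARM-state user mode matches. */
	printf("new mask: arm=%d thumb=%d\n",
	       hook_matches(arm_user, PSR_T_BIT | MODE_MASK, USR_MODE),
	       hook_matches(thumb_user, PSR_T_BIT | MODE_MASK, USR_MODE));
	return 0;
}
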
index e4e1b65..dfdc3e0 100644 (file)
@@ -1406,10 +1406,13 @@ config ARM64_PAN
 config AS_HAS_LDAPR
        def_bool $(as-instr,.arch_extension rcpc)
 
+config AS_HAS_LSE_ATOMICS
+       def_bool $(as-instr,.arch_extension lse)
+
 config ARM64_LSE_ATOMICS
        bool
        default ARM64_USE_LSE_ATOMICS
-       depends on $(as-instr,.arch_extension lse)
+       depends on AS_HAS_LSE_ATOMICS
 
 config ARM64_USE_LSE_ATOMICS
        bool "Atomic instructions"
@@ -1666,6 +1669,7 @@ config ARM64_MTE
        default y
        depends on ARM64_AS_HAS_MTE && ARM64_TAGGED_ADDR_ABI
        depends on AS_HAS_ARMV8_5
+       depends on AS_HAS_LSE_ATOMICS
        # Required for tag checking in the uaccess routines
        depends on ARM64_PAN
        select ARCH_USES_HIGH_VMA_FLAGS
index 437ffe3..e79ce49 100644 (file)
@@ -19,3 +19,7 @@
                };
        };
 };
+
+&mmc0 {
+       cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 push-push switch */
+};
index 3402cec..df62044 100644 (file)
@@ -34,7 +34,7 @@
        vmmc-supply = <&reg_dcdc1>;
        disable-wp;
        bus-width = <4>;
-       cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
+       cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 push-pull switch */
        status = "okay";
 };
 
index 4f47551..b580804 100644 (file)
        vcc-pm-supply = <&reg_aldo1>;
 };
 
-&rtc {
-       clocks = <&ext_osc32k>;
-};
-
 &spdif {
        status = "okay";
 };
index 49e9797..af8b7d0 100644 (file)
                        compatible = "allwinner,sun8i-a23-rsb";
                        reg = <0x07083000 0x400>;
                        interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&r_ccu 13>;
+                       clocks = <&r_ccu CLK_R_APB2_RSB>;
                        clock-frequency = <3000000>;
-                       resets = <&r_ccu 7>;
+                       resets = <&r_ccu RST_R_APB2_RSB>;
                        pinctrl-names = "default";
                        pinctrl-0 = <&r_rsb_pins>;
                        status = "disabled";
index 5ccc4cc..a003e6a 100644 (file)
 #define MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD                                     0x0A4 0x30C 0x000 0x0 0x0
 #define MX8MM_IOMUXC_SD1_CMD_GPIO2_IO1                                      0x0A4 0x30C 0x000 0x5 0x0
 #define MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0                                 0x0A8 0x310 0x000 0x0 0x0
-#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2                                    0x0A8 0x31  0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2                                    0x0A8 0x310 0x000 0x5 0x0
 #define MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1                                 0x0AC 0x314 0x000 0x0 0x0
 #define MX8MM_IOMUXC_SD1_DATA1_GPIO2_IO3                                    0x0AC 0x314 0x000 0x5 0x0
 #define MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2                                 0x0B0 0x318 0x000 0x0 0x0
index b94b020..68e8fa1 100644 (file)
 #define MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD                                     0x0A4 0x30C 0x000 0x0 0x0
 #define MX8MQ_IOMUXC_SD1_CMD_GPIO2_IO1                                      0x0A4 0x30C 0x000 0x5 0x0
 #define MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0                                 0x0A8 0x310 0x000 0x0 0x0
-#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2                                    0x0A8 0x31  0x000 0x5 0x0
+#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2                                    0x0A8 0x310 0x000 0x5 0x0
 #define MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1                                 0x0AC 0x314 0x000 0x0 0x0
 #define MX8MQ_IOMUXC_SD1_DATA1_GPIO2_IO3                                    0x0AC 0x314 0x000 0x5 0x0
 #define MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2                                 0x0B0 0x318 0x000 0x0 0x0
index d239ab7..53e817c 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: (GPL-2.0+ OR MIT)
 /*
  * Device Tree file for CZ.NIC Turris Mox Board
- * 2019 by Marek Behun <marek.behun@nic.cz>
+ * 2019 by Marek Behún <kabel@kernel.org>
  */
 
 /dts-v1/;
index 64179a3..c6f5df2 100644 (file)
                };
 
                CP11X_LABEL(sata0): sata@540000 {
-                       compatible = "marvell,armada-8k-ahci";
+                       compatible = "marvell,armada-8k-ahci",
+                       "generic-ahci";
                        reg = <0x540000 0x30000>;
                        dma-coherent;
+                       interrupts = <107 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&CP11X_LABEL(clk) 1 15>,
                                 <&CP11X_LABEL(clk) 1 16>;
                        #address-cells = <1>;
                        status = "disabled";
 
                        sata-port@0 {
-                               interrupts = <109 IRQ_TYPE_LEVEL_HIGH>;
                                reg = <0>;
                        };
 
                        sata-port@1 {
-                               interrupts = <107 IRQ_TYPE_LEVEL_HIGH>;
                                reg = <1>;
                        };
                };
index 5df500d..8a078fc 100644 (file)
@@ -97,9 +97,9 @@
        .popsection
        .subsection 1
 663:   \insn2
-664:   .previous
-       .org    . - (664b-663b) + (662b-661b)
+664:   .org    . - (664b-663b) + (662b-661b)
        .org    . - (662b-661b) + (664b-663b)
+       .previous
        .endif
 .endm
 
  */
 .macro alternative_endif
 664:
+       .org    . - (664b-663b) + (662b-661b)
+       .org    . - (662b-661b) + (664b-663b)
        .if .Lasm_alt_mode==0
        .previous
        .endif
-       .org    . - (664b-663b) + (662b-661b)
-       .org    . - (662b-661b) + (664b-663b)
 .endm
 
 /*
index 4e90c2d..94d4025 100644 (file)
 #define CPTR_EL2_DEFAULT       CPTR_EL2_RES1
 
 /* Hyp Debug Configuration Register bits */
+#define MDCR_EL2_TTRF          (1 << 19)
 #define MDCR_EL2_TPMS          (1 << 14)
 #define MDCR_EL2_E2PB_MASK     (UL(0x3))
 #define MDCR_EL2_E2PB_SHIFT    (UL(12))
index 3333950..ea48721 100644 (file)
@@ -53,7 +53,7 @@ static inline unsigned long find_zero(unsigned long mask)
  */
 static inline unsigned long load_unaligned_zeropad(const void *addr)
 {
-       unsigned long ret, offset;
+       unsigned long ret, tmp;
 
        /* Load word from unaligned pointer addr */
        asm(
@@ -61,9 +61,9 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
        "2:\n"
        "       .pushsection .fixup,\"ax\"\n"
        "       .align 2\n"
-       "3:     and     %1, %2, #0x7\n"
-       "       bic     %2, %2, #0x7\n"
-       "       ldr     %0, [%2]\n"
+       "3:     bic     %1, %2, #0x7\n"
+       "       ldr     %0, [%1]\n"
+       "       and     %1, %2, #0x7\n"
        "       lsl     %1, %1, #0x3\n"
 #ifndef __AARCH64EB__
        "       lsr     %0, %0, %1\n"
@@ -73,7 +73,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
        "       b       2b\n"
        "       .popsection\n"
        _ASM_EXTABLE(1b, 3b)
-       : "=&r" (ret), "=&r" (offset)
+       : "=&r" (ret), "=&r" (tmp)
        : "r" (addr), "Q" (*(unsigned long *)addr));
 
        return ret;
index 2a5d985..e5281e1 100644 (file)
@@ -383,7 +383,6 @@ static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
         * of support.
         */
        S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
        ARM64_FTR_END,
 };
index a31a0a7..6acfc5e 100644 (file)
@@ -148,16 +148,18 @@ alternative_cb_end
        .endm
 
        /* Check for MTE asynchronous tag check faults */
-       .macro check_mte_async_tcf, flgs, tmp
+       .macro check_mte_async_tcf, tmp, ti_flags
 #ifdef CONFIG_ARM64_MTE
+       .arch_extension lse
 alternative_if_not ARM64_MTE
        b       1f
 alternative_else_nop_endif
        mrs_s   \tmp, SYS_TFSRE0_EL1
        tbz     \tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
        /* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
-       orr     \flgs, \flgs, #_TIF_MTE_ASYNC_FAULT
-       str     \flgs, [tsk, #TSK_TI_FLAGS]
+       mov     \tmp, #_TIF_MTE_ASYNC_FAULT
+       add     \ti_flags, tsk, #TSK_TI_FLAGS
+       stset   \tmp, [\ti_flags]
        msr_s   SYS_TFSRE0_EL1, xzr
 1:
 #endif
@@ -244,7 +246,7 @@ alternative_else_nop_endif
        disable_step_tsk x19, x20
 
        /* Check for asynchronous tag check faults in user space */
-       check_mte_async_tcf x19, x22
+       check_mte_async_tcf x22, x23
        apply_ssbd 1, x22, x23
 
        ptrauth_keys_install_kernel tsk, x20, x22, x23
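
The check_mte_async_tcf change above sets the TIF_MTE_ASYNC_FAULT bit with a single atomic stset on the thread_info flags word instead of a separate load/orr/str sequence, presumably because the flags word can be updated concurrently and a non-atomic read-modify-write may drop such an update. A userspace C11 sketch of the difference (plain atomics and pthreads, not the kernel's flag words; build with -pthread):

/* Illustration only: the racy load/or/store can lose FLAG_B if the other
 * thread sets it between the load and the store; atomic_fetch_or cannot. */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

#define FLAG_A (1u << 0)
#define FLAG_B (1u << 1)

static _Atomic unsigned int flags;

static void *set_b(void *arg)
{
	(void)arg;
	atomic_fetch_or(&flags, FLAG_B);    /* plays the role of stset */
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, set_b, NULL);

	/* Racy shape: separate read-modify-write steps. */
	unsigned int tmp = atomic_load(&flags);
	tmp |= FLAG_A;
	atomic_store(&flags, tmp);          /* may overwrite a concurrent FLAG_B */

	pthread_join(t, NULL);
	printf("flags after racy update:   %#x\n", atomic_load(&flags));

	/* Safe shape: one atomic OR. */
	atomic_fetch_or(&flags, FLAG_B);
	printf("flags after atomic update: %#x\n", atomic_load(&flags));
	return 0;
}
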
index 66aac28..85645b2 100644 (file)
@@ -267,10 +267,12 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
                if (!instruction_pointer(regs))
                        BUG();
 
-               if (kcb->kprobe_status == KPROBE_REENTER)
+               if (kcb->kprobe_status == KPROBE_REENTER) {
                        restore_previous_kprobe(kcb);
-               else
+               } else {
+                       kprobes_restore_local_irqflag(kcb, regs);
                        reset_current_kprobe();
+               }
 
                break;
        case KPROBE_HIT_ACTIVE:
index 5bfd9b8..4ea9392 100644 (file)
@@ -134,7 +134,7 @@ SYM_FUNC_START(_cpu_resume)
         */
        bl      cpu_do_resume
 
-#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
+#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
        mov     x0, sp
        bl      kasan_unpoison_task_stack_below
 #endif
index 7a7e425..dbc8905 100644 (file)
@@ -89,6 +89,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
  *  - Debug ROM Address (MDCR_EL2_TDRA)
  *  - OS related registers (MDCR_EL2_TDOSA)
  *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
+ *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
  *
  * Additionally, KVM only traps guest accesses to the debug registers if
  * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
@@ -112,6 +113,7 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
        vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
        vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
                                MDCR_EL2_TPMS |
+                               MDCR_EL2_TTRF |
                                MDCR_EL2_TPMCR |
                                MDCR_EL2_TDRA |
                                MDCR_EL2_TDOSA);
index ee3682b..39f8f7f 100644 (file)
@@ -429,6 +429,13 @@ u64 __vgic_v3_get_gic_config(void)
        if (has_vhe())
                flags = local_daif_save();
 
+       /*
+        * Table 11-2 "Permitted ICC_SRE_ELx.SRE settings" indicates
+        * that to be able to set ICC_SRE_EL1.SRE to 0, all the
+        * interrupt overrides must be set. You've got to love this.
+        */
+       sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
+       isb();
        write_gicreg(0, ICC_SRE_EL1);
        isb();
 
@@ -436,6 +443,8 @@ u64 __vgic_v3_get_gic_config(void)
 
        write_gicreg(sre, ICC_SRE_EL1);
        isb();
+       sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
+       isb();
 
        if (has_vhe())
                local_daif_restore(flags);
index 34e9122..8de5b98 100644 (file)
@@ -314,7 +314,7 @@ config FORCE_MAX_ZONEORDER
        int "Maximum zone order"
        default "11"
 
-config RAM_BASE
+config DRAM_BASE
        hex "DRAM start addr (the same with memory-section in dts)"
        default 0x0
 
index 3b91fc3..ed74514 100644 (file)
@@ -28,7 +28,7 @@
 #define SSEG_SIZE      0x20000000
 #define LOWMEM_LIMIT   (SSEG_SIZE * 2)
 
-#define PHYS_OFFSET_OFFSET (CONFIG_RAM_BASE & (SSEG_SIZE - 1))
+#define PHYS_OFFSET_OFFSET (CONFIG_DRAM_BASE & (SSEG_SIZE - 1))
 
 #ifndef __ASSEMBLY__
 
index ca0d596..8916a28 100644 (file)
@@ -55,8 +55,6 @@ CONFIG_CHR_DEV_SG=m
 CONFIG_SCSI_FC_ATTRS=y
 CONFIG_SCSI_SYM53C8XX_2=y
 CONFIG_SCSI_QLOGIC_1280=y
-CONFIG_ATA=y
-CONFIG_ATA_PIIX=y
 CONFIG_SATA_VITESSE=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=m
index b3aa460..0817913 100644 (file)
@@ -54,8 +54,7 @@
 
 static inline unsigned long user_stack_pointer(struct pt_regs *regs)
 {
-       /* FIXME: should this be bspstore + nr_dirty regs? */
-       return regs->ar_bspstore;
+       return regs->r12;
 }
 
 static inline int is_syscall_success(struct pt_regs *regs)
@@ -79,11 +78,6 @@ static inline long regs_return_value(struct pt_regs *regs)
        unsigned long __ip = instruction_pointer(regs);                 \
        (__ip & ~3UL) + ((__ip & 3UL) << 2);                            \
 })
-/*
- * Why not default?  Because user_stack_pointer() on ia64 gives register
- * stack backing store instead...
- */
-#define current_user_stack_pointer() (current_pt_regs()->r12)
 
   /* given a pointer to a task_struct, return the user's pt_regs */
 # define task_pt_regs(t)               (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
index 03b3a02..c310b4c 100644 (file)
@@ -95,7 +95,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
  * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
  * called yet.  Note that node 0 will also count all non-existent cpus.
  */
-static int __meminit early_nr_cpus_node(int node)
+static int early_nr_cpus_node(int node)
 {
        int cpu, n = 0;
 
@@ -110,7 +110,7 @@ static int __meminit early_nr_cpus_node(int node)
  * compute_pernodesize - compute size of pernode data
  * @node: the node id.
  */
-static unsigned long __meminit compute_pernodesize(int node)
+static unsigned long compute_pernodesize(int node)
 {
        unsigned long pernodesize = 0, cpus;
 
@@ -367,7 +367,7 @@ static void __init reserve_pernode_space(void)
        }
 }
 
-static void __meminit scatter_node_data(void)
+static void scatter_node_data(void)
 {
        pg_data_t **dst;
        int node;
index 9e8f0cc..2411ea9 100644 (file)
@@ -167,7 +167,7 @@ static inline __attribute_const__ int __virt_to_node_shift(void)
        ((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn;          \
 })
 #else
-#define ARCH_PFN_OFFSET (m68k_memory[0].addr)
+#define ARCH_PFN_OFFSET (m68k_memory[0].addr >> PAGE_SHIFT)
 #include <asm-generic/memory_model.h>
 #endif
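
The m68k fix above shifts the base address right by PAGE_SHIFT so that ARCH_PFN_OFFSET is a page frame number, as the generic memory model uses it, rather than a byte address. A tiny standalone demo of the unit mismatch, with made-up addresses and a 4 KiB page size:

#include <stdio.h>

#define PAGE_SHIFT 12UL   /* 4 KiB pages */

int main(void)
{
	unsigned long ram_base = 0x40000000UL;    /* hypothetical start of RAM */
	unsigned long addr     = 0x40005000UL;    /* a physical address in it  */

	unsigned long offset_in_bytes  = ram_base;                /* the old, wrong unit  */
	unsigned long offset_in_frames = ram_base >> PAGE_SHIFT;  /* the fixed unit (PFN) */

	unsigned long pfn = addr >> PAGE_SHIFT;

	/* Only the frame-based offset yields the expected page index (5). */
	printf("index with byte offset : %lu\n", pfn - offset_in_bytes);
	printf("index with frame offset: %lu\n", pfn - offset_in_frames);
	return 0;
}
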
 
index 279be01..23a1403 100644 (file)
@@ -43,7 +43,7 @@
 #include <asm/prom.h>
 
 #ifdef CONFIG_MIPS_ELF_APPENDED_DTB
-const char __section(".appended_dtb") __appended_dtb[0x100000];
+char __section(".appended_dtb") __appended_dtb[0x100000];
 #endif /* CONFIG_MIPS_ELF_APPENDED_DTB */
 
 struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
index 6eb98a7..ad5344e 100644 (file)
@@ -238,7 +238,7 @@ void flush_dcache_page(struct page *page)
 {
        struct address_space *mapping;
 
-       mapping = page_mapping(page);
+       mapping = page_mapping_file(page);
        if (mapping && !mapping_mapped(mapping))
                set_bit(PG_dcache_dirty, &page->flags);
        else {
index cf5ee9b..84ee232 100644 (file)
@@ -72,7 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
 #endif
        case 4: return __cmpxchg_u32((unsigned int *)ptr,
                                     (unsigned int)old, (unsigned int)new_);
-       case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_);
+       case 1: return __cmpxchg_u8((u8 *)ptr, old & 0xff, new_ & 0xff);
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
index 11ece0d..b5fbcd2 100644 (file)
@@ -272,7 +272,6 @@ on downward growing arches, it looks like this:
        regs->gr[23] = 0;                               \
 } while(0)
 
-struct task_struct;
 struct mm_struct;
 
 /* Free all resources held by a thread. */
index 853c19c..dec951d 100644 (file)
@@ -5,34 +5,10 @@
  * Floating-point emulation code
  *  Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
  */
-/*
- * BEGIN_DESC
- * 
- *  File: 
- *      @(#)   pa/fp/fpu.h             $Revision: 1.1 $
- * 
- *  Purpose:
- *      <<please update with a synopis of the functionality provided by this file>>
- * 
- * 
- * END_DESC  
-*/
-
-#ifdef __NO_PA_HDRS
-    PA header file -- do not include this header file for non-PA builds.
-#endif
-
 
 #ifndef _MACHINE_FPU_INCLUDED /* allows multiple inclusion */
 #define _MACHINE_FPU_INCLUDED
 
-#if 0
-#ifndef _SYS_STDSYMS_INCLUDED
-#    include <sys/stdsyms.h>
-#endif   /* _SYS_STDSYMS_INCLUDED  */
-#include  <machine/pdc/pdc_rqsts.h>
-#endif
-
 #define PA83_FPU_FLAG    0x00000001
 #define PA89_FPU_FLAG    0x00000002
 #define PA2_0_FPU_FLAG   0x00000010
 #define COPR_FP        0x00000080      /* Floating point -- Coprocessor 0 */
 #define SFU_MPY_DIVIDE 0x00008000      /* Multiply/Divide __ SFU 0 */
 
-
 #define EM_FPU_TYPE_OFFSET 272
 
 /* version of EMULATION software for COPR,0,0 instruction */
 #define EMULATION_VERSION 4
 
 /*
- * The only was to differeniate between TIMEX and ROLEX (or PCX-S and PCX-T)
- * is thorough the potential type field from the PDC_MODEL call.  The 
- * following flags are used at assist this differeniation.
+ * The only way to differentiate between TIMEX and ROLEX (or PCX-S and PCX-T)
+ * is through the potential type field from the PDC_MODEL call.
+ * The following flags are used to assist this differentiation.
  */
 
 #define ROLEX_POTENTIAL_KEY_FLAGS      PDC_MODEL_CPU_KEY_WORD_TO_IO
 #define TIMEX_POTENTIAL_KEY_FLAGS      (PDC_MODEL_CPU_KEY_QUAD_STORE | \
                                         PDC_MODEL_CPU_KEY_RECIP_SQRT)
 
-
 #endif /* ! _MACHINE_FPU_INCLUDED */
index 6084fa4..f66b63e 100644 (file)
@@ -191,3 +191,7 @@ $(obj)/prom_init_check: $(src)/prom_init_check.sh $(obj)/prom_init.o FORCE
 targets += prom_init_check
 
 clean-files := vmlinux.lds
+
+# Force dependency (incbin is bad)
+$(obj)/vdso32_wrapper.o : $(obj)/vdso32/vdso32.so.dbg
+$(obj)/vdso64_wrapper.o : $(obj)/vdso64/vdso64.so.dbg
index 8ebc11d..77abd1a 100644 (file)
@@ -6,11 +6,11 @@
 CFLAGS_ptrace-view.o           += -DUTS_MACHINE='"$(UTS_MACHINE)"'
 
 obj-y                          += ptrace.o ptrace-view.o
-obj-$(CONFIG_PPC_FPU_REGS)     += ptrace-fpu.o
+obj-y                          += ptrace-fpu.o
 obj-$(CONFIG_COMPAT)           += ptrace32.o
 obj-$(CONFIG_VSX)              += ptrace-vsx.o
 ifneq ($(CONFIG_VSX),y)
-obj-$(CONFIG_PPC_FPU_REGS)     += ptrace-novsx.o
+obj-y                          += ptrace-novsx.o
 endif
 obj-$(CONFIG_ALTIVEC)          += ptrace-altivec.o
 obj-$(CONFIG_SPE)              += ptrace-spe.o
index 3487f2c..eafe5f0 100644 (file)
@@ -165,22 +165,8 @@ int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data);
 extern const struct user_regset_view user_ppc_native_view;
 
 /* ptrace-fpu */
-#ifdef CONFIG_PPC_FPU_REGS
 int ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data);
 int ptrace_put_fpr(struct task_struct *child, int index, unsigned long data);
-#else
-static inline int
-ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data)
-{
-       return -EIO;
-}
-
-static inline int
-ptrace_put_fpr(struct task_struct *child, int index, unsigned long data)
-{
-       return -EIO;
-}
-#endif
 
 /* ptrace-(no)adv */
 void ppc_gethwdinfo(struct ppc_debug_info *dbginfo);
index 8301cb5..5dca193 100644 (file)
@@ -8,32 +8,42 @@
 
 int ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data)
 {
+#ifdef CONFIG_PPC_FPU_REGS
        unsigned int fpidx = index - PT_FPR0;
+#endif
 
        if (index > PT_FPSCR)
                return -EIO;
 
+#ifdef CONFIG_PPC_FPU_REGS
        flush_fp_to_thread(child);
        if (fpidx < (PT_FPSCR - PT_FPR0))
                memcpy(data, &child->thread.TS_FPR(fpidx), sizeof(long));
        else
                *data = child->thread.fp_state.fpscr;
+#else
+       *data = 0;
+#endif
 
        return 0;
 }
 
 int ptrace_put_fpr(struct task_struct *child, int index, unsigned long data)
 {
+#ifdef CONFIG_PPC_FPU_REGS
        unsigned int fpidx = index - PT_FPR0;
+#endif
 
        if (index > PT_FPSCR)
                return -EIO;
 
+#ifdef CONFIG_PPC_FPU_REGS
        flush_fp_to_thread(child);
        if (fpidx < (PT_FPSCR - PT_FPR0))
                memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long));
        else
                child->thread.fp_state.fpscr = data;
+#endif
 
        return 0;
 }
index b3b3683..7433f3d 100644 (file)
 int fpr_get(struct task_struct *target, const struct user_regset *regset,
            struct membuf to)
 {
+#ifdef CONFIG_PPC_FPU_REGS
        BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
                     offsetof(struct thread_fp_state, fpr[32]));
 
        flush_fp_to_thread(target);
 
        return membuf_write(&to, &target->thread.fp_state, 33 * sizeof(u64));
+#else
+       return membuf_write(&to, &empty_zero_page, 33 * sizeof(u64));
+#endif
 }
 
 /*
@@ -46,6 +50,7 @@ int fpr_set(struct task_struct *target, const struct user_regset *regset,
            unsigned int pos, unsigned int count,
            const void *kbuf, const void __user *ubuf)
 {
+#ifdef CONFIG_PPC_FPU_REGS
        BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
                     offsetof(struct thread_fp_state, fpr[32]));
 
@@ -53,4 +58,7 @@ int fpr_set(struct task_struct *target, const struct user_regset *regset,
 
        return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.fp_state, 0, -1);
+#else
+       return 0;
+#endif
 }
index 2bad806..6ccffc6 100644 (file)
@@ -522,13 +522,11 @@ static const struct user_regset native_regsets[] = {
                .size = sizeof(long), .align = sizeof(long),
                .regset_get = gpr_get, .set = gpr_set
        },
-#ifdef CONFIG_PPC_FPU_REGS
        [REGSET_FPR] = {
                .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
                .size = sizeof(double), .align = sizeof(double),
                .regset_get = fpr_get, .set = fpr_set
        },
-#endif
 #ifdef CONFIG_ALTIVEC
        [REGSET_VMX] = {
                .core_note_type = NT_PPC_VMX, .n = 34,
index 75ee918..f651b99 100644 (file)
@@ -775,7 +775,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
        else
                prepare_save_user_regs(1);
 
-       if (!user_write_access_begin(frame, sizeof(*frame)))
+       if (!user_access_begin(frame, sizeof(*frame)))
                goto badframe;
 
        /* Put the siginfo & fill in most of the ucontext */
@@ -809,17 +809,15 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
                unsafe_put_user(PPC_INST_ADDI + __NR_rt_sigreturn, &mctx->mc_pad[0],
                                failed);
                unsafe_put_user(PPC_INST_SC, &mctx->mc_pad[1], failed);
+               asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
        }
        unsafe_put_sigset_t(&frame->uc.uc_sigmask, oldset, failed);
 
-       user_write_access_end();
+       user_access_end();
 
        if (copy_siginfo_to_user(&frame->info, &ksig->info))
                goto badframe;
 
-       if (tramp == (unsigned long)mctx->mc_pad)
-               flush_icache_range(tramp, tramp + 2 * sizeof(unsigned long));
-
        regs->link = tramp;
 
 #ifdef CONFIG_PPC_FPU_REGS
@@ -844,7 +842,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
        return 0;
 
 failed:
-       user_write_access_end();
+       user_access_end();
 
 badframe:
        signal_fault(tsk, regs, "handle_rt_signal32", frame);
@@ -879,7 +877,7 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
        else
                prepare_save_user_regs(1);
 
-       if (!user_write_access_begin(frame, sizeof(*frame)))
+       if (!user_access_begin(frame, sizeof(*frame)))
                goto badframe;
        sc = (struct sigcontext __user *) &frame->sctx;
 
@@ -908,11 +906,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
                /* Set up the sigreturn trampoline: li r0,sigret; sc */
                unsafe_put_user(PPC_INST_ADDI + __NR_sigreturn, &mctx->mc_pad[0], failed);
                unsafe_put_user(PPC_INST_SC, &mctx->mc_pad[1], failed);
+               asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
        }
-       user_write_access_end();
-
-       if (tramp == (unsigned long)mctx->mc_pad)
-               flush_icache_range(tramp, tramp + 2 * sizeof(unsigned long));
+       user_access_end();
 
        regs->link = tramp;
 
@@ -935,7 +931,7 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
        return 0;
 
 failed:
-       user_write_access_end();
+       user_access_end();
 
 badframe:
        signal_fault(tsk, regs, "handle_signal32", frame);
index 764170f..3805519 100644 (file)
@@ -887,7 +887,8 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot,
 
        want_v = hpte_encode_avpn(vpn, psize, ssize);
 
-       flags = (newpp & 7) | H_AVPN;
+       flags = (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO)) | H_AVPN;
+       flags |= (newpp & HPTE_R_KEY_HI) >> 48;
        if (mmu_has_feature(MMU_FTR_KERNEL_RO))
                /* Move pp0 into bit 8 (IBM 55) */
                flags |= (newpp & HPTE_R_PP0) >> 55;
index ea4d6a6..e83e089 100644 (file)
@@ -452,12 +452,28 @@ static int do_suspend(void)
        return ret;
 }
 
+/**
+ * struct pseries_suspend_info - State shared between CPUs for join/suspend.
+ * @counter: Threads are to increment this upon resuming from suspend
+ *           or if an error is received from H_JOIN. The thread which performs
+ *           the first increment (i.e. sets it to 1) is responsible for
+ *           waking the other threads.
+ * @done: False if join/suspend is in progress. True if the operation is
+ *        complete (successful or not).
+ */
+struct pseries_suspend_info {
+       atomic_t counter;
+       bool done;
+};
+
 static int do_join(void *arg)
 {
-       atomic_t *counter = arg;
+       struct pseries_suspend_info *info = arg;
+       atomic_t *counter = &info->counter;
        long hvrc;
        int ret;
 
+retry:
        /* Must ensure MSR.EE off for H_JOIN. */
        hard_irq_disable();
        hvrc = plpar_hcall_norets(H_JOIN);
@@ -473,8 +489,20 @@ static int do_join(void *arg)
        case H_SUCCESS:
                /*
                 * The suspend is complete and this cpu has received a
-                * prod.
+                * prod, or we've received a stray prod from unrelated
+                * code (e.g. paravirt spinlocks) and we need to join
+                * again.
+                *
+                * This barrier orders the return from H_JOIN above vs
+                * the load of info->done. It pairs with the barrier
+                * in the wakeup/prod path below.
                 */
+               smp_mb();
+               if (READ_ONCE(info->done) == false) {
+                       pr_info_ratelimited("premature return from H_JOIN on CPU %i, retrying",
+                                           smp_processor_id());
+                       goto retry;
+               }
                ret = 0;
                break;
        case H_BAD_MODE:
@@ -488,6 +516,13 @@ static int do_join(void *arg)
 
        if (atomic_inc_return(counter) == 1) {
                pr_info("CPU %u waking all threads\n", smp_processor_id());
+               WRITE_ONCE(info->done, true);
+               /*
+                * This barrier orders the store to info->done vs subsequent
+                * H_PRODs to wake the other CPUs. It pairs with the barrier
+                * in the H_SUCCESS case above.
+                */
+               smp_mb();
                prod_others();
        }
        /*
@@ -535,11 +570,16 @@ static int pseries_suspend(u64 handle)
        int ret;
 
        while (true) {
-               atomic_t counter = ATOMIC_INIT(0);
+               struct pseries_suspend_info info;
                unsigned long vasi_state;
                int vasi_err;
 
-               ret = stop_machine(do_join, &counter, cpu_online_mask);
+               info = (struct pseries_suspend_info) {
+                       .counter = ATOMIC_INIT(0),
+                       .done = false,
+               };
+
+               ret = stop_machine(do_join, &info, cpu_online_mask);
                if (ret == 0)
                        break;
                /*
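
The paired smp_mb() calls added above order the store of info->done against the H_PROD wake-ups on the waking side, and the return from H_JOIN against the load of info->done on the woken side, so a thread woken by a stray prod can see that the suspend has not finished and join again. A userspace analogue of that hand-off, with C11 release/acquire standing in for the paired barriers and a pthread standing in for the joined CPU (build with -pthread):

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool done;    /* plays the role of info->done */

static void *joined_thread(void *arg)
{
	(void)arg;
	/* "Return from H_JOIN": wake-ups may be spurious, so re-check done
	 * (acquire pairs with the release below) and retry until it is set. */
	while (!atomic_load_explicit(&done, memory_order_acquire))
		usleep(1000);                     /* pretend to H_JOIN again */
	puts("woken thread: done is set, this was the real wake-up");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, joined_thread, NULL);
	usleep(10000);                            /* pretend the suspend completed */

	/* Publish completion before waking, mirroring
	 * WRITE_ONCE(info->done, true); smp_mb(); prod_others(); */
	atomic_store_explicit(&done, true, memory_order_release);

	pthread_join(t, NULL);
	return 0;
}
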
index 87d7b52..4515a10 100644 (file)
@@ -153,7 +153,7 @@ config ARCH_FLATMEM_ENABLE
 config ARCH_SPARSEMEM_ENABLE
        def_bool y
        depends on MMU
-       select SPARSEMEM_STATIC if 32BIT && SPARSMEM
+       select SPARSEMEM_STATIC if 32BIT && SPARSEMEM
        select SPARSEMEM_VMEMMAP_ENABLE if 64BIT
 
 config ARCH_SELECT_MEMORY_MODEL
@@ -314,7 +314,7 @@ endchoice
 # Common NUMA Features
 config NUMA
        bool "NUMA Memory Allocation and Scheduler Support"
-       depends on SMP
+       depends on SMP && MMU
        select GENERIC_ARCH_NUMA
        select OF_NUMA
        select ARCH_SUPPORTS_NUMA_BALANCING
index 824b2c9..f944062 100644 (file)
@@ -306,7 +306,9 @@ do {                                                                \
  * data types like structures or arrays.
  *
  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
- * to the result of dereferencing @ptr.
+ * to the result of dereferencing @ptr. The value of @x is copied to avoid
+ * re-ordering where @x is evaluated inside the block that enables user-space
+ * access (thus bypassing user space protection if @x is a function).
  *
  * Caller must check the pointer with access_ok() before calling this
  * function.
@@ -316,12 +318,13 @@ do {                                                              \
 #define __put_user(x, ptr)                                     \
 ({                                                             \
        __typeof__(*(ptr)) __user *__gu_ptr = (ptr);            \
+       __typeof__(*__gu_ptr) __val = (x);                      \
        long __pu_err = 0;                                      \
                                                                \
        __chk_user_ptr(__gu_ptr);                               \
                                                                \
        __enable_user_access();                                 \
-       __put_user_nocheck(x, __gu_ptr, __pu_err);              \
+       __put_user_nocheck(__val, __gu_ptr, __pu_err);          \
        __disable_user_access();                                \
                                                                \
        __pu_err;                                               \
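
The new __typeof__ local above makes __put_user() evaluate @x once, before __enable_user_access(), so any side effects in @x (for instance a function call) cannot run inside the window where user-space access protections are lifted, exactly as the added comment describes. A standalone sketch of that macro-hygiene point, with enable_window()/disable_window() as stand-ins for the real access toggles:

#include <stdio.h>

static int window_open;

static void enable_window(void)  { window_open = 1; }
static void disable_window(void) { window_open = 0; }

static int value_from_helper(void)
{
	/* Side effects land wherever the macro evaluates its argument. */
	printf("helper runs with window %s\n", window_open ? "OPEN" : "closed");
	return 42;
}

/* Unsafe shape: (x) is evaluated between enable and disable. */
#define PUT_UNSAFE(x, ptr) do {          \
	enable_window();                 \
	*(ptr) = (x);                    \
	disable_window();                \
} while (0)

/* Safer shape, as in the hunk: snapshot (x) into a typed local first. */
#define PUT_SAFE(x, ptr) do {                    \
	__typeof__(*(ptr)) __val = (x);          \
	enable_window();                         \
	*(ptr) = __val;                          \
	disable_window();                        \
} while (0)

int main(void)
{
	int dst;

	PUT_UNSAFE(value_from_helper(), &dst);   /* helper sees the window OPEN   */
	PUT_SAFE(value_from_helper(), &dst);     /* helper sees the window closed */
	return 0;
}
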
index 744f320..83095fa 100644 (file)
@@ -130,6 +130,9 @@ skip_context_tracking:
         */
        andi t0, s1, SR_PIE
        beqz t0, 1f
+       /* kprobes, entered via ebreak, must have interrupts disabled. */
+       li t0, EXC_BREAKPOINT
+       beq s4, t0, 1f
 #ifdef CONFIG_TRACE_IRQFLAGS
        call trace_hardirqs_on
 #endif
@@ -447,6 +450,7 @@ ENDPROC(__switch_to)
 #endif
 
        .section ".rodata"
+       .align LGREG
        /* Exception vector table */
 ENTRY(excp_vect_table)
        RISCV_PTR do_trap_insn_misaligned
index 17ca5e9..aab85a8 100644 (file)
@@ -9,10 +9,16 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
        struct kprobe *p;
        struct pt_regs *regs;
        struct kprobe_ctlblk *kcb;
+       int bit;
 
+       bit = ftrace_test_recursion_trylock(ip, parent_ip);
+       if (bit < 0)
+               return;
+
+       preempt_disable_notrace();
        p = get_kprobe((kprobe_opcode_t *)ip);
        if (unlikely(!p) || kprobe_disabled(p))
-               return;
+               goto out;
 
        regs = ftrace_get_regs(fregs);
        kcb = get_kprobe_ctlblk();
@@ -45,6 +51,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                 */
                __this_cpu_write(current_kprobe, NULL);
        }
+out:
+       preempt_enable_notrace();
+       ftrace_test_recursion_unlock(bit);
 }
 NOKPROBE_SYMBOL(kprobe_ftrace_handler);
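
The handler now takes the ftrace recursion guard and disables preemption before looking up the probe, and bails out instead of re-entering itself if it is already active. A generic userspace sketch of that bail-out-on-reentry pattern, with a thread-local flag standing in for ftrace_test_recursion_trylock()/unlock():

#include <stdbool.h>
#include <stdio.h>

static _Thread_local bool in_handler;

static void do_work(int depth);

static void handler(int depth)
{
	/* Guard: if we are already inside the handler, do not recurse. */
	if (in_handler) {
		printf("depth %d: recursion detected, bailing out\n", depth);
		return;
	}
	in_handler = true;

	do_work(depth);

	in_handler = false;
}

static void do_work(int depth)
{
	printf("depth %d: doing work\n", depth);
	if (depth < 1)
		handler(depth + 1);   /* the work re-triggers the handler */
}

int main(void)
{
	handler(0);
	return 0;
}
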
 
index 3f893c9..2b3e0cb 100644 (file)
@@ -14,7 +14,7 @@
 
 #include <asm/stacktrace.h>
 
-register const unsigned long sp_in_global __asm__("sp");
+register unsigned long sp_in_global __asm__("sp");
 
 #ifdef CONFIG_FRAME_POINTER
 
index 0879b5d..1357abf 100644 (file)
@@ -178,6 +178,7 @@ asmlinkage __visible void do_trap_break(struct pt_regs *regs)
        else
                die(regs, "Kernel BUG");
 }
+NOKPROBE_SYMBOL(do_trap_break);
 
 #ifdef CONFIG_GENERIC_BUG
 int is_valid_bugaddr(unsigned long pc)
index 8f17519..c5dbd55 100644 (file)
@@ -328,3 +328,4 @@ good_area:
        }
        return;
 }
+NOKPROBE_SYMBOL(do_page_fault);
index 4f85c6d..937d13c 100644 (file)
@@ -216,7 +216,7 @@ void __init kasan_init(void)
                        break;
 
                kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
-       };
+       }
 
        for (i = 0; i < PTRS_PER_PTE; i++)
                set_pte(&kasan_early_shadow_pte[i],
index ee056f4..2b54316 100644 (file)
@@ -12,6 +12,7 @@ enum stack_type {
        STACK_TYPE_IRQ,
        STACK_TYPE_NODAT,
        STACK_TYPE_RESTART,
+       STACK_TYPE_MCCK,
 };
 
 struct stack_info {
index 7b3cdb4..73ee891 100644 (file)
@@ -6,7 +6,7 @@
 #include <vdso/datapage.h>
 
 struct arch_vdso_data {
-       __u64 tod_steering_delta;
+       __s64 tod_steering_delta;
        __u64 tod_steering_end;
 };
 
index af013b4..2da0273 100644 (file)
@@ -37,10 +37,12 @@ static int diag8_noresponse(int cmdlen)
 
 static int diag8_response(int cmdlen, char *response, int *rlen)
 {
+       unsigned long _cmdlen = cmdlen | 0x40000000L;
+       unsigned long _rlen = *rlen;
        register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
        register unsigned long reg3 asm ("3") = (addr_t) response;
-       register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L;
-       register unsigned long reg5 asm ("5") = *rlen;
+       register unsigned long reg4 asm ("4") = _cmdlen;
+       register unsigned long reg5 asm ("5") = _rlen;
 
        asm volatile(
                "       diag    %2,%0,0x8\n"
index 0dc4b25..db1bc00 100644 (file)
@@ -79,6 +79,15 @@ static bool in_nodat_stack(unsigned long sp, struct stack_info *info)
        return in_stack(sp, info, STACK_TYPE_NODAT, top - THREAD_SIZE, top);
 }
 
+static bool in_mcck_stack(unsigned long sp, struct stack_info *info)
+{
+       unsigned long frame_size, top;
+
+       frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
+       top = S390_lowcore.mcck_stack + frame_size;
+       return in_stack(sp, info, STACK_TYPE_MCCK, top - THREAD_SIZE, top);
+}
+
 static bool in_restart_stack(unsigned long sp, struct stack_info *info)
 {
        unsigned long frame_size, top;
@@ -108,7 +117,8 @@ int get_stack_info(unsigned long sp, struct task_struct *task,
        /* Check per-cpu stacks */
        if (!in_irq_stack(sp, info) &&
            !in_nodat_stack(sp, info) &&
-           !in_restart_stack(sp, info))
+           !in_restart_stack(sp, info) &&
+           !in_mcck_stack(sp, info))
                goto unknown;
 
 recursion_check:
index c10b9f3..12de7a9 100644 (file)
@@ -401,15 +401,13 @@ ENTRY(\name)
        brasl   %r14,.Lcleanup_sie_int
 #endif
 0:     CHECK_STACK __LC_SAVE_AREA_ASYNC
-       lgr     %r11,%r15
        aghi    %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
-       stg     %r11,__SF_BACKCHAIN(%r15)
        j       2f
 1:     BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
        lctlg   %c1,%c1,__LC_KERNEL_ASCE
        lg      %r15,__LC_KERNEL_STACK
-       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
-2:     la      %r11,STACK_FRAME_OVERHEAD(%r15)
+2:     xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+       la      %r11,STACK_FRAME_OVERHEAD(%r15)
        stmg    %r0,%r7,__PT_R0(%r11)
        # clear user controlled registers to prevent speculative use
        xgr     %r0,%r0
@@ -445,6 +443,7 @@ INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
  * Load idle PSW.
  */
 ENTRY(psw_idle)
+       stg     %r14,(__SF_GPRS+8*8)(%r15)
        stg     %r3,__SF_EMPTY(%r15)
        larl    %r1,psw_idle_exit
        stg     %r1,__SF_EMPTY+8(%r15)
index 601c217..714269e 100644 (file)
@@ -174,7 +174,7 @@ void noinstr do_ext_irq(struct pt_regs *regs)
 
        memcpy(&regs->int_code, &S390_lowcore.ext_cpu_addr, 4);
        regs->int_parm = S390_lowcore.ext_params;
-       regs->int_parm_long = *(unsigned long *)S390_lowcore.ext_params2;
+       regs->int_parm_long = S390_lowcore.ext_params2;
 
        from_idle = !user_mode(regs) && regs->psw.addr == (unsigned long)psw_idle_exit;
        if (from_idle)
index 60da976..72134f9 100644 (file)
@@ -354,7 +354,7 @@ static int __init stack_realloc(void)
        if (!new)
                panic("Couldn't allocate machine check stack");
        WRITE_ONCE(S390_lowcore.mcck_stack, new + STACK_INIT_OFFSET);
-       memblock_free(old, THREAD_SIZE);
+       memblock_free_late(old, THREAD_SIZE);
        return 0;
 }
 early_initcall(stack_realloc);
index 165da96..326cb8f 100644 (file)
@@ -80,10 +80,12 @@ void __init time_early_init(void)
 {
        struct ptff_qto qto;
        struct ptff_qui qui;
+       int cs;
 
        /* Initialize TOD steering parameters */
        tod_steering_end = tod_clock_base.tod;
-       vdso_data->arch_data.tod_steering_end = tod_steering_end;
+       for (cs = 0; cs < CS_BASES; cs++)
+               vdso_data[cs].arch_data.tod_steering_end = tod_steering_end;
 
        if (!test_facility(28))
                return;
@@ -366,6 +368,7 @@ static void clock_sync_global(unsigned long delta)
 {
        unsigned long now, adj;
        struct ptff_qto qto;
+       int cs;
 
        /* Fixup the monotonic sched clock. */
        tod_clock_base.eitod += delta;
@@ -381,7 +384,10 @@ static void clock_sync_global(unsigned long delta)
                panic("TOD clock sync offset %li is too large to drift\n",
                      tod_steering_delta);
        tod_steering_end = now + (abs(tod_steering_delta) << 15);
-       vdso_data->arch_data.tod_steering_end = tod_steering_end;
+       for (cs = 0; cs < CS_BASES; cs++) {
+               vdso_data[cs].arch_data.tod_steering_end = tod_steering_end;
+               vdso_data[cs].arch_data.tod_steering_delta = tod_steering_delta;
+       }
 
        /* Update LPAR offset. */
        if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
index 97bbb4a..05b48b3 100644 (file)
@@ -56,8 +56,13 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
        else
                set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
 
-       /* Flush this CPU's TLB. */
+       /*
+        * Flush this CPU's TLB, assuming whoever did the allocation/free is
+        * likely to continue running on this CPU.
+        */
+       preempt_disable();
        flush_tlb_one_kernel(addr);
+       preempt_enable();
        return true;
 }
 
index 57ef209..630ff08 100644 (file)
@@ -132,7 +132,7 @@ void native_play_dead(void);
 void play_dead_common(void);
 void wbinvd_on_cpu(int cpu);
 int wbinvd_on_all_cpus(void);
-bool wakeup_cpu0(void);
+void cond_wakeup_cpu0(void);
 
 void native_smp_send_reschedule(int cpu);
 void native_send_call_func_ipi(const struct cpumask *mask);
index 7bdc023..14cd318 100644 (file)
@@ -1554,10 +1554,18 @@ void __init acpi_boot_table_init(void)
        /*
         * Initialize the ACPI boot-time table parser.
         */
-       if (acpi_table_init()) {
+       if (acpi_locate_initial_tables())
                disable_acpi();
-               return;
-       }
+       else
+               acpi_reserve_initial_tables();
+}
+
+int __init early_acpi_boot_init(void)
+{
+       if (acpi_disabled)
+               return 1;
+
+       acpi_table_init_complete();
 
        acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
 
@@ -1570,18 +1578,9 @@ void __init acpi_boot_table_init(void)
                } else {
                        printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
                        disable_acpi();
-                       return;
+                       return 1;
                }
        }
-}
-
-int __init early_acpi_boot_init(void)
-{
-       /*
-        * If acpi_disabled, bail out
-        */
-       if (acpi_disabled)
-               return 1;
 
        /*
         * Process the Multiple APIC Description Table (MADT), if present
index 56b6865..d5d8a35 100644 (file)
@@ -115,7 +115,7 @@ SYM_FUNC_START(do_suspend_lowlevel)
        movq    pt_regs_r14(%rax), %r14
        movq    pt_regs_r15(%rax), %r15
 
-#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
+#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
        /*
         * The suspend path may have poisoned some areas deeper in the stack,
         * which we now need to unpoison.
index d883176..ccab6cf 100644 (file)
@@ -1129,6 +1129,8 @@ void __init setup_arch(char **cmdline_p)
        reserve_initrd();
 
        acpi_table_upgrade();
+       /* Look for ACPI tables and reserve memory occupied by them. */
+       acpi_boot_table_init();
 
        vsmp_init();
 
@@ -1136,11 +1138,6 @@ void __init setup_arch(char **cmdline_p)
 
        early_platform_quirks();
 
-       /*
-        * Parse the ACPI tables for possible boot-time SMP configuration.
-        */
-       acpi_boot_table_init();
-
        early_acpi_boot_init();
 
        initmem_init();
index f877150..16703c3 100644 (file)
@@ -1659,13 +1659,17 @@ void play_dead_common(void)
        local_irq_disable();
 }
 
-bool wakeup_cpu0(void)
+/**
+ * cond_wakeup_cpu0 - Wake up CPU0 if needed.
+ *
+ * If NMI wants to wake up CPU0, start CPU0.
+ */
+void cond_wakeup_cpu0(void)
 {
        if (smp_processor_id() == 0 && enable_start_cpu0)
-               return true;
-
-       return false;
+               start_cpu0();
 }
+EXPORT_SYMBOL_GPL(cond_wakeup_cpu0);
 
 /*
  * We need to flush the caches before going to sleep, lest we have
@@ -1734,11 +1738,8 @@ static inline void mwait_play_dead(void)
                __monitor(mwait_ptr, 0, 0);
                mb();
                __mwait(eax, 0);
-               /*
-                * If NMI wants to wake up CPU0, start CPU0.
-                */
-               if (wakeup_cpu0())
-                       start_cpu0();
+
+               cond_wakeup_cpu0();
        }
 }
 
@@ -1749,11 +1750,8 @@ void hlt_play_dead(void)
 
        while (1) {
                native_halt();
-               /*
-                * If NMI wants to wake up CPU0, start CPU0.
-                */
-               if (wakeup_cpu0())
-                       start_cpu0();
+
+               cond_wakeup_cpu0();
        }
 }
 
index ac1874a..651e3e5 100644 (file)
@@ -556,7 +556,7 @@ DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
                tsk->thread.trap_nr = X86_TRAP_GP;
 
                if (fixup_vdso_exception(regs, X86_TRAP_GP, error_code, 0))
-                       return;
+                       goto exit;
 
                show_signal(tsk, SIGSEGV, "", desc, regs, error_code);
                force_sig(SIGSEGV);
@@ -1057,7 +1057,7 @@ static void math_error(struct pt_regs *regs, int trapnr)
                goto exit;
 
        if (fixup_vdso_exception(regs, trapnr, 0, 0))
-               return;
+               goto exit;
 
        force_sig_fault(SIGFPE, si_code,
                        (void __user *)uprobe_get_trap_addr(regs));
index 1b4766f..eafc4d6 100644 (file)
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 
-ccflags-y += -Iarch/x86/kvm
+ccflags-y += -I $(srctree)/arch/x86/kvm
 ccflags-$(CONFIG_KVM_WERROR) += -Werror
 
 ifeq ($(CONFIG_FRAME_POINTER),y)
index d75524b..951dae4 100644 (file)
@@ -5884,6 +5884,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
        struct kvm_mmu_page *sp;
        unsigned int ratio;
        LIST_HEAD(invalid_list);
+       bool flush = false;
        ulong to_zap;
 
        rcu_idx = srcu_read_lock(&kvm->srcu);
@@ -5905,19 +5906,19 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
                                      lpage_disallowed_link);
                WARN_ON_ONCE(!sp->lpage_disallowed);
                if (is_tdp_mmu_page(sp)) {
-                       kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn,
-                               sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level));
+                       flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
                } else {
                        kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
                        WARN_ON_ONCE(sp->lpage_disallowed);
                }
 
                if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
-                       kvm_mmu_commit_zap_page(kvm, &invalid_list);
+                       kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
                        cond_resched_rwlock_write(&kvm->mmu_lock);
+                       flush = false;
                }
        }
-       kvm_mmu_commit_zap_page(kvm, &invalid_list);
+       kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
 
        write_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, rcu_idx);
index 462b1f7..018d82e 100644 (file)
@@ -86,7 +86,7 @@ static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
        list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
 
 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-                         gfn_t start, gfn_t end, bool can_yield);
+                         gfn_t start, gfn_t end, bool can_yield, bool flush);
 
 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
 {
@@ -99,7 +99,7 @@ void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
 
        list_del(&root->link);
 
-       zap_gfn_range(kvm, root, 0, max_gfn, false);
+       zap_gfn_range(kvm, root, 0, max_gfn, false, false);
 
        free_page((unsigned long)root->spt);
        kmem_cache_free(mmu_page_header_cache, root);
@@ -668,20 +668,21 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
  * scheduler needs the CPU or there is contention on the MMU lock. If this
  * function cannot yield, it will not release the MMU lock or reschedule and
  * the caller must ensure it does not supply too large a GFN range, or the
- * operation can cause a soft lockup.
+ * operation can cause a soft lockup.  Note, in some use cases a flush may be
+ * required by prior actions.  Ensure the pending flush is performed prior to
+ * yielding.
  */
 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-                         gfn_t start, gfn_t end, bool can_yield)
+                         gfn_t start, gfn_t end, bool can_yield, bool flush)
 {
        struct tdp_iter iter;
-       bool flush_needed = false;
 
        rcu_read_lock();
 
        tdp_root_for_each_pte(iter, root, start, end) {
                if (can_yield &&
-                   tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) {
-                       flush_needed = false;
+                   tdp_mmu_iter_cond_resched(kvm, &iter, flush)) {
+                       flush = false;
                        continue;
                }
 
@@ -699,11 +700,11 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                        continue;
 
                tdp_mmu_set_spte(kvm, &iter, 0);
-               flush_needed = true;
+               flush = true;
        }
 
        rcu_read_unlock();
-       return flush_needed;
+       return flush;
 }
 
 /*
@@ -712,13 +713,14 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
  * SPTEs have been cleared and a TLB flush is needed before releasing the
  * MMU lock.
  */
-bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
+bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
+                                bool can_yield)
 {
        struct kvm_mmu_page *root;
        bool flush = false;
 
        for_each_tdp_mmu_root_yield_safe(kvm, root)
-               flush |= zap_gfn_range(kvm, root, start, end, true);
+               flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);
 
        return flush;
 }
@@ -930,7 +932,7 @@ static int zap_gfn_range_hva_wrapper(struct kvm *kvm,
                                     struct kvm_mmu_page *root, gfn_t start,
                                     gfn_t end, unsigned long unused)
 {
-       return zap_gfn_range(kvm, root, start, end, false);
+       return zap_gfn_range(kvm, root, start, end, false, false);
 }
 
 int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
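
The zap_gfn_range() rework threads a pending-flush flag into and out of the function, so work accumulated by earlier zaps is flushed before any yield point and whatever is still pending at the end is returned to the caller instead of being dropped. A generic sketch of that shape in plain C; the names and the yield condition are illustrative, not the KVM API:

#include <stdbool.h>
#include <stdio.h>

static void do_flush(void)
{
	puts("  flush applied");
}

/* Returns true if a flush is still pending when we return. */
static bool zap_range(int start, int end, bool can_yield, bool flush)
{
	for (int i = start; i < end; i++) {
		/* Pretend the scheduler wants the CPU every third item. */
		if (can_yield && i % 3 == 2) {
			if (flush)
				do_flush();   /* never yield with work pending */
			flush = false;
			printf("  (yielded before item %d)\n", i);
		}
		printf("  zapped %d\n", i);
		flush = true;                 /* this zap will need a flush */
	}
	return flush;
}

int main(void)
{
	bool flush = false;

	/* The caller accumulates the pending flush across calls ... */
	flush = zap_range(0, 4, true, flush);
	flush = zap_range(4, 8, true, flush);

	/* ... and performs one final flush if anything is still pending. */
	if (flush)
		do_flush();
	return 0;
}
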
index 3b761c1..31096ec 100644 (file)
@@ -8,7 +8,29 @@
 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);
 
-bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end);
+bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
+                                bool can_yield);
+static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start,
+                                            gfn_t end)
+{
+       return __kvm_tdp_mmu_zap_gfn_range(kvm, start, end, true);
+}
+static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+       gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level);
+
+       /*
+        * Don't allow yielding, as the caller may have a flush pending.  Note,
+        * if mmu_lock is held for write, zapping will never yield in this case,
+        * but explicitly disallow it for safety.  The TDP MMU does not yield
+        * until it has made forward progress (steps sideways), and when zapping
+        * a single shadow page that it's guaranteed to see (thus the mmu_lock
+        * requirement), its "step sideways" will always step beyond the bounds
+        * of the shadow page's gfn range and stop iterating before yielding.
+        */
+       lockdep_assert_held_write(&kvm->mmu_lock);
+       return __kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn, end, false);
+}
 void kvm_tdp_mmu_zap_all(struct kvm *kvm);
 
 int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
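
The zap_gfn_range() rework above threads the caller's pending-flush state through the walk and, per the updated comment, performs any pending TLB flush before the iterator yields; kvm_tdp_mmu_zap_sp() additionally passes can_yield=false because its caller may already owe a flush. A minimal standalone sketch of that accumulate-flush-before-yield pattern follows (all names and the fake workload are illustrative, not KVM code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for "zap one entry" and "flush remote TLBs". */
    static bool zap_entry(int gfn)         { return gfn % 3 == 0; }
    static void flush_remote_tlbs(void)    { puts("flush"); }
    static bool scheduler_wants_cpu(int i) { return i == 5; }

    /*
     * Walk [start, end); 'flush' carries pending-flush state into the loop so a
     * flush owed by the caller or by earlier iterations is issued before any
     * yield.  Returns whether a flush is still pending when the walk finishes.
     */
    static bool zap_range(int start, int end, bool can_yield, bool flush)
    {
            for (int gfn = start; gfn < end; gfn++) {
                    if (can_yield && scheduler_wants_cpu(gfn)) {
                            if (flush)
                                    flush_remote_tlbs();
                            flush = false;  /* nothing pending across the yield */
                            /* cond_resched() would go here; the walk resumes at gfn */
                    }
                    if (zap_entry(gfn))
                            flush = true;
            }
            return flush;
    }

    int main(void)
    {
            bool flush = false;

            /* Accumulate the flush across ranges, as the new wrapper does per root. */
            flush = zap_range(0, 8, true, flush);
            flush = zap_range(8, 16, true, flush);
            if (flush)
                    flush_remote_tlbs();
            return 0;
    }
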
index 35891d9..fb204ea 100644
@@ -246,11 +246,18 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
        return true;
 }
 
-static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
+static bool nested_vmcb_check_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
 {
        struct kvm_vcpu *vcpu = &svm->vcpu;
        bool vmcb12_lma;
 
+       /*
+        * FIXME: these should be done after copying the fields,
+        * to avoid TOC/TOU races.  For these save area checks
+        * the possible damage is limited since kvm_set_cr0 and
+        * kvm_set_cr4 handle failure; EFER_SVME is an exception
+        * so it is force-set later in nested_prepare_vmcb_save.
+        */
        if ((vmcb12->save.efer & EFER_SVME) == 0)
                return false;
 
@@ -271,7 +278,7 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
        if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
                return false;
 
-       return nested_vmcb_check_controls(&vmcb12->control);
+       return true;
 }
 
 static void load_nested_vmcb_control(struct vcpu_svm *svm,
@@ -396,7 +403,14 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
        svm->vmcb->save.gdtr = vmcb12->save.gdtr;
        svm->vmcb->save.idtr = vmcb12->save.idtr;
        kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
-       svm_set_efer(&svm->vcpu, vmcb12->save.efer);
+
+       /*
+        * Force-set EFER_SVME even though it is checked earlier on the
+        * VMCB12, because the guest can flip the bit between the check
+        * and now.  Clearing EFER_SVME would call svm_free_nested.
+        */
+       svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);
+
        svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
        svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
        svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
@@ -468,7 +482,6 @@ int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
 
 
        svm->nested.vmcb12_gpa = vmcb12_gpa;
-       load_nested_vmcb_control(svm, &vmcb12->control);
        nested_prepare_vmcb_control(svm);
        nested_prepare_vmcb_save(svm, vmcb12);
 
@@ -515,7 +528,10 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
        if (WARN_ON_ONCE(!svm->nested.initialized))
                return -EINVAL;
 
-       if (!nested_vmcb_checks(svm, vmcb12)) {
+       load_nested_vmcb_control(svm, &vmcb12->control);
+
+       if (!nested_vmcb_check_save(svm, vmcb12) ||
+           !nested_vmcb_check_controls(&svm->nested.ctl)) {
                vmcb12->control.exit_code    = SVM_EXIT_ERR;
                vmcb12->control.exit_code_hi = 0;
                vmcb12->control.exit_info_1  = 0;
@@ -1209,6 +1225,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
         */
        if (!(save->cr0 & X86_CR0_PG))
                goto out_free;
+       if (!(save->efer & EFER_SVME))
+               goto out_free;
 
        /*
         * All checks done, we can enter guest mode.  L1 control fields
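
The nested.c hunks above split nested_vmcb_checks() and move load_nested_vmcb_control() ahead of the checks so that the control area is validated from the cached copy (svm->nested.ctl) rather than from guest memory, which the guest can rewrite between check and use; EFER_SVME is force-set for the same reason. A generic copy-then-validate sketch of that TOC/TOU avoidance (hypothetical structure and names, not the SVM code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical control block living in guest-writable memory. */
    struct ctl_area {
            uint32_t intercepts;
            uint32_t asid;
    };

    static bool ctl_is_valid(const struct ctl_area *ctl)
    {
            return ctl->asid != 0;          /* stand-in for the real control checks */
    }

    /*
     * Snapshot the guest data first, then validate and consume only the
     * snapshot.  Validating guest memory directly and re-reading it later is
     * exactly the TOC/TOU race the FIXME above describes.
     */
    static bool load_and_check(struct ctl_area *cached, const struct ctl_area *guest)
    {
            memcpy(cached, guest, sizeof(*cached));   /* "load the control area" */
            return ctl_is_valid(cached);              /* checks run on the copy */
    }

    int main(void)
    {
            struct ctl_area guest = { .intercepts = 1, .asid = 7 }, cached;

            printf("valid: %d\n", load_and_check(&cached, &guest));
            return 0;
    }
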
index 035da07..fdf587f 100644
@@ -98,6 +98,8 @@ static enum index msr_to_index(u32 msr)
 static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
                                             enum pmu_type type)
 {
+       struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
+
        switch (msr) {
        case MSR_F15H_PERF_CTL0:
        case MSR_F15H_PERF_CTL1:
@@ -105,6 +107,9 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
        case MSR_F15H_PERF_CTL3:
        case MSR_F15H_PERF_CTL4:
        case MSR_F15H_PERF_CTL5:
+               if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
+                       return NULL;
+               fallthrough;
        case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
                if (type != PMU_TYPE_EVNTSEL)
                        return NULL;
@@ -115,6 +120,9 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
        case MSR_F15H_PERF_CTR3:
        case MSR_F15H_PERF_CTR4:
        case MSR_F15H_PERF_CTR5:
+               if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
+                       return NULL;
+               fallthrough;
        case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
                if (type != PMU_TYPE_COUNTER)
                        return NULL;
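
The pmu_amd.c hunk gates the MSR_F15H_PERF_CTLn/CTRn aliases on the guest's PERFCTR_CORE CPUID bit and then falls through to the legacy K7 handling that both ranges share. A compact sketch of that gate-then-fallthrough dispatch, using made-up MSR numbers rather than the real ones:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical MSR numbers, for illustration only. */
    enum {
            MSR_LEGACY_SEL0 = 0x10, MSR_LEGACY_SEL3 = 0x13,
            MSR_EXT_SEL0    = 0x20, MSR_EXT_SEL5    = 0x25,
    };

    static bool guest_has_perfctr_core;

    /* Is this event-select MSR available to the guest? */
    static bool evntsel_available(unsigned int msr)
    {
            switch (msr) {
            case MSR_EXT_SEL0 ... MSR_EXT_SEL5:
                    /* The extended aliases exist only with PERFCTR_CORE. */
                    if (!guest_has_perfctr_core)
                            return false;
                    /* fall through: otherwise handled like the legacy range */
            case MSR_LEGACY_SEL0 ... MSR_LEGACY_SEL3:
                    return true;
            default:
                    return false;
            }
    }

    int main(void)
    {
            printf("%d\n", evntsel_available(MSR_EXT_SEL0));   /* 0: feature hidden */
            guest_has_perfctr_core = true;
            printf("%d\n", evntsel_available(MSR_EXT_SEL0));   /* 1 */
            return 0;
    }

As in the hunk above, the case ranges rely on the GNU C extension the kernel uses throughout.
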
index 32cf828..29b40e0 100644
@@ -6027,19 +6027,19 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
             exit_reason.basic != EXIT_REASON_PML_FULL &&
             exit_reason.basic != EXIT_REASON_APIC_ACCESS &&
             exit_reason.basic != EXIT_REASON_TASK_SWITCH)) {
+               int ndata = 3;
+
                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
-               vcpu->run->internal.ndata = 3;
                vcpu->run->internal.data[0] = vectoring_info;
                vcpu->run->internal.data[1] = exit_reason.full;
                vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
                if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) {
-                       vcpu->run->internal.ndata++;
-                       vcpu->run->internal.data[3] =
+                       vcpu->run->internal.data[ndata++] =
                                vmcs_read64(GUEST_PHYSICAL_ADDRESS);
                }
-               vcpu->run->internal.data[vcpu->run->internal.ndata++] =
-                       vcpu->arch.last_vmentry_cpu;
+               vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu;
+               vcpu->run->internal.ndata = ndata;
                return 0;
        }
 
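
The vmx.c hunk builds the KVM_EXIT_INTERNAL_ERROR payload through a local ndata index and publishes the count once at the end, apparently so that data[] is never indexed through vcpu->run->internal.ndata, which lives in memory shared with userspace. The same build-locally-then-publish pattern in miniature (generic structure, not the KVM one):

    #include <stdio.h>

    struct report {
            unsigned int ndata;
            unsigned long long data[4];
    };

    static void fill_report(struct report *r, int include_optional)
    {
            int ndata = 2;                          /* slots that are always present */

            r->data[0] = 0x1111;
            r->data[1] = 0x2222;
            if (include_optional)
                    r->data[ndata++] = 0x3333;      /* optional slot */
            r->data[ndata++] = 0x4444;              /* trailing slot */
            r->ndata = ndata;                       /* publish the final count once */
    }

    int main(void)
    {
            struct report r;

            fill_report(&r, 1);
            printf("ndata=%u last=%llx\n", r.ndata, r.data[r.ndata - 1]);
            return 0;
    }
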
index fe806e8..eca6362 100644
@@ -271,8 +271,7 @@ static struct kmem_cache *x86_emulator_cache;
  * When called, it means the previous get/set msr reached an invalid msr.
  * Return true if we want to ignore/silent this failed msr access.
  */
-static bool kvm_msr_ignored_check(struct kvm_vcpu *vcpu, u32 msr,
-                                 u64 data, bool write)
+static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write)
 {
        const char *op = write ? "wrmsr" : "rdmsr";
 
@@ -1445,7 +1444,7 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
        if (r == KVM_MSR_RET_INVALID) {
                /* Unconditionally clear the output for simplicity */
                *data = 0;
-               if (kvm_msr_ignored_check(vcpu, index, 0, false))
+               if (kvm_msr_ignored_check(index, 0, false))
                        r = 0;
        }
 
@@ -1620,7 +1619,7 @@ static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
        int ret = __kvm_set_msr(vcpu, index, data, host_initiated);
 
        if (ret == KVM_MSR_RET_INVALID)
-               if (kvm_msr_ignored_check(vcpu, index, data, true))
+               if (kvm_msr_ignored_check(index, data, true))
                        ret = 0;
 
        return ret;
@@ -1658,7 +1657,7 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
        if (ret == KVM_MSR_RET_INVALID) {
                /* Unconditionally clear *data for simplicity */
                *data = 0;
-               if (kvm_msr_ignored_check(vcpu, index, 0, false))
+               if (kvm_msr_ignored_check(index, 0, false))
                        ret = 0;
        }
 
@@ -2329,7 +2328,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
        kvm_vcpu_write_tsc_offset(vcpu, offset);
        raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
-       spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
+       spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
        if (!matched) {
                kvm->arch.nr_vcpus_matched_tsc = 0;
        } else if (!already_matched) {
@@ -2337,7 +2336,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
        }
 
        kvm_track_tsc_matching(vcpu);
-       spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
+       spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
 }
 
 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
@@ -2559,13 +2558,16 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
        int i;
        struct kvm_vcpu *vcpu;
        struct kvm_arch *ka = &kvm->arch;
+       unsigned long flags;
 
        kvm_hv_invalidate_tsc_page(kvm);
 
-       spin_lock(&ka->pvclock_gtod_sync_lock);
        kvm_make_mclock_inprogress_request(kvm);
+
        /* no guest entries from this point */
+       spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
        pvclock_update_vm_gtod_copy(kvm);
+       spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -2573,8 +2575,6 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
        /* guest entries allowed */
        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
-
-       spin_unlock(&ka->pvclock_gtod_sync_lock);
 #endif
 }
 
@@ -2582,17 +2582,18 @@ u64 get_kvmclock_ns(struct kvm *kvm)
 {
        struct kvm_arch *ka = &kvm->arch;
        struct pvclock_vcpu_time_info hv_clock;
+       unsigned long flags;
        u64 ret;
 
-       spin_lock(&ka->pvclock_gtod_sync_lock);
+       spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
        if (!ka->use_master_clock) {
-               spin_unlock(&ka->pvclock_gtod_sync_lock);
+               spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
                return get_kvmclock_base_ns() + ka->kvmclock_offset;
        }
 
        hv_clock.tsc_timestamp = ka->master_cycle_now;
        hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
-       spin_unlock(&ka->pvclock_gtod_sync_lock);
+       spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
        /* both __this_cpu_read() and rdtsc() should be on the same cpu */
        get_cpu();
@@ -2686,13 +2687,13 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
         * If the host uses TSC clock, then passthrough TSC as stable
         * to the guest.
         */
-       spin_lock(&ka->pvclock_gtod_sync_lock);
+       spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
        use_master_clock = ka->use_master_clock;
        if (use_master_clock) {
                host_tsc = ka->master_cycle_now;
                kernel_ns = ka->master_kernel_ns;
        }
-       spin_unlock(&ka->pvclock_gtod_sync_lock);
+       spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
        /* Keep irq disabled to prevent changes to the clock */
        local_irq_save(flags);
@@ -5726,6 +5727,7 @@ set_pit2_out:
        }
 #endif
        case KVM_SET_CLOCK: {
+               struct kvm_arch *ka = &kvm->arch;
                struct kvm_clock_data user_ns;
                u64 now_ns;
 
@@ -5744,8 +5746,22 @@ set_pit2_out:
                 * pvclock_update_vm_gtod_copy().
                 */
                kvm_gen_update_masterclock(kvm);
-               now_ns = get_kvmclock_ns(kvm);
-               kvm->arch.kvmclock_offset += user_ns.clock - now_ns;
+
+               /*
+                * This pairs with kvm_guest_time_update(): when masterclock is
+                * in use, we use master_kernel_ns + kvmclock_offset to set
+                * unsigned 'system_time' so if we use get_kvmclock_ns() (which
+                * is slightly ahead) here we risk going negative on unsigned
+                * 'system_time' when 'user_ns.clock' is very small.
+                */
+               spin_lock_irq(&ka->pvclock_gtod_sync_lock);
+               if (kvm->arch.use_master_clock)
+                       now_ns = ka->master_kernel_ns;
+               else
+                       now_ns = get_kvmclock_base_ns();
+               ka->kvmclock_offset = user_ns.clock - now_ns;
+               spin_unlock_irq(&ka->pvclock_gtod_sync_lock);
+
                kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);
                break;
        }
@@ -7724,6 +7740,7 @@ static void kvm_hyperv_tsc_notifier(void)
        struct kvm *kvm;
        struct kvm_vcpu *vcpu;
        int cpu;
+       unsigned long flags;
 
        mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
@@ -7739,17 +7756,15 @@ static void kvm_hyperv_tsc_notifier(void)
        list_for_each_entry(kvm, &vm_list, vm_list) {
                struct kvm_arch *ka = &kvm->arch;
 
-               spin_lock(&ka->pvclock_gtod_sync_lock);
-
+               spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
                pvclock_update_vm_gtod_copy(kvm);
+               spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
                kvm_for_each_vcpu(cpu, vcpu, kvm)
                        kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 
                kvm_for_each_vcpu(cpu, vcpu, kvm)
                        kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
-
-               spin_unlock(&ka->pvclock_gtod_sync_lock);
        }
        mutex_unlock(&kvm_lock);
 }
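
The KVM_SET_CLOCK hunk derives the new kvmclock_offset from master_kernel_ns (the same base later added back in kvm_guest_time_update()) rather than from get_kvmclock_ns(), which runs slightly ahead; as the new comment explains, an offset computed against the ahead value can push the unsigned system_time negative. A toy computation showing the wrap, with arbitrary numbers unrelated to real clock values:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t master_kernel_ns = 1000;  /* base used when building pvclock */
            uint64_t kvmclock_ns      = 1500;  /* "now" per get_kvmclock_ns(): ahead of the base */
            uint64_t user_clock       = 200;   /* small value requested via KVM_SET_CLOCK */

            /* Offset derived from the ahead clock: base + offset wraps around zero. */
            int64_t bad_offset  = (int64_t)(user_clock - kvmclock_ns);
            /* Offset derived from the base that will be added back: stays consistent. */
            int64_t good_offset = (int64_t)(user_clock - master_kernel_ns);

            printf("wrapped system_time:  %" PRIu64 "\n",
                   master_kernel_ns + (uint64_t)bad_offset);
            printf("intended system_time: %" PRIu64 "\n",
                   master_kernel_ns + (uint64_t)good_offset);
            return 0;
    }
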
index 39eb048..9035e34 100644
@@ -250,7 +250,6 @@ static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
 void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs);
 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
 
-void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
 u64 get_kvmclock_ns(struct kvm *kvm);
 
 int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
index b35fc80..7f1b3a8 100644
@@ -1689,7 +1689,16 @@ emit_jmp:
                }
 
                if (image) {
-                       if (unlikely(proglen + ilen > oldproglen)) {
+                       /*
+                        * When populating the image, assert that:
+                        *
+                        *  i) We do not write beyond the allocated space, and
+                        * ii) addrs[i] did not change from the prior run, in order
+                        *     to validate assumptions made for computing branch
+                        *     displacements.
+                        */
+                       if (unlikely(proglen + ilen > oldproglen ||
+                                    proglen + ilen != addrs[i])) {
                                pr_err("bpf_jit: fatal error\n");
                                return -EFAULT;
                        }
index d17b67c..6a99def 100644
@@ -2276,7 +2276,16 @@ notyet:
                }
 
                if (image) {
-                       if (unlikely(proglen + ilen > oldproglen)) {
+                       /*
+                        * When populating the image, assert that:
+                        *
+                        *  i) We do not write beyond the allocated space, and
+                        * ii) addrs[i] did not change from the prior run, in order
+                        *     to validate assumptions made for computing branch
+                        *     displacements.
+                        */
+                       if (unlikely(proglen + ilen > oldproglen ||
+                                    proglen + ilen != addrs[i])) {
                                pr_err("bpf_jit: fatal error\n");
                                return -EFAULT;
                        }
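
Both bpf_jit_comp hunks extend the image-write assertion: besides never writing past the buffer sized by the previous pass, the end offset of instruction i must still equal the addrs[i] recorded earlier, because branch displacements were computed from those offsets. A standalone sketch of the two-pass scheme and the check, with a toy instruction table rather than real BPF:

    #include <stdio.h>
    #include <string.h>

    #define NINSN 4

    /* Pretend each "instruction" encodes to this many bytes. */
    static const int ilen_tab[NINSN] = { 1, 3, 2, 5 };

    static int emit_prog(unsigned char *image, int oldproglen, int *addrs)
    {
            int proglen = 0;

            for (int i = 0; i < NINSN; i++) {
                    int ilen = ilen_tab[i];

                    if (image) {
                            /*
                             * i)  never write beyond the space sized in pass 1;
                             * ii) the end offset of insn i must match what pass 1
                             *     recorded, or displacements based on addrs[] are stale.
                             */
                            if (proglen + ilen > oldproglen ||
                                proglen + ilen != addrs[i]) {
                                    fprintf(stderr, "jit: fatal error\n");
                                    return -1;
                            }
                            memset(image + proglen, 0x90, ilen);    /* "emit" the bytes */
                    }
                    proglen += ilen;
                    addrs[i] = proglen;     /* pass 1 records per-insn end offsets */
            }
            return proglen;
    }

    int main(void)
    {
            int addrs[NINSN];
            unsigned char image[64];
            int oldproglen, proglen;

            oldproglen = emit_prog(NULL, 0, addrs);            /* pass 1: sizing only */
            proglen = emit_prog(image, oldproglen, addrs);     /* pass 2: emit and verify */
            printf("proglen=%d (expected %d)\n", proglen, oldproglen);
            return 0;
    }
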
index c426b84..45cc0ae 100644
        LOAD_CP_REGS_TAB(6)
        LOAD_CP_REGS_TAB(7)
 
-/*
- * coprocessor_flush(struct thread_info*, index)
- *                             a2        a3
- *
- * Save coprocessor registers for coprocessor 'index'.
- * The register values are saved to or loaded from the coprocessor area 
- * inside the task_info structure.
- *
- * Note that this function doesn't update the coprocessor_owner information!
- *
- */
-
-ENTRY(coprocessor_flush)
-
-       /* reserve 4 bytes on stack to save a0 */
-       abi_entry(4)
-
-       s32i    a0, a1, 0
-       movi    a0, .Lsave_cp_regs_jump_table
-       addx8   a3, a3, a0
-       l32i    a4, a3, 4
-       l32i    a3, a3, 0
-       add     a2, a2, a4
-       beqz    a3, 1f
-       callx0  a3
-1:     l32i    a0, a1, 0
-
-       abi_ret(4)
-
-ENDPROC(coprocessor_flush)
-
 /*
  * Entry condition:
  *
@@ -245,6 +214,39 @@ ENTRY(fast_coprocessor)
 
 ENDPROC(fast_coprocessor)
 
+       .text
+
+/*
+ * coprocessor_flush(struct thread_info*, index)
+ *                             a2        a3
+ *
+ * Save coprocessor registers for coprocessor 'index'.
+ * The register values are saved to or loaded from the coprocessor area
+ * inside the task_info structure.
+ *
+ * Note that this function doesn't update the coprocessor_owner information!
+ *
+ */
+
+ENTRY(coprocessor_flush)
+
+       /* reserve 4 bytes on stack to save a0 */
+       abi_entry(4)
+
+       s32i    a0, a1, 0
+       movi    a0, .Lsave_cp_regs_jump_table
+       addx8   a3, a3, a0
+       l32i    a4, a3, 4
+       l32i    a3, a3, 0
+       add     a2, a2, a4
+       beqz    a3, 1f
+       callx0  a3
+1:     l32i    a0, a1, 0
+
+       abi_ret(4)
+
+ENDPROC(coprocessor_flush)
+
        .data
 
 ENTRY(coprocessor_owner)
index 7666408..95a7489 100644
@@ -112,8 +112,11 @@ good_area:
         */
        fault = handle_mm_fault(vma, address, flags, regs);
 
-       if (fault_signal_pending(fault, regs))
+       if (fault_signal_pending(fault, regs)) {
+               if (!user_mode(regs))
+                       goto bad_page_fault;
                return;
+       }
 
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
index 963d1d4..50e5790 100644
@@ -277,7 +277,7 @@ static struct bio *__bio_chain_endio(struct bio *bio)
 {
        struct bio *parent = bio->bi_private;
 
-       if (!parent->bi_status)
+       if (bio->bi_status && !parent->bi_status)
                parent->bi_status = bio->bi_status;
        bio_put(bio);
        return parent;
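
The bio.c fix makes error propagation up a bio chain one-way and sticky: only a child that actually failed updates the parent, and only if the parent has not already recorded an error, so a successful sibling completing around the same time can no longer overwrite the parent's status with zero. The guard in isolation, with stand-in types rather than struct bio:

    #include <stdio.h>

    /* 0 == success; non-zero == first recorded error, kept sticky. */
    struct req { int status; };

    static void chain_endio(struct req *parent, const struct req *child)
    {
            /* Only a real error propagates, and only the first one sticks. */
            if (child->status && !parent->status)
                    parent->status = child->status;
    }

    int main(void)
    {
            struct req parent = { 0 };
            struct req a = { 0 }, b = { -5 }, c = { 0 };

            chain_endio(&parent, &a);       /* success: parent stays clean */
            chain_endio(&parent, &b);       /* first error is recorded */
            chain_endio(&parent, &c);       /* later success must not clear it */
            printf("parent status: %d\n", parent.status);
            return 0;
    }
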
index 9ebb344..271f659 100644
@@ -302,7 +302,6 @@ static const char *const rqf_name[] = {
        RQF_NAME(QUIET),
        RQF_NAME(ELVPRIV),
        RQF_NAME(IO_STAT),
-       RQF_NAME(ALLOCED),
        RQF_NAME(PM),
        RQF_NAME(HASHED),
        RQF_NAME(STATS),
index 15cf904..5951b43 100644
@@ -328,6 +328,17 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
        {{"_BMS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
          METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
+       {{"_BPC", METHOD_0ARGS,
+         METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (4 Int) */
+       PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 4, 0, 0, 0),
+
+       {{"_BPS", METHOD_0ARGS,
+         METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (5 Int) */
+       PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 5, 0, 0, 0),
+
+       {{"_BPT", METHOD_1ARGS(ACPI_TYPE_PACKAGE),
+         METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
+
        {{"_BQC", METHOD_0ARGS,
          METHOD_RETURNS(ACPI_RTYPE_INTEGER)}},
 
@@ -347,6 +358,10 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
        {{"_CBA", METHOD_0ARGS,
          METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, /* See PCI firmware spec 3.0 */
 
+       {{"_CBR", METHOD_0ARGS,
+         METHOD_RETURNS(ACPI_RTYPE_PACKAGE)}}, /* Fixed-length (3 Int) */
+       PACKAGE_INFO(ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0, 0, 0),
+
        {{"_CCA", METHOD_0ARGS,
          METHOD_RETURNS(ACPI_RTYPE_INTEGER)}}, /* ACPI 5.1 */
 
index 0cb975a..37c47e1 100644
@@ -46,6 +46,7 @@ typedef enum {
        ACPI_RSC_1BITFLAG,
        ACPI_RSC_2BITFLAG,
        ACPI_RSC_3BITFLAG,
+       ACPI_RSC_6BITFLAG,
        ACPI_RSC_ADDRESS,
        ACPI_RSC_BITMASK,
        ACPI_RSC_BITMASK16,
@@ -102,6 +103,7 @@ typedef enum {
        ACPI_RSD_1BITFLAG,
        ACPI_RSD_2BITFLAG,
        ACPI_RSD_3BITFLAG,
+       ACPI_RSD_6BITFLAG,
        ACPI_RSD_ADDRESS,
        ACPI_RSD_DWORDLIST,
        ACPI_RSD_LITERAL,
@@ -295,6 +297,7 @@ extern struct acpi_rsconvert_info acpi_rs_convert_address64[];
 extern struct acpi_rsconvert_info acpi_rs_convert_ext_address64[];
 extern struct acpi_rsconvert_info acpi_rs_convert_gpio[];
 extern struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[];
+extern struct acpi_rsconvert_info acpi_rs_convert_csi2_serial_bus[];
 extern struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[];
 extern struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[];
 extern struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[];
@@ -349,6 +352,7 @@ extern struct acpi_rsdump_info acpi_rs_dump_gpio[];
 extern struct acpi_rsdump_info acpi_rs_dump_pin_function[];
 extern struct acpi_rsdump_info acpi_rs_dump_fixed_dma[];
 extern struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[];
+extern struct acpi_rsdump_info acpi_rs_dump_csi2_serial_bus[];
 extern struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[];
 extern struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[];
 extern struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[];
index be6de71..bccae0d 100644
@@ -28,6 +28,7 @@ extern const char *acpi_gbl_max_decode[];
 extern const char *acpi_gbl_mem_decode[];
 extern const char *acpi_gbl_min_decode[];
 extern const char *acpi_gbl_mtp_decode[];
+extern const char *acpi_gbl_phy_decode[];
 extern const char *acpi_gbl_rng_decode[];
 extern const char *acpi_gbl_rw_decode[];
 extern const char *acpi_gbl_shr_decode[];
index a9d91a3..b981232 100644
@@ -40,6 +40,7 @@
 #define ACPI_RESTAG_IORESTRICTION               "_IOR"
 #define ACPI_RESTAG_LENGTH                      "_LEN"
 #define ACPI_RESTAG_LINE                        "_LIN"
+#define ACPI_RESTAG_LOCALPORT                   "_PRT"
 #define ACPI_RESTAG_MEMATTRIBUTES               "_MTP" /* Memory(0), Reserved(1), ACPI(2), NVS(3) */
 #define ACPI_RESTAG_MEMTYPE                     "_MEM" /* non_cache(0), Cacheable(1) Cache+combine(2), Cache+prefetch(3) */
 #define ACPI_RESTAG_MAXADDR                     "_MAX"
@@ -49,6 +50,7 @@
 #define ACPI_RESTAG_MODE                        "_MOD"
 #define ACPI_RESTAG_PARITY                      "_PAR"
 #define ACPI_RESTAG_PHASE                       "_PHA"
+#define ACPI_RESTAG_PHYTYPE                     "_PHY"
 #define ACPI_RESTAG_PIN                         "_PIN"
 #define ACPI_RESTAG_PINCONFIG                   "_PPI"
 #define ACPI_RESTAG_PINCONFIG_TYPE              "_TYP"
@@ -316,12 +318,26 @@ struct aml_resource_gpio {
 #define AML_RESOURCE_I2C_SERIALBUSTYPE          1
 #define AML_RESOURCE_SPI_SERIALBUSTYPE          2
 #define AML_RESOURCE_UART_SERIALBUSTYPE         3
-#define AML_RESOURCE_MAX_SERIALBUSTYPE          3
+#define AML_RESOURCE_CSI2_SERIALBUSTYPE         4
+#define AML_RESOURCE_MAX_SERIALBUSTYPE          4
 #define AML_RESOURCE_VENDOR_SERIALBUSTYPE       192    /* Vendor defined is 0xC0-0xFF (NOT SUPPORTED) */
 
 struct aml_resource_common_serialbus {
 AML_RESOURCE_LARGE_HEADER_COMMON AML_RESOURCE_SERIAL_COMMON};
 
+struct aml_resource_csi2_serialbus {
+       AML_RESOURCE_LARGE_HEADER_COMMON AML_RESOURCE_SERIAL_COMMON
+           /*
+            * Optional fields follow immediately:
+            * 1) Vendor Data bytes
+            * 2) Resource Source String
+            */
+};
+
+#define AML_RESOURCE_CSI2_REVISION              1      /* ACPI 6.4 */
+#define AML_RESOURCE_CSI2_TYPE_REVISION         1      /* ACPI 6.4 */
+#define AML_RESOURCE_CSI2_MIN_DATA_LEN          0      /* ACPI 6.4 */
+
 struct aml_resource_i2c_serialbus {
        AML_RESOURCE_LARGE_HEADER_COMMON
            AML_RESOURCE_SERIAL_COMMON u32 connection_speed;
@@ -510,6 +526,7 @@ union aml_resource {
        struct aml_resource_i2c_serialbus i2c_serial_bus;
        struct aml_resource_spi_serialbus spi_serial_bus;
        struct aml_resource_uart_serialbus uart_serial_bus;
+       struct aml_resource_csi2_serialbus csi2_serial_bus;
        struct aml_resource_common_serialbus common_serial_bus;
        struct aml_resource_pin_function pin_function;
        struct aml_resource_pin_config pin_config;
index fcf129d..90583db 100644
@@ -677,10 +677,10 @@ acpi_rs_get_list_length(u8 *aml_buffer,
                *size_needed += buffer_size;
 
                ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
-                                 "Type %.2X, AmlLength %.2X InternalLength %.2X\n",
+                                 "Type %.2X, AmlLength %.2X InternalLength %.2X%8X\n",
                                  acpi_ut_get_resource_type(aml_buffer),
                                  acpi_ut_get_descriptor_length(aml_buffer),
-                                 buffer_size));
+                                 ACPI_FORMAT_UINT64(*size_needed)));
 
                /*
                 * Point to the next resource within the AML stream using the length
index 6601e71..611bc71 100644
@@ -87,6 +87,9 @@ void acpi_rs_dump_resource_list(struct acpi_resource *resource_list)
                            ("Invalid descriptor type (%X) in resource list\n",
                             resource_list->type);
                        return;
+               } else if (!resource_list->type) {
+                       ACPI_ERROR((AE_INFO, "Invalid Zero Resource Type"));
+                       return;
                }
 
                /* Sanity check the length. It must not be zero, or we loop forever */
@@ -258,6 +261,11 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
                                           table->pointer[*target & 0x07]);
                        break;
 
+               case ACPI_RSD_6BITFLAG:
+
+                       acpi_rs_out_integer8(name, (ACPI_GET8(target) & 0x3F));
+                       break;
+
                case ACPI_RSD_SHORTLIST:
                        /*
                         * Short byte list (single line output) for DMA and IRQ resources
index cafa813..b8b3744 100644
@@ -421,6 +421,32 @@ struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[11] = {
        ACPI_RS_DUMP_COMMON_SERIAL_BUS
 };
 
+struct acpi_rsdump_info acpi_rs_dump_csi2_serial_bus[11] = {
+       { ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_csi2_serial_bus),
+        "Camera Serial Bus", NULL },
+       { ACPI_RSD_UINT8, ACPI_RSD_OFFSET(csi2_serial_bus.revision_id),
+        "RevisionId", NULL },
+       { ACPI_RSD_UINT8, ACPI_RSD_OFFSET(csi2_serial_bus.type), "Type",
+        acpi_gbl_sbt_decode },
+       { ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(csi2_serial_bus.producer_consumer),
+        "ProducerConsumer", acpi_gbl_consume_decode },
+       { ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(csi2_serial_bus.slave_mode),
+        "SlaveMode", acpi_gbl_sm_decode },
+       { ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(csi2_serial_bus.phy_type),
+        "PhyType", acpi_gbl_phy_decode },
+       { ACPI_RSD_6BITFLAG,
+        ACPI_RSD_OFFSET(csi2_serial_bus.local_port_instance),
+        "LocalPortInstance", NULL },
+       { ACPI_RSD_UINT8, ACPI_RSD_OFFSET(csi2_serial_bus.type_revision_id),
+        "TypeRevisionId", NULL },
+       { ACPI_RSD_UINT16, ACPI_RSD_OFFSET(csi2_serial_bus.vendor_length),
+        "VendorLength", NULL },
+       { ACPI_RSD_SHORTLISTX, ACPI_RSD_OFFSET(csi2_serial_bus.vendor_data),
+        "VendorData", NULL },
+       { ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(csi2_serial_bus.resource_source),
+        "ResourceSource", NULL },
+};
+
 struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[14] = {
        {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_i2c_serial_bus),
         "I2C Serial Bus", NULL},
index 6e2e596..eaeb7ab 100644
@@ -96,13 +96,14 @@ struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[] = {
        acpi_rs_convert_pin_group_config,       /* 0x12, ACPI_RESOURCE_NAME_PIN_GROUP_CONFIG */
 };
 
-/* Subtype table for serial_bus -- I2C, SPI, and UART */
+/* Subtype table for serial_bus -- I2C, SPI, UART, and CSI2 */
 
 struct acpi_rsconvert_info *acpi_gbl_convert_resource_serial_bus_dispatch[] = {
        NULL,
        acpi_rs_convert_i2c_serial_bus,
        acpi_rs_convert_spi_serial_bus,
        acpi_rs_convert_uart_serial_bus,
+       acpi_rs_convert_csi2_serial_bus
 };
 
 #if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DISASSEMBLER) || defined(ACPI_DEBUGGER)
@@ -142,6 +143,7 @@ struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[] = {
        acpi_rs_dump_i2c_serial_bus,    /* AML_RESOURCE_I2C_BUS_TYPE */
        acpi_rs_dump_spi_serial_bus,    /* AML_RESOURCE_SPI_BUS_TYPE */
        acpi_rs_dump_uart_serial_bus,   /* AML_RESOURCE_UART_BUS_TYPE */
+       acpi_rs_dump_csi2_serial_bus,   /* AML_RESOURCE_CSI2_BUS_TYPE */
 };
 #endif
 
@@ -226,6 +228,7 @@ const u8 acpi_gbl_aml_resource_serial_bus_sizes[] = {
        sizeof(struct aml_resource_i2c_serialbus),
        sizeof(struct aml_resource_spi_serialbus),
        sizeof(struct aml_resource_uart_serialbus),
+       sizeof(struct aml_resource_csi2_serialbus),
 };
 
 const u8 acpi_gbl_resource_struct_serial_bus_sizes[] = {
@@ -233,4 +236,5 @@ const u8 acpi_gbl_resource_struct_serial_bus_sizes[] = {
        ACPI_RS_SIZE(struct acpi_resource_i2c_serialbus),
        ACPI_RS_SIZE(struct acpi_resource_spi_serialbus),
        ACPI_RS_SIZE(struct acpi_resource_uart_serialbus),
+       ACPI_RS_SIZE(struct acpi_resource_csi2_serialbus),
 };
index 0307675..e46efaa 100644
@@ -59,7 +59,7 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
                    AML_RESOURCE_MAX_SERIALBUSTYPE) {
                        conversion_table = NULL;
                } else {
-                       /* This is an I2C, SPI, or UART serial_bus descriptor */
+                       /* This is an I2C, SPI, UART, or CSI2 serial_bus descriptor */
 
                        conversion_table =
                            acpi_gbl_convert_resource_serial_bus_dispatch
@@ -89,6 +89,11 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
                return_ACPI_STATUS(status);
        }
 
+       if (!resource->length) {
+               ACPI_EXCEPTION((AE_INFO, status,
+                               "Zero-length resource returned from RsConvertAmlToResource"));
+       }
+
        ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES,
                          "Type %.2X, AmlLength %.2X InternalLength %.2X\n",
                          acpi_ut_get_resource_type(aml), length,
@@ -158,7 +163,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
                            AML_RESOURCE_MAX_SERIALBUSTYPE) {
                                conversion_table = NULL;
                        } else {
-                               /* This is an I2C, SPI, or UART serial_bus descriptor */
+                               /* This is an I2C, SPI, UART or CSI2 serial_bus descriptor */
 
                                conversion_table =
                                    acpi_gbl_convert_resource_serial_bus_dispatch
index 1763a3d..c2dd9aa 100644
@@ -70,6 +70,8 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
         */
        count = INIT_TABLE_LENGTH(info);
        while (count) {
+               target = NULL;
+
                /*
                 * Source is the external AML byte stream buffer,
                 * destination is the internal resource descriptor
@@ -120,6 +122,14 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
                                  ((ACPI_GET8(source) >> info->value) & 0x07));
                        break;
 
+               case ACPI_RSC_6BITFLAG:
+                       /*
+                        * Mask and shift the flag bits
+                        */
+                       ACPI_SET8(destination,
+                                 ((ACPI_GET8(source) >> info->value) & 0x3F));
+                       break;
+
                case ACPI_RSC_COUNT:
 
                        item_count = ACPI_GET8(source);
@@ -509,6 +519,15 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
                                      value));
                        break;
 
+               case ACPI_RSC_6BITFLAG:
+                       /*
+                        * Mask and shift the flag bits
+                        */
+                       ACPI_SET_BIT(*ACPI_CAST8(destination), (u8)
+                                    ((ACPI_GET8(source) & 0x3F) << info->
+                                     value));
+                       break;
+
                case ACPI_RSC_COUNT:
 
                        item_count = ACPI_GET8(source);
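
The new ACPI_RSC_6BITFLAG/ACPI_RSD_6BITFLAG handlers exist for the CSI2 descriptor's LocalPortInstance field, which the conversion entries that follow place in six bits of the type-specific flags at shift 2, next to the two-bit PhyType at shift 0. A tiny encode/decode sketch of that mask-and-shift (helper names are made up; the layout follows the table entries):

    #include <stdint.h>
    #include <stdio.h>

    #define PHY_TYPE_MASK     0x03   /* two bits at shift 0 */
    #define LOCAL_PORT_MASK   0x3F   /* six bits, stored at shift 2 */
    #define LOCAL_PORT_SHIFT  2

    static uint8_t pack_flags(uint8_t phy_type, uint8_t local_port)
    {
            return (uint8_t)((phy_type & PHY_TYPE_MASK) |
                             ((local_port & LOCAL_PORT_MASK) << LOCAL_PORT_SHIFT));
    }

    static uint8_t unpack_local_port(uint8_t flags)
    {
            return (flags >> LOCAL_PORT_SHIFT) & LOCAL_PORT_MASK;
    }

    int main(void)
    {
            uint8_t flags = pack_flags(1 /* PhyType */, 5 /* LocalPortInstance */);

            printf("flags=0x%02x local_port=%u\n", flags, unpack_local_port(flags));
            return 0;
    }
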
index 1b937d8..f926795 100644
@@ -185,6 +185,81 @@ struct acpi_rsconvert_info acpi_rs_convert_pin_function[13] = {
         0},
 };
 
+/*******************************************************************************
+ *
+ * acpi_rs_convert_csi2_serial_bus
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_csi2_serial_bus[14] = {
+       { ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
+        ACPI_RS_SIZE(struct acpi_resource_csi2_serialbus),
+        ACPI_RSC_TABLE_SIZE(acpi_rs_convert_csi2_serial_bus) },
+
+       { ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS,
+        sizeof(struct aml_resource_csi2_serialbus),
+        0 },
+
+       { ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id),
+        AML_OFFSET(common_serial_bus.revision_id),
+        1 },
+
+       { ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.csi2_serial_bus.type),
+        AML_OFFSET(csi2_serial_bus.type),
+        1 },
+
+       { ACPI_RSC_1BITFLAG,
+        ACPI_RS_OFFSET(data.csi2_serial_bus.producer_consumer),
+        AML_OFFSET(csi2_serial_bus.flags),
+        1 },
+
+       { ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.csi2_serial_bus.slave_mode),
+        AML_OFFSET(csi2_serial_bus.flags),
+        0 },
+
+       { ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.csi2_serial_bus.phy_type),
+        AML_OFFSET(csi2_serial_bus.type_specific_flags),
+        0 },
+
+       { ACPI_RSC_6BITFLAG,
+        ACPI_RS_OFFSET(data.csi2_serial_bus.local_port_instance),
+        AML_OFFSET(csi2_serial_bus.type_specific_flags),
+        2 },
+
+       { ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.csi2_serial_bus.type_revision_id),
+        AML_OFFSET(csi2_serial_bus.type_revision_id),
+        1 },
+
+       /* Vendor data */
+
+       { ACPI_RSC_COUNT_SERIAL_VEN,
+        ACPI_RS_OFFSET(data.csi2_serial_bus.vendor_length),
+        AML_OFFSET(csi2_serial_bus.type_data_length),
+        AML_RESOURCE_CSI2_MIN_DATA_LEN },
+
+       { ACPI_RSC_MOVE_SERIAL_VEN,
+        ACPI_RS_OFFSET(data.csi2_serial_bus.vendor_data),
+        0,
+        sizeof(struct aml_resource_csi2_serialbus) },
+
+       /* Resource Source */
+
+       { ACPI_RSC_MOVE8,
+        ACPI_RS_OFFSET(data.csi2_serial_bus.resource_source.index),
+        AML_OFFSET(csi2_serial_bus.res_source_index),
+        1 },
+
+       { ACPI_RSC_COUNT_SERIAL_RES,
+        ACPI_RS_OFFSET(data.csi2_serial_bus.resource_source.string_length),
+        AML_OFFSET(csi2_serial_bus.type_data_length),
+        sizeof(struct aml_resource_csi2_serialbus) },
+
+       { ACPI_RSC_MOVE_SERIAL_RES,
+        ACPI_RS_OFFSET(data.csi2_serial_bus.resource_source.string_ptr),
+        AML_OFFSET(csi2_serial_bus.type_data_length),
+        sizeof(struct aml_resource_csi2_serialbus) },
+};
+
 /*******************************************************************************
  *
  * acpi_rs_convert_i2c_serial_bus
index 0a9c337..85730fc 100644
@@ -82,6 +82,13 @@ const char *acpi_gbl_mtp_decode[] = {
        "AddressRangeNVS"
 };
 
+const char *acpi_gbl_phy_decode[] = {
+       "Type C",
+       "Type D",
+       "Unknown Type",
+       "Unknown Type"
+};
+
 const char *acpi_gbl_rng_decode[] = {
        "InvalidRanges",
        "NonISAOnlyRanges",
@@ -161,7 +168,8 @@ const char *acpi_gbl_sbt_decode[] = {
        "/* UNKNOWN serial bus type */",
        "I2C",
        "SPI",
-       "UART"
+       "UART",
+       "CSI2"
 };
 
 /* I2C serial bus access mode */
index cba5505..16f9a70 100644
@@ -64,6 +64,7 @@ const u8 acpi_gbl_resource_aml_serial_bus_sizes[] = {
        ACPI_AML_SIZE_LARGE(struct aml_resource_i2c_serialbus),
        ACPI_AML_SIZE_LARGE(struct aml_resource_spi_serialbus),
        ACPI_AML_SIZE_LARGE(struct aml_resource_uart_serialbus),
+       ACPI_AML_SIZE_LARGE(struct aml_resource_csi2_serialbus),
 };
 
 /*
index 1331567..328e8ae 100644
@@ -725,7 +725,6 @@ static int __init einj_init(void)
                goto err_release;
        }
 
-       rc = -ENOMEM;
        einj_param = einj_get_parameter_address();
        if ((param_extension || acpi5) && einj_param) {
                debugfs_create_x32("flags", S_IRUSR | S_IWUSR, einj_debug_dir,
index 8c5dde6..09b05f7 100644
@@ -3831,7 +3831,7 @@ static __init int nfit_init(void)
        int ret;
 
        BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
-       BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
+       BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 64);
        BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
        BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
        BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
index 14ee631..08e1577 100644
@@ -9,6 +9,7 @@
  *     Bjorn Helgaas <bjorn.helgaas@hp.com>
  */
 
+#define pr_fmt(fmt) "ACPI: PCI: " fmt
 
 #include <linux/dmi.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 
-#define PREFIX "ACPI: "
-
-#define _COMPONENT             ACPI_PCI_COMPONENT
-ACPI_MODULE_NAME("pci_irq");
-
 struct acpi_prt_entry {
        struct acpi_pci_id      id;
        u8                      pin;
@@ -126,7 +122,7 @@ static void do_prt_fixups(struct acpi_prt_entry *entry,
                    entry->pin == quirk->pin &&
                    !strcmp(prt->source, quirk->source) &&
                    strlen(prt->source) >= strlen(quirk->actual_source)) {
-                       printk(KERN_WARNING PREFIX "firmware reports "
+                       pr_warn("Firmware reports "
                                "%04x:%02x:%02x PCI INT %c connected to %s; "
                                "changing to %s\n",
                                entry->id.segment, entry->id.bus,
@@ -191,12 +187,9 @@ static int acpi_pci_irq_check_entry(acpi_handle handle, struct pci_dev *dev,
         * the IRQ value, which is hardwired to specific interrupt inputs on
         * the interrupt controller.
         */
-
-       ACPI_DEBUG_PRINT_RAW((ACPI_DB_INFO,
-                             "      %04x:%02x:%02x[%c] -> %s[%d]\n",
-                             entry->id.segment, entry->id.bus,
-                             entry->id.device, pin_name(entry->pin),
-                             prt->source, entry->index));
+       pr_debug("%04x:%02x:%02x[%c] -> %s[%d]\n",
+                entry->id.segment, entry->id.bus, entry->id.device,
+                pin_name(entry->pin), prt->source, entry->index);
 
        *entry_ptr = entry;
 
@@ -307,8 +300,7 @@ static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
 #ifdef CONFIG_X86_IO_APIC
                acpi_reroute_boot_interrupt(dev, entry);
 #endif /* CONFIG_X86_IO_APIC */
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %s[%c] _PRT entry\n",
-                                 pci_name(dev), pin_name(pin)));
+               dev_dbg(&dev->dev, "Found [%c] _PRT entry\n", pin_name(pin));
                return entry;
        }
 
@@ -324,9 +316,7 @@ static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
                        /* PC card has the same IRQ as its cardbridge */
                        bridge_pin = bridge->pin;
                        if (!bridge_pin) {
-                               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                                                 "No interrupt pin configured for device %s\n",
-                                                 pci_name(bridge)));
+                               dev_dbg(&bridge->dev, "No interrupt pin configured\n");
                                return NULL;
                        }
                        pin = bridge_pin;
@@ -334,10 +324,8 @@ static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
 
                ret = acpi_pci_irq_find_prt_entry(bridge, pin, &entry);
                if (!ret && entry) {
-                       ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                                        "Derived GSI for %s INT %c from %s\n",
-                                        pci_name(dev), pin_name(orig_pin),
-                                        pci_name(bridge)));
+                       dev_dbg(&dev->dev, "Derived GSI INT %c from %s\n",
+                               pin_name(orig_pin), pci_name(bridge));
                        return entry;
                }
 
@@ -413,9 +401,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
 
        pin = dev->pin;
        if (!pin) {
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                                 "No interrupt pin configured for device %s\n",
-                                 pci_name(dev)));
+               dev_dbg(&dev->dev, "No interrupt pin configured\n");
                return 0;
        }
 
index fb4c563..b9b80e2 100644
@@ -12,6 +12,8 @@
  *        for IRQ management (e.g. start()->_SRS).
  */
 
+#define pr_fmt(fmt) "ACPI: PCI: " fmt
+
 #include <linux/syscore_ops.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -27,8 +29,6 @@
 
 #include "internal.h"
 
-#define _COMPONENT                     ACPI_PCI_COMPONENT
-ACPI_MODULE_NAME("pci_link");
 #define ACPI_PCI_LINK_CLASS            "pci_irq_routing"
 #define ACPI_PCI_LINK_DEVICE_NAME      "PCI Interrupt Link"
 #define ACPI_PCI_LINK_MAX_POSSIBLE     16
@@ -85,6 +85,7 @@ static acpi_status acpi_pci_link_check_possible(struct acpi_resource *resource,
                                                void *context)
 {
        struct acpi_pci_link *link = context;
+       acpi_handle handle = link->device->handle;
        u32 i;
 
        switch (resource->type) {
@@ -95,17 +96,17 @@ static acpi_status acpi_pci_link_check_possible(struct acpi_resource *resource,
                {
                        struct acpi_resource_irq *p = &resource->data.irq;
                        if (!p || !p->interrupt_count) {
-                               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                                                 "Blank _PRS IRQ resource\n"));
+                               acpi_handle_debug(handle,
+                                                 "Blank _PRS IRQ resource\n");
                                return AE_OK;
                        }
                        for (i = 0;
                             (i < p->interrupt_count
                              && i < ACPI_PCI_LINK_MAX_POSSIBLE); i++) {
                                if (!p->interrupts[i]) {
-                                       printk(KERN_WARNING PREFIX
-                                              "Invalid _PRS IRQ %d\n",
-                                              p->interrupts[i]);
+                                       acpi_handle_debug(handle,
+                                                         "Invalid _PRS IRQ %d\n",
+                                                         p->interrupts[i]);
                                        continue;
                                }
                                link->irq.possible[i] = p->interrupts[i];
@@ -121,17 +122,17 @@ static acpi_status acpi_pci_link_check_possible(struct acpi_resource *resource,
                        struct acpi_resource_extended_irq *p =
                            &resource->data.extended_irq;
                        if (!p || !p->interrupt_count) {
-                               printk(KERN_WARNING PREFIX
-                                             "Blank _PRS EXT IRQ resource\n");
+                               acpi_handle_debug(handle,
+                                                 "Blank _PRS EXT IRQ resource\n");
                                return AE_OK;
                        }
                        for (i = 0;
                             (i < p->interrupt_count
                              && i < ACPI_PCI_LINK_MAX_POSSIBLE); i++) {
                                if (!p->interrupts[i]) {
-                                       printk(KERN_WARNING PREFIX
-                                              "Invalid _PRS IRQ %d\n",
-                                              p->interrupts[i]);
+                                       acpi_handle_debug(handle,
+                                                         "Invalid _PRS IRQ %d\n",
+                                                         p->interrupts[i]);
                                        continue;
                                }
                                link->irq.possible[i] = p->interrupts[i];
@@ -143,8 +144,8 @@ static acpi_status acpi_pci_link_check_possible(struct acpi_resource *resource,
                        break;
                }
        default:
-               printk(KERN_ERR PREFIX "_PRS resource type 0x%x isn't an IRQ\n",
-                      resource->type);
+               acpi_handle_debug(handle, "_PRS resource type 0x%x is not IRQ\n",
+                                 resource->type);
                return AE_OK;
        }
 
@@ -153,18 +154,18 @@ static acpi_status acpi_pci_link_check_possible(struct acpi_resource *resource,
 
 static int acpi_pci_link_get_possible(struct acpi_pci_link *link)
 {
+       acpi_handle handle = link->device->handle;
        acpi_status status;
 
-       status = acpi_walk_resources(link->device->handle, METHOD_NAME__PRS,
+       status = acpi_walk_resources(handle, METHOD_NAME__PRS,
                                     acpi_pci_link_check_possible, link);
        if (ACPI_FAILURE(status)) {
-               acpi_handle_debug(link->device->handle, "_PRS not present or invalid");
+               acpi_handle_debug(handle, "_PRS not present or invalid");
                return 0;
        }
 
-       ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                         "Found %d possible IRQs\n",
-                         link->irq.possible_count));
+       acpi_handle_debug(handle, "Found %d possible IRQs\n",
+                         link->irq.possible_count);
 
        return 0;
 }
@@ -186,8 +187,7 @@ static acpi_status acpi_pci_link_check_current(struct acpi_resource *resource,
                                 * IRQ descriptors may have no IRQ# bits set,
                                 * particularly those w/ _STA disabled
                                 */
-                               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                                                 "Blank _CRS IRQ resource\n"));
+                               pr_debug("Blank _CRS IRQ resource\n");
                                return AE_OK;
                        }
                        *irq = p->interrupts[0];
@@ -202,8 +202,7 @@ static acpi_status acpi_pci_link_check_current(struct acpi_resource *resource,
                                 * extended IRQ descriptors must
                                 * return at least 1 IRQ
                                 */
-                               printk(KERN_WARNING PREFIX
-                                             "Blank _CRS EXT IRQ resource\n");
+                               pr_debug("Blank _CRS EXT IRQ resource\n");
                                return AE_OK;
                        }
                        *irq = p->interrupts[0];
@@ -211,8 +210,8 @@ static acpi_status acpi_pci_link_check_current(struct acpi_resource *resource,
                }
                break;
        default:
-               printk(KERN_ERR PREFIX "_CRS resource type 0x%x isn't an IRQ\n",
-                      resource->type);
+               pr_debug("_CRS resource type 0x%x is not IRQ\n",
+                        resource->type);
                return AE_OK;
        }
 
@@ -228,8 +227,9 @@ static acpi_status acpi_pci_link_check_current(struct acpi_resource *resource,
  */
 static int acpi_pci_link_get_current(struct acpi_pci_link *link)
 {
-       int result = 0;
+       acpi_handle handle = link->device->handle;
        acpi_status status;
+       int result = 0;
        int irq = 0;
 
        link->irq.active = 0;
@@ -239,12 +239,12 @@ static int acpi_pci_link_get_current(struct acpi_pci_link *link)
                /* Query _STA, set link->device->status */
                result = acpi_bus_get_status(link->device);
                if (result) {
-                       printk(KERN_ERR PREFIX "Unable to read status\n");
+                       acpi_handle_err(handle, "Unable to read status\n");
                        goto end;
                }
 
                if (!link->device->status.enabled) {
-                       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Link disabled\n"));
+                       acpi_handle_debug(handle, "Link disabled\n");
                        return 0;
                }
        }
@@ -253,22 +253,23 @@ static int acpi_pci_link_get_current(struct acpi_pci_link *link)
         * Query and parse _CRS to get the current IRQ assignment.
         */
 
-       status = acpi_walk_resources(link->device->handle, METHOD_NAME__CRS,
+       status = acpi_walk_resources(handle, METHOD_NAME__CRS,
                                     acpi_pci_link_check_current, &irq);
        if (ACPI_FAILURE(status)) {
-               ACPI_EXCEPTION((AE_INFO, status, "Evaluating _CRS"));
+               acpi_handle_warn(handle, "_CRS evaluation failed: %s\n",
+                                acpi_format_exception(status));
                result = -ENODEV;
                goto end;
        }
 
        if (acpi_strict && !irq) {
-               printk(KERN_ERR PREFIX "_CRS returned 0\n");
+               acpi_handle_err(handle, "_CRS returned 0\n");
                result = -ENODEV;
        }
 
        link->irq.active = irq;
 
-       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Link at IRQ %d \n", link->irq.active));
+       acpi_handle_debug(handle, "Link at IRQ %d \n", link->irq.active);
 
       end:
        return result;
@@ -276,13 +277,14 @@ static int acpi_pci_link_get_current(struct acpi_pci_link *link)
 
 static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
 {
-       int result;
-       acpi_status status;
        struct {
                struct acpi_resource res;
                struct acpi_resource end;
        } *resource;
        struct acpi_buffer buffer = { 0, NULL };
+       acpi_handle handle = link->device->handle;
+       acpi_status status;
+       int result;
 
        if (!irq)
                return -EINVAL;
@@ -329,7 +331,8 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
                /* ignore resource_source, it's optional */
                break;
        default:
-               printk(KERN_ERR PREFIX "Invalid Resource_type %d\n", link->irq.resource_type);
+               acpi_handle_err(handle, "Invalid resource type %d\n",
+                               link->irq.resource_type);
                result = -EINVAL;
                goto end;
 
@@ -342,7 +345,8 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
 
        /* check for total failure */
        if (ACPI_FAILURE(status)) {
-               ACPI_EXCEPTION((AE_INFO, status, "Evaluating _SRS"));
+               acpi_handle_warn(handle, "_SRS evaluation failed: %s",
+                                acpi_format_exception(status));
                result = -ENODEV;
                goto end;
        }
@@ -350,15 +354,11 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
        /* Query _STA, set device->status */
        result = acpi_bus_get_status(link->device);
        if (result) {
-               printk(KERN_ERR PREFIX "Unable to read status\n");
+               acpi_handle_err(handle, "Unable to read status\n");
                goto end;
        }
-       if (!link->device->status.enabled) {
-               printk(KERN_WARNING PREFIX
-                             "%s [%s] disabled and referenced, BIOS bug\n",
-                             acpi_device_name(link->device),
-                             acpi_device_bid(link->device));
-       }
+       if (!link->device->status.enabled)
+               acpi_handle_warn(handle, "Disabled and referenced, BIOS bug\n");
 
        /* Query _CRS, set link->irq.active */
        result = acpi_pci_link_get_current(link);
@@ -375,14 +375,12 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
                 * policy: when _CRS doesn't return what we just _SRS
                 * assume _SRS worked and override _CRS value.
                 */
-               printk(KERN_WARNING PREFIX
-                             "%s [%s] BIOS reported IRQ %d, using IRQ %d\n",
-                             acpi_device_name(link->device),
-                             acpi_device_bid(link->device), link->irq.active, irq);
+               acpi_handle_warn(handle, "BIOS reported IRQ %d, using IRQ %d\n",
+                                link->irq.active, irq);
                link->irq.active = irq;
        }
 
-       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Set IRQ %d\n", link->irq.active));
+       acpi_handle_debug(handle, "Set IRQ %d\n", link->irq.active);
 
       end:
        kfree(resource);
@@ -531,6 +529,7 @@ static int acpi_irq_balance = -1;   /* 0: static, 1: balance */
 
 static int acpi_pci_link_allocate(struct acpi_pci_link *link)
 {
+       acpi_handle handle = link->device->handle;
        int irq;
        int i;
 
@@ -553,8 +552,8 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
         */
        if (i == link->irq.possible_count) {
                if (acpi_strict)
-                       printk(KERN_WARNING PREFIX "_CRS %d not found"
-                                     " in _PRS\n", link->irq.active);
+                       acpi_handle_warn(handle, "_CRS %d not found in _PRS\n",
+                                        link->irq.active);
                link->irq.active = 0;
        }
 
@@ -578,28 +577,23 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
                }
        }
        if (acpi_irq_get_penalty(irq) >= PIRQ_PENALTY_ISA_ALWAYS) {
-               printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. "
-                           "Try pci=noacpi or acpi=off\n",
-                           acpi_device_name(link->device),
-                           acpi_device_bid(link->device));
+               acpi_handle_err(handle,
+                               "No IRQ available. Try pci=noacpi or acpi=off\n");
                return -ENODEV;
        }
 
        /* Attempt to enable the link device at this IRQ. */
        if (acpi_pci_link_set(link, irq)) {
-               printk(KERN_ERR PREFIX "Unable to set IRQ for %s [%s]. "
-                           "Try pci=noacpi or acpi=off\n",
-                           acpi_device_name(link->device),
-                           acpi_device_bid(link->device));
+               acpi_handle_err(handle,
+                               "Unable to set IRQ. Try pci=noacpi or acpi=off\n");
                return -ENODEV;
        } else {
                if (link->irq.active < ACPI_MAX_ISA_IRQS)
                        acpi_isa_irq_penalty[link->irq.active] +=
                                PIRQ_PENALTY_PCI_USING;
 
-               pr_info("%s [%s] enabled at IRQ %d\n",
-                      acpi_device_name(link->device),
-                      acpi_device_bid(link->device), link->irq.active);
+               acpi_handle_info(handle, "Enabled at IRQ %d\n",
+                                link->irq.active);
        }
 
        link->irq.initialized = 1;
@@ -620,19 +614,19 @@ int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
 
        result = acpi_bus_get_device(handle, &device);
        if (result) {
-               printk(KERN_ERR PREFIX "Invalid link device\n");
+               acpi_handle_err(handle, "Invalid link device\n");
                return -1;
        }
 
        link = acpi_driver_data(device);
        if (!link) {
-               printk(KERN_ERR PREFIX "Invalid link context\n");
+               acpi_handle_err(handle, "Invalid link context\n");
                return -1;
        }
 
        /* TBD: Support multiple index (IRQ) entries per Link Device */
        if (index) {
-               printk(KERN_ERR PREFIX "Invalid index %d\n", index);
+               acpi_handle_err(handle, "Invalid index %d\n", index);
                return -1;
        }
 
@@ -644,7 +638,7 @@ int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
 
        if (!link->irq.active) {
                mutex_unlock(&acpi_link_lock);
-               printk(KERN_ERR PREFIX "Link active IRQ is 0!\n");
+               acpi_handle_err(handle, "Link active IRQ is 0!\n");
                return -1;
        }
        link->refcnt++;
@@ -656,9 +650,7 @@ int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
                *polarity = link->irq.polarity;
        if (name)
                *name = acpi_device_bid(link->device);
-       ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                         "Link %s is referenced\n",
-                         acpi_device_bid(link->device)));
+       acpi_handle_debug(handle, "Link is referenced\n");
        return link->irq.active;
 }
 
@@ -674,20 +666,20 @@ int acpi_pci_link_free_irq(acpi_handle handle)
 
        result = acpi_bus_get_device(handle, &device);
        if (result) {
-               printk(KERN_ERR PREFIX "Invalid link device\n");
+               acpi_handle_err(handle, "Invalid link device\n");
                return -1;
        }
 
        link = acpi_driver_data(device);
        if (!link) {
-               printk(KERN_ERR PREFIX "Invalid link context\n");
+               acpi_handle_err(handle, "Invalid link context\n");
                return -1;
        }
 
        mutex_lock(&acpi_link_lock);
        if (!link->irq.initialized) {
                mutex_unlock(&acpi_link_lock);
-               printk(KERN_ERR PREFIX "Link isn't initialized\n");
+               acpi_handle_err(handle, "Link isn't initialized\n");
                return -1;
        }
 #ifdef FUTURE_USE
@@ -702,9 +694,7 @@ int acpi_pci_link_free_irq(acpi_handle handle)
         */
        link->refcnt--;
 #endif
-       ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                         "Link %s is dereferenced\n",
-                         acpi_device_bid(link->device)));
+       acpi_handle_debug(handle, "Link is dereferenced\n");
 
        if (link->refcnt == 0)
                acpi_evaluate_object(link->device->handle, "_DIS", NULL, NULL);
@@ -720,10 +710,10 @@ int acpi_pci_link_free_irq(acpi_handle handle)
 static int acpi_pci_link_add(struct acpi_device *device,
                             const struct acpi_device_id *not_used)
 {
-       int result;
+       acpi_handle handle = device->handle;
        struct acpi_pci_link *link;
+       int result;
        int i;
-       int found = 0;
 
        link = kzalloc(sizeof(struct acpi_pci_link), GFP_KERNEL);
        if (!link)
@@ -742,31 +732,23 @@ static int acpi_pci_link_add(struct acpi_device *device,
        /* query and set link->irq.active */
        acpi_pci_link_get_current(link);
 
-       printk(KERN_INFO PREFIX "%s [%s] (IRQs", acpi_device_name(device),
-              acpi_device_bid(device));
+       pr_info("Interrupt link %s configured for IRQ %d\n",
+               acpi_device_bid(device), link->irq.active);
+
        for (i = 0; i < link->irq.possible_count; i++) {
-               if (link->irq.active == link->irq.possible[i]) {
-                       printk(KERN_CONT " *%d", link->irq.possible[i]);
-                       found = 1;
-               } else
-                       printk(KERN_CONT " %d", link->irq.possible[i]);
+               if (link->irq.active != link->irq.possible[i])
+                       acpi_handle_debug(handle, "Possible IRQ %d\n",
+                                         link->irq.possible[i]);
        }
 
-       printk(KERN_CONT ")");
-
-       if (!found)
-               printk(KERN_CONT " *%d", link->irq.active);
-
        if (!link->device->status.enabled)
-               printk(KERN_CONT ", disabled.");
-
-       printk(KERN_CONT "\n");
+               pr_info("Interrupt link %s disabled\n", acpi_device_bid(device));
 
        list_add_tail(&link->list, &acpi_link_list);
 
       end:
        /* disable all links -- to be activated on use */
-       acpi_evaluate_object(device->handle, "_DIS", NULL, NULL);
+       acpi_evaluate_object(handle, "_DIS", NULL, NULL);
        mutex_unlock(&acpi_link_lock);
 
        if (result)
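
The pci_link.c hunks above replace open-coded printk(KERN_*) calls, each of which had to repeat acpi_device_name()/acpi_device_bid(), with the acpi_handle_warn/err/info/debug() helpers that derive the device identification from the ACPI handle they are given. A minimal userspace analog of that consolidation, with an invented log_ctx type standing in for the handle (not the kernel API, just a sketch of the pattern):

    #include <stdarg.h>
    #include <stdio.h>

    /* Hypothetical context standing in for an acpi_handle. */
    struct log_ctx {
            const char *name;        /* e.g. "LNKA" */
    };

    /* The prefix is produced in one place instead of being repeated
     * at every call site. */
    static void ctx_warn(const struct log_ctx *ctx, const char *fmt, ...)
    {
            va_list ap;

            printf("ACPI: %s: ", ctx->name);
            va_start(ap, fmt);
            vprintf(fmt, ap);
            va_end(ap);
    }

    int main(void)
    {
            struct log_ctx link = { .name = "LNKA" };

            ctx_warn(&link, "BIOS reported IRQ %d, using IRQ %d\n", 5, 10);
            return 0;
    }
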
index 0925b14..45a0196 100644 (file)
@@ -540,9 +540,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
                        return -ENODEV;
 
 #if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU)
-               /* If NMI wants to wake up CPU0, start CPU0. */
-               if (wakeup_cpu0())
-                       start_cpu0();
+               cond_wakeup_cpu0();
 #endif
        }
 
index 84bb7c1..6efe7ed 100644 (file)
@@ -1670,6 +1670,8 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
        device_initialize(&device->dev);
        dev_set_uevent_suppress(&device->dev, true);
        acpi_init_coherency(device);
+       /* Assume there are unmet deps to start with. */
+       device->dep_unmet = 1;
 }
 
 void acpi_device_add_finalize(struct acpi_device *device)
@@ -1933,6 +1935,8 @@ static void acpi_scan_dep_init(struct acpi_device *adev)
 {
        struct acpi_dep_data *dep;
 
+       adev->dep_unmet = 0;
+
        mutex_lock(&acpi_dep_list_lock);
 
        list_for_each_entry(dep, &acpi_dep_list, node) {
@@ -1980,7 +1984,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
                return AE_CTRL_DEPTH;
 
        acpi_scan_init_hotplug(device);
-       if (!check_dep)
+       /*
+        * If check_dep is true at this point, the device has no dependencies,
+        * or the creation of the device object would have been postponed above.
+        */
+       if (check_dep)
+               device->dep_unmet = 0;
+       else
                acpi_scan_dep_init(device);
 
 out:
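
The scan.c hunks set dep_unmet to 1 as soon as the device object is initialized and clear it only after the dependency information has actually been evaluated in acpi_scan_dep_init(), or in the check_dep branch where the device is known to have none pending. A small sketch of that pessimistic-default pattern, plain C with invented names:

    #include <stdbool.h>
    #include <stdio.h>

    struct dev_obj {
            int dep_unmet;          /* number of unmet dependencies */
    };

    static void dev_init(struct dev_obj *d)
    {
            d->dep_unmet = 1;       /* assume unmet deps until proven otherwise */
    }

    /* unmet stands in for the result of walking the dependency list. */
    static void dep_scan(struct dev_obj *d, int unmet)
    {
            d->dep_unmet = unmet;
    }

    static bool dev_ready(const struct dev_obj *d)
    {
            return d->dep_unmet == 0;
    }

    int main(void)
    {
            struct dev_obj d;

            dev_init(&d);
            printf("before scan: ready=%d\n", dev_ready(&d));   /* 0 */
            dep_scan(&d, 0);
            printf("after scan:  ready=%d\n", dev_ready(&d));   /* 1 */
            return 0;
    }
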
index 4974e92..4f27d78 100644 (file)
@@ -54,7 +54,6 @@ static const struct acpi_dlayer acpi_debug_layers[] = {
 
        ACPI_DEBUG_INIT(ACPI_SBS_COMPONENT),
        ACPI_DEBUG_INIT(ACPI_FAN_COMPONENT),
-       ACPI_DEBUG_INIT(ACPI_PCI_COMPONENT),
        ACPI_DEBUG_INIT(ACPI_CONTAINER_COMPONENT),
        ACPI_DEBUG_INIT(ACPI_SYSTEM_COMPONENT),
        ACPI_DEBUG_INIT(ACPI_MEMORY_DEVICE_COMPONENT),
index e48690a..9d58104 100644 (file)
@@ -780,7 +780,7 @@ acpi_status acpi_os_table_override(struct acpi_table_header *existing_table,
 }
 
 /*
- * acpi_table_init()
+ * acpi_locate_initial_tables()
  *
  * find RSDP, find and checksum SDT/XSDT.
  * checksum all tables, print SDT/XSDT
@@ -788,7 +788,7 @@ acpi_status acpi_os_table_override(struct acpi_table_header *existing_table,
  * result: sdt_entry[] is initialized
  */
 
-int __init acpi_table_init(void)
+int __init acpi_locate_initial_tables(void)
 {
        acpi_status status;
 
@@ -803,9 +803,45 @@ int __init acpi_table_init(void)
        status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
        if (ACPI_FAILURE(status))
                return -EINVAL;
-       acpi_table_initrd_scan();
 
+       return 0;
+}
+
+void __init acpi_reserve_initial_tables(void)
+{
+       int i;
+
+       for (i = 0; i < ACPI_MAX_TABLES; i++) {
+               struct acpi_table_desc *table_desc = &initial_tables[i];
+               u64 start = table_desc->address;
+               u64 size = table_desc->length;
+
+               if (!start || !size)
+                       break;
+
+               pr_info("Reserving %4s table memory at [mem 0x%llx-0x%llx]\n",
+                       table_desc->signature.ascii, start, start + size - 1);
+
+               memblock_reserve(start, size);
+       }
+}
+
+void __init acpi_table_init_complete(void)
+{
+       acpi_table_initrd_scan();
        check_multiple_madt();
+}
+
+int __init acpi_table_init(void)
+{
+       int ret;
+
+       ret = acpi_locate_initial_tables();
+       if (ret)
+               return ret;
+
+       acpi_table_init_complete();
+
        return 0;
 }
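
The tables.c hunk splits acpi_table_init() into acpi_locate_initial_tables(), acpi_reserve_initial_tables() and acpi_table_init_complete(), so early architecture code can reserve the located tables in memblock before the initrd scan and MADT check run, while acpi_table_init() keeps the old one-shot behaviour. The phase-splitting shape, reduced to a runnable sketch with invented phase names:

    #include <stdio.h>

    static int locate_tables(void)    { puts("locate + checksum"); return 0; }
    static void reserve_tables(void)  { puts("reserve table memory"); }
    static void complete_init(void)   { puts("scan + final checks"); }

    /* One-shot wrapper kept for callers that do not need the split. */
    static int table_init(void)
    {
            int ret = locate_tables();

            if (ret)
                    return ret;
            complete_init();
            return 0;
    }

    int main(void)
    {
            /* Early-boot style caller: reserve memory between the phases. */
            if (locate_tables())
                    return 1;
            reserve_tables();
            complete_init();

            return table_init();    /* the combined path still works */
    }
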
 
index 9179825..37a5e5f 100644 (file)
@@ -97,6 +97,9 @@ static void deferred_probe_work_func(struct work_struct *work)
 
                get_device(dev);
 
+               kfree(dev->p->deferred_probe_reason);
+               dev->p->deferred_probe_reason = NULL;
+
                /*
                 * Drop the mutex while probing each device; the probe path may
                 * manipulate the deferred list
@@ -289,14 +292,16 @@ int driver_deferred_probe_check_state(struct device *dev)
 
 static void deferred_probe_timeout_work_func(struct work_struct *work)
 {
-       struct device_private *private, *p;
+       struct device_private *p;
 
        driver_deferred_probe_timeout = 0;
        driver_deferred_probe_trigger();
        flush_work(&deferred_probe_work);
 
-       list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe)
-               dev_info(private->device, "deferred probe pending\n");
+       mutex_lock(&deferred_probe_mutex);
+       list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
+               dev_info(p->device, "deferred probe pending\n");
+       mutex_unlock(&deferred_probe_mutex);
        wake_up_all(&probe_timeout_waitqueue);
 }
 static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
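
The dd.c hunks free any stale deferred_probe_reason before a device is reprobed and, in the timeout handler, walk deferred_probe_pending_list with deferred_probe_mutex held, switching from list_for_each_entry_safe() to a plain list_for_each_entry(). A compact pthread analog of holding the list lock for the whole traversal (hypothetical names):

    #include <pthread.h>
    #include <stdio.h>

    struct node {
            const char *name;
            struct node *next;
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *pending;            /* head of the pending list */

    static void log_pending(void)
    {
            struct node *n;

            /* Walk the whole list under the lock; nothing is deleted
             * here, so a plain iterator is enough. */
            pthread_mutex_lock(&list_lock);
            for (n = pending; n; n = n->next)
                    printf("%s: deferred probe pending\n", n->name);
            pthread_mutex_unlock(&list_lock);
    }

    int main(void)
    {
            struct node b = { "devB", NULL };
            struct node a = { "devA", &b };

            pending = &a;
            log_pending();
            return 0;
    }
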
index d54e540..fe1dad6 100644 (file)
@@ -1690,8 +1690,8 @@ void pm_runtime_get_suppliers(struct device *dev)
                                device_links_read_lock_held())
                if (link->flags & DL_FLAG_PM_RUNTIME) {
                        link->supplier_preactivated = true;
-                       refcount_inc(&link->rpm_active);
                        pm_runtime_get_sync(link->supplier);
+                       refcount_inc(&link->rpm_active);
                }
 
        device_links_read_unlock(idx);
@@ -1704,6 +1704,8 @@ void pm_runtime_get_suppliers(struct device *dev)
 void pm_runtime_put_suppliers(struct device *dev)
 {
        struct device_link *link;
+       unsigned long flags;
+       bool put;
        int idx;
 
        idx = device_links_read_lock();
@@ -1712,7 +1714,11 @@ void pm_runtime_put_suppliers(struct device *dev)
                                device_links_read_lock_held())
                if (link->supplier_preactivated) {
                        link->supplier_preactivated = false;
-                       if (refcount_dec_not_one(&link->rpm_active))
+                       spin_lock_irqsave(&dev->power.lock, flags);
+                       put = pm_runtime_status_suspended(dev) &&
+                             refcount_dec_not_one(&link->rpm_active);
+                       spin_unlock_irqrestore(&dev->power.lock, flags);
+                       if (put)
                                pm_runtime_put(link->supplier);
                }
 
index d6c821d..51bfd77 100644 (file)
@@ -1369,10 +1369,13 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
        }
 
        if (dev->zoned)
-               cmd->error = null_process_zoned_cmd(cmd, op,
-                                                   sector, nr_sectors);
+               sts = null_process_zoned_cmd(cmd, op, sector, nr_sectors);
        else
-               cmd->error = null_process_cmd(cmd, op, sector, nr_sectors);
+               sts = null_process_cmd(cmd, op, sector, nr_sectors);
+
+       /* Do not overwrite errors (e.g. timeout errors) */
+       if (cmd->error == BLK_STS_OK)
+               cmd->error = sts;
 
 out:
        nullb_complete_cmd(cmd);
@@ -1451,8 +1454,20 @@ static bool should_requeue_request(struct request *rq)
 
 static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
 {
+       struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
+
        pr_info("rq %p timed out\n", rq);
-       blk_mq_complete_request(rq);
+
+       /*
+        * If the device is marked as blocking (i.e. memory backed or zoned
+        * device), the submission path may be blocked waiting for resources
+        * and cause real timeouts. For these real timeouts, the submission
+        * path will complete the request using blk_mq_complete_request().
+        * Only fake timeouts need to execute blk_mq_complete_request() here.
+        */
+       cmd->error = BLK_STS_TIMEOUT;
+       if (cmd->fake_timeout)
+               blk_mq_complete_request(rq);
        return BLK_EH_DONE;
 }
 
@@ -1473,6 +1488,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
        cmd->rq = bd->rq;
        cmd->error = BLK_STS_OK;
        cmd->nq = nq;
+       cmd->fake_timeout = should_timeout_request(bd->rq);
 
        blk_mq_start_request(bd->rq);
 
@@ -1489,7 +1505,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
                        return BLK_STS_OK;
                }
        }
-       if (should_timeout_request(bd->rq))
+       if (cmd->fake_timeout)
                return BLK_STS_OK;
 
        return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
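
The null_blk hunks record BLK_STS_TIMEOUT in the timeout handler, complete the request there only when the timeout was injected (the new fake_timeout flag), and make the normal completion path keep an already-recorded error instead of overwriting it. The first-error-wins part as a standalone sketch:

    #include <stdio.h>

    enum status { STS_OK = 0, STS_TIMEOUT, STS_IOERR };

    struct cmd {
            enum status error;
    };

    /* Record a new status only if no earlier error (e.g. a timeout)
     * has been recorded already. */
    static void cmd_set_status(struct cmd *c, enum status sts)
    {
            if (c->error == STS_OK)
                    c->error = sts;
    }

    int main(void)
    {
            struct cmd c = { .error = STS_OK };

            cmd_set_status(&c, STS_TIMEOUT);   /* timeout handler fires first */
            cmd_set_status(&c, STS_OK);        /* normal completion arrives later */
            printf("final status: %d\n", c.error);   /* still STS_TIMEOUT */
            return 0;
    }
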
index 83504f3..4876d5a 100644 (file)
@@ -22,6 +22,7 @@ struct nullb_cmd {
        blk_status_t error;
        struct nullb_queue *nq;
        struct hrtimer timer;
+       bool fake_timeout;
 };
 
 struct nullb_queue {
index 1cdf09f..14e4528 100644 (file)
@@ -891,7 +891,7 @@ next:
 out:
        for (i = last_map; i < num; i++) {
                /* Don't zap current batch's valid persistent grants. */
-               if(i >= last_map + segs_to_map)
+               if(i >= map_until)
                        pages[i]->persistent_gnt = NULL;
                pages[i]->handle = BLKBACK_INVALID_HANDLE;
        }
index 52683fd..5cbfbd9 100644 (file)
@@ -4849,8 +4849,8 @@ static int btusb_probe(struct usb_interface *intf,
                        data->diag = NULL;
        }
 
-       if (!enable_autosuspend)
-               usb_disable_autosuspend(data->udev);
+       if (enable_autosuspend)
+               usb_enable_autosuspend(data->udev);
 
        err = hci_register_dev(hdev);
        if (err < 0)
@@ -4910,9 +4910,6 @@ static void btusb_disconnect(struct usb_interface *intf)
                gpiod_put(data->reset_gpio);
 
        hci_free_dev(hdev);
-
-       if (!enable_autosuspend)
-               usb_enable_autosuspend(data->udev);
 }
 
 #ifdef CONFIG_PM
index b20fdcb..fd87a59 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Turris Mox module configuration bus driver
  *
- * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
+ * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
  */
 
 #include <dt-bindings/bus/moxtet.h>
@@ -879,6 +879,6 @@ static void __exit moxtet_exit(void)
 }
 module_exit(moxtet_exit);
 
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
 MODULE_DESCRIPTION("CZ.NIC's Turris Mox module configuration bus");
 MODULE_LICENSE("GPL v2");
index dd9e734..ea04249 100644 (file)
@@ -618,7 +618,7 @@ mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end)
                 * This part of the memory is above 4 GB, so we don't
                 * care for the MBus bridge hole.
                 */
-               if (reg_start >= 0x100000000ULL)
+               if ((u64)reg_start >= 0x100000000ULL)
                        continue;
 
                /*
index a086dd3..4f501e4 100644 (file)
@@ -125,7 +125,7 @@ config AGP_HP_ZX1
 
 config AGP_PARISC
        tristate "HP Quicksilver AGP support"
-       depends on AGP && PARISC && 64BIT
+       depends on AGP && PARISC && 64BIT && IOMMU_SBA
        help
          This option gives you AGP GART support for the HP Quicksilver
          AGP bus adapter on HP PA-RISC machines (Ok, just on the C8000
index 4f7bf39..4e4b6d3 100644 (file)
@@ -66,7 +66,14 @@ EXPORT_SYMBOL_GPL(clk_fixed_factor_ops);
 
 static void devm_clk_hw_register_fixed_factor_release(struct device *dev, void *res)
 {
-       clk_hw_unregister_fixed_factor(&((struct clk_fixed_factor *)res)->hw);
+       struct clk_fixed_factor *fix = res;
+
+       /*
+        * We can not use clk_hw_unregister_fixed_factor, since it will kfree()
+        * the hw, resulting in double free. Just unregister the hw and let
+        * devres code kfree() it.
+        */
+       clk_hw_unregister(&fix->hw);
 }
 
 static struct clk_hw *
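
The clk-fixed-factor.c fix rests on the rule spelled out in the new comment: the devres core frees the devres-allocated container itself after the release callback returns, so the callback must only unregister the hw and must not kfree() it a second time. The ownership split, sketched with a toy managed allocator (invented names, not the devres API):

    #include <stdio.h>
    #include <stdlib.h>

    struct fixed_factor {
            int hw;                         /* stand-in for struct clk_hw */
    };

    typedef void (*release_fn)(void *res);

    /* Toy devres: run the release callback, then free the container. */
    static void managed_release(void *res, release_fn release)
    {
            release(res);
            free(res);                      /* the framework owns this memory */
    }

    static void unregister_hw(int *hw)      { printf("unregister hw %d\n", *hw); }

    /* Correct release: tear down only, never free(res) here. */
    static void fixed_factor_release(void *res)
    {
            struct fixed_factor *fix = res;

            unregister_hw(&fix->hw);
    }

    int main(void)
    {
            struct fixed_factor *fix = malloc(sizeof(*fix));

            if (!fix)
                    return 1;
            fix->hw = 7;
            managed_release(fix, fixed_factor_release);
            return 0;
    }
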
index 5052541..39cfc6c 100644 (file)
@@ -4357,20 +4357,19 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
        /* search the list of notifiers for this clk */
        list_for_each_entry(cn, &clk_notifier_list, node)
                if (cn->clk == clk)
-                       break;
+                       goto found;
 
        /* if clk wasn't in the notifier list, allocate new clk_notifier */
-       if (cn->clk != clk) {
-               cn = kzalloc(sizeof(*cn), GFP_KERNEL);
-               if (!cn)
-                       goto out;
+       cn = kzalloc(sizeof(*cn), GFP_KERNEL);
+       if (!cn)
+               goto out;
 
-               cn->clk = clk;
-               srcu_init_notifier_head(&cn->notifier_head);
+       cn->clk = clk;
+       srcu_init_notifier_head(&cn->notifier_head);
 
-               list_add(&cn->node, &clk_notifier_list);
-       }
+       list_add(&cn->node, &clk_notifier_list);
 
+found:
        ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
 
        clk->core->notifier_count++;
@@ -4395,32 +4394,28 @@ EXPORT_SYMBOL_GPL(clk_notifier_register);
  */
 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
 {
-       struct clk_notifier *cn = NULL;
-       int ret = -EINVAL;
+       struct clk_notifier *cn;
+       int ret = -ENOENT;
 
        if (!clk || !nb)
                return -EINVAL;
 
        clk_prepare_lock();
 
-       list_for_each_entry(cn, &clk_notifier_list, node)
-               if (cn->clk == clk)
-                       break;
-
-       if (cn->clk == clk) {
-               ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
+       list_for_each_entry(cn, &clk_notifier_list, node) {
+               if (cn->clk == clk) {
+                       ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
 
-               clk->core->notifier_count--;
+                       clk->core->notifier_count--;
 
-               /* XXX the notifier code should handle this better */
-               if (!cn->notifier_head.head) {
-                       srcu_cleanup_notifier_head(&cn->notifier_head);
-                       list_del(&cn->node);
-                       kfree(cn);
+                       /* XXX the notifier code should handle this better */
+                       if (!cn->notifier_head.head) {
+                               srcu_cleanup_notifier_head(&cn->notifier_head);
+                               list_del(&cn->node);
+                               kfree(cn);
+                       }
+                       break;
                }
-
-       } else {
-               ret = -ENOENT;
        }
 
        clk_prepare_unlock();
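
Both clk.c hunks stop inspecting the list cursor after the loop: when list_for_each_entry() runs off the end of clk_notifier_list, the cursor points at the list head container rather than a real clk_notifier, so testing cn->clk there is bogus. The register path now jumps to a found: label and the unregister path does its work inside the match branch. The pitfall and the fix in a plain linked-list sketch:

    #include <stddef.h>
    #include <stdio.h>

    struct notifier {
            int clk_id;
            struct notifier *next;
    };

    /* Return the matching entry or NULL; never inspect the cursor
     * after the loop has run off the end of the list. */
    static struct notifier *find_notifier(struct notifier *head, int clk_id)
    {
            struct notifier *n;

            for (n = head; n; n = n->next)
                    if (n->clk_id == clk_id)
                            return n;       /* the "goto found" equivalent */
            return NULL;                    /* not found: allocate a new one */
    }

    int main(void)
    {
            struct notifier b = { 2, NULL };
            struct notifier a = { 1, &b };

            printf("clk 2 %s\n", find_notifier(&a, 2) ? "found" : "missing");
            printf("clk 9 %s\n", find_notifier(&a, 9) ? "found" : "missing");
            return 0;
    }
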
index dbac565..9bcf2f8 100644 (file)
@@ -304,7 +304,7 @@ static struct clk_rcg2 cam_cc_bps_clk_src = {
                .name = "cam_cc_bps_clk_src",
                .parent_data = cam_cc_parent_data_2,
                .num_parents = 5,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -325,7 +325,7 @@ static struct clk_rcg2 cam_cc_cci_0_clk_src = {
                .name = "cam_cc_cci_0_clk_src",
                .parent_data = cam_cc_parent_data_5,
                .num_parents = 3,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -339,7 +339,7 @@ static struct clk_rcg2 cam_cc_cci_1_clk_src = {
                .name = "cam_cc_cci_1_clk_src",
                .parent_data = cam_cc_parent_data_5,
                .num_parents = 3,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -360,7 +360,7 @@ static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
                .name = "cam_cc_cphy_rx_clk_src",
                .parent_data = cam_cc_parent_data_3,
                .num_parents = 6,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -379,7 +379,7 @@ static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
                .name = "cam_cc_csi0phytimer_clk_src",
                .parent_data = cam_cc_parent_data_0,
                .num_parents = 4,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -393,7 +393,7 @@ static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
                .name = "cam_cc_csi1phytimer_clk_src",
                .parent_data = cam_cc_parent_data_0,
                .num_parents = 4,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -407,7 +407,7 @@ static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
                .name = "cam_cc_csi2phytimer_clk_src",
                .parent_data = cam_cc_parent_data_0,
                .num_parents = 4,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -421,7 +421,7 @@ static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
                .name = "cam_cc_csi3phytimer_clk_src",
                .parent_data = cam_cc_parent_data_0,
                .num_parents = 4,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -443,7 +443,7 @@ static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
                .name = "cam_cc_fast_ahb_clk_src",
                .parent_data = cam_cc_parent_data_0,
                .num_parents = 4,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -466,7 +466,7 @@ static struct clk_rcg2 cam_cc_icp_clk_src = {
                .name = "cam_cc_icp_clk_src",
                .parent_data = cam_cc_parent_data_2,
                .num_parents = 5,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -488,7 +488,7 @@ static struct clk_rcg2 cam_cc_ife_0_clk_src = {
                .name = "cam_cc_ife_0_clk_src",
                .parent_data = cam_cc_parent_data_4,
                .num_parents = 4,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -510,7 +510,7 @@ static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
                .name = "cam_cc_ife_0_csid_clk_src",
                .parent_data = cam_cc_parent_data_3,
                .num_parents = 6,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -524,7 +524,7 @@ static struct clk_rcg2 cam_cc_ife_1_clk_src = {
                .name = "cam_cc_ife_1_clk_src",
                .parent_data = cam_cc_parent_data_4,
                .num_parents = 4,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -538,7 +538,7 @@ static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
                .name = "cam_cc_ife_1_csid_clk_src",
                .parent_data = cam_cc_parent_data_3,
                .num_parents = 6,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -553,7 +553,7 @@ static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
                .parent_data = cam_cc_parent_data_4,
                .num_parents = 4,
                .flags = CLK_SET_RATE_PARENT,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -567,7 +567,7 @@ static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
                .name = "cam_cc_ife_lite_csid_clk_src",
                .parent_data = cam_cc_parent_data_3,
                .num_parents = 6,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -590,7 +590,7 @@ static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
                .name = "cam_cc_ipe_0_clk_src",
                .parent_data = cam_cc_parent_data_2,
                .num_parents = 5,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -613,7 +613,7 @@ static struct clk_rcg2 cam_cc_jpeg_clk_src = {
                .name = "cam_cc_jpeg_clk_src",
                .parent_data = cam_cc_parent_data_2,
                .num_parents = 5,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -635,7 +635,7 @@ static struct clk_rcg2 cam_cc_lrme_clk_src = {
                .name = "cam_cc_lrme_clk_src",
                .parent_data = cam_cc_parent_data_6,
                .num_parents = 5,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -656,7 +656,7 @@ static struct clk_rcg2 cam_cc_mclk0_clk_src = {
                .name = "cam_cc_mclk0_clk_src",
                .parent_data = cam_cc_parent_data_1,
                .num_parents = 3,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -670,7 +670,7 @@ static struct clk_rcg2 cam_cc_mclk1_clk_src = {
                .name = "cam_cc_mclk1_clk_src",
                .parent_data = cam_cc_parent_data_1,
                .num_parents = 3,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -684,7 +684,7 @@ static struct clk_rcg2 cam_cc_mclk2_clk_src = {
                .name = "cam_cc_mclk2_clk_src",
                .parent_data = cam_cc_parent_data_1,
                .num_parents = 3,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -698,7 +698,7 @@ static struct clk_rcg2 cam_cc_mclk3_clk_src = {
                .name = "cam_cc_mclk3_clk_src",
                .parent_data = cam_cc_parent_data_1,
                .num_parents = 3,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -712,7 +712,7 @@ static struct clk_rcg2 cam_cc_mclk4_clk_src = {
                .name = "cam_cc_mclk4_clk_src",
                .parent_data = cam_cc_parent_data_1,
                .num_parents = 3,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
@@ -732,7 +732,7 @@ static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
                .parent_data = cam_cc_parent_data_0,
                .num_parents = 4,
                .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_shared_ops,
        },
 };
 
index 43ecd50..cf94a12 100644 (file)
@@ -99,7 +99,7 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
                val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
                val &= GENMASK(socfpgaclk->width - 1, 0);
                /* Check for GPIO_DB_CLK by its offset */
-               if ((int) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET)
+               if ((uintptr_t) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET)
                        div = val + 1;
                else
                        div = (1 << val);
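
The socfpga hunk casts the divider register pointer through uintptr_t before masking. Casting a pointer to int truncates it on 64-bit builds and draws a pointer-to-int-cast warning; uintptr_t is the integer type sized to hold a pointer, so the low offset bits can be tested without losing anything. A minimal sketch (assumes a 64-bit build; the address is made up and never dereferenced):

    #include <stdint.h>
    #include <stdio.h>

    #define GPIO_DB_CLK_OFFSET 0xA8u        /* low-order offset bits to test */

    int main(void)
    {
            uintptr_t base = 0x100000000ULL;        /* deliberately above 32 bits */
            void *reg = (void *)(base + 0xA8);      /* fake MMIO-style pointer */

            /* Full pointer width is preserved through uintptr_t. */
            if ((uintptr_t)reg & GPIO_DB_CLK_OFFSET)
                    printf("GPIO_DB_CLK register\n");
            else
                    printf("other divider register\n");

            return 0;
    }
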
index d3f756f..67e56cf 100644 (file)
@@ -267,7 +267,7 @@ struct freq_attr cpufreq_freq_attr_##_name##_freqs =     \
 __ATTR_RO(_name##_frequencies)
 
 /*
- * show_scaling_available_frequencies - show available normal frequencies for
+ * scaling_available_frequencies_show - show available normal frequencies for
  * the specified CPU
  */
 static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy,
@@ -279,7 +279,7 @@ cpufreq_attr_available_freq(scaling_available);
 EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
 
 /*
- * show_available_boost_freqs - show available boost frequencies for
+ * scaling_boost_frequencies_show - show available boost frequencies for
  * the specified CPU
  */
 static ssize_t scaling_boost_frequencies_show(struct cpufreq_policy *policy,
index 244cb7d..2acc617 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/security.h>
 #include <linux/debugfs.h>
 #include <linux/module.h>
+#include <linux/sizes.h>
 #include <linux/mutex.h>
 #include <linux/cdev.h>
 #include <linux/idr.h>
@@ -96,21 +97,18 @@ struct mbox_cmd {
  * @dev: driver core device object
  * @cdev: char dev core object for ioctl operations
  * @cxlm: pointer to the parent device driver data
- * @ops_active: active user of @cxlm in ops handlers
- * @ops_dead: completion when all @cxlm ops users have exited
  * @id: id number of this memdev instance.
  */
 struct cxl_memdev {
        struct device dev;
        struct cdev cdev;
        struct cxl_mem *cxlm;
-       struct percpu_ref ops_active;
-       struct completion ops_dead;
        int id;
 };
 
 static int cxl_mem_major;
 static DEFINE_IDA(cxl_memdev_ida);
+static DECLARE_RWSEM(cxl_memdev_rwsem);
 static struct dentry *cxl_debugfs;
 static bool cxl_raw_allow_all;
 
@@ -169,7 +167,7 @@ struct cxl_mem_command {
  * table will be validated against the user's input. For example, if size_in is
  * 0, and the user passed in 1, it is an error.
  */
-static struct cxl_mem_command mem_commands[] = {
+static struct cxl_mem_command mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
        CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
 #ifdef CONFIG_CXL_MEM_RAW_COMMANDS
        CXL_CMD(RAW, ~0, ~0, 0),
@@ -776,26 +774,43 @@ static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
 static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
 {
-       struct cxl_memdev *cxlmd;
-       struct inode *inode;
-       int rc = -ENOTTY;
+       struct cxl_memdev *cxlmd = file->private_data;
+       int rc = -ENXIO;
 
-       inode = file_inode(file);
-       cxlmd = container_of(inode->i_cdev, typeof(*cxlmd), cdev);
+       down_read(&cxl_memdev_rwsem);
+       if (cxlmd->cxlm)
+               rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
+       up_read(&cxl_memdev_rwsem);
 
-       if (!percpu_ref_tryget_live(&cxlmd->ops_active))
-               return -ENXIO;
+       return rc;
+}
 
-       rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
+static int cxl_memdev_open(struct inode *inode, struct file *file)
+{
+       struct cxl_memdev *cxlmd =
+               container_of(inode->i_cdev, typeof(*cxlmd), cdev);
 
-       percpu_ref_put(&cxlmd->ops_active);
+       get_device(&cxlmd->dev);
+       file->private_data = cxlmd;
 
-       return rc;
+       return 0;
+}
+
+static int cxl_memdev_release_file(struct inode *inode, struct file *file)
+{
+       struct cxl_memdev *cxlmd =
+               container_of(inode->i_cdev, typeof(*cxlmd), cdev);
+
+       put_device(&cxlmd->dev);
+
+       return 0;
 }
 
 static const struct file_operations cxl_memdev_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = cxl_memdev_ioctl,
+       .open = cxl_memdev_open,
+       .release = cxl_memdev_release_file,
        .compat_ioctl = compat_ptr_ioctl,
        .llseek = noop_llseek,
 };
@@ -984,7 +999,7 @@ static struct cxl_mem *cxl_mem_create(struct pci_dev *pdev, u32 reg_lo,
                return NULL;
        }
 
-       offset = ((u64)reg_hi << 32) | FIELD_GET(CXL_REGLOC_ADDR_MASK, reg_lo);
+       offset = ((u64)reg_hi << 32) | (reg_lo & CXL_REGLOC_ADDR_MASK);
        bar = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo);
 
        /* Basic sanity check that BAR is big enough */
@@ -1049,7 +1064,6 @@ static void cxl_memdev_release(struct device *dev)
 {
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 
-       percpu_ref_exit(&cxlmd->ops_active);
        ida_free(&cxl_memdev_ida, cxlmd->id);
        kfree(cxlmd);
 }
@@ -1066,7 +1080,7 @@ static ssize_t firmware_version_show(struct device *dev,
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_mem *cxlm = cxlmd->cxlm;
 
-       return sprintf(buf, "%.16s\n", cxlm->firmware_version);
+       return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version);
 }
 static DEVICE_ATTR_RO(firmware_version);
 
@@ -1076,7 +1090,7 @@ static ssize_t payload_max_show(struct device *dev,
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_mem *cxlm = cxlmd->cxlm;
 
-       return sprintf(buf, "%zu\n", cxlm->payload_size);
+       return sysfs_emit(buf, "%zu\n", cxlm->payload_size);
 }
 static DEVICE_ATTR_RO(payload_max);
 
@@ -1087,7 +1101,7 @@ static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
        struct cxl_mem *cxlm = cxlmd->cxlm;
        unsigned long long len = range_len(&cxlm->ram_range);
 
-       return sprintf(buf, "%#llx\n", len);
+       return sysfs_emit(buf, "%#llx\n", len);
 }
 
 static struct device_attribute dev_attr_ram_size =
@@ -1100,7 +1114,7 @@ static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
        struct cxl_mem *cxlm = cxlmd->cxlm;
        unsigned long long len = range_len(&cxlm->pmem_range);
 
-       return sprintf(buf, "%#llx\n", len);
+       return sysfs_emit(buf, "%#llx\n", len);
 }
 
 static struct device_attribute dev_attr_pmem_size =
@@ -1150,27 +1164,24 @@ static const struct device_type cxl_memdev_type = {
        .groups = cxl_memdev_attribute_groups,
 };
 
-static void cxlmdev_unregister(void *_cxlmd)
+static void cxl_memdev_shutdown(struct cxl_memdev *cxlmd)
 {
-       struct cxl_memdev *cxlmd = _cxlmd;
-       struct device *dev = &cxlmd->dev;
-
-       percpu_ref_kill(&cxlmd->ops_active);
-       cdev_device_del(&cxlmd->cdev, dev);
-       wait_for_completion(&cxlmd->ops_dead);
+       down_write(&cxl_memdev_rwsem);
        cxlmd->cxlm = NULL;
-       put_device(dev);
+       up_write(&cxl_memdev_rwsem);
 }
 
-static void cxlmdev_ops_active_release(struct percpu_ref *ref)
+static void cxl_memdev_unregister(void *_cxlmd)
 {
-       struct cxl_memdev *cxlmd =
-               container_of(ref, typeof(*cxlmd), ops_active);
+       struct cxl_memdev *cxlmd = _cxlmd;
+       struct device *dev = &cxlmd->dev;
 
-       complete(&cxlmd->ops_dead);
+       cdev_device_del(&cxlmd->cdev, dev);
+       cxl_memdev_shutdown(cxlmd);
+       put_device(dev);
 }
 
-static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
+static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm)
 {
        struct pci_dev *pdev = cxlm->pdev;
        struct cxl_memdev *cxlmd;
@@ -1180,22 +1191,11 @@ static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
 
        cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
        if (!cxlmd)
-               return -ENOMEM;
-       init_completion(&cxlmd->ops_dead);
-
-       /*
-        * @cxlm is deallocated when the driver unbinds so operations
-        * that are using it need to hold a live reference.
-        */
-       cxlmd->cxlm = cxlm;
-       rc = percpu_ref_init(&cxlmd->ops_active, cxlmdev_ops_active_release, 0,
-                            GFP_KERNEL);
-       if (rc)
-               goto err_ref;
+               return ERR_PTR(-ENOMEM);
 
        rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS, GFP_KERNEL);
        if (rc < 0)
-               goto err_id;
+               goto err;
        cxlmd->id = rc;
 
        dev = &cxlmd->dev;
@@ -1204,30 +1204,54 @@ static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
        dev->bus = &cxl_bus_type;
        dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
        dev->type = &cxl_memdev_type;
-       dev_set_name(dev, "mem%d", cxlmd->id);
+       device_set_pm_not_required(dev);
 
        cdev = &cxlmd->cdev;
        cdev_init(cdev, &cxl_memdev_fops);
+       return cxlmd;
+
+err:
+       kfree(cxlmd);
+       return ERR_PTR(rc);
+}
+
+static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
+{
+       struct cxl_memdev *cxlmd;
+       struct device *dev;
+       struct cdev *cdev;
+       int rc;
+
+       cxlmd = cxl_memdev_alloc(cxlm);
+       if (IS_ERR(cxlmd))
+               return PTR_ERR(cxlmd);
+
+       dev = &cxlmd->dev;
+       rc = dev_set_name(dev, "mem%d", cxlmd->id);
+       if (rc)
+               goto err;
+
+       /*
+        * Activate ioctl operations, no cxl_memdev_rwsem manipulation
+        * needed as this is ordered with cdev_add() publishing the device.
+        */
+       cxlmd->cxlm = cxlm;
 
+       cdev = &cxlmd->cdev;
        rc = cdev_device_add(cdev, dev);
        if (rc)
-               goto err_add;
+               goto err;
 
-       return devm_add_action_or_reset(dev->parent, cxlmdev_unregister, cxlmd);
+       return devm_add_action_or_reset(dev->parent, cxl_memdev_unregister,
+                                       cxlmd);
 
-err_add:
-       ida_free(&cxl_memdev_ida, cxlmd->id);
-err_id:
+err:
        /*
-        * Theoretically userspace could have already entered the fops,
-        * so flush ops_active.
+        * The cdev was briefly live, shutdown any ioctl operations that
+        * saw that state.
         */
-       percpu_ref_kill(&cxlmd->ops_active);
-       wait_for_completion(&cxlmd->ops_dead);
-       percpu_ref_exit(&cxlmd->ops_active);
-err_ref:
-       kfree(cxlmd);
-
+       cxl_memdev_shutdown(cxlmd);
+       put_device(dev);
        return rc;
 }
 
@@ -1396,6 +1420,7 @@ out:
  */
 static int cxl_mem_identify(struct cxl_mem *cxlm)
 {
+       /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
        struct cxl_mbox_identify {
                char fw_revision[0x10];
                __le64 total_capacity;
@@ -1424,10 +1449,11 @@ static int cxl_mem_identify(struct cxl_mem *cxlm)
         * For now, only the capacity is exported in sysfs
         */
        cxlm->ram_range.start = 0;
-       cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) - 1;
+       cxlm->ram_range.end = le64_to_cpu(id.volatile_capacity) * SZ_256M - 1;
 
        cxlm->pmem_range.start = 0;
-       cxlm->pmem_range.end = le64_to_cpu(id.persistent_capacity) - 1;
+       cxlm->pmem_range.end =
+               le64_to_cpu(id.persistent_capacity) * SZ_256M - 1;
 
        memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision));
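
The cxl/mem.c rework replaces the per-memdev percpu_ref and completion with a single rwsem: the ioctl path takes it for reading and bails out with -ENXIO if cxlmd->cxlm has been cleared, while unregistration takes it for writing and clears the pointer. It also switches the sysfs show functions to sysfs_emit() and scales the Identify capacity fields, which are reported in 256MB units, by SZ_256M. A pthread_rwlock sketch of the teardown half (illustrative names only):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t dev_rwsem = PTHREAD_RWLOCK_INITIALIZER;
    static int *backing;                    /* stands in for cxlmd->cxlm */

    static int do_ioctl(void)
    {
            int rc = -1;                    /* -ENXIO in the driver */

            pthread_rwlock_rdlock(&dev_rwsem);
            if (backing)
                    rc = *backing;          /* safe: teardown is excluded */
            pthread_rwlock_unlock(&dev_rwsem);
            return rc;
    }

    static void shutdown_dev(void)
    {
            /* Writer side: once this returns, no ioctl can see the
             * stale backing data again. */
            pthread_rwlock_wrlock(&dev_rwsem);
            backing = NULL;
            pthread_rwlock_unlock(&dev_rwsem);
    }

    int main(void)
    {
            int data = 42;

            backing = &data;
            printf("ioctl before shutdown: %d\n", do_ioctl());
            shutdown_dev();
            printf("ioctl after shutdown:  %d\n", do_ioctl());
            return 0;
    }
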
 
index 452e85a..5aee26e 100644 (file)
@@ -90,13 +90,11 @@ static ssize_t do_id_store(struct device_driver *drv, const char *buf,
                                list_add(&dax_id->list, &dax_drv->ids);
                        } else
                                rc = -ENOMEM;
-               } else
-                       /* nothing to remove */;
+               }
        } else if (action == ID_REMOVE) {
                list_del(&dax_id->list);
                kfree(dax_id);
-       } else
-               /* dax_id already added */;
+       }
        mutex_unlock(&dax_bus_lock);
 
        if (rc < 0)
index fe6a460..af3ee28 100644 (file)
@@ -1086,6 +1086,7 @@ static int __dma_async_device_channel_register(struct dma_device *device,
        kfree(chan->dev);
  err_free_local:
        free_percpu(chan->local);
+       chan->local = NULL;
        return rc;
 }
 
index e516269..db25f9b 100644 (file)
@@ -10,6 +10,7 @@ config DW_DMAC_CORE
 
 config DW_DMAC
        tristate "Synopsys DesignWare AHB DMA platform driver"
+       depends on HAS_IOMEM
        select DW_DMAC_CORE
        help
          Support the Synopsys DesignWare AHB DMA controller. This
@@ -18,6 +19,7 @@ config DW_DMAC
 config DW_DMAC_PCI
        tristate "Synopsys DesignWare AHB DMA PCI driver"
        depends on PCI
+       depends on HAS_IOMEM
        select DW_DMAC_CORE
        help
          Support the Synopsys DesignWare AHB DMA controller on the
index 84a6ea6..31c8195 100644 (file)
@@ -282,6 +282,22 @@ void idxd_wq_drain(struct idxd_wq *wq)
        idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);
 }
 
+void idxd_wq_reset(struct idxd_wq *wq)
+{
+       struct idxd_device *idxd = wq->idxd;
+       struct device *dev = &idxd->pdev->dev;
+       u32 operand;
+
+       if (wq->state != IDXD_WQ_ENABLED) {
+               dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
+               return;
+       }
+
+       operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
+       idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
+       wq->state = IDXD_WQ_DISABLED;
+}
+
 int idxd_wq_map_portal(struct idxd_wq *wq)
 {
        struct idxd_device *idxd = wq->idxd;
@@ -363,8 +379,6 @@ int idxd_wq_disable_pasid(struct idxd_wq *wq)
 void idxd_wq_disable_cleanup(struct idxd_wq *wq)
 {
        struct idxd_device *idxd = wq->idxd;
-       struct device *dev = &idxd->pdev->dev;
-       int i, wq_offset;
 
        lockdep_assert_held(&idxd->dev_lock);
        memset(wq->wqcfg, 0, idxd->wqcfg_size);
@@ -376,14 +390,6 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
        wq->ats_dis = 0;
        clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
        memset(wq->name, 0, WQ_NAME_SIZE);
-
-       for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
-               wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
-               iowrite32(0, idxd->reg_base + wq_offset);
-               dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
-                       wq->id, i, wq_offset,
-                       ioread32(idxd->reg_base + wq_offset));
-       }
 }
 
 /* Device control bits */
@@ -574,6 +580,36 @@ void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
 }
 
 /* Device configuration bits */
+void idxd_msix_perm_setup(struct idxd_device *idxd)
+{
+       union msix_perm mperm;
+       int i, msixcnt;
+
+       msixcnt = pci_msix_vec_count(idxd->pdev);
+       if (msixcnt < 0)
+               return;
+
+       mperm.bits = 0;
+       mperm.pasid = idxd->pasid;
+       mperm.pasid_en = device_pasid_enabled(idxd);
+       for (i = 1; i < msixcnt; i++)
+               iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
+}
+
+void idxd_msix_perm_clear(struct idxd_device *idxd)
+{
+       union msix_perm mperm;
+       int i, msixcnt;
+
+       msixcnt = pci_msix_vec_count(idxd->pdev);
+       if (msixcnt < 0)
+               return;
+
+       mperm.bits = 0;
+       for (i = 1; i < msixcnt; i++)
+               iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
+}
+
 static void idxd_group_config_write(struct idxd_group *group)
 {
        struct idxd_device *idxd = group->idxd;
@@ -642,7 +678,14 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
        if (!wq->group)
                return 0;
 
-       memset(wq->wqcfg, 0, idxd->wqcfg_size);
+       /*
+        * Instead of memset the entire shadow copy of WQCFG, copy from the hardware after
+        * wq reset. This will copy back the sticky values that are present on some devices.
+        */
+       for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
+               wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
+               wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
+       }
 
        /* byte 0-3 */
        wq->wqcfg->wq_size = wq->size;
index 81a0e65..76014c1 100644 (file)
@@ -316,6 +316,8 @@ void idxd_unregister_driver(void);
 struct bus_type *idxd_get_bus_type(struct idxd_device *idxd);
 
 /* device interrupt control */
+void idxd_msix_perm_setup(struct idxd_device *idxd);
+void idxd_msix_perm_clear(struct idxd_device *idxd);
 irqreturn_t idxd_irq_handler(int vec, void *data);
 irqreturn_t idxd_misc_thread(int vec, void *data);
 irqreturn_t idxd_wq_thread(int irq, void *data);
@@ -341,6 +343,7 @@ void idxd_wq_free_resources(struct idxd_wq *wq);
 int idxd_wq_enable(struct idxd_wq *wq);
 int idxd_wq_disable(struct idxd_wq *wq);
 void idxd_wq_drain(struct idxd_wq *wq);
+void idxd_wq_reset(struct idxd_wq *wq);
 int idxd_wq_map_portal(struct idxd_wq *wq);
 void idxd_wq_unmap_portal(struct idxd_wq *wq);
 void idxd_wq_disable_cleanup(struct idxd_wq *wq);
index 085a0c3..6584b0e 100644 (file)
@@ -65,7 +65,6 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
        struct idxd_irq_entry *irq_entry;
        int i, msixcnt;
        int rc = 0;
-       union msix_perm mperm;
 
        msixcnt = pci_msix_vec_count(pdev);
        if (msixcnt < 0) {
@@ -144,14 +143,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
        }
 
        idxd_unmask_error_interrupts(idxd);
-
-       /* Setup MSIX permission table */
-       mperm.bits = 0;
-       mperm.pasid = idxd->pasid;
-       mperm.pasid_en = device_pasid_enabled(idxd);
-       for (i = 1; i < msixcnt; i++)
-               iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
-
+       idxd_msix_perm_setup(idxd);
        return 0;
 
  err_no_irq:
@@ -510,6 +502,7 @@ static void idxd_shutdown(struct pci_dev *pdev)
                idxd_flush_work_list(irq_entry);
        }
 
+       idxd_msix_perm_clear(idxd);
        destroy_workqueue(idxd->wq);
 }
 
index a60ca11..f1463fc 100644 (file)
@@ -124,7 +124,9 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
                for (i = 0; i < 4; i++)
                        idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
                                        IDXD_SWERR_OFFSET + i * sizeof(u64));
-               iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET);
+
+               iowrite64(idxd->sw_err.bits[0] & IDXD_SWERR_ACK,
+                         idxd->reg_base + IDXD_SWERR_OFFSET);
 
                if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
                        int id = idxd->sw_err.wq_idx;
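
The idxd irq.c hunk writes back only the SWERR bits that were actually read (sw_err.bits[0] & IDXD_SWERR_ACK) instead of the full acknowledge mask, so bits that latched after the read are not cleared blindly in the write-1-to-clear register. The idea, simulated with a fake W1C register in plain C:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t swerr_reg;              /* simulated W1C error register */

    /* Writing a 1 to a bit clears it; writing 0 leaves it alone. */
    static void reg_write(uint64_t val)     { swerr_reg &= ~val; }
    static uint64_t reg_read(void)          { return swerr_reg; }

    int main(void)
    {
            uint64_t seen;

            swerr_reg = 0x3;                /* two errors pending */
            seen = reg_read();

            swerr_reg |= 0x4;               /* a new error races in */

            reg_write(seen);                /* ack only what we observed */
            printf("still pending: %#llx\n",
                   (unsigned long long)reg_read());    /* 0x4 survives */
            return 0;
    }
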
index 4dbb03c..18bf4d1 100644 (file)
@@ -275,7 +275,6 @@ static void disable_wq(struct idxd_wq *wq)
 {
        struct idxd_device *idxd = wq->idxd;
        struct device *dev = &idxd->pdev->dev;
-       int rc;
 
        mutex_lock(&wq->wq_lock);
        dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
@@ -296,17 +295,13 @@ static void disable_wq(struct idxd_wq *wq)
        idxd_wq_unmap_portal(wq);
 
        idxd_wq_drain(wq);
-       rc = idxd_wq_disable(wq);
+       idxd_wq_reset(wq);
 
        idxd_wq_free_resources(wq);
        wq->client_count = 0;
        mutex_unlock(&wq->wq_lock);
 
-       if (rc < 0)
-               dev_warn(dev, "Failed to disable %s: %d\n",
-                        dev_name(&wq->conf_dev), rc);
-       else
-               dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
+       dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
 }
 
 static int idxd_config_bus_remove(struct device *dev)
@@ -989,7 +984,7 @@ static ssize_t wq_size_store(struct device *dev,
        if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
                return -EPERM;
 
-       if (wq->state != IDXD_WQ_DISABLED)
+       if (idxd->state == IDXD_DEV_ENABLED)
                return -EPERM;
 
        if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
@@ -1449,8 +1444,14 @@ static ssize_t op_cap_show(struct device *dev,
 {
        struct idxd_device *idxd =
                container_of(dev, struct idxd_device, conf_dev);
+       int i, rc = 0;
+
+       for (i = 0; i < 4; i++)
+               rc += sysfs_emit_at(buf, rc, "%#llx ", idxd->hw.opcap.bits[i]);
 
-       return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
+       rc--;
+       rc += sysfs_emit_at(buf, rc, "\n");
+       return rc;
 }
 static DEVICE_ATTR_RO(op_cap);
 
index f387c5b..1669345 100644 (file)
@@ -507,10 +507,8 @@ static int plx_dma_create(struct pci_dev *pdev)
 
        rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0,
                         KBUILD_MODNAME, plxdev);
-       if (rc) {
-               kfree(plxdev);
-               return rc;
-       }
+       if (rc)
+               goto free_plx;
 
        spin_lock_init(&plxdev->ring_lock);
        tasklet_setup(&plxdev->desc_task, plx_dma_desc_task);
@@ -540,14 +538,20 @@ static int plx_dma_create(struct pci_dev *pdev)
        rc = dma_async_device_register(dma);
        if (rc) {
                pci_err(pdev, "Failed to register dma device: %d\n", rc);
-               free_irq(pci_irq_vector(pdev, 0),  plxdev);
-               kfree(plxdev);
-               return rc;
+               goto put_device;
        }
 
        pci_set_drvdata(pdev, plxdev);
 
        return 0;
+
+put_device:
+       put_device(&pdev->dev);
+       free_irq(pci_irq_vector(pdev, 0),  plxdev);
+free_plx:
+       kfree(plxdev);
+
+       return rc;
 }
 
 static int plx_dma_probe(struct pci_dev *pdev,
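
The plx_dma change folds the duplicated free_irq()/kfree() cleanup into the usual goto-unwind ladder and adds a put_device() on the dma_async_device_register() failure path. The ladder shape in isolation, with invented resource names:

    #include <stdio.h>
    #include <stdlib.h>

    static int acquire_a(void)  { return 0; }       /* 0 on success */
    static int acquire_b(void)  { return -1; }      /* pretend this fails */
    static void release_res_a(void) { puts("release A"); }

    static char *create(void)
    {
            char *ctx = malloc(16);

            if (!ctx)
                    return NULL;

            if (acquire_a())
                    goto err_free;

            if (acquire_b())
                    goto err_release_a;     /* unwind in reverse order */

            return ctx;

    err_release_a:
            release_res_a();
    err_free:
            free(ctx);
            return NULL;
    }

    int main(void)
    {
            char *ctx = create();

            printf("create() %s\n", ctx ? "succeeded" : "failed");
            free(ctx);
            return 0;
    }
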
index 71827d9..b726074 100644 (file)
@@ -723,7 +723,7 @@ static void tegra_dma_issue_pending(struct dma_chan *dc)
                goto end;
        }
        if (!tdc->busy) {
-               err = pm_runtime_get_sync(tdc->tdma->dev);
+               err = pm_runtime_resume_and_get(tdc->tdma->dev);
                if (err < 0) {
                        dev_err(tdc2dev(tdc), "Failed to enable DMA\n");
                        goto end;
@@ -818,7 +818,7 @@ static void tegra_dma_synchronize(struct dma_chan *dc)
        struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
        int err;
 
-       err = pm_runtime_get_sync(tdc->tdma->dev);
+       err = pm_runtime_resume_and_get(tdc->tdma->dev);
        if (err < 0) {
                dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
                return;
index 55df63d..70b29bd 100644 (file)
@@ -839,6 +839,7 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
        struct xilinx_dpdma_tx_desc *desc;
        struct virt_dma_desc *vdesc;
        u32 reg, channels;
+       bool first_frame;
 
        lockdep_assert_held(&chan->lock);
 
@@ -852,14 +853,6 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
                chan->running = true;
        }
 
-       if (chan->video_group)
-               channels = xilinx_dpdma_chan_video_group_ready(chan);
-       else
-               channels = BIT(chan->id);
-
-       if (!channels)
-               return;
-
        vdesc = vchan_next_desc(&chan->vchan);
        if (!vdesc)
                return;
@@ -884,13 +877,26 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
                            FIELD_PREP(XILINX_DPDMA_CH_DESC_START_ADDRE_MASK,
                                       upper_32_bits(sw_desc->dma_addr)));
 
-       if (chan->first_frame)
+       first_frame = chan->first_frame;
+       chan->first_frame = false;
+
+       if (chan->video_group) {
+               channels = xilinx_dpdma_chan_video_group_ready(chan);
+               /*
+                * Trigger the transfer only when all channels in the group are
+                * ready.
+                */
+               if (!channels)
+                       return;
+       } else {
+               channels = BIT(chan->id);
+       }
+
+       if (first_frame)
                reg = XILINX_DPDMA_GBL_TRIG_MASK(channels);
        else
                reg = XILINX_DPDMA_GBL_RETRIG_MASK(channels);
 
-       chan->first_frame = false;
-
        dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
 }
 
@@ -1042,13 +1048,14 @@ static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
  */
 static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
 {
-       struct xilinx_dpdma_tx_desc *active = chan->desc.active;
+       struct xilinx_dpdma_tx_desc *active;
        unsigned long flags;
 
        spin_lock_irqsave(&chan->lock, flags);
 
        xilinx_dpdma_debugfs_desc_done_irq(chan);
 
+       active = chan->desc.active;
        if (active)
                vchan_cyclic_callback(&active->vdesc);
        else
index 0a6438c..e7a9561 100644 (file)
@@ -1241,6 +1241,7 @@ int extcon_dev_register(struct extcon_dev *edev)
                                sizeof(*edev->nh), GFP_KERNEL);
        if (!edev->nh) {
                ret = -ENOMEM;
+               device_unregister(&edev->dev);
                goto err_dev;
        }
 
index 5fd6a60..88ed971 100644 (file)
@@ -346,6 +346,7 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        struct client *client = file->private_data;
        spinlock_t *client_list_lock = &client->lynx->client_list_lock;
        struct nosy_stats stats;
+       int ret;
 
        switch (cmd) {
        case NOSY_IOC_GET_STATS:
@@ -360,11 +361,15 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        return 0;
 
        case NOSY_IOC_START:
+               ret = -EBUSY;
                spin_lock_irq(client_list_lock);
-               list_add_tail(&client->link, &client->lynx->client_list);
+               if (list_empty(&client->link)) {
+                       list_add_tail(&client->link, &client->lynx->client_list);
+                       ret = 0;
+               }
                spin_unlock_irq(client_list_lock);
 
-               return 0;
+               return ret;
 
        case NOSY_IOC_STOP:
                spin_lock_irq(client_list_lock);
index 50bb2a6..62f0d1a 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Turris Mox rWTM firmware driver
  *
- * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
+ * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
  */
 
 #include <linux/armada-37xx-rwtm-mailbox.h>
@@ -547,4 +547,4 @@ module_platform_driver(turris_mox_rwtm_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Turris Mox rWTM firmware driver");
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
index 8299909..61f9efd 100644 (file)
@@ -2,7 +2,7 @@
 /*
  *  Turris Mox Moxtet GPIO expander
  *
- *  Copyright (C) 2018 Marek Behun <marek.behun@nic.cz>
+ *  Copyright (C) 2018 Marek Behún <kabel@kernel.org>
  */
 
 #include <linux/bitops.h>
@@ -174,6 +174,6 @@ static struct moxtet_driver moxtet_gpio_driver = {
 };
 module_moxtet_driver(moxtet_gpio_driver);
 
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
 MODULE_DESCRIPTION("Turris Mox Moxtet GPIO expander");
 MODULE_LICENSE("GPL v2");
index 26c5466..ae49bb2 100644 (file)
@@ -458,6 +458,8 @@ static ssize_t export_store(struct class *class,
        long                    gpio;
        struct gpio_desc        *desc;
        int                     status;
+       struct gpio_chip        *gc;
+       int                     offset;
 
        status = kstrtol(buf, 0, &gpio);
        if (status < 0)
@@ -469,6 +471,12 @@ static ssize_t export_store(struct class *class,
                pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
                return -EINVAL;
        }
+       gc = desc->gdev->chip;
+       offset = gpio_chip_hwgpio(desc);
+       if (!gpiochip_line_is_valid(gc, offset)) {
+               pr_warn("%s: GPIO %ld masked\n", __func__, gpio);
+               return -EINVAL;
+       }
 
        /* No extra locking here; FLAG_SYSFS just signifies that the
         * request and export were done by on behalf of userspace, so
index 64beb33..a4e2cf7 100644 (file)
@@ -778,9 +778,9 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                        dev_info->high_va_offset = AMDGPU_GMC_HOLE_END;
                        dev_info->high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
                }
-               dev_info->virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
+               dev_info->virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
                dev_info->pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
-               dev_info->gart_page_size = AMDGPU_GPU_PAGE_SIZE;
+               dev_info->gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
                dev_info->cu_active_number = adev->gfx.cu_info.number;
                dev_info->cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
                dev_info->ce_ram_size = adev->gfx.ce_ram_size;
index 9fd2157..5efa331 100644 (file)
@@ -906,7 +906,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
 
        /* Allocate an SG array and squash pages into it */
        r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
-                                     ttm->num_pages << PAGE_SHIFT,
+                                     (u64)ttm->num_pages << PAGE_SHIFT,
                                      GFP_KERNEL);
        if (r)
                goto release_sg;
index ad91c0c..7d2c8b1 100644
@@ -2197,8 +2197,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
        uint64_t eaddr;
 
        /* validate the parameters */
-       if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
-           size == 0 || size & AMDGPU_GPU_PAGE_MASK)
+       if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
+           size == 0 || size & ~PAGE_MASK)
                return -EINVAL;
 
        /* make sure object fit at this offset */
@@ -2263,8 +2263,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
        int r;
 
        /* validate the parameters */
-       if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
-           size == 0 || size & AMDGPU_GPU_PAGE_MASK)
+       if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
+           size == 0 || size & ~PAGE_MASK)
                return -EINVAL;
 
        /* make sure object fit at this offset */
@@ -2409,7 +2409,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
                        after->start = eaddr + 1;
                        after->last = tmp->last;
                        after->offset = tmp->offset;
-                       after->offset += after->start - tmp->start;
+                       after->offset += (after->start - tmp->start) << PAGE_SHIFT;
                        after->flags = tmp->flags;
                        after->bo_va = tmp->bo_va;
                        list_add(&after->list, &tmp->bo_va->invalids);
index b258a3d..159add0 100644
@@ -155,7 +155,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
 
        /* Wait till CP writes sync code: */
        status = amdkfd_fence_wait_timeout(
-                       (unsigned int *) rm_state,
+                       rm_state,
                        QUEUESTATE__ACTIVE, 1500);
 
        kfd_gtt_sa_free(dbgdev->dev, mem_obj);
index e686ce2..4598a9a 100644
@@ -1167,7 +1167,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
        if (retval)
                goto fail_allocate_vidmem;
 
-       dqm->fence_addr = dqm->fence_mem->cpu_ptr;
+       dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr;
        dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
 
        init_interrupts(dqm);
@@ -1340,8 +1340,8 @@ out:
        return retval;
 }
 
-int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
-                               unsigned int fence_value,
+int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
+                               uint64_t fence_value,
                                unsigned int timeout_ms)
 {
        unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
index 7351dd1..45f8159 100644
@@ -192,7 +192,7 @@ struct device_queue_manager {
        uint16_t                vmid_pasid[VMID_NUM];
        uint64_t                pipelines_addr;
        uint64_t                fence_gpu_addr;
-       unsigned int            *fence_addr;
+       uint64_t                *fence_addr;
        struct kfd_mem_obj      *fence_mem;
        bool                    active_runlist;
        int                     sched_policy;
index 5d541e0..f71a7fa 100644
@@ -347,7 +347,7 @@ fail_create_runlist_ib:
 }
 
 int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
-                       uint32_t fence_value)
+                       uint64_t fence_value)
 {
        uint32_t *buffer, size;
        int retval = 0;
index dfaf771..e3ba0cd 100644
@@ -283,7 +283,7 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
 }
 
 static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
-                       uint64_t fence_address, uint32_t fence_value)
+                       uint64_t fence_address, uint64_t fence_value)
 {
        struct pm4_mes_query_status *packet;
 
index a852e0d..08442e7 100644
@@ -263,7 +263,7 @@ static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
 }
 
 static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
-                       uint64_t fence_address, uint32_t fence_value)
+                       uint64_t fence_address, uint64_t fence_value)
 {
        struct pm4_mes_query_status *packet;
 
index 09599ef..f304d1f 100644
@@ -1003,8 +1003,8 @@ int pqm_get_wave_state(struct process_queue_manager *pqm,
                       u32 *ctl_stack_used_size,
                       u32 *save_area_used_size);
 
-int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
-                             unsigned int fence_value,
+int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
+                             uint64_t fence_value,
                              unsigned int timeout_ms);
 
 /* Packet Manager */
@@ -1040,7 +1040,7 @@ struct packet_manager_funcs {
                        uint32_t filter_param, bool reset,
                        unsigned int sdma_engine);
        int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
-                       uint64_t fence_address, uint32_t fence_value);
+                       uint64_t fence_address, uint64_t fence_value);
        int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
 
        /* Packet sizes */
@@ -1062,7 +1062,7 @@ int pm_send_set_resources(struct packet_manager *pm,
                                struct scheduling_resources *res);
 int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
 int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
-                               uint32_t fence_value);
+                               uint64_t fence_value);
 
 int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
                        enum kfd_unmap_queues_filter mode,
index 705fbfc..8a32772 100644
        HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_EN, mask_sh),\
        HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_BLK, mask_sh),\
        HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_BLK_C, mask_sh),\
+       HUBP_SF(HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK, mask_sh),\
        HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\
        HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\
        HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\
index 22b6a8e..c0565a9 100644
@@ -1224,7 +1224,8 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
                    (hwmgr->chip_id == CHIP_POLARIS10) ||
                    (hwmgr->chip_id == CHIP_POLARIS11) ||
                    (hwmgr->chip_id == CHIP_POLARIS12) ||
-                   (hwmgr->chip_id == CHIP_TONGA))
+                   (hwmgr->chip_id == CHIP_TONGA) ||
+                   (hwmgr->chip_id == CHIP_TOPAZ))
                        PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
 
 
@@ -3330,7 +3331,8 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 
        disable_mclk_switching_for_display = ((1 < hwmgr->display_config->num_display) &&
                                                !hwmgr->display_config->multi_monitor_in_sync) ||
-                                               smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time);
+                                               (hwmgr->display_config->num_display &&
+                                               smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
 
        disable_mclk_switching = disable_mclk_switching_for_frame_lock ||
                                         disable_mclk_switching_for_display;
index 7ddbaec..101eaa2 100644
@@ -384,10 +384,15 @@ static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
 
 static bool vangogh_is_dpm_running(struct smu_context *smu)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
        uint32_t feature_mask[2];
        uint64_t feature_enabled;
 
+       /* we need to re-init after suspend so return false */
+       if (adev->in_suspend)
+               return false;
+
        ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
 
        if (ret)
index 1f79bc2..1510e4e 100644
@@ -13,7 +13,6 @@
 #include <linux/irq.h>
 #include <linux/mfd/syscon.h>
 #include <linux/of_device.h>
-#include <linux/of_gpio.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/regmap.h>
index e21fb14..833d0c1 100644
@@ -84,13 +84,31 @@ static void intel_dsm_platform_mux_info(acpi_handle dhandle)
                return;
        }
 
+       if (!pkg->package.count) {
+               DRM_DEBUG_DRIVER("no connection in _DSM\n");
+               return;
+       }
+
        connector_count = &pkg->package.elements[0];
        DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
                  (unsigned long long)connector_count->integer.value);
        for (i = 1; i < pkg->package.count; i++) {
                union acpi_object *obj = &pkg->package.elements[i];
-               union acpi_object *connector_id = &obj->package.elements[0];
-               union acpi_object *info = &obj->package.elements[1];
+               union acpi_object *connector_id;
+               union acpi_object *info;
+
+               if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < 2) {
+                       DRM_DEBUG_DRIVER("Invalid object for MUX #%d\n", i);
+                       continue;
+               }
+
+               connector_id = &obj->package.elements[0];
+               info = &obj->package.elements[1];
+               if (info->type != ACPI_TYPE_BUFFER || info->buffer.length < 4) {
+                       DRM_DEBUG_DRIVER("Invalid info for MUX obj #%d\n", i);
+                       continue;
+               }
+
                DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
                          (unsigned long long)connector_id->integer.value);
                DRM_DEBUG_DRIVER("  port id: %s\n",
index 6518843..4f8337c 100644
@@ -646,7 +646,6 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
                        break;
                case INTEL_BACKLIGHT_DISPLAY_DDI:
                        try_intel_interface = true;
-                       try_vesa_interface = true;
                        break;
                default:
                        return -ENODEV;
index f94025e..a9a8ba1 100644
@@ -992,14 +992,14 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
         * FIXME As we do with eDP, just make a note of the time here
         * and perform the wait before the next panel power on.
         */
-       intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay);
+       msleep(intel_dsi->panel_pwr_cycle_delay);
 }
 
 static void intel_dsi_shutdown(struct intel_encoder *encoder)
 {
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 
-       intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay);
+       msleep(intel_dsi->panel_pwr_cycle_delay);
 }
 
 static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
index 97b57ac..4b4d8d0 100644
@@ -5471,12 +5471,12 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
        struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
        int ret;
 
-       memset(wm, 0, sizeof(*wm));
-
        /* Watermarks calculated in master */
        if (plane_state->planar_slave)
                return 0;
 
+       memset(wm, 0, sizeof(*wm));
+
        if (plane_state->planar_linked_plane) {
                const struct drm_framebuffer *fb = plane_state->hw.fb;
                enum plane_id y_plane_id = plane_state->planar_linked_plane->id;
index d1a9841..e6a88c8 100644
@@ -215,7 +215,7 @@ static int imx_drm_bind(struct device *dev)
 
        ret = drmm_mode_config_init(drm);
        if (ret)
-               return ret;
+               goto err_kms;
 
        ret = drm_vblank_init(drm, MAX_CRTC);
        if (ret)
index dbfe39e..ffdc492 100644
@@ -197,6 +197,11 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder)
        int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
        int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
 
+       if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) {
+               dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux);
+               return;
+       }
+
        drm_panel_prepare(imx_ldb_ch->panel);
 
        if (dual) {
@@ -255,6 +260,11 @@ imx_ldb_encoder_atomic_mode_set(struct drm_encoder *encoder,
        int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
        u32 bus_format = imx_ldb_ch->bus_format;
 
+       if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) {
+               dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux);
+               return;
+       }
+
        if (mode->clock > 170000) {
                dev_warn(ldb->dev,
                         "%s: mode exceeds 170 MHz pixel clock\n", __func__);
@@ -583,7 +593,7 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
                struct imx_ldb_channel *channel = &imx_ldb->channel[i];
 
                if (!channel->ldb)
-                       break;
+                       continue;
 
                ret = imx_ldb_register(drm, channel);
                if (ret)
index 7e553d3..ce13d49 100644
@@ -1386,8 +1386,8 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
 
 static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
 {
-       *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
-               REG_A5XX_RBBM_PERFCTR_CP_0_HI);
+       *value = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
+               REG_A5XX_RBBM_ALWAYSON_COUNTER_HI);
 
        return 0;
 }
index 690409c..d553f62 100644
@@ -567,17 +567,17 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
        }  else {
                /*
                 * a650 tier targets don't need whereami but still need to be
-                * equal to or newer than 1.95 for other security fixes
+                * equal to or newer than 0.95 for other security fixes
                 */
                if (adreno_is_a650(adreno_gpu)) {
-                       if ((buf[0] & 0xfff) >= 0x195) {
+                       if ((buf[0] & 0xfff) >= 0x095) {
                                ret = true;
                                goto out;
                        }
 
                        DRM_DEV_ERROR(&gpu->pdev->dev,
                                "a650 SQE ucode is too old. Have version %x need at least %x\n",
-                               buf[0] & 0xfff, 0x195);
+                               buf[0] & 0xfff, 0x095);
                }
 
                /*
@@ -1228,8 +1228,8 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
        /* Force the GPU power on so we can read this register */
        a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
 
-       *value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
-               REG_A6XX_RBBM_PERFCTR_CP_0_HI);
+       *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+               REG_A6XX_CP_ALWAYS_ON_COUNTER_HI);
 
        a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
        mutex_unlock(&perfcounter_oob);
@@ -1406,7 +1406,13 @@ static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu,
        int ret;
 
        ret = nvmem_cell_read_u16(dev, "speed_bin", &speedbin);
-       if (ret) {
+       /*
+        * -ENOENT means that the platform doesn't support speedbin which is
+        * fine
+        */
+       if (ret == -ENOENT) {
+               return 0;
+       } else if (ret) {
                DRM_DEV_ERROR(dev,
                              "failed to read speed-bin (%d). Some OPPs may not be supported by hardware",
                              ret);
index 8981cfa..92e6f1b 100644
@@ -496,7 +496,9 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
 
        DPU_REG_WRITE(c, CTL_TOP, mode_sel);
        DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
-       DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE, BIT(cfg->merge_3d - MERGE_3D_0));
+       if (cfg->merge_3d)
+               DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
+                             BIT(cfg->merge_3d - MERGE_3D_0));
 }
 
 static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
index a5c6b8c..1969076 100644
@@ -570,6 +570,7 @@ err_free_priv:
        kfree(priv);
 err_put_drm_dev:
        drm_dev_put(ddev);
+       platform_set_drvdata(pdev, NULL);
        return ret;
 }
 
index af381d7..5fbfb71 100644
@@ -37,6 +37,7 @@ struct dsic_panel_data {
        u32 height_mm;
        u32 max_hs_rate;
        u32 max_lp_rate;
+       bool te_support;
 };
 
 struct panel_drv_data {
@@ -334,9 +335,11 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
        if (r)
                goto err;
 
-       r = mipi_dsi_dcs_set_tear_on(ddata->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
-       if (r)
-               goto err;
+       if (ddata->panel_data->te_support) {
+               r = mipi_dsi_dcs_set_tear_on(ddata->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+               if (r)
+                       goto err;
+       }
 
        /* possible panel bug */
        msleep(100);
@@ -619,6 +622,7 @@ static const struct dsic_panel_data taal_data = {
        .height_mm = 0,
        .max_hs_rate = 300000000,
        .max_lp_rate = 10000000,
+       .te_support = true,
 };
 
 static const struct dsic_panel_data himalaya_data = {
@@ -629,6 +633,7 @@ static const struct dsic_panel_data himalaya_data = {
        .height_mm = 88,
        .max_hs_rate = 300000000,
        .max_lp_rate = 10000000,
+       .te_support = false,
 };
 
 static const struct dsic_panel_data droid4_data = {
@@ -639,6 +644,7 @@ static const struct dsic_panel_data droid4_data = {
        .height_mm = 89,
        .max_hs_rate = 300000000,
        .max_lp_rate = 10000000,
+       .te_support = false,
 };
 
 static const struct of_device_id dsicm_of_match[] = {
index e8c66d1..78893be 100644
@@ -364,7 +364,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *
        if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
                /* check that we only pin down anonymous memory
                   to prevent problems with writeback */
-               unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
+               unsigned long end = gtt->userptr + (u64)ttm->num_pages * PAGE_SIZE;
                struct vm_area_struct *vma;
                vma = find_vma(gtt->usermm, gtt->userptr);
                if (!vma || vma->vm_file || vma->vm_end < end)
@@ -386,7 +386,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *
        } while (pinned < ttm->num_pages);
 
        r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
-                                     ttm->num_pages << PAGE_SHIFT,
+                                     (u64)ttm->num_pages << PAGE_SHIFT,
                                      GFP_KERNEL);
        if (r)
                goto release_sg;
index 0ae3a02..134986d 100644
@@ -1688,6 +1688,11 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
                        dev_err(dc->dev,
                                "failed to set clock rate to %lu Hz\n",
                                state->pclk);
+
+               err = clk_set_rate(dc->clk, state->pclk);
+               if (err < 0)
+                       dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n",
+                               dc->clk, state->pclk, err);
        }
 
        DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk),
@@ -1698,11 +1703,6 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
                value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1;
                tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
        }
-
-       err = clk_set_rate(dc->clk, state->pclk);
-       if (err < 0)
-               dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n",
-                       dc->clk, state->pclk, err);
 }
 
 static void tegra_dc_stop(struct tegra_dc *dc)
@@ -2501,22 +2501,18 @@ static int tegra_dc_couple(struct tegra_dc *dc)
         * POWER_CONTROL registers during CRTC enabling.
         */
        if (dc->soc->coupled_pm && dc->pipe == 1) {
-               u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
-               struct device_link *link;
-               struct device *partner;
+               struct device *companion;
+               struct tegra_dc *parent;
 
-               partner = driver_find_device(dc->dev->driver, NULL, NULL,
-                                            tegra_dc_match_by_pipe);
-               if (!partner)
+               companion = driver_find_device(dc->dev->driver, NULL, (const void *)0,
+                                              tegra_dc_match_by_pipe);
+               if (!companion)
                        return -EPROBE_DEFER;
 
-               link = device_link_add(dc->dev, partner, flags);
-               if (!link) {
-                       dev_err(dc->dev, "failed to link controllers\n");
-                       return -EINVAL;
-               }
+               parent = dev_get_drvdata(companion);
+               dc->client.parent = &parent->client;
 
-               dev_dbg(dc->dev, "coupled to %s\n", dev_name(partner));
+               dev_dbg(dc->dev, "coupled to %s\n", dev_name(companion));
        }
 
        return 0;
index f02a035..7b88261 100644
@@ -3115,6 +3115,12 @@ static int tegra_sor_init(struct host1x_client *client)
         * kernel is possible.
         */
        if (sor->rst) {
+               err = pm_runtime_resume_and_get(sor->dev);
+               if (err < 0) {
+                       dev_err(sor->dev, "failed to get runtime PM: %d\n", err);
+                       return err;
+               }
+
                err = reset_control_acquire(sor->rst);
                if (err < 0) {
                        dev_err(sor->dev, "failed to acquire SOR reset: %d\n",
@@ -3148,6 +3154,7 @@ static int tegra_sor_init(struct host1x_client *client)
                }
 
                reset_control_release(sor->rst);
+               pm_runtime_put(sor->dev);
        }
 
        err = clk_prepare_enable(sor->clk_safe);
index 269390b..76657dc 100644
@@ -210,6 +210,7 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
 {
        const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc);
        const struct vc4_pv_data *pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
+       struct vc4_dev *vc4 = to_vc4_dev(vc4_crtc->base.dev);
        u32 fifo_len_bytes = pv_data->fifo_depth;
 
        /*
@@ -238,6 +239,22 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format)
                if (crtc_data->hvs_output == 5)
                        return 32;
 
+               /*
+                * It looks like in some situations, we will overflow
+                * the PixelValve FIFO (with the bit 10 of PV stat being
+                * set) and stall the HVS / PV, eventually resulting in
+                * a page flip timeout.
+                *
+                * Displaying the video overlay during a playback with
+                * Kodi on an RPi3 seems to be a great solution with a
+                * failure rate around 50%.
+                *
+                * Removing 1 from the FIFO full level however
+                * seems to completely remove that issue.
+                */
+               if (!vc4->hvs->hvs5)
+                       return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX - 1;
+
                return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX;
        }
 }
index 7322169..1e9c84c 100644
@@ -1146,7 +1146,6 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
        plane->state->src_y = state->src_y;
        plane->state->src_w = state->src_w;
        plane->state->src_h = state->src_h;
-       plane->state->src_h = state->src_h;
        plane->state->alpha = state->alpha;
        plane->state->pixel_blend_mode = state->pixel_blend_mode;
        plane->state->rotation = state->rotation;
index ba658fa..183571c 100644
@@ -481,11 +481,15 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
        vmw_bo_unreference(&old_buf);
        res->id = vcotbl->type;
 
+       /* Release the pin acquired in vmw_bo_init */
+       ttm_bo_unpin(bo);
+
        return 0;
 
 out_map_new:
        ttm_bo_kunmap(&old_map);
 out_wait:
+       ttm_bo_unpin(bo);
        ttm_bo_unreserve(bo);
        vmw_bo_unreference(&buf);
 
index dd69b51..6fa2464 100644
@@ -712,17 +712,8 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
        dev_priv->last_read_seqno = (uint32_t) -100;
        dev_priv->drm.dev_private = dev_priv;
 
-       ret = vmw_setup_pci_resources(dev_priv, pci_id);
-       if (ret)
-               return ret;
-       ret = vmw_detect_version(dev_priv);
-       if (ret)
-               goto out_no_pci_or_version;
-
        mutex_init(&dev_priv->cmdbuf_mutex);
-       mutex_init(&dev_priv->release_mutex);
        mutex_init(&dev_priv->binding_mutex);
-       mutex_init(&dev_priv->global_kms_state_mutex);
        ttm_lock_init(&dev_priv->reservation_sem);
        spin_lock_init(&dev_priv->resource_lock);
        spin_lock_init(&dev_priv->hw_lock);
@@ -730,6 +721,14 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
        spin_lock_init(&dev_priv->cap_lock);
        spin_lock_init(&dev_priv->cursor_lock);
 
+       ret = vmw_setup_pci_resources(dev_priv, pci_id);
+       if (ret)
+               return ret;
+       ret = vmw_detect_version(dev_priv);
+       if (ret)
+               goto out_no_pci_or_version;
+
+
        for (i = vmw_res_context; i < vmw_res_max; ++i) {
                idr_init(&dev_priv->res_idr[i]);
                INIT_LIST_HEAD(&dev_priv->res_lru[i]);
index 5fa5bcd..eb76a6b 100644
@@ -529,7 +529,6 @@ struct vmw_private {
        struct vmw_overlay *overlay_priv;
        struct drm_property *hotplug_mode_update_property;
        struct drm_property *implicit_placement_property;
-       struct mutex global_kms_state_mutex;
        spinlock_t cursor_lock;
        struct drm_atomic_state *suspend_state;
 
@@ -592,7 +591,6 @@ struct vmw_private {
        bool refuse_hibernation;
        bool suspend_locked;
 
-       struct mutex release_mutex;
        atomic_t num_fifo_resources;
 
        /*
@@ -1524,9 +1522,8 @@ static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
        struct vmw_buffer_object *tmp_buf = *buf;
 
        *buf = NULL;
-       if (tmp_buf != NULL) {
+       if (tmp_buf != NULL)
                ttm_bo_put(&tmp_buf->base);
-       }
 }
 
 static inline struct vmw_buffer_object *
index a372980..f2d6254 100644
@@ -94,6 +94,16 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
                             struct vmw_piter data_iter,
                             unsigned long num_data_pages);
 
+
+static inline void vmw_bo_unpin_unlocked(struct ttm_buffer_object *bo)
+{
+       int ret = ttm_bo_reserve(bo, false, true, NULL);
+       BUG_ON(ret != 0);
+       ttm_bo_unpin(bo);
+       ttm_bo_unreserve(bo);
+}
+
+
 /*
  * vmw_setup_otable_base - Issue an object table base setup command to
  * the device
@@ -277,6 +287,7 @@ out_no_setup:
                                                 &batch->otables[i]);
        }
 
+       vmw_bo_unpin_unlocked(batch->otable_bo);
        ttm_bo_put(batch->otable_bo);
        batch->otable_bo = NULL;
        return ret;
@@ -340,6 +351,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
        BUG_ON(ret != 0);
 
        vmw_bo_fence_single(bo, NULL);
+       ttm_bo_unpin(bo);
        ttm_bo_unreserve(bo);
 
        ttm_bo_put(batch->otable_bo);
@@ -528,6 +540,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
 void vmw_mob_destroy(struct vmw_mob *mob)
 {
        if (mob->pt_bo) {
+               vmw_bo_unpin_unlocked(mob->pt_bo);
                ttm_bo_put(mob->pt_bo);
                mob->pt_bo = NULL;
        }
@@ -643,6 +656,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
 out_no_cmd_space:
        vmw_fifo_resource_dec(dev_priv);
        if (pt_set_up) {
+               vmw_bo_unpin_unlocked(mob->pt_bo);
                ttm_bo_put(mob->pt_bo);
                mob->pt_bo = NULL;
        }
index 30d9adf..9f14d99 100644
@@ -521,7 +521,7 @@ static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
        drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
        if (IS_ERR(drm_dev)) {
                ret = PTR_ERR(drm_dev);
-               goto fail;
+               goto fail_dev;
        }
 
        drm_info->drm_dev = drm_dev;
@@ -551,8 +551,10 @@ fail_modeset:
        drm_kms_helper_poll_fini(drm_dev);
        drm_mode_config_cleanup(drm_dev);
        drm_dev_put(drm_dev);
-fail:
+fail_dev:
        kfree(drm_info);
+       front_info->drm_info = NULL;
+fail:
        return ret;
 }
 
index 3adacba..e5f4314 100644
@@ -16,7 +16,6 @@
 struct drm_connector;
 struct xen_drm_front_drm_info;
 
-struct xen_drm_front_drm_info;
 
 int xen_drm_front_conn_init(struct xen_drm_front_drm_info *drm_info,
                            struct drm_connector *connector);
index 347fb96..68a766f 100644
@@ -705,8 +705,9 @@ void host1x_driver_unregister(struct host1x_driver *driver)
 EXPORT_SYMBOL(host1x_driver_unregister);
 
 /**
- * host1x_client_register() - register a host1x client
+ * __host1x_client_register() - register a host1x client
  * @client: host1x client
+ * @key: lock class key for the client-specific mutex
  *
  * Registers a host1x client with each host1x controller instance. Note that
  * each client will only match their parent host1x controller and will only be
@@ -715,13 +716,14 @@ EXPORT_SYMBOL(host1x_driver_unregister);
  * device and call host1x_device_init(), which will in turn call each client's
  * &host1x_client_ops.init implementation.
  */
-int host1x_client_register(struct host1x_client *client)
+int __host1x_client_register(struct host1x_client *client,
+                            struct lock_class_key *key)
 {
        struct host1x *host1x;
        int err;
 
        INIT_LIST_HEAD(&client->list);
-       mutex_init(&client->lock);
+       __mutex_init(&client->lock, "host1x client lock", key);
        client->usecount = 0;
 
        mutex_lock(&devices_lock);
@@ -742,7 +744,7 @@ int host1x_client_register(struct host1x_client *client)
 
        return 0;
 }
-EXPORT_SYMBOL(host1x_client_register);
+EXPORT_SYMBOL(__host1x_client_register);
 
 /**
  * host1x_client_unregister() - unregister a host1x client
index dbac166..ddecc84 100644
@@ -10,6 +10,7 @@
 #include <linux/bitops.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
+#include <linux/dmi.h>
 #include <linux/interrupt.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/module.h>
 
 #define ACEL_EN                BIT(0)
 #define GYRO_EN                BIT(1)
-#define MAGNO_EN               BIT(2)
+#define MAGNO_EN       BIT(2)
 #define ALS_EN         BIT(19)
 
+static int sensor_mask_override = -1;
+module_param_named(sensor_mask, sensor_mask_override, int, 0444);
+MODULE_PARM_DESC(sensor_mask, "override the detected sensors mask");
+
 void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info)
 {
        union sfh_cmd_param cmd_param;
@@ -73,12 +78,41 @@ void amd_stop_all_sensors(struct amd_mp2_dev *privdata)
        writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
 }
 
+static const struct dmi_system_id dmi_sensor_mask_overrides[] = {
+       {
+               .matches = {
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY x360 Convertible 13-ag0xxx"),
+               },
+               .driver_data = (void *)(ACEL_EN | MAGNO_EN),
+       },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY x360 Convertible 15-cp0xxx"),
+               },
+               .driver_data = (void *)(ACEL_EN | MAGNO_EN),
+       },
+       { }
+};
+
 int amd_mp2_get_sensor_num(struct amd_mp2_dev *privdata, u8 *sensor_id)
 {
        int activestatus, num_of_sensors = 0;
+       const struct dmi_system_id *dmi_id;
+       u32 activecontrolstatus;
+
+       if (sensor_mask_override == -1) {
+               dmi_id = dmi_first_match(dmi_sensor_mask_overrides);
+               if (dmi_id)
+                       sensor_mask_override = (long)dmi_id->driver_data;
+       }
+
+       if (sensor_mask_override >= 0) {
+               activestatus = sensor_mask_override;
+       } else {
+               activecontrolstatus = readl(privdata->mmio + AMD_P2C_MSG3);
+               activestatus = activecontrolstatus >> 4;
+       }
 
-       privdata->activecontrolstatus = readl(privdata->mmio + AMD_P2C_MSG3);
-       activestatus = privdata->activecontrolstatus >> 4;
        if (ACEL_EN  & activestatus)
                sensor_id[num_of_sensors++] = accel_idx;
 
index 8f8d19b..489415f 100644
@@ -61,7 +61,6 @@ struct amd_mp2_dev {
        struct pci_dev *pdev;
        struct amdtp_cl_data *cl_data;
        void __iomem *mmio;
-       u32 activecontrolstatus;
 };
 
 struct amd_mp2_sensor_info {
index 3feaece..6b66593 100644
@@ -761,6 +761,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
 
                if (input_register_device(data->input2)) {
                        input_free_device(input2);
+                       ret = -ENOENT;
                        goto exit;
                }
        }
index 1dfe184..2ab22b9 100644
@@ -1221,6 +1221,9 @@ static const struct hid_device_id asus_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
            USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD),
          QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+           USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2),
+         QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
                USB_DEVICE_ID_ASUSTEK_T100TA_KEYBOARD),
          QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
index 21e1562..477baa3 100644
@@ -161,6 +161,7 @@ struct cp2112_device {
        atomic_t read_avail;
        atomic_t xfer_avail;
        struct gpio_chip gc;
+       struct irq_chip irq;
        u8 *in_out_buffer;
        struct mutex lock;
 
@@ -1175,16 +1176,6 @@ static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type)
        return 0;
 }
 
-static struct irq_chip cp2112_gpio_irqchip = {
-       .name = "cp2112-gpio",
-       .irq_startup = cp2112_gpio_irq_startup,
-       .irq_shutdown = cp2112_gpio_irq_shutdown,
-       .irq_ack = cp2112_gpio_irq_ack,
-       .irq_mask = cp2112_gpio_irq_mask,
-       .irq_unmask = cp2112_gpio_irq_unmask,
-       .irq_set_type = cp2112_gpio_irq_type,
-};
-
 static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
                                              int pin)
 {
@@ -1339,8 +1330,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
        dev->gc.can_sleep               = 1;
        dev->gc.parent                  = &hdev->dev;
 
+       dev->irq.name = "cp2112-gpio";
+       dev->irq.irq_startup = cp2112_gpio_irq_startup;
+       dev->irq.irq_shutdown = cp2112_gpio_irq_shutdown;
+       dev->irq.irq_ack = cp2112_gpio_irq_ack;
+       dev->irq.irq_mask = cp2112_gpio_irq_mask;
+       dev->irq.irq_unmask = cp2112_gpio_irq_unmask;
+       dev->irq.irq_set_type = cp2112_gpio_irq_type;
+       dev->irq.flags = IRQCHIP_MASK_ON_SUSPEND;
+
        girq = &dev->gc.irq;
-       girq->chip = &cp2112_gpio_irqchip;
+       girq->chip = &dev->irq;
        /* The event comes from the outside so no parent handler */
        girq->parent_handler = NULL;
        girq->num_parents = 0;
index d931962..e60c31d 100644
@@ -573,6 +573,8 @@ static void hammer_remove(struct hid_device *hdev)
 }
 
 static const struct hid_device_id hammer_devices[] = {
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+                    USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
index e42aaae..67fd8a2 100644
 #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2 0x1837
 #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3 0x1822
 #define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD        0x1866
+#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2       0x19b6
 #define USB_DEVICE_ID_ASUSTEK_FX503VD_KEYBOARD 0x1869
 
 #define USB_VENDOR_ID_ATEN             0x0557
 #define USB_DEVICE_ID_GOOGLE_MASTERBALL        0x503c
 #define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
 #define USB_DEVICE_ID_GOOGLE_MOONBALL  0x5044
+#define USB_DEVICE_ID_GOOGLE_DON       0x5050
 
 #define USB_VENDOR_ID_GOTOP            0x08f2
 #define USB_DEVICE_ID_SUPER_Q2         0x007f
index 44d715c..2d70dc4 100644
@@ -2533,7 +2533,7 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
            !wacom_wac->shared->is_touch_on) {
                if (!wacom_wac->shared->touch_down)
                        return;
-               prox = 0;
+               prox = false;
        }
 
        wacom_wac->hid_data.num_received++;
@@ -3574,8 +3574,6 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
 {
        struct wacom_features *features = &wacom_wac->features;
 
-       input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
-
        if (!(features->device_type & WACOM_DEVICETYPE_PEN))
                return -ENODEV;
 
@@ -3590,6 +3588,7 @@ int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
                return 0;
        }
 
+       input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
        __set_bit(BTN_TOUCH, input_dev->keybit);
        __set_bit(ABS_MISC, input_dev->absbit);
 
@@ -3742,8 +3741,6 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
 {
        struct wacom_features *features = &wacom_wac->features;
 
-       input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
-
        if (!(features->device_type & WACOM_DEVICETYPE_TOUCH))
                return -ENODEV;
 
@@ -3756,6 +3753,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
                /* setup has already been done */
                return 0;
 
+       input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
        __set_bit(BTN_TOUCH, input_dev->keybit);
 
        if (features->touch_max == 1) {
index dd27b9d..873ef38 100644
@@ -129,6 +129,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
                if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
                        != DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
                        dev_err(dev->dev, "High Speed not supported!\n");
+                       t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
                        dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
                        dev->master_cfg |= DW_IC_CON_SPEED_FAST;
                        dev->hs_hcnt = 0;
index 5ac30d9..97d4f3a 100644
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
  * i2c-exynos5.c - Samsung Exynos5 I2C Controller Driver
  *
  * Copyright (C) 2013 Samsung Electronics Co., Ltd.
index c45f226..aa00ba8 100644
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright (c) 2014 Linaro Ltd.
- * Copyright (c) 2014 Hisilicon Limited.
+ * Copyright (c) 2014 HiSilicon Limited.
  *
  * Now only support 7 bit address.
  */
index 8509c5f..55177eb 100644
@@ -525,8 +525,8 @@ static irqreturn_t jz4780_i2c_irq(int irqno, void *dev_id)
                                i2c_sta = jz4780_i2c_readw(i2c, JZ4780_I2C_STA);
                                data = *i2c->wbuf;
                                data &= ~JZ4780_I2C_DC_READ;
-                               if ((!i2c->stop_hold) && (i2c->cdata->version >=
-                                               ID_X1000))
+                               if ((i2c->wt_len == 1) && (!i2c->stop_hold) &&
+                                               (i2c->cdata->version >= ID_X1000))
                                        data |= X1000_I2C_DC_STOP;
                                jz4780_i2c_writew(i2c, JZ4780_I2C_DC, data);
                                i2c->wbuf++;
index c590d36..5c8e94b 100644
@@ -221,6 +221,10 @@ mv64xxx_i2c_hw_init(struct mv64xxx_i2c_data *drv_data)
        writel(0, drv_data->reg_base + drv_data->reg_offsets.ext_addr);
        writel(MV64XXX_I2C_REG_CONTROL_TWSIEN | MV64XXX_I2C_REG_CONTROL_STOP,
                drv_data->reg_base + drv_data->reg_offsets.control);
+
+       if (drv_data->errata_delay)
+               udelay(5);
+
        drv_data->state = MV64XXX_I2C_STATE_IDLE;
 }
 
index 937c2c8..4933fc8 100644
@@ -534,7 +534,7 @@ static void stm32f4_i2c_handle_rx_addr(struct stm32f4_i2c_dev *i2c_dev)
        default:
                /*
                 * N-byte reception:
-                * Enable ACK, reset POS (ACK postion) and clear ADDR flag.
+                * Enable ACK, reset POS (ACK position) and clear ADDR flag.
                 * In that way, ACK will be sent as soon as the current byte
                 * will be received in the shift register
                 */
index 63ebf72..f213623 100644
@@ -378,7 +378,7 @@ static int i2c_gpio_init_recovery(struct i2c_adapter *adap)
 static int i2c_init_recovery(struct i2c_adapter *adap)
 {
        struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
-       char *err_str;
+       char *err_str, *err_level = KERN_ERR;
 
        if (!bri)
                return 0;
@@ -387,7 +387,8 @@ static int i2c_init_recovery(struct i2c_adapter *adap)
                return -EPROBE_DEFER;
 
        if (!bri->recover_bus) {
-               err_str = "no recover_bus() found";
+               err_str = "no suitable method provided";
+               err_level = KERN_DEBUG;
                goto err;
        }
 
@@ -414,7 +415,7 @@ static int i2c_init_recovery(struct i2c_adapter *adap)
 
        return 0;
  err:
-       dev_err(&adap->dev, "Not using recovery: %s\n", err_str);
+       dev_printk(err_level, &adap->dev, "Not using recovery: %s\n", err_str);
        adap->bus_recovery_info = NULL;
 
        return -EINVAL;
index 0abce00..65e3e7d 100644
@@ -76,7 +76,9 @@ static struct workqueue_struct *addr_wq;
 
 static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
        [LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
-               .len = sizeof(struct rdma_nla_ls_gid)},
+               .len = sizeof(struct rdma_nla_ls_gid),
+               .validation_type = NLA_VALIDATE_MIN,
+               .min = sizeof(struct rdma_nla_ls_gid)},
 };
 
 static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
index 8190374..e42c812 100644
@@ -3616,7 +3616,8 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
                c4iw_init_wr_wait(ep->com.wr_waitp);
                err = cxgb4_remove_server(
                                ep->com.dev->rdev.lldi.ports[0], ep->stid,
-                               ep->com.dev->rdev.lldi.rxq_ids[0], true);
+                               ep->com.dev->rdev.lldi.rxq_ids[0],
+                               ep->com.local_addr.ss_family == AF_INET6);
                if (err)
                        goto done;
                err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
index 2a91b8d..04b1e8f 100644
@@ -632,22 +632,11 @@ static void _dev_comp_vect_cpu_mask_clean_up(struct hfi1_devdata *dd,
  */
 int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
 {
-       int node = pcibus_to_node(dd->pcidev->bus);
        struct hfi1_affinity_node *entry;
        const struct cpumask *local_mask;
        int curr_cpu, possible, i, ret;
        bool new_entry = false;
 
-       /*
-        * If the BIOS does not have the NUMA node information set, select
-        * NUMA 0 so we get consistent performance.
-        */
-       if (node < 0) {
-               dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
-               node = 0;
-       }
-       dd->node = node;
-
        local_mask = cpumask_of_node(dd->node);
        if (cpumask_first(local_mask) >= nr_cpu_ids)
                local_mask = topology_core_cpumask(0);
@@ -660,7 +649,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
         * create an entry in the global affinity structure and initialize it.
         */
        if (!entry) {
-               entry = node_affinity_allocate(node);
+               entry = node_affinity_allocate(dd->node);
                if (!entry) {
                        dd_dev_err(dd,
                                   "Unable to allocate global affinity node\n");
@@ -751,6 +740,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
        if (new_entry)
                node_affinity_add_tail(entry);
 
+       dd->affinity_entry = entry;
        mutex_unlock(&node_affinity.lock);
 
        return 0;
@@ -766,10 +756,9 @@ void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
 {
        struct hfi1_affinity_node *entry;
 
-       if (dd->node < 0)
-               return;
-
        mutex_lock(&node_affinity.lock);
+       if (!dd->affinity_entry)
+               goto unlock;
        entry = node_affinity_lookup(dd->node);
        if (!entry)
                goto unlock;
@@ -780,8 +769,8 @@ void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
         */
        _dev_comp_vect_cpu_mask_clean_up(dd, entry);
 unlock:
+       dd->affinity_entry = NULL;
        mutex_unlock(&node_affinity.lock);
-       dd->node = NUMA_NO_NODE;
 }
 
 /*
index e09e824..2a9a040 100644
@@ -1409,6 +1409,7 @@ struct hfi1_devdata {
        spinlock_t irq_src_lock;
        int vnic_num_vports;
        struct net_device *dummy_netdev;
+       struct hfi1_affinity_node *affinity_entry;
 
        /* Keeps track of IPoIB RSM rule users */
        atomic_t ipoib_rsm_usr_num;
index cb7ad12..786c631 100644
@@ -1277,7 +1277,6 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
        dd->pport = (struct hfi1_pportdata *)(dd + 1);
        dd->pcidev = pdev;
        pci_set_drvdata(pdev, dd);
-       dd->node = NUMA_NO_NODE;
 
        ret = xa_alloc_irq(&hfi1_dev_table, &dd->unit, dd, xa_limit_32b,
                        GFP_KERNEL);
@@ -1287,6 +1286,15 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
                goto bail;
        }
        rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);
+       /*
+        * If the BIOS does not have the NUMA node information set, select
+        * NUMA 0 so we get consistent performance.
+        */
+       dd->node = pcibus_to_node(pdev->bus);
+       if (dd->node == NUMA_NO_NODE) {
+               dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
+               dd->node = 0;
+       }
 
        /*
         * Initialize all locks for the device. This needs to be as early as
index 1fb6e1a..1bcab99 100644
@@ -173,8 +173,7 @@ u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
                return 0;
        }
 
-       cpumask_and(node_cpu_mask, cpu_mask,
-                   cpumask_of_node(pcibus_to_node(dd->pcidev->bus)));
+       cpumask_and(node_cpu_mask, cpu_mask, cpumask_of_node(dd->node));
 
        available_cpus = cpumask_weight(node_cpu_mask);
 
index 0eb6a7a..9ea5422 100644
@@ -1244,7 +1244,8 @@ static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
         * TGT QP isn't associated with RQ/SQ
         */
        if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created) &&
-           (attrs->qp_type != IB_QPT_XRC_TGT)) {
+           (attrs->qp_type != IB_QPT_XRC_TGT) &&
+           (attrs->qp_type != IB_QPT_XRC_INI)) {
                struct qedr_cq *send_cq = get_qedr_cq(attrs->send_cq);
                struct qedr_cq *recv_cq = get_qedr_cq(attrs->recv_cq);
 
index 0a08b4b..6734329 100644
@@ -2720,8 +2720,8 @@ void rtrs_clt_close(struct rtrs_clt *clt)
 
        /* Now it is safe to iterate over all paths without locks */
        list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
-               rtrs_clt_destroy_sess_files(sess, NULL);
                rtrs_clt_close_conns(sess, true);
+               rtrs_clt_destroy_sess_files(sess, NULL);
                kobject_put(&sess->kobj);
        }
        free_clt(clt);
index 8bcc529..9dbca36 100644
@@ -252,8 +252,8 @@ static int __init n64joy_probe(struct platform_device *pdev)
        mutex_init(&priv->n64joy_mutex);
 
        priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
-       if (!priv->reg_base) {
-               err = -EINVAL;
+       if (IS_ERR(priv->reg_base)) {
+               err = PTR_ERR(priv->reg_base);
                goto fail;
        }
 
index 63d5e48..e9fa142 100644
@@ -93,9 +93,15 @@ static irqreturn_t nspire_keypad_irq(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static int nspire_keypad_chip_init(struct nspire_keypad *keypad)
+static int nspire_keypad_open(struct input_dev *input)
 {
+       struct nspire_keypad *keypad = input_get_drvdata(input);
        unsigned long val = 0, cycles_per_us, delay_cycles, row_delay_cycles;
+       int error;
+
+       error = clk_prepare_enable(keypad->clk);
+       if (error)
+               return error;
 
        cycles_per_us = (clk_get_rate(keypad->clk) / 1000000);
        if (cycles_per_us == 0)
@@ -121,30 +127,6 @@ static int nspire_keypad_chip_init(struct nspire_keypad *keypad)
        keypad->int_mask = 1 << 1;
        writel(keypad->int_mask, keypad->reg_base + KEYPAD_INTMSK);
 
-       /* Disable GPIO interrupts to prevent hanging on touchpad */
-       /* Possibly used to detect touchpad events */
-       writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT);
-       /* Acknowledge existing interrupts */
-       writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS);
-
-       return 0;
-}
-
-static int nspire_keypad_open(struct input_dev *input)
-{
-       struct nspire_keypad *keypad = input_get_drvdata(input);
-       int error;
-
-       error = clk_prepare_enable(keypad->clk);
-       if (error)
-               return error;
-
-       error = nspire_keypad_chip_init(keypad);
-       if (error) {
-               clk_disable_unprepare(keypad->clk);
-               return error;
-       }
-
        return 0;
 }
 
@@ -152,6 +134,11 @@ static void nspire_keypad_close(struct input_dev *input)
 {
        struct nspire_keypad *keypad = input_get_drvdata(input);
 
+       /* Disable interrupts */
+       writel(0, keypad->reg_base + KEYPAD_INTMSK);
+       /* Acknowledge existing interrupts */
+       writel(~0, keypad->reg_base + KEYPAD_INT);
+
        clk_disable_unprepare(keypad->clk);
 }
 
@@ -210,6 +197,25 @@ static int nspire_keypad_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
+       error = clk_prepare_enable(keypad->clk);
+       if (error) {
+               dev_err(&pdev->dev, "failed to enable clock\n");
+               return error;
+       }
+
+       /* Disable interrupts */
+       writel(0, keypad->reg_base + KEYPAD_INTMSK);
+       /* Acknowledge existing interrupts */
+       writel(~0, keypad->reg_base + KEYPAD_INT);
+
+       /* Disable GPIO interrupts to prevent hanging on touchpad */
+       /* Possibly used to detect touchpad events */
+       writel(0, keypad->reg_base + KEYPAD_UNKNOWN_INT);
+       /* Acknowledge existing GPIO interrupts */
+       writel(~0, keypad->reg_base + KEYPAD_UNKNOWN_INT_STS);
+
+       clk_disable_unprepare(keypad->clk);
+
        input_set_drvdata(input, keypad);
 
        input->id.bustype = BUS_HOST;
index 9119e12..a5a0035 100644
@@ -588,6 +588,7 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
                        DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
                },
+       }, {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
                        DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
index 4c2b579..5f7706f 100644
@@ -1441,7 +1441,7 @@ static int elants_i2c_probe(struct i2c_client *client,
 
        touchscreen_parse_properties(ts->input, true, &ts->prop);
 
-       if (ts->chip_id == EKTF3624) {
+       if (ts->chip_id == EKTF3624 && ts->phy_x && ts->phy_y) {
                /* calculate resolution from size */
                ts->x_res = DIV_ROUND_CLOSEST(ts->prop.max_x, ts->phy_x);
                ts->y_res = DIV_ROUND_CLOSEST(ts->prop.max_y, ts->phy_y);
@@ -1449,8 +1449,7 @@ static int elants_i2c_probe(struct i2c_client *client,
 
        input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res);
        input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res);
-       if (ts->major_res > 0)
-               input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, ts->major_res);
+       input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, ts->major_res);
 
        error = input_mt_init_slots(ts->input, MAX_CONTACT_NUM,
                                    INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
index b63d7fd..85a1f46 100644
@@ -145,8 +145,8 @@ static void s6sy761_report_coordinates(struct s6sy761_data *sdata,
        u8 major = event[4];
        u8 minor = event[5];
        u8 z = event[6] & S6SY761_MASK_Z;
-       u16 x = (event[1] << 3) | ((event[3] & S6SY761_MASK_X) >> 4);
-       u16 y = (event[2] << 3) | (event[3] & S6SY761_MASK_Y);
+       u16 x = (event[1] << 4) | ((event[3] & S6SY761_MASK_X) >> 4);
+       u16 y = (event[2] << 4) | (event[3] & S6SY761_MASK_Y);
 
        input_mt_slot(sdata->input, tid);
 
index 73e2c8d..448cc53 100644
@@ -53,7 +53,7 @@ void icc_bulk_put(int num_paths, struct icc_bulk_data *paths)
 EXPORT_SYMBOL_GPL(icc_bulk_put);
 
 /**
- * icc_bulk_set() - set bandwidth to a set of paths
+ * icc_bulk_set_bw() - set bandwidth to a set of paths
  * @num_paths: the number of icc_bulk_data
  * @paths: the icc_bulk_data table containing the paths and bandwidth
  *
index 5ad519c..8a1e70e 100644
@@ -942,6 +942,8 @@ int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
                       GFP_KERNEL);
        if (new)
                src->links = new;
+       else
+               ret = -ENOMEM;
 
 out:
        mutex_unlock(&icc_lock);
index dfbec30..20f31a1 100644
@@ -131,7 +131,7 @@ DEFINE_QNODE(mas_pcnoc_sdcc_1, MSM8939_MASTER_SDCC_1, 8, -1, -1, MSM8939_PNOC_IN
 DEFINE_QNODE(mas_pcnoc_sdcc_2, MSM8939_MASTER_SDCC_2, 8, -1, -1, MSM8939_PNOC_INT_1);
 DEFINE_QNODE(mas_qdss_bam, MSM8939_MASTER_QDSS_BAM, 8, -1, -1, MSM8939_SNOC_QDSS_INT);
 DEFINE_QNODE(mas_qdss_etr, MSM8939_MASTER_QDSS_ETR, 8, -1, -1, MSM8939_SNOC_QDSS_INT);
-DEFINE_QNODE(mas_snoc_cfg, MSM8939_MASTER_SNOC_CFG, 4, 20, -1, MSM8939_SLAVE_SRVC_SNOC);
+DEFINE_QNODE(mas_snoc_cfg, MSM8939_MASTER_SNOC_CFG, 4, -1, -1, MSM8939_SLAVE_SRVC_SNOC);
 DEFINE_QNODE(mas_spdm, MSM8939_MASTER_SPDM, 4, -1, -1, MSM8939_PNOC_MAS_0);
 DEFINE_QNODE(mas_tcu0, MSM8939_MASTER_TCU0, 16, -1, -1, MSM8939_SLAVE_EBI_CH0, MSM8939_BIMC_SNOC_MAS, MSM8939_SLAVE_AMPSS_L2);
 DEFINE_QNODE(mas_usb_hs1, MSM8939_MASTER_USB_HS1, 4, -1, -1, MSM8939_PNOC_MAS_1);
@@ -156,14 +156,14 @@ DEFINE_QNODE(pcnoc_snoc_mas, MSM8939_PNOC_SNOC_MAS, 8, 29, -1, MSM8939_PNOC_SNOC
 DEFINE_QNODE(pcnoc_snoc_slv, MSM8939_PNOC_SNOC_SLV, 8, -1, 45, MSM8939_SNOC_INT_0, MSM8939_SNOC_INT_BIMC, MSM8939_SNOC_INT_1);
 DEFINE_QNODE(qdss_int, MSM8939_SNOC_QDSS_INT, 8, -1, -1, MSM8939_SNOC_INT_0, MSM8939_SNOC_INT_BIMC);
 DEFINE_QNODE(slv_apps_l2, MSM8939_SLAVE_AMPSS_L2, 16, -1, -1, 0);
-DEFINE_QNODE(slv_apss, MSM8939_SLAVE_APSS, 4, -1, 20, 0);
+DEFINE_QNODE(slv_apss, MSM8939_SLAVE_APSS, 4, -1, -1, 0);
 DEFINE_QNODE(slv_audio, MSM8939_SLAVE_LPASS, 4, -1, -1, 0);
 DEFINE_QNODE(slv_bimc_cfg, MSM8939_SLAVE_BIMC_CFG, 4, -1, -1, 0);
 DEFINE_QNODE(slv_blsp_1, MSM8939_SLAVE_BLSP_1, 4, -1, -1, 0);
 DEFINE_QNODE(slv_boot_rom, MSM8939_SLAVE_BOOT_ROM, 4, -1, -1, 0);
 DEFINE_QNODE(slv_camera_cfg, MSM8939_SLAVE_CAMERA_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(slv_cats_0, MSM8939_SLAVE_CATS_128, 16, -1, 106, 0);
-DEFINE_QNODE(slv_cats_1, MSM8939_SLAVE_OCMEM_64, 8, -1, 107, 0);
+DEFINE_QNODE(slv_cats_0, MSM8939_SLAVE_CATS_128, 16, -1, -1, 0);
+DEFINE_QNODE(slv_cats_1, MSM8939_SLAVE_OCMEM_64, 8, -1, -1, 0);
 DEFINE_QNODE(slv_clk_ctl, MSM8939_SLAVE_CLK_CTL, 4, -1, -1, 0);
 DEFINE_QNODE(slv_crypto_0_cfg, MSM8939_SLAVE_CRYPTO_0_CFG, 4, -1, -1, 0);
 DEFINE_QNODE(slv_dehr_cfg, MSM8939_SLAVE_DEHR_CFG, 4, -1, -1, 0);
@@ -187,20 +187,20 @@ DEFINE_QNODE(slv_sdcc_2, MSM8939_SLAVE_SDCC_2, 4, -1, -1, 0);
 DEFINE_QNODE(slv_security, MSM8939_SLAVE_SECURITY, 4, -1, -1, 0);
 DEFINE_QNODE(slv_snoc_cfg, MSM8939_SLAVE_SNOC_CFG, 4, -1, -1, 0);
 DEFINE_QNODE(slv_spdm, MSM8939_SLAVE_SPDM, 4, -1, -1, 0);
-DEFINE_QNODE(slv_srvc_snoc, MSM8939_SLAVE_SRVC_SNOC, 8, -1, 29, 0);
+DEFINE_QNODE(slv_srvc_snoc, MSM8939_SLAVE_SRVC_SNOC, 8, -1, -1, 0);
 DEFINE_QNODE(slv_tcsr, MSM8939_SLAVE_TCSR, 4, -1, -1, 0);
 DEFINE_QNODE(slv_tlmm, MSM8939_SLAVE_TLMM, 4, -1, -1, 0);
 DEFINE_QNODE(slv_usb_hs1, MSM8939_SLAVE_USB_HS1, 4, -1, -1, 0);
 DEFINE_QNODE(slv_usb_hs2, MSM8939_SLAVE_USB_HS2, 4, -1, -1, 0);
 DEFINE_QNODE(slv_venus_cfg, MSM8939_SLAVE_VENUS_CFG, 4, -1, -1, 0);
-DEFINE_QNODE(snoc_bimc_0_mas, MSM8939_SNOC_BIMC_0_MAS, 16, 3, -1, MSM8939_SNOC_BIMC_0_SLV);
-DEFINE_QNODE(snoc_bimc_0_slv, MSM8939_SNOC_BIMC_0_SLV, 16, -1, 24, MSM8939_SLAVE_EBI_CH0);
+DEFINE_QNODE(snoc_bimc_0_mas, MSM8939_SNOC_BIMC_0_MAS, 16, -1, -1, MSM8939_SNOC_BIMC_0_SLV);
+DEFINE_QNODE(snoc_bimc_0_slv, MSM8939_SNOC_BIMC_0_SLV, 16, -1, -1, MSM8939_SLAVE_EBI_CH0);
 DEFINE_QNODE(snoc_bimc_1_mas, MSM8939_SNOC_BIMC_1_MAS, 16, 76, -1, MSM8939_SNOC_BIMC_1_SLV);
 DEFINE_QNODE(snoc_bimc_1_slv, MSM8939_SNOC_BIMC_1_SLV, 16, -1, 104, MSM8939_SLAVE_EBI_CH0);
 DEFINE_QNODE(snoc_bimc_2_mas, MSM8939_SNOC_BIMC_2_MAS, 16, -1, -1, MSM8939_SNOC_BIMC_2_SLV);
 DEFINE_QNODE(snoc_bimc_2_slv, MSM8939_SNOC_BIMC_2_SLV, 16, -1, -1, MSM8939_SLAVE_EBI_CH0);
 DEFINE_QNODE(snoc_int_0, MSM8939_SNOC_INT_0, 8, 99, 130, MSM8939_SLAVE_QDSS_STM, MSM8939_SLAVE_IMEM, MSM8939_SNOC_PNOC_MAS);
-DEFINE_QNODE(snoc_int_1, MSM8939_SNOC_INT_1, 8, 100, 131, MSM8939_SLAVE_APSS, MSM8939_SLAVE_CATS_128, MSM8939_SLAVE_OCMEM_64);
+DEFINE_QNODE(snoc_int_1, MSM8939_SNOC_INT_1, 8, -1, -1, MSM8939_SLAVE_APSS, MSM8939_SLAVE_CATS_128, MSM8939_SLAVE_OCMEM_64);
 DEFINE_QNODE(snoc_int_bimc, MSM8939_SNOC_INT_BIMC, 8, 101, 132, MSM8939_SNOC_BIMC_1_MAS);
 DEFINE_QNODE(snoc_pcnoc_mas, MSM8939_SNOC_PNOC_MAS, 8, -1, -1, MSM8939_SNOC_PNOC_SLV);
 DEFINE_QNODE(snoc_pcnoc_slv, MSM8939_SNOC_PNOC_SLV, 8, -1, -1, MSM8939_PNOC_INT_0);
index 7b2f4d0..2f9a289 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * CZ.NIC's Turris Omnia LEDs driver
  *
- * 2020 by Marek Behun <marek.behun@nic.cz>
+ * 2020 by Marek Behún <kabel@kernel.org>
  */
 
 #include <linux/i2c.h>
@@ -287,6 +287,6 @@ static struct i2c_driver omnia_leds_driver = {
 
 module_i2c_driver(omnia_leds_driver);
 
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
 MODULE_DESCRIPTION("CZ.NIC's Turris Omnia LEDs");
 MODULE_LICENSE("GPL v2");
index 9f2ce7f..456a117 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * rWTM BIU Mailbox driver for Armada 37xx
  *
- * Author: Marek Behun <marek.behun@nic.cz>
+ * Author: Marek Behún <kabel@kernel.org>
  */
 
 #include <linux/device.h>
@@ -203,4 +203,4 @@ module_platform_driver(armada_37xx_mbox_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("rWTM BIU Mailbox driver for Armada 37xx");
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
index 66f4c63..cea2b37 100644 (file)
@@ -65,7 +65,7 @@ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
        u8 *res;
 
        position = (index + rsb) * v->fec->roots;
-       block = div64_u64_rem(position, v->fec->roots << SECTOR_SHIFT, &rem);
+       block = div64_u64_rem(position, v->fec->io_size, &rem);
        *offset = (unsigned)rem;
 
        res = dm_bufio_read(v->fec->bufio, block, buf);
@@ -154,7 +154,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
 
                /* read the next block when we run out of parity bytes */
                offset += v->fec->roots;
-               if (offset >= v->fec->roots << SECTOR_SHIFT) {
+               if (offset >= v->fec->io_size) {
                        dm_bufio_release(buf);
 
                        par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
@@ -742,8 +742,13 @@ int verity_fec_ctr(struct dm_verity *v)
                return -E2BIG;
        }
 
+       if ((f->roots << SECTOR_SHIFT) & ((1 << v->data_dev_block_bits) - 1))
+               f->io_size = 1 << v->data_dev_block_bits;
+       else
+               f->io_size = v->fec->roots << SECTOR_SHIFT;
+
        f->bufio = dm_bufio_client_create(f->dev->bdev,
-                                         f->roots << SECTOR_SHIFT,
+                                         f->io_size,
                                          1, 0, NULL, NULL);
        if (IS_ERR(f->bufio)) {
                ti->error = "Cannot initialize FEC bufio client";
index 42fbd3a..3c46c8d 100644 (file)
@@ -36,6 +36,7 @@ struct dm_verity_fec {
        struct dm_dev *dev;     /* parity data device */
        struct dm_bufio_client *data_bufio;     /* for data dev access */
        struct dm_bufio_client *bufio;          /* for parity data access */
+       size_t io_size;         /* IO size for roots */
        sector_t start;         /* parity data start in blocks */
        sector_t blocks;        /* number of blocks covered */
        sector_t rounds;        /* number of interleaving rounds */
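
The verity-fec change above introduces io_size so parity reads fall back to one data block whenever roots << SECTOR_SHIFT is not a multiple of the data block size. A user-space sketch of that selection rule; the example parameters are hypothetical:

#include <stdio.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors */

/* Choose the bufio block size for FEC parity access: keep the parity
 * stripe size when it is block-aligned, otherwise use one data block. */
static unsigned int fec_io_size(unsigned int roots, unsigned int block_bits)
{
	unsigned int stripe = roots << SECTOR_SHIFT;

	if (stripe & ((1u << block_bits) - 1))
		return 1u << block_bits;
	return stripe;
}

int main(void)
{
	printf("%u\n", fec_io_size(2, 12));	/* 1024 unaligned -> 4096 */
	printf("%u\n", fec_io_size(16, 12));	/* 8192 aligned   -> 8192 */
	return 0;
}
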
index 4378a9b..2cc370a 100644 (file)
@@ -2286,8 +2286,8 @@ int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
        if (buffer_id == 0)
                return -EINVAL;
 
-       if (!mei_cl_is_connected(cl))
-               return -ENODEV;
+       if (mei_cl_is_connected(cl))
+               return -EPROTO;
 
        if (cl->dma_mapped)
                return -EPROTO;
@@ -2327,9 +2327,7 @@ int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
 
        mutex_unlock(&dev->device_lock);
        wait_event_timeout(cl->wait,
-                          cl->dma_mapped ||
-                          cl->status ||
-                          !mei_cl_is_connected(cl),
+                          cl->dma_mapped || cl->status,
                           mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
        mutex_lock(&dev->device_lock);
 
@@ -2376,8 +2374,9 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
                return -EOPNOTSUPP;
        }
 
-       if (!mei_cl_is_connected(cl))
-               return -ENODEV;
+       /* do not allow unmap for connected client */
+       if (mei_cl_is_connected(cl))
+               return -EPROTO;
 
        if (!cl->dma_mapped)
                return -EPROTO;
@@ -2405,9 +2404,7 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
 
        mutex_unlock(&dev->device_lock);
        wait_event_timeout(cl->wait,
-                          !cl->dma_mapped ||
-                          cl->status ||
-                          !mei_cl_is_connected(cl),
+                          !cl->dma_mapped || cl->status,
                           mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
        mutex_lock(&dev->device_lock);
 
index 57f1f17..5c5c921 100644 (file)
@@ -488,8 +488,8 @@ static int mtk_nfc_exec_instr(struct nand_chip *chip,
                return 0;
        case NAND_OP_WAITRDY_INSTR:
                return readl_poll_timeout(nfc->regs + NFI_STA, status,
-                                         status & STA_BUSY, 20,
-                                         instr->ctx.waitrdy.timeout_ms);
+                                         !(status & STA_BUSY), 20,
+                                         instr->ctx.waitrdy.timeout_ms * 1000);
        default:
                break;
        }
index f69fb42..a57da43 100644 (file)
@@ -314,6 +314,18 @@ static int mcp251x_spi_trans(struct spi_device *spi, int len)
        return ret;
 }
 
+static int mcp251x_spi_write(struct spi_device *spi, int len)
+{
+       struct mcp251x_priv *priv = spi_get_drvdata(spi);
+       int ret;
+
+       ret = spi_write(spi, priv->spi_tx_buf, len);
+       if (ret)
+               dev_err(&spi->dev, "spi write failed: ret = %d\n", ret);
+
+       return ret;
+}
+
 static u8 mcp251x_read_reg(struct spi_device *spi, u8 reg)
 {
        struct mcp251x_priv *priv = spi_get_drvdata(spi);
@@ -361,7 +373,7 @@ static void mcp251x_write_reg(struct spi_device *spi, u8 reg, u8 val)
        priv->spi_tx_buf[1] = reg;
        priv->spi_tx_buf[2] = val;
 
-       mcp251x_spi_trans(spi, 3);
+       mcp251x_spi_write(spi, 3);
 }
 
 static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2)
@@ -373,7 +385,7 @@ static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2)
        priv->spi_tx_buf[2] = v1;
        priv->spi_tx_buf[3] = v2;
 
-       mcp251x_spi_trans(spi, 4);
+       mcp251x_spi_write(spi, 4);
 }
 
 static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
@@ -386,7 +398,7 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
        priv->spi_tx_buf[2] = mask;
        priv->spi_tx_buf[3] = val;
 
-       mcp251x_spi_trans(spi, 4);
+       mcp251x_spi_write(spi, 4);
 }
 
 static u8 mcp251x_read_stat(struct spi_device *spi)
@@ -618,7 +630,7 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
                                          buf[i]);
        } else {
                memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len);
-               mcp251x_spi_trans(spi, TXBDAT_OFF + len);
+               mcp251x_spi_write(spi, TXBDAT_OFF + len);
        }
 }
 
@@ -650,7 +662,7 @@ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
 
        /* use INSTRUCTION_RTS, to avoid "repeated frame problem" */
        priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx);
-       mcp251x_spi_trans(priv->spi, 1);
+       mcp251x_spi_write(priv->spi, 1);
 }
 
 static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
@@ -888,7 +900,7 @@ static int mcp251x_hw_reset(struct spi_device *spi)
        mdelay(MCP251X_OST_DELAY_MS);
 
        priv->spi_tx_buf[0] = INSTRUCTION_RESET;
-       ret = mcp251x_spi_trans(spi, 1);
+       ret = mcp251x_spi_write(spi, 1);
        if (ret)
                return ret;
 
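The new mcp251x_spi_write() helper above forwards the transfer and logs the error code at the point of failure, so callers that ignore the return value still leave a trace. A hedged user-space model of the pattern; bus_write() is a stand-in, not the real SPI API:

#include <stdio.h>

/* Stand-in for the underlying bus write; fails for oversized buffers. */
static int bus_write(const unsigned char *buf, int len)
{
	(void)buf;
	return len > 4 ? -5 /* -EIO */ : 0;
}

/* Thin wrapper: propagate the result, but log once where it failed. */
static int spi_write_logged(const unsigned char *buf, int len)
{
	int ret = bus_write(buf, len);

	if (ret)
		fprintf(stderr, "spi write failed: ret = %d\n", ret);
	return ret;
}

int main(void)
{
	unsigned char buf[8] = { 0 };

	spi_write_logged(buf, 3);	/* succeeds silently */
	spi_write_logged(buf, 6);	/* logs the failure */
	return 0;
}
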
index 573b115..28e916a 100644 (file)
@@ -857,7 +857,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
        if (dev->adapter->dev_set_bus) {
                err = dev->adapter->dev_set_bus(dev, 0);
                if (err)
-                       goto lbl_unregister_candev;
+                       goto adap_dev_free;
        }
 
        /* get device number early */
@@ -869,6 +869,10 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
 
        return 0;
 
+adap_dev_free:
+       if (dev->adapter->dev_free)
+               dev->adapter->dev_free(dev);
+
 lbl_unregister_candev:
        unregister_candev(netdev);
 
index 52e865a..bf5c62e 100644 (file)
 
 /* GSWIP MII Registers */
 #define GSWIP_MII_CFGp(p)              (0x2 * (p))
+#define  GSWIP_MII_CFG_RESET           BIT(15)
 #define  GSWIP_MII_CFG_EN              BIT(14)
+#define  GSWIP_MII_CFG_ISOLATE         BIT(13)
 #define  GSWIP_MII_CFG_LDCLKDIS                BIT(12)
+#define  GSWIP_MII_CFG_RGMII_IBS       BIT(8)
+#define  GSWIP_MII_CFG_RMII_CLK                BIT(7)
 #define  GSWIP_MII_CFG_MODE_MIIP       0x0
 #define  GSWIP_MII_CFG_MODE_MIIM       0x1
 #define  GSWIP_MII_CFG_MODE_RMIIP      0x2
 #define GSWIP_PCE_DEFPVID(p)           (0x486 + ((p) * 0xA))
 
 #define GSWIP_MAC_FLEN                 0x8C5
+#define GSWIP_MAC_CTRL_0p(p)           (0x903 + ((p) * 0xC))
+#define  GSWIP_MAC_CTRL_0_PADEN                BIT(8)
+#define  GSWIP_MAC_CTRL_0_FCS_EN       BIT(7)
+#define  GSWIP_MAC_CTRL_0_FCON_MASK    0x0070
+#define  GSWIP_MAC_CTRL_0_FCON_AUTO    0x0000
+#define  GSWIP_MAC_CTRL_0_FCON_RX      0x0010
+#define  GSWIP_MAC_CTRL_0_FCON_TX      0x0020
+#define  GSWIP_MAC_CTRL_0_FCON_RXTX    0x0030
+#define  GSWIP_MAC_CTRL_0_FCON_NONE    0x0040
+#define  GSWIP_MAC_CTRL_0_FDUP_MASK    0x000C
+#define  GSWIP_MAC_CTRL_0_FDUP_AUTO    0x0000
+#define  GSWIP_MAC_CTRL_0_FDUP_EN      0x0004
+#define  GSWIP_MAC_CTRL_0_FDUP_DIS     0x000C
+#define  GSWIP_MAC_CTRL_0_GMII_MASK    0x0003
+#define  GSWIP_MAC_CTRL_0_GMII_AUTO    0x0000
+#define  GSWIP_MAC_CTRL_0_GMII_MII     0x0001
+#define  GSWIP_MAC_CTRL_0_GMII_RGMII   0x0002
 #define GSWIP_MAC_CTRL_2p(p)           (0x905 + ((p) * 0xC))
 #define GSWIP_MAC_CTRL_2_MLEN          BIT(3) /* Maximum Untagged Frame Length */
 
@@ -653,16 +674,13 @@ static int gswip_port_enable(struct dsa_switch *ds, int port,
                          GSWIP_SDMA_PCTRLp(port));
 
        if (!dsa_is_cpu_port(ds, port)) {
-               u32 macconf = GSWIP_MDIO_PHY_LINK_AUTO |
-                             GSWIP_MDIO_PHY_SPEED_AUTO |
-                             GSWIP_MDIO_PHY_FDUP_AUTO |
-                             GSWIP_MDIO_PHY_FCONTX_AUTO |
-                             GSWIP_MDIO_PHY_FCONRX_AUTO |
-                             (phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK);
-
-               gswip_mdio_w(priv, macconf, GSWIP_MDIO_PHYp(port));
-               /* Activate MDIO auto polling */
-               gswip_mdio_mask(priv, 0, BIT(port), GSWIP_MDIO_MDC_CFG0);
+               u32 mdio_phy = 0;
+
+               if (phydev)
+                       mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
+
+               gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
+                               GSWIP_MDIO_PHYp(port));
        }
 
        return 0;
@@ -675,14 +693,6 @@ static void gswip_port_disable(struct dsa_switch *ds, int port)
        if (!dsa_is_user_port(ds, port))
                return;
 
-       if (!dsa_is_cpu_port(ds, port)) {
-               gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_DOWN,
-                               GSWIP_MDIO_PHY_LINK_MASK,
-                               GSWIP_MDIO_PHYp(port));
-               /* Deactivate MDIO auto polling */
-               gswip_mdio_mask(priv, BIT(port), 0, GSWIP_MDIO_MDC_CFG0);
-       }
-
        gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
                          GSWIP_FDMA_PCTRLp(port));
        gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
@@ -794,14 +804,32 @@ static int gswip_setup(struct dsa_switch *ds)
        gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
        gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);
 
-       /* disable PHY auto polling */
+       /* Deactivate MDIO PHY auto polling. Some PHYs such as the AR8030 have an
+        * interoperability problem with this auto polling mechanism because
+        * their status registers think that the link is in a different state
+        * than it actually is. For the AR8030 it has the BMSR_ESTATEN bit set
+        * as well as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. This makes the
+        * auto polling state machine consider the link being negotiated with
+        * 1Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY this leads
+        * to the switch port being completely dead (RX and TX are both not
+        * working).
+        * Also with various other PHY / port combinations (PHY11G GPHY, PHY22F
+        * GPHY, external RGMII PEF7071/7072) any traffic would stop. Sometimes
+        * it would work fine for a few minutes to hours and then stop; on
+        * other devices no traffic could be sent or received at all.
+        * Testing shows that when PHY auto polling is disabled these problems
+        * go away.
+        */
        gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);
+
        /* Configure the MDIO Clock 2.5 MHz */
        gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
 
-       /* Disable the xMII link */
+       /* Disable the xMII interface and clear its isolation bit */
        for (i = 0; i < priv->hw_info->max_ports; i++)
-               gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, i);
+               gswip_mii_mask_cfg(priv,
+                                  GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
+                                  0, i);
 
        /* enable special tag insertion on cpu port */
        gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
@@ -1450,6 +1478,112 @@ unsupported:
        return;
 }
 
+static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
+{
+       u32 mdio_phy;
+
+       if (link)
+               mdio_phy = GSWIP_MDIO_PHY_LINK_UP;
+       else
+               mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN;
+
+       gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy,
+                       GSWIP_MDIO_PHYp(port));
+}
+
+static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
+                                phy_interface_t interface)
+{
+       u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0;
+
+       switch (speed) {
+       case SPEED_10:
+               mdio_phy = GSWIP_MDIO_PHY_SPEED_M10;
+
+               if (interface == PHY_INTERFACE_MODE_RMII)
+                       mii_cfg = GSWIP_MII_CFG_RATE_M50;
+               else
+                       mii_cfg = GSWIP_MII_CFG_RATE_M2P5;
+
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
+               break;
+
+       case SPEED_100:
+               mdio_phy = GSWIP_MDIO_PHY_SPEED_M100;
+
+               if (interface == PHY_INTERFACE_MODE_RMII)
+                       mii_cfg = GSWIP_MII_CFG_RATE_M50;
+               else
+                       mii_cfg = GSWIP_MII_CFG_RATE_M25;
+
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
+               break;
+
+       case SPEED_1000:
+               mdio_phy = GSWIP_MDIO_PHY_SPEED_G1;
+
+               mii_cfg = GSWIP_MII_CFG_RATE_M125;
+
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII;
+               break;
+       }
+
+       gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy,
+                       GSWIP_MDIO_PHYp(port));
+       gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port);
+       gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0,
+                         GSWIP_MAC_CTRL_0p(port));
+}
+
+static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex)
+{
+       u32 mac_ctrl_0, mdio_phy;
+
+       if (duplex == DUPLEX_FULL) {
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN;
+               mdio_phy = GSWIP_MDIO_PHY_FDUP_EN;
+       } else {
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS;
+               mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS;
+       }
+
+       gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0,
+                         GSWIP_MAC_CTRL_0p(port));
+       gswip_mdio_mask(priv, GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy,
+                       GSWIP_MDIO_PHYp(port));
+}
+
+static void gswip_port_set_pause(struct gswip_priv *priv, int port,
+                                bool tx_pause, bool rx_pause)
+{
+       u32 mac_ctrl_0, mdio_phy;
+
+       if (tx_pause && rx_pause) {
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX;
+               mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
+                          GSWIP_MDIO_PHY_FCONRX_EN;
+       } else if (tx_pause) {
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX;
+               mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
+                          GSWIP_MDIO_PHY_FCONRX_DIS;
+       } else if (rx_pause) {
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX;
+               mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
+                          GSWIP_MDIO_PHY_FCONRX_EN;
+       } else {
+               mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE;
+               mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
+                          GSWIP_MDIO_PHY_FCONRX_DIS;
+       }
+
+       gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK,
+                         mac_ctrl_0, GSWIP_MAC_CTRL_0p(port));
+       gswip_mdio_mask(priv,
+                       GSWIP_MDIO_PHY_FCONTX_MASK |
+                       GSWIP_MDIO_PHY_FCONRX_MASK,
+                       mdio_phy, GSWIP_MDIO_PHYp(port));
+}
+
 static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
                                     unsigned int mode,
                                     const struct phylink_link_state *state)
@@ -1469,6 +1603,9 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
                break;
        case PHY_INTERFACE_MODE_RMII:
                miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
+
+               /* Configure the RMII clock as output: */
+               miicfg |= GSWIP_MII_CFG_RMII_CLK;
                break;
        case PHY_INTERFACE_MODE_RGMII:
        case PHY_INTERFACE_MODE_RGMII_ID:
@@ -1481,7 +1618,11 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
                        "Unsupported interface: %d\n", state->interface);
                return;
        }
-       gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_MODE_MASK, miicfg, port);
+
+       gswip_mii_mask_cfg(priv,
+                          GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
+                          GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS,
+                          miicfg, port);
 
        switch (state->interface) {
        case PHY_INTERFACE_MODE_RGMII_ID:
@@ -1506,6 +1647,9 @@ static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
        struct gswip_priv *priv = ds->priv;
 
        gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
+
+       if (!dsa_is_cpu_port(ds, port))
+               gswip_port_set_link(priv, port, false);
 }
 
 static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
@@ -1517,6 +1661,13 @@ static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
 {
        struct gswip_priv *priv = ds->priv;
 
+       if (!dsa_is_cpu_port(ds, port)) {
+               gswip_port_set_link(priv, port, true);
+               gswip_port_set_speed(priv, port, speed, interface);
+               gswip_port_set_duplex(priv, port, duplex);
+               gswip_port_set_pause(priv, port, tx_pause, rx_pause);
+       }
+
        gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
 }
 
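With MDIO auto polling disabled (see the comment added in gswip_setup() above), link state, speed, duplex and pause are now forced from the phylink callbacks; gswip_port_set_pause() reduces the tx_pause/rx_pause pair to a single flow-control mode. A small standalone sketch of that mapping; the enum names are illustrative, not the driver's register fields:

#include <stdbool.h>
#include <stdio.h>

enum fcon { FCON_NONE, FCON_RX, FCON_TX, FCON_RXTX };

/* Collapse the two pause booleans into a single flow-control mode. */
static enum fcon pick_flow_control(bool tx_pause, bool rx_pause)
{
	if (tx_pause && rx_pause)
		return FCON_RXTX;
	if (tx_pause)
		return FCON_TX;
	if (rx_pause)
		return FCON_RX;
	return FCON_NONE;
}

int main(void)
{
	static const char * const names[] = { "none", "rx", "tx", "rx+tx" };

	printf("tx only -> %s\n", names[pick_flow_control(true, false)]);
	printf("rx only -> %s\n", names[pick_flow_control(false, true)]);
	printf("both    -> %s\n", names[pick_flow_control(true, true)]);
	return 0;
}
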
index 903d619..e08bf93 100644 (file)
@@ -3026,10 +3026,17 @@ out_resources:
        return err;
 }
 
+/* prod_id for switch families which do not have a PHY model number */
+static const u16 family_prod_id_table[] = {
+       [MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
+       [MV88E6XXX_FAMILY_6390] = MV88E6XXX_PORT_SWITCH_ID_PROD_6390,
+};
+
 static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
 {
        struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
        struct mv88e6xxx_chip *chip = mdio_bus->chip;
+       u16 prod_id;
        u16 val;
        int err;
 
@@ -3040,23 +3047,12 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
        err = chip->info->ops->phy_read(chip, bus, phy, reg, &val);
        mv88e6xxx_reg_unlock(chip);
 
-       if (reg == MII_PHYSID2) {
-               /* Some internal PHYs don't have a model number. */
-               if (chip->info->family != MV88E6XXX_FAMILY_6165)
-                       /* Then there is the 6165 family. It gets is
-                        * PHYs correct. But it can also have two
-                        * SERDES interfaces in the PHY address
-                        * space. And these don't have a model
-                        * number. But they are not PHYs, so we don't
-                        * want to give them something a PHY driver
-                        * will recognise.
-                        *
-                        * Use the mv88e6390 family model number
-                        * instead, for anything which really could be
-                        * a PHY,
-                        */
-                       if (!(val & 0x3f0))
-                               val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4;
+       /* Some internal PHYs don't have a model number. */
+       if (reg == MII_PHYSID2 && !(val & 0x3f0) &&
+           chip->info->family < ARRAY_SIZE(family_prod_id_table)) {
+               prod_id = family_prod_id_table[chip->info->family];
+               if (prod_id)
+                       val |= prod_id >> 4;
        }
 
        return err ? err : val;
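
The mdio_read rework above replaces the open-coded 6390 special case with a bounded per-family table: when an internal PHY reports no model number, the switch family's product ID is substituted. A user-space sketch of that lookup; the family indices and product IDs are placeholders, not the driver's constants:

#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum { FAMILY_6341 = 1, FAMILY_6390 = 2 };

/* Placeholder product IDs keyed by family. */
static const uint16_t family_prod_id[] = {
	[FAMILY_6341] = 0x3410,
	[FAMILY_6390] = 0x3900,
};

/* If MII_PHYSID2 carries no model bits (9:4 clear), borrow the switch
 * family's product ID, skipping families without a table entry. */
static uint16_t fixup_physid2(uint16_t val, unsigned int family)
{
	if (!(val & 0x3f0) && family < ARRAY_SIZE(family_prod_id) &&
	    family_prod_id[family])
		val |= family_prod_id[family] >> 4;
	return val;
}

int main(void)
{
	printf("0x%04x\n", fixup_physid2(0x0c00, FAMILY_6390)); /* 0x0f90 */
	printf("0x%04x\n", fixup_physid2(0x0c00, 0));           /* unchanged */
	return 0;
}
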
index 187b0b9..f78daba 100644 (file)
@@ -1534,8 +1534,7 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
        pci_set_master(pdev);
 
-       ioaddr = pci_resource_start(pdev, 0);
-       if (!ioaddr) {
+       if (!pci_resource_len(pdev, 0)) {
                if (pcnet32_debug & NETIF_MSG_PROBE)
                        pr_err("card has no PCI IO resources, aborting\n");
                err = -ENODEV;
@@ -1548,6 +1547,8 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
                        pr_err("architecture does not support 32bit PCI busmaster DMA\n");
                goto err_disable_dev;
        }
+
+       ioaddr = pci_resource_start(pdev, 0);
        if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
                if (pcnet32_debug & NETIF_MSG_PROBE)
                        pr_err("io address range already allocated\n");
index ba8321e..3305979 100644 (file)
 #define XGBE_DMA_SYS_AWCR      0x30303030
 
 /* DMA cache settings - PCI device */
-#define XGBE_DMA_PCI_ARCR      0x00000003
-#define XGBE_DMA_PCI_AWCR      0x13131313
-#define XGBE_DMA_PCI_AWARCR    0x00000313
+#define XGBE_DMA_PCI_ARCR      0x000f0f0f
+#define XGBE_DMA_PCI_AWCR      0x0f0f0f0f
+#define XGBE_DMA_PCI_AWARCR    0x00000f0f
 
 /* DMA channel interrupt modes */
 #define XGBE_IRQ_MODE_EDGE     0
index 98cf82d..6598193 100644 (file)
@@ -172,6 +172,7 @@ static int bcm4908_dma_alloc_buf_descs(struct bcm4908_enet *enet,
 
 err_free_buf_descs:
        dma_free_coherent(dev, size, ring->cpu_addr, ring->dma_addr);
+       ring->cpu_addr = NULL;
        return -ENOMEM;
 }
 
index 15362d0..0f6a6cb 100644 (file)
@@ -3239,6 +3239,9 @@ static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
        bool cmp_b = false;
        bool cmp_c = false;
 
+       if (!macb_is_gem(bp))
+               return;
+
        tp4sp_v = &(fs->h_u.tcp_ip4_spec);
        tp4sp_m = &(fs->m_u.tcp_ip4_spec);
 
@@ -3607,6 +3610,7 @@ static void macb_restore_features(struct macb *bp)
 {
        struct net_device *netdev = bp->dev;
        netdev_features_t features = netdev->features;
+       struct ethtool_rx_fs_item *item;
 
        /* TX checksum offload */
        macb_set_txcsum_feature(bp, features);
@@ -3615,6 +3619,9 @@ static void macb_restore_features(struct macb *bp)
        macb_set_rxcsum_feature(bp, features);
 
        /* RX Flow Filters */
+       list_for_each_entry(item, &bp->rx_fs_list.list, list)
+               gem_prog_cmp_regs(bp, &item->fs);
+
        macb_set_rxflow_feature(bp, features);
 }
 
@@ -3911,6 +3918,7 @@ static int macb_init(struct platform_device *pdev)
        reg = gem_readl(bp, DCFG8);
        bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
                        GEM_BFEXT(T2SCR, reg));
+       INIT_LIST_HEAD(&bp->rx_fs_list.list);
        if (bp->max_tuples > 0) {
                /* also needs one ethtype match to check IPv4 */
                if (GEM_BFEXT(SCR2ETH, reg) > 0) {
@@ -3921,7 +3929,6 @@ static int macb_init(struct platform_device *pdev)
                        /* Filtering is supported in hw but don't enable it in kernel now */
                        dev->hw_features |= NETIF_F_NTUPLE;
                        /* init Rx flow definitions */
-                       INIT_LIST_HEAD(&bp->rx_fs_list.list);
                        bp->rx_fs_list.count = 0;
                        spin_lock_init(&bp->rx_fs_lock);
                } else
index b248966..7aad40b 100644 (file)
           | CN6XXX_INTR_M0UNWI_ERR             \
           | CN6XXX_INTR_M1UPB0_ERR             \
           | CN6XXX_INTR_M1UPWI_ERR             \
-          | CN6XXX_INTR_M1UPB0_ERR             \
+          | CN6XXX_INTR_M1UNB0_ERR             \
           | CN6XXX_INTR_M1UNWI_ERR             \
           | CN6XXX_INTR_INSTR_DB_OF_ERR        \
           | CN6XXX_INTR_SLIST_DB_OF_ERR        \
index 6c85a10..23a2ebd 100644 (file)
@@ -1794,11 +1794,25 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
        struct cudbg_buffer temp_buff = { 0 };
        struct sge_qbase_reg_field *sge_qbase;
        struct ireg_buf *ch_sge_dbg;
+       u8 padap_running = 0;
        int i, rc;
+       u32 size;
 
-       rc = cudbg_get_buff(pdbg_init, dbg_buff,
-                           sizeof(*ch_sge_dbg) * 2 + sizeof(*sge_qbase),
-                           &temp_buff);
+       /* Accessing SGE_QBASE_MAP[0-3] and SGE_QBASE_INDEX regs can
+        * lead to SGE missing doorbells under heavy traffic. So, only
+        * collect them when the adapter is idle.
+        */
+       for_each_port(padap, i) {
+               padap_running = netif_running(padap->port[i]);
+               if (padap_running)
+                       break;
+       }
+
+       size = sizeof(*ch_sge_dbg) * 2;
+       if (!padap_running)
+               size += sizeof(*sge_qbase);
+
+       rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
        if (rc)
                return rc;
 
@@ -1820,7 +1834,8 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
                ch_sge_dbg++;
        }
 
-       if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
+       if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5 &&
+           !padap_running) {
                sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg;
                /* 1 addr reg SGE_QBASE_INDEX and 4 data reg
                 * SGE_QBASE_MAP[0-3]
index 98829e4..80882cf 100644 (file)
@@ -2090,7 +2090,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
                0x1190, 0x1194,
                0x11a0, 0x11a4,
                0x11b0, 0x11b4,
-               0x11fc, 0x1274,
+               0x11fc, 0x123c,
+               0x1254, 0x1274,
                0x1280, 0x133c,
                0x1800, 0x18fc,
                0x3000, 0x302c,
index 1115b8f..a3f5b80 100644 (file)
@@ -349,18 +349,6 @@ static int chcr_set_tcb_field(struct chcr_ktls_info *tx_info, u16 word,
        return cxgb4_ofld_send(tx_info->netdev, skb);
 }
 
-/*
- * chcr_ktls_mark_tcb_close: mark tcb state to CLOSE
- * @tx_info - driver specific tls info.
- * return: NET_TX_OK/NET_XMIT_DROP.
- */
-static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info)
-{
-       return chcr_set_tcb_field(tx_info, TCB_T_STATE_W,
-                                 TCB_T_STATE_V(TCB_T_STATE_M),
-                                 CHCR_TCB_STATE_CLOSED, 1);
-}
-
 /*
  * chcr_ktls_dev_del:  call back for tls_dev_del.
  * Remove the tid and l2t entry and close the connection.
@@ -395,8 +383,6 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
 
        /* clear tid */
        if (tx_info->tid != -1) {
-               /* clear tcb state and then release tid */
-               chcr_ktls_mark_tcb_close(tx_info);
                cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
                                 tx_info->tid, tx_info->ip_family);
        }
@@ -574,7 +560,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
        return 0;
 
 free_tid:
-       chcr_ktls_mark_tcb_close(tx_info);
 #if IS_ENABLED(CONFIG_IPV6)
        /* clear clip entry */
        if (tx_info->ip_family == AF_INET6)
@@ -672,10 +657,6 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
        if (tx_info->pending_close) {
                spin_unlock(&tx_info->lock);
                if (!status) {
-                       /* it's a late success, tcb status is established,
-                        * mark it close.
-                        */
-                       chcr_ktls_mark_tcb_close(tx_info);
                        cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
                                         tid, tx_info->ip_family);
                }
@@ -1663,54 +1644,6 @@ static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
        refcount_add(nskb->truesize, &nskb->sk->sk_wmem_alloc);
 }
 
-/*
- * chcr_ktls_update_snd_una:  Reset the SEND_UNA. It will be done to avoid
- * sending the same segment again. It will discard the segment which is before
- * the current tx max.
- * @tx_info - driver specific tls info.
- * @q - TX queue.
- * return: NET_TX_OK/NET_XMIT_DROP.
- */
-static int chcr_ktls_update_snd_una(struct chcr_ktls_info *tx_info,
-                                   struct sge_eth_txq *q)
-{
-       struct fw_ulptx_wr *wr;
-       unsigned int ndesc;
-       int credits;
-       void *pos;
-       u32 len;
-
-       len = sizeof(*wr) + roundup(CHCR_SET_TCB_FIELD_LEN, 16);
-       ndesc = DIV_ROUND_UP(len, 64);
-
-       credits = chcr_txq_avail(&q->q) - ndesc;
-       if (unlikely(credits < 0)) {
-               chcr_eth_txq_stop(q);
-               return NETDEV_TX_BUSY;
-       }
-
-       pos = &q->q.desc[q->q.pidx];
-
-       wr = pos;
-       /* ULPTX wr */
-       wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
-       wr->cookie = 0;
-       /* fill len in wr field */
-       wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
-
-       pos += sizeof(*wr);
-
-       pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
-                                        TCB_SND_UNA_RAW_W,
-                                        TCB_SND_UNA_RAW_V(TCB_SND_UNA_RAW_M),
-                                        TCB_SND_UNA_RAW_V(0), 0);
-
-       chcr_txq_advance(&q->q, ndesc);
-       cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
-
-       return 0;
-}
-
 /*
  * chcr_end_part_handler: This handler will handle the record which
  * is complete or if record's end part is received. T6 adapter has an issue that
@@ -1735,7 +1668,9 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
                                 struct sge_eth_txq *q, u32 skb_offset,
                                 u32 tls_end_offset, bool last_wr)
 {
+       bool free_skb_if_tx_fails = false;
        struct sk_buff *nskb = NULL;
+
        /* check if it is a complete record */
        if (tls_end_offset == record->len) {
                nskb = skb;
@@ -1758,6 +1693,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
 
                if (last_wr)
                        dev_kfree_skb_any(skb);
+               else
+                       free_skb_if_tx_fails = true;
 
                last_wr = true;
 
@@ -1769,6 +1706,8 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
                                       record->num_frags,
                                       (last_wr && tcp_push_no_fin),
                                       mss)) {
+               if (free_skb_if_tx_fails)
+                       dev_kfree_skb_any(skb);
                goto out;
        }
        tx_info->prev_seq = record->end_seq;
@@ -1905,11 +1844,6 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
                        /* reset tcp_seq as per the prior_data_required len */
                        tcp_seq -= prior_data_len;
                }
-               /* reset snd una, so the middle record won't send the already
-                * sent part.
-                */
-               if (chcr_ktls_update_snd_una(tx_info, q))
-                       goto out;
                atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_middle_pkts);
        } else {
                atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_start_pkts);
@@ -2010,12 +1944,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
         * we will send the complete record again.
         */
 
+       spin_lock_irqsave(&tx_ctx->base.lock, flags);
+
        do {
-               int i;
 
                cxgb4_reclaim_completed_tx(adap, &q->q, true);
-               /* lock taken */
-               spin_lock_irqsave(&tx_ctx->base.lock, flags);
                /* fetch the tls record */
                record = tls_get_record(&tx_ctx->base, tcp_seq,
                                        &tx_info->record_no);
@@ -2074,11 +2007,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
                                                    tls_end_offset, skb_offset,
                                                    0);
 
-                       spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
                        if (ret) {
                                /* free the refcount taken earlier */
                                if (tls_end_offset < data_len)
                                        dev_kfree_skb_any(skb);
+                               spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
                                goto out;
                        }
 
@@ -2088,16 +2021,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
                        continue;
                }
 
-               /* increase page reference count of the record, so that there
-                * won't be any chance of page free in middle if in case stack
-                * receives ACK and try to delete the record.
-                */
-               for (i = 0; i < record->num_frags; i++)
-                       __skb_frag_ref(&record->frags[i]);
-               /* lock cleared */
-               spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
-
-
                /* if a tls record is finishing in this SKB */
                if (tls_end_offset <= data_len) {
                        ret = chcr_end_part_handler(tx_info, skb, record,
@@ -2122,13 +2045,9 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
                        data_len = 0;
                }
 
-               /* clear the frag ref count which increased locally before */
-               for (i = 0; i < record->num_frags; i++) {
-                       /* clear the frag ref count */
-                       __skb_frag_unref(&record->frags[i]);
-               }
                /* if any failure, come out from the loop. */
                if (ret) {
+                       spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
                        if (th->fin)
                                dev_kfree_skb_any(skb);
 
@@ -2143,6 +2062,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 
        } while (data_len > 0);
 
+       spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
        atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
        atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
 
index 252adfa..8a9096a 100644 (file)
@@ -1471,8 +1471,10 @@ dm9000_probe(struct platform_device *pdev)
 
        /* Init network device */
        ndev = alloc_etherdev(sizeof(struct board_info));
-       if (!ndev)
-               return -ENOMEM;
+       if (!ndev) {
+               ret = -ENOMEM;
+               goto out_regulator_disable;
+       }
 
        SET_NETDEV_DEV(ndev, &pdev->dev);
 
index 1cf8ef7..3ec4d9f 100644 (file)
@@ -363,7 +363,11 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
 
 static int gfar_set_mac_addr(struct net_device *dev, void *p)
 {
-       eth_mac_addr(dev, p);
+       int ret;
+
+       ret = eth_mac_addr(dev, p);
+       if (ret)
+               return ret;
 
        gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
 
index e3f81c7..b0dbe6d 100644 (file)
@@ -3966,7 +3966,6 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
         *    normalcy is to reset.
         * 2. A new reset request from the stack due to timeout
         *
-        * For the first case,error event might not have ae handle available.
         * check if this is a new reset request and we are not here just because
         * last reset attempt did not succeed and watchdog hit us again. We will
         * know this if last reset request did not occur very recently (watchdog
@@ -3976,14 +3975,14 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
         * want to make sure we throttle the reset request. Therefore, we will
         * not allow it again before 3*HZ times.
         */
-       if (!handle)
-               handle = &hdev->vport[0].nic;
 
        if (time_before(jiffies, (hdev->last_reset_time +
                                  HCLGE_RESET_INTERVAL))) {
                mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
                return;
-       } else if (hdev->default_reset_request) {
+       }
+
+       if (hdev->default_reset_request) {
                hdev->reset_level =
                        hclge_get_reset_level(ae_dev,
                                              &hdev->default_reset_request);
@@ -11211,7 +11210,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
        if (ret)
                return ret;
 
-       /* RSS indirection table has been configuared by user */
+       /* RSS indirection table has been configured by user */
        if (rxfh_configured)
                goto out;
 
index 700e068..e295d35 100644 (file)
@@ -2193,7 +2193,7 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
 
        if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
                               &hdev->reset_state)) {
-               /* PF has initmated that it is about to reset the hardware.
+               /* PF has intimated that it is about to reset the hardware.
                 * We now have to poll & check if hardware has actually
                 * completed the reset sequence. On hardware reset completion,
                 * VF needs to reset the client and ae device.
@@ -2624,14 +2624,14 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
 {
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 
+       clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
        hclgevf_reset_tqp_stats(handle);
 
        hclgevf_request_link_info(hdev);
 
        hclgevf_update_link_mode(hdev);
 
-       clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
-
        return 0;
 }
 
@@ -3497,7 +3497,7 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
        if (ret)
                return ret;
 
-       /* RSS indirection table has been configuared by user */
+       /* RSS indirection table has been configured by user */
        if (rxfh_configured)
                goto out;
 
index 9c6438d..ffb2a91 100644 (file)
@@ -1149,19 +1149,13 @@ static int __ibmvnic_open(struct net_device *netdev)
 
        rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
        if (rc) {
-               for (i = 0; i < adapter->req_rx_queues; i++)
-                       napi_disable(&adapter->napi[i]);
+               ibmvnic_napi_disable(adapter);
                release_resources(adapter);
                return rc;
        }
 
        netif_tx_start_all_queues(netdev);
 
-       if (prev_state == VNIC_CLOSED) {
-               for (i = 0; i < adapter->req_rx_queues; i++)
-                       napi_schedule(&adapter->napi[i]);
-       }
-
        adapter->state = VNIC_OPEN;
        return rc;
 }
@@ -1922,7 +1916,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
        u64 old_num_rx_queues, old_num_tx_queues;
        u64 old_num_rx_slots, old_num_tx_slots;
        struct net_device *netdev = adapter->netdev;
-       int i, rc;
+       int rc;
 
        netdev_dbg(adapter->netdev,
                   "[S:%d FOP:%d] Reset reason %d, reset_state %d\n",
@@ -2111,10 +2105,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
        /* refresh device's multicast list */
        ibmvnic_set_multi(netdev);
 
-       /* kick napi */
-       for (i = 0; i < adapter->req_rx_queues; i++)
-               napi_schedule(&adapter->napi[i]);
-
        if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
            adapter->reset_reason == VNIC_RESET_MOBILITY)
                __netdev_notify_peers(netdev);
@@ -3204,9 +3194,6 @@ restart_loop:
 
                next = ibmvnic_next_scrq(adapter, scrq);
                for (i = 0; i < next->tx_comp.num_comps; i++) {
-                       if (next->tx_comp.rcs[i])
-                               dev_err(dev, "tx error %x\n",
-                                       next->tx_comp.rcs[i]);
                        index = be32_to_cpu(next->tx_comp.correlators[i]);
                        if (index & IBMVNIC_TSO_POOL_MASK) {
                                tx_pool = &adapter->tso_pool[pool];
@@ -3220,7 +3207,13 @@ restart_loop:
                        num_entries += txbuff->num_entries;
                        if (txbuff->skb) {
                                total_bytes += txbuff->skb->len;
-                               dev_consume_skb_irq(txbuff->skb);
+                               if (next->tx_comp.rcs[i]) {
+                                       dev_err(dev, "tx error %x\n",
+                                               next->tx_comp.rcs[i]);
+                                       dev_kfree_skb_irq(txbuff->skb);
+                               } else {
+                                       dev_consume_skb_irq(txbuff->skb);
+                               }
                                txbuff->skb = NULL;
                        } else {
                                netdev_warn(adapter->netdev,
index cd53981..15f93b3 100644 (file)
@@ -142,6 +142,7 @@ enum i40e_state_t {
        __I40E_VIRTCHNL_OP_PENDING,
        __I40E_RECOVERY_MODE,
        __I40E_VF_RESETS_DISABLED,      /* disable resets during i40e_remove */
+       __I40E_VFS_RELEASING,
        /* This must be last as it determines the size of the BITMAP */
        __I40E_STATE_SIZE__,
 };
index d7c13ca..d627b59 100644 (file)
@@ -578,6 +578,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
        case RING_TYPE_XDP:
                ring = kmemdup(vsi->xdp_rings[ring_id], sizeof(*ring), GFP_KERNEL);
                break;
+       default:
+               ring = NULL;
+               break;
        }
        if (!ring)
                return;
index c70dec6..0e92668 100644 (file)
@@ -232,6 +232,8 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
        I40E_STAT(struct i40e_vsi, _name, _stat)
 #define I40E_VEB_STAT(_name, _stat) \
        I40E_STAT(struct i40e_veb, _name, _stat)
+#define I40E_VEB_TC_STAT(_name, _stat) \
+       I40E_STAT(struct i40e_cp_veb_tc_stats, _name, _stat)
 #define I40E_PFC_STAT(_name, _stat) \
        I40E_STAT(struct i40e_pfc_stats, _name, _stat)
 #define I40E_QUEUE_STAT(_name, _stat) \
@@ -266,11 +268,18 @@ static const struct i40e_stats i40e_gstrings_veb_stats[] = {
        I40E_VEB_STAT("veb.rx_unknown_protocol", stats.rx_unknown_protocol),
 };
 
+struct i40e_cp_veb_tc_stats {
+       u64 tc_rx_packets;
+       u64 tc_rx_bytes;
+       u64 tc_tx_packets;
+       u64 tc_tx_bytes;
+};
+
 static const struct i40e_stats i40e_gstrings_veb_tc_stats[] = {
-       I40E_VEB_STAT("veb.tc_%u_tx_packets", tc_stats.tc_tx_packets),
-       I40E_VEB_STAT("veb.tc_%u_tx_bytes", tc_stats.tc_tx_bytes),
-       I40E_VEB_STAT("veb.tc_%u_rx_packets", tc_stats.tc_rx_packets),
-       I40E_VEB_STAT("veb.tc_%u_rx_bytes", tc_stats.tc_rx_bytes),
+       I40E_VEB_TC_STAT("veb.tc_%u_tx_packets", tc_tx_packets),
+       I40E_VEB_TC_STAT("veb.tc_%u_tx_bytes", tc_tx_bytes),
+       I40E_VEB_TC_STAT("veb.tc_%u_rx_packets", tc_rx_packets),
+       I40E_VEB_TC_STAT("veb.tc_%u_rx_bytes", tc_rx_bytes),
 };
 
 static const struct i40e_stats i40e_gstrings_misc_stats[] = {
@@ -1101,6 +1110,7 @@ static int i40e_get_link_ksettings(struct net_device *netdev,
 
        /* Set flow control settings */
        ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
+       ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause);
 
        switch (hw->fc.requested_mode) {
        case I40E_FC_FULL:
@@ -2216,6 +2226,29 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
        }
 }
 
+/**
+ * i40e_get_veb_tc_stats - copy VEB TC statistics to formatted structure
+ * @tc: the TC statistics in the VEB structure (veb->tc_stats)
+ * @i: the index of the traffic class in (veb->tc_stats) to copy
+ *
+ * Copy the VEB TC statistics for the given traffic class from the
+ * structure of arrays (veb->tc_stats) into a flat, one-dimensional
+ * i40e_cp_veb_tc_stats structure.
+ **/
+static struct i40e_cp_veb_tc_stats
+i40e_get_veb_tc_stats(struct i40e_veb_tc_stats *tc, unsigned int i)
+{
+       struct i40e_cp_veb_tc_stats veb_tc = {
+               .tc_rx_packets = tc->tc_rx_packets[i],
+               .tc_rx_bytes = tc->tc_rx_bytes[i],
+               .tc_tx_packets = tc->tc_tx_packets[i],
+               .tc_tx_bytes = tc->tc_tx_bytes[i],
+       };
+
+       return veb_tc;
+}
+
 /**
  * i40e_get_pfc_stats - copy HW PFC statistics to formatted structure
  * @pf: the PF device structure
@@ -2300,8 +2333,16 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
                               i40e_gstrings_veb_stats);
 
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
-               i40e_add_ethtool_stats(&data, veb_stats ? veb : NULL,
-                                      i40e_gstrings_veb_tc_stats);
+               if (veb_stats) {
+                       struct i40e_cp_veb_tc_stats veb_tc =
+                               i40e_get_veb_tc_stats(&veb->tc_stats, i);
+
+                       i40e_add_ethtool_stats(&data, &veb_tc,
+                                              i40e_gstrings_veb_tc_stats);
+               } else {
+                       i40e_add_ethtool_stats(&data, NULL,
+                                              i40e_gstrings_veb_tc_stats);
+               }
 
        i40e_add_ethtool_stats(&data, pf, i40e_gstrings_stats);
 
@@ -5439,7 +5480,7 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
 
                status = i40e_aq_get_phy_register(hw,
                                I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
-                               true, addr, offset, &value, NULL);
+                               addr, true, offset, &value, NULL);
                if (status)
                        return -EIO;
                data[i] = value;
index 17f3b80..527023e 100644 (file)
@@ -2560,8 +2560,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
                                 i40e_stat_str(hw, aq_ret),
                                 i40e_aq_str(hw, hw->aq.asq_last_status));
                } else {
-                       dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n",
-                                vsi->netdev->name,
+                       dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
                                 cur_multipromisc ? "entering" : "leaving");
                }
        }
@@ -6738,9 +6737,9 @@ out:
                        set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
                        set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
                }
-       /* registers are set, lets apply */
-       if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
-               ret = i40e_hw_set_dcb_config(pf, new_cfg);
+               /* registers are set, lets apply */
+               if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
+                       ret = i40e_hw_set_dcb_config(pf, new_cfg);
        }
 
 err:
@@ -10573,12 +10572,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
                goto end_core_reset;
        }
 
-       if (!lock_acquired)
-               rtnl_lock();
-       ret = i40e_setup_pf_switch(pf, reinit);
-       if (ret)
-               goto end_unlock;
-
 #ifdef CONFIG_I40E_DCB
        /* Enable FW to write a default DCB config on link-up
         * unless I40E_FLAG_TC_MQPRIO was enabled or DCB
@@ -10593,7 +10586,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
                        i40e_aq_set_dcb_parameters(hw, false, NULL);
                        dev_warn(&pf->pdev->dev,
                                 "DCB is not supported for X710-T*L 2.5/5G speeds\n");
-                                pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+                       pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
                } else {
                        i40e_aq_set_dcb_parameters(hw, true, NULL);
                        ret = i40e_init_pf_dcb(pf);
@@ -10607,6 +10600,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
        }
 
 #endif /* CONFIG_I40E_DCB */
+       if (!lock_acquired)
+               rtnl_lock();
+       ret = i40e_setup_pf_switch(pf, reinit);
+       if (ret)
+               goto end_unlock;
 
        /* The driver only wants link up/down and module qualification
         * reports from firmware.  Note the negative logic.
@@ -12359,6 +12357,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
 {
        int err = 0;
        int size;
+       u16 pow;
 
        /* Set default capability flags */
        pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
@@ -12377,6 +12376,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
        pf->rss_table_size = pf->hw.func_caps.rss_table_size;
        pf->rss_size_max = min_t(int, pf->rss_size_max,
                                 pf->hw.func_caps.num_tx_qp);
+
+       /* find the next higher power-of-2 of num cpus */
+       pow = roundup_pow_of_two(num_online_cpus());
+       pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
+
        if (pf->hw.func_caps.rss) {
                pf->flags |= I40E_FLAG_RSS_ENABLED;
                pf->alloc_rss_size = min_t(int, pf->rss_size_max,
@@ -15140,12 +15144,16 @@ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
         * in order to register the netdev
         */
        v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
-       if (v_idx < 0)
+       if (v_idx < 0) {
+               err = v_idx;
                goto err_switch_setup;
+       }
        pf->lan_vsi = v_idx;
        vsi = pf->vsi[v_idx];
-       if (!vsi)
+       if (!vsi) {
+               err = -EFAULT;
                goto err_switch_setup;
+       }
        vsi->alloc_queue_pairs = 1;
        err = i40e_config_netdev(vsi);
        if (err)
index 5747a99..06b4271 100644 (file)
@@ -2295,8 +2295,7 @@ int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
  * @rx_ring: Rx ring being processed
  * @xdp: XDP buffer containing the frame
  **/
-static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
-                                   struct xdp_buff *xdp)
+static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
 {
        int err, result = I40E_XDP_PASS;
        struct i40e_ring *xdp_ring;
@@ -2335,7 +2334,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
        }
 xdp_out:
        rcu_read_unlock();
-       return ERR_PTR(-result);
+       return result;
 }
 
 /**
@@ -2448,6 +2447,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
        unsigned int xdp_xmit = 0;
        bool failure = false;
        struct xdp_buff xdp;
+       int xdp_res = 0;
 
 #if (PAGE_SIZE < 8192)
        frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
@@ -2513,12 +2513,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                        /* At larger PAGE_SIZE, frame_sz depend on len size */
                        xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
 #endif
-                       skb = i40e_run_xdp(rx_ring, &xdp);
+                       xdp_res = i40e_run_xdp(rx_ring, &xdp);
                }
 
-               if (IS_ERR(skb)) {
-                       unsigned int xdp_res = -PTR_ERR(skb);
-
+               if (xdp_res) {
                        if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
                                xdp_xmit |= xdp_res;
                                i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
index 1b6ec9b..5d301a4 100644 (file)
@@ -137,6 +137,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
  **/
 static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
 {
+       struct i40e_pf *pf = vf->pf;
        int i;
 
        i40e_vc_notify_vf_reset(vf);
@@ -147,6 +148,11 @@ static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
         * ensure a reset.
         */
        for (i = 0; i < 20; i++) {
+               /* If the PF is in the VF-releasing state, the VF cannot be
+                * reset, so bail out.
+                */

+               if (test_bit(__I40E_VFS_RELEASING, pf->state))
+                       return;
                if (i40e_reset_vf(vf, false))
                        return;
                usleep_range(10000, 20000);
@@ -1574,6 +1580,8 @@ void i40e_free_vfs(struct i40e_pf *pf)
 
        if (!pf->vf)
                return;
+
+       set_bit(__I40E_VFS_RELEASING, pf->state);
        while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
                usleep_range(1000, 2000);
 
@@ -1631,6 +1639,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
                }
        }
        clear_bit(__I40E_VF_DISABLE, pf->state);
+       clear_bit(__I40E_VFS_RELEASING, pf->state);
 }
 
 #ifdef CONFIG_PCI_IOV
index fc32c50..12ca841 100644 (file)
@@ -471,7 +471,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
 
        nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, descs, budget);
        if (!nb_pkts)
-               return false;
+               return true;
 
        if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
                nb_processed = xdp_ring->count - xdp_ring->next_to_use;
@@ -488,7 +488,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
 
        i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes);
 
-       return true;
+       return nb_pkts < budget;
 }
 
 /**
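
The return-value change above follows the usual polling convention: report completion only when fewer packets than the budget were sent, so the caller reschedules whenever the budget was exhausted, while an empty send (nb_pkts == 0) still counts as done. A small sketch of that convention with an invented send_batch() helper:

    #include <stdio.h>

    /* Pretend there are 'pending' descriptors queued; send at most 'budget'. */
    static int send_batch(int *pending, int budget)
    {
        int sent = (*pending < budget) ? *pending : budget;

        *pending -= sent;
        return sent;
    }

    /* 1 == all work finished, 0 == budget exhausted, poll again */
    static int xmit_done(int *pending, int budget)
    {
        int sent = send_batch(pending, budget);

        if (sent == 0)
            return 1;               /* nothing to do counts as "done"   */
        return sent < budget;       /* full budget used -> keep polling */
    }

    int main(void)
    {
        int pending = 70, budget = 32;

        while (!xmit_done(&pending, budget))
            printf("budget exhausted, %d descriptors still pending\n", pending);
        printf("done, %d pending\n", pending);
        return 0;
    }
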
index 3577064..17101c4 100644 (file)
@@ -196,7 +196,6 @@ enum ice_state {
        __ICE_NEEDS_RESTART,
        __ICE_PREPARED_FOR_RESET,       /* set by driver when prepared */
        __ICE_RESET_OICR_RECV,          /* set by driver after rcv reset OICR */
-       __ICE_DCBNL_DEVRESET,           /* set by dcbnl devreset */
        __ICE_PFR_REQ,                  /* set by driver and peers */
        __ICE_CORER_REQ,                /* set by driver and peers */
        __ICE_GLOBR_REQ,                /* set by driver and peers */
@@ -624,7 +623,7 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
 void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
 const char *ice_stat_str(enum ice_status stat_err);
 const char *ice_aq_str(enum ice_aq_err aq_err);
-bool ice_is_wol_supported(struct ice_pf *pf);
+bool ice_is_wol_supported(struct ice_hw *hw);
 int
 ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
                    bool is_tun);
@@ -642,6 +641,7 @@ int ice_fdir_create_dflt_rules(struct ice_pf *pf);
 int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
                          struct ice_rq_event_info *event);
 int ice_open(struct net_device *netdev);
+int ice_open_internal(struct net_device *netdev);
 int ice_stop(struct net_device *netdev);
 void ice_service_task_schedule(struct ice_pf *pf);
 
index 3d9475e..a20edf1 100644 (file)
@@ -717,8 +717,8 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
 
                        if (!data) {
                                data = devm_kcalloc(ice_hw_to_dev(hw),
-                                                   sizeof(*data),
                                                    ICE_AQC_FW_LOG_ID_MAX,
+                                                   sizeof(*data),
                                                    GFP_KERNEL);
                                if (!data)
                                        return ICE_ERR_NO_MEMORY;
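
The swap above restores the documented (count, element size) argument order of devm_kcalloc(); calloc-style allocators typically multiply the two internally with overflow checking, so the allocation size is unchanged here, but keeping the count in the count slot is what the overflow logic and static checkers expect. A tiny user-space analogue using calloc(), with an invented log_entry type:

    #include <stdio.h>
    #include <stdlib.h>

    struct log_entry {
        unsigned short id;
        unsigned short value;
    };

    int main(void)
    {
        size_t max_ids = 16;                 /* stand-in for ICE_AQC_FW_LOG_ID_MAX */
        /* count first, element size second: the calloc()/kcalloc() convention */
        struct log_entry *data = calloc(max_ids, sizeof(*data));

        if (!data)
            return 1;                        /* ICE_ERR_NO_MEMORY equivalent */

        data[0].id = 1;
        printf("allocated %zu zeroed entries\n", max_ids);
        free(data);
        return 0;
    }
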
index faaa08e..68866f4 100644 (file)
@@ -31,8 +31,8 @@ enum ice_ctl_q {
        ICE_CTL_Q_MAILBOX,
 };
 
-/* Control Queue timeout settings - max delay 250ms */
-#define ICE_CTL_Q_SQ_CMD_TIMEOUT       2500  /* Count 2500 times */
+/* Control Queue timeout settings - max delay 1s */
+#define ICE_CTL_Q_SQ_CMD_TIMEOUT       10000 /* Count 10000 times */
 #define ICE_CTL_Q_SQ_CMD_USEC          100   /* Check every 100usec */
 #define ICE_CTL_Q_ADMIN_INIT_TIMEOUT   10    /* Count 10 times */
 #define ICE_CTL_Q_ADMIN_INIT_MSEC      100   /* Check every 100msec */
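
The constant change above keeps the 100 usec polling interval and raises the iteration count from 2500 to 10000, growing the worst-case wait from 250 ms to 1 s, exactly count times interval. A minimal sketch of this poll-until-timeout style; cmd_done() is a made-up predicate and usleep() stands in for the driver's delay primitive:

    #include <stdio.h>
    #include <unistd.h>

    #define CMD_TIMEOUT_COUNT 10000   /* iterations           */
    #define CMD_POLL_USEC       100   /* delay per iteration  */
    /* worst case: 10000 * 100 usec = 1,000,000 usec = 1 second */

    static int cmd_done(int i)
    {
        return i >= 3;                /* pretend completion on the 4th poll */
    }

    int main(void)
    {
        int i;

        for (i = 0; i < CMD_TIMEOUT_COUNT; i++) {
            if (cmd_done(i))
                break;
            usleep(CMD_POLL_USEC);
        }
        if (i == CMD_TIMEOUT_COUNT)
            printf("command timed out after ~1s\n");
        else
            printf("command completed after %d polls\n", i + 1);
        return 0;
    }
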
index e427279..28e834a 100644 (file)
@@ -738,22 +738,27 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
 /**
  * ice_cee_to_dcb_cfg
  * @cee_cfg: pointer to CEE configuration struct
- * @dcbcfg: DCB configuration struct
+ * @pi: port information structure
  *
  * Convert CEE configuration from firmware to DCB configuration
  */
 static void
 ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
-                  struct ice_dcbx_cfg *dcbcfg)
+                  struct ice_port_info *pi)
 {
        u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
-       u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift;
-       u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
+       u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift, j;
        u8 i, err, sync, oper, app_index, ice_app_sel_type;
+       u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
        u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift;
+       struct ice_dcbx_cfg *cmp_dcbcfg, *dcbcfg;
        u16 ice_app_prot_id_type;
 
-       /* CEE PG data to ETS config */
+       dcbcfg = &pi->qos_cfg.local_dcbx_cfg;
+       dcbcfg->dcbx_mode = ICE_DCBX_MODE_CEE;
+       dcbcfg->tlv_status = tlv_status;
+
+       /* CEE PG data */
        dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
 
        /* Note that the FW creates the oper_prio_tc nibbles reversed
@@ -780,10 +785,16 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
                }
        }
 
-       /* CEE PFC data to ETS config */
+       /* CEE PFC data */
        dcbcfg->pfc.pfcena = cee_cfg->oper_pfc_en;
        dcbcfg->pfc.pfccap = ICE_MAX_TRAFFIC_CLASS;
 
+       /* CEE APP TLV data */
+       if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING)
+               cmp_dcbcfg = &pi->qos_cfg.desired_dcbx_cfg;
+       else
+               cmp_dcbcfg = &pi->qos_cfg.remote_dcbx_cfg;
+
        app_index = 0;
        for (i = 0; i < 3; i++) {
                if (i == 0) {
@@ -802,6 +813,18 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
                        ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_ISCSI_S;
                        ice_app_sel_type = ICE_APP_SEL_TCPIP;
                        ice_app_prot_id_type = ICE_APP_PROT_ID_ISCSI;
+
+                       for (j = 0; j < cmp_dcbcfg->numapps; j++) {
+                               u16 prot_id = cmp_dcbcfg->app[j].prot_id;
+                               u8 sel = cmp_dcbcfg->app[j].selector;
+
+                               if  (sel == ICE_APP_SEL_TCPIP &&
+                                    (prot_id == ICE_APP_PROT_ID_ISCSI ||
+                                     prot_id == ICE_APP_PROT_ID_ISCSI_860)) {
+                                       ice_app_prot_id_type = prot_id;
+                                       break;
+                               }
+                       }
                } else {
                        /* FIP APP */
                        ice_aqc_cee_status_mask = ICE_AQC_CEE_FIP_STATUS_M;
@@ -892,11 +915,8 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
        ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
        if (!ret) {
                /* CEE mode */
-               dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
-               dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_CEE;
-               dcbx_cfg->tlv_status = le32_to_cpu(cee_cfg.tlv_status);
-               ice_cee_to_dcb_cfg(&cee_cfg, dcbx_cfg);
                ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE);
+               ice_cee_to_dcb_cfg(&cee_cfg, pi);
        } else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) {
                /* CEE mode not enabled try querying IEEE data */
                dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
index 468a63f..4180f1f 100644 (file)
@@ -18,12 +18,10 @@ static void ice_dcbnl_devreset(struct net_device *netdev)
        while (ice_is_reset_in_progress(pf->state))
                usleep_range(1000, 2000);
 
-       set_bit(__ICE_DCBNL_DEVRESET, pf->state);
        dev_close(netdev);
        netdev_state_change(netdev);
        dev_open(netdev, NULL);
        netdev_state_change(netdev);
-       clear_bit(__ICE_DCBNL_DEVRESET, pf->state);
 }
 
 /**
index 2dcfa68..32ba71a 100644 (file)
@@ -3472,7 +3472,7 @@ static void ice_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
                netdev_warn(netdev, "Wake on LAN is not supported on this interface!\n");
 
        /* Get WoL settings based on the HW capability */
-       if (ice_is_wol_supported(pf)) {
+       if (ice_is_wol_supported(&pf->hw)) {
                wol->supported = WAKE_MAGIC;
                wol->wolopts = pf->wol_ena ? WAKE_MAGIC : 0;
        } else {
@@ -3492,7 +3492,7 @@ static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
        struct ice_vsi *vsi = np->vsi;
        struct ice_pf *pf = vsi->back;
 
-       if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(pf))
+       if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(&pf->hw))
                return -EOPNOTSUPP;
 
        /* only magic packet is supported */
index 8d4e2ad..d13c7fc 100644 (file)
@@ -2620,7 +2620,7 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
                        if (!locked)
                                rtnl_lock();
 
-                       err = ice_open(vsi->netdev);
+                       err = ice_open_internal(vsi->netdev);
 
                        if (!locked)
                                rtnl_unlock();
@@ -2649,7 +2649,7 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
                        if (!locked)
                                rtnl_lock();
 
-                       ice_stop(vsi->netdev);
+                       ice_vsi_close(vsi);
 
                        if (!locked)
                                rtnl_unlock();
@@ -3078,7 +3078,6 @@ err_vsi:
 bool ice_is_reset_in_progress(unsigned long *state)
 {
        return test_bit(__ICE_RESET_OICR_RECV, state) ||
-              test_bit(__ICE_DCBNL_DEVRESET, state) ||
               test_bit(__ICE_PFR_REQ, state) ||
               test_bit(__ICE_CORER_REQ, state) ||
               test_bit(__ICE_GLOBR_REQ, state);
index 2c23c8f..d821c68 100644 (file)
@@ -3537,15 +3537,14 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
 }
 
 /**
- * ice_is_wol_supported - get NVM state of WoL
- * @pf: board private structure
+ * ice_is_wol_supported - check if WoL is supported
+ * @hw: pointer to hardware info
  *
  * Check if WoL is supported based on the HW configuration.
  * Returns true if NVM supports and enables WoL for this port, false otherwise
  */
-bool ice_is_wol_supported(struct ice_pf *pf)
+bool ice_is_wol_supported(struct ice_hw *hw)
 {
-       struct ice_hw *hw = &pf->hw;
        u16 wol_ctrl;
 
        /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
@@ -3554,7 +3553,7 @@ bool ice_is_wol_supported(struct ice_pf *pf)
        if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
                return false;
 
-       return !(BIT(hw->pf_id) & wol_ctrl);
+       return !(BIT(hw->port_info->lport) & wol_ctrl);
 }
 
 /**
@@ -4192,28 +4191,25 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
                goto err_send_version_unroll;
        }
 
+       /* not a fatal error if this fails */
        err = ice_init_nvm_phy_type(pf->hw.port_info);
-       if (err) {
+       if (err)
                dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
-               goto err_send_version_unroll;
-       }
 
+       /* not a fatal error if this fails */
        err = ice_update_link_info(pf->hw.port_info);
-       if (err) {
+       if (err)
                dev_err(dev, "ice_update_link_info failed: %d\n", err);
-               goto err_send_version_unroll;
-       }
 
        ice_init_link_dflt_override(pf->hw.port_info);
 
        /* if media available, initialize PHY settings */
        if (pf->hw.port_info->phy.link_info.link_info &
            ICE_AQ_MEDIA_AVAILABLE) {
+               /* not a fatal error if this fails */
                err = ice_init_phy_user_cfg(pf->hw.port_info);
-               if (err) {
+               if (err)
                        dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
-                       goto err_send_version_unroll;
-               }
 
                if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
                        struct ice_vsi *vsi = ice_get_main_vsi(pf);
@@ -4568,6 +4564,7 @@ static int __maybe_unused ice_suspend(struct device *dev)
                        continue;
                ice_vsi_free_q_vectors(pf->vsi[v]);
        }
+       ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
        ice_clear_interrupt_scheme(pf);
 
        pci_save_state(pdev);
@@ -6635,6 +6632,28 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
  * Returns 0 on success, negative value on failure
  */
 int ice_open(struct net_device *netdev)
+{
+       struct ice_netdev_priv *np = netdev_priv(netdev);
+       struct ice_pf *pf = np->vsi->back;
+
+       if (ice_is_reset_in_progress(pf->state)) {
+               netdev_err(netdev, "can't open net device while reset is in progress");
+               return -EBUSY;
+       }
+
+       return ice_open_internal(netdev);
+}
+
+/**
+ * ice_open_internal - Called when a network interface becomes active
+ * @netdev: network interface device structure
+ *
+ * Internal ice_open implementation. Should not be called directly except by
+ * ice_open and the reset handling routine.
+ *
+ * Returns 0 on success, negative value on failure
+ */
+int ice_open_internal(struct net_device *netdev)
 {
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_vsi *vsi = np->vsi;
@@ -6715,6 +6734,12 @@ int ice_stop(struct net_device *netdev)
 {
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_vsi *vsi = np->vsi;
+       struct ice_pf *pf = vsi->back;
+
+       if (ice_is_reset_in_progress(pf->state)) {
+               netdev_err(netdev, "can't stop net device while reset is in progress");
+               return -EBUSY;
+       }
 
        ice_vsi_close(vsi);
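
The new ice_open()/ice_stop() wrappers above reject user-triggered open and close while a reset is in flight and leave ice_open_internal() for the reset path itself. A compact user-space sketch of that guard, with an invented reset_in_progress flag and an EBUSY-style return:

    #include <stdio.h>
    #include <errno.h>

    static int reset_in_progress;

    static int dev_open_internal(void)
    {
        printf("bringing the interface up\n");
        return 0;
    }

    /* External entry point: user-triggered opens are rejected during reset;
     * the reset path calls dev_open_internal() directly instead. */
    static int dev_open(void)
    {
        if (reset_in_progress)
            return -EBUSY;
        return dev_open_internal();
    }

    int main(void)
    {
        reset_in_progress = 1;
        printf("open during reset -> %d\n", dev_open());   /* -EBUSY */

        reset_in_progress = 0;
        printf("open after reset  -> %d\n", dev_open());   /* 0 */
        return 0;
    }
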
 
index 67c965a..834cbd3 100644 (file)
@@ -1238,6 +1238,9 @@ ice_add_update_vsi_list(struct ice_hw *hw,
                        ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
                                                vsi_list_id);
 
+               if (!m_entry->vsi_list_info)
+                       return ICE_ERR_NO_MEMORY;
+
                /* If this entry was large action then the large action needs
                 * to be updated to point to FWD to VSI list
                 */
@@ -2220,6 +2223,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
        return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
                 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
                (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
+                fm_entry->vsi_list_info &&
                 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
 }
 
@@ -2292,14 +2296,12 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
                return ICE_ERR_PARAM;
 
        list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
-               struct ice_fltr_info *fi;
-
-               fi = &fm_entry->fltr_info;
-               if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
+               if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
                        continue;
 
                status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
-                                                       vsi_list_head, fi);
+                                                       vsi_list_head,
+                                                       &fm_entry->fltr_info);
                if (status)
                        return status;
        }
@@ -2622,7 +2624,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
                                          &remove_list_head);
        mutex_unlock(rule_lock);
        if (status)
-               return;
+               goto free_fltr_list;
 
        switch (lkup) {
        case ICE_SW_LKUP_MAC:
@@ -2645,6 +2647,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
                break;
        }
 
+free_fltr_list:
        list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
                list_del(&fm_entry->list_entry);
                devm_kfree(ice_hw_to_dev(hw), fm_entry);
index a6cb0c3..266036b 100644 (file)
@@ -535,6 +535,7 @@ struct ice_dcb_app_priority_table {
 #define ICE_TLV_STATUS_ERR     0x4
 #define ICE_APP_PROT_ID_FCOE   0x8906
 #define ICE_APP_PROT_ID_ISCSI  0x0cbc
+#define ICE_APP_PROT_ID_ISCSI_860 0x035c
 #define ICE_APP_PROT_ID_FIP    0x8914
 #define ICE_APP_SEL_ETHTYPE    0x1
 #define ICE_APP_SEL_TCPIP      0x2
index 03d9aad..cffb95f 100644 (file)
@@ -6536,6 +6536,13 @@ err_setup_tx:
        return err;
 }
 
+static int ixgbe_rx_napi_id(struct ixgbe_ring *rx_ring)
+{
+       struct ixgbe_q_vector *q_vector = rx_ring->q_vector;
+
+       return q_vector ? q_vector->napi.napi_id : 0;
+}
+
 /**
  * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
  * @adapter: pointer to ixgbe_adapter
@@ -6583,7 +6590,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
 
        /* XDP RX-queue info */
        if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
-                            rx_ring->queue_index, rx_ring->q_vector->napi.napi_id) < 0)
+                            rx_ring->queue_index, ixgbe_rx_napi_id(rx_ring)) < 0)
                goto err;
 
        rx_ring->xdp_prog = adapter->xdp_prog;
@@ -6892,6 +6899,11 @@ static int __maybe_unused ixgbe_resume(struct device *dev_d)
 
        adapter->hw.hw_addr = adapter->io_addr;
 
+       err = pci_enable_device_mem(pdev);
+       if (err) {
+               e_dev_err("Cannot enable PCI device from suspend\n");
+               return err;
+       }
        smp_mb__before_atomic();
        clear_bit(__IXGBE_DISABLED, &adapter->state);
        pci_set_master(pdev);
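
ixgbe_rx_napi_id() above moves the q_vector NULL check into a helper so the XDP rxq registration can fall back to napi_id 0 instead of dereferencing a ring that has no vector attached yet. A small sketch of the same accessor pattern with invented ring and vector structs:

    #include <stdio.h>

    struct vector {
        unsigned int napi_id;
    };

    struct ring {
        struct vector *q_vector;   /* may legitimately be NULL early on */
    };

    /* Accessor keeps the NULL handling in one place. */
    static unsigned int ring_napi_id(const struct ring *r)
    {
        return r->q_vector ? r->q_vector->napi_id : 0;
    }

    int main(void)
    {
        struct vector v = { .napi_id = 42 };
        struct ring attached = { .q_vector = &v };
        struct ring detached = { .q_vector = NULL };

        printf("attached ring napi_id: %u\n", ring_napi_id(&attached));
        printf("detached ring napi_id: %u\n", ring_napi_id(&detached));
        return 0;
    }
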
index b051417..9153c9b 100644 (file)
@@ -191,12 +191,12 @@ static bool is_ib_supported(struct mlx5_core_dev *dev)
 }
 
 enum {
-       MLX5_INTERFACE_PROTOCOL_ETH_REP,
        MLX5_INTERFACE_PROTOCOL_ETH,
+       MLX5_INTERFACE_PROTOCOL_ETH_REP,
 
+       MLX5_INTERFACE_PROTOCOL_IB,
        MLX5_INTERFACE_PROTOCOL_IB_REP,
        MLX5_INTERFACE_PROTOCOL_MPIB,
-       MLX5_INTERFACE_PROTOCOL_IB,
 
        MLX5_INTERFACE_PROTOCOL_VNET,
 };
index d7d8a68..d0f9d3c 100644 (file)
@@ -246,6 +246,11 @@ static int mlx5_devlink_trap_action_set(struct devlink *devlink,
        struct mlx5_devlink_trap *dl_trap;
        int err = 0;
 
+       if (is_mdev_switchdev_mode(dev)) {
+               NL_SET_ERR_MSG_MOD(extack, "Devlink traps can't be set in switchdev mode");
+               return -EOPNOTSUPP;
+       }
+
        dl_trap = mlx5_find_trap_by_id(dev, trap->id);
        if (!dl_trap) {
                mlx5_core_err(dev, "Devlink trap: Set action on invalid trap id 0x%x", trap->id);
index 304b296..bc6f77e 100644 (file)
@@ -516,6 +516,7 @@ struct mlx5e_icosq {
        struct mlx5_wq_cyc         wq;
        void __iomem              *uar_map;
        u32                        sqn;
+       u16                        reserved_room;
        unsigned long              state;
 
        /* control path */
index 308fd27..89510ca 100644 (file)
@@ -387,21 +387,6 @@ enum mlx5e_fec_supported_link_mode {
                        *_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \
        } while (0)
 
-#define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link)                  \
-       do {                                                                            \
-               unsigned long policy_long;                                              \
-               u16 *__policy = &(policy);                                              \
-               bool _write = (write);                                                  \
-                                                                                       \
-               policy_long = *__policy;                                                \
-               if (_write && *__policy)                                                \
-                       *__policy = find_first_bit(&policy_long,                        \
-                                                  sizeof(policy_long) * BITS_PER_BYTE);\
-               MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link);          \
-               if (!_write && *__policy)                                               \
-                       *__policy = 1 << *__policy;                                     \
-       } while (0)
-
 /* get/set FEC admin field for a given speed */
 static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
                                 enum mlx5e_fec_supported_link_mode link_mode)
@@ -423,16 +408,16 @@ static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
                MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g);
                break;
        case MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X:
-               MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 50g_1x);
+               MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 50g_1x);
                break;
        case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_2X:
-               MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 100g_2x);
+               MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g_2x);
                break;
        case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_4X:
-               MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 200g_4x);
+               MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 200g_4x);
                break;
        case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X:
-               MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(pplm, *fec_policy, write, 400g_8x);
+               MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_8x);
                break;
        default:
                return -EINVAL;
index b2cd298..68e54cc 100644 (file)
@@ -185,6 +185,28 @@ mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry)
        return !!(entry->tuple_nat_node.next);
 }
 
+static int
+mlx5_get_label_mapping(struct mlx5_tc_ct_priv *ct_priv,
+                      u32 *labels, u32 *id)
+{
+       if (!memchr_inv(labels, 0, sizeof(u32) * 4)) {
+               *id = 0;
+               return 0;
+       }
+
+       if (mapping_add(ct_priv->labels_mapping, labels, id))
+               return -EOPNOTSUPP;
+
+       return 0;
+}
+
+static void
+mlx5_put_label_mapping(struct mlx5_tc_ct_priv *ct_priv, u32 id)
+{
+       if (id)
+               mapping_remove(ct_priv->labels_mapping, id);
+}
+
 static int
 mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
 {
@@ -436,7 +458,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
        mlx5_tc_rule_delete(netdev_priv(ct_priv->netdev), zone_rule->rule, attr);
        mlx5e_mod_hdr_detach(ct_priv->dev,
                             ct_priv->mod_hdr_tbl, zone_rule->mh);
-       mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+       mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
        kfree(attr);
 }
 
@@ -639,8 +661,8 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
        if (!meta)
                return -EOPNOTSUPP;
 
-       err = mapping_add(ct_priv->labels_mapping, meta->ct_metadata.labels,
-                         &attr->ct_attr.ct_labels_id);
+       err = mlx5_get_label_mapping(ct_priv, meta->ct_metadata.labels,
+                                    &attr->ct_attr.ct_labels_id);
        if (err)
                return -EOPNOTSUPP;
        if (nat) {
@@ -677,7 +699,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
 
 err_mapping:
        dealloc_mod_hdr_actions(&mod_acts);
-       mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+       mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
        return err;
 }
 
@@ -745,7 +767,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 err_rule:
        mlx5e_mod_hdr_detach(ct_priv->dev,
                             ct_priv->mod_hdr_tbl, zone_rule->mh);
-       mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+       mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
 err_mod_hdr:
        kfree(attr);
 err_attr:
@@ -1197,7 +1219,7 @@ void mlx5_tc_ct_match_del(struct mlx5_tc_ct_priv *priv, struct mlx5_ct_attr *ct_
        if (!priv || !ct_attr->ct_labels_id)
                return;
 
-       mapping_remove(priv->labels_mapping, ct_attr->ct_labels_id);
+       mlx5_put_label_mapping(priv, ct_attr->ct_labels_id);
 }
 
 int
@@ -1280,7 +1302,7 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
                ct_labels[1] = key->ct_labels[1] & mask->ct_labels[1];
                ct_labels[2] = key->ct_labels[2] & mask->ct_labels[2];
                ct_labels[3] = key->ct_labels[3] & mask->ct_labels[3];
-               if (mapping_add(priv->labels_mapping, ct_labels, &ct_attr->ct_labels_id))
+               if (mlx5_get_label_mapping(priv, ct_labels, &ct_attr->ct_labels_id))
                        return -EOPNOTSUPP;
                mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG, ct_attr->ct_labels_id,
                                            MLX5_CT_LABELS_MASK);
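
The mlx5_get_label_mapping()/mlx5_put_label_mapping() pair above reserves id 0 for the all-zero ct_labels case so no mapping entry is allocated for it, and the release side becomes a no-op for that id, which avoids both a needless table entry and an unbalanced remove. A user-space sketch of the reserve-zero idea; map_add()/map_del() below are an invented backend standing in for the mapping API:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    static uint32_t next_id = 1;            /* fake mapping backend */
    static int map_add(const uint32_t *labels, uint32_t *id)
    {
        (void)labels;
        *id = next_id++;
        return 0;
    }
    static void map_del(uint32_t id) { printf("released mapping %u\n", id); }

    static int get_label_mapping(const uint32_t labels[4], uint32_t *id)
    {
        static const uint32_t zero[4];

        if (!memcmp(labels, zero, sizeof(zero))) {
            *id = 0;                        /* reserved: no backend entry */
            return 0;
        }
        return map_add(labels, id);
    }

    static void put_label_mapping(uint32_t id)
    {
        if (id)                             /* id 0 was never allocated */
            map_del(id);
    }

    int main(void)
    {
        uint32_t no_labels[4] = { 0 }, labels[4] = { 7, 0, 0, 0 };
        uint32_t a, b;

        get_label_mapping(no_labels, &a);
        get_label_mapping(labels, &b);
        printf("ids: %u %u\n", a, b);
        put_label_mapping(a);               /* no-op */
        put_label_mapping(b);
        return 0;
    }
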
index 67de2bf..e127199 100644 (file)
@@ -21,6 +21,11 @@ enum {
        MLX5E_TC_TUNNEL_TYPE_MPLSOUDP,
 };
 
+struct mlx5e_encap_key {
+       const struct ip_tunnel_key *ip_tun_key;
+       struct mlx5e_tc_tunnel     *tc_tunnel;
+};
+
 struct mlx5e_tc_tunnel {
        int tunnel_type;
        enum mlx5_flow_match_level match_level;
@@ -44,6 +49,8 @@ struct mlx5e_tc_tunnel {
                            struct flow_cls_offload *f,
                            void *headers_c,
                            void *headers_v);
+       bool (*encap_info_equal)(struct mlx5e_encap_key *a,
+                                struct mlx5e_encap_key *b);
 };
 
 extern struct mlx5e_tc_tunnel vxlan_tunnel;
@@ -101,6 +108,9 @@ int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv,
                                 void *headers_c,
                                 void *headers_v);
 
+bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a,
+                                          struct mlx5e_encap_key *b);
+
 #endif /* CONFIG_MLX5_ESWITCH */
 
 #endif //__MLX5_EN_TC_TUNNEL_H__
index 7f7b0f6..9f16ad2 100644 (file)
@@ -476,16 +476,11 @@ void mlx5e_detach_decap(struct mlx5e_priv *priv,
        mlx5e_decap_dealloc(priv, d);
 }
 
-struct encap_key {
-       const struct ip_tunnel_key *ip_tun_key;
-       struct mlx5e_tc_tunnel *tc_tunnel;
-};
-
-static int cmp_encap_info(struct encap_key *a,
-                         struct encap_key *b)
+bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a,
+                                          struct mlx5e_encap_key *b)
 {
-       return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
-               a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
+       return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) == 0 &&
+               a->tc_tunnel->tunnel_type == b->tc_tunnel->tunnel_type;
 }
 
 static int cmp_decap_info(struct mlx5e_decap_key *a,
@@ -494,7 +489,7 @@ static int cmp_decap_info(struct mlx5e_decap_key *a,
        return memcmp(&a->key, &b->key, sizeof(b->key));
 }
 
-static int hash_encap_info(struct encap_key *key)
+static int hash_encap_info(struct mlx5e_encap_key *key)
 {
        return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
                     key->tc_tunnel->tunnel_type);
@@ -516,18 +511,18 @@ static bool mlx5e_decap_take(struct mlx5e_decap_entry *e)
 }
 
 static struct mlx5e_encap_entry *
-mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
+mlx5e_encap_get(struct mlx5e_priv *priv, struct mlx5e_encap_key *key,
                uintptr_t hash_key)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+       struct mlx5e_encap_key e_key;
        struct mlx5e_encap_entry *e;
-       struct encap_key e_key;
 
        hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
                                   encap_hlist, hash_key) {
                e_key.ip_tun_key = &e->tun_info->key;
                e_key.tc_tunnel = e->tunnel;
-               if (!cmp_encap_info(&e_key, key) &&
+               if (e->tunnel->encap_info_equal(&e_key, key) &&
                    mlx5e_encap_take(e))
                        return e;
        }
@@ -694,8 +689,8 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
        struct mlx5_flow_attr *attr = flow->attr;
        const struct ip_tunnel_info *tun_info;
        unsigned long tbl_time_before = 0;
-       struct encap_key key;
        struct mlx5e_encap_entry *e;
+       struct mlx5e_encap_key key;
        bool entry_created = false;
        unsigned short family;
        uintptr_t hash_key;
index 7ed3f9f..f5b26f5 100644 (file)
@@ -329,6 +329,34 @@ static int mlx5e_tc_tun_parse_geneve(struct mlx5e_priv *priv,
        return mlx5e_tc_tun_parse_geneve_options(priv, spec, f);
 }
 
+static bool mlx5e_tc_tun_encap_info_equal_geneve(struct mlx5e_encap_key *a,
+                                                struct mlx5e_encap_key *b)
+{
+       struct ip_tunnel_info *a_info;
+       struct ip_tunnel_info *b_info;
+       bool a_has_opts, b_has_opts;
+
+       if (!mlx5e_tc_tun_encap_info_equal_generic(a, b))
+               return false;
+
+       a_has_opts = !!(a->ip_tun_key->tun_flags & TUNNEL_GENEVE_OPT);
+       b_has_opts = !!(b->ip_tun_key->tun_flags & TUNNEL_GENEVE_OPT);
+
+       /* keys are equal when both don't have any options attached */
+       if (!a_has_opts && !b_has_opts)
+               return true;
+
+       if (a_has_opts != b_has_opts)
+               return false;
+
+       /* geneve options stored in memory next to ip_tunnel_info struct */
+       a_info = container_of(a->ip_tun_key, struct ip_tunnel_info, key);
+       b_info = container_of(b->ip_tun_key, struct ip_tunnel_info, key);
+
+       return a_info->options_len == b_info->options_len &&
+               memcmp(a_info + 1, b_info + 1, a_info->options_len) == 0;
+}
+
 struct mlx5e_tc_tunnel geneve_tunnel = {
        .tunnel_type          = MLX5E_TC_TUNNEL_TYPE_GENEVE,
        .match_level          = MLX5_MATCH_L4,
@@ -338,4 +366,5 @@ struct mlx5e_tc_tunnel geneve_tunnel = {
        .generate_ip_tun_hdr  = mlx5e_gen_ip_tunnel_header_geneve,
        .parse_udp_ports      = mlx5e_tc_tun_parse_udp_ports_geneve,
        .parse_tunnel         = mlx5e_tc_tun_parse_geneve,
+       .encap_info_equal     = mlx5e_tc_tun_encap_info_equal_geneve,
 };
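
The GENEVE-specific comparison above recovers the full ip_tunnel_info via container_of() and compares the variable-length option bytes stored directly behind the struct, so two encap keys match only when both option length and contents agree. A user-space sketch of comparing such trailing data; the tun_info layout below is invented for illustration:

    #include <stdio.h>
    #include <string.h>
    #include <stdlib.h>

    struct tun_info {
        unsigned int options_len;
        /* option bytes follow immediately after the struct */
    };

    static struct tun_info *make(const char *opts, unsigned int len)
    {
        struct tun_info *ti = malloc(sizeof(*ti) + len);

        if (!ti)
            exit(1);
        ti->options_len = len;
        memcpy(ti + 1, opts, len);   /* "ti + 1" points just past the header */
        return ti;
    }

    static int options_equal(const struct tun_info *a, const struct tun_info *b)
    {
        return a->options_len == b->options_len &&
               memcmp(a + 1, b + 1, a->options_len) == 0;
    }

    int main(void)
    {
        struct tun_info *x = make("\x01\x02", 2);
        struct tun_info *y = make("\x01\x02", 2);
        struct tun_info *z = make("\x01\x03", 2);

        printf("x == y: %d, x == z: %d\n", options_equal(x, y), options_equal(x, z));
        free(x); free(y); free(z);
        return 0;
    }
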
index 2805416..ada14f0 100644 (file)
@@ -94,4 +94,5 @@ struct mlx5e_tc_tunnel gre_tunnel = {
        .generate_ip_tun_hdr  = mlx5e_gen_ip_tunnel_header_gretap,
        .parse_udp_ports      = NULL,
        .parse_tunnel         = mlx5e_tc_tun_parse_gretap,
+       .encap_info_equal     = mlx5e_tc_tun_encap_info_equal_generic,
 };
index 3479672..60952b3 100644 (file)
@@ -131,4 +131,5 @@ struct mlx5e_tc_tunnel mplsoudp_tunnel = {
        .generate_ip_tun_hdr  = generate_ip_tun_hdr,
        .parse_udp_ports      = parse_udp_ports,
        .parse_tunnel         = parse_tunnel,
+       .encap_info_equal     = mlx5e_tc_tun_encap_info_equal_generic,
 };
index 038a0f1..4267f3a 100644 (file)
@@ -150,4 +150,5 @@ struct mlx5e_tc_tunnel vxlan_tunnel = {
        .generate_ip_tun_hdr  = mlx5e_gen_ip_tunnel_header_vxlan,
        .parse_udp_ports      = mlx5e_tc_tun_parse_udp_ports_vxlan,
        .parse_tunnel         = mlx5e_tc_tun_parse_vxlan,
+       .encap_info_equal     = mlx5e_tc_tun_encap_info_equal_generic,
 };
index 2371b83..055c3bc 100644 (file)
@@ -441,4 +441,10 @@ static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size)
        return wqe_size * 2 - 1;
 }
 
+static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
+{
+       u16 room = sq->reserved_room + mlx5e_stop_room_for_wqe(wqe_size);
+
+       return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room);
+}
 #endif
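
mlx5e_icosq_can_post_wqe() above folds the queue's reserved_room into the usual has-room test on the cyclic work queue, so posts made through this helper never consume the slots set aside for other users of the same queue. A sketch of a has-room-with-reservation check on a plain ring, with made-up producer/consumer counters:

    #include <stdio.h>
    #include <stdint.h>

    #define RING_SIZE 64u   /* number of slots, power of two */

    struct ring {
        uint16_t pc;             /* producer counter (free-running) */
        uint16_t cc;             /* consumer counter (free-running) */
        uint16_t reserved_room;  /* slots kept back for other users */
    };

    static int has_room_for(const struct ring *r, uint16_t n)
    {
        uint16_t used = (uint16_t)(r->pc - r->cc);

        return RING_SIZE - used >= n;
    }

    static int can_post(const struct ring *r, uint16_t wqe_size)
    {
        return has_room_for(r, r->reserved_room + wqe_size);
    }

    int main(void)
    {
        struct ring r = { .pc = 60, .cc = 0, .reserved_room = 8 };

        printf("4 slots free, reserve 8: post 2? %d\n", can_post(&r, 2));
        r.cc = 20;   /* consumer catches up, 24 slots free */
        printf("24 slots free, reserve 8: post 2? %d\n", can_post(&r, 2));
        return 0;
    }
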
index d06532d..19d22a6 100644 (file)
@@ -46,7 +46,8 @@ struct mlx5e_ktls_offload_context_rx {
        struct tls12_crypto_info_aes_gcm_128 crypto_info;
        struct accel_rule rule;
        struct sock *sk;
-       struct mlx5e_rq_stats *stats;
+       struct mlx5e_rq_stats *rq_stats;
+       struct mlx5e_tls_sw_stats *sw_stats;
        struct completion add_ctx;
        u32 tirn;
        u32 key_id;
@@ -137,11 +138,10 @@ post_static_params(struct mlx5e_icosq *sq,
 {
        struct mlx5e_set_tls_static_params_wqe *wqe;
        struct mlx5e_icosq_wqe_info wi;
-       u16 pi, num_wqebbs, room;
+       u16 pi, num_wqebbs;
 
        num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
-       room = mlx5e_stop_room_for_wqe(num_wqebbs);
-       if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room)))
+       if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
                return ERR_PTR(-ENOSPC);
 
        pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
@@ -168,11 +168,10 @@ post_progress_params(struct mlx5e_icosq *sq,
 {
        struct mlx5e_set_tls_progress_params_wqe *wqe;
        struct mlx5e_icosq_wqe_info wi;
-       u16 pi, num_wqebbs, room;
+       u16 pi, num_wqebbs;
 
        num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
-       room = mlx5e_stop_room_for_wqe(num_wqebbs);
-       if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room)))
+       if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
                return ERR_PTR(-ENOSPC);
 
        pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
@@ -218,7 +217,7 @@ unlock:
        return err;
 
 err_out:
-       priv_rx->stats->tls_resync_req_skip++;
+       priv_rx->rq_stats->tls_resync_req_skip++;
        err = PTR_ERR(cseg);
        complete(&priv_rx->add_ctx);
        goto unlock;
@@ -277,17 +276,15 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
 
        buf->priv_rx = priv_rx;
 
-       BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1);
-
        spin_lock_bh(&sq->channel->async_icosq_lock);
 
-       if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
+       if (unlikely(!mlx5e_icosq_can_post_wqe(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS))) {
                spin_unlock_bh(&sq->channel->async_icosq_lock);
                err = -ENOSPC;
                goto err_dma_unmap;
        }
 
-       pi = mlx5e_icosq_get_next_pi(sq, 1);
+       pi = mlx5e_icosq_get_next_pi(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS);
        wqe = MLX5E_TLS_FETCH_GET_PROGRESS_PARAMS_WQE(sq, pi);
 
 #define GET_PSV_DS_CNT (DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS))
@@ -307,7 +304,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
 
        wi = (struct mlx5e_icosq_wqe_info) {
                .wqe_type = MLX5E_ICOSQ_WQE_GET_PSV_TLS,
-               .num_wqebbs = 1,
+               .num_wqebbs = MLX5E_KTLS_GET_PROGRESS_WQEBBS,
                .tls_get_params.buf = buf,
        };
        icosq_fill_wi(sq, pi, &wi);
@@ -322,7 +319,7 @@ err_dma_unmap:
 err_free:
        kfree(buf);
 err_out:
-       priv_rx->stats->tls_resync_req_skip++;
+       priv_rx->rq_stats->tls_resync_req_skip++;
        return err;
 }
 
@@ -378,13 +375,13 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx
 
        cseg = post_static_params(sq, priv_rx);
        if (IS_ERR(cseg)) {
-               priv_rx->stats->tls_resync_res_skip++;
+               priv_rx->rq_stats->tls_resync_res_skip++;
                err = PTR_ERR(cseg);
                goto unlock;
        }
        /* Do not increment priv_rx refcnt, CQE handling is empty */
        mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
-       priv_rx->stats->tls_resync_res_ok++;
+       priv_rx->rq_stats->tls_resync_res_ok++;
 unlock:
        spin_unlock_bh(&c->async_icosq_lock);
 
@@ -420,13 +417,13 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
        auth_state = MLX5_GET(tls_progress_params, ctx, auth_state);
        if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING ||
            auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) {
-               priv_rx->stats->tls_resync_req_skip++;
+               priv_rx->rq_stats->tls_resync_req_skip++;
                goto out;
        }
 
        hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn);
        tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
-       priv_rx->stats->tls_resync_req_end++;
+       priv_rx->rq_stats->tls_resync_req_end++;
 out:
        mlx5e_ktls_priv_rx_put(priv_rx);
        dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
@@ -609,7 +606,8 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
        priv_rx->rxq = rxq;
        priv_rx->sk = sk;
 
-       priv_rx->stats = &priv->channel_stats[rxq].rq;
+       priv_rx->rq_stats = &priv->channel_stats[rxq].rq;
+       priv_rx->sw_stats = &priv->tls->sw_stats;
        mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);
 
        rqtn = priv->direct_tir[rxq].rqt.rqtn;
@@ -630,7 +628,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
        if (err)
                goto err_post_wqes;
 
-       priv_rx->stats->tls_ctx++;
+       atomic64_inc(&priv_rx->sw_stats->rx_tls_ctx);
 
        return 0;
 
@@ -666,7 +664,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
        if (cancel_work_sync(&resync->work))
                mlx5e_ktls_priv_rx_put(priv_rx);
 
-       priv_rx->stats->tls_del++;
+       atomic64_inc(&priv_rx->sw_stats->rx_tls_del);
        if (priv_rx->rule.rule)
                mlx5e_accel_fs_del_sk(priv_rx->rule.rule);
 
index d16def6..51bdf71 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 // Copyright (c) 2019 Mellanox Technologies.
 
+#include "en_accel/tls.h"
 #include "en_accel/ktls_txrx.h"
 #include "en_accel/ktls_utils.h"
 
@@ -50,6 +51,7 @@ static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
 struct mlx5e_ktls_offload_context_tx {
        struct tls_offload_context_tx *tx_ctx;
        struct tls12_crypto_info_aes_gcm_128 crypto_info;
+       struct mlx5e_tls_sw_stats *sw_stats;
        u32 expected_seq;
        u32 tisn;
        u32 key_id;
@@ -99,6 +101,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
        if (err)
                goto err_create_key;
 
+       priv_tx->sw_stats = &priv->tls->sw_stats;
        priv_tx->expected_seq = start_offload_tcp_sn;
        priv_tx->crypto_info  =
                *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
@@ -111,6 +114,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
                goto err_create_tis;
 
        priv_tx->ctx_post_pending = true;
+       atomic64_inc(&priv_tx->sw_stats->tx_tls_ctx);
 
        return 0;
 
@@ -452,7 +456,6 @@ bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *s
 
        if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
                mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
-               stats->tls_ctx++;
        }
 
        seq = ntohl(tcp_hdr(skb)->seq);
index bd270a8..4c9274d 100644 (file)
 #include "en.h"
 
 struct mlx5e_tls_sw_stats {
+       atomic64_t tx_tls_ctx;
        atomic64_t tx_tls_drop_metadata;
        atomic64_t tx_tls_drop_resync_alloc;
        atomic64_t tx_tls_drop_no_sync_data;
        atomic64_t tx_tls_drop_bypass_required;
+       atomic64_t rx_tls_ctx;
+       atomic64_t rx_tls_del;
        atomic64_t rx_tls_drop_resync_request;
        atomic64_t rx_tls_resync_request;
        atomic64_t rx_tls_resync_reply;
index b949b9a..29463bd 100644 (file)
@@ -45,49 +45,60 @@ static const struct counter_desc mlx5e_tls_sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_bypass_required) },
 };
 
+static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {
+       { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_ctx) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, rx_tls_del) },
+};
+
 #define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
        atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
 
-#define NUM_TLS_SW_COUNTERS ARRAY_SIZE(mlx5e_tls_sw_stats_desc)
-
-static bool is_tls_atomic_stats(struct mlx5e_priv *priv)
+static const struct counter_desc *get_tls_atomic_stats(struct mlx5e_priv *priv)
 {
-       return priv->tls && !mlx5_accel_is_ktls_device(priv->mdev);
+       if (!priv->tls)
+               return NULL;
+       if (mlx5_accel_is_ktls_device(priv->mdev))
+               return mlx5e_ktls_sw_stats_desc;
+       return mlx5e_tls_sw_stats_desc;
 }
 
 int mlx5e_tls_get_count(struct mlx5e_priv *priv)
 {
-       if (!is_tls_atomic_stats(priv))
+       if (!priv->tls)
                return 0;
-
-       return NUM_TLS_SW_COUNTERS;
+       if (mlx5_accel_is_ktls_device(priv->mdev))
+               return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc);
+       return ARRAY_SIZE(mlx5e_tls_sw_stats_desc);
 }
 
 int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
 {
-       unsigned int i, idx = 0;
+       const struct counter_desc *stats_desc;
+       unsigned int i, n, idx = 0;
 
-       if (!is_tls_atomic_stats(priv))
-               return 0;
+       stats_desc = get_tls_atomic_stats(priv);
+       n = mlx5e_tls_get_count(priv);
 
-       for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
+       for (i = 0; i < n; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
-                      mlx5e_tls_sw_stats_desc[i].format);
+                      stats_desc[i].format);
 
-       return NUM_TLS_SW_COUNTERS;
+       return n;
 }
 
 int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data)
 {
-       int i, idx = 0;
+       const struct counter_desc *stats_desc;
+       unsigned int i, n, idx = 0;
 
-       if (!is_tls_atomic_stats(priv))
-               return 0;
+       stats_desc = get_tls_atomic_stats(priv);
+       n = mlx5e_tls_get_count(priv);
 
-       for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
+       for (i = 0; i < n; i++)
                data[idx++] =
                    MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
-                                           mlx5e_tls_sw_stats_desc, i);
+                                           stats_desc, i);
 
-       return NUM_TLS_SW_COUNTERS;
+       return n;
 }
index f5f2a8f..53802e1 100644 (file)
@@ -758,11 +758,11 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
        return 0;
 }
 
-static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings,
-                                                  u32 eth_proto_cap,
-                                                  u8 connector_type, bool ext)
+static void ptys2ethtool_supported_advertised_port(struct mlx5_core_dev *mdev,
+                                                  struct ethtool_link_ksettings *link_ksettings,
+                                                  u32 eth_proto_cap, u8 connector_type)
 {
-       if ((!connector_type && !ext) || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
+       if (!MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type)) {
                if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
                                   | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
                                   | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
@@ -898,9 +898,9 @@ static int ptys2connector_type[MLX5E_CONNECTOR_TYPE_NUMBER] = {
                [MLX5E_PORT_OTHER]              = PORT_OTHER,
        };
 
-static u8 get_connector_port(u32 eth_proto, u8 connector_type, bool ext)
+static u8 get_connector_port(struct mlx5_core_dev *mdev, u32 eth_proto, u8 connector_type)
 {
-       if ((connector_type || ext) && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
+       if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type))
                return ptys2connector_type[connector_type];
 
        if (eth_proto &
@@ -1001,11 +1001,11 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
                         data_rate_oper, link_ksettings);
 
        eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
-
-       link_ksettings->base.port = get_connector_port(eth_proto_oper,
-                                                      connector_type, ext);
-       ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin,
-                                              connector_type, ext);
+       connector_type = connector_type < MLX5E_CONNECTOR_TYPE_NUMBER ?
+                        connector_type : MLX5E_PORT_UNKNOWN;
+       link_ksettings->base.port = get_connector_port(mdev, eth_proto_oper, connector_type);
+       ptys2ethtool_supported_advertised_port(mdev, link_ksettings, eth_proto_admin,
+                                              connector_type);
        get_lp_advertising(mdev, eth_proto_lp, link_ksettings);
 
        if (an_status == MLX5_AN_COMPLETE)
index 158f947..5db63b9 100644 (file)
@@ -1091,6 +1091,7 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
 
        sq->channel   = c;
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
+       sq->reserved_room = param->stop_room;
 
        param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -2350,6 +2351,24 @@ void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
        mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp);
 }
 
+static void mlx5e_build_async_icosq_param(struct mlx5e_priv *priv,
+                                         struct mlx5e_params *params,
+                                         u8 log_wq_size,
+                                         struct mlx5e_sq_param *param)
+{
+       void *sqc = param->sqc;
+       void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+       mlx5e_build_sq_param_common(priv, param);
+
+       /* async_icosq is used by XSK only if xdp_prog is active */
+       if (params->xdp_prog)
+               param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */
+       MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
+       MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
+       mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp);
+}
+
 void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
                             struct mlx5e_params *params,
                             struct mlx5e_sq_param *param)
@@ -2398,7 +2417,7 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
        mlx5e_build_sq_param(priv, params, &cparam->txq_sq);
        mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
        mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
-       mlx5e_build_icosq_param(priv, async_icosq_log_wq_sz, &cparam->async_icosq);
+       mlx5e_build_async_icosq_param(priv, params, async_icosq_log_wq_sz, &cparam->async_icosq);
 }
 
 int mlx5e_open_channels(struct mlx5e_priv *priv,
index a132fff..8d39bfe 100644 (file)
@@ -1107,8 +1107,9 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
 
        mlx5e_rep_tc_enable(priv);
 
-       mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
-                                     0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
+       if (MLX5_CAP_GEN(mdev, uplink_follow))
+               mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
+                                             0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
        mlx5_lag_add(mdev, netdev);
        priv->events_nb.notifier_call = uplink_rep_async_event;
        mlx5_notifier_register(mdev, &priv->events_nb);
index 92c5b81..88a01c5 100644 (file)
@@ -116,7 +116,6 @@ static const struct counter_desc sw_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
@@ -180,8 +179,6 @@ static const struct counter_desc sw_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_ctx) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_del) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
@@ -342,8 +339,6 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
 #ifdef CONFIG_MLX5_EN_TLS
        s->rx_tls_decrypted_packets   += rq_stats->tls_decrypted_packets;
        s->rx_tls_decrypted_bytes     += rq_stats->tls_decrypted_bytes;
-       s->rx_tls_ctx                 += rq_stats->tls_ctx;
-       s->rx_tls_del                 += rq_stats->tls_del;
        s->rx_tls_resync_req_pkt      += rq_stats->tls_resync_req_pkt;
        s->rx_tls_resync_req_start    += rq_stats->tls_resync_req_start;
        s->rx_tls_resync_req_end      += rq_stats->tls_resync_req_end;
@@ -390,7 +385,6 @@ static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
 #ifdef CONFIG_MLX5_EN_TLS
        s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
        s->tx_tls_encrypted_bytes   += sq_stats->tls_encrypted_bytes;
-       s->tx_tls_ctx               += sq_stats->tls_ctx;
        s->tx_tls_ooo               += sq_stats->tls_ooo;
        s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
        s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
@@ -1622,8 +1616,6 @@ static const struct counter_desc rq_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
-       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_ctx) },
-       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_del) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
@@ -1650,7 +1642,6 @@ static const struct counter_desc sq_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
-       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
@@ -1776,7 +1767,6 @@ static const struct counter_desc qos_sq_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
-       { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
        { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
index 93c4131..adf9b7b 100644 (file)
@@ -191,7 +191,6 @@ struct mlx5e_sw_stats {
 #ifdef CONFIG_MLX5_EN_TLS
        u64 tx_tls_encrypted_packets;
        u64 tx_tls_encrypted_bytes;
-       u64 tx_tls_ctx;
        u64 tx_tls_ooo;
        u64 tx_tls_dump_packets;
        u64 tx_tls_dump_bytes;
@@ -202,8 +201,6 @@ struct mlx5e_sw_stats {
 
        u64 rx_tls_decrypted_packets;
        u64 rx_tls_decrypted_bytes;
-       u64 rx_tls_ctx;
-       u64 rx_tls_del;
        u64 rx_tls_resync_req_pkt;
        u64 rx_tls_resync_req_start;
        u64 rx_tls_resync_req_end;
@@ -334,8 +331,6 @@ struct mlx5e_rq_stats {
 #ifdef CONFIG_MLX5_EN_TLS
        u64 tls_decrypted_packets;
        u64 tls_decrypted_bytes;
-       u64 tls_ctx;
-       u64 tls_del;
        u64 tls_resync_req_pkt;
        u64 tls_resync_req_start;
        u64 tls_resync_req_end;
@@ -364,7 +359,6 @@ struct mlx5e_sq_stats {
 #ifdef CONFIG_MLX5_EN_TLS
        u64 tls_encrypted_packets;
        u64 tls_encrypted_bytes;
-       u64 tls_ctx;
        u64 tls_ooo;
        u64 tls_dump_packets;
        u64 tls_dump_bytes;
index df2a0af..d675107 100644 (file)
@@ -1895,6 +1895,9 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
                return 0;
 
        flow_rule_match_meta(rule, &match);
+       if (!match.mask->ingress_ifindex)
+               return 0;
+
        if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
                NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
                return -EOPNOTSUPP;
index 174dfbc..1fa9c18 100644 (file)
@@ -931,13 +931,24 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
        mutex_unlock(&table->lock);
 }
 
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+#define MLX5_MAX_ASYNC_EQS 4
+#else
+#define MLX5_MAX_ASYNC_EQS 3
+#endif
+
 int mlx5_eq_table_create(struct mlx5_core_dev *dev)
 {
        struct mlx5_eq_table *eq_table = dev->priv.eq_table;
+       int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
+                     MLX5_CAP_GEN(dev, max_num_eqs) :
+                     1 << MLX5_CAP_GEN(dev, log_max_eq);
        int err;
 
        eq_table->num_comp_eqs =
-               mlx5_irq_get_num_comp(eq_table->irq_table);
+               min_t(int,
+                     mlx5_irq_get_num_comp(eq_table->irq_table),
+                     num_eqs - MLX5_MAX_ASYNC_EQS);
 
        err = create_async_eqs(dev);
        if (err) {
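
The hunk above clamps the number of completion EQs to what the device actually advertises (max_num_eqs, or 2^log_max_eq as a fallback) minus the EQs kept back for async use, rather than assuming one EQ per available interrupt vector. A sketch of that sizing arithmetic with invented capability values:

    #include <stdio.h>

    #define MAX_ASYNC_EQS 4   /* async EQs kept aside (4 with on-demand paging) */

    static int min_int(int a, int b) { return a < b ? a : b; }

    int main(void)
    {
        int max_num_eqs = 0;      /* 0 means the field is not reported   */
        int log_max_eq = 5;       /* fallback capability: 2^5 = 32 EQs   */
        int num_irq_vectors = 64; /* what the IRQ layer could hand out   */

        int num_eqs = max_num_eqs ? max_num_eqs : 1 << log_max_eq;
        int num_comp_eqs = min_int(num_irq_vectors, num_eqs - MAX_ASYNC_EQS);

        printf("completion EQs: %d (device limit %d, async reserve %d)\n",
               num_comp_eqs, num_eqs, MAX_ASYNC_EQS);
        return 0;
    }
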
index 6f6772b..3da7bec 100644 (file)
@@ -248,7 +248,7 @@ err_mod_hdr_regc0:
 err_ethertype:
        kfree(rule);
 out:
-       kfree(rule_spec);
+       kvfree(rule_spec);
        return err;
 }
 
@@ -328,7 +328,7 @@ static int mlx5_create_indir_recirc_group(struct mlx5_eswitch *esw,
        e->recirc_cnt = 0;
 
 out:
-       kfree(in);
+       kvfree(in);
        return err;
 }
 
@@ -347,7 +347,7 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
 
        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
-               kfree(in);
+               kvfree(in);
                return -ENOMEM;
        }
 
@@ -371,8 +371,8 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
        }
 
 err_out:
-       kfree(spec);
-       kfree(in);
+       kvfree(spec);
+       kvfree(in);
        return err;
 }
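
The kfree() to kvfree() conversions above match the allocation side: spec is shown coming from kvzalloc(), and the other buffers presumably do too; kvzalloc() may fall back to vmalloc for large sizes, so such memory must be released with the kv-aware free. A loose user-space analogue of pairing a release routine with the allocator that produced the buffer; big_alloc()/big_free() are invented helpers:

    #include <stdio.h>
    #include <stdlib.h>

    /* Two allocation paths, mirroring kmalloc-vs-vmalloc; the free routine
     * has to know (or be able to detect) which path produced the buffer. */
    static void *big_alloc(size_t n, int *from_fallback)
    {
        *from_fallback = n > 4096;          /* arbitrary threshold for the sketch */
        return malloc(n);                   /* both paths share malloc() here     */
    }

    static void big_free(void *p, int from_fallback)
    {
        /* a real implementation would dispatch to the matching deallocator */
        printf("freeing via %s path\n", from_fallback ? "fallback" : "small");
        free(p);
    }

    int main(void)
    {
        int fb;
        void *p = big_alloc(16 * 1024, &fb);

        if (!p)
            return 1;
        big_free(p, fb);   /* always release with the allocator-aware free */
        return 0;
    }
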
 
index 8694b83..d4a2f8d 100644 (file)
@@ -537,6 +537,14 @@ esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *
        return i;
 }
 
+static bool
+esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
+{
+       return MLX5_CAP_GEN(esw->dev, reg_c_preserve) &&
+              mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+              MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
+}
+
 static int
 esw_setup_dests(struct mlx5_flow_destination *dest,
                struct mlx5_flow_act *flow_act,
@@ -550,9 +558,7 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
        int err = 0;
 
        if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
-           MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve) &&
-           mlx5_eswitch_vport_match_metadata_enabled(esw) &&
-           MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
+           esw_src_port_rewrite_supported(esw))
                attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;
 
        if (attr->dest_ft) {
@@ -1716,36 +1722,40 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
        }
        esw->fdb_table.offloads.send_to_vport_grp = g;
 
-       /* meta send to vport */
-       memset(flow_group_in, 0, inlen);
-       MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
-                MLX5_MATCH_MISC_PARAMETERS_2);
-
-       match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+       if (esw_src_port_rewrite_supported(esw)) {
+               /* meta send to vport */
+               memset(flow_group_in, 0, inlen);
+               MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+                        MLX5_MATCH_MISC_PARAMETERS_2);
 
-       MLX5_SET(fte_match_param, match_criteria,
-                misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
-       MLX5_SET(fte_match_param, match_criteria,
-                misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
+               match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
 
-       num_vfs = esw->esw_funcs.num_vfs;
-       if (num_vfs) {
-               MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
-               MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + num_vfs - 1);
-               ix += num_vfs;
+               MLX5_SET(fte_match_param, match_criteria,
+                        misc_parameters_2.metadata_reg_c_0,
+                        mlx5_eswitch_get_vport_metadata_mask());
+               MLX5_SET(fte_match_param, match_criteria,
+                        misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
 
-               g = mlx5_create_flow_group(fdb, flow_group_in);
-               if (IS_ERR(g)) {
-                       err = PTR_ERR(g);
-                       esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
-                                err);
-                       goto send_vport_meta_err;
+               num_vfs = esw->esw_funcs.num_vfs;
+               if (num_vfs) {
+                       MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
+                       MLX5_SET(create_flow_group_in, flow_group_in,
+                                end_flow_index, ix + num_vfs - 1);
+                       ix += num_vfs;
+
+                       g = mlx5_create_flow_group(fdb, flow_group_in);
+                       if (IS_ERR(g)) {
+                               err = PTR_ERR(g);
+                               esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
+                                        err);
+                               goto send_vport_meta_err;
+                       }
+                       esw->fdb_table.offloads.send_to_vport_meta_grp = g;
+
+                       err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
+                       if (err)
+                               goto meta_rule_err;
                }
-               esw->fdb_table.offloads.send_to_vport_meta_grp = g;
-
-               err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
-               if (err)
-                       goto meta_rule_err;
        }
 
        if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
index d9d9e1f..ba28ac7 100644 (file)
@@ -21,6 +21,7 @@
 #include <net/red.h>
 #include <net/vxlan.h>
 #include <net/flow_offload.h>
+#include <net/inet_ecn.h>
 
 #include "port.h"
 #include "core.h"
@@ -347,6 +348,20 @@ struct mlxsw_sp_port_type_speed_ops {
        u32 (*ptys_proto_cap_masked_get)(u32 eth_proto_cap);
 };
 
+static inline u8 mlxsw_sp_tunnel_ecn_decap(u8 outer_ecn, u8 inner_ecn,
+                                          bool *trap_en)
+{
+       bool set_ce = false;
+
+       *trap_en = !!__INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
+       if (set_ce)
+               return INET_ECN_CE;
+       else if (outer_ecn == INET_ECN_ECT_1 && inner_ecn == INET_ECN_ECT_0)
+               return INET_ECN_ECT_1;
+       else
+               return inner_ecn;
+}
+
 static inline struct net_device *
 mlxsw_sp_bridge_vxlan_dev_find(struct net_device *br_dev)
 {
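
The new inline helper above centralizes the decapsulation ECN combination that the IPIP and NVE hunks further down switch to. A standalone model of the resulting behaviour, using the standard ECN codepoint values; the trap condition mirrors my reading of __INET_ECN_decapsulate and is an assumption, not a quote of the kernel code:

#include <stdbool.h>
#include <stdio.h>

/* ECN codepoints (same numeric values the kernel uses) */
enum { NOT_ECT = 0, ECT_1 = 1, ECT_0 = 2, CE = 3 };

/* Returns the inner ECN to use after decap and whether to trap the packet
 * (outer marked ECN on a not-ECT inner). */
static unsigned char tunnel_ecn_decap(unsigned char outer, unsigned char inner,
                                      bool *trap)
{
        *trap = (inner == NOT_ECT && outer != NOT_ECT);  /* assumption, see above */
        if (inner != NOT_ECT && outer == CE)
                return CE;
        if (outer == ECT_1 && inner == ECT_0)
                return ECT_1;
        return inner;
}

int main(void)
{
        bool trap;
        unsigned char ecn = tunnel_ecn_decap(CE, ECT_0, &trap);

        printf("new inner ECN %u, trap %d\n", ecn, trap);  /* 3 (CE), trap 0 */
        return 0;
}
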
index 0bd6416..078601d 100644 (file)
@@ -1230,16 +1230,22 @@ mlxsw_sp1_from_ptys_link_mode(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
                              u32 ptys_eth_proto,
                              struct ethtool_link_ksettings *cmd)
 {
+       struct mlxsw_sp1_port_link_mode link;
        int i;
 
-       cmd->link_mode = -1;
+       cmd->base.speed = SPEED_UNKNOWN;
+       cmd->base.duplex = DUPLEX_UNKNOWN;
+       cmd->lanes = 0;
 
        if (!carrier_ok)
                return;
 
        for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
-               if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
-                       cmd->link_mode = mlxsw_sp1_port_link_mode[i].mask_ethtool;
+               if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) {
+                       link = mlxsw_sp1_port_link_mode[i];
+                       ethtool_params_from_link_mode(cmd,
+                                                     link.mask_ethtool);
+               }
        }
 }
 
@@ -1672,7 +1678,9 @@ mlxsw_sp2_from_ptys_link_mode(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
        struct mlxsw_sp2_port_link_mode link;
        int i;
 
-       cmd->link_mode = -1;
+       cmd->base.speed = SPEED_UNKNOWN;
+       cmd->base.duplex = DUPLEX_UNKNOWN;
+       cmd->lanes = 0;
 
        if (!carrier_ok)
                return;
@@ -1680,7 +1688,8 @@ mlxsw_sp2_from_ptys_link_mode(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
        for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
                if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) {
                        link = mlxsw_sp2_port_link_mode[i];
-                       cmd->link_mode = link.mask_ethtool[1];
+                       ethtool_params_from_link_mode(cmd,
+                                                     link.mask_ethtool[1]);
                }
        }
 }
index 6ccca39..64a8f83 100644 (file)
@@ -335,12 +335,11 @@ static int mlxsw_sp_ipip_ecn_decap_init_one(struct mlxsw_sp *mlxsw_sp,
                                            u8 inner_ecn, u8 outer_ecn)
 {
        char tidem_pl[MLXSW_REG_TIDEM_LEN];
-       bool trap_en, set_ce = false;
        u8 new_inner_ecn;
+       bool trap_en;
 
-       trap_en = __INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
-       new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn;
-
+       new_inner_ecn = mlxsw_sp_tunnel_ecn_decap(outer_ecn, inner_ecn,
+                                                 &trap_en);
        mlxsw_reg_tidem_pack(tidem_pl, outer_ecn, inner_ecn, new_inner_ecn,
                             trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tidem), tidem_pl);
index e5ec595..9eba8fa 100644 (file)
@@ -909,12 +909,11 @@ static int __mlxsw_sp_nve_ecn_decap_init(struct mlxsw_sp *mlxsw_sp,
                                         u8 inner_ecn, u8 outer_ecn)
 {
        char tndem_pl[MLXSW_REG_TNDEM_LEN];
-       bool trap_en, set_ce = false;
        u8 new_inner_ecn;
+       bool trap_en;
 
-       trap_en = !!__INET_ECN_decapsulate(outer_ecn, inner_ecn, &set_ce);
-       new_inner_ecn = set_ce ? INET_ECN_CE : inner_ecn;
-
+       new_inner_ecn = mlxsw_sp_tunnel_ecn_decap(outer_ecn, inner_ecn,
+                                                 &trap_en);
        mlxsw_reg_tndem_pack(tndem_pl, outer_ecn, inner_ecn, new_inner_ecn,
                             trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tndem), tndem_pl);
index 1c3e204..7b6794a 100644 (file)
@@ -885,8 +885,8 @@ static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu)
        }
 
        mac_rx &= ~(MAC_RX_MAX_SIZE_MASK_);
-       mac_rx |= (((new_mtu + ETH_HLEN + 4) << MAC_RX_MAX_SIZE_SHIFT_) &
-                 MAC_RX_MAX_SIZE_MASK_);
+       mac_rx |= (((new_mtu + ETH_HLEN + ETH_FCS_LEN)
+                 << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
        lan743x_csr_write(adapter, MAC_RX, mac_rx);
 
        if (enabled) {
@@ -1944,7 +1944,7 @@ static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index)
        struct sk_buff *skb;
        dma_addr_t dma_ptr;
 
-       buffer_length = netdev->mtu + ETH_HLEN + 4 + RX_HEAD_PADDING;
+       buffer_length = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + RX_HEAD_PADDING;
 
        descriptor = &rx->ring_cpu_ptr[index];
        buffer_info = &rx->buffer_info[index];
@@ -2040,7 +2040,7 @@ lan743x_rx_trim_skb(struct sk_buff *skb, int frame_length)
                dev_kfree_skb_irq(skb);
                return NULL;
        }
-       frame_length = max_t(int, 0, frame_length - RX_HEAD_PADDING - 4);
+       frame_length = max_t(int, 0, frame_length - ETH_FCS_LEN);
        if (skb->len > frame_length) {
                skb->tail -= skb->len - frame_length;
                skb->len = frame_length;
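
The lan743x hunks replace the bare constant 4 with ETH_FCS_LEN in the RX max-size register and buffer sizing, and trim only the FCS from received frames. A standalone model of the buffer-length arithmetic; ETH_HLEN and ETH_FCS_LEN have their usual values, while the RX_HEAD_PADDING value below is only illustrative, not the driver's:

#include <stdio.h>

#define ETH_HLEN        14  /* Ethernet header */
#define ETH_FCS_LEN      4  /* frame check sequence */
#define RX_HEAD_PADDING  2  /* illustrative value; driver-specific */

static int rx_buffer_length(int mtu)
{
        return mtu + ETH_HLEN + ETH_FCS_LEN + RX_HEAD_PADDING;
}

int main(void)
{
        printf("MTU 1500 -> %d byte buffers\n", rx_buffer_length(1500)); /* 1520 */
        return 0;
}
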
index 1634ca6..c84c8bf 100644 (file)
@@ -2897,7 +2897,7 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
                        dev_kfree_skb_any(curr);
                        if (segs != NULL) {
                                curr = segs;
-                               segs = segs->next;
+                               segs = next;
                                curr->next = NULL;
                                dev_kfree_skb_any(segs);
                        }
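
The myri10ge hunk advances the segment list through a pointer saved earlier in the loop instead of re-reading it at the point of failure. A generic standalone sketch of the save-next-before-free idiom for singly linked lists; this is the pattern only, not the driver's data structures:

#include <stdlib.h>

struct node { struct node *next; };

/* Free a whole list safely: read ->next before freeing the element. */
static void free_list(struct node *head)
{
        while (head) {
                struct node *next = head->next; /* save first */
                free(head);                     /* then release */
                head = next;                    /* never touch freed memory */
        }
}

int main(void) { free_list(NULL); return 0; }
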
index 0e2db6e..2ec62c8 100644 (file)
@@ -454,6 +454,7 @@ void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
                        dev_consume_skb_any(skb);
                else
                        dev_kfree_skb_any(skb);
+               return;
        }
 
        nfp_ccm_rx(&bpf->ccm, skb);
index caf12ee..56833a4 100644 (file)
@@ -190,6 +190,7 @@ struct nfp_fl_internal_ports {
  * @qos_rate_limiters: Current active qos rate limiters
  * @qos_stats_lock:    Lock on qos stats updates
  * @pre_tun_rule_cnt:  Number of pre-tunnel rules offloaded
+ * @merge_table:       Hash table to store merged flows
  */
 struct nfp_flower_priv {
        struct nfp_app *app;
@@ -223,6 +224,7 @@ struct nfp_flower_priv {
        unsigned int qos_rate_limiters;
        spinlock_t qos_stats_lock; /* Protect the qos stats */
        int pre_tun_rule_cnt;
+       struct rhashtable merge_table;
 };
 
 /**
@@ -350,6 +352,12 @@ struct nfp_fl_payload_link {
 };
 
 extern const struct rhashtable_params nfp_flower_table_params;
+extern const struct rhashtable_params merge_table_params;
+
+struct nfp_merge_info {
+       u64 parent_ctx;
+       struct rhash_head ht_node;
+};
 
 struct nfp_fl_stats_frame {
        __be32 stats_con_id;
index aa06fcb..327bb56 100644 (file)
@@ -490,6 +490,12 @@ const struct rhashtable_params nfp_flower_table_params = {
        .automatic_shrinking    = true,
 };
 
+const struct rhashtable_params merge_table_params = {
+       .key_offset     = offsetof(struct nfp_merge_info, parent_ctx),
+       .head_offset    = offsetof(struct nfp_merge_info, ht_node),
+       .key_len        = sizeof(u64),
+};
+
 int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
                             unsigned int host_num_mems)
 {
@@ -506,6 +512,10 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
        if (err)
                goto err_free_flow_table;
 
+       err = rhashtable_init(&priv->merge_table, &merge_table_params);
+       if (err)
+               goto err_free_stats_ctx_table;
+
        get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
 
        /* Init ring buffer and unallocated mask_ids. */
@@ -513,7 +523,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
                kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
                              NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
        if (!priv->mask_ids.mask_id_free_list.buf)
-               goto err_free_stats_ctx_table;
+               goto err_free_merge_table;
 
        priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
 
@@ -550,6 +560,8 @@ err_free_last_used:
        kfree(priv->mask_ids.last_used);
 err_free_mask_id:
        kfree(priv->mask_ids.mask_id_free_list.buf);
+err_free_merge_table:
+       rhashtable_destroy(&priv->merge_table);
 err_free_stats_ctx_table:
        rhashtable_destroy(&priv->stats_ctx_table);
 err_free_flow_table:
@@ -568,6 +580,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
                                    nfp_check_rhashtable_empty, NULL);
        rhashtable_free_and_destroy(&priv->stats_ctx_table,
                                    nfp_check_rhashtable_empty, NULL);
+       rhashtable_free_and_destroy(&priv->merge_table,
+                                   nfp_check_rhashtable_empty, NULL);
        kvfree(priv->stats);
        kfree(priv->mask_ids.mask_id_free_list.buf);
        kfree(priv->mask_ids.last_used);
index d72225d..e95969c 100644 (file)
@@ -1009,6 +1009,8 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
        struct netlink_ext_ack *extack = NULL;
        struct nfp_fl_payload *merge_flow;
        struct nfp_fl_key_ls merge_key_ls;
+       struct nfp_merge_info *merge_info;
+       u64 parent_ctx = 0;
        int err;
 
        ASSERT_RTNL();
@@ -1019,6 +1021,15 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
            nfp_flower_is_merge_flow(sub_flow2))
                return -EINVAL;
 
+       /* check if the two flows are already merged */
+       parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32;
+       parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id));
+       if (rhashtable_lookup_fast(&priv->merge_table,
+                                  &parent_ctx, merge_table_params)) {
+               nfp_flower_cmsg_warn(app, "The two flows are already merged.\n");
+               return 0;
+       }
+
        err = nfp_flower_can_merge(sub_flow1, sub_flow2);
        if (err)
                return err;
@@ -1060,16 +1071,33 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
        if (err)
                goto err_release_metadata;
 
+       merge_info = kmalloc(sizeof(*merge_info), GFP_KERNEL);
+       if (!merge_info) {
+               err = -ENOMEM;
+               goto err_remove_rhash;
+       }
+       merge_info->parent_ctx = parent_ctx;
+       err = rhashtable_insert_fast(&priv->merge_table, &merge_info->ht_node,
+                                    merge_table_params);
+       if (err)
+               goto err_destroy_merge_info;
+
        err = nfp_flower_xmit_flow(app, merge_flow,
                                   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
        if (err)
-               goto err_remove_rhash;
+               goto err_remove_merge_info;
 
        merge_flow->in_hw = true;
        sub_flow1->in_hw = false;
 
        return 0;
 
+err_remove_merge_info:
+       WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
+                                           &merge_info->ht_node,
+                                           merge_table_params));
+err_destroy_merge_info:
+       kfree(merge_info);
 err_remove_rhash:
        WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
                                            &merge_flow->fl_node,
@@ -1359,7 +1387,9 @@ nfp_flower_remove_merge_flow(struct nfp_app *app,
 {
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_payload_link *link, *temp;
+       struct nfp_merge_info *merge_info;
        struct nfp_fl_payload *origin;
+       u64 parent_ctx = 0;
        bool mod = false;
        int err;
 
@@ -1396,8 +1426,22 @@ nfp_flower_remove_merge_flow(struct nfp_app *app,
 err_free_links:
        /* Clean any links connected with the merged flow. */
        list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
-                                merge_flow.list)
+                                merge_flow.list) {
+               u32 ctx_id = be32_to_cpu(link->sub_flow.flow->meta.host_ctx_id);
+
+               parent_ctx = (parent_ctx << 32) | (u64)(ctx_id);
                nfp_flower_unlink_flow(link);
+       }
+
+       merge_info = rhashtable_lookup_fast(&priv->merge_table,
+                                           &parent_ctx,
+                                           merge_table_params);
+       if (merge_info) {
+               WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
+                                                   &merge_info->ht_node,
+                                                   merge_table_params));
+               kfree(merge_info);
+       }
 
        kfree(merge_flow->action_data);
        kfree(merge_flow->mask_data);
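
The offload.c hunks key the new merge table on a single u64 built from the two sub-flows' host context IDs, both when checking for an existing merge and when cleaning one up. A standalone model of that key packing; the names are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Pack two 32-bit host context IDs into the 64-bit merge-table key,
 * first sub-flow in the high half, second in the low half. */
static uint64_t merge_parent_ctx(uint32_t ctx1, uint32_t ctx2)
{
        return ((uint64_t)ctx1 << 32) | (uint64_t)ctx2;
}

int main(void)
{
        printf("0x%016llx\n",
               (unsigned long long)merge_parent_ctx(0x00000002, 0x00000005));
        return 0;
}
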
index 581a92f..1df2c00 100644 (file)
@@ -2350,6 +2350,13 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
 
        if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
                pcie_set_readrq(tp->pci_dev, readrq);
+
+       /* Chip doesn't support pause in jumbo mode */
+       linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+                        tp->phydev->advertising, !jumbo);
+       linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+                        tp->phydev->advertising, !jumbo);
+       phy_start_aneg(tp->phydev);
 }
 
 DECLARE_RTL_COND(rtl_chipcmd_cond)
@@ -4630,8 +4637,6 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
        if (!tp->supports_gmii)
                phy_set_max_speed(phydev, SPEED_100);
 
-       phy_support_asym_pause(phydev);
-
        phy_attached_info(phydev);
 
        return 0;
index 208cae3..4749bd0 100644 (file)
@@ -1379,88 +1379,6 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
        }
 }
 
-/**
- * stmmac_reinit_rx_buffers - reinit the RX descriptor buffer.
- * @priv: driver private structure
- * Description: this function is called to re-allocate a receive buffer, perform
- * the DMA mapping and init the descriptor.
- */
-static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv)
-{
-       u32 rx_count = priv->plat->rx_queues_to_use;
-       u32 queue;
-       int i;
-
-       for (queue = 0; queue < rx_count; queue++) {
-               struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-
-               for (i = 0; i < priv->dma_rx_size; i++) {
-                       struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
-
-                       if (buf->page) {
-                               page_pool_recycle_direct(rx_q->page_pool, buf->page);
-                               buf->page = NULL;
-                       }
-
-                       if (priv->sph && buf->sec_page) {
-                               page_pool_recycle_direct(rx_q->page_pool, buf->sec_page);
-                               buf->sec_page = NULL;
-                       }
-               }
-       }
-
-       for (queue = 0; queue < rx_count; queue++) {
-               struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-
-               for (i = 0; i < priv->dma_rx_size; i++) {
-                       struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
-                       struct dma_desc *p;
-
-                       if (priv->extend_desc)
-                               p = &((rx_q->dma_erx + i)->basic);
-                       else
-                               p = rx_q->dma_rx + i;
-
-                       if (!buf->page) {
-                               buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
-                               if (!buf->page)
-                                       goto err_reinit_rx_buffers;
-
-                               buf->addr = page_pool_get_dma_addr(buf->page);
-                       }
-
-                       if (priv->sph && !buf->sec_page) {
-                               buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
-                               if (!buf->sec_page)
-                                       goto err_reinit_rx_buffers;
-
-                               buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
-                       }
-
-                       stmmac_set_desc_addr(priv, p, buf->addr);
-                       if (priv->sph)
-                               stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
-                       else
-                               stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
-                       if (priv->dma_buf_sz == BUF_SIZE_16KiB)
-                               stmmac_init_desc3(priv, p);
-               }
-       }
-
-       return;
-
-err_reinit_rx_buffers:
-       do {
-               while (--i >= 0)
-                       stmmac_free_rx_buffer(priv, queue, i);
-
-               if (queue == 0)
-                       break;
-
-               i = priv->dma_rx_size;
-       } while (queue-- > 0);
-}
-
 /**
  * init_dma_rx_desc_rings - init the RX descriptor rings
  * @dev: net device structure
@@ -5428,7 +5346,7 @@ int stmmac_resume(struct device *dev)
        mutex_lock(&priv->lock);
 
        stmmac_reset_queues_param(priv);
-       stmmac_reinit_rx_buffers(priv);
+
        stmmac_free_tx_skbufs(priv);
        stmmac_clear_descriptors(priv);
 
index 1e966a3..aca7f82 100644 (file)
@@ -504,6 +504,18 @@ static inline u32 axinet_ior_read_mcr(struct axienet_local *lp)
        return axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
 }
 
+static inline void axienet_lock_mii(struct axienet_local *lp)
+{
+       if (lp->mii_bus)
+               mutex_lock(&lp->mii_bus->mdio_lock);
+}
+
+static inline void axienet_unlock_mii(struct axienet_local *lp)
+{
+       if (lp->mii_bus)
+               mutex_unlock(&lp->mii_bus->mdio_lock);
+}
+
 /**
  * axienet_iow - Memory mapped Axi Ethernet register write
  * @lp:         Pointer to axienet local structure
index 5d677db..f8f8654 100644 (file)
@@ -1053,9 +1053,9 @@ static int axienet_open(struct net_device *ndev)
         * including the MDIO. MDIO must be disabled before resetting.
         * Hold MDIO bus lock to avoid MDIO accesses during the reset.
         */
-       mutex_lock(&lp->mii_bus->mdio_lock);
+       axienet_lock_mii(lp);
        ret = axienet_device_reset(ndev);
-       mutex_unlock(&lp->mii_bus->mdio_lock);
+       axienet_unlock_mii(lp);
 
        ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
        if (ret) {
@@ -1148,9 +1148,9 @@ static int axienet_stop(struct net_device *ndev)
        }
 
        /* Do a reset to ensure DMA is really stopped */
-       mutex_lock(&lp->mii_bus->mdio_lock);
+       axienet_lock_mii(lp);
        __axienet_device_reset(lp);
-       mutex_unlock(&lp->mii_bus->mdio_lock);
+       axienet_unlock_mii(lp);
 
        cancel_work_sync(&lp->dma_err_task);
 
@@ -1709,9 +1709,9 @@ static void axienet_dma_err_handler(struct work_struct *work)
         * including the MDIO. MDIO must be disabled before resetting.
         * Hold MDIO bus lock to avoid MDIO accesses during the reset.
         */
-       mutex_lock(&lp->mii_bus->mdio_lock);
+       axienet_lock_mii(lp);
        __axienet_device_reset(lp);
-       mutex_unlock(&lp->mii_bus->mdio_lock);
+       axienet_unlock_mii(lp);
 
        for (i = 0; i < lp->tx_bd_num; i++) {
                cur_p = &lp->tx_bd_v[i];
index 4ac0373..42f31c6 100644 (file)
@@ -891,6 +891,9 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
        __be16 sport;
        int err;
 
+       if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+               return -EINVAL;
+
        sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
        rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
                              geneve->cfg.info.key.tp_dst, sport);
@@ -908,8 +911,16 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 
                info = skb_tunnel_info(skb);
                if (info) {
-                       info->key.u.ipv4.dst = fl4.saddr;
-                       info->key.u.ipv4.src = fl4.daddr;
+                       struct ip_tunnel_info *unclone;
+
+                       unclone = skb_tunnel_info_unclone(skb);
+                       if (unlikely(!unclone)) {
+                               dst_release(&rt->dst);
+                               return -ENOMEM;
+                       }
+
+                       unclone->key.u.ipv4.dst = fl4.saddr;
+                       unclone->key.u.ipv4.src = fl4.daddr;
                }
 
                if (!pskb_may_pull(skb, ETH_HLEN)) {
@@ -977,6 +988,9 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
        __be16 sport;
        int err;
 
+       if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
+               return -EINVAL;
+
        sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
        dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
                                geneve->cfg.info.key.tp_dst, sport);
@@ -993,8 +1007,16 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                struct ip_tunnel_info *info = skb_tunnel_info(skb);
 
                if (info) {
-                       info->key.u.ipv6.dst = fl6.saddr;
-                       info->key.u.ipv6.src = fl6.daddr;
+                       struct ip_tunnel_info *unclone;
+
+                       unclone = skb_tunnel_info_unclone(skb);
+                       if (unlikely(!unclone)) {
+                               dst_release(dst);
+                               return -ENOMEM;
+                       }
+
+                       unclone->key.u.ipv6.dst = fl6.saddr;
+                       unclone->key.u.ipv6.src = fl6.daddr;
                }
 
                if (!pskb_may_pull(skb, ETH_HLEN)) {
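
Both geneve hunks (and the vxlan ones later in this section) apply the same fix: the ip_tunnel_info reachable from the skb may be shared state, so it is uncloned before the source and destination addresses are swapped in for the reply path. A hedged kernel-style sketch of the pattern, compressed from the IPv4 site; error handling is abbreviated:

        struct ip_tunnel_info *info = skb_tunnel_info(skb);

        if (info) {
                struct ip_tunnel_info *unclone = skb_tunnel_info_unclone(skb);

                if (unlikely(!unclone))
                        return -ENOMEM; /* the patch also releases the route here */

                /* private copy now, safe to rewrite for the reply direction */
                unclone->key.u.ipv4.dst = fl4.saddr;
                unclone->key.u.ipv4.src = fl4.daddr;
        }
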
index 0dd0ba9..23ee0b1 100644 (file)
@@ -365,6 +365,7 @@ static int atusb_alloc_urbs(struct atusb *atusb, int n)
                        return -ENOMEM;
                }
                usb_anchor_urb(urb, &atusb->idle_urbs);
+               usb_free_urb(urb);
                n--;
        }
        return 0;
index 53282a6..287cccf 100644 (file)
@@ -369,7 +369,7 @@ EXPORT_SYMBOL_GPL(bcm_phy_enable_apd);
 
 int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
 {
-       int val;
+       int val, mask = 0;
 
        /* Enable EEE at PHY level */
        val = phy_read_mmd(phydev, MDIO_MMD_AN, BRCM_CL45VEN_EEE_CONTROL);
@@ -388,10 +388,17 @@ int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
        if (val < 0)
                return val;
 
+       if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+                             phydev->supported))
+               mask |= MDIO_EEE_1000T;
+       if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+                             phydev->supported))
+               mask |= MDIO_EEE_100TX;
+
        if (enable)
-               val |= (MDIO_EEE_100TX | MDIO_EEE_1000T);
+               val |= mask;
        else
-               val &= ~(MDIO_EEE_100TX | MDIO_EEE_1000T);
+               val &= ~mask;
 
        phy_write_mmd(phydev, MDIO_MMD_AN, BCM_CL45VEN_EEE_ADV, (u32)val);
 
index e26a5d6..8018ddf 100644 (file)
@@ -3021,9 +3021,34 @@ static struct phy_driver marvell_drivers[] = {
                .get_stats = marvell_get_stats,
        },
        {
-               .phy_id = MARVELL_PHY_ID_88E6390,
+               .phy_id = MARVELL_PHY_ID_88E6341_FAMILY,
                .phy_id_mask = MARVELL_PHY_ID_MASK,
-               .name = "Marvell 88E6390",
+               .name = "Marvell 88E6341 Family",
+               /* PHY_GBIT_FEATURES */
+               .flags = PHY_POLL_CABLE_TEST,
+               .probe = m88e1510_probe,
+               .config_init = marvell_config_init,
+               .config_aneg = m88e6390_config_aneg,
+               .read_status = marvell_read_status,
+               .config_intr = marvell_config_intr,
+               .handle_interrupt = marvell_handle_interrupt,
+               .resume = genphy_resume,
+               .suspend = genphy_suspend,
+               .read_page = marvell_read_page,
+               .write_page = marvell_write_page,
+               .get_sset_count = marvell_get_sset_count,
+               .get_strings = marvell_get_strings,
+               .get_stats = marvell_get_stats,
+               .get_tunable = m88e1540_get_tunable,
+               .set_tunable = m88e1540_set_tunable,
+               .cable_test_start = marvell_vct7_cable_test_start,
+               .cable_test_tdr_start = marvell_vct5_cable_test_tdr_start,
+               .cable_test_get_status = marvell_vct7_cable_test_get_status,
+       },
+       {
+               .phy_id = MARVELL_PHY_ID_88E6390_FAMILY,
+               .phy_id_mask = MARVELL_PHY_ID_MASK,
+               .name = "Marvell 88E6390 Family",
                /* PHY_GBIT_FEATURES */
                .flags = PHY_POLL_CABLE_TEST,
                .probe = m88e6390_probe,
@@ -3107,7 +3132,8 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
        { MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK },
        { MARVELL_PHY_ID_88E1545, MARVELL_PHY_ID_MASK },
        { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
-       { MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK },
+       { MARVELL_PHY_ID_88E6341_FAMILY, MARVELL_PHY_ID_MASK },
+       { MARVELL_PHY_ID_88E6390_FAMILY, MARVELL_PHY_ID_MASK },
        { MARVELL_PHY_ID_88E1340S, MARVELL_PHY_ID_MASK },
        { MARVELL_PHY_ID_88E1548P, MARVELL_PHY_ID_MASK },
        { }
index fc86da7..4cf38be 100644 (file)
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
 #include <linux/mutex.h>
+#include <linux/ieee802154.h>
+#include <linux/if_ltalk.h>
+#include <uapi/linux/if_fddi.h>
+#include <uapi/linux/if_hippi.h>
+#include <uapi/linux/if_fc.h>
+#include <net/ax25.h>
+#include <net/rose.h>
+#include <net/6lowpan.h>
 
 #include <linux/uaccess.h>
 #include <linux/proc_fs.h>
@@ -2919,6 +2927,45 @@ static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
        return __tun_set_ebpf(tun, prog_p, prog);
 }
 
+/* Return correct value for tun->dev->addr_len based on tun->dev->type. */
+static unsigned char tun_get_addr_len(unsigned short type)
+{
+       switch (type) {
+       case ARPHRD_IP6GRE:
+       case ARPHRD_TUNNEL6:
+               return sizeof(struct in6_addr);
+       case ARPHRD_IPGRE:
+       case ARPHRD_TUNNEL:
+       case ARPHRD_SIT:
+               return 4;
+       case ARPHRD_ETHER:
+               return ETH_ALEN;
+       case ARPHRD_IEEE802154:
+       case ARPHRD_IEEE802154_MONITOR:
+               return IEEE802154_EXTENDED_ADDR_LEN;
+       case ARPHRD_PHONET_PIPE:
+       case ARPHRD_PPP:
+       case ARPHRD_NONE:
+               return 0;
+       case ARPHRD_6LOWPAN:
+               return EUI64_ADDR_LEN;
+       case ARPHRD_FDDI:
+               return FDDI_K_ALEN;
+       case ARPHRD_HIPPI:
+               return HIPPI_ALEN;
+       case ARPHRD_IEEE802:
+               return FC_ALEN;
+       case ARPHRD_ROSE:
+               return ROSE_ADDR_LEN;
+       case ARPHRD_NETROM:
+               return AX25_ADDR_LEN;
+       case ARPHRD_LOCALTLK:
+               return LTALK_ALEN;
+       default:
+               return 0;
+       }
+}
+
 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                            unsigned long arg, int ifreq_len)
 {
@@ -3082,6 +3129,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                                break;
                        }
                        tun->dev->type = (int) arg;
+                       tun->dev->addr_len = tun_get_addr_len(tun->dev->type);
                        netif_info(tun, drv, tun->dev, "linktype set to %d\n",
                                   tun->dev->type);
                        call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
index 31d5134..9bc58e6 100644 (file)
@@ -611,7 +611,7 @@ static struct hso_serial *get_serial_by_index(unsigned index)
        return serial;
 }
 
-static int get_free_serial_index(void)
+static int obtain_minor(struct hso_serial *serial)
 {
        int index;
        unsigned long flags;
@@ -619,8 +619,10 @@ static int get_free_serial_index(void)
        spin_lock_irqsave(&serial_table_lock, flags);
        for (index = 0; index < HSO_SERIAL_TTY_MINORS; index++) {
                if (serial_table[index] == NULL) {
+                       serial_table[index] = serial->parent;
+                       serial->minor = index;
                        spin_unlock_irqrestore(&serial_table_lock, flags);
-                       return index;
+                       return 0;
                }
        }
        spin_unlock_irqrestore(&serial_table_lock, flags);
@@ -629,15 +631,12 @@ static int get_free_serial_index(void)
        return -1;
 }
 
-static void set_serial_by_index(unsigned index, struct hso_serial *serial)
+static void release_minor(struct hso_serial *serial)
 {
        unsigned long flags;
 
        spin_lock_irqsave(&serial_table_lock, flags);
-       if (serial)
-               serial_table[index] = serial->parent;
-       else
-               serial_table[index] = NULL;
+       serial_table[serial->minor] = NULL;
        spin_unlock_irqrestore(&serial_table_lock, flags);
 }
 
@@ -2230,6 +2229,7 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
 static void hso_serial_tty_unregister(struct hso_serial *serial)
 {
        tty_unregister_device(tty_drv, serial->minor);
+       release_minor(serial);
 }
 
 static void hso_serial_common_free(struct hso_serial *serial)
@@ -2253,24 +2253,22 @@ static void hso_serial_common_free(struct hso_serial *serial)
 static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
                                    int rx_size, int tx_size)
 {
-       int minor;
        int i;
 
        tty_port_init(&serial->port);
 
-       minor = get_free_serial_index();
-       if (minor < 0)
+       if (obtain_minor(serial))
                goto exit2;
 
        /* register our minor number */
        serial->parent->dev = tty_port_register_device_attr(&serial->port,
-                       tty_drv, minor, &serial->parent->interface->dev,
+                       tty_drv, serial->minor, &serial->parent->interface->dev,
                        serial->parent, hso_serial_dev_groups);
-       if (IS_ERR(serial->parent->dev))
+       if (IS_ERR(serial->parent->dev)) {
+               release_minor(serial);
                goto exit2;
+       }
 
-       /* fill in specific data for later use */
-       serial->minor = minor;
        serial->magic = HSO_SERIAL_MAGIC;
        spin_lock_init(&serial->serial_lock);
        serial->num_rx_urbs = num_urbs;
@@ -2667,9 +2665,6 @@ static struct hso_device *hso_create_bulk_serial_device(
 
        serial->write_data = hso_std_serial_write_data;
 
-       /* and record this serial */
-       set_serial_by_index(serial->minor, serial);
-
        /* setup the proc dirs and files if needed */
        hso_log_port(hso_dev);
 
@@ -2726,9 +2721,6 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface,
        serial->shared_int->ref_count++;
        mutex_unlock(&serial->shared_int->shared_int_lock);
 
-       /* and record this serial */
-       set_serial_by_index(serial->minor, serial);
-
        /* setup the proc dirs and files if needed */
        hso_log_port(hso_dev);
 
@@ -3113,7 +3105,6 @@ static void hso_free_interface(struct usb_interface *interface)
                        cancel_work_sync(&serial_table[i]->async_get_intf);
                        hso_serial_tty_unregister(serial);
                        kref_put(&serial_table[i]->ref, hso_serial_ref_free);
-                       set_serial_by_index(i, NULL);
                }
        }
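
The hso rework makes minor allocation atomic: obtain_minor() finds a free slot and claims it while still holding serial_table_lock, so a concurrent probe cannot race for the same index. A standalone model of the claim-under-lock idiom, with a pthread mutex standing in for the spinlock (build with -lpthread):

#include <pthread.h>
#include <stdio.h>

#define MINORS 8

static void *table[MINORS];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Find a free slot and claim it before dropping the lock. */
static int obtain_minor(void *owner)
{
        int i, ret = -1;

        pthread_mutex_lock(&table_lock);
        for (i = 0; i < MINORS; i++) {
                if (!table[i]) {
                        table[i] = owner;   /* claim under the lock */
                        ret = i;
                        break;
                }
        }
        pthread_mutex_unlock(&table_lock);
        return ret;
}

static void release_minor(int minor)
{
        pthread_mutex_lock(&table_lock);
        table[minor] = NULL;
        pthread_mutex_unlock(&table_lock);
}

int main(void)
{
        int a = obtain_minor(table);    /* 0 */
        int b = obtain_minor(table);    /* 1 */

        release_minor(a);
        printf("%d %d %d\n", a, b, obtain_minor(table));  /* 0 1 0 */
        return 0;
}
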
 
index 82e520d..0824e69 100644 (file)
@@ -406,9 +406,13 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
        offset += hdr_padded_len;
        p += hdr_padded_len;
 
-       copy = len;
-       if (copy > skb_tailroom(skb))
-               copy = skb_tailroom(skb);
+       /* Copy all frame if it fits skb->head, otherwise
+        * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
+        */
+       if (len <= skb_tailroom(skb))
+               copy = len;
+       else
+               copy = ETH_HLEN + metasize;
        skb_put_data(skb, p, copy);
 
        if (metasize) {
index 6d91308..503e2fd 100644 (file)
@@ -471,9 +471,8 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
 
        skb_dst_drop(skb);
 
-       /* if dst.dev is loopback or the VRF device again this is locally
-        * originated traffic destined to a local address. Short circuit
-        * to Rx path
+       /* if dst.dev is the VRF device again this is locally originated traffic
+        * destined to a local address. Short circuit to Rx path.
         */
        if (dst->dev == dev)
                return vrf_local_xmit(skb, dev, dst);
@@ -547,9 +546,8 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
 
        skb_dst_drop(skb);
 
-       /* if dst.dev is loopback or the VRF device again this is locally
-        * originated traffic destined to a local address. Short circuit
-        * to Rx path
+       /* if dst.dev is the VRF device again this is locally originated traffic
+        * destined to a local address. Short circuit to Rx path.
         */
        if (rt->dst.dev == vrf_dev)
                return vrf_local_xmit(skb, vrf_dev, &rt->dst);
index 666dd20..53dbc67 100644 (file)
@@ -2725,12 +2725,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                        goto tx_error;
                } else if (err) {
                        if (info) {
+                               struct ip_tunnel_info *unclone;
                                struct in_addr src, dst;
 
+                               unclone = skb_tunnel_info_unclone(skb);
+                               if (unlikely(!unclone))
+                                       goto tx_error;
+
                                src = remote_ip.sin.sin_addr;
                                dst = local_ip.sin.sin_addr;
-                               info->key.u.ipv4.src = src.s_addr;
-                               info->key.u.ipv4.dst = dst.s_addr;
+                               unclone->key.u.ipv4.src = src.s_addr;
+                               unclone->key.u.ipv4.dst = dst.s_addr;
                        }
                        vxlan_encap_bypass(skb, vxlan, vxlan, vni, false);
                        dst_release(ndst);
@@ -2781,12 +2786,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                        goto tx_error;
                } else if (err) {
                        if (info) {
+                               struct ip_tunnel_info *unclone;
                                struct in6_addr src, dst;
 
+                               unclone = skb_tunnel_info_unclone(skb);
+                               if (unlikely(!unclone))
+                                       goto tx_error;
+
                                src = remote_ip.sin6.sin6_addr;
                                dst = local_ip.sin6.sin6_addr;
-                               info->key.u.ipv6.src = src;
-                               info->key.u.ipv6.dst = dst;
+                               unclone->key.u.ipv6.src = src;
+                               unclone->key.u.ipv6.dst = dst;
                        }
 
                        vxlan_encap_bypass(skb, vxlan, vxlan, vni, false);
index 0720f5f..4d9dc7d 100644 (file)
@@ -415,7 +415,7 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
 
                if (pad > 0) { /* Pad the frame with zeros */
                        if (__skb_pad(skb, pad, false))
-                               goto drop;
+                               goto out;
                        skb_put(skb, pad);
                }
        }
@@ -448,8 +448,9 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 
 drop:
-       dev->stats.tx_dropped++;
        kfree_skb(skb);
+out:
+       dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
 }
 
index 6d30a0f..34cd8a7 100644 (file)
@@ -2439,7 +2439,7 @@ void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool locked)
        vif = ifp->vif;
        cfg = wdev_to_cfg(&vif->wdev);
        cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL;
-       if (locked) {
+       if (!locked) {
                rtnl_lock();
                wiphy_lock(cfg->wiphy);
                cfg80211_unregister_wdev(&vif->wdev);
index 3dbc6f3..231d251 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2005-2014 Intel Corporation
+ * Copyright (C) 2005-2014, 2021 Intel Corporation
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
  */
 #include <linux/sched.h>
@@ -26,7 +26,7 @@ bool iwl_notification_wait(struct iwl_notif_wait_data *notif_wait,
        if (!list_empty(&notif_wait->notif_waits)) {
                struct iwl_notification_wait *w;
 
-               spin_lock(&notif_wait->notif_wait_lock);
+               spin_lock_bh(&notif_wait->notif_wait_lock);
                list_for_each_entry(w, &notif_wait->notif_waits, list) {
                        int i;
                        bool found = false;
@@ -59,7 +59,7 @@ bool iwl_notification_wait(struct iwl_notif_wait_data *notif_wait,
                                triggered = true;
                        }
                }
-               spin_unlock(&notif_wait->notif_wait_lock);
+               spin_unlock_bh(&notif_wait->notif_wait_lock);
        }
 
        return triggered;
@@ -70,10 +70,10 @@ void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
 {
        struct iwl_notification_wait *wait_entry;
 
-       spin_lock(&notif_wait->notif_wait_lock);
+       spin_lock_bh(&notif_wait->notif_wait_lock);
        list_for_each_entry(wait_entry, &notif_wait->notif_waits, list)
                wait_entry->aborted = true;
-       spin_unlock(&notif_wait->notif_wait_lock);
+       spin_unlock_bh(&notif_wait->notif_wait_lock);
 
        wake_up_all(&notif_wait->notif_waitq);
 }
index 75f99ff..c4f5da7 100644 (file)
@@ -414,6 +414,7 @@ struct iwl_cfg {
 #define IWL_CFG_MAC_TYPE_QNJ           0x36
 #define IWL_CFG_MAC_TYPE_SO            0x37
 #define IWL_CFG_MAC_TYPE_SNJ           0x42
+#define IWL_CFG_MAC_TYPE_SOF           0x43
 #define IWL_CFG_MAC_TYPE_MA            0x44
 
 #define IWL_CFG_RF_TYPE_TH             0x105
index af684f8..c5a1e84 100644 (file)
@@ -232,7 +232,7 @@ enum iwl_reg_capa_flags_v2 {
        REG_CAPA_V2_MCS_9_ALLOWED       = BIT(6),
        REG_CAPA_V2_WEATHER_DISABLED    = BIT(7),
        REG_CAPA_V2_40MHZ_ALLOWED       = BIT(8),
-       REG_CAPA_V2_11AX_DISABLED       = BIT(13),
+       REG_CAPA_V2_11AX_DISABLED       = BIT(10),
 };
 
 /*
index 1307605..34ddef9 100644 (file)
@@ -1786,10 +1786,13 @@ static ssize_t iwl_dbgfs_rfi_freq_table_write(struct iwl_mvm *mvm, char *buf,
                return -EINVAL;
 
        /* value zero triggers re-sending the default table to the device */
-       if (!op_id)
+       if (!op_id) {
+               mutex_lock(&mvm->mutex);
                ret = iwl_rfi_send_config_cmd(mvm, NULL);
-       else
+               mutex_unlock(&mvm->mutex);
+       } else {
                ret = -EOPNOTSUPP; /* in the future a new table will be added */
+       }
 
        return ret ?: count;
 }
index 8739190..0b81806 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2020 Intel Corporation
+ * Copyright (C) 2020 - 2021 Intel Corporation
  */
 
 #include "mvm.h"
@@ -66,6 +66,8 @@ int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_t
        if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT))
                return -EOPNOTSUPP;
 
+       lockdep_assert_held(&mvm->mutex);
+
        /* in case no table is passed, use the default one */
        if (!rfi_table) {
                memcpy(cmd.table, iwl_rfi_table, sizeof(cmd.table));
@@ -75,9 +77,7 @@ int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_t
                cmd.oem = 1;
        }
 
-       mutex_lock(&mvm->mutex);
        ret = iwl_mvm_send_cmd(mvm, &hcmd);
-       mutex_unlock(&mvm->mutex);
 
        if (ret)
                IWL_ERR(mvm, "Failed to send RFI config cmd %d\n", ret);
index c21736f..af5a6dd 100644 (file)
@@ -272,10 +272,10 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
        rx_status->chain_signal[2] = S8_MIN;
 }
 
-static int iwl_mvm_rx_mgmt_crypto(struct ieee80211_sta *sta,
-                                 struct ieee80211_hdr *hdr,
-                                 struct iwl_rx_mpdu_desc *desc,
-                                 u32 status)
+static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
+                               struct ieee80211_hdr *hdr,
+                               struct iwl_rx_mpdu_desc *desc,
+                               u32 status)
 {
        struct iwl_mvm_sta *mvmsta;
        struct iwl_mvm_vif *mvmvif;
@@ -285,6 +285,9 @@ static int iwl_mvm_rx_mgmt_crypto(struct ieee80211_sta *sta,
        u32 len = le16_to_cpu(desc->mpdu_len);
        const u8 *frame = (void *)hdr;
 
+       if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) == IWL_RX_MPDU_STATUS_SEC_NONE)
+               return 0;
+
        /*
         * For non-beacon, we don't really care. But beacons may
         * be filtered out, and we thus need the firmware's replay
@@ -356,6 +359,10 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
            IWL_RX_MPDU_STATUS_SEC_UNKNOWN && !mvm->monitor_on)
                return -1;
 
+       if (unlikely(ieee80211_is_mgmt(hdr->frame_control) &&
+                    !ieee80211_has_protected(hdr->frame_control)))
+               return iwl_mvm_rx_mgmt_prot(sta, hdr, desc, status);
+
        if (!ieee80211_has_protected(hdr->frame_control) ||
            (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
            IWL_RX_MPDU_STATUS_SEC_NONE)
@@ -411,7 +418,7 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                stats->flag |= RX_FLAG_DECRYPTED;
                return 0;
        case RX_MPDU_RES_STATUS_SEC_CMAC_GMAC_ENC:
-               return iwl_mvm_rx_mgmt_crypto(sta, hdr, desc, status);
+               break;
        default:
                /*
                 * Sometimes we can get frames that were not decrypted
index 8fba190..cecc32e 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 #include "iwl-trans.h"
 #include "iwl-fh.h"
@@ -75,15 +75,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
                                 const struct fw_img *fw)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
-                     u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
-                                     CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
-                     u32_encode_bits(250,
-                                     CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
-                     CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
-                     u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
-                                     CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
-                     u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
        struct iwl_context_info_gen3 *ctxt_info_gen3;
        struct iwl_prph_scratch *prph_scratch;
        struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
@@ -217,26 +208,6 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
        iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
                    CSR_AUTO_FUNC_BOOT_ENA);
 
-       /*
-        * To workaround hardware latency issues during the boot process,
-        * initialize the LTR to ~250 usec (see ltr_val above).
-        * The firmware initializes this again later (to a smaller value).
-        */
-       if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
-            trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
-           !trans->trans_cfg->integrated) {
-               iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
-       } else if (trans->trans_cfg->integrated &&
-                  trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
-               iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
-               iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
-       }
-
-       if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
-               iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
-       else
-               iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
-
        return 0;
 
 err_free_ctxt_info:
index d1bb273..74ce31f 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
  * Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 #include "iwl-trans.h"
 #include "iwl-fh.h"
@@ -240,7 +240,6 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
 
        /* kick FW self load */
        iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr);
-       iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
 
        /* Context info will be released upon alive or failure to get one */
 
index ffaf973..558a0b2 100644 (file)
@@ -592,6 +592,7 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
        IWL_DEV_INFO(0x4DF0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
        IWL_DEV_INFO(0x4DF0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
        IWL_DEV_INFO(0x4DF0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
+       IWL_DEV_INFO(0x4DF0, 0x6074, iwl_ax201_cfg_qu_hr, NULL),
 
        /* So with HR */
        IWL_DEV_INFO(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0, NULL),
@@ -1040,7 +1041,31 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
                      IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
                      IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
                      IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
-                     iwl_cfg_so_a0_hr_a0, iwl_ax201_name)
+                     iwl_cfg_so_a0_hr_a0, iwl_ax201_name),
+
+/* So-F with Hr */
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+                     IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+                     iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
+                     IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+                     iwl_cfg_so_a0_hr_a0, iwl_ax101_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+                     IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+                     iwl_cfg_so_a0_hr_a0, iwl_ax201_name),
+
+/* So-F with Gf */
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+                     IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
+                     IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+                     iwlax211_2ax_cfg_so_gf_a0, iwl_ax211_name),
 
 #endif /* CONFIG_IWLMVM */
 };
index 497ef34..94ffc1a 100644 (file)
@@ -266,6 +266,34 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
        mutex_unlock(&trans_pcie->mutex);
 }
 
+static void iwl_pcie_set_ltr(struct iwl_trans *trans)
+{
+       u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
+                     u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+                                     CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
+                     u32_encode_bits(250,
+                                     CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
+                     CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
+                     u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+                                     CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
+                     u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
+
+       /*
+        * To workaround hardware latency issues during the boot process,
+        * initialize the LTR to ~250 usec (see ltr_val above).
+        * The firmware initializes this again later (to a smaller value).
+        */
+       if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
+            trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
+           !trans->trans_cfg->integrated) {
+               iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
+       } else if (trans->trans_cfg->integrated &&
+                  trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
+               iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
+               iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
+       }
+}
+
 int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
                                 const struct fw_img *fw, bool run_in_rfkill)
 {
@@ -332,6 +360,13 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
        if (ret)
                goto out;
 
+       iwl_pcie_set_ltr(trans);
+
+       if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+               iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
+       else
+               iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
+
        /* re-check RF-Kill state since we may have missed the interrupt */
        hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
        if (hw_rfkill && !run_in_rfkill)
index 381e8f9..7ae3249 100644 (file)
@@ -928,6 +928,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
        u32 cmd_pos;
        const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
        u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
+       unsigned long flags;
 
        if (WARN(!trans->wide_cmd_header &&
                 group_id > IWL_ALWAYS_LONG_GROUP,
@@ -1011,10 +1012,10 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
                goto free_dup_buf;
        }
 
-       spin_lock_bh(&txq->lock);
+       spin_lock_irqsave(&txq->lock, flags);
 
        if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
-               spin_unlock_bh(&txq->lock);
+               spin_unlock_irqrestore(&txq->lock, flags);
 
                IWL_ERR(trans, "No space in command queue\n");
                iwl_op_mode_cmd_queue_full(trans->op_mode);
@@ -1174,7 +1175,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
  unlock_reg:
        spin_unlock(&trans_pcie->reg_lock);
  out:
-       spin_unlock_bh(&txq->lock);
+       spin_unlock_irqrestore(&txq->lock, flags);
  free_dup_buf:
        if (idx < 0)
                kfree(dup_buf);
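
The last iwlwifi hunk moves the command-queue lock from spin_lock_bh() to spin_lock_irqsave()/spin_unlock_irqrestore(), which also disables local interrupts and preserves whatever interrupt state the caller already had; that is the usual choice when the path may run with interrupts already off or race with hard-IRQ context. A minimal hedged sketch of the idiom, with illustrative surrounding code:

        unsigned long flags;

        spin_lock_irqsave(&txq->lock, flags);   /* IRQs off, previous state saved */
        /* ... touch state that may also be reached with interrupts disabled ... */
        spin_unlock_irqrestore(&txq->lock, flags);  /* restore the saved state */
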
index 18980bb..6dad7f6 100644 (file)
 
 #define MT_WTBLON_TOP_BASE             0x34000
 #define MT_WTBLON_TOP(ofs)             (MT_WTBLON_TOP_BASE + (ofs))
-#define MT_WTBLON_TOP_WDUCR            MT_WTBLON_TOP(0x0)
+#define MT_WTBLON_TOP_WDUCR            MT_WTBLON_TOP(0x200)
 #define MT_WTBLON_TOP_WDUCR_GROUP      GENMASK(2, 0)
 
-#define MT_WTBL_UPDATE                 MT_WTBLON_TOP(0x030)
+#define MT_WTBL_UPDATE                 MT_WTBLON_TOP(0x230)
 #define MT_WTBL_UPDATE_WLAN_IDX                GENMASK(9, 0)
 #define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12)
 #define MT_WTBL_UPDATE_BUSY            BIT(31)
index c878097..1df9595 100644 (file)
@@ -12,6 +12,7 @@
 #include <net/cfg80211.h>
 #include <net/rtnetlink.h>
 #include <linux/etherdevice.h>
+#include <linux/math64.h>
 #include <linux/module.h>
 
 static struct wiphy *common_wiphy;
@@ -168,11 +169,11 @@ static void virt_wifi_scan_result(struct work_struct *work)
                             scan_result.work);
        struct wiphy *wiphy = priv_to_wiphy(priv);
        struct cfg80211_scan_info scan_info = { .aborted = false };
+       u64 tsf = div_u64(ktime_get_boottime_ns(), 1000);
 
        informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
                                           CFG80211_BSS_FTYPE_PRESP,
-                                          fake_router_bssid,
-                                          ktime_get_boottime_ns(),
+                                          fake_router_bssid, tsf,
                                           WLAN_CAPABILITY_ESS, 0,
                                           (void *)&ssid, sizeof(ssid),
                                           DBM_TO_MBM(-50), GFP_KERNEL);
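
virt_wifi now hands cfg80211_inform_bss() a TSF in microseconds, converting from ktime_get_boottime_ns() with div_u64(), the kernel's helper for dividing a u64 by a u32 that stays safe on 32-bit builds. A hedged two-line sketch of the conversion; NSEC_PER_USEC is the standard 1000:

        /* ns -> us for the BSS TSF; div_u64() avoids open-coded 64-bit division */
        u64 tsf_us = div_u64(ktime_get_boottime_ns(), NSEC_PER_USEC);
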
index a5439c1..d24b7a7 100644 (file)
@@ -824,11 +824,15 @@ static void connect(struct backend_info *be)
        xenvif_carrier_on(be->vif);
 
        unregister_hotplug_status_watch(be);
-       err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL,
-                                  hotplug_status_changed,
-                                  "%s/%s", dev->nodename, "hotplug-status");
-       if (!err)
+       if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) {
+               err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
+                                          NULL, hotplug_status_changed,
+                                          "%s/%s", dev->nodename,
+                                          "hotplug-status");
+               if (err)
+                       goto err;
                be->have_hotplug_status_watch = 1;
+       }
 
        netif_tx_wake_all_queues(be->vif->dev);
 
index 48f0985..3a777d0 100644 (file)
@@ -631,16 +631,14 @@ void nvdimm_check_and_set_ro(struct gendisk *disk)
        struct nd_region *nd_region = to_nd_region(dev->parent);
        int disk_ro = get_disk_ro(disk);
 
-       /*
-        * Upgrade to read-only if the region is read-only preserve as
-        * read-only if the disk is already read-only.
-        */
-       if (disk_ro || nd_region->ro == disk_ro)
+       /* catch the disk up with the region ro state */
+       if (disk_ro == nd_region->ro)
                return;
 
-       dev_info(dev, "%s read-only, marking %s read-only\n",
-                       dev_name(&nd_region->dev), disk->disk_name);
-       set_disk_ro(disk, 1);
+       dev_info(dev, "%s read-%s, marking %s read-%s\n",
+                dev_name(&nd_region->dev), nd_region->ro ? "only" : "write",
+                disk->disk_name, nd_region->ro ? "only" : "write");
+       set_disk_ro(disk, nd_region->ro);
 }
 EXPORT_SYMBOL(nvdimm_check_and_set_ro);
 
index b8a85bf..7daac79 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/mm.h>
 #include <asm/cacheflush.h>
 #include "pmem.h"
+#include "btt.h"
 #include "pfn.h"
 #include "nd.h"
 
@@ -585,7 +586,7 @@ static void nd_pmem_shutdown(struct device *dev)
        nvdimm_flush(to_nd_region(dev->parent), NULL);
 }
 
-static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
+static void pmem_revalidate_poison(struct device *dev)
 {
        struct nd_region *nd_region;
        resource_size_t offset = 0, end_trunc = 0;
@@ -595,9 +596,6 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
        struct range range;
        struct kernfs_node *bb_state;
 
-       if (event != NVDIMM_REVALIDATE_POISON)
-               return;
-
        if (is_nd_btt(dev)) {
                struct nd_btt *nd_btt = to_nd_btt(dev);
 
@@ -635,6 +633,37 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
                sysfs_notify_dirent(bb_state);
 }
 
+static void pmem_revalidate_region(struct device *dev)
+{
+       struct pmem_device *pmem;
+
+       if (is_nd_btt(dev)) {
+               struct nd_btt *nd_btt = to_nd_btt(dev);
+               struct btt *btt = nd_btt->btt;
+
+               nvdimm_check_and_set_ro(btt->btt_disk);
+               return;
+       }
+
+       pmem = dev_get_drvdata(dev);
+       nvdimm_check_and_set_ro(pmem->disk);
+}
+
+static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
+{
+       switch (event) {
+       case NVDIMM_REVALIDATE_POISON:
+               pmem_revalidate_poison(dev);
+               break;
+       case NVDIMM_REVALIDATE_REGION:
+               pmem_revalidate_region(dev);
+               break;
+       default:
+               dev_WARN_ONCE(dev, 1, "notify: unknown event: %d\n", event);
+               break;
+       }
+}
+
 MODULE_ALIAS("pmem");
 MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
 MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
index ef23119..9ccf3d6 100644 (file)
@@ -518,6 +518,12 @@ static ssize_t read_only_show(struct device *dev,
        return sprintf(buf, "%d\n", nd_region->ro);
 }
 
+static int revalidate_read_only(struct device *dev, void *data)
+{
+       nd_device_notify(dev, NVDIMM_REVALIDATE_REGION);
+       return 0;
+}
+
 static ssize_t read_only_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
 {
@@ -529,6 +535,7 @@ static ssize_t read_only_store(struct device *dev,
                return rc;
 
        nd_region->ro = ro;
+       device_for_each_child(dev, NULL, revalidate_read_only);
        return len;
 }
 static DEVICE_ATTR_RW(read_only);
@@ -1239,6 +1246,11 @@ int nvdimm_has_flush(struct nd_region *nd_region)
                        || !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
                return -ENXIO;
 
+       /* Test if an explicit flush function is defined */
+       if (test_bit(ND_REGION_ASYNC, &nd_region->flags) && nd_region->flush)
+               return 1;
+
+       /* Test if any flush hints for the region are available */
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;
@@ -1249,8 +1261,8 @@ int nvdimm_has_flush(struct nd_region *nd_region)
        }
 
        /*
-        * The platform defines dimm devices without hints, assume
-        * platform persistence mechanism like ADR
+        * The platform defines dimm devices without hints or an explicit
+        * flush; assume a platform persistence mechanism such as ADR
         */
        return 0;
 }
index dcc1dd9..adb26af 100644 (file)
@@ -205,7 +205,7 @@ static void populate_properties(const void *blob,
                *pprev = NULL;
 }
 
-static bool populate_node(const void *blob,
+static int populate_node(const void *blob,
                          int offset,
                          void **mem,
                          struct device_node *dad,
@@ -214,24 +214,24 @@ static bool populate_node(const void *blob,
 {
        struct device_node *np;
        const char *pathp;
-       unsigned int l, allocl;
+       int len;
 
-       pathp = fdt_get_name(blob, offset, &l);
+       pathp = fdt_get_name(blob, offset, &len);
        if (!pathp) {
                *pnp = NULL;
-               return false;
+               return len;
        }
 
-       allocl = ++l;
+       len++;
 
-       np = unflatten_dt_alloc(mem, sizeof(struct device_node) + allocl,
+       np = unflatten_dt_alloc(mem, sizeof(struct device_node) + len,
                                __alignof__(struct device_node));
        if (!dryrun) {
                char *fn;
                of_node_init(np);
                np->full_name = fn = ((char *)np) + sizeof(*np);
 
-               memcpy(fn, pathp, l);
+               memcpy(fn, pathp, len);
 
                if (dad != NULL) {
                        np->parent = dad;
@@ -295,6 +295,7 @@ static int unflatten_dt_nodes(const void *blob,
        struct device_node *nps[FDT_MAX_DEPTH];
        void *base = mem;
        bool dryrun = !base;
+       int ret;
 
        if (nodepp)
                *nodepp = NULL;
@@ -322,9 +323,10 @@ static int unflatten_dt_nodes(const void *blob,
                    !of_fdt_device_is_available(blob, offset))
                        continue;
 
-               if (!populate_node(blob, offset, &mem, nps[depth],
-                                  &nps[depth+1], dryrun))
-                       return mem - base;
+               ret = populate_node(blob, offset, &mem, nps[depth],
+                                  &nps[depth+1], dryrun);
+               if (ret < 0)
+                       return ret;
 
                if (!dryrun && nodepp && !*nodepp)
                        *nodepp = nps[depth+1];
@@ -372,6 +374,10 @@ void *__unflatten_device_tree(const void *blob,
 {
        int size;
        void *mem;
+       int ret;
+
+       if (mynodes)
+               *mynodes = NULL;
 
        pr_debug(" -> unflatten_device_tree()\n");
 
@@ -392,7 +398,7 @@ void *__unflatten_device_tree(const void *blob,
 
        /* First pass, scan for size */
        size = unflatten_dt_nodes(blob, NULL, dad, NULL);
-       if (size < 0)
+       if (size <= 0)
                return NULL;
 
        size = ALIGN(size, 4);
@@ -410,12 +416,16 @@ void *__unflatten_device_tree(const void *blob,
        pr_debug("  unflattening %p...\n", mem);
 
        /* Second pass, do actual unflattening */
-       unflatten_dt_nodes(blob, mem, dad, mynodes);
+       ret = unflatten_dt_nodes(blob, mem, dad, mynodes);
+
        if (be32_to_cpup(mem + size) != 0xdeadbeef)
                pr_warn("End of tree marker overwritten: %08x\n",
                        be32_to_cpup(mem + size));
 
-       if (detached && mynodes) {
+       if (ret <= 0)
+               return NULL;
+
+       if (detached && mynodes && *mynodes) {
                of_node_set_flag(*mynodes, OF_DETACHED);
                pr_debug("unflattened tree is detached\n");
        }
index d9e6a32..d717efb 100644 (file)
@@ -8,6 +8,8 @@
  * Copyright (C) 1996-2005 Paul Mackerras.
  */
 
+#define FDT_ALIGN_SIZE 8
+
 /**
  * struct alias_prop - Alias property in 'aliases' node
  * @link:      List node to link the structure in aliases_lookup list
index 50bbe0e..23effe5 100644 (file)
@@ -57,7 +57,7 @@ struct fragment {
  * struct overlay_changeset
  * @id:                        changeset identifier
  * @ovcs_list:         list on which we are located
- * @fdt:               FDT that was unflattened to create @overlay_tree
+ * @fdt:               base of memory allocated to hold aligned FDT that was unflattened to create @overlay_tree
  * @overlay_tree:      expanded device tree that contains the fragment nodes
  * @count:             count of fragment structures
  * @fragments:         fragment nodes in the overlay expanded device tree
@@ -719,8 +719,8 @@ static struct device_node *find_target(struct device_node *info_node)
 /**
  * init_overlay_changeset() - initialize overlay changeset from overlay tree
  * @ovcs:      Overlay changeset to build
- * @fdt:       the FDT that was unflattened to create @tree
- * @tree:      Contains all the overlay fragments and overlay fixup nodes
+ * @fdt:       base of memory allocated to hold aligned FDT that was unflattened to create @tree
+ * @tree:      Contains the overlay fragments and overlay fixup nodes
  *
  * Initialize @ovcs.  Populate @ovcs->fragments with node information from
  * the top level of @tree.  The relevant top level nodes are the fragment
@@ -873,7 +873,7 @@ static void free_overlay_changeset(struct overlay_changeset *ovcs)
  * internal documentation
  *
  * of_overlay_apply() - Create and apply an overlay changeset
- * @fdt:       the FDT that was unflattened to create @tree
+ * @fdt:       base of memory allocated to hold the aligned FDT
  * @tree:      Expanded overlay device tree
  * @ovcs_id:   Pointer to overlay changeset id
  *
@@ -953,7 +953,9 @@ static int of_overlay_apply(const void *fdt, struct device_node *tree,
        /*
         * after overlay_notify(), ovcs->overlay_tree related pointers may have
         * leaked to drivers, so can not kfree() tree, aka ovcs->overlay_tree;
-        * and can not free fdt, aka ovcs->fdt
+        * and can not free memory containing aligned fdt.  The aligned fdt
+        * is contained within the memory at ovcs->fdt, possibly at an offset
+        * from ovcs->fdt.
         */
        ret = overlay_notify(ovcs, OF_OVERLAY_PRE_APPLY);
        if (ret) {
@@ -1014,10 +1016,11 @@ out:
 int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
                         int *ovcs_id)
 {
-       const void *new_fdt;
+       void *new_fdt;
+       void *new_fdt_align;
        int ret;
        u32 size;
-       struct device_node *overlay_root;
+       struct device_node *overlay_root = NULL;
 
        *ovcs_id = 0;
        ret = 0;
@@ -1036,11 +1039,14 @@ int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
         * Must create permanent copy of FDT because of_fdt_unflatten_tree()
         * will create pointers to the passed in FDT in the unflattened tree.
         */
-       new_fdt = kmemdup(overlay_fdt, size, GFP_KERNEL);
+       new_fdt = kmalloc(size + FDT_ALIGN_SIZE, GFP_KERNEL);
        if (!new_fdt)
                return -ENOMEM;
 
-       of_fdt_unflatten_tree(new_fdt, NULL, &overlay_root);
+       new_fdt_align = PTR_ALIGN(new_fdt, FDT_ALIGN_SIZE);
+       memcpy(new_fdt_align, overlay_fdt, size);
+
+       of_fdt_unflatten_tree(new_fdt_align, NULL, &overlay_root);
        if (!overlay_root) {
                pr_err("unable to unflatten overlay_fdt\n");
                ret = -EINVAL;
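Both this hunk and the unittest change further below replace kmemdup() with an over-allocate-and-align copy so the FDT always starts on an 8-byte boundary. A minimal userspace sketch of that pattern; copy_fdt_aligned() is an illustrative name rather than a kernel helper, and the caller must free the raw pointer, not the aligned one:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define FDT_ALIGN_SIZE 8

void *copy_fdt_aligned(const void *blob, size_t size, void **raw)
{
        uintptr_t p;

        *raw = malloc(size + FDT_ALIGN_SIZE);       /* room to round up */
        if (!*raw)
                return NULL;

        /* Equivalent of PTR_ALIGN(): round up to the next 8-byte boundary. */
        p = ((uintptr_t)*raw + FDT_ALIGN_SIZE - 1) & ~(uintptr_t)(FDT_ALIGN_SIZE - 1);
        memcpy((void *)p, blob, size);
        return (void *)p;
}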
index 5036a36..78427c8 100644 (file)
@@ -1262,7 +1262,16 @@ DEFINE_SIMPLE_PROP(pinctrl7, "pinctrl-7", NULL)
 DEFINE_SIMPLE_PROP(pinctrl8, "pinctrl-8", NULL)
 DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
 DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
-DEFINE_SUFFIX_PROP(gpios, "-gpios", "#gpio-cells")
+
+static struct device_node *parse_gpios(struct device_node *np,
+                                      const char *prop_name, int index)
+{
+       if (!strcmp_suffix(prop_name, ",nr-gpios"))
+               return NULL;
+
+       return parse_suffix_prop_cells(np, prop_name, index, "-gpios",
+                                      "#gpio-cells");
+}
 
 static struct device_node *parse_iommu_maps(struct device_node *np,
                                            const char *prop_name, int index)
index eb10062..819a20a 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/slab.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
+#include <linux/kernel.h>
 
 #include <linux/i2c.h>
 #include <linux/i2c-mux.h>
@@ -1408,7 +1409,8 @@ static void attach_node_and_children(struct device_node *np)
 static int __init unittest_data_add(void)
 {
        void *unittest_data;
-       struct device_node *unittest_data_node, *np;
+       void *unittest_data_align;
+       struct device_node *unittest_data_node = NULL, *np;
        /*
         * __dtb_testcases_begin[] and __dtb_testcases_end[] are magically
         * created by cmd_dt_S_dtb in scripts/Makefile.lib
@@ -1417,21 +1419,29 @@ static int __init unittest_data_add(void)
        extern uint8_t __dtb_testcases_end[];
        const int size = __dtb_testcases_end - __dtb_testcases_begin;
        int rc;
+       void *ret;
 
        if (!size) {
-               pr_warn("%s: No testcase data to attach; not running tests\n",
-                       __func__);
+               pr_warn("%s: testcases is empty\n", __func__);
                return -ENODATA;
        }
 
        /* creating copy */
-       unittest_data = kmemdup(__dtb_testcases_begin, size, GFP_KERNEL);
+       unittest_data = kmalloc(size + FDT_ALIGN_SIZE, GFP_KERNEL);
        if (!unittest_data)
                return -ENOMEM;
 
-       of_fdt_unflatten_tree(unittest_data, NULL, &unittest_data_node);
+       unittest_data_align = PTR_ALIGN(unittest_data, FDT_ALIGN_SIZE);
+       memcpy(unittest_data_align, __dtb_testcases_begin, size);
+
+       ret = of_fdt_unflatten_tree(unittest_data_align, NULL, &unittest_data_node);
+       if (!ret) {
+               pr_warn("%s: unflatten testcases tree failed\n", __func__);
+               kfree(unittest_data);
+               return -ENODATA;
+       }
        if (!unittest_data_node) {
-               pr_warn("%s: No tree to attach; not running tests\n", __func__);
+               pr_warn("%s: testcases tree is empty\n", __func__);
                kfree(unittest_data);
                return -ENODATA;
        }
index 8085782..9f3361c 100644 (file)
@@ -1357,6 +1357,7 @@ static int intel_pinctrl_add_padgroups_by_gpps(struct intel_pinctrl *pctrl,
                                gpps[i].gpio_base = 0;
                                break;
                        case INTEL_GPIO_BASE_NOMAP:
+                               break;
                        default:
                                break;
                }
@@ -1393,6 +1394,7 @@ static int intel_pinctrl_add_padgroups_by_size(struct intel_pinctrl *pctrl,
                gpps[i].size = min(gpp_size, npins);
                npins -= gpps[i].size;
 
+               gpps[i].gpio_base = gpps[i].base;
                gpps[i].padown_num = padown_num;
 
                /*
@@ -1491,8 +1493,13 @@ static int intel_pinctrl_probe(struct platform_device *pdev,
                if (IS_ERR(regs))
                        return PTR_ERR(regs);
 
-               /* Determine community features based on the revision */
+               /*
+                * Determine community features based on the revision.
+                * A value of all ones means the device is not present.
+                */
                value = readl(regs + REVID);
+               if (value == ~0u)
+                       return -ENODEV;
                if (((value & REVID_MASK) >> REVID_SHIFT) >= 0x94) {
                        community->features |= PINCTRL_FEATURE_DEBOUNCE;
                        community->features |= PINCTRL_FEATURE_1K_PD;
index f35edb0..c12fa57 100644 (file)
@@ -572,7 +572,7 @@ static void microchip_sgpio_irq_settype(struct irq_data *data,
        /* Type value spread over 2 registers sets: low, high bit */
        sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER, addr.bit,
                         BIT(addr.port), (!!(type & 0x1)) << addr.port);
-       sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER + SGPIO_MAX_BITS, addr.bit,
+       sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER, SGPIO_MAX_BITS + addr.bit,
                         BIT(addr.port), (!!(type & 0x2)) << addr.port);
 
        if (type == SGPIO_INT_TRG_LEVEL)
index aa1a1c8..53a0bad 100644 (file)
@@ -3727,12 +3727,15 @@ static int __maybe_unused rockchip_pinctrl_suspend(struct device *dev)
 static int __maybe_unused rockchip_pinctrl_resume(struct device *dev)
 {
        struct rockchip_pinctrl *info = dev_get_drvdata(dev);
-       int ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX,
-                              rk3288_grf_gpio6c_iomux |
-                              GPIO6C6_SEL_WRITE_ENABLE);
+       int ret;
 
-       if (ret)
-               return ret;
+       if (info->ctrl->type == RK3288) {
+               ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX,
+                                  rk3288_grf_gpio6c_iomux |
+                                  GPIO6C6_SEL_WRITE_ENABLE);
+               if (ret)
+                       return ret;
+       }
 
        return pinctrl_force_default(info->pctl_dev);
 }
index 369ee20..2f19ab4 100644 (file)
@@ -392,7 +392,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
                          unsigned long *configs, unsigned int nconfs)
 {
        struct lpi_pinctrl *pctrl = dev_get_drvdata(pctldev->dev);
-       unsigned int param, arg, pullup, strength;
+       unsigned int param, arg, pullup = LPI_GPIO_BIAS_DISABLE, strength = 2;
        bool value, output_enabled = false;
        const struct lpi_pingroup *g;
        unsigned long sval;
index 8daccd5..9d41abf 100644 (file)
@@ -1439,14 +1439,14 @@ static const struct msm_pingroup sc7280_groups[] = {
        [172] = PINGROUP(172, qdss, _, _, _, _, _, _, _, _),
        [173] = PINGROUP(173, qdss, _, _, _, _, _, _, _, _),
        [174] = PINGROUP(174, qdss, _, _, _, _, _, _, _, _),
-       [175] = UFS_RESET(ufs_reset, 0x1be000),
-       [176] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x1b3000, 15, 0),
-       [177] = SDC_QDSD_PINGROUP(sdc1_clk, 0x1b3000, 13, 6),
-       [178] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x1b3000, 11, 3),
-       [179] = SDC_QDSD_PINGROUP(sdc1_data, 0x1b3000, 9, 0),
-       [180] = SDC_QDSD_PINGROUP(sdc2_clk, 0x1b4000, 14, 6),
-       [181] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x1b4000, 11, 3),
-       [182] = SDC_QDSD_PINGROUP(sdc2_data, 0x1b4000, 9, 0),
+       [175] = UFS_RESET(ufs_reset, 0xbe000),
+       [176] = SDC_QDSD_PINGROUP(sdc1_rclk, 0xb3004, 0, 6),
+       [177] = SDC_QDSD_PINGROUP(sdc1_clk, 0xb3000, 13, 6),
+       [178] = SDC_QDSD_PINGROUP(sdc1_cmd, 0xb3000, 11, 3),
+       [179] = SDC_QDSD_PINGROUP(sdc1_data, 0xb3000, 9, 0),
+       [180] = SDC_QDSD_PINGROUP(sdc2_clk, 0xb4000, 14, 6),
+       [181] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xb4000, 11, 3),
+       [182] = SDC_QDSD_PINGROUP(sdc2_data, 0xb4000, 9, 0),
 };
 
 static const struct msm_pinctrl_soc_data sc7280_pinctrl = {
index 2b5b0e2..5aaf57b 100644 (file)
@@ -423,7 +423,7 @@ static const char * const gpio_groups[] = {
 
 static const char * const qdss_stm_groups[] = {
        "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", "gpio12", "gpio13",
-       "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19" "gpio20", "gpio21", "gpio22",
+       "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
        "gpio23", "gpio44", "gpio45", "gpio52", "gpio53", "gpio56", "gpio57", "gpio61", "gpio62",
        "gpio63", "gpio64", "gpio65", "gpio66",
 };
index 57cc928..078648a 100644 (file)
@@ -483,11 +483,16 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
                        goto wakeup;
 
                /*
-                * Switch events will wake the device and report the new switch
-                * position to the input subsystem.
+                * Some devices send (duplicate) tablet-mode events when moved
+                * around even though the mode has not changed; and they do this
+                * even when suspended.
+                * Update the switch state in case it changed and then return
+                * without waking up to avoid spurious wakeups.
                 */
-               if (priv->switches && (event == 0xcc || event == 0xcd))
-                       goto wakeup;
+               if (event == 0xcc || event == 0xcd) {
+                       report_tablet_mode_event(priv->switches, event);
+                       return;
+               }
 
                /* Wake up on 5-button array events only. */
                if (event == 0xc0 || !priv->array)
@@ -501,9 +506,6 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
 wakeup:
                pm_wakeup_hard_event(&device->dev);
 
-               if (report_tablet_mode_event(priv->switches, event))
-                       return;
-
                return;
        }
 
index ddecf25..d7894f1 100644 (file)
@@ -309,11 +309,20 @@ static bool sanity_check(struct ce_array *ca)
        return ret;
 }
 
+/**
+ * cec_add_elem - Add an element to the CEC array.
+ * @pfn:       page frame number to insert
+ *
+ * Return values:
+ * - <0:       on error
+ * -  0:       on success
+ * - >0:       when the inserted pfn was offlined
+ */
 static int cec_add_elem(u64 pfn)
 {
        struct ce_array *ca = &ce_arr;
+       int count, err, ret = 0;
        unsigned int to = 0;
-       int count, ret = 0;
 
        /*
         * We can be called very early on the identify_cpu() path where we are
@@ -330,8 +339,8 @@ static int cec_add_elem(u64 pfn)
        if (ca->n == MAX_ELEMS)
                WARN_ON(!del_lru_elem_unlocked(ca));
 
-       ret = find_elem(ca, pfn, &to);
-       if (ret < 0) {
+       err = find_elem(ca, pfn, &to);
+       if (err < 0) {
                /*
                 * Shift range [to-end] to make room for one more element.
                 */
index 7b0cd08..ba020a4 100644 (file)
@@ -125,7 +125,7 @@ static const struct regulator_ops vid_ops = {
 
 static const struct regulator_desc regulators[] = {
        BD9571MWV_REG("VD09", "vd09", VD09, avs_ops, 0, 0x7f,
-                     0x80, 600000, 10000, 0x3c),
+                     0x6f, 600000, 10000, 0x3c),
        BD9571MWV_REG("VD18", "vd18", VD18, vid_ops, BD9571MWV_VD18_VID, 0xf,
                      16, 1625000, 25000, 0),
        BD9571MWV_REG("VD25", "vd25", VD25, vid_ops, BD9571MWV_VD25_VID, 0xf,
@@ -134,7 +134,7 @@ static const struct regulator_desc regulators[] = {
                      11, 2800000, 100000, 0),
        BD9571MWV_REG("DVFS", "dvfs", DVFS, reg_ops,
                      BD9571MWV_DVFS_MONIVDAC, 0x7f,
-                     0x80, 600000, 10000, 0x3c),
+                     0x6f, 600000, 10000, 0x3c),
 };
 
 #ifdef CONFIG_PM_SLEEP
@@ -174,7 +174,7 @@ static ssize_t backup_mode_show(struct device *dev,
 {
        struct bd9571mwv_reg *bdreg = dev_get_drvdata(dev);
 
-       return sprintf(buf, "%s\n", bdreg->bkup_mode_enabled ? "on" : "off");
+       return sysfs_emit(buf, "%s\n", bdreg->bkup_mode_enabled ? "on" : "off");
 }
 
 static ssize_t backup_mode_store(struct device *dev,
@@ -301,7 +301,7 @@ static int bd9571mwv_regulator_probe(struct platform_device *pdev)
                                               &config);
                if (IS_ERR(rdev)) {
                        dev_err(&pdev->dev, "failed to register %s regulator\n",
-                               pdev->name);
+                               regulators[i].name);
                        return PTR_ERR(rdev);
                }
        }
index 2667919..dcb380e 100644 (file)
@@ -450,6 +450,24 @@ static void *pru_i_da_to_va(struct pru_rproc *pru, u32 da, size_t len)
        if (len == 0)
                return NULL;
 
+       /*
+        * GNU binutils do not support multiple address spaces. The GNU
+        * linker's default linker script places IRAM at an arbitrary high
+        * offset, in order to differentiate it from DRAM. Hence we need to
+        * strip the artificial offset in the IRAM addresses coming from the
+        * ELF file.
+        *
+        * The TI proprietary linker would never set those higher IRAM address
+        * bits anyway. PRU architecture limits the program counter to 16-bit
+        * word-address range. This in turn corresponds to 18-bit IRAM
+        * byte-address range for ELF.
+        *
+        * Two more bits are added just in case to make the final 20-bit mask.
+        * Idea is to have a safeguard in case TI decides to add banking
+        * in future SoCs.
+        */
+       da &= 0xfffff;
+
        if (da >= PRU_IRAM_DA &&
            da + len <= PRU_IRAM_DA + pru->mem_regions[PRU_IOMEM_IRAM].size) {
                offset = da - PRU_IRAM_DA;
@@ -585,7 +603,7 @@ pru_rproc_load_elf_segments(struct rproc *rproc, const struct firmware *fw)
                        break;
                }
 
-               if (pru->data->is_k3 && is_iram) {
+               if (pru->data->is_k3) {
                        ret = pru_rproc_memcpy(ptr, elf_data + phdr->p_offset,
                                               filesz);
                        if (ret) {
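The comment in the hunk above explains why device addresses coming from GNU-linked ELF files are masked down to 20 bits before the IRAM range check. A self-contained illustration of that normalisation; the region constants are placeholders, not the driver's real sizes:

#include <stdbool.h>
#include <stdint.h>

#define IRAM_DA   0x0u        /* illustrative IRAM device address */
#define IRAM_SIZE 0x4000u     /* illustrative IRAM size */

bool iram_range_ok(uint32_t da, uint32_t len)
{
        da &= 0xfffff;        /* strip the linker's artificial high bits */
        return len != 0 &&
               da >= IRAM_DA &&
               da + len <= IRAM_DA + IRAM_SIZE;
}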
index 5521c44..7c007dd 100644 (file)
@@ -56,7 +56,7 @@ static int qcom_pil_info_init(void)
        memset_io(base, 0, resource_size(&imem));
 
        _reloc.base = base;
-       _reloc.num_entries = resource_size(&imem) / PIL_RELOC_ENTRY_SIZE;
+       _reloc.num_entries = (u32)resource_size(&imem) / PIL_RELOC_ENTRY_SIZE;
 
        return 0;
 }
index d126bb8..ba6a3aa 100644 (file)
 #ifndef HPSA_CMD_H
 #define HPSA_CMD_H
 
+#include <linux/compiler.h>
+
+#include <linux/build_bug.h> /* static_assert */
+#include <linux/stddef.h> /* offsetof */
+
 /* general boundary defintions */
 #define SENSEINFOBYTES          32 /* may vary between hbas */
 #define SG_ENTRIES_IN_CMD      32 /* Max SG entries excluding chain blocks */
@@ -200,12 +205,10 @@ union u64bit {
        MAX_EXT_TARGETS + 1) /* + 1 is for the controller itself */
 
 /* SCSI-3 Commands */
-#pragma pack(1)
-
 #define HPSA_INQUIRY 0x12
 struct InquiryData {
        u8 data_byte[36];
-};
+} __packed;
 
 #define HPSA_REPORT_LOG 0xc2    /* Report Logical LUNs */
 #define HPSA_REPORT_PHYS 0xc3   /* Report Physical LUNs */
@@ -221,7 +224,7 @@ struct raid_map_disk_data {
        u8    xor_mult[2];            /**< XOR multipliers for this position,
                                        *  valid for data disks only */
        u8    reserved[2];
-};
+} __packed;
 
 struct raid_map_data {
        __le32   structure_size;        /* Size of entire structure in bytes */
@@ -247,14 +250,14 @@ struct raid_map_data {
        __le16   dekindex;              /* Data encryption key index. */
        u8    reserved[16];
        struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES];
-};
+} __packed;
 
 struct ReportLUNdata {
        u8 LUNListLength[4];
        u8 extended_response_flag;
        u8 reserved[3];
        u8 LUN[HPSA_MAX_LUN][8];
-};
+} __packed;
 
 struct ext_report_lun_entry {
        u8 lunid[8];
@@ -269,20 +272,20 @@ struct ext_report_lun_entry {
        u8 lun_count; /* multi-lun device, how many luns */
        u8 redundant_paths;
        u32 ioaccel_handle; /* ioaccel1 only uses lower 16 bits */
-};
+} __packed;
 
 struct ReportExtendedLUNdata {
        u8 LUNListLength[4];
        u8 extended_response_flag;
        u8 reserved[3];
        struct ext_report_lun_entry LUN[HPSA_MAX_PHYS_LUN];
-};
+} __packed;
 
 struct SenseSubsystem_info {
        u8 reserved[36];
        u8 portname[8];
        u8 reserved1[1108];
-};
+} __packed;
 
 /* BMIC commands */
 #define BMIC_READ 0x26
@@ -317,7 +320,7 @@ union SCSI3Addr {
                u8 Targ:6;
                u8 Mode:2;        /* b10 */
        } LogUnit;
-};
+} __packed;
 
 struct PhysDevAddr {
        u32             TargetId:24;
@@ -325,20 +328,20 @@ struct PhysDevAddr {
        u32             Mode:2;
        /* 2 level target device addr */
        union SCSI3Addr  Target[2];
-};
+} __packed;
 
 struct LogDevAddr {
        u32            VolId:30;
        u32            Mode:2;
        u8             reserved[4];
-};
+} __packed;
 
 union LUNAddr {
        u8               LunAddrBytes[8];
        union SCSI3Addr    SCSI3Lun[4];
        struct PhysDevAddr PhysDev;
        struct LogDevAddr  LogDev;
-};
+} __packed;
 
 struct CommandListHeader {
        u8              ReplyQueue;
@@ -346,7 +349,7 @@ struct CommandListHeader {
        __le16          SGTotal;
        __le64          tag;
        union LUNAddr     LUN;
-};
+} __packed;
 
 struct RequestBlock {
        u8   CDBLen;
@@ -365,18 +368,18 @@ struct RequestBlock {
 #define GET_DIR(tad) (((tad) >> 6) & 0x03)
        u16  Timeout;
        u8   CDB[16];
-};
+} __packed;
 
 struct ErrDescriptor {
        __le64 Addr;
        __le32 Len;
-};
+} __packed;
 
 struct SGDescriptor {
        __le64 Addr;
        __le32 Len;
        __le32 Ext;
-};
+} __packed;
 
 union MoreErrInfo {
        struct {
@@ -390,7 +393,8 @@ union MoreErrInfo {
                u8  offense_num;  /* byte # of offense 0-base */
                u32 offense_value;
        } Invalid_Cmd;
-};
+} __packed;
+
 struct ErrorInfo {
        u8               ScsiStatus;
        u8               SenseLen;
@@ -398,7 +402,7 @@ struct ErrorInfo {
        u32              ResidualCnt;
        union MoreErrInfo  MoreErrInfo;
        u8               SenseInfo[SENSEINFOBYTES];
-};
+} __packed;
 /* Command types */
 #define CMD_IOCTL_PEND  0x01
 #define CMD_SCSI       0x03
@@ -453,6 +457,15 @@ struct CommandList {
        atomic_t refcount; /* Must be last to avoid memset in hpsa_cmd_init() */
 } __aligned(COMMANDLIST_ALIGNMENT);
 
+/*
+ * Make sure our embedded atomic variable is aligned. Otherwise we break atomic
+ * operations on architectures that don't support unaligned atomics like IA64.
+ *
+ * The assert guards against reintroduction of an unwanted __packed
+ * attribute to struct CommandList.
+ */
+static_assert(offsetof(struct CommandList, refcount) % __alignof__(atomic_t) == 0);
+
 /* Max S/G elements in I/O accelerator command */
 #define IOACCEL1_MAXSGENTRIES           24
 #define IOACCEL2_MAXSGENTRIES          28
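The static_assert added above turns the alignment requirement into a build-time check. The same idea in a self-contained C11 snippet, using the standard static_assert from <assert.h> instead of the kernel's macro; struct demo is a stand-in, not the real CommandList:

#include <assert.h>
#include <stddef.h>

struct demo {
        char pad[3];
        int  refcount;   /* padded to offset 4 unless the struct is packed */
};

/* Compilation fails if the struct is ever packed and refcount becomes misaligned. */
static_assert(offsetof(struct demo, refcount) % _Alignof(int) == 0,
              "refcount must stay naturally aligned");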
@@ -489,7 +502,7 @@ struct io_accel1_cmd {
        __le64 host_addr;               /* 0x70 - 0x77 */
        u8  CISS_LUN[8];                /* 0x78 - 0x7F */
        struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES];
-} __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT);
+} __packed __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT);
 
 #define IOACCEL1_FUNCTION_SCSIIO        0x00
 #define IOACCEL1_SGLOFFSET              32
@@ -519,7 +532,7 @@ struct ioaccel2_sg_element {
        u8 chain_indicator;
 #define IOACCEL2_CHAIN 0x80
 #define IOACCEL2_LAST_SG 0x40
-};
+} __packed;
 
 /*
  * SCSI Response Format structure for IO Accelerator Mode 2
@@ -559,7 +572,7 @@ struct io_accel2_scsi_response {
        u8 sense_data_len;              /* sense/response data length */
        u8 resid_cnt[4];                /* residual count */
        u8 sense_data_buff[32];         /* sense/response data buffer */
-};
+} __packed;
 
 /*
  * Structure for I/O accelerator (mode 2 or m2) commands.
@@ -592,7 +605,7 @@ struct io_accel2_cmd {
        __le32 tweak_upper;             /* Encryption tweak, upper 4 bytes */
        struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES];
        struct io_accel2_scsi_response error_data;
-} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
+} __packed __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
 
 /*
  * defines for Mode 2 command struct
@@ -618,7 +631,7 @@ struct hpsa_tmf_struct {
        __le64 abort_tag;       /* cciss tag of SCSI cmd or TMF to abort */
        __le64 error_ptr;               /* Error Pointer */
        __le32 error_len;               /* Error Length */
-} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
+} __packed __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT);
 
 /* Configuration Table Structure */
 struct HostWrite {
@@ -626,7 +639,7 @@ struct HostWrite {
        __le32          command_pool_addr_hi;
        __le32          CoalIntDelay;
        __le32          CoalIntCount;
-};
+} __packed;
 
 #define SIMPLE_MODE     0x02
 #define PERFORMANT_MODE 0x04
@@ -675,7 +688,7 @@ struct CfgTable {
 #define                HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE (1 << 30)
 #define                HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE (1 << 31)
        __le32          clear_event_notify;
-};
+} __packed;
 
 #define NUM_BLOCKFETCH_ENTRIES 8
 struct TransTable_struct {
@@ -686,14 +699,14 @@ struct TransTable_struct {
        __le32          RepQCtrAddrHigh32;
 #define MAX_REPLY_QUEUES 64
        struct vals32  RepQAddr[MAX_REPLY_QUEUES];
-};
+} __packed;
 
 struct hpsa_pci_info {
        unsigned char   bus;
        unsigned char   dev_fn;
        unsigned short  domain;
        u32             board_id;
-};
+} __packed;
 
 struct bmic_identify_controller {
        u8      configured_logical_drive_count; /* offset 0 */
@@ -702,7 +715,7 @@ struct bmic_identify_controller {
        u8      pad2[136];
        u8      controller_mode;        /* offset 292 */
        u8      pad3[32];
-};
+} __packed;
 
 
 struct bmic_identify_physical_device {
@@ -845,7 +858,7 @@ struct bmic_identify_physical_device {
        u8     max_link_rate[256];
        u8     neg_phys_link_rate[256];
        u8     box_conn_name[8];
-} __attribute((aligned(512)));
+} __packed __attribute((aligned(512)));
 
 struct bmic_sense_subsystem_info {
        u8      primary_slot_number;
@@ -858,7 +871,7 @@ struct bmic_sense_subsystem_info {
        u8      secondary_array_serial_number[32];
        u8      secondary_cache_serial_number[32];
        u8      pad[332];
-};
+} __packed;
 
 struct bmic_sense_storage_box_params {
        u8      reserved[36];
@@ -870,7 +883,6 @@ struct bmic_sense_storage_box_params {
        u8      reserver_3[84];
        u8      phys_connector[2];
        u8      reserved_4[296];
-};
+} __packed;
 
-#pragma pack()
 #endif /* HPSA_CMD_H */
index 04633e5..4834219 100644 (file)
@@ -3179,9 +3179,10 @@ fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn)
        }
 }
 
-static void iscsi_start_session_recovery(struct iscsi_session *session,
-                                        struct iscsi_conn *conn, int flag)
+void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
 {
+       struct iscsi_conn *conn = cls_conn->dd_data;
+       struct iscsi_session *session = conn->session;
        int old_stop_stage;
 
        mutex_lock(&session->eh_mutex);
@@ -3239,27 +3240,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
        spin_unlock_bh(&session->frwd_lock);
        mutex_unlock(&session->eh_mutex);
 }
-
-void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
-{
-       struct iscsi_conn *conn = cls_conn->dd_data;
-       struct iscsi_session *session = conn->session;
-
-       switch (flag) {
-       case STOP_CONN_RECOVER:
-               cls_conn->state = ISCSI_CONN_FAILED;
-               break;
-       case STOP_CONN_TERM:
-               cls_conn->state = ISCSI_CONN_DOWN;
-               break;
-       default:
-               iscsi_conn_printk(KERN_ERR, conn,
-                                 "invalid stop flag %d\n", flag);
-               return;
-       }
-
-       iscsi_start_session_recovery(session, conn, flag);
-}
 EXPORT_SYMBOL_GPL(iscsi_conn_stop);
 
 int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
index 024e5a5..8b9a390 100644 (file)
@@ -201,18 +201,17 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
                memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
                task->total_xfer_len = qc->nbytes;
                task->num_scatter = qc->n_elem;
+               task->data_dir = qc->dma_dir;
+       } else if (qc->tf.protocol == ATA_PROT_NODATA) {
+               task->data_dir = DMA_NONE;
        } else {
                for_each_sg(qc->sg, sg, qc->n_elem, si)
                        xfer += sg_dma_len(sg);
 
                task->total_xfer_len = xfer;
                task->num_scatter = si;
-       }
-
-       if (qc->tf.protocol == ATA_PROT_NODATA)
-               task->data_dir = DMA_NONE;
-       else
                task->data_dir = qc->dma_dir;
+       }
        task->scatter = qc->sg;
        task->ata_task.retry_count = 1;
        task->task_state_flags = SAS_TASK_STATE_PENDING;
index 49bf2f7..31e5455 100644 (file)
@@ -223,7 +223,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
                PM8001_EVENT_LOG_SIZE;
        pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option         = 0x01;
        pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt          = 0x01;
-       for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
+       for (i = 0; i < pm8001_ha->max_q_num; i++) {
                pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt  =
                        PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30);
                pm8001_ha->inbnd_q_tbl[i].upper_base_addr       =
@@ -249,7 +249,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
                pm8001_ha->inbnd_q_tbl[i].producer_idx          = 0;
                pm8001_ha->inbnd_q_tbl[i].consumer_index        = 0;
        }
-       for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
+       for (i = 0; i < pm8001_ha->max_q_num; i++) {
                pm8001_ha->outbnd_q_tbl[i].element_size_cnt     =
                        PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30);
                pm8001_ha->outbnd_q_tbl[i].upper_base_addr      =
@@ -671,9 +671,9 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
        read_outbnd_queue_table(pm8001_ha);
        /* update main config table ,inbound table and outbound table */
        update_main_config_table(pm8001_ha);
-       for (i = 0; i < PM8001_MAX_INB_NUM; i++)
+       for (i = 0; i < pm8001_ha->max_q_num; i++)
                update_inbnd_queue_table(pm8001_ha, i);
-       for (i = 0; i < PM8001_MAX_OUTB_NUM; i++)
+       for (i = 0; i < pm8001_ha->max_q_num; i++)
                update_outbnd_queue_table(pm8001_ha, i);
        /* 8081 controller donot require these operations */
        if (deviceid != 0x8081 && deviceid != 0x0042) {
index 91074fd..441f015 100644 (file)
@@ -2474,9 +2474,22 @@ static void iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag)
         * it works.
         */
        mutex_lock(&conn_mutex);
+       switch (flag) {
+       case STOP_CONN_RECOVER:
+               conn->state = ISCSI_CONN_FAILED;
+               break;
+       case STOP_CONN_TERM:
+               conn->state = ISCSI_CONN_DOWN;
+               break;
+       default:
+               iscsi_cls_conn_printk(KERN_ERR, conn,
+                                     "invalid stop flag %d\n", flag);
+               goto unlock;
+       }
+
        conn->transport->stop_conn(conn, flag);
+unlock:
        mutex_unlock(&conn_mutex);
-
 }
 
 static void stop_conn_work_fn(struct work_struct *work)
@@ -2901,6 +2914,11 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
        default:
-               err = transport->set_param(conn, ev->u.set_param.param,
-                                          data, ev->u.set_param.len);
+               if ((conn->state == ISCSI_CONN_BOUND) ||
+                       (conn->state == ISCSI_CONN_UP)) {
+                       err = transport->set_param(conn, ev->u.set_param.param,
+                                       data, ev->u.set_param.len);
+               } else {
+                       return -ENOTCONN;
+               }
        }
 
        return err;
@@ -2960,6 +2980,7 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
                mutex_lock(&conn->ep_mutex);
                conn->ep = NULL;
                mutex_unlock(&conn->ep_mutex);
+               conn->state = ISCSI_CONN_FAILED;
        }
 
        transport->ep_disconnect(ep);
@@ -3727,6 +3748,8 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
                ev->r.retcode = transport->bind_conn(session, conn,
                                                ev->u.b_conn.transport_eph,
                                                ev->u.b_conn.is_leading);
+               if (!ev->r.retcode)
+                       conn->state = ISCSI_CONN_BOUND;
                mutex_unlock(&conn_mutex);
 
                if (ev->r.retcode || !transport->ep_connect)
@@ -3966,7 +3989,8 @@ iscsi_conn_attr(local_ipaddr, ISCSI_PARAM_LOCAL_IPADDR);
 static const char *const connection_state_names[] = {
        [ISCSI_CONN_UP] = "up",
        [ISCSI_CONN_DOWN] = "down",
-       [ISCSI_CONN_FAILED] = "failed"
+       [ISCSI_CONN_FAILED] = "failed",
+       [ISCSI_CONN_BOUND] = "bound"
 };
 
 static ssize_t show_conn_state(struct device *dev,
index 1e939a2..98a34ed 100644 (file)
@@ -541,7 +541,7 @@ int srp_reconnect_rport(struct srp_rport *rport)
        res = mutex_lock_interruptible(&rport->mutex);
        if (res)
                goto out;
-       if (rport->state != SRP_RPORT_FAIL_FAST)
+       if (rport->state != SRP_RPORT_FAIL_FAST && rport->state != SRP_RPORT_LOST)
                /*
                 * sdev state must be SDEV_TRANSPORT_OFFLINE, transition
                 * to SDEV_BLOCK is illegal. Calling scsi_target_unblock()
index c867607..d3d05e9 100644 (file)
@@ -6386,37 +6386,34 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request *req;
        unsigned long flags;
-       int free_slot, task_tag, err;
+       int task_tag, err;
 
        /*
-        * Get free slot, sleep if slots are unavailable.
-        * Even though we use wait_event() which sleeps indefinitely,
-        * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
+        * blk_get_request() is used here only to get a free tag.
         */
        req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);
 
        req->end_io_data = &wait;
-       free_slot = req->tag;
-       WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
        ufshcd_hold(hba, false);
 
        spin_lock_irqsave(host->host_lock, flags);
-       task_tag = hba->nutrs + free_slot;
+       blk_mq_start_request(req);
 
+       task_tag = req->tag;
        treq->req_header.dword_0 |= cpu_to_be32(task_tag);
 
-       memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
-       ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
+       memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
+       ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
 
        /* send command to the controller */
-       __set_bit(free_slot, &hba->outstanding_tasks);
+       __set_bit(task_tag, &hba->outstanding_tasks);
 
        /* Make sure descriptors are ready before ringing the task doorbell */
        wmb();
 
-       ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
+       ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
        /* Make sure that doorbell is committed immediately */
        wmb();
 
@@ -6436,24 +6433,24 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
                ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
                dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
                                __func__, tm_function);
-               if (ufshcd_clear_tm_cmd(hba, free_slot))
-                       dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
-                                       __func__, free_slot);
+               if (ufshcd_clear_tm_cmd(hba, task_tag))
+                       dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
+                                       __func__, task_tag);
                err = -ETIMEDOUT;
        } else {
                err = 0;
-               memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
+               memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
 
                ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
        }
 
        spin_lock_irqsave(hba->host->host_lock, flags);
-       __clear_bit(free_slot, &hba->outstanding_tasks);
+       __clear_bit(task_tag, &hba->outstanding_tasks);
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
+       ufshcd_release(hba);
        blk_put_request(req);
 
-       ufshcd_release(hba);
        return err;
 }
 
index a1b9be1..fde4edd 100644 (file)
@@ -186,7 +186,7 @@ struct qm_eqcr_entry {
        __be32 tag;
        struct qm_fd fd;
        u8 __reserved3[32];
-} __packed;
+} __packed __aligned(8);
 #define QM_EQCR_VERB_VBIT              0x80
 #define QM_EQCR_VERB_CMD_MASK          0x61    /* but only one value; */
 #define QM_EQCR_VERB_CMD_ENQUEUE       0x01
index 6268bfa..c3e379a 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/platform_device.h>
 #include <linux/printk.h>
 #include <linux/module.h>
-#include <linux/errno.h>
 #include <linux/io.h>
 #include <linux/reboot.h>
 
index f42954e..5bdfb15 100644 (file)
@@ -3,7 +3,6 @@
 
 #include <linux/acpi.h>
 #include <linux/clk.h>
-#include <linux/console.h>
 #include <linux/slab.h>
 #include <linux/dma-mapping.h>
 #include <linux/io.h>
@@ -92,14 +91,11 @@ struct geni_wrapper {
        struct device *dev;
        void __iomem *base;
        struct clk_bulk_data ahb_clks[NUM_AHB_CLKS];
-       struct geni_icc_path to_core;
 };
 
 static const char * const icc_path_names[] = {"qup-core", "qup-config",
                                                "qup-memory"};
 
-static struct geni_wrapper *earlycon_wrapper;
-
 #define QUP_HW_VER_REG                 0x4
 
 /* Common SE registers */
@@ -760,6 +756,9 @@ int geni_icc_get(struct geni_se *se, const char *icc_ddr)
        int i, err;
        const char *icc_names[] = {"qup-core", "qup-config", icc_ddr};
 
+       if (has_acpi_companion(se->dev))
+               return 0;
+
        for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
                if (!icc_names[i])
                        continue;
@@ -843,44 +842,11 @@ int geni_icc_disable(struct geni_se *se)
 }
 EXPORT_SYMBOL(geni_icc_disable);
 
-void geni_remove_earlycon_icc_vote(void)
-{
-       struct platform_device *pdev;
-       struct geni_wrapper *wrapper;
-       struct device_node *parent;
-       struct device_node *child;
-
-       if (!earlycon_wrapper)
-               return;
-
-       wrapper = earlycon_wrapper;
-       parent = of_get_next_parent(wrapper->dev->of_node);
-       for_each_child_of_node(parent, child) {
-               if (!of_device_is_compatible(child, "qcom,geni-se-qup"))
-                       continue;
-
-               pdev = of_find_device_by_node(child);
-               if (!pdev)
-                       continue;
-
-               wrapper = platform_get_drvdata(pdev);
-               icc_put(wrapper->to_core.path);
-               wrapper->to_core.path = NULL;
-
-       }
-       of_node_put(parent);
-
-       earlycon_wrapper = NULL;
-}
-EXPORT_SYMBOL(geni_remove_earlycon_icc_vote);
-
 static int geni_se_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct resource *res;
        struct geni_wrapper *wrapper;
-       struct console __maybe_unused *bcon;
-       bool __maybe_unused has_earlycon = false;
        int ret;
 
        wrapper = devm_kzalloc(dev, sizeof(*wrapper), GFP_KERNEL);
@@ -903,43 +869,6 @@ static int geni_se_probe(struct platform_device *pdev)
                }
        }
 
-#ifdef CONFIG_SERIAL_EARLYCON
-       for_each_console(bcon) {
-               if (!strcmp(bcon->name, "qcom_geni")) {
-                       has_earlycon = true;
-                       break;
-               }
-       }
-       if (!has_earlycon)
-               goto exit;
-
-       wrapper->to_core.path = devm_of_icc_get(dev, "qup-core");
-       if (IS_ERR(wrapper->to_core.path))
-               return PTR_ERR(wrapper->to_core.path);
-       /*
-        * Put minmal BW request on core clocks on behalf of early console.
-        * The vote will be removed earlycon exit function.
-        *
-        * Note: We are putting vote on each QUP wrapper instead only to which
-        * earlycon is connected because QUP core clock of different wrapper
-        * share same voltage domain. If core1 is put to 0, then core2 will
-        * also run at 0, if not voted. Default ICC vote will be removed ASA
-        * we touch any of the core clock.
-        * core1 = core2 = max(core1, core2)
-        */
-       ret = icc_set_bw(wrapper->to_core.path, GENI_DEFAULT_BW,
-                               GENI_DEFAULT_BW);
-       if (ret) {
-               dev_err(&pdev->dev, "%s: ICC BW voting failed for core: %d\n",
-                       __func__, ret);
-               return ret;
-       }
-
-       if (of_get_compatible_child(pdev->dev.of_node, "qcom,geni-debug-uart"))
-               earlycon_wrapper = wrapper;
-       of_node_put(pdev->dev.of_node);
-exit:
-#endif
        dev_set_drvdata(dev, wrapper);
        dev_dbg(dev, "GENI SE Driver probed\n");
        return devm_of_platform_populate(dev);
index b84f00b..4cabaf2 100644 (file)
@@ -1105,7 +1105,7 @@ struct rtllib_network {
        bool    bWithAironetIE;
        bool    bCkipSupported;
        bool    bCcxRmEnable;
-       u16     CcxRmState[2];
+       u8      CcxRmState[2];
        bool    bMBssidValid;
        u8      MBssidMask;
        u8      MBssid[ETH_ALEN];
index 66c1353..15bbb63 100644 (file)
@@ -1967,7 +1967,7 @@ static void rtllib_parse_mife_generic(struct rtllib_device *ieee,
            info_element->data[2] == 0x96 &&
            info_element->data[3] == 0x01) {
                if (info_element->len == 6) {
-                       memcpy(network->CcxRmState, &info_element[4], 2);
+                       memcpy(network->CcxRmState, &info_element->data[4], 2);
                        if (network->CcxRmState[0] != 0)
                                network->bCcxRmEnable = true;
                        else
index d0e7ed8..e5c443b 100644 (file)
@@ -1166,6 +1166,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
        target_get_sess_cmd(&cmd->se_cmd, true);
 
+       cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
        cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, hdr->cdb);
        if (cmd->sense_reason) {
                if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
@@ -1180,8 +1181,6 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        if (cmd->sense_reason)
                goto attach_cmd;
 
-       /* only used for printks or comparing with ->ref_task_tag */
-       cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
        cmd->sense_reason = target_cmd_parse_cdb(&cmd->se_cmd);
        if (cmd->sense_reason)
                goto attach_cmd;
index 620bcf5..c44fad2 100644 (file)
@@ -347,7 +347,7 @@ static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
        ret = tb_retimer_nvm_add(rt);
        if (ret) {
                dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
-               device_del(&rt->dev);
+               device_unregister(&rt->dev);
                return ret;
        }
 
@@ -406,7 +406,7 @@ static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
  */
 int tb_retimer_scan(struct tb_port *port)
 {
-       u32 status[TB_MAX_RETIMER_INDEX] = {};
+       u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
        int ret, i, last_idx = 0;
 
        if (!port->cap_usb4)
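The array fix above is a classic off-by-one: the retimer index appears to be 1-based, so storing entries for indices 1..TB_MAX_RETIMER_INDEX needs TB_MAX_RETIMER_INDEX + 1 slots. A tiny standalone illustration (MAX_INDEX is a placeholder value):

#include <stdio.h>

#define MAX_INDEX 6   /* placeholder for TB_MAX_RETIMER_INDEX */

int main(void)
{
        unsigned int status[MAX_INDEX + 1] = { 0 };   /* slot 0 unused */
        int i;

        for (i = 1; i <= MAX_INDEX; i++)    /* a MAX_INDEX-sized array would overflow here */
                status[i] = i;

        printf("last = %u\n", status[MAX_INDEX]);
        return 0;
}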
index 291649f..0d85b55 100644 (file)
@@ -1177,12 +1177,6 @@ static inline void qcom_geni_serial_enable_early_read(struct geni_se *se,
                                                      struct console *con) { }
 #endif
 
-static int qcom_geni_serial_earlycon_exit(struct console *con)
-{
-       geni_remove_earlycon_icc_vote();
-       return 0;
-}
-
 static struct qcom_geni_private_data earlycon_private_data;
 
 static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
@@ -1233,7 +1227,6 @@ static int __init qcom_geni_serial_earlycon_setup(struct earlycon_device *dev,
        writel(stop_bit_len, uport->membase + SE_UART_TX_STOP_BIT_LEN);
 
        dev->con->write = qcom_geni_serial_earlycon_write;
-       dev->con->exit = qcom_geni_serial_earlycon_exit;
        dev->con->setup = NULL;
        qcom_geni_serial_enable_early_read(&se, dev->con);
 
index f2ebbac..d7d4bdd 100644 (file)
@@ -1128,6 +1128,10 @@ static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep,
                return -ESHUTDOWN;
        }
 
+       /* Requests have already been dequeued while disabling the endpoint. */
+       if (!(pep->ep_state & EP_ENABLED))
+               return 0;
+
        spin_lock_irqsave(&pdev->lock, flags);
        ret = cdnsp_ep_dequeue(pep, to_cdnsp_request(request));
        spin_unlock_irqrestore(&pdev->lock, flags);
index 39ddb55..3fda1ec 100644 (file)
@@ -147,17 +147,29 @@ static inline int acm_set_control(struct acm *acm, int control)
 #define acm_send_break(acm, ms) \
        acm_ctrl_msg(acm, USB_CDC_REQ_SEND_BREAK, ms, NULL, 0)
 
-static void acm_kill_urbs(struct acm *acm)
+static void acm_poison_urbs(struct acm *acm)
 {
        int i;
 
-       usb_kill_urb(acm->ctrlurb);
+       usb_poison_urb(acm->ctrlurb);
        for (i = 0; i < ACM_NW; i++)
-               usb_kill_urb(acm->wb[i].urb);
+               usb_poison_urb(acm->wb[i].urb);
        for (i = 0; i < acm->rx_buflimit; i++)
-               usb_kill_urb(acm->read_urbs[i]);
+               usb_poison_urb(acm->read_urbs[i]);
+}
+
+static void acm_unpoison_urbs(struct acm *acm)
+{
+       int i;
+
+       for (i = 0; i < acm->rx_buflimit; i++)
+               usb_unpoison_urb(acm->read_urbs[i]);
+       for (i = 0; i < ACM_NW; i++)
+               usb_unpoison_urb(acm->wb[i].urb);
+       usb_unpoison_urb(acm->ctrlurb);
 }
 
+
 /*
  * Write buffer management.
  * All of these assume proper locks taken by the caller.
@@ -226,9 +238,10 @@ static int acm_start_wb(struct acm *acm, struct acm_wb *wb)
 
        rc = usb_submit_urb(wb->urb, GFP_ATOMIC);
        if (rc < 0) {
-               dev_err(&acm->data->dev,
-                       "%s - usb_submit_urb(write bulk) failed: %d\n",
-                       __func__, rc);
+               if (rc != -EPERM)
+                       dev_err(&acm->data->dev,
+                               "%s - usb_submit_urb(write bulk) failed: %d\n",
+                               __func__, rc);
                acm_write_done(acm, wb);
        }
        return rc;
@@ -313,8 +326,10 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
                        acm->iocount.dsr++;
                if (difference & ACM_CTRL_DCD)
                        acm->iocount.dcd++;
-               if (newctrl & ACM_CTRL_BRK)
+               if (newctrl & ACM_CTRL_BRK) {
                        acm->iocount.brk++;
+                       tty_insert_flip_char(&acm->port, 0, TTY_BREAK);
+               }
                if (newctrl & ACM_CTRL_RI)
                        acm->iocount.rng++;
                if (newctrl & ACM_CTRL_FRAMING)
@@ -480,11 +495,6 @@ static void acm_read_bulk_callback(struct urb *urb)
        dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n",
                rb->index, urb->actual_length, status);
 
-       if (!acm->dev) {
-               dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__);
-               return;
-       }
-
        switch (status) {
        case 0:
                usb_mark_last_busy(acm->dev);
@@ -649,7 +659,8 @@ static void acm_port_dtr_rts(struct tty_port *port, int raise)
 
        res = acm_set_control(acm, val);
        if (res && (acm->ctrl_caps & USB_CDC_CAP_LINE))
-               dev_err(&acm->control->dev, "failed to set dtr/rts\n");
+               /* This is broken in too many devices to spam the logs */
+               dev_dbg(&acm->control->dev, "failed to set dtr/rts\n");
 }
 
 static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
@@ -731,6 +742,7 @@ static void acm_port_shutdown(struct tty_port *port)
         * Need to grab write_lock to prevent race with resume, but no need to
         * hold it due to the tty-port initialised flag.
         */
+       acm_poison_urbs(acm);
        spin_lock_irq(&acm->write_lock);
        spin_unlock_irq(&acm->write_lock);
 
@@ -747,7 +759,8 @@ static void acm_port_shutdown(struct tty_port *port)
                usb_autopm_put_interface_async(acm->control);
        }
 
-       acm_kill_urbs(acm);
+       acm_unpoison_urbs(acm);
+
 }
 
 static void acm_tty_cleanup(struct tty_struct *tty)
@@ -1296,13 +1309,6 @@ skip_normal_probe:
        if (!combined_interfaces && intf != control_interface)
                return -ENODEV;
 
-       if (!combined_interfaces && usb_interface_claimed(data_interface)) {
-               /* valid in this context */
-               dev_dbg(&intf->dev, "The data interface isn't available\n");
-               return -EBUSY;
-       }
-
-
        if (data_interface->cur_altsetting->desc.bNumEndpoints < 2 ||
            control_interface->cur_altsetting->desc.bNumEndpoints == 0)
                return -EINVAL;
@@ -1323,8 +1329,8 @@ made_compressed_probe:
        dev_dbg(&intf->dev, "interfaces are valid\n");
 
        acm = kzalloc(sizeof(struct acm), GFP_KERNEL);
-       if (acm == NULL)
-               goto alloc_fail;
+       if (!acm)
+               return -ENOMEM;
 
        tty_port_init(&acm->port);
        acm->port.ops = &acm_port_ops;
@@ -1341,7 +1347,7 @@ made_compressed_probe:
 
        minor = acm_alloc_minor(acm);
        if (minor < 0)
-               goto alloc_fail1;
+               goto err_put_port;
 
        acm->minor = minor;
        acm->dev = usb_dev;
@@ -1372,15 +1378,15 @@ made_compressed_probe:
 
        buf = usb_alloc_coherent(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
        if (!buf)
-               goto alloc_fail1;
+               goto err_put_port;
        acm->ctrl_buffer = buf;
 
        if (acm_write_buffers_alloc(acm) < 0)
-               goto alloc_fail2;
+               goto err_free_ctrl_buffer;
 
        acm->ctrlurb = usb_alloc_urb(0, GFP_KERNEL);
        if (!acm->ctrlurb)
-               goto alloc_fail3;
+               goto err_free_write_buffers;
 
        for (i = 0; i < num_rx_buf; i++) {
                struct acm_rb *rb = &(acm->read_buffers[i]);
@@ -1389,13 +1395,13 @@ made_compressed_probe:
                rb->base = usb_alloc_coherent(acm->dev, readsize, GFP_KERNEL,
                                                                &rb->dma);
                if (!rb->base)
-                       goto alloc_fail4;
+                       goto err_free_read_urbs;
                rb->index = i;
                rb->instance = acm;
 
                urb = usb_alloc_urb(0, GFP_KERNEL);
                if (!urb)
-                       goto alloc_fail4;
+                       goto err_free_read_urbs;
 
                urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
                urb->transfer_dma = rb->dma;
@@ -1416,8 +1422,8 @@ made_compressed_probe:
                struct acm_wb *snd = &(acm->wb[i]);
 
                snd->urb = usb_alloc_urb(0, GFP_KERNEL);
-               if (snd->urb == NULL)
-                       goto alloc_fail5;
+               if (!snd->urb)
+                       goto err_free_write_urbs;
 
                if (usb_endpoint_xfer_int(epwrite))
                        usb_fill_int_urb(snd->urb, usb_dev, acm->out,
@@ -1435,7 +1441,7 @@ made_compressed_probe:
 
        i = device_create_file(&intf->dev, &dev_attr_bmCapabilities);
        if (i < 0)
-               goto alloc_fail5;
+               goto err_free_write_urbs;
 
        if (h.usb_cdc_country_functional_desc) { /* export the country data */
                struct usb_cdc_country_functional_desc * cfd =
@@ -1480,20 +1486,21 @@ skip_countries:
        acm->nb_index = 0;
        acm->nb_size = 0;
 
-       dev_info(&intf->dev, "ttyACM%d: USB ACM device\n", minor);
-
        acm->line.dwDTERate = cpu_to_le32(9600);
        acm->line.bDataBits = 8;
        acm_set_line(acm, &acm->line);
 
-       usb_driver_claim_interface(&acm_driver, data_interface, acm);
-       usb_set_intfdata(data_interface, acm);
+       if (!acm->combined_interfaces) {
+               rv = usb_driver_claim_interface(&acm_driver, data_interface, acm);
+               if (rv)
+                       goto err_remove_files;
+       }
 
        tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
                        &control_interface->dev);
        if (IS_ERR(tty_dev)) {
                rv = PTR_ERR(tty_dev);
-               goto alloc_fail6;
+               goto err_release_data_interface;
        }
 
        if (quirks & CLEAR_HALT_CONDITIONS) {
@@ -1501,32 +1508,39 @@ skip_countries:
                usb_clear_halt(usb_dev, acm->out);
        }
 
+       dev_info(&intf->dev, "ttyACM%d: USB ACM device\n", minor);
+
        return 0;
-alloc_fail6:
+
+err_release_data_interface:
+       if (!acm->combined_interfaces) {
+               /* Clear driver data so that disconnect() returns early. */
+               usb_set_intfdata(data_interface, NULL);
+               usb_driver_release_interface(&acm_driver, data_interface);
+       }
+err_remove_files:
        if (acm->country_codes) {
                device_remove_file(&acm->control->dev,
                                &dev_attr_wCountryCodes);
                device_remove_file(&acm->control->dev,
                                &dev_attr_iCountryCodeRelDate);
-               kfree(acm->country_codes);
        }
        device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
-alloc_fail5:
-       usb_set_intfdata(intf, NULL);
+err_free_write_urbs:
        for (i = 0; i < ACM_NW; i++)
                usb_free_urb(acm->wb[i].urb);
-alloc_fail4:
+err_free_read_urbs:
        for (i = 0; i < num_rx_buf; i++)
                usb_free_urb(acm->read_urbs[i]);
        acm_read_buffers_free(acm);
        usb_free_urb(acm->ctrlurb);
-alloc_fail3:
+err_free_write_buffers:
        acm_write_buffers_free(acm);
-alloc_fail2:
+err_free_ctrl_buffer:
        usb_free_coherent(usb_dev, ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
-alloc_fail1:
+err_put_port:
        tty_port_put(&acm->port);
-alloc_fail:
+
        return rv;
 }
 
@@ -1540,8 +1554,14 @@ static void acm_disconnect(struct usb_interface *intf)
        if (!acm)
                return;
 
-       mutex_lock(&acm->mutex);
        acm->disconnected = true;
+       /*
+        * There is a circular dependency: acm_softint() can resubmit
+        * the URBs in its error handling, so we need to block any
+        * submission right away.
+        */
+       acm_poison_urbs(acm);
+       mutex_lock(&acm->mutex);
        if (acm->country_codes) {
                device_remove_file(&acm->control->dev,
                                &dev_attr_wCountryCodes);
@@ -1560,7 +1580,6 @@ static void acm_disconnect(struct usb_interface *intf)
                tty_kref_put(tty);
        }
 
-       acm_kill_urbs(acm);
        cancel_delayed_work_sync(&acm->dwork);
 
        tty_unregister_device(acm_tty_driver, acm->minor);
@@ -1602,7 +1621,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
        if (cnt)
                return 0;
 
-       acm_kill_urbs(acm);
+       acm_poison_urbs(acm);
        cancel_delayed_work_sync(&acm->dwork);
        acm->urbs_in_error_delay = 0;
 
@@ -1615,6 +1634,7 @@ static int acm_resume(struct usb_interface *intf)
        struct urb *urb;
        int rv = 0;
 
+       acm_unpoison_urbs(acm);
        spin_lock_irq(&acm->write_lock);
 
        if (--acm->susp_count)
index 6ade3da..76ac5d6 100644 (file)
@@ -498,6 +498,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* DJI CineSSD */
        { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
 
+       /* Fibocom L850-GL LTE Modem */
+       { USB_DEVICE(0x2cb7, 0x0007), .driver_info =
+                       USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+
        /* INTEL VALUE SSD */
        { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
 
index fc3269f..1a9789e 100644 (file)
@@ -4322,7 +4322,8 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
        if (hsotg->op_state == OTG_STATE_B_PERIPHERAL)
                goto unlock;
 
-       if (hsotg->params.power_down > DWC2_POWER_DOWN_PARAM_PARTIAL)
+       if (hsotg->params.power_down != DWC2_POWER_DOWN_PARAM_PARTIAL ||
+           hsotg->flags.b.port_connect_status == 0)
                goto skip_power_saving;
 
        /*
@@ -5398,7 +5399,7 @@ int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg)
        dwc2_writel(hsotg, hprt0, HPRT0);
 
        /* Wait for the HPRT0.PrtSusp register field to be set */
-       if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000))
+       if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 5000))
                dev_warn(hsotg->dev, "Suspend wasn't generated\n");
 
        /*
index 3d3918a..4c5c697 100644 (file)
@@ -120,6 +120,8 @@ static const struct property_entry dwc3_pci_intel_properties[] = {
 static const struct property_entry dwc3_pci_mrfld_properties[] = {
        PROPERTY_ENTRY_STRING("dr_mode", "otg"),
        PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
+       PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
+       PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
        PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
        {}
 };
index fcaf044..3de291a 100644 (file)
@@ -244,6 +244,9 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom)
        struct device *dev = qcom->dev;
        int ret;
 
+       if (has_acpi_companion(dev))
+               return 0;
+
        qcom->icc_path_ddr = of_icc_get(dev, "usb-ddr");
        if (IS_ERR(qcom->icc_path_ddr)) {
                dev_err(dev, "failed to get usb-ddr path: %ld\n",
index 4a337f3..c7ef218 100644 (file)
@@ -791,10 +791,6 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
        reg &= ~DWC3_DALEPENA_EP(dep->number);
        dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
 
-       dep->stream_capable = false;
-       dep->type = 0;
-       dep->flags = 0;
-
        /* Clear out the ep descriptors for non-ep0 */
        if (dep->number > 1) {
                dep->endpoint.comp_desc = NULL;
@@ -803,6 +799,10 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
 
        dwc3_remove_requests(dwc, dep);
 
+       dep->stream_capable = false;
+       dep->type = 0;
+       dep->flags = 0;
+
        return 0;
 }
 
@@ -2083,7 +2083,7 @@ static void __dwc3_gadget_set_speed(struct dwc3 *dwc)
        u32                     reg;
 
        speed = dwc->gadget_max_speed;
-       if (speed > dwc->maximum_speed)
+       if (speed == USB_SPEED_UNKNOWN || speed > dwc->maximum_speed)
                speed = dwc->maximum_speed;
 
        if (speed == USB_SPEED_SUPER_PLUS &&
@@ -2523,6 +2523,7 @@ static void dwc3_gadget_set_ssp_rate(struct usb_gadget *g,
        unsigned long           flags;
 
        spin_lock_irqsave(&dwc->lock, flags);
+       dwc->gadget_max_speed = USB_SPEED_SUPER_PLUS;
        dwc->gadget_ssp_rate = rate;
        spin_unlock_irqrestore(&dwc->lock, flags);
 }
index 8d387e0..c80f9bd 100644 (file)
@@ -153,6 +153,11 @@ static int udc_pci_probe(
        pci_set_master(pdev);
        pci_try_set_mwi(pdev);
 
+       dev->phys_addr = resource;
+       dev->irq = pdev->irq;
+       dev->pdev = pdev;
+       dev->dev = &pdev->dev;
+
        /* init dma pools */
        if (use_dma) {
                retval = init_dma_pools(dev);
@@ -160,11 +165,6 @@ static int udc_pci_probe(
                        goto err_dma;
        }
 
-       dev->phys_addr = resource;
-       dev->irq = pdev->irq;
-       dev->pdev = pdev;
-       dev->dev = &pdev->dev;
-
        /* general probing */
        if (udc_probe(dev)) {
                retval = -ENODEV;
index fe010cc..2f27dc0 100644 (file)
@@ -397,6 +397,13 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci)
        xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
        if (mtk->lpm_support)
                xhci->quirks |= XHCI_LPM_SUPPORT;
+
+       /*
+        * MTK xHCI 0.96: PSA is 1 by default even if streams are not
+        * supported, and it's 3 when they are.
+        */
+       if (xhci->hci_version < 0x100 && HCC_MAX_PSA(xhci->hcc_params) == 4)
+               xhci->quirks |= XHCI_BROKEN_STREAMS;
 }
 
 /* called during probe() after chip reset completes */
@@ -548,7 +555,8 @@ static int xhci_mtk_probe(struct platform_device *pdev)
        if (ret)
                goto put_usb3_hcd;
 
-       if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
+       if (HCC_MAX_PSA(xhci->hcc_params) >= 4 &&
+           !(xhci->quirks & XHCI_BROKEN_STREAMS))
                xhci->shared_hcd->can_do_streams = 1;
 
        ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
index 1cd8772..fc0457d 100644 (file)
@@ -2004,10 +2004,14 @@ static void musb_pm_runtime_check_session(struct musb *musb)
                MUSB_DEVCTL_HR;
        switch (devctl & ~s) {
        case MUSB_QUIRK_B_DISCONNECT_99:
-               musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
-               schedule_delayed_work(&musb->irq_work,
-                                     msecs_to_jiffies(1000));
-               break;
+               if (musb->quirk_retries && !musb->flush_irq_work) {
+                       musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
+                       schedule_delayed_work(&musb->irq_work,
+                                             msecs_to_jiffies(1000));
+                       musb->quirk_retries--;
+                       break;
+               }
+               fallthrough;
        case MUSB_QUIRK_B_INVALID_VBUS_91:
                if (musb->quirk_retries && !musb->flush_irq_work) {
                        musb_dbg(musb,
index 8f1de1f..d8d3892 100644 (file)
@@ -63,6 +63,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
 
                dev_info(dev, "stub up\n");
 
+               mutex_lock(&sdev->ud.sysfs_lock);
                spin_lock_irq(&sdev->ud.lock);
 
                if (sdev->ud.status != SDEV_ST_AVAILABLE) {
@@ -87,13 +88,13 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
                tcp_rx = kthread_create(stub_rx_loop, &sdev->ud, "stub_rx");
                if (IS_ERR(tcp_rx)) {
                        sockfd_put(socket);
-                       return -EINVAL;
+                       goto unlock_mutex;
                }
                tcp_tx = kthread_create(stub_tx_loop, &sdev->ud, "stub_tx");
                if (IS_ERR(tcp_tx)) {
                        kthread_stop(tcp_rx);
                        sockfd_put(socket);
-                       return -EINVAL;
+                       goto unlock_mutex;
                }
 
                /* get task structs now */
@@ -112,6 +113,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
                wake_up_process(sdev->ud.tcp_rx);
                wake_up_process(sdev->ud.tcp_tx);
 
+               mutex_unlock(&sdev->ud.sysfs_lock);
+
        } else {
                dev_info(dev, "stub down\n");
 
@@ -122,6 +125,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
                spin_unlock_irq(&sdev->ud.lock);
 
                usbip_event_add(&sdev->ud, SDEV_EVENT_DOWN);
+               mutex_unlock(&sdev->ud.sysfs_lock);
        }
 
        return count;
@@ -130,6 +134,8 @@ sock_err:
        sockfd_put(socket);
 err:
        spin_unlock_irq(&sdev->ud.lock);
+unlock_mutex:
+       mutex_unlock(&sdev->ud.sysfs_lock);
        return -EINVAL;
 }
 static DEVICE_ATTR_WO(usbip_sockfd);
@@ -270,6 +276,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev)
        sdev->ud.side           = USBIP_STUB;
        sdev->ud.status         = SDEV_ST_AVAILABLE;
        spin_lock_init(&sdev->ud.lock);
+       mutex_init(&sdev->ud.sysfs_lock);
        sdev->ud.tcp_socket     = NULL;
        sdev->ud.sockfd         = -1;
 
index d60ce17..ea2a20e 100644 (file)
@@ -263,6 +263,9 @@ struct usbip_device {
        /* lock for status */
        spinlock_t lock;
 
+       /* mutex for synchronizing sysfs store paths */
+       struct mutex sysfs_lock;
+
        int sockfd;
        struct socket *tcp_socket;
 
index 5d88917..086ca76 100644 (file)
@@ -70,6 +70,7 @@ static void event_handler(struct work_struct *work)
        while ((ud = get_event()) != NULL) {
                usbip_dbg_eh("pending event %lx\n", ud->event);
 
+               mutex_lock(&ud->sysfs_lock);
                /*
                 * NOTE: shutdown must come first.
                 * Shutdown the device.
@@ -90,6 +91,7 @@ static void event_handler(struct work_struct *work)
                        ud->eh_ops.unusable(ud);
                        unset_event(ud, USBIP_EH_UNUSABLE);
                }
+               mutex_unlock(&ud->sysfs_lock);
 
                wake_up(&ud->eh_waitq);
        }
index 3209b5d..4ba6bcd 100644 (file)
@@ -594,6 +594,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                                pr_err("invalid port number %d\n", wIndex);
                                goto error;
                        }
+                       if (wValue >= 32)
+                               goto error;
                        if (hcd->speed == HCD_USB3) {
                                if ((vhci_hcd->port_status[rhport] &
                                     USB_SS_PORT_STAT_POWER) != 0) {
@@ -1099,6 +1101,7 @@ static void vhci_device_init(struct vhci_device *vdev)
        vdev->ud.side   = USBIP_VHCI;
        vdev->ud.status = VDEV_ST_NULL;
        spin_lock_init(&vdev->ud.lock);
+       mutex_init(&vdev->ud.sysfs_lock);
 
        INIT_LIST_HEAD(&vdev->priv_rx);
        INIT_LIST_HEAD(&vdev->priv_tx);
index c4b4256..e2847cd 100644 (file)
@@ -185,6 +185,8 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
 
        usbip_dbg_vhci_sysfs("enter\n");
 
+       mutex_lock(&vdev->ud.sysfs_lock);
+
        /* lock */
        spin_lock_irqsave(&vhci->lock, flags);
        spin_lock(&vdev->ud.lock);
@@ -195,6 +197,7 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
                /* unlock */
                spin_unlock(&vdev->ud.lock);
                spin_unlock_irqrestore(&vhci->lock, flags);
+               mutex_unlock(&vdev->ud.sysfs_lock);
 
                return -EINVAL;
        }
@@ -205,6 +208,8 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
 
        usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN);
 
+       mutex_unlock(&vdev->ud.sysfs_lock);
+
        return 0;
 }
 
@@ -349,30 +354,36 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
        else
                vdev = &vhci->vhci_hcd_hs->vdev[rhport];
 
+       mutex_lock(&vdev->ud.sysfs_lock);
+
        /* Extract socket from fd. */
        socket = sockfd_lookup(sockfd, &err);
        if (!socket) {
                dev_err(dev, "failed to lookup sock");
-               return -EINVAL;
+               err = -EINVAL;
+               goto unlock_mutex;
        }
        if (socket->type != SOCK_STREAM) {
                dev_err(dev, "Expecting SOCK_STREAM - found %d",
                        socket->type);
                sockfd_put(socket);
-               return -EINVAL;
+               err = -EINVAL;
+               goto unlock_mutex;
        }
 
        /* create threads before locking */
        tcp_rx = kthread_create(vhci_rx_loop, &vdev->ud, "vhci_rx");
        if (IS_ERR(tcp_rx)) {
                sockfd_put(socket);
-               return -EINVAL;
+               err = -EINVAL;
+               goto unlock_mutex;
        }
        tcp_tx = kthread_create(vhci_tx_loop, &vdev->ud, "vhci_tx");
        if (IS_ERR(tcp_tx)) {
                kthread_stop(tcp_rx);
                sockfd_put(socket);
-               return -EINVAL;
+               err = -EINVAL;
+               goto unlock_mutex;
        }
 
        /* get task structs now */
@@ -397,7 +408,8 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
                 * Will be retried from userspace
                 * if there's another free port.
                 */
-               return -EBUSY;
+               err = -EBUSY;
+               goto unlock_mutex;
        }
 
        dev_info(dev, "pdev(%u) rhport(%u) sockfd(%d)\n",
@@ -423,7 +435,15 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
 
        rh_port_connect(vdev, speed);
 
+       dev_info(dev, "Device attached\n");
+
+       mutex_unlock(&vdev->ud.sysfs_lock);
+
        return count;
+
+unlock_mutex:
+       mutex_unlock(&vdev->ud.sysfs_lock);
+       return err;
 }
 static DEVICE_ATTR_WO(attach);
 
index c8eeabd..2bc428f 100644 (file)
@@ -572,6 +572,7 @@ static int init_vudc_hw(struct vudc *udc)
        init_waitqueue_head(&udc->tx_waitq);
 
        spin_lock_init(&ud->lock);
+       mutex_init(&ud->sysfs_lock);
        ud->status = SDEV_ST_AVAILABLE;
        ud->side = USBIP_VUDC;
 
index 7383a54..f7633ee 100644 (file)
@@ -112,6 +112,7 @@ static ssize_t usbip_sockfd_store(struct device *dev,
                dev_err(dev, "no device");
                return -ENODEV;
        }
+       mutex_lock(&udc->ud.sysfs_lock);
        spin_lock_irqsave(&udc->lock, flags);
        /* Don't export what we don't have */
        if (!udc->driver || !udc->pullup) {
@@ -187,6 +188,8 @@ static ssize_t usbip_sockfd_store(struct device *dev,
 
                wake_up_process(udc->ud.tcp_rx);
                wake_up_process(udc->ud.tcp_tx);
+
+               mutex_unlock(&udc->ud.sysfs_lock);
                return count;
 
        } else {
@@ -207,6 +210,7 @@ static ssize_t usbip_sockfd_store(struct device *dev,
        }
 
        spin_unlock_irqrestore(&udc->lock, flags);
+       mutex_unlock(&udc->ud.sysfs_lock);
 
        return count;
 
@@ -216,6 +220,7 @@ unlock_ud:
        spin_unlock_irq(&udc->ud.lock);
 unlock:
        spin_unlock_irqrestore(&udc->lock, flags);
+       mutex_unlock(&udc->ud.sysfs_lock);
 
        return ret;
 }
index 08f742f..b6cc53b 100644 (file)
@@ -4,9 +4,13 @@
 #ifndef __MLX5_VDPA_H__
 #define __MLX5_VDPA_H__
 
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
 #include <linux/vdpa.h>
 #include <linux/mlx5/driver.h>
 
+#define MLX5V_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
+
 struct mlx5_vdpa_direct_mr {
        u64 start;
        u64 end;
index d300f79..3908ff2 100644 (file)
@@ -219,6 +219,11 @@ static void destroy_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_m
        mlx5_vdpa_destroy_mkey(mvdev, &mkey->mkey);
 }
 
+static struct device *get_dma_device(struct mlx5_vdpa_dev *mvdev)
+{
+       return &mvdev->mdev->pdev->dev;
+}
+
 static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr,
                         struct vhost_iotlb *iotlb)
 {
@@ -234,7 +239,7 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
        u64 pa;
        u64 paend;
        struct scatterlist *sg;
-       struct device *dma = mvdev->mdev->device;
+       struct device *dma = get_dma_device(mvdev);
 
        for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
             map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) {
@@ -291,7 +296,7 @@ err_map:
 
 static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
 {
-       struct device *dma = mvdev->mdev->device;
+       struct device *dma = get_dma_device(mvdev);
 
        destroy_direct_mr(mvdev, mr);
        dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
index 96e6421..6521cbd 100644 (file)
@@ -246,7 +246,8 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
        if (err)
                goto err_key;
 
-       kick_addr = pci_resource_start(mdev->pdev, 0) + offset;
+       kick_addr = mdev->bar_addr + offset;
+
        res->kick_addr = ioremap(kick_addr, PAGE_SIZE);
        if (!res->kick_addr) {
                err = -ENOMEM;
index 71397fd..4d2809c 100644 (file)
@@ -820,7 +820,7 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
        MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn);
        MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent);
        MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0,
-                !!(ndev->mvdev.actual_features & VIRTIO_F_VERSION_1));
+                !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1)));
        MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
        MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
        MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
@@ -1169,6 +1169,7 @@ static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *m
                return;
        }
        mvq->avail_idx = attr.available_index;
+       mvq->used_idx = attr.used_index;
 }
 
 static void suspend_vqs(struct mlx5_vdpa_net *ndev)
@@ -1426,6 +1427,7 @@ static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx,
                return -EINVAL;
        }
 
+       mvq->used_idx = state->avail_index;
        mvq->avail_idx = state->avail_index;
        return 0;
 }
@@ -1443,7 +1445,11 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
         * that cares about emulating the index after vq is stopped.
         */
        if (!mvq->initialized) {
-               state->avail_index = mvq->avail_idx;
+               /* Firmware returns a wrong value for the available index.
+                * Since both values should be identical, we take the value of
+                * used_idx which is reported correctly.
+                */
+               state->avail_index = mvq->used_idx;
                return 0;
        }
 
@@ -1452,7 +1458,7 @@ static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa
                mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n");
                return err;
        }
-       state->avail_index = attr.available_index;
+       state->avail_index = attr.used_index;
        return 0;
 }
 
@@ -1540,21 +1546,11 @@ static void teardown_virtqueues(struct mlx5_vdpa_net *ndev)
        }
 }
 
-static void clear_virtqueues(struct mlx5_vdpa_net *ndev)
-{
-       int i;
-
-       for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) {
-               ndev->vqs[i].avail_idx = 0;
-               ndev->vqs[i].used_idx = 0;
-       }
-}
-
 /* TODO: cross-endian support */
 static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
 {
        return virtio_legacy_is_little_endian() ||
-               (mvdev->actual_features & (1ULL << VIRTIO_F_VERSION_1));
+               (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1));
 }
 
 static __virtio16 cpu_to_mlx5vdpa16(struct mlx5_vdpa_dev *mvdev, u16 val)
@@ -1785,7 +1781,6 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
        if (!status) {
                mlx5_vdpa_info(mvdev, "performing device reset\n");
                teardown_driver(ndev);
-               clear_virtqueues(ndev);
                mlx5_vdpa_destroy_mr(&ndev->mvdev);
                ndev->mvdev.status = 0;
                ndev->mvdev.mlx_features = 0;
@@ -1907,6 +1902,19 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
        .free = mlx5_vdpa_free,
 };
 
+static int query_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
+{
+       u16 hw_mtu;
+       int err;
+
+       err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
+       if (err)
+               return err;
+
+       *mtu = hw_mtu - MLX5V_ETH_HARD_MTU;
+       return 0;
+}
+
 static int alloc_resources(struct mlx5_vdpa_net *ndev)
 {
        struct mlx5_vdpa_net_resources *res = &ndev->res;
@@ -1992,7 +2000,7 @@ static int mlx5v_probe(struct auxiliary_device *adev,
        init_mvqs(ndev);
        mutex_init(&ndev->reslock);
        config = &ndev->config;
-       err = mlx5_query_nic_vport_mtu(mdev, &ndev->mtu);
+       err = query_mtu(mdev, &ndev->mtu);
        if (err)
                goto err_mtu;
 
index ac3c1dd..4abddbe 100644 (file)
@@ -42,6 +42,6 @@ config VFIO_PCI_IGD
 
 config VFIO_PCI_NVLINK2
        def_bool y
-       depends on VFIO_PCI && PPC_POWERNV
+       depends on VFIO_PCI && PPC_POWERNV && SPAPR_TCE_IOMMU
        help
          VFIO PCI support for P9 Witherspoon machine with NVIDIA V100 GPUs
index 65e7e6b..5023e23 100644 (file)
@@ -1656,6 +1656,8 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
 
        index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
 
+       if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
+               return -EINVAL;
        if (vma->vm_end < vma->vm_start)
                return -EINVAL;
        if ((vma->vm_flags & VM_SHARED) == 0)
@@ -1664,7 +1666,7 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
                int regnum = index - VFIO_PCI_NUM_REGIONS;
                struct vfio_pci_region *region = vdev->region + regnum;
 
-               if (region && region->ops && region->ops->mmap &&
+               if (region->ops && region->ops->mmap &&
                    (region->flags & VFIO_REGION_INFO_FLAG_MMAP))
                        return region->ops->mmap(vdev, region, vma);
                return -EINVAL;
index be44440..45cbfd4 100644 (file)
@@ -739,6 +739,12 @@ out:
        ret = vfio_lock_acct(dma, lock_acct, false);
 
 unpin_out:
+       if (batch->size == 1 && !batch->offset) {
+               /* May be a VM_PFNMAP pfn, which the batch can't remember. */
+               put_pfn(pfn, dma->prot);
+               batch->size = 0;
+       }
+
        if (ret < 0) {
                if (pinned && !rsvd) {
                        for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
index 44a5cd2..3406067 100644 (file)
@@ -1333,6 +1333,9 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
 
        ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
 
+       if (!ops->cursor)
+               return;
+
        ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
                    get_color(vc, info, c, 0));
 }
index c8b0ae6..4dc9077 100644 (file)
@@ -1031,7 +1031,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
                        PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
                if (!pdev) {
                        pr_err("Unable to find PCI Hyper-V video\n");
-                       kfree(info->apertures);
                        return -ENODEV;
                }
 
@@ -1129,7 +1128,6 @@ getmem_done:
        } else {
                pci_dev_put(pdev);
        }
-       kfree(info->apertures);
 
        return 0;
 
@@ -1141,7 +1139,6 @@ err2:
 err1:
        if (!gen2vm)
                pci_dev_put(pdev);
-       kfree(info->apertures);
 
        return -ENOMEM;
 }
index e5dcb26..1635f42 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Watchdog driver for Marvell Armada 37xx SoCs
  *
- * Author: Marek Behun <marek.behun@nic.cz>
+ * Author: Marek Behún <kabel@kernel.org>
  */
 
 #include <linux/clk.h>
@@ -366,7 +366,7 @@ static struct platform_driver armada_37xx_wdt_driver = {
 
 module_platform_driver(armada_37xx_wdt_driver);
 
-MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_AUTHOR("Marek Behun <kabel@kernel.org>");
 MODULE_DESCRIPTION("Armada 37xx CPU Watchdog");
 
 MODULE_LICENSE("GPL v2");
index 8236e23..7bbfd58 100644 (file)
@@ -110,7 +110,7 @@ struct irq_info {
        unsigned short eoi_cpu; /* EOI must happen on this cpu-1 */
        unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
        u64 eoi_time;           /* Time in jiffies when to EOI. */
-       spinlock_t lock;
+       raw_spinlock_t lock;
 
        union {
                unsigned short virq;
@@ -312,7 +312,7 @@ static int xen_irq_info_common_setup(struct irq_info *info,
        info->evtchn = evtchn;
        info->cpu = cpu;
        info->mask_reason = EVT_MASK_REASON_EXPLICIT;
-       spin_lock_init(&info->lock);
+       raw_spin_lock_init(&info->lock);
 
        ret = set_evtchn_to_irq(evtchn, irq);
        if (ret < 0)
@@ -472,28 +472,28 @@ static void do_mask(struct irq_info *info, u8 reason)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&info->lock, flags);
+       raw_spin_lock_irqsave(&info->lock, flags);
 
        if (!info->mask_reason)
                mask_evtchn(info->evtchn);
 
        info->mask_reason |= reason;
 
-       spin_unlock_irqrestore(&info->lock, flags);
+       raw_spin_unlock_irqrestore(&info->lock, flags);
 }
 
 static void do_unmask(struct irq_info *info, u8 reason)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&info->lock, flags);
+       raw_spin_lock_irqsave(&info->lock, flags);
 
        info->mask_reason &= ~reason;
 
        if (!info->mask_reason)
                unmask_evtchn(info->evtchn);
 
-       spin_unlock_irqrestore(&info->lock, flags);
+       raw_spin_unlock_irqrestore(&info->lock, flags);
 }
 
 #ifdef CONFIG_X86
index 28d583f..09d6f72 100644 (file)
@@ -275,6 +275,8 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
                bio.bi_opf = dio_bio_write_op(iocb);
                task_io_account_write(ret);
        }
+       if (iocb->ki_flags & IOCB_NOWAIT)
+               bio.bi_opf |= REQ_NOWAIT;
        if (iocb->ki_flags & IOCB_HIPRI)
                bio_set_polled(&bio, iocb);
 
@@ -428,6 +430,8 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                        bio->bi_opf = dio_bio_write_op(iocb);
                        task_io_account_write(bio->bi_iter.bi_size);
                }
+               if (iocb->ki_flags & IOCB_NOWAIT)
+                       bio->bi_opf |= REQ_NOWAIT;
 
                dio->size += bio->bi_iter.bi_size;
                pos += bio->bi_iter.bi_size;
index 1f972b7..eeb3ebe 100644 (file)
 /* Pseudo write pointer value for conventional zone */
 #define WP_CONVENTIONAL ((u64)-2)
 
+/*
+ * Location of the first zone of superblock logging zone pairs.
+ *
+ * - primary superblock:    0B (zone 0)
+ * - first copy:          512G (zone starting at that offset)
+ * - second copy:           4T (zone starting at that offset)
+ */
+#define BTRFS_SB_LOG_PRIMARY_OFFSET    (0ULL)
+#define BTRFS_SB_LOG_FIRST_OFFSET      (512ULL * SZ_1G)
+#define BTRFS_SB_LOG_SECOND_OFFSET     (4096ULL * SZ_1G)
+
+#define BTRFS_SB_LOG_FIRST_SHIFT       const_ilog2(BTRFS_SB_LOG_FIRST_OFFSET)
+#define BTRFS_SB_LOG_SECOND_SHIFT      const_ilog2(BTRFS_SB_LOG_SECOND_OFFSET)
+
 /* Number of superblock log zones */
 #define BTRFS_NR_SB_LOG_ZONES 2
 
+/*
+ * Maximum supported zone size. Currently, SMR disks have a zone size of
+ * 256MiB, and we are expecting ZNS drives to be in the 1-4GiB range. We do not
+ * expect the zone size to become larger than 8GiB in the near future.
+ */
+#define BTRFS_MAX_ZONE_SIZE            SZ_8G
+
 static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data)
 {
        struct blk_zone *zones = data;
@@ -111,23 +132,22 @@ static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones,
 }
 
 /*
- * The following zones are reserved as the circular buffer on ZONED btrfs.
- *  - The primary superblock: zones 0 and 1
- *  - The first copy: zones 16 and 17
- *  - The second copy: zones 1024 or zone at 256GB which is minimum, and
- *                     the following one
+ * Get the first zone number of the superblock mirror
  */
 static inline u32 sb_zone_number(int shift, int mirror)
 {
-       ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX);
+       u64 zone;
 
+       ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX);
        switch (mirror) {
-       case 0: return 0;
-       case 1: return 16;
-       case 2: return min_t(u64, btrfs_sb_offset(mirror) >> shift, 1024);
+       case 0: zone = 0; break;
+       case 1: zone = 1ULL << (BTRFS_SB_LOG_FIRST_SHIFT - shift); break;
+       case 2: zone = 1ULL << (BTRFS_SB_LOG_SECOND_SHIFT - shift); break;
        }
 
-       return 0;
+       ASSERT(zone <= U32_MAX);
+
+       return (u32)zone;
 }
 
 /*
@@ -300,10 +320,21 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
                zone_sectors = bdev_zone_sectors(bdev);
        }
 
-       nr_sectors = bdev_nr_sectors(bdev);
        /* Check if it's power of 2 (see is_power_of_2) */
        ASSERT(zone_sectors != 0 && (zone_sectors & (zone_sectors - 1)) == 0);
        zone_info->zone_size = zone_sectors << SECTOR_SHIFT;
+
+       /* We reject devices with a zone size larger than 8GiB */
+       if (zone_info->zone_size > BTRFS_MAX_ZONE_SIZE) {
+               btrfs_err_in_rcu(fs_info,
+               "zoned: %s: zone size %llu larger than supported maximum %llu",
+                                rcu_str_deref(device->name),
+                                zone_info->zone_size, BTRFS_MAX_ZONE_SIZE);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       nr_sectors = bdev_nr_sectors(bdev);
        zone_info->zone_size_shift = ilog2(zone_info->zone_size);
        zone_info->max_zone_append_size =
                (u64)queue_max_zone_append_sectors(queue) << SECTOR_SHIFT;
index fe03cbd..bf52e93 100644 (file)
@@ -18,6 +18,7 @@ config CIFS
        select CRYPTO_AES
        select CRYPTO_LIB_DES
        select KEYS
+       select DNS_RESOLVER
        help
          This is the client VFS module for the SMB3 family of NAS protocols,
          (including support for the most recent, most secure dialect SMB3.1.1)
@@ -112,7 +113,6 @@ config CIFS_WEAK_PW_HASH
 config CIFS_UPCALL
        bool "Kerberos/SPNEGO advanced session setup"
        depends on CIFS
-       select DNS_RESOLVER
        help
          Enables an upcall mechanism for CIFS which accesses userspace helper
          utilities to provide SPNEGO packaged (RFC 4178) Kerberos tickets
@@ -179,7 +179,6 @@ config CIFS_DEBUG_DUMP_KEYS
 config CIFS_DFS_UPCALL
        bool "DFS feature support"
        depends on CIFS
-       select DNS_RESOLVER
        help
          Distributed File System (DFS) support is used to access shares
          transparently in an enterprise name space, even if the share
index 5213b20..3ee3b7d 100644 (file)
@@ -10,13 +10,14 @@ cifs-y := trace.o cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o \
          cifs_unicode.o nterr.o cifsencrypt.o \
          readdir.o ioctl.o sess.o export.o smb1ops.o unc.o winucase.o \
          smb2ops.o smb2maperror.o smb2transport.o \
-         smb2misc.o smb2pdu.o smb2inode.o smb2file.o cifsacl.o fs_context.o
+         smb2misc.o smb2pdu.o smb2inode.o smb2file.o cifsacl.o fs_context.o \
+         dns_resolve.o
 
 cifs-$(CONFIG_CIFS_XATTR) += xattr.o
 
 cifs-$(CONFIG_CIFS_UPCALL) += cifs_spnego.o
 
-cifs-$(CONFIG_CIFS_DFS_UPCALL) += dns_resolve.o cifs_dfs_ref.o dfs_cache.o
+cifs-$(CONFIG_CIFS_DFS_UPCALL) += cifs_dfs_ref.o dfs_cache.o
 
 cifs-$(CONFIG_CIFS_SWN_UPCALL) += netlink.o cifs_swn.o
 
index 099ad9f..5ddd20b 100644 (file)
@@ -476,7 +476,8 @@ static int cifs_show_devname(struct seq_file *m, struct dentry *root)
                seq_puts(m, "none");
        else {
                convert_delimiter(devname, '/');
-               seq_puts(m, devname);
+               /* escape all spaces in share names */
+               seq_escape(m, devname, " \t");
                kfree(devname);
        }
        return 0;
index 67c056a..ec824ab 100644 (file)
@@ -1283,8 +1283,6 @@ struct cifs_aio_ctx {
        bool                    direct_io;
 };
 
-struct cifs_readdata;
-
 /* asynchronous read support */
 struct cifs_readdata {
        struct kref                     refcount;
index eec8a20..24668eb 100644 (file)
@@ -87,7 +87,6 @@ static void cifs_prune_tlinks(struct work_struct *work);
  *
  * This should be called with server->srv_mutex held.
  */
-#ifdef CONFIG_CIFS_DFS_UPCALL
 static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
 {
        int rc;
@@ -124,6 +123,7 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
        return !rc ? -1 : 0;
 }
 
+#ifdef CONFIG_CIFS_DFS_UPCALL
 /* These functions must be called with server->srv_mutex held */
 static void reconn_set_next_dfs_target(struct TCP_Server_Info *server,
                                       struct cifs_sb_info *cifs_sb,
@@ -321,14 +321,29 @@ cifs_reconnect(struct TCP_Server_Info *server)
 #endif
 
 #ifdef CONFIG_CIFS_DFS_UPCALL
+               if (cifs_sb && cifs_sb->origin_fullpath)
                        /*
                         * Set up next DFS target server (if any) for reconnect. If DFS
                         * feature is disabled, then we will retry last server we
                         * connected to before.
                         */
                        reconn_set_next_dfs_target(server, cifs_sb, &tgt_list, &tgt_it);
+               else {
+#endif
+                       /*
+                        * Resolve the hostname again to make sure that the IP address is up-to-date.
+                        */
+                       rc = reconn_set_ipaddr_from_hostname(server);
+                       if (rc) {
+                               cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
+                                               __func__, rc);
+                       }
+
+#ifdef CONFIG_CIFS_DFS_UPCALL
+               }
 #endif
 
+
 #ifdef CONFIG_CIFS_SWN_UPCALL
                }
 #endif
index b61491b..b2e86e7 100644 (file)
@@ -812,6 +812,7 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
                    struct buffer_head *map_bh)
 {
        int ret = 0;
+       int boundary = sdio->boundary;  /* dio_send_cur_page may clear it */
 
        if (dio->op == REQ_OP_WRITE) {
                /*
@@ -850,10 +851,10 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
        sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits;
 out:
        /*
-        * If sdio->boundary then we want to schedule the IO now to
+        * If boundary then we want to schedule the IO now to
         * avoid metadata seeks.
         */
-       if (sdio->boundary) {
+       if (boundary) {
                ret = dio_send_cur_page(dio, sdio, map_bh);
                if (sdio->bio)
                        dio_bio_submit(dio, sdio);
index f3a4bac..f633348 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -629,17 +629,30 @@ int close_fd(unsigned fd)
 }
 EXPORT_SYMBOL(close_fd); /* for ksys_close() */
 
+/**
+ * last_fd - return last valid index into fd table
+ * @fdt: file descriptor table
+ *
+ * Context: Either rcu read lock or files_lock must be held.
+ *
+ * Returns: Last valid index into fdtable.
+ */
+static inline unsigned last_fd(struct fdtable *fdt)
+{
+       return fdt->max_fds - 1;
+}
+
 static inline void __range_cloexec(struct files_struct *cur_fds,
                                   unsigned int fd, unsigned int max_fd)
 {
        struct fdtable *fdt;
 
-       if (fd > max_fd)
-               return;
-
+       /* make sure we're using the correct maximum value */
        spin_lock(&cur_fds->file_lock);
        fdt = files_fdtable(cur_fds);
-       bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
+       max_fd = min(last_fd(fdt), max_fd);
+       if (fd <= max_fd)
+               bitmap_set(fdt->close_on_exec, fd, max_fd - fd + 1);
        spin_unlock(&cur_fds->file_lock);
 }
 
index 97076d3..8fb9602 100644 (file)
@@ -162,8 +162,10 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
        int error;
 
        error = init_threads(sdp);
-       if (error)
+       if (error) {
+               gfs2_withdraw_delayed(sdp);
                return error;
+       }
 
        j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
        if (gfs2_withdrawn(sdp)) {
@@ -750,11 +752,13 @@ void gfs2_freeze_func(struct work_struct *work)
 static int gfs2_freeze(struct super_block *sb)
 {
        struct gfs2_sbd *sdp = sb->s_fs_info;
-       int error = 0;
+       int error;
 
        mutex_lock(&sdp->sd_freeze_mutex);
-       if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN)
+       if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN) {
+               error = -EBUSY;
                goto out;
+       }
 
        for (;;) {
                if (gfs2_withdrawn(sdp)) {
@@ -795,10 +799,10 @@ static int gfs2_unfreeze(struct super_block *sb)
        struct gfs2_sbd *sdp = sb->s_fs_info;
 
        mutex_lock(&sdp->sd_freeze_mutex);
-        if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
+       if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
            !gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
                mutex_unlock(&sdp->sd_freeze_mutex);
-                return 0;
+               return -EINVAL;
        }
 
        gfs2_freeze_unlock(&sdp->sd_freeze_gh);
index 29e4077..743a005 100644 (file)
@@ -144,7 +144,7 @@ static char *follow_link(char *link)
        char *name, *resolved, *end;
        int n;
 
-       name = __getname();
+       name = kmalloc(PATH_MAX, GFP_KERNEL);
        if (!name) {
                n = -ENOMEM;
                goto out_free;
@@ -173,12 +173,11 @@ static char *follow_link(char *link)
                goto out_free;
        }
 
-       __putname(name);
-       kfree(link);
+       kfree(name);
        return resolved;
 
  out_free:
-       __putname(name);
+       kfree(name);
        return ERR_PTR(n);
 }
 
index 7434eb4..4eba531 100644 (file)
@@ -415,6 +415,7 @@ static void io_worker_handle_work(struct io_worker *worker)
 {
        struct io_wqe *wqe = worker->wqe;
        struct io_wq *wq = wqe->wq;
+       bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);
 
        do {
                struct io_wq_work *work;
@@ -444,6 +445,9 @@ get_next:
                        unsigned int hash = io_get_work_hash(work);
 
                        next_hashed = wq_next_work(work);
+
+                       if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
+                               work->flags |= IO_WQ_WORK_CANCEL;
                        wq->do_work(work);
                        io_assign_current_work(worker, NULL);
 
@@ -484,7 +488,7 @@ static int io_wqe_worker(void *data)
        worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
        io_wqe_inc_running(worker);
 
-       sprintf(buf, "iou-wrk-%d", wq->task_pid);
+       snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task_pid);
        set_task_comm(current, buf);
 
        while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
@@ -711,7 +715,7 @@ static int io_wq_manager(void *data)
        char buf[TASK_COMM_LEN];
        int node;
 
-       sprintf(buf, "iou-mgr-%d", wq->task_pid);
+       snprintf(buf, sizeof(buf), "iou-mgr-%d", wq->task_pid);
        set_task_comm(current, buf);
 
        do {
index 1949b80..dff3497 100644 (file)
@@ -697,6 +697,7 @@ enum {
        REQ_F_NO_FILE_TABLE_BIT,
        REQ_F_LTIMEOUT_ACTIVE_BIT,
        REQ_F_COMPLETE_INLINE_BIT,
+       REQ_F_REISSUE_BIT,
 
        /* not a real bit, just to check we're not overflowing the space */
        __REQ_F_LAST_BIT,
@@ -740,6 +741,8 @@ enum {
        REQ_F_LTIMEOUT_ACTIVE   = BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
        /* completion is deferred through io_comp_state */
        REQ_F_COMPLETE_INLINE   = BIT(REQ_F_COMPLETE_INLINE_BIT),
+       /* caller should reissue async */
+       REQ_F_REISSUE           = BIT(REQ_F_REISSUE_BIT),
 };
 
 struct async_poll {
@@ -1213,7 +1216,7 @@ static void io_prep_async_work(struct io_kiocb *req)
        if (req->flags & REQ_F_ISREG) {
                if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
                        io_wq_hash_work(&req->work, file_inode(req->file));
-       } else {
+       } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
                if (def->unbound_nonreg_file)
                        req->work.flags |= IO_WQ_WORK_UNBOUND;
        }
@@ -2476,6 +2479,11 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
                return false;
        return true;
 }
+#else
+static bool io_rw_should_reissue(struct io_kiocb *req)
+{
+       return false;
+}
 #endif
 
 static bool io_rw_reissue(struct io_kiocb *req)
@@ -2503,8 +2511,10 @@ static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
 
        if (req->rw.kiocb.ki_flags & IOCB_WRITE)
                kiocb_end_write(req);
-       if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_reissue(req))
+       if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_should_reissue(req)) {
+               req->flags |= REQ_F_REISSUE;
                return;
+       }
        if (res != req->result)
                req_set_fail_links(req);
        if (req->flags & REQ_F_BUFFER_SELECTED)
@@ -2752,6 +2762,7 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 {
        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
        struct io_async_rw *io = req->async_data;
+       bool check_reissue = kiocb->ki_complete == io_complete_rw;
 
        /* add previously done IO, if any */
        if (io && io->bytes_done > 0) {
@@ -2767,6 +2778,18 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
                __io_complete_rw(req, ret, 0, issue_flags);
        else
                io_rw_done(kiocb, ret);
+
+       if (check_reissue && req->flags & REQ_F_REISSUE) {
+               req->flags &= ~REQ_F_REISSUE;
+               if (!io_rw_reissue(req)) {
+                       int cflags = 0;
+
+                       req_set_fail_links(req);
+                       if (req->flags & REQ_F_BUFFER_SELECTED)
+                               cflags = io_put_rw_kbuf(req);
+                       __io_req_complete(req, issue_flags, ret, cflags);
+               }
+       }
 }
 
 static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
@@ -3283,11 +3306,8 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
 
        ret = io_iter_do_read(req, iter);
 
-       if (ret == -EIOCBQUEUED) {
-               if (req->async_data)
-                       iov_iter_revert(iter, io_size - iov_iter_count(iter));
-               goto out_free;
-       } else if (ret == -EAGAIN) {
+       if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
+               req->flags &= ~REQ_F_REISSUE;
                /* IOPOLL retry should happen for io-wq threads */
                if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
                        goto done;
@@ -3297,6 +3317,8 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
                /* some cases will consume bytes even on error returns */
                iov_iter_revert(iter, io_size - iov_iter_count(iter));
                ret = 0;
+       } else if (ret == -EIOCBQUEUED) {
+               goto out_free;
        } else if (ret <= 0 || ret == io_size || !force_nonblock ||
                   (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
                /* read all, failed, already did sync or don't want to retry */
@@ -3409,6 +3431,11 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
        else
                ret2 = -EINVAL;
 
+       if (req->flags & REQ_F_REISSUE) {
+               req->flags &= ~REQ_F_REISSUE;
+               ret2 = -EAGAIN;
+       }
+
        /*
         * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
         * retry them without IOCB_NOWAIT.
@@ -3418,8 +3445,6 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
        /* no retry on NONBLOCK nor RWF_NOWAIT */
        if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
                goto done;
-       if (ret2 == -EIOCBQUEUED && req->async_data)
-               iov_iter_revert(iter, io_size - iov_iter_count(iter));
        if (!force_nonblock || ret2 != -EAGAIN) {
                /* IOPOLL retry should happen for io-wq threads */
                if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
@@ -6718,7 +6743,7 @@ static int io_sq_thread(void *data)
        char buf[TASK_COMM_LEN];
        DEFINE_WAIT(wait);
 
-       sprintf(buf, "iou-sqp-%d", sqd->task_pid);
+       snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
        set_task_comm(current, buf);
        current->pf_io_worker = NULL;
 
@@ -6729,26 +6754,32 @@ static int io_sq_thread(void *data)
        current->flags |= PF_NO_SETAFFINITY;
 
        mutex_lock(&sqd->lock);
+       /* a user may have exited before the thread started */
+       io_run_task_work_head(&sqd->park_task_work);
+
        while (!test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state)) {
                int ret;
                bool cap_entries, sqt_spin, needs_sched;
 
-               if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state)) {
+               if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
+                   signal_pending(current)) {
+                       bool did_sig = false;
+
                        mutex_unlock(&sqd->lock);
+                       if (signal_pending(current)) {
+                               struct ksignal ksig;
+
+                               did_sig = get_signal(&ksig);
+                       }
                        cond_resched();
                        mutex_lock(&sqd->lock);
                        io_run_task_work();
                        io_run_task_work_head(&sqd->park_task_work);
+                       if (did_sig)
+                               break;
                        timeout = jiffies + sqd->sq_thread_idle;
                        continue;
                }
-               if (signal_pending(current)) {
-                       struct ksignal ksig;
-
-                       if (!get_signal(&ksig))
-                               continue;
-                       break;
-               }
                sqt_spin = false;
                cap_entries = !list_is_singular(&sqd->ctx_list);
                list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
@@ -8603,9 +8634,9 @@ static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
                        canceled++;
                }
        }
-       io_commit_cqring(ctx);
+       if (canceled != 0)
+               io_commit_cqring(ctx);
        spin_unlock_irq(&ctx->completion_lock);
-
        if (canceled != 0)
                io_cqring_ev_posted(ctx);
        return canceled != 0;
@@ -9002,6 +9033,8 @@ void __io_uring_task_cancel(void)
 
        /* make sure overflow events are dropped */
        atomic_inc(&tctx->in_idle);
+       __io_uring_files_cancel(NULL);
+
        do {
                /* read completions before cancelations */
                inflight = tctx_inflight(tctx);
index 216f16e..48a2f28 100644 (file)
@@ -579,6 +579,8 @@ static void set_nameidata(struct nameidata *p, int dfd, struct filename *name)
        p->stack = p->internal;
        p->dfd = dfd;
        p->name = name;
+       p->path.mnt = NULL;
+       p->path.dentry = NULL;
        p->total_link_count = old ? old->total_link_count : 0;
        p->saved = old;
        current->nameidata = p;
@@ -652,6 +654,8 @@ static void terminate_walk(struct nameidata *nd)
                rcu_read_unlock();
        }
        nd->depth = 0;
+       nd->path.mnt = NULL;
+       nd->path.dentry = NULL;
 }
 
 /* path_put is needed afterwards regardless of success or failure */
@@ -2322,8 +2326,6 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
        }
 
        nd->root.mnt = NULL;
-       nd->path.mnt = NULL;
-       nd->path.dentry = NULL;
 
        /* Absolute pathname -- fetch the root (LOOKUP_IN_ROOT uses nd->dfd). */
        if (*s == '/' && !(flags & LOOKUP_IN_ROOT)) {
@@ -2419,16 +2421,16 @@ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path
        while (!(err = link_path_walk(s, nd)) &&
               (s = lookup_last(nd)) != NULL)
                ;
+       if (!err && unlikely(nd->flags & LOOKUP_MOUNTPOINT)) {
+               err = handle_lookup_down(nd);
+               nd->flags &= ~LOOKUP_JUMPED; // no d_weak_revalidate(), please...
+       }
        if (!err)
                err = complete_walk(nd);
 
        if (!err && nd->flags & LOOKUP_DIRECTORY)
                if (!d_can_lookup(nd->path.dentry))
                        err = -ENOTDIR;
-       if (!err && unlikely(nd->flags & LOOKUP_MOUNTPOINT)) {
-               err = handle_lookup_down(nd);
-               nd->flags &= ~LOOKUP_JUMPED; // no d_weak_revalidate(), please...
-       }
        if (!err) {
                *path = nd->path;
                nd->path.mnt = NULL;
index 3bfb414..ad20403 100644 (file)
@@ -2295,7 +2295,7 @@ static int ocfs2_dio_end_io_write(struct inode *inode,
        struct ocfs2_alloc_context *meta_ac = NULL;
        handle_t *handle = NULL;
        loff_t end = offset + bytes;
-       int ret = 0, credits = 0, locked = 0;
+       int ret = 0, credits = 0;
 
        ocfs2_init_dealloc_ctxt(&dealloc);
 
@@ -2306,13 +2306,6 @@ static int ocfs2_dio_end_io_write(struct inode *inode,
            !dwc->dw_orphaned)
                goto out;
 
-       /* ocfs2_file_write_iter will get i_mutex, so we need not lock if we
-        * are in that context. */
-       if (dwc->dw_writer_pid != task_pid_nr(current)) {
-               inode_lock(inode);
-               locked = 1;
-       }
-
        ret = ocfs2_inode_lock(inode, &di_bh, 1);
        if (ret < 0) {
                mlog_errno(ret);
@@ -2393,8 +2386,6 @@ out:
        if (meta_ac)
                ocfs2_free_alloc_context(meta_ac);
        ocfs2_run_deallocs(osb, &dealloc);
-       if (locked)
-               inode_unlock(inode);
        ocfs2_dio_free_write_ctx(inode, dwc);
 
        return ret;
index 6611c64..5edc1d0 100644 (file)
@@ -1245,22 +1245,24 @@ int ocfs2_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
                                goto bail_unlock;
                        }
                }
+               down_write(&OCFS2_I(inode)->ip_alloc_sem);
                handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
                                           2 * ocfs2_quota_trans_credits(sb));
                if (IS_ERR(handle)) {
                        status = PTR_ERR(handle);
                        mlog_errno(status);
-                       goto bail_unlock;
+                       goto bail_unlock_alloc;
                }
                status = __dquot_transfer(inode, transfer_to);
                if (status < 0)
                        goto bail_commit;
        } else {
+               down_write(&OCFS2_I(inode)->ip_alloc_sem);
                handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
                if (IS_ERR(handle)) {
                        status = PTR_ERR(handle);
                        mlog_errno(status);
-                       goto bail_unlock;
+                       goto bail_unlock_alloc;
                }
        }
 
@@ -1273,6 +1275,8 @@ int ocfs2_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
 
 bail_commit:
        ocfs2_commit_trans(osb, handle);
+bail_unlock_alloc:
+       up_write(&OCFS2_I(inode)->ip_alloc_sem);
 bail_unlock:
        if (status && inode_locked) {
                ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
index 19434b3..09e8ed7 100644 (file)
@@ -150,6 +150,9 @@ static int fillonedir(struct dir_context *ctx, const char *name, int namlen,
 
        if (buf->result)
                return -EINVAL;
+       buf->result = verify_dirent_name(name, namlen);
+       if (buf->result < 0)
+               return buf->result;
        d_ino = ino;
        if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) {
                buf->result = -EOVERFLOW;
@@ -405,6 +408,9 @@ static int compat_fillonedir(struct dir_context *ctx, const char *name,
 
        if (buf->result)
                return -EINVAL;
+       buf->result = verify_dirent_name(name, namlen);
+       if (buf->result < 0)
+               return buf->result;
        d_ino = ino;
        if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) {
                buf->result = -EOVERFLOW;
index 9b3b06d..e47fde1 100644 (file)
@@ -44,7 +44,7 @@ void reiserfs_security_free(struct reiserfs_security_handle *sec);
 
 static inline int reiserfs_xattrs_initialized(struct super_block *sb)
 {
-       return REISERFS_SB(sb)->priv_root != NULL;
+       return REISERFS_SB(sb)->priv_root && REISERFS_SB(sb)->xattr_root;
 }
 
 #define xattr_size(size) ((size) + sizeof(struct reiserfs_xattr_header))
index 1538a68..1b4c458 100644 (file)
  *
  * A less-safe version of the macros is provided for optional use if the
  * compiler uses excessive CPU stack (for example, this may happen in the
- * debug case if code optimzation is disabled.)
+ * debug case if code optimization is disabled.)
  */
 
 /* Exit trace helper macro */
index c6f0c41..97e9a06 100644 (file)
@@ -17,7 +17,6 @@
  */
 #define ACPI_SBS_COMPONENT             0x00100000
 #define ACPI_FAN_COMPONENT             0x00200000
-#define ACPI_PCI_COMPONENT             0x00400000
 #define ACPI_CONTAINER_COMPONENT       0x01000000
 #define ACPI_SYSTEM_COMPONENT          0x02000000
 #define ACPI_MEMORY_DEVICE_COMPONENT   0x08000000
index 370293e..f8d44b0 100644 (file)
@@ -12,7 +12,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20210105
+#define ACPI_CA_VERSION                 0x20210331
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
index 9bccac9..8e2319b 100644 (file)
@@ -381,7 +381,7 @@ struct acpi_resource_gpio {
 #define ACPI_IO_RESTRICT_OUTPUT                 2
 #define ACPI_IO_RESTRICT_NONE_PRESERVE          3
 
-/* Common structure for I2C, SPI, and UART serial descriptors */
+/* Common structure for I2C, SPI, UART, CSI2 serial descriptors */
 
 #define ACPI_RESOURCE_SERIAL_COMMON \
        u8                                      revision_id; \
@@ -403,6 +403,7 @@ ACPI_RESOURCE_SERIAL_COMMON};
 #define ACPI_RESOURCE_SERIAL_TYPE_I2C           1
 #define ACPI_RESOURCE_SERIAL_TYPE_SPI           2
 #define ACPI_RESOURCE_SERIAL_TYPE_UART          3
+#define ACPI_RESOURCE_SERIAL_TYPE_CSI2          4
 
 /* Values for slave_mode field above */
 
@@ -505,6 +506,11 @@ struct acpi_resource_uart_serialbus {
 #define ACPI_UART_CLEAR_TO_SEND                 (1<<6)
 #define ACPI_UART_REQUEST_TO_SEND               (1<<7)
 
+struct acpi_resource_csi2_serialbus {
+       ACPI_RESOURCE_SERIAL_COMMON u8 local_port_instance;
+       u8 phy_type;
+};
+
 struct acpi_resource_pin_function {
        u8 revision_id;
        u8 pin_config;
@@ -634,6 +640,7 @@ union acpi_resource_data {
        struct acpi_resource_i2c_serialbus i2c_serial_bus;
        struct acpi_resource_spi_serialbus spi_serial_bus;
        struct acpi_resource_uart_serialbus uart_serial_bus;
+       struct acpi_resource_csi2_serialbus csi2_serial_bus;
        struct acpi_resource_common_serialbus common_serial_bus;
        struct acpi_resource_pin_function pin_function;
        struct acpi_resource_pin_config pin_config;
index af0a8c3..ce59903 100644 (file)
@@ -28,6 +28,7 @@
 #define ACPI_SIG_BERT           "BERT" /* Boot Error Record Table */
 #define ACPI_SIG_BGRT           "BGRT" /* Boot Graphics Resource Table */
 #define ACPI_SIG_BOOT           "BOOT" /* Simple Boot Flag Table */
+#define ACPI_SIG_CEDT           "CEDT" /* CXL Early Discovery Table */
 #define ACPI_SIG_CPEP           "CPEP" /* Corrected Platform Error Polling table */
 #define ACPI_SIG_CSRT           "CSRT" /* Core System Resource Table */
 #define ACPI_SIG_DBG2           "DBG2" /* Debug Port table type 2 */
@@ -301,6 +302,49 @@ struct acpi_table_boot {
        u8 reserved[3];
 };
 
+/*******************************************************************************
+ *
+ * CEDT - CXL Early Discovery Table
+ *        Version 1
+ *
+ * Conforms to the "CXL Early Discovery Table" (CXL 2.0)
+ *
+ ******************************************************************************/
+
+struct acpi_table_cedt {
+       struct acpi_table_header header;        /* Common ACPI table header */
+};
+
+/* CEDT subtable header (Performance Record Structure) */
+
+struct acpi_cedt_header {
+       u8 type;
+       u8 reserved;
+       u16 length;
+};
+
+/* Values for Type field above */
+
+enum acpi_cedt_type {
+       ACPI_CEDT_TYPE_CHBS = 0,
+       ACPI_CEDT_TYPE_RESERVED = 1
+};
+
+/*
+ * CEDT subtables
+ */
+
+/* 0: CXL Host Bridge Structure */
+
+struct acpi_cedt_chbs {
+       struct acpi_cedt_header header;
+       u32 uid;
+       u32 cxl_version;
+       u32 reserved;
+       u64 base;
+       u64 length;
+};
+
 /*******************************************************************************
  *
  * CPEP - Corrected Platform Error Polling table (ACPI 4.0)
@@ -1445,7 +1489,8 @@ struct acpi_hmat_locality {
        struct acpi_hmat_structure header;
        u8 flags;
        u8 data_type;
-       u16 reserved1;
+       u8 min_transfer_size;
+       u8 reserved1;
        u32 number_of_initiator_Pds;
        u32 number_of_target_Pds;
        u32 reserved2;
@@ -1454,15 +1499,18 @@ struct acpi_hmat_locality {
 
 /* Masks for Flags field above */
 
-#define ACPI_HMAT_MEMORY_HIERARCHY  (0x0F)
+#define ACPI_HMAT_MEMORY_HIERARCHY  (0x0F)     /* Bits 0-3 */
 
-/* Values for Memory Hierarchy flag */
+/* Values for Memory Hierarchy flags */
 
 #define ACPI_HMAT_MEMORY            0
 #define ACPI_HMAT_LAST_LEVEL_CACHE  1
 #define ACPI_HMAT_1ST_LEVEL_CACHE   2
 #define ACPI_HMAT_2ND_LEVEL_CACHE   3
 #define ACPI_HMAT_3RD_LEVEL_CACHE   4
+#define ACPI_HMAT_MINIMUM_XFER_SIZE 0x10       /* Bit 4: ACPI 6.4 */
+#define ACPI_HMAT_NON_SEQUENTIAL_XFERS 0x20    /* Bit 5: ACPI 6.4 */
+
 
 /* Values for data_type field above */
 
index d6478c4..18cafe3 100644 (file)
@@ -36,6 +36,7 @@
 #define ACPI_SIG_NFIT           "NFIT" /* NVDIMM Firmware Interface Table */
 #define ACPI_SIG_PCCT           "PCCT" /* Platform Communications Channel Table */
 #define ACPI_SIG_PDTT           "PDTT" /* Platform Debug Trigger Table */
+#define ACPI_SIG_PHAT           "PHAT" /* Platform Health Assessment Table */
 #define ACPI_SIG_PMTT           "PMTT" /* Platform Memory Topology Table */
 #define ACPI_SIG_PPTT           "PPTT" /* Processor Properties Topology Table */
 #define ACPI_SIG_RASF           "RASF" /* RAS Feature table */
@@ -67,7 +68,7 @@
  * IORT - IO Remapping Table
  *
  * Conforms to "IO Remapping Table System Software on ARM Platforms",
- * Document number: ARM DEN 0049D, March 2018
+ * Document number: ARM DEN 0049E.b, Feb 2021
  *
  ******************************************************************************/
 
@@ -85,7 +86,7 @@ struct acpi_iort_node {
        u8 type;
        u16 length;
        u8 revision;
-       u32 reserved;
+       u32 identifier;
        u32 mapping_count;
        u32 mapping_offset;
        char node_data[1];
@@ -99,7 +100,8 @@ enum acpi_iort_node_type {
        ACPI_IORT_NODE_PCI_ROOT_COMPLEX = 0x02,
        ACPI_IORT_NODE_SMMU = 0x03,
        ACPI_IORT_NODE_SMMU_V3 = 0x04,
-       ACPI_IORT_NODE_PMCG = 0x05
+       ACPI_IORT_NODE_PMCG = 0x05,
+       ACPI_IORT_NODE_RMR = 0x06,
 };
 
 struct acpi_iort_id_mapping {
@@ -166,10 +168,11 @@ struct acpi_iort_root_complex {
        u8 reserved[3];         /* Reserved, must be zero */
 };
 
-/* Values for ats_attribute field above */
+/* Masks for ats_attribute field above */
 
-#define ACPI_IORT_ATS_SUPPORTED         0x00000001     /* The root complex supports ATS */
-#define ACPI_IORT_ATS_UNSUPPORTED       0x00000000     /* The root complex doesn't support ATS */
+#define ACPI_IORT_ATS_SUPPORTED         (1)    /* The root complex supports ATS */
+#define ACPI_IORT_PRI_SUPPORTED         (1<<1) /* The root complex supports PRI */
+#define ACPI_IORT_PASID_FWD_SUPPORTED   (1<<2) /* The root complex supports PASID forwarding */
 
 struct acpi_iort_smmu {
        u64 base_address;       /* SMMU base address */
@@ -240,6 +243,18 @@ struct acpi_iort_pmcg {
        u64 page1_base_address;
 };
 
+struct acpi_iort_rmr {
+       u32 flags;
+       u32 rmr_count;
+       u32 rmr_offset;
+};
+
+struct acpi_iort_rmr_desc {
+       u64 base_address;
+       u64 length;
+       u32 reserved;
+};
+
 /*******************************************************************************
  *
  * IVRS - I/O Virtualization Reporting Structure
@@ -276,6 +291,7 @@ struct acpi_ivrs_header {
 enum acpi_ivrs_type {
        ACPI_IVRS_TYPE_HARDWARE1 = 0x10,
        ACPI_IVRS_TYPE_HARDWARE2 = 0x11,
+       ACPI_IVRS_TYPE_HARDWARE3 = 0x40,
        ACPI_IVRS_TYPE_MEMORY1 = 0x20,
        ACPI_IVRS_TYPE_MEMORY2 = 0x21,
        ACPI_IVRS_TYPE_MEMORY3 = 0x22
@@ -364,7 +380,11 @@ enum acpi_ivrs_device_entry_type {
        ACPI_IVRS_TYPE_ALIAS_START = 67,        /* Uses struct acpi_ivrs_device8a */
        ACPI_IVRS_TYPE_EXT_SELECT = 70, /* Uses struct acpi_ivrs_device8b */
        ACPI_IVRS_TYPE_EXT_START = 71,  /* Uses struct acpi_ivrs_device8b */
-       ACPI_IVRS_TYPE_SPECIAL = 72     /* Uses struct acpi_ivrs_device8c */
+       ACPI_IVRS_TYPE_SPECIAL = 72,    /* Uses struct acpi_ivrs_device8c */
+
+       /* Variable-length device entries */
+
+       ACPI_IVRS_TYPE_HID = 240        /* Uses ACPI_IVRS_DEVICE_HID */
 };
 
 /* Values for Data field above */
@@ -416,6 +436,16 @@ struct acpi_ivrs_device8c {
 #define ACPI_IVHD_IOAPIC            1
 #define ACPI_IVHD_HPET              2
 
+/* Type 240: variable-length device entry */
+
+struct acpi_ivrs_device_hid {
+       struct acpi_ivrs_de_header header;
+       u64 acpi_hid;
+       u64 acpi_cid;
+       u8 uid_type;
+       u8 uid_length;
+};
+
 /* 0x20, 0x21, 0x22: I/O Virtualization Memory Definition Block (IVMD) */
 
 struct acpi_ivrs_memory {
@@ -516,7 +546,8 @@ enum acpi_madt_type {
        ACPI_MADT_TYPE_GENERIC_MSI_FRAME = 13,
        ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14,
        ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15,
-       ACPI_MADT_TYPE_RESERVED = 16    /* 16 and greater are reserved */
+       ACPI_MADT_TYPE_MULTIPROC_WAKEUP = 16,
+       ACPI_MADT_TYPE_RESERVED = 17    /* 17 and greater are reserved */
 };
 
 /*
@@ -723,6 +754,15 @@ struct acpi_madt_generic_translator {
        u32 reserved2;
 };
 
+/* 16: Multiprocessor wakeup (ACPI 6.4) */
+
+struct acpi_madt_multiproc_wakeup {
+       struct acpi_subtable_header header;
+       u16 mailbox_version;
+       u32 reserved;           /* reserved - must be zero */
+       u64 base_address;
+};
+
 /*
  * Common flags fields for MADT subtables
  */
@@ -983,12 +1023,14 @@ struct acpi_nfit_system_address {
        u64 address;
        u64 length;
        u64 memory_mapping;
+       u64 location_cookie;    /* ACPI 6.4 */
 };
 
 /* Flags */
 
 #define ACPI_NFIT_ADD_ONLINE_ONLY       (1)    /* 00: Add/Online Operation Only */
 #define ACPI_NFIT_PROXIMITY_VALID       (1<<1) /* 01: Proximity Domain Valid */
+#define ACPI_NFIT_LOCATION_COOKIE_VALID (1<<2) /* 02: SPA location cookie valid (ACPI 6.4) */
 
 /* Range Type GUIDs appear in the include/acuuid.h file */
 
@@ -1184,7 +1226,8 @@ enum acpi_pcct_type {
        ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2 = 2,   /* ACPI 6.1 */
        ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE = 3,     /* ACPI 6.2 */
        ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE = 4,      /* ACPI 6.2 */
-       ACPI_PCCT_TYPE_RESERVED = 5     /* 5 and greater are reserved */
+       ACPI_PCCT_TYPE_HW_REG_COMM_SUBSPACE = 5,        /* ACPI 6.4 */
+       ACPI_PCCT_TYPE_RESERVED = 6     /* 6 and greater are reserved */
 };
 
 /*
@@ -1299,6 +1342,24 @@ struct acpi_pcct_ext_pcc_slave {
        u64 error_status_mask;
 };
 
+/* 5: HW Registers based Communications Subspace */
+
+struct acpi_pcct_hw_reg {
+       struct acpi_subtable_header header;
+       u16 version;
+       u64 base_address;
+       u64 length;
+       struct acpi_generic_address doorbell_register;
+       u64 doorbell_preserve;
+       u64 doorbell_write;
+       struct acpi_generic_address cmd_complete_register;
+       u64 cmd_complete_mask;
+       struct acpi_generic_address error_status_register;
+       u64 error_status_mask;
+       u32 nominal_latency;
+       u32 min_turnaround_time;
+};
+
 /* Values for doorbell flags above */
 
 #define ACPI_PCCT_INTERRUPT_POLARITY    (1)
@@ -1355,6 +1416,66 @@ struct acpi_pdtt_channel {
 #define ACPI_PDTT_WAIT_COMPLETION           (1<<1)
 #define ACPI_PDTT_TRIGGER_ORDER             (1<<2)
 
+/*******************************************************************************
+ *
+ * PHAT - Platform Health Assessment Table (ACPI 6.4)
+ *        Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_phat {
+       struct acpi_table_header header;        /* Common ACPI table header */
+};
+
+/* Common header for PHAT subtables that follow main table */
+
+struct acpi_phat_header {
+       u16 type;
+       u16 length;
+       u8 revision;
+};
+
+/* Values for Type field above */
+
+#define ACPI_PHAT_TYPE_FW_VERSION_DATA  0
+#define ACPI_PHAT_TYPE_FW_HEALTH_DATA   1
+#define ACPI_PHAT_TYPE_RESERVED         2      /* 0x02-0xFFFF are reserved */
+
+/*
+ * PHAT subtables, correspond to Type in struct acpi_phat_header
+ */
+
+/* 0: Firmware Version Data Record */
+
+struct acpi_phat_version_data {
+       struct acpi_phat_header header;
+       u8 reserved[3];
+       u32 element_count;
+};
+
+struct acpi_phat_version_element {
+       u8 guid[16];
+       u64 version_value;
+       u32 producer_id;
+};
+
+/* 1: Firmware Health Data Record */
+
+struct acpi_phat_health_data {
+       struct acpi_phat_header header;
+       u8 reserved[2];
+       u8 health;
+       u8 device_guid[16];
+       u32 device_specific_offset;     /* Zero if no Device-specific data */
+};
+
+/* Values for Health field above */
+
+#define ACPI_PHAT_ERRORS_FOUND          0
+#define ACPI_PHAT_NO_ERRORS             1
+#define ACPI_PHAT_UNKNOWN_ERRORS        2
+#define ACPI_PHAT_ADVISORY              3
+
 /*******************************************************************************
  *
  * PMTT - Platform Memory Topology Table (ACPI 5.0)
@@ -1364,7 +1485,11 @@ struct acpi_pdtt_channel {
 
 struct acpi_table_pmtt {
        struct acpi_table_header header;        /* Common ACPI table header */
-       u32 reserved;
+       u32 memory_device_count;
+       /*
+        * Immediately followed by:
+        * MEMORY_DEVICE memory_device_struct[memory_device_count];
+        */
 };
 
 /* Common header for PMTT subtables that follow main table */
@@ -1375,6 +1500,12 @@ struct acpi_pmtt_header {
        u16 length;
        u16 flags;
        u16 reserved2;
+       u32 memory_device_count;        /* Zero means no memory device structs follow */
+       /*
+        * Immediately followed by:
+        * u8 type_specific_data[]
+        * MEMORY_DEVICE memory_device_struct[memory_device_count];
+        */
 };
 
 /* Values for Type field above */
@@ -1382,7 +1513,8 @@ struct acpi_pmtt_header {
 #define ACPI_PMTT_TYPE_SOCKET           0
 #define ACPI_PMTT_TYPE_CONTROLLER       1
 #define ACPI_PMTT_TYPE_DIMM             2
-#define ACPI_PMTT_TYPE_RESERVED         3      /* 0x03-0xFF are reserved */
+#define ACPI_PMTT_TYPE_RESERVED         3      /* 0x03-0xFE are reserved */
+#define ACPI_PMTT_TYPE_VENDOR           0xFF
 
 /* Values for Flags field above */
 
@@ -1401,37 +1533,43 @@ struct acpi_pmtt_socket {
        u16 socket_id;
        u16 reserved;
 };
+       /*
+        * Immediately followed by:
+        * MEMORY_DEVICE memory_device_struct[memory_device_count];
+        */
 
 /* 1: Memory Controller subtable */
 
 struct acpi_pmtt_controller {
        struct acpi_pmtt_header header;
-       u32 read_latency;
-       u32 write_latency;
-       u32 read_bandwidth;
-       u32 write_bandwidth;
-       u16 access_width;
-       u16 alignment;
+       u16 controller_id;
        u16 reserved;
-       u16 domain_count;
-};
-
-/* 1a: Proximity Domain substructure */
-
-struct acpi_pmtt_domain {
-       u32 proximity_domain;
 };
+       /*
+        * Immediately followed by:
+        * MEMORY_DEVICE memory_device_struct[memory_device_count];
+        */
 
 /* 2: Physical Component Identifier (DIMM) */
 
 struct acpi_pmtt_physical_component {
        struct acpi_pmtt_header header;
-       u16 component_id;
-       u16 reserved;
-       u32 memory_size;
        u32 bios_handle;
 };
 
+/* 0xFF: Vendor Specific Data */
+
+struct acpi_pmtt_vendor_specific {
+       struct acpi_pmtt_header header;
+       u8 type_uuid[16];
+       u8 specific[];
+       /*
+        * Immediately followed by:
+        * u8 vendor_specific_data[];
+        * MEMORY_DEVICE memory_device_struct[memory_device_count];
+        */
+};
+
 /*******************************************************************************
  *
  * PPTT - Processor Properties Topology Table (ACPI 6.2)
@@ -1485,6 +1623,12 @@ struct acpi_pptt_cache {
        u16 line_size;
 };
 
+/* 1: Cache Type Structure for PPTT version 3 */
+
+struct acpi_pptt_cache_v1 {
+       u32 cache_id;
+};
+
 /* Flags */
 
 #define ACPI_PPTT_SIZE_PROPERTY_VALID       (1)        /* Physical property valid */
@@ -1494,6 +1638,7 @@ struct acpi_pptt_cache {
 #define ACPI_PPTT_CACHE_TYPE_VALID          (1<<4)     /* Cache type valid */
 #define ACPI_PPTT_WRITE_POLICY_VALID        (1<<5)     /* Write policy valid */
 #define ACPI_PPTT_LINE_SIZE_VALID           (1<<6)     /* Line size valid */
+#define ACPI_PPTT_CACHE_ID_VALID            (1<<7)     /* Cache ID valid */
 
 /* Masks for Attributes */
 
@@ -1679,6 +1824,7 @@ enum acpi_sdev_type {
 /* Values for flags above */
 
 #define ACPI_SDEV_HANDOFF_TO_UNSECURE_OS    (1)
+#define ACPI_SDEV_SECURE_COMPONENTS_PRESENT (1<<1)
 
 /*
  * SDEV subtables
@@ -1694,6 +1840,46 @@ struct acpi_sdev_namespace {
        u16 vendor_data_length;
 };
 
+struct acpi_sdev_secure_component {
+       u16 secure_component_offset;
+       u16 secure_component_length;
+};
+
+/*
+ * SDEV sub-subtables ("Components") for above
+ */
+struct acpi_sdev_component {
+       struct acpi_sdev_header header;
+};
+
+/* Values for sub-subtable type above */
+
+enum acpi_sac_type {
+       ACPI_SDEV_TYPE_ID_COMPONENT = 0,
+       ACPI_SDEV_TYPE_MEM_COMPONENT = 1
+};
+
+struct acpi_sdev_id_component {
+       struct acpi_sdev_header header;
+       u16 hardware_id_offset;
+       u16 hardware_id_length;
+       u16 subsystem_id_offset;
+       u16 subsystem_id_length;
+       u16 hardware_revision;
+       u8 hardware_rev_present;
+       u8 class_code_present;
+       u8 pci_base_class;
+       u8 pci_sub_class;
+       u8 pci_programming_xface;
+};
+
+struct acpi_sdev_mem_component {
+       struct acpi_sdev_header header;
+       u32 reserved;
+       u64 memory_base_address;
+       u64 memory_length;
+};
+
 /* 1: PCIe Endpoint Device Based Device Structure */
 
 struct acpi_sdev_pcie {
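The CEDT additions above only define the table layout; the sketch below is a minimal, hypothetical walker for the CHBS subtables. acpi_get_table()/acpi_put_table() are existing ACPICA calls, while the function name and printed message are illustrative only:

static int __init cedt_list_host_bridges(void)
{
        struct acpi_table_header *table;
        struct acpi_cedt_header *sub;
        acpi_status status;
        unsigned long end;

        status = acpi_get_table(ACPI_SIG_CEDT, 0, &table);
        if (ACPI_FAILURE(status))
                return -ENODEV;

        /* Subtables start immediately after the fixed ACPI header. */
        sub = (struct acpi_cedt_header *)(table + 1);
        end = (unsigned long)table + table->length;

        while ((unsigned long)sub < end) {
                if (sub->type == ACPI_CEDT_TYPE_CHBS) {
                        struct acpi_cedt_chbs *chbs = (struct acpi_cedt_chbs *)sub;

                        pr_info("CXL host bridge UID %u at %#llx\n",
                                chbs->uid, (unsigned long long)chbs->base);
                }
                sub = (struct acpi_cedt_header *)((u8 *)sub + sub->length);
        }

        acpi_put_table(table);
        return 0;
}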
index df5f4b2..86903ac 100644 (file)
@@ -33,6 +33,7 @@
 #define ACPI_SIG_TCPA           "TCPA" /* Trusted Computing Platform Alliance table */
 #define ACPI_SIG_TPM2           "TPM2" /* Trusted Platform Module 2.0 H/W interface table */
 #define ACPI_SIG_UEFI           "UEFI" /* Uefi Boot Optimization Table */
+#define ACPI_SIG_VIOT           "VIOT" /* Virtual I/O Translation Table */
 #define ACPI_SIG_WAET           "WAET" /* Windows ACPI Emulated devices Table */
 #define ACPI_SIG_WDAT           "WDAT" /* Watchdog Action Table */
 #define ACPI_SIG_WDDT           "WDDT" /* Watchdog Timer Description Table */
@@ -285,7 +286,8 @@ struct acpi_srat_generic_affinity {
 
 /* Flags for struct acpi_srat_generic_affinity */
 
-#define ACPI_SRAT_GENERIC_AFFINITY_ENABLED (1) /* 00: Use affinity structure */
+#define ACPI_SRAT_GENERIC_AFFINITY_ENABLED     (1)     /* 00: Use affinity structure */
+#define ACPI_SRAT_ARCHITECTURAL_TRANSACTIONS   (1<<1)  /* ACPI 6.4 */
 
 /*******************************************************************************
  *
@@ -483,6 +485,72 @@ struct acpi_table_uefi {
        u16 data_offset;        /* Offset of remaining data in table */
 };
 
+/*******************************************************************************
+ *
+ * VIOT - Virtual I/O Translation Table
+ *        Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_viot {
+       struct acpi_table_header header;        /* Common ACPI table header */
+       u16 node_count;
+       u16 node_offset;
+       u8 reserved[8];
+};
+
+/* VIOT subtable header */
+
+struct acpi_viot_header {
+       u8 type;
+       u8 reserved;
+       u16 length;
+};
+
+/* Values for Type field above */
+
+enum acpi_viot_node_type {
+       ACPI_VIOT_NODE_PCI_RANGE = 0x01,
+       ACPI_VIOT_NODE_MMIO = 0x02,
+       ACPI_VIOT_NODE_VIRTIO_IOMMU_PCI = 0x03,
+       ACPI_VIOT_NODE_VIRTIO_IOMMU_MMIO = 0x04,
+       ACPI_VIOT_RESERVED = 0x05
+};
+
+/* VIOT subtables */
+
+struct acpi_viot_pci_range {
+       struct acpi_viot_header header;
+       u32 endpoint_start;
+       u16 segment_start;
+       u16 segment_end;
+       u16 bdf_start;
+       u16 bdf_end;
+       u16 output_node;
+       u8 reserved[6];
+};
+
+struct acpi_viot_mmio {
+       struct acpi_viot_header header;
+       u32 endpoint;
+       u64 base_address;
+       u16 output_node;
+       u8 reserved[6];
+};
+
+struct acpi_viot_virtio_iommu_pci {
+       struct acpi_viot_header header;
+       u16 segment;
+       u16 bdf;
+       u8 reserved[8];
+};
+
+struct acpi_viot_virtio_iommu_mmio {
+       struct acpi_viot_header header;
+       u8 reserved[4];
+       u64 base_address;
+};
+
 /*******************************************************************************
  *
  * WAET - Windows ACPI Emulated devices Table
index a5c2ca0..bc24388 100644 (file)
@@ -68,5 +68,6 @@
 #define UUID_DEVICE_GRAPHS              "ab02a46b-74c7-45a2-bd68-f7d344ef2153"
 #define UUID_HIERARCHICAL_DATA_EXTENSION "dbb8e3e6-5886-4ba6-8795-1319f52a966b"
 #define UUID_CORESIGHT_GRAPH            "3ecbc8b6-1d0e-4fb3-8107-e627f805c6cd"
+#define UUID_USB4_CAPABILITIES          "23a0d13a-26ab-486c-9c5f-0ffa525a575a"
 
 #endif                         /* __ACUUID_H__ */
index 0cd4f61..f6656be 100644 (file)
@@ -61,7 +61,7 @@ typedef __builtin_va_list va_list;
 #endif
 
 /*
- * Explictly mark intentional explicit fallthrough to silence
+ * Explicitly mark intentional explicit fallthrough to silence
  * -Wimplicit-fallthrough in GCC 7.1+.
  */
 
index dc93454..10528de 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Constant for device tree bindings for Turris Mox module configuration bus
  *
- * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
+ * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
  */
 
 #ifndef _DT_BINDINGS_BUS_MOXTET_H
index fcdaab7..3bdcfc4 100644 (file)
@@ -222,10 +222,14 @@ void __iomem *__acpi_map_table(unsigned long phys, unsigned long size);
 void __acpi_unmap_table(void __iomem *map, unsigned long size);
 int early_acpi_boot_init(void);
 int acpi_boot_init (void);
+void acpi_boot_table_prepare (void);
 void acpi_boot_table_init (void);
 int acpi_mps_check (void);
 int acpi_numa_init (void);
 
+int acpi_locate_initial_tables (void);
+void acpi_reserve_initial_tables (void);
+void acpi_table_init_complete (void);
 int acpi_table_init (void);
 int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
 int __init acpi_table_parse_entries(char *id, unsigned long table_size,
@@ -814,9 +818,12 @@ static inline int acpi_boot_init(void)
        return 0;
 }
 
+static inline void acpi_boot_table_prepare(void)
+{
+}
+
 static inline void acpi_boot_table_init(void)
 {
-       return;
 }
 
 static inline int acpi_mps_check(void)
index 57bb54f..ef4bd70 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * rWTM BIU Mailbox driver for Armada 37xx
  *
- * Author: Marek Behun <marek.behun@nic.cz>
+ * Author: Marek Behún <kabel@kernel.org>
  */
 
 #ifndef _LINUX_ARMADA_37XX_RWTM_MAILBOX_H_
index 40bad71..532bcbf 100644 (file)
@@ -476,7 +476,6 @@ struct virtchnl_rss_key {
        u16 vsi_id;
        u16 key_len;
        u8 key[1];         /* RSS hash key, packed bytes */
-       u8 pad[1];
 };
 
 VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key);
@@ -485,7 +484,6 @@ struct virtchnl_rss_lut {
        u16 vsi_id;
        u16 lut_entries;
        u8 lut[1];        /* RSS lookup table */
-       u8 pad[1];
 };
 
 VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut);
index bc6bc83..158aefa 100644 (file)
@@ -85,8 +85,6 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_ELVPRIV            ((__force req_flags_t)(1 << 12))
 /* account into disk and partition IO statistics */
 #define RQF_IO_STAT            ((__force req_flags_t)(1 << 13))
-/* request came from our alloc pool */
-#define RQF_ALLOCED            ((__force req_flags_t)(1 << 14))
 /* runtime pm request */
 #define RQF_PM                 ((__force req_flags_t)(1 << 15))
 /* on IO scheduler merge hash */
index 3625f01..fdac053 100644 (file)
@@ -40,6 +40,7 @@ struct bpf_local_storage;
 struct bpf_local_storage_map;
 struct kobject;
 struct mem_cgroup;
+struct module;
 
 extern struct idr btf_idr;
 extern spinlock_t btf_idr_lock;
@@ -623,6 +624,7 @@ struct bpf_trampoline {
        /* Executable image of trampoline */
        struct bpf_tramp_image *cur_image;
        u64 selector;
+       struct module *mod;
 };
 
 struct bpf_attach_target_info {
index ec4cd39..cdca84e 100644 (file)
@@ -87,9 +87,7 @@ u32 ethtool_op_get_link(struct net_device *dev);
 int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti);
 
 
-/**
- * struct ethtool_link_ext_state_info - link extended state and substate.
- */
+/* Link extended state and substate. */
 struct ethtool_link_ext_state_info {
        enum ethtool_link_ext_state link_ext_state;
        union {
@@ -129,7 +127,6 @@ struct ethtool_link_ksettings {
                __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
        } link_modes;
        u32     lanes;
-       enum ethtool_link_mode_bit_indices link_mode;
 };
 
 /**
@@ -292,6 +289,9 @@ struct ethtool_pause_stats {
  *     do not attach ext_substate attribute to netlink message). If link_ext_state
  *     and link_ext_substate are unknown, return -ENODATA. If not implemented,
  *     link_ext_state and link_ext_substate will not be sent to userspace.
+ * @get_eeprom_len: Read range of EEPROM addresses for validation of
+ *     @get_eeprom and @set_eeprom requests.
+ *     Returns 0 if device does not support EEPROM access.
  * @get_eeprom: Read data from the device EEPROM.
  *     Should fill in the magic field.  Don't need to check len for zero
  *     or wraparound.  Fill in the data argument with the eeprom values
@@ -384,6 +384,8 @@ struct ethtool_pause_stats {
  * @get_module_eeprom: Get the eeprom information from the plug-in module
  * @get_eee: Get Energy-Efficient (EEE) supported and status.
  * @set_eee: Set EEE status (enable/disable) as well as LPI timers.
+ * @get_tunable: Read the value of a driver / device tunable.
+ * @set_tunable: Set the value of a driver / device tunable.
  * @get_per_queue_coalesce: Get interrupt coalescing parameters per queue.
  *     It must check that the given queue number is valid. If neither a RX nor
  *     a TX queue has this number, return -EINVAL. If only a RX queue or a TX
@@ -547,8 +549,8 @@ struct phy_tdr_config;
  * @get_sset_count: Get number of strings that @get_strings will write.
  * @get_strings: Return a set of strings that describe the requested objects
  * @get_stats: Return extended statistics about the PHY device.
- * @start_cable_test - Start a cable test
- * @start_cable_test_tdr - Start a Time Domain Reflectometry cable test
+ * @start_cable_test: Start a cable test
+ * @start_cable_test_tdr: Start a Time Domain Reflectometry cable test
  *
  * All operations are optional (i.e. the function pointer may be set to %NULL)
  * and callers must take this into account. Callers must hold the RTNL lock.
@@ -571,4 +573,12 @@ struct ethtool_phy_ops {
  */
 void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops);
 
+/*
+ * ethtool_params_from_link_mode - Derive link parameters from a given link mode
+ * @link_ksettings: Link parameters to be derived from the link mode
+ * @link_mode: Link mode
+ */
+void
+ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings,
+                             enum ethtool_link_mode_bit_indices link_mode);
 #endif /* _LINUX_ETHTOOL_H */
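As a usage sketch for the helper declared above: the driver callback foo_get_link_ksettings() and the chosen link mode are hypothetical, only ethtool_params_from_link_mode() and the ETHTOOL_LINK_MODE_* indices come from the kernel headers.

static int foo_get_link_ksettings(struct net_device *dev,
                                  struct ethtool_link_ksettings *ks)
{
        /* Fill speed, duplex and lane count from a single known link mode
         * instead of open-coding the individual parameters.
         */
        ethtool_params_from_link_mode(ks, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT);
        return 0;
}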
index fd183fb..0c19010 100644 (file)
@@ -271,6 +271,29 @@ static inline  void devm_extcon_unregister_notifier(struct device *dev,
                                struct extcon_dev *edev, unsigned int id,
                                struct notifier_block *nb) { }
 
+static inline int extcon_register_notifier_all(struct extcon_dev *edev,
+                                              struct notifier_block *nb)
+{
+       return 0;
+}
+
+static inline int extcon_unregister_notifier_all(struct extcon_dev *edev,
+                                                struct notifier_block *nb)
+{
+       return 0;
+}
+
+static inline int devm_extcon_register_notifier_all(struct device *dev,
+                                                   struct extcon_dev *edev,
+                                                   struct notifier_block *nb)
+{
+       return 0;
+}
+
+static inline void devm_extcon_unregister_notifier_all(struct device *dev,
+                                                      struct extcon_dev *edev,
+                                                      struct notifier_block *nb) { }
+
 static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
 {
        return ERR_PTR(-ENODEV);
index ebc2956..19781b0 100644 (file)
@@ -56,7 +56,7 @@
  * COMMAND_RECONFIG_FLAG_PARTIAL:
  * Set to FPGA configuration type (full or partial).
  */
-#define COMMAND_RECONFIG_FLAG_PARTIAL  1
+#define COMMAND_RECONFIG_FLAG_PARTIAL  0
 
 /*
  * Timeout settings for service clients:
index ce59a6a..9eb77c8 100644 (file)
@@ -320,7 +320,14 @@ static inline struct host1x_device *to_host1x_device(struct device *dev)
 int host1x_device_init(struct host1x_device *device);
 int host1x_device_exit(struct host1x_device *device);
 
-int host1x_client_register(struct host1x_client *client);
+int __host1x_client_register(struct host1x_client *client,
+                            struct lock_class_key *key);
+#define host1x_client_register(class) \
+       ({ \
+               static struct lock_class_key __key; \
+               __host1x_client_register(class, &__key); \
+       })
+
 int host1x_client_unregister(struct host1x_client *client);
 
 int host1x_client_suspend(struct host1x_client *client);
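Call sites should not need to change for the macro conversion above; a hypothetical client init path still reads as follows, with __host1x_client_register() receiving a per-call-site lock class key behind the scenes:

static int foo_client_init(struct device *dev, struct host1x_client *client)
{
        int err;

        /* Expands to __host1x_client_register(client, &__key) with a
         * static lock class key unique to this call site.
         */
        err = host1x_client_register(client);
        if (err < 0)
                dev_err(dev, "failed to register host1x client: %d\n", err);

        return err;
}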
index b91732b..14f72ec 100644 (file)
@@ -330,7 +330,7 @@ static inline bool kasan_check_byte(const void *address)
 
 #endif /* CONFIG_KASAN */
 
-#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
+#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
 void kasan_unpoison_task_stack(struct task_struct *task);
 #else
 static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
index 52b1610..c544b70 100644 (file)
 /* Marvell 88E1111 in Finisar SFP module with modified PHY ID */
 #define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0
 
-/* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do
+/* These Ethernet switch families contain embedded PHYs, but they do
  * not have a model ID. So the switch driver traps reads to the ID2
  * register and returns the switch family ID
  */
-#define MARVELL_PHY_ID_88E6390         0x01410f90
+#define MARVELL_PHY_ID_88E6341_FAMILY  0x01410f41
+#define MARVELL_PHY_ID_88E6390_FAMILY  0x01410f90
 
 #define MARVELL_PHY_FAMILY_ID(id)      ((id) >> 4)
 
index df5d91c..9c68b2d 100644 (file)
@@ -437,11 +437,11 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
        u8         reserved_at_60[0x18];
        u8         log_max_ft_num[0x8];
 
-       u8         reserved_at_80[0x18];
+       u8         reserved_at_80[0x10];
+       u8         log_max_flow_counter[0x8];
        u8         log_max_destination[0x8];
 
-       u8         log_max_flow_counter[0x8];
-       u8         reserved_at_a8[0x10];
+       u8         reserved_at_a0[0x18];
        u8         log_max_flow[0x8];
 
        u8         reserved_at_c0[0x40];
@@ -8835,6 +8835,8 @@ struct mlx5_ifc_pplm_reg_bits {
 
        u8         fec_override_admin_100g_2x[0x10];
        u8         fec_override_admin_50g_1x[0x10];
+
+       u8         reserved_at_140[0x140];
 };
 
 struct mlx5_ifc_ppcnt_reg_bits {
@@ -10198,7 +10200,7 @@ struct mlx5_ifc_pbmc_reg_bits {
 
        struct mlx5_ifc_bufferx_reg_bits buffer[10];
 
-       u8         reserved_at_2e0[0x40];
+       u8         reserved_at_2e0[0x80];
 };
 
 struct mlx5_ifc_qtct_reg_bits {
index 490db68..7918494 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Turris Mox module configuration bus driver
  *
- * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
+ * Copyright (C) 2019 Marek Behún <kabel@kernel.org>
  */
 
 #ifndef __LINUX_MOXTET_H
index cec526c..ee9ad76 100644 (file)
@@ -11,6 +11,7 @@
 
 enum nvdimm_event {
        NVDIMM_REVALIDATE_POISON,
+       NVDIMM_REVALIDATE_REGION,
 };
 
 enum nvdimm_claim_class {
index 7d3537c..26a1329 100644 (file)
@@ -52,8 +52,9 @@ extern void *arpt_alloc_initial_table(const struct xt_table *);
 int arpt_register_table(struct net *net, const struct xt_table *table,
                        const struct arpt_replace *repl,
                        const struct nf_hook_ops *ops, struct xt_table **res);
-void arpt_unregister_table(struct net *net, struct xt_table *table,
-                          const struct nf_hook_ops *ops);
+void arpt_unregister_table(struct net *net, struct xt_table *table);
+void arpt_unregister_table_pre_exit(struct net *net, struct xt_table *table,
+                                   const struct nf_hook_ops *ops);
 extern unsigned int arpt_do_table(struct sk_buff *skb,
                                  const struct nf_hook_state *state,
                                  struct xt_table *table);
index 2f5c4e6..3a95614 100644 (file)
@@ -110,8 +110,9 @@ extern int ebt_register_table(struct net *net,
                              const struct ebt_table *table,
                              const struct nf_hook_ops *ops,
                              struct ebt_table **res);
-extern void ebt_unregister_table(struct net *net, struct ebt_table *table,
-                                const struct nf_hook_ops *);
+extern void ebt_unregister_table(struct net *net, struct ebt_table *table);
+void ebt_unregister_table_pre_exit(struct net *net, const char *tablename,
+                                  const struct nf_hook_ops *ops);
 extern unsigned int ebt_do_table(struct sk_buff *skb,
                                 const struct nf_hook_state *state,
                                 struct ebt_table *table);
index ec2ad4b..c4fdb44 100644 (file)
@@ -460,7 +460,5 @@ void geni_icc_set_tag(struct geni_se *se, u32 tag);
 int geni_icc_enable(struct geni_se *se);
 
 int geni_icc_disable(struct geni_se *se);
-
-void geni_remove_earlycon_icc_vote(void);
 #endif
 #endif
index 8edbbf5..822c048 100644 (file)
@@ -349,8 +349,13 @@ static inline void sk_psock_update_proto(struct sock *sk,
 static inline void sk_psock_restore_proto(struct sock *sk,
                                          struct sk_psock *psock)
 {
-       sk->sk_prot->unhash = psock->saved_unhash;
        if (inet_csk_has_ulp(sk)) {
+               /* TLS does not have an unhash proto in SW cases, but we need
+                * to ensure we stop using the sock_map unhash routine because
+                * the associated psock is being removed. So use the original
+                * unhash handler.
+                */
+               WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash);
                tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space);
        } else {
                sk->sk_write_space = psock->saved_write_space;
index 6b5fcfa..b465f8f 100644 (file)
@@ -62,15 +62,21 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
                        return -EINVAL;
        }
 
+       skb_reset_mac_header(skb);
+
        if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
-               u16 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
-               u16 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
+               u32 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
+               u32 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
+               u32 needed = start + max_t(u32, thlen, off + sizeof(__sum16));
+
+               if (!pskb_may_pull(skb, needed))
+                       return -EINVAL;
 
                if (!skb_partial_csum_set(skb, start, off))
                        return -EINVAL;
 
                p_off = skb_transport_offset(skb) + thlen;
-               if (p_off > skb_headlen(skb))
+               if (!pskb_may_pull(skb, p_off))
                        return -EINVAL;
        } else {
                /* gso packets without NEEDS_CSUM do not set transport_offset.
@@ -100,14 +106,14 @@ retry:
                        }
 
                        p_off = keys.control.thoff + thlen;
-                       if (p_off > skb_headlen(skb) ||
+                       if (!pskb_may_pull(skb, p_off) ||
                            keys.basic.ip_proto != ip_proto)
                                return -EINVAL;
 
                        skb_set_transport_header(skb, keys.control.thoff);
                } else if (gso_type) {
                        p_off = thlen;
-                       if (p_off > skb_headlen(skb))
+                       if (!pskb_may_pull(skb, p_off))
                                return -EINVAL;
                }
        }
index 92c0160..a91e3d9 100644 (file)
@@ -229,9 +229,10 @@ static inline int xa_err(void *entry)
  *
  * This structure is used either directly or via the XA_LIMIT() macro
  * to communicate the range of IDs that are valid for allocation.
- * Two common ranges are predefined for you:
+ * Three common ranges are predefined for you:
  * * xa_limit_32b      - [0 - UINT_MAX]
  * * xa_limit_31b      - [0 - INT_MAX]
+ * * xa_limit_16b      - [0 - USHRT_MAX]
  */
 struct xa_limit {
        u32 max;
@@ -242,6 +243,7 @@ struct xa_limit {
 
 #define xa_limit_32b   XA_LIMIT(0, UINT_MAX)
 #define xa_limit_31b   XA_LIMIT(0, INT_MAX)
+#define xa_limit_16b   XA_LIMIT(0, USHRT_MAX)
 
 typedef unsigned __bitwise xa_mark_t;
 #define XA_MARK_0              ((__force xa_mark_t)0U)
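A minimal sketch of the new limit in use; the xarray and wrapper below are hypothetical, while xa_alloc() and DEFINE_XARRAY_ALLOC() are the existing API:

static DEFINE_XARRAY_ALLOC(foo_ids);

static int foo_alloc_id(void *object, u32 *id)
{
        /* Restrict allocated IDs to 0..USHRT_MAX via the new limit. */
        return xa_alloc(&foo_ids, id, object, xa_limit_16b, GFP_KERNEL);
}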
index 2bf3092..086b291 100644 (file)
@@ -170,12 +170,7 @@ void tcf_idr_insert_many(struct tc_action *actions[]);
 void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
 int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
                        struct tc_action **a, int bind);
-int __tcf_idr_release(struct tc_action *a, bool bind, bool strict);
-
-static inline int tcf_idr_release(struct tc_action *a, bool bind)
-{
-       return __tcf_idr_release(a, bind, false);
-}
+int tcf_idr_release(struct tc_action *a, bool bind);
 
 int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
 int tcf_unregister_action(struct tc_action_ops *a,
@@ -185,7 +180,7 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
                    int nr_actions, struct tcf_result *res);
 int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
                    struct nlattr *est, char *name, int ovr, int bind,
-                   struct tc_action *actions[], size_t *attr_size,
+                   struct tc_action *actions[], int init_res[], size_t *attr_size,
                    bool rtnl_held, struct netlink_ext_ack *extack);
 struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla,
                                         bool rtnl_held,
@@ -193,7 +188,8 @@ struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla,
 struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
                                    struct nlattr *nla, struct nlattr *est,
                                    char *name, int ovr, int bind,
-                                   struct tc_action_ops *ops, bool rtnl_held,
+                                   struct tc_action_ops *a_o, int *init_res,
+                                   bool rtnl_held,
                                    struct netlink_ext_ack *extack);
 int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind,
                    int ref, bool terse);
index 59f45b1..e816b6a 100644 (file)
@@ -72,7 +72,9 @@ struct netns_xfrm {
 #if IS_ENABLED(CONFIG_IPV6)
        struct dst_ops          xfrm6_dst_ops;
 #endif
-       spinlock_t xfrm_state_lock;
+       spinlock_t              xfrm_state_lock;
+       seqcount_spinlock_t     xfrm_state_hash_generation;
+
        spinlock_t xfrm_policy_lock;
        struct mutex xfrm_cfg_mutex;
 };
index 0b39eff..be11dbd 100644 (file)
@@ -171,9 +171,9 @@ static inline void red_set_vars(struct red_vars *v)
 static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog,
                                    u8 Scell_log, u8 *stab)
 {
-       if (fls(qth_min) + Wlog > 32)
+       if (fls(qth_min) + Wlog >= 32)
                return false;
-       if (fls(qth_max) + Wlog > 32)
+       if (fls(qth_max) + Wlog >= 32)
                return false;
        if (Scell_log >= 32)
                return false;
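To illustrate the tightened bound with concrete, made-up numbers: for qth_min = 0x80000, fls() returns 20, so with Wlog = 12 the sum is exactly 32. The old "> 32" check accepted these parameters even though qth_min << Wlog already reaches bit 31 (0x80000 << 12 == 0x80000000); the ">= 32" check rejects them, keeping the scaled thresholds strictly below 2^31.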
index 4da61c9..479f60e 100644 (file)
@@ -147,8 +147,8 @@ struct rtnl_af_ops {
        int                     (*validate_link_af)(const struct net_device *dev,
                                                    const struct nlattr *attr);
        int                     (*set_link_af)(struct net_device *dev,
-                                              const struct nlattr *attr);
-
+                                              const struct nlattr *attr,
+                                              struct netlink_ext_ack *extack);
        int                     (*fill_stats_af)(struct sk_buff *skb,
                                                 const struct net_device *dev);
        size_t                  (*get_stats_af_size)(const struct net_device *dev);
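An address-family implementation updated for the new prototype might look like the hypothetical stub below; only the extra extack argument and NL_SET_ERR_MSG() are taken from the kernel, the rest is illustrative:

static int foo_set_link_af(struct net_device *dev, const struct nlattr *attr,
                           struct netlink_ext_ack *extack)
{
        if (!attr) {
                /* Failures can now carry a human-readable reason to userspace. */
                NL_SET_ERR_MSG(extack, "Missing address-family attributes");
                return -EINVAL;
        }

        return 0;
}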
index 0b6266f..8487f58 100644 (file)
@@ -934,9 +934,13 @@ static inline void sk_acceptq_added(struct sock *sk)
        WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
 }
 
+/* Note: If you think the test should be:
+ *     return READ_ONCE(sk->sk_ack_backlog) >= READ_ONCE(sk->sk_max_ack_backlog);
+ * Then please take a look at commit 64a146513f8f ("[NET]: Revert incorrect accept queue backlog changes.")
+ */
 static inline bool sk_acceptq_is_full(const struct sock *sk)
 {
-       return READ_ONCE(sk->sk_ack_backlog) >= READ_ONCE(sk->sk_max_ack_backlog);
+       return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog);
 }
 
 /*
@@ -2221,6 +2225,15 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
        sk_mem_charge(sk, skb->truesize);
 }
 
+static inline void skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
+{
+       if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
+               skb_orphan(skb);
+               skb->destructor = sock_efree;
+               skb->sk = sk;
+       }
+}
+
 void sk_reset_timer(struct sock *sk, struct timer_list *timer,
                    unsigned long expires);
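A worked example of the sk_acceptq_is_full() comparison restored above, with illustrative numbers: take sk_max_ack_backlog = 1 and assume the fullness check runs before each enqueue. With ">=", the queue reports full after a single queued socket; with ">", a second socket is still admitted before the queue reports full, so a listener effectively queues backlog + 1 sockets, matching the historical behaviour referenced by commit 64a146513f8f.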
 
index b2a06f1..c58a6d4 100644 (file)
@@ -1097,7 +1097,7 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
                return __xfrm_policy_check(sk, ndir, skb, family);
 
        return  (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) ||
-               (skb_dst(skb)->flags & DST_NOPOLICY) ||
+               (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) ||
                __xfrm_policy_check(sk, ndir, skb, family);
 }
 
@@ -1557,7 +1557,7 @@ int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
 int xfrm_trans_queue(struct sk_buff *skb,
                     int (*finish)(struct net *, struct sock *,
                                   struct sk_buff *));
-int xfrm_output_resume(struct sk_buff *skb, int err);
+int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err);
 int xfrm_output(struct sock *sk, struct sk_buff *skb);
 
 #if IS_ENABLED(CONFIG_NET_PKTGEN)
index 8a26a2f..fc5a398 100644 (file)
@@ -193,6 +193,7 @@ enum iscsi_connection_state {
        ISCSI_CONN_UP = 0,
        ISCSI_CONN_DOWN,
        ISCSI_CONN_FAILED,
+       ISCSI_CONN_BOUND,
 };
 
 struct iscsi_cls_conn {
index ac6474e..d0a64ee 100644 (file)
@@ -2,29 +2,6 @@
 #ifndef _UAPI__LINUX_BLKPG_H
 #define _UAPI__LINUX_BLKPG_H
 
-/*
- * Partition table and disk geometry handling
- *
- * A single ioctl with lots of subfunctions:
- *
- * Device number stuff:
- *    get_whole_disk()         (given the device number of a partition,
- *                               find the device number of the encompassing disk)
- *    get_all_partitions()     (given the device number of a disk, return the
- *                              device numbers of all its known partitions)
- *
- * Partition stuff:
- *    add_partition()
- *    delete_partition()
- *    test_partition_in_use()  (also for test_disk_in_use)
- *
- * Geometry stuff:
- *    get_geometry()
- *    set_geometry()
- *    get_bios_drivedata()
- *
- * For today, only the partition stuff - aeb, 990515
- */
 #include <linux/compiler.h>
 #include <linux/ioctl.h>
 
@@ -52,9 +29,8 @@ struct blkpg_partition {
        long long start;                /* starting offset in bytes */
        long long length;               /* length in bytes */
        int pno;                        /* partition number */
-       char devname[BLKPG_DEVNAMELTH]; /* partition name, like sda5 or c0d1p2,
-                                          to be used in kernel messages */
-       char volname[BLKPG_VOLNAMELTH]; /* volume label */
+       char devname[BLKPG_DEVNAMELTH]; /* unused / ignored */
+       char volname[BLKPG_VOLNAMELTH]; /* unused / ignored */
 };
 
 #endif /* _UAPI__LINUX_BLKPG_H */
index f75238a..c753535 100644 (file)
@@ -113,7 +113,7 @@ struct can_frame {
                 */
                __u8 len;
                __u8 can_dlc; /* deprecated */
-       };
+       } __attribute__((packed)); /* disable padding added in some ABIs */
        __u8 __pad; /* padding */
        __u8 __res0; /* reserved / padding */
        __u8 len8_dlc; /* optional DLC for 8 byte payload length (9 .. 15) */
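As an illustrative userspace compile-time check of what the packed attribute above preserves, assuming the classic 16-byte frame layout:

#include <stddef.h>
#include <linux/can.h>

/* The anonymous union must not introduce padding: the frame stays 16 bytes
 * and the payload still starts at offset 8.
 */
_Static_assert(sizeof(struct can_frame) == 16, "can_frame size changed");
_Static_assert(offsetof(struct can_frame, data) == 8, "payload offset changed");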
index cde753b..5afea69 100644 (file)
  * have the same layout for 32-bit and 64-bit userland.
  */
 
+/* Note on reserved space.
+ * Reserved fields must not be accessed directly by user space because
+ * they may be replaced by a different field in the future. They must
+ * be initialized to zero before making the request, e.g. via memset
+ * of the entire structure or implicitly by not being set in a structure
+ * initializer.
+ */
+
 /**
  * struct ethtool_cmd - DEPRECATED, link control and status
  * This structure is DEPRECATED, please use struct ethtool_link_settings.
@@ -67,6 +75,7 @@
  *     and other link features that the link partner advertised
  *     through autonegotiation; 0 if unknown or not applicable.
  *     Read-only.
+ * @reserved: Reserved for future use; see the note on reserved space.
  *
  * The link speed in Mbps is split between @speed and @speed_hi.  Use
  * the ethtool_cmd_speed() and ethtool_cmd_speed_set() functions to
@@ -155,6 +164,7 @@ static inline __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep)
  * @bus_info: Device bus address.  This should match the dev_name()
  *     string for the underlying bus device, if there is one.  May be
  *     an empty string.
+ * @reserved2: Reserved for future use; see the note on reserved space.
  * @n_priv_flags: Number of flags valid for %ETHTOOL_GPFLAGS and
  *     %ETHTOOL_SPFLAGS commands; also the number of strings in the
  *     %ETH_SS_PRIV_FLAGS set
@@ -356,6 +366,7 @@ struct ethtool_eeprom {
  * @tx_lpi_timer: Time in microseconds the interface delays prior to asserting
  *     its tx lpi (after reaching 'idle' state). Effective only when eee
  *     was negotiated and tx_lpi_enabled was set.
+ * @reserved: Reserved for future use; see the note on reserved space.
  */
 struct ethtool_eee {
        __u32   cmd;
@@ -374,6 +385,7 @@ struct ethtool_eee {
  * @cmd: %ETHTOOL_GMODULEINFO
  * @type: Standard the module information conforms to %ETH_MODULE_SFF_xxxx
  * @eeprom_len: Length of the eeprom
+ * @reserved: Reserved for future use; see the note on reserved space.
  *
  * This structure is used to return the information to
  * properly size memory for a subsequent call to %ETHTOOL_GMODULEEEPROM.
@@ -579,9 +591,7 @@ struct ethtool_pauseparam {
        __u32   tx_pause;
 };
 
-/**
- * enum ethtool_link_ext_state - link extended state
- */
+/* Link extended state */
 enum ethtool_link_ext_state {
        ETHTOOL_LINK_EXT_STATE_AUTONEG,
        ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
@@ -595,10 +605,7 @@ enum ethtool_link_ext_state {
        ETHTOOL_LINK_EXT_STATE_OVERHEAT,
 };
 
-/**
- * enum ethtool_link_ext_substate_autoneg - more information in addition to
- * ETHTOOL_LINK_EXT_STATE_AUTONEG.
- */
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_AUTONEG. */
 enum ethtool_link_ext_substate_autoneg {
        ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED = 1,
        ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED,
@@ -608,9 +615,7 @@ enum ethtool_link_ext_substate_autoneg {
        ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD,
 };
 
-/**
- * enum ethtool_link_ext_substate_link_training - more information in addition to
- * ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE.
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE.
  */
 enum ethtool_link_ext_substate_link_training {
        ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED = 1,
@@ -619,9 +624,7 @@ enum ethtool_link_ext_substate_link_training {
        ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT,
 };
 
-/**
- * enum ethtool_link_ext_substate_logical_mismatch - more information in addition
- * to ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH.
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH.
  */
 enum ethtool_link_ext_substate_link_logical_mismatch {
        ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK = 1,
@@ -631,19 +634,14 @@ enum ethtool_link_ext_substate_link_logical_mismatch {
        ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED,
 };
 
-/**
- * enum ethtool_link_ext_substate_bad_signal_integrity - more information in
- * addition to ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY.
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY.
  */
 enum ethtool_link_ext_substate_bad_signal_integrity {
        ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS = 1,
        ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE,
 };
 
-/**
- * enum ethtool_link_ext_substate_cable_issue - more information in
- * addition to ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE.
- */
+/* More information in addition to ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE. */
 enum ethtool_link_ext_substate_cable_issue {
        ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE = 1,
        ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE,
@@ -661,6 +659,7 @@ enum ethtool_link_ext_substate_cable_issue {
  *     now deprecated
  * @ETH_SS_FEATURES: Device feature names
  * @ETH_SS_RSS_HASH_FUNCS: RSS hash function names
+ * @ETH_SS_TUNABLES: tunable names
  * @ETH_SS_PHY_STATS: Statistic names, for use with %ETHTOOL_GPHYSTATS
  * @ETH_SS_PHY_TUNABLES: PHY tunable names
  * @ETH_SS_LINK_MODES: link mode names
@@ -670,6 +669,8 @@ enum ethtool_link_ext_substate_cable_issue {
  * @ETH_SS_TS_TX_TYPES: timestamping Tx types
  * @ETH_SS_TS_RX_FILTERS: timestamping Rx filters
  * @ETH_SS_UDP_TUNNEL_TYPES: UDP tunnel types
+ *
+ * @ETH_SS_COUNT: number of defined string sets
  */
 enum ethtool_stringset {
        ETH_SS_TEST             = 0,
@@ -715,6 +716,7 @@ struct ethtool_gstrings {
 /**
  * struct ethtool_sset_info - string set information
  * @cmd: Command number = %ETHTOOL_GSSET_INFO
+ * @reserved: Reserved for future use; see the note on reserved space.
  * @sset_mask: On entry, a bitmask of string sets to query, with bits
  *     numbered according to &enum ethtool_stringset.  On return, a
  *     bitmask of those string sets queried that are supported.
@@ -759,6 +761,7 @@ enum ethtool_test_flags {
  * @flags: A bitmask of flags from &enum ethtool_test_flags.  Some
  *     flags may be set by the user on entry; others may be set by
  *     the driver on return.
+ * @reserved: Reserved for future use; see the note on reserved space.
  * @len: On return, the number of test results
  * @data: Array of test results
  *
@@ -959,6 +962,7 @@ union ethtool_flow_union {
  * @vlan_etype: VLAN EtherType
  * @vlan_tci: VLAN tag control information
  * @data: user defined data
+ * @padding: Reserved for future use; see the note on reserved space.
  *
  * Note, @vlan_etype, @vlan_tci, and @data are only valid if %FLOW_EXT
  * is set in &struct ethtool_rx_flow_spec @flow_type.
@@ -1134,7 +1138,8 @@ struct ethtool_rxfh_indir {
  *     hardware hash key.
  * @hfunc: Defines the current RSS hash function used by HW (or to be set to).
  *     Valid values are one of the %ETH_RSS_HASH_*.
- * @rsvd:      Reserved for future extensions.
+ * @rsvd8: Reserved for future use; see the note on reserved space.
+ * @rsvd32: Reserved for future use; see the note on reserved space.
  * @rss_config: RX ring/queue index for each hash value i.e., indirection table
  *     of @indir_size __u32 elements, followed by hash key of @key_size
  *     bytes.
@@ -1302,7 +1307,9 @@ struct ethtool_sfeatures {
  * @so_timestamping: bit mask of the sum of the supported SO_TIMESTAMPING flags
  * @phc_index: device index of the associated PHC, or -1 if there is none
  * @tx_types: bit mask of the supported hwtstamp_tx_types enumeration values
+ * @tx_reserved: Reserved for future use; see the note on reserved space.
  * @rx_filters: bit mask of the supported hwtstamp_rx_filters enumeration values
+ * @rx_reserved: Reserved for future use; see the note on reserved space.
  *
  * The bits in the 'tx_types' and 'rx_filters' fields correspond to
  * the 'hwtstamp_tx_types' and 'hwtstamp_rx_filters' enumeration values,
@@ -1958,6 +1965,11 @@ enum ethtool_reset_flags {
  *     autonegotiation; 0 if unknown or not applicable.  Read-only.
  * @transceiver: Used to distinguish different possible PHY types,
  *     reported consistently by PHYLIB.  Read-only.
+ * @master_slave_cfg: Master/slave port mode.
+ * @master_slave_state: Master/slave port state.
+ * @reserved: Reserved for future use; see the note on reserved space.
+ * @reserved1: Reserved for future use; see the note on reserved space.
+ * @link_mode_masks: Variable length bitmaps.
  *
  * If autonegotiation is disabled, the speed and @duplex represent the
  * fixed link mode and are writable if the driver supports multiple
index 236d437..e33997b 100644 (file)
@@ -247,8 +247,8 @@ struct dsa_completion_record {
                        uint32_t        rsvd2:8;
                };
 
-               uint16_t        delta_rec_size;
-               uint16_t        crc_val;
+               uint32_t        delta_rec_size;
+               uint32_t        crc_val;
 
                /* DIF check & strip */
                struct {
index 03e8af8..9b77cfc 100644 (file)
@@ -86,34 +86,90 @@ enum rfkill_hard_block_reasons {
  * @op: operation code
  * @hard: hard state (0/1)
  * @soft: soft state (0/1)
+ *
+ * Structure used for userspace communication on /dev/rfkill,
+ * used for events from the kernel and control to the kernel.
+ */
+struct rfkill_event {
+       __u32 idx;
+       __u8  type;
+       __u8  op;
+       __u8  soft;
+       __u8  hard;
+} __attribute__((packed));
+
+/**
+ * struct rfkill_event_ext - events for userspace on /dev/rfkill
+ * @idx: index of dev rfkill
+ * @type: type of the rfkill struct
+ * @op: operation code
+ * @hard: hard state (0/1)
+ * @soft: soft state (0/1)
  * @hard_block_reasons: valid if hard is set. One or several reasons from
  *     &enum rfkill_hard_block_reasons.
  *
  * Structure used for userspace communication on /dev/rfkill,
  * used for events from the kernel and control to the kernel.
+ *
+ * See the extensibility docs below.
  */
-struct rfkill_event {
+struct rfkill_event_ext {
        __u32 idx;
        __u8  type;
        __u8  op;
        __u8  soft;
        __u8  hard;
+
+       /*
+        * older kernels will accept/send only up to this point,
+        * and if extended further up to any chunk marked below
+        */
+
        __u8  hard_block_reasons;
 } __attribute__((packed));
 
-/*
- * We are planning to be backward and forward compatible with changes
- * to the event struct, by adding new, optional, members at the end.
- * When reading an event (whether the kernel from userspace or vice
- * versa) we need to accept anything that's at least as large as the
- * version 1 event size, but might be able to accept other sizes in
- * the future.
+/**
+ * DOC: Extensibility
+ *
+ * Originally, we had planned to allow backward and forward compatible
+ * changes by just adding fields at the end of the structure that are
+ * then not reported on older kernels on read(), and not written to by
+ * older kernels on write(), with the kernel reporting the size it did
+ * accept as the result.
+ *
+ * This would have allowed userspace to detect on read() and write()
+ * which kernel structure version it was dealing with, and if it was just
+ * recompiled it would have gotten the new fields, though obviously not
+ * accessed them, and things should've continued to work.
+ *
+ * Unfortunately, while actually exercising this mechanism to add the
+ * hard block reasons field, we found that userspace (notably systemd)
+ * did all kinds of fun things not in line with this scheme:
+ *
+ * 1. treat the (expected) short writes as an error;
+ * 2. ask to read sizeof(struct rfkill_event) but then compare the
+ *    actual return value to RFKILL_EVENT_SIZE_V1 and treat any
+ *    mismatch as an error.
+ *
+ * As a consequence, just recompiling with a new struct version caused
+ * things to no longer work correctly on old and new kernels.
+ *
+ * Hence, we've rolled back &struct rfkill_event to the original version
+ * and added &struct rfkill_event_ext. This effectively reverts to the
+ * old behaviour for all userspace, unless it explicitly opts in to the
+ * rules outlined here by using the new &struct rfkill_event_ext.
+ *
+ * Userspace using &struct rfkill_event_ext must adhere to the following
+ * rules:
  *
- * One exception is the kernel -- we already have two event sizes in
- * that we've made the 'hard' member optional since our only option
- * is to ignore it anyway.
+ * 1. accept short writes, optionally using them to detect that it's
+ *    running on an older kernel;
+ * 2. accept short reads, knowing that this means it's running on an
+ *    older kernel;
+ * 3. treat reads that are as long as requested as acceptable, not
+ *    checking against RFKILL_EVENT_SIZE_V1 or such.
  */
-#define RFKILL_EVENT_SIZE_V1   8
+#define RFKILL_EVENT_SIZE_V1   sizeof(struct rfkill_event)
 
 /* ioctl for turning off rfkill-input (if present) */
 #define RFKILL_IOC_MAGIC       'R'
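
For readers of the extensibility rules above, a minimal userspace sketch of the intended read() behaviour may help. This is an illustration only, assuming headers from a kernel that contains this change, blocking reads on /dev/rfkill, and with error handling trimmed:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <linux/rfkill.h>

    int main(void)
    {
            struct rfkill_event_ext ev = { 0 };
            int fd = open("/dev/rfkill", O_RDONLY);
            ssize_t n;

            if (fd < 0)
                    return 1;

            while ((n = read(fd, &ev, sizeof(ev))) > 0) {
                    /* Rules 2/3 above: anything at least as large as the
                     * v1 event is acceptable; a short read only means an
                     * older kernel, not an error. */
                    if (n < (ssize_t)RFKILL_EVENT_SIZE_V1)
                            break;
                    printf("idx=%u type=%u op=%u soft=%u hard=%u\n",
                           ev.idx, ev.type, ev.op, ev.soft, ev.hard);
                    if (n >= (ssize_t)sizeof(ev))
                            printf("  hard_block_reasons=0x%x\n",
                                   ev.hard_block_reasons);
            }
            close(fd);
            return 0;
    }

Note the deliberate absence of a strict "n == sizeof(ev)" check; comparing the return value against RFKILL_EVENT_SIZE_V1 and treating a mismatch as an error is exactly the pattern the DOC block above asks userspace to avoid.
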
index 3acc7e0..faa54d5 100644 (file)
@@ -84,7 +84,7 @@ static const char *const bpf_atomic_alu_string[16] = {
        [BPF_ADD >> 4]  = "add",
        [BPF_AND >> 4]  = "and",
        [BPF_OR >> 4]  = "or",
-       [BPF_XOR >> 4]  = "or",
+       [BPF_XOR >> 4]  = "xor",
 };
 
 static const char *const bpf_ldst_string[] = {
index 1576ff3..d2de2ab 100644 (file)
@@ -543,11 +543,11 @@ int bpf_obj_get_user(const char __user *pathname, int flags)
                return PTR_ERR(raw);
 
        if (type == BPF_TYPE_PROG)
-               ret = bpf_prog_new_fd(raw);
+               ret = (f_flags != O_RDWR) ? -EINVAL : bpf_prog_new_fd(raw);
        else if (type == BPF_TYPE_MAP)
                ret = bpf_map_new_fd(raw, f_flags);
        else if (type == BPF_TYPE_LINK)
-               ret = bpf_link_new_fd(raw);
+               ret = (f_flags != O_RDWR) ? -EINVAL : bpf_link_new_fd(raw);
        else
                return -ENOENT;
 
index be35bfb..6fbc2ab 100644 (file)
@@ -517,9 +517,17 @@ const struct bpf_func_proto bpf_get_stack_proto = {
 BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
           u32, size, u64, flags)
 {
-       struct pt_regs *regs = task_pt_regs(task);
+       struct pt_regs *regs;
+       long res;
 
-       return __bpf_get_stack(regs, task, NULL, buf, size, flags);
+       if (!try_get_task_stack(task))
+               return -EFAULT;
+
+       regs = task_pt_regs(task);
+       res = __bpf_get_stack(regs, task, NULL, buf, size, flags);
+       put_task_stack(task);
+
+       return res;
 }
 
 BTF_ID_LIST_SINGLE(bpf_get_task_stack_btf_ids, struct, task_struct)
index 1f3a4be..4aa8b52 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/btf.h>
 #include <linux/rcupdate_trace.h>
 #include <linux/rcupdate_wait.h>
+#include <linux/module.h>
 
 /* dummy _ops. The verifier will operate on target program's ops. */
 const struct bpf_verifier_ops bpf_extension_verifier_ops = {
@@ -87,6 +88,26 @@ out:
        return tr;
 }
 
+static int bpf_trampoline_module_get(struct bpf_trampoline *tr)
+{
+       struct module *mod;
+       int err = 0;
+
+       preempt_disable();
+       mod = __module_text_address((unsigned long) tr->func.addr);
+       if (mod && !try_module_get(mod))
+               err = -ENOENT;
+       preempt_enable();
+       tr->mod = mod;
+       return err;
+}
+
+static void bpf_trampoline_module_put(struct bpf_trampoline *tr)
+{
+       module_put(tr->mod);
+       tr->mod = NULL;
+}
+
 static int is_ftrace_location(void *ip)
 {
        long addr;
@@ -108,6 +129,9 @@ static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
                ret = unregister_ftrace_direct((long)ip, (long)old_addr);
        else
                ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);
+
+       if (!ret)
+               bpf_trampoline_module_put(tr);
        return ret;
 }
 
@@ -134,10 +158,16 @@ static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
                return ret;
        tr->func.ftrace_managed = ret;
 
+       if (bpf_trampoline_module_get(tr))
+               return -ENOENT;
+
        if (tr->func.ftrace_managed)
                ret = register_ftrace_direct((long)ip, (long)new_addr);
        else
                ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
+
+       if (ret)
+               bpf_trampoline_module_put(tr);
        return ret;
 }
 
index 44e4ec1..0399ac0 100644 (file)
@@ -5856,40 +5856,51 @@ static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
        return &env->insn_aux_data[env->insn_idx];
 }
 
+enum {
+       REASON_BOUNDS   = -1,
+       REASON_TYPE     = -2,
+       REASON_PATHS    = -3,
+       REASON_LIMIT    = -4,
+       REASON_STACK    = -5,
+};
+
 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
-                             u32 *ptr_limit, u8 opcode, bool off_is_neg)
+                             const struct bpf_reg_state *off_reg,
+                             u32 *alu_limit, u8 opcode)
 {
+       bool off_is_neg = off_reg->smin_value < 0;
        bool mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
                            (opcode == BPF_SUB && !off_is_neg);
-       u32 off, max;
+       u32 max = 0, ptr_limit = 0;
+
+       if (!tnum_is_const(off_reg->var_off) &&
+           (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
+               return REASON_BOUNDS;
 
        switch (ptr_reg->type) {
        case PTR_TO_STACK:
                /* Offset 0 is out-of-bounds, but acceptable start for the
-                * left direction, see BPF_REG_FP.
+                * left direction, see BPF_REG_FP. Also, unknown scalar
+                * offset where we would need to deal with min/max bounds is
+                * currently prohibited for unprivileged.
                 */
                max = MAX_BPF_STACK + mask_to_left;
-               /* Indirect variable offset stack access is prohibited in
-                * unprivileged mode so it's not handled here.
-                */
-               off = ptr_reg->off + ptr_reg->var_off.value;
-               if (mask_to_left)
-                       *ptr_limit = MAX_BPF_STACK + off;
-               else
-                       *ptr_limit = -off - 1;
-               return *ptr_limit >= max ? -ERANGE : 0;
+               ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off);
+               break;
        case PTR_TO_MAP_VALUE:
                max = ptr_reg->map_ptr->value_size;
-               if (mask_to_left) {
-                       *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
-               } else {
-                       off = ptr_reg->smin_value + ptr_reg->off;
-                       *ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
-               }
-               return *ptr_limit >= max ? -ERANGE : 0;
+               ptr_limit = (mask_to_left ?
+                            ptr_reg->smin_value :
+                            ptr_reg->umax_value) + ptr_reg->off;
+               break;
        default:
-               return -EINVAL;
+               return REASON_TYPE;
        }
+
+       if (ptr_limit >= max)
+               return REASON_LIMIT;
+       *alu_limit = ptr_limit;
+       return 0;
 }
 
 static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env,
@@ -5907,7 +5918,7 @@ static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
        if (aux->alu_state &&
            (aux->alu_state != alu_state ||
             aux->alu_limit != alu_limit))
-               return -EACCES;
+               return REASON_PATHS;
 
        /* Corresponding fixup done in fixup_bpf_calls(). */
        aux->alu_state = alu_state;
@@ -5926,14 +5937,22 @@ static int sanitize_val_alu(struct bpf_verifier_env *env,
        return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
 }
 
+static bool sanitize_needed(u8 opcode)
+{
+       return opcode == BPF_ADD || opcode == BPF_SUB;
+}
+
 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
                            struct bpf_insn *insn,
                            const struct bpf_reg_state *ptr_reg,
+                           const struct bpf_reg_state *off_reg,
                            struct bpf_reg_state *dst_reg,
-                           bool off_is_neg)
+                           struct bpf_insn_aux_data *tmp_aux,
+                           const bool commit_window)
 {
+       struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : tmp_aux;
        struct bpf_verifier_state *vstate = env->cur_state;
-       struct bpf_insn_aux_data *aux = cur_aux(env);
+       bool off_is_neg = off_reg->smin_value < 0;
        bool ptr_is_dst_reg = ptr_reg == dst_reg;
        u8 opcode = BPF_OP(insn->code);
        u32 alu_state, alu_limit;
@@ -5951,18 +5970,33 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
        if (vstate->speculative)
                goto do_sim;
 
-       alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
-       alu_state |= ptr_is_dst_reg ?
-                    BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
-
-       err = retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg);
+       err = retrieve_ptr_limit(ptr_reg, off_reg, &alu_limit, opcode);
        if (err < 0)
                return err;
 
+       if (commit_window) {
+               /* In commit phase we narrow the masking window based on
+                * the observed pointer move after the simulated operation.
+                */
+               alu_state = tmp_aux->alu_state;
+               alu_limit = abs(tmp_aux->alu_limit - alu_limit);
+       } else {
+               alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
+               alu_state |= ptr_is_dst_reg ?
+                            BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+       }
+
        err = update_alu_sanitation_state(aux, alu_state, alu_limit);
        if (err < 0)
                return err;
 do_sim:
+       /* If we're in commit phase, we're done here given we already
+        * pushed the truncated dst_reg into the speculative verification
+        * stack.
+        */
+       if (commit_window)
+               return 0;
+
        /* Simulate and find potential out-of-bounds access under
         * speculative execution from truncation as a result of
         * masking when off was not within expected range. If off
@@ -5979,7 +6013,46 @@ do_sim:
        ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
        if (!ptr_is_dst_reg && ret)
                *dst_reg = tmp;
-       return !ret ? -EFAULT : 0;
+       return !ret ? REASON_STACK : 0;
+}
+
+static int sanitize_err(struct bpf_verifier_env *env,
+                       const struct bpf_insn *insn, int reason,
+                       const struct bpf_reg_state *off_reg,
+                       const struct bpf_reg_state *dst_reg)
+{
+       static const char *err = "pointer arithmetic with it prohibited for !root";
+       const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
+       u32 dst = insn->dst_reg, src = insn->src_reg;
+
+       switch (reason) {
+       case REASON_BOUNDS:
+               verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n",
+                       off_reg == dst_reg ? dst : src, err);
+               break;
+       case REASON_TYPE:
+               verbose(env, "R%d has pointer with unsupported alu operation, %s\n",
+                       off_reg == dst_reg ? src : dst, err);
+               break;
+       case REASON_PATHS:
+               verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n",
+                       dst, op, err);
+               break;
+       case REASON_LIMIT:
+               verbose(env, "R%d tried to %s beyond pointer bounds, %s\n",
+                       dst, op, err);
+               break;
+       case REASON_STACK:
+               verbose(env, "R%d could not be pushed for speculative verification, %s\n",
+                       dst, err);
+               break;
+       default:
+               verbose(env, "verifier internal error: unknown reason (%d)\n",
+                       reason);
+               break;
+       }
+
+       return -EACCES;
 }
 
 /* check that stack access falls within stack limits and that 'reg' doesn't
@@ -6016,6 +6089,37 @@ static int check_stack_access_for_ptr_arithmetic(
        return 0;
 }
 
+static int sanitize_check_bounds(struct bpf_verifier_env *env,
+                                const struct bpf_insn *insn,
+                                const struct bpf_reg_state *dst_reg)
+{
+       u32 dst = insn->dst_reg;
+
+       /* For unprivileged we require that resulting offset must be in bounds
+        * in order to be able to sanitize access later on.
+        */
+       if (env->bypass_spec_v1)
+               return 0;
+
+       switch (dst_reg->type) {
+       case PTR_TO_STACK:
+               if (check_stack_access_for_ptr_arithmetic(env, dst, dst_reg,
+                                       dst_reg->off + dst_reg->var_off.value))
+                       return -EACCES;
+               break;
+       case PTR_TO_MAP_VALUE:
+               if (check_map_access(env, dst, dst_reg->off, 1, false)) {
+                       verbose(env, "R%d pointer arithmetic of map value goes out of range, "
+                               "prohibited for !root\n", dst);
+                       return -EACCES;
+               }
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
 
 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
  * Caller should also handle BPF_MOV case separately.
@@ -6035,8 +6139,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
            smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
        u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
            umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
-       u32 dst = insn->dst_reg, src = insn->src_reg;
+       struct bpf_insn_aux_data tmp_aux = {};
        u8 opcode = BPF_OP(insn->code);
+       u32 dst = insn->dst_reg;
        int ret;
 
        dst_reg = &regs[dst];
@@ -6084,13 +6189,6 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
                verbose(env, "R%d pointer arithmetic on %s prohibited\n",
                        dst, reg_type_str[ptr_reg->type]);
                return -EACCES;
-       case PTR_TO_MAP_VALUE:
-               if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
-                       verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
-                               off_reg == dst_reg ? dst : src);
-                       return -EACCES;
-               }
-               fallthrough;
        default:
                break;
        }
@@ -6108,13 +6206,15 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
        /* pointer types do not carry 32-bit bounds at the moment. */
        __mark_reg32_unbounded(dst_reg);
 
+       if (sanitize_needed(opcode)) {
+               ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
+                                      &tmp_aux, false);
+               if (ret < 0)
+                       return sanitize_err(env, insn, ret, off_reg, dst_reg);
+       }
+
        switch (opcode) {
        case BPF_ADD:
-               ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
-               if (ret < 0) {
-                       verbose(env, "R%d tried to add from different maps, paths, or prohibited types\n", dst);
-                       return ret;
-               }
                /* We can take a fixed offset as long as it doesn't overflow
                 * the s32 'off' field
                 */
@@ -6165,11 +6265,6 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
                }
                break;
        case BPF_SUB:
-               ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
-               if (ret < 0) {
-                       verbose(env, "R%d tried to sub from different maps, paths, or prohibited types\n", dst);
-                       return ret;
-               }
                if (dst_reg == off_reg) {
                        /* scalar -= pointer.  Creates an unknown scalar */
                        verbose(env, "R%d tried to subtract pointer from scalar\n",
@@ -6250,21 +6345,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
        __reg_deduce_bounds(dst_reg);
        __reg_bound_offset(dst_reg);
 
-       /* For unprivileged we require that resulting offset must be in bounds
-        * in order to be able to sanitize access later on.
-        */
-       if (!env->bypass_spec_v1) {
-               if (dst_reg->type == PTR_TO_MAP_VALUE &&
-                   check_map_access(env, dst, dst_reg->off, 1, false)) {
-                       verbose(env, "R%d pointer arithmetic of map value goes out of range, "
-                               "prohibited for !root\n", dst);
-                       return -EACCES;
-               } else if (dst_reg->type == PTR_TO_STACK &&
-                          check_stack_access_for_ptr_arithmetic(
-                                  env, dst, dst_reg, dst_reg->off +
-                                  dst_reg->var_off.value)) {
-                       return -EACCES;
-               }
+       if (sanitize_check_bounds(env, insn, dst_reg) < 0)
+               return -EACCES;
+       if (sanitize_needed(opcode)) {
+               ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
+                                      &tmp_aux, true);
+               if (ret < 0)
+                       return sanitize_err(env, insn, ret, off_reg, dst_reg);
        }
 
        return 0;
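
To make the two-pass sanitization above concrete, here is a worked example with illustrative numbers (not taken from the patch): suppose an unprivileged program adds an unknown scalar to a stack pointer currently at fp-16. The first pass (commit_window false) records alu_limit = 16 via retrieve_ptr_limit() and simulates the operation with the truncated register pushed for speculative verification. If the simulated addition leaves the pointer at fp-8, the commit pass recomputes a limit of 8 and narrows the mask to abs(16 - 8) = 8, i.e. to the pointer movement the verifier actually observed rather than the whole remaining stack range.
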
@@ -6858,9 +6945,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
        s32 s32_min_val, s32_max_val;
        u32 u32_min_val, u32_max_val;
        u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
-       u32 dst = insn->dst_reg;
-       int ret;
        bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64);
+       int ret;
 
        smin_val = src_reg.smin_value;
        smax_val = src_reg.smax_value;
@@ -6902,6 +6988,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                return 0;
        }
 
+       if (sanitize_needed(opcode)) {
+               ret = sanitize_val_alu(env, insn);
+               if (ret < 0)
+                       return sanitize_err(env, insn, ret, NULL, NULL);
+       }
+
        /* Calculate sign/unsigned bounds and tnum for alu32 and alu64 bit ops.
         * There are two classes of instructions: The first class we track both
         * alu32 and alu64 sign/unsigned bounds independently this provides the
@@ -6918,21 +7010,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
         */
        switch (opcode) {
        case BPF_ADD:
-               ret = sanitize_val_alu(env, insn);
-               if (ret < 0) {
-                       verbose(env, "R%d tried to add from different pointers or scalars\n", dst);
-                       return ret;
-               }
                scalar32_min_max_add(dst_reg, &src_reg);
                scalar_min_max_add(dst_reg, &src_reg);
                dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
                break;
        case BPF_SUB:
-               ret = sanitize_val_alu(env, insn);
-               if (ret < 0) {
-                       verbose(env, "R%d tried to sub from different pointers or scalars\n", dst);
-                       return ret;
-               }
                scalar32_min_max_sub(dst_reg, &src_reg);
                scalar_min_max_sub(dst_reg, &src_reg);
                dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
@@ -12158,6 +12240,11 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
        u32 btf_id, member_idx;
        const char *mname;
 
+       if (!prog->gpl_compatible) {
+               verbose(env, "struct ops programs must have a GPL compatible license\n");
+               return -EINVAL;
+       }
+
        btf_id = prog->aux->attach_btf_id;
        st_ops = bpf_struct_ops_find(btf_id);
        if (!st_ops) {
index 8743150..b81f282 100644 (file)
@@ -70,7 +70,9 @@ struct gcov_fn_info {
 
        u32 ident;
        u32 checksum;
+#if CONFIG_CLANG_VERSION < 110000
        u8 use_extra_checksum;
+#endif
        u32 cfg_checksum;
 
        u32 num_counters;
@@ -145,10 +147,8 @@ void llvm_gcda_emit_function(u32 ident, const char *function_name,
 
        list_add_tail(&info->head, &current_info->functions);
 }
-EXPORT_SYMBOL(llvm_gcda_emit_function);
 #else
-void llvm_gcda_emit_function(u32 ident, u32 func_checksum,
-               u8 use_extra_checksum, u32 cfg_checksum)
+void llvm_gcda_emit_function(u32 ident, u32 func_checksum, u32 cfg_checksum)
 {
        struct gcov_fn_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
 
@@ -158,12 +158,11 @@ void llvm_gcda_emit_function(u32 ident, u32 func_checksum,
        INIT_LIST_HEAD(&info->head);
        info->ident = ident;
        info->checksum = func_checksum;
-       info->use_extra_checksum = use_extra_checksum;
        info->cfg_checksum = cfg_checksum;
        list_add_tail(&info->head, &current_info->functions);
 }
-EXPORT_SYMBOL(llvm_gcda_emit_function);
 #endif
+EXPORT_SYMBOL(llvm_gcda_emit_function);
 
 void llvm_gcda_emit_arcs(u32 num_counters, u64 *counters)
 {
@@ -293,11 +292,16 @@ int gcov_info_is_compatible(struct gcov_info *info1, struct gcov_info *info2)
                !list_is_last(&fn_ptr2->head, &info2->functions)) {
                if (fn_ptr1->checksum != fn_ptr2->checksum)
                        return false;
+#if CONFIG_CLANG_VERSION < 110000
                if (fn_ptr1->use_extra_checksum != fn_ptr2->use_extra_checksum)
                        return false;
                if (fn_ptr1->use_extra_checksum &&
                        fn_ptr1->cfg_checksum != fn_ptr2->cfg_checksum)
                        return false;
+#else
+               if (fn_ptr1->cfg_checksum != fn_ptr2->cfg_checksum)
+                       return false;
+#endif
                fn_ptr1 = list_next_entry(fn_ptr1, head);
                fn_ptr2 = list_next_entry(fn_ptr2, head);
        }
@@ -365,7 +369,7 @@ static struct gcov_fn_info *gcov_fn_info_dup(struct gcov_fn_info *fn)
        INIT_LIST_HEAD(&fn_dup->head);
 
        cv_size = fn->num_counters * sizeof(fn->counters[0]);
-       fn_dup->counters = vmalloc(cv_size);
+       fn_dup->counters = kvmalloc(cv_size, GFP_KERNEL);
        if (!fn_dup->counters) {
                kfree(fn_dup);
                return NULL;
@@ -529,17 +533,22 @@ static size_t convert_to_gcda(char *buffer, struct gcov_info *info)
 
        list_for_each_entry(fi_ptr, &info->functions, head) {
                u32 i;
-               u32 len = 2;
-
-               if (fi_ptr->use_extra_checksum)
-                       len++;
 
                pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION);
-               pos += store_gcov_u32(buffer, pos, len);
+#if CONFIG_CLANG_VERSION < 110000
+               pos += store_gcov_u32(buffer, pos,
+                       fi_ptr->use_extra_checksum ? 3 : 2);
+#else
+               pos += store_gcov_u32(buffer, pos, 3);
+#endif
                pos += store_gcov_u32(buffer, pos, fi_ptr->ident);
                pos += store_gcov_u32(buffer, pos, fi_ptr->checksum);
+#if CONFIG_CLANG_VERSION < 110000
                if (fi_ptr->use_extra_checksum)
                        pos += store_gcov_u32(buffer, pos, fi_ptr->cfg_checksum);
+#else
+               pos += store_gcov_u32(buffer, pos, fi_ptr->cfg_checksum);
+#endif
 
                pos += store_gcov_u32(buffer, pos, GCOV_TAG_COUNTER_BASE);
                pos += store_gcov_u32(buffer, pos, fi_ptr->num_counters * 2);
index c6d0c1d..f160f1c 100644 (file)
@@ -705,7 +705,7 @@ static void print_lock_name(struct lock_class *class)
 
        printk(KERN_CONT " (");
        __print_lock_name(class);
-       printk(KERN_CONT "){%s}-{%hd:%hd}", usage,
+       printk(KERN_CONT "){%s}-{%d:%d}", usage,
                        class->wait_type_outer ?: class->wait_type_inner,
                        class->wait_type_inner);
 }
@@ -930,7 +930,8 @@ static bool assign_lock_key(struct lockdep_map *lock)
                /* Debug-check: all keys must be persistent! */
                debug_locks_off();
                pr_err("INFO: trying to register non-static key.\n");
-               pr_err("the code is fine but needs lockdep annotation.\n");
+               pr_err("The code is fine but needs lockdep annotation, or maybe\n");
+               pr_err("you didn't initialize this object before use?\n");
                pr_err("turning off the locking correctness validator.\n");
                dump_stack();
                return false;
index b7e29db..3ba52d4 100644 (file)
@@ -3231,7 +3231,8 @@ ftrace_allocate_pages(unsigned long num_to_init)
        pg = start_pg;
        while (pg) {
                order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-               free_pages((unsigned long)pg->records, order);
+               if (order >= 0)
+                       free_pages((unsigned long)pg->records, order);
                start_pg = pg->next;
                kfree(pg);
                pg = start_pg;
@@ -6451,7 +6452,8 @@ void ftrace_release_mod(struct module *mod)
                clear_mod_from_hashes(pg);
 
                order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-               free_pages((unsigned long)pg->records, order);
+               if (order >= 0)
+                       free_pages((unsigned long)pg->records, order);
                tmp_page = pg->next;
                kfree(pg);
                ftrace_number_of_pages -= 1 << order;
@@ -6811,7 +6813,8 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
                if (!pg->index) {
                        *last_pg = pg->next;
                        order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-                       free_pages((unsigned long)pg->records, order);
+                       if (order >= 0)
+                               free_pages((unsigned long)pg->records, order);
                        ftrace_number_of_pages -= 1 << order;
                        ftrace_number_of_groups--;
                        kfree(pg);
index eccb4e1..5c77762 100644 (file)
@@ -2984,7 +2984,8 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
 
        size = nr_entries * sizeof(unsigned long);
        event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
-                                           sizeof(*entry) + size, trace_ctx);
+                                   (sizeof(*entry) - sizeof(entry->caller)) + size,
+                                   trace_ctx);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
index dc971a6..e57cc08 100644 (file)
@@ -63,8 +63,10 @@ int dyn_event_release(const char *raw_command, struct dyn_event_operations *type
                event = p + 1;
                *p = '\0';
        }
-       if (event[0] == '\0')
-               return -EINVAL;
+       if (event[0] == '\0') {
+               ret = -EINVAL;
+               goto out;
+       }
 
        mutex_lock(&event_mutex);
        for_each_dyn_event_safe(pos, n) {
index 7110906..107bc38 100644 (file)
@@ -278,9 +278,10 @@ void touch_all_softlockup_watchdogs(void)
         * update as well, the only side effect might be a cycle delay for
         * the softlockup check.
         */
-       for_each_cpu(cpu, &watchdog_allowed_mask)
+       for_each_cpu(cpu, &watchdog_allowed_mask) {
                per_cpu(watchdog_touch_ts, cpu) = SOFTLOCKUP_RESET;
-       wq_watchdog_touch(-1);
+               wq_watchdog_touch(cpu);
+       }
 }
 
 void touch_softlockup_watchdog_sync(void)
index 0d150da..79f2319 100644 (file)
@@ -1412,7 +1412,6 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
         */
        lockdep_assert_irqs_disabled();
 
-       debug_work_activate(work);
 
        /* if draining, only works from the same workqueue are allowed */
        if (unlikely(wq->flags & __WQ_DRAINING) &&
@@ -1494,6 +1493,7 @@ retry:
                worklist = &pwq->delayed_works;
        }
 
+       debug_work_activate(work);
        insert_work(pwq, work, worklist, work_flags);
 
 out:
@@ -5787,22 +5787,17 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
                        continue;
 
                /* get the latest of pool and touched timestamps */
+               if (pool->cpu >= 0)
+                       touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
+               else
+                       touched = READ_ONCE(wq_watchdog_touched);
                pool_ts = READ_ONCE(pool->watchdog_ts);
-               touched = READ_ONCE(wq_watchdog_touched);
 
                if (time_after(pool_ts, touched))
                        ts = pool_ts;
                else
                        ts = touched;
 
-               if (pool->cpu >= 0) {
-                       unsigned long cpu_touched =
-                               READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
-                                                 pool->cpu));
-                       if (time_after(cpu_touched, ts))
-                               ts = cpu_touched;
-               }
-
                /* did we stall? */
                if (time_after(jiffies, ts + thresh)) {
                        lockup_detected = true;
@@ -5826,8 +5821,8 @@ notrace void wq_watchdog_touch(int cpu)
 {
        if (cpu >= 0)
                per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
-       else
-               wq_watchdog_touched = jiffies;
+
+       wq_watchdog_touched = jiffies;
 }
 
 static void wq_watchdog_set_thresh(unsigned long thresh)
index 2779c29..417c3d3 100644 (file)
@@ -1363,7 +1363,7 @@ config LOCKDEP
        bool
        depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
        select STACKTRACE
-       select FRAME_POINTER if !MIPS && !PPC && !ARM && !S390 && !MICROBLAZE && !ARC && !X86
+       depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
        select KALLSYMS
        select KALLSYMS_ALL
 
@@ -1665,7 +1665,7 @@ config LATENCYTOP
        depends on DEBUG_KERNEL
        depends on STACKTRACE_SUPPORT
        depends on PROC_FS
-       select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
+       depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
        select KALLSYMS
        select KALLSYMS_ALL
        select STACKTRACE
@@ -1918,7 +1918,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
        depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
        depends on !X86_64
        select STACKTRACE
-       select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
+       depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
        help
          Provide stacktrace filter for fault-injection capabilities
 
index fba9909..cffc2eb 100644 (file)
@@ -138,9 +138,10 @@ config KASAN_INLINE
 
 endchoice
 
-config KASAN_STACK_ENABLE
+config KASAN_STACK
        bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
        depends on KASAN_GENERIC || KASAN_SW_TAGS
+       default y if CC_IS_GCC
        help
           The LLVM stack address sanitizer has a known problem that
          causes excessive stack usage in a lot of functions, see
@@ -154,12 +155,6 @@ config KASAN_STACK_ENABLE
          CONFIG_COMPILE_TEST.  On gcc it is assumed to always be safe
          to use and enabled by default.
 
-config KASAN_STACK
-       int
-       depends on KASAN_GENERIC || KASAN_SW_TAGS
-       default 1 if KASAN_STACK_ENABLE || CC_IS_GCC
-       default 0
-
 config KASAN_SW_TAGS_IDENTIFY
        bool "Enable memory corruption identification"
        depends on KASAN_SW_TAGS
index e836288..7921193 100644 (file)
@@ -40,7 +40,7 @@ enum cpio_fields {
 };
 
 /**
- * cpio_data find_cpio_data - Search for files in an uncompressed cpio
+ * find_cpio_data - Search for files in an uncompressed cpio
  * @path:       The directory to search for, including a slash at the end
  * @data:       Pointer to the cpio archive or a header inside
  * @len:        Remaining length of the cpio based on data pointer
@@ -49,7 +49,7 @@ enum cpio_fields {
  *              matching file itself. It can be used to iterate through the cpio
  *              to find all files inside of a directory path.
  *
- * @return:     struct cpio_data containing the address, length and
+ * Return:      &struct cpio_data containing the address, length and
  *              filename (with the directory path cut off) of the found file.
  *              If you search for a filename and not for files in a directory,
  *              pass the absolute path of the filename in the cpio and make sure
index c69ee53..52313ac 100644 (file)
@@ -76,6 +76,7 @@ int lc_try_lock(struct lru_cache *lc)
 /**
  * lc_create - prepares to track objects in an active set
  * @name: descriptive name only used in lc_seq_printf_stats and lc_seq_dump_details
+ * @cache: cache root pointer
  * @max_pending_changes: maximum changes to accumulate until a transaction is required
  * @e_count: number of elements allowed to be active simultaneously
  * @e_size: size of the tracked objects
@@ -627,7 +628,7 @@ void lc_set(struct lru_cache *lc, unsigned int enr, int index)
 }
 
 /**
- * lc_dump - Dump a complete LRU cache to seq in textual form.
+ * lc_seq_dump_details - Dump a complete LRU cache to seq in textual form.
  * @lc: the lru cache to operate on
  * @seq: the &struct seq_file pointer to seq_printf into
  * @utext: user supplied additional "heading" or other info
index a11f2f6..3f8f8d4 100644 (file)
@@ -297,7 +297,7 @@ EXPORT_SYMBOL(parman_destroy);
  * parman_prio_init - initializes a parman priority chunk
  * @parman:    parman instance
  * @prio:      parman prio structure to be initialized
- * @prority:   desired priority of the chunk
+ * @priority:  desired priority of the chunk
  *
  * Note: all locking must be provided by the caller.
  *
@@ -356,7 +356,7 @@ int parman_item_add(struct parman *parman, struct parman_prio *prio,
 EXPORT_SYMBOL(parman_item_add);
 
 /**
- * parman_item_del - deletes parman item
+ * parman_item_remove - deletes parman item
  * @parman:    parman instance
  * @prio:      parman prio instance to delete the item from
  * @item:      parman item instance
index 3a4da11..b3afafe 100644 (file)
@@ -166,9 +166,9 @@ static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
 /**
  * radix_tree_find_next_bit - find the next set bit in a memory region
  *
- * @addr: The address to base the search on
- * @size: The bitmap size in bits
- * @offset: The bitnumber to start searching at
+ * @node: where to begin the search
+ * @tag: the tag index
+ * @offset: the bitnumber to start searching at
  *
  * Unrollable variant of find_next_bit() for constant size arrays.
  * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
@@ -461,7 +461,7 @@ out:
 
 /**
  *     radix_tree_shrink    -    shrink radix tree to minimum height
- *     @root           radix tree root
+ *     @root:          radix tree root
  */
 static inline bool radix_tree_shrink(struct radix_tree_root *root)
 {
@@ -691,7 +691,7 @@ static inline int insert_entries(struct radix_tree_node *node,
 }
 
 /**
- *     __radix_tree_insert    -    insert into a radix tree
+ *     radix_tree_insert    -    insert into a radix tree
  *     @root:          radix tree root
  *     @index:         index key
  *     @item:          item to insert
@@ -919,6 +919,7 @@ EXPORT_SYMBOL(radix_tree_replace_slot);
 /**
  * radix_tree_iter_replace - replace item in a slot
  * @root:      radix tree root
+ * @iter:      iterator state
  * @slot:      pointer to slot
  * @item:      new item to store in the slot.
  *
index eee017f..f1017f3 100644 (file)
@@ -22,7 +22,7 @@ static noinline void __init copy_user_test(void)
        char *kmem;
        char __user *usermem;
        size_t size = 10;
-       int unused;
+       int __maybe_unused unused;
 
        kmem = kmalloc(size, GFP_KERNEL);
        if (!kmem)
index 8294f43..8b1c318 100644 (file)
@@ -1530,24 +1530,24 @@ static noinline void check_store_range(struct xarray *xa)
 
 #ifdef CONFIG_XARRAY_MULTI
 static void check_split_1(struct xarray *xa, unsigned long index,
-                                                       unsigned int order)
+                               unsigned int order, unsigned int new_order)
 {
-       XA_STATE(xas, xa, index);
-       void *entry;
-       unsigned int i = 0;
+       XA_STATE_ORDER(xas, xa, index, new_order);
+       unsigned int i;
 
        xa_store_order(xa, index, order, xa, GFP_KERNEL);
 
        xas_split_alloc(&xas, xa, order, GFP_KERNEL);
        xas_lock(&xas);
        xas_split(&xas, xa, order);
+       for (i = 0; i < (1 << order); i += (1 << new_order))
+               __xa_store(xa, index + i, xa_mk_index(index + i), 0);
        xas_unlock(&xas);
 
-       xa_for_each(xa, index, entry) {
-               XA_BUG_ON(xa, entry != xa);
-               i++;
+       for (i = 0; i < (1 << order); i++) {
+               unsigned int val = index + (i & ~((1 << new_order) - 1));
+               XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val));
        }
-       XA_BUG_ON(xa, i != 1 << order);
 
        xa_set_mark(xa, index, XA_MARK_0);
        XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
@@ -1557,14 +1557,16 @@ static void check_split_1(struct xarray *xa, unsigned long index,
 
 static noinline void check_split(struct xarray *xa)
 {
-       unsigned int order;
+       unsigned int order, new_order;
 
        XA_BUG_ON(xa, !xa_empty(xa));
 
        for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
-               check_split_1(xa, 0, order);
-               check_split_1(xa, 1UL << order, order);
-               check_split_1(xa, 3UL << order, order);
+               for (new_order = 0; new_order < order; new_order++) {
+                       check_split_1(xa, 0, order, new_order);
+                       check_split_1(xa, 1UL << order, order, new_order);
+                       check_split_1(xa, 3UL << order, order, new_order);
+               }
        }
 }
 #else
index 5fa5161..f5d8f54 100644 (file)
@@ -987,7 +987,7 @@ static void node_set_marks(struct xa_node *node, unsigned int offset,
  * xas_split_alloc() - Allocate memory for splitting an entry.
  * @xas: XArray operation state.
  * @entry: New entry which will be stored in the array.
- * @order: New entry order.
+ * @order: Current entry order.
  * @gfp: Memory allocation flags.
  *
  * This function should be called before calling xas_split().
@@ -1011,7 +1011,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
 
        do {
                unsigned int i;
-               void *sibling;
+               void *sibling = NULL;
                struct xa_node *node;
 
                node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
@@ -1021,7 +1021,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
                for (i = 0; i < XA_CHUNK_SIZE; i++) {
                        if ((i & mask) == 0) {
                                RCU_INIT_POINTER(node->slots[i], entry);
-                               sibling = xa_mk_sibling(0);
+                               sibling = xa_mk_sibling(i);
                        } else {
                                RCU_INIT_POINTER(node->slots[i], sibling);
                        }
@@ -1041,9 +1041,10 @@ EXPORT_SYMBOL_GPL(xas_split_alloc);
  * xas_split() - Split a multi-index entry into smaller entries.
  * @xas: XArray operation state.
  * @entry: New entry to store in the array.
- * @order: New entry order.
+ * @order: Current entry order.
  *
- * The value in the entry is copied to all the replacement entries.
+ * The size of the new entries is set in @xas.  The value in @entry is
+ * copied to all the replacement entries.
  *
  * Context: Any context.  The caller should hold the xa_lock.
  */
index e405796..ef7d2da 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1535,6 +1535,10 @@ struct page *get_dump_page(unsigned long addr)
                                      FOLL_FORCE | FOLL_DUMP | FOLL_GET);
        if (locked)
                mmap_read_unlock(mm);
+
+       if (ret == 1 && is_page_poisoned(page))
+               return NULL;
+
        return (ret == 1) ? page : NULL;
 }
 #endif /* CONFIG_ELF_CORE */
index 1432fee..cb3c5e0 100644 (file)
@@ -97,6 +97,26 @@ static inline void set_page_refcounted(struct page *page)
        set_page_count(page, 1);
 }
 
+/*
+ * When the kernel touches a user page, that page may already have been
+ * hardware-poisoned while still being mapped in user space.  If the kernel
+ * can guarantee data integrity and complete the operation without that
+ * page, it should check the poison status first and avoid touching the
+ * page instead of panicking; dumping core for a fatally signalled process
+ * is one case that matches this scenario.  If the kernel cannot guarantee
+ * data integrity without the page, it is better not to call this function
+ * and simply let the access to the poisoned page lead to a panic.
+ */
+static inline bool is_page_poisoned(struct page *page)
+{
+       if (PageHWPoison(page))
+               return true;
+       else if (PageHuge(page) && PageHWPoison(compound_head(page)))
+               return true;
+
+       return false;
+}
+
 extern unsigned long highest_memmap_pfn;
 
 /*
index b5e08d4..7b53291 100644 (file)
@@ -63,7 +63,7 @@ void __kasan_unpoison_range(const void *address, size_t size)
        kasan_unpoison(address, size);
 }
 
-#if CONFIG_KASAN_STACK
+#ifdef CONFIG_KASAN_STACK
 /* Unpoison the entire stack for a task. */
 void kasan_unpoison_task_stack(struct task_struct *task)
 {
index 8c55634..3436c6b 100644 (file)
@@ -231,7 +231,7 @@ void *kasan_find_first_bad_addr(void *addr, size_t size);
 const char *kasan_get_bug_type(struct kasan_access_info *info);
 void kasan_metadata_fetch_row(char *buffer, void *row);
 
-#if defined(CONFIG_KASAN_GENERIC) && CONFIG_KASAN_STACK
+#if defined(CONFIG_KASAN_GENERIC) && defined(CONFIG_KASAN_STACK)
 void kasan_print_address_stack_frame(const void *addr);
 #else
 static inline void kasan_print_address_stack_frame(const void *addr) { }
index 41f3745..de732bc 100644 (file)
@@ -128,7 +128,7 @@ void kasan_metadata_fetch_row(char *buffer, void *row)
        memcpy(buffer, kasan_mem_to_shadow(row), META_BYTES_PER_ROW);
 }
 
-#if CONFIG_KASAN_STACK
+#ifdef CONFIG_KASAN_STACK
 static bool __must_check tokenize_frame_descr(const char **frame_descr,
                                              char *token, size_t max_tok_len,
                                              unsigned long *value)
index b59054e..b890854 100644 (file)
@@ -165,10 +165,12 @@ static int wp_clean_pud_entry(pud_t *pud, unsigned long addr, unsigned long end,
                return 0;
        }
 
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
        /* Huge pud */
        walk->action = ACTION_CONTINUE;
        if (pud_trans_huge(pudval) || pud_devmap(pudval))
                WARN_ON(pud_write(pudval) || pud_dirty(pudval));
+#endif
 
        return 0;
 }
index 5efa07f..550405f 100644 (file)
@@ -166,7 +166,7 @@ static int __init init_zero_pfn(void)
        zero_pfn = page_to_pfn(ZERO_PAGE(0));
        return 0;
 }
-core_initcall(init_zero_pfn);
+early_initcall(init_zero_pfn);
 
 void mm_trace_rss_stat(struct mm_struct *mm, int member, long count)
 {
index 0dc7149..1b98374 100644 (file)
@@ -249,16 +249,6 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
        tlb_flush_mmu_free(tlb);
 }
 
-/**
- * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
- * @tlb: the mmu_gather structure to initialize
- * @mm: the mm_struct of the target address space
- * @fullmm: @mm is without users and we're going to destroy the full address
- *         space (exit/execve)
- *
- * Called to initialize an (on-stack) mmu_gather structure for page-table
- * tear-down from @mm.
- */
 static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                             bool fullmm)
 {
@@ -283,11 +273,30 @@ static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
        inc_tlb_flush_pending(tlb->mm);
 }
 
+/**
+ * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
+ * @tlb: the mmu_gather structure to initialize
+ * @mm: the mm_struct of the target address space
+ *
+ * Called to initialize an (on-stack) mmu_gather structure for page-table
+ * tear-down from @mm.
+ */
 void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
 {
        __tlb_gather_mmu(tlb, mm, false);
 }
 
+/**
+ * tlb_gather_mmu_fullmm - initialize an mmu_gather structure for page-table tear-down
+ * @tlb: the mmu_gather structure to initialize
+ * @mm: the mm_struct of the target address space
+ *
+ * In this case, @mm is without users and we're going to destroy the
+ * full address space (exit/execve).
+ *
+ * Called to initialize an (on-stack) mmu_gather structure for page-table
+ * tear-down from @mm.
+ */
 void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm)
 {
        __tlb_gather_mmu(tlb, mm, true);
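
The kerneldoc added above describes when each initializer applies; for context, the typical caller pattern looks roughly like the sketch below. unmap_range() is a hypothetical stand-in for the real unmapping work, and the signatures assumed are the reworked ones documented here:

    #include <linux/mm_types.h>
    #include <asm/tlb.h>

    /* Sketch of the caller pattern served by the initializers above. */
    static void teardown_range(struct mm_struct *mm,
                               unsigned long start, unsigned long end)
    {
            struct mmu_gather tlb;          /* on-stack, as documented above */

            tlb_gather_mmu(&tlb, mm);       /* @mm still has users */
            unmap_range(&tlb, mm, start, end);      /* hypothetical helper */
            tlb_finish_mmu(&tlb);           /* flush TLBs, free gathered pages */
    }
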
index 9efaf43..fa1cf18 100644 (file)
@@ -170,7 +170,7 @@ static bool oom_unkillable_task(struct task_struct *p)
        return false;
 }
 
-/**
+/*
  * Check whether unreclaimable slab amount is greater than
  * all user memory (LRU pages).
  * dump_unreclaimable_slab() could help in the case that
index 65cdf84..655dc58 100644 (file)
@@ -77,12 +77,14 @@ static void unpoison_page(struct page *page)
        void *addr;
 
        addr = kmap_atomic(page);
+       kasan_disable_current();
        /*
         * Page poisoning when enabled poisons each and every page
         * that is freed to buddy. Thus no extra check is done to
         * see if a page was poisoned.
         */
-       check_poison_mem(addr, PAGE_SIZE);
+       check_poison_mem(kasan_reset_tag(addr), PAGE_SIZE);
+       kasan_enable_current();
        kunmap_atomic(addr);
 }
 
index 18b768a..095d7ea 100644 (file)
@@ -87,7 +87,7 @@ extern spinlock_t pcpu_lock;
 
 extern struct list_head *pcpu_chunk_lists;
 extern int pcpu_nr_slots;
-extern int pcpu_nr_empty_pop_pages;
+extern int pcpu_nr_empty_pop_pages[];
 
 extern struct pcpu_chunk *pcpu_first_chunk;
 extern struct pcpu_chunk *pcpu_reserved_chunk;
index c8400a2..f6026db 100644 (file)
@@ -145,6 +145,7 @@ static int percpu_stats_show(struct seq_file *m, void *v)
        int slot, max_nr_alloc;
        int *buffer;
        enum pcpu_chunk_type type;
+       int nr_empty_pop_pages;
 
 alloc_buffer:
        spin_lock_irq(&pcpu_lock);
@@ -165,7 +166,11 @@ alloc_buffer:
                goto alloc_buffer;
        }
 
-#define PL(X) \
+       nr_empty_pop_pages = 0;
+       for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++)
+               nr_empty_pop_pages += pcpu_nr_empty_pop_pages[type];
+
+#define PL(X)                                                          \
        seq_printf(m, "  %-20s: %12lld\n", #X, (long long int)pcpu_stats_ai.X)
 
        seq_printf(m,
@@ -196,7 +201,7 @@ alloc_buffer:
        PU(nr_max_chunks);
        PU(min_alloc_size);
        PU(max_alloc_size);
-       P("empty_pop_pages", pcpu_nr_empty_pop_pages);
+       P("empty_pop_pages", nr_empty_pop_pages);
        seq_putc(m, '\n');
 
 #undef PU
index 6596a0a..2330811 100644 (file)
@@ -173,10 +173,10 @@ struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */
 static LIST_HEAD(pcpu_map_extend_chunks);
 
 /*
- * The number of empty populated pages, protected by pcpu_lock.  The
- * reserved chunk doesn't contribute to the count.
+ * The number of empty populated pages by chunk type, protected by pcpu_lock.
+ * The reserved chunk doesn't contribute to the count.
  */
-int pcpu_nr_empty_pop_pages;
+int pcpu_nr_empty_pop_pages[PCPU_NR_CHUNK_TYPES];
 
 /*
  * The number of populated pages in use by the allocator, protected by
@@ -556,7 +556,7 @@ static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
 {
        chunk->nr_empty_pop_pages += nr;
        if (chunk != pcpu_reserved_chunk)
-               pcpu_nr_empty_pop_pages += nr;
+               pcpu_nr_empty_pop_pages[pcpu_chunk_type(chunk)] += nr;
 }
 
 /*
@@ -1832,7 +1832,7 @@ area_found:
                mutex_unlock(&pcpu_alloc_mutex);
        }
 
-       if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
+       if (pcpu_nr_empty_pop_pages[type] < PCPU_EMPTY_POP_PAGES_LOW)
                pcpu_schedule_balance_work();
 
        /* clear the areas and return address relative to base address */
@@ -2000,7 +2000,7 @@ retry_pop:
                pcpu_atomic_alloc_failed = false;
        } else {
                nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
-                                 pcpu_nr_empty_pop_pages,
+                                 pcpu_nr_empty_pop_pages[type],
                                  0, PCPU_EMPTY_POP_PAGES_HIGH);
        }
 
@@ -2580,7 +2580,7 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 
        /* link the first chunk in */
        pcpu_first_chunk = chunk;
-       pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
+       pcpu_nr_empty_pop_pages[PCPU_CHUNK_ROOT] = pcpu_first_chunk->nr_empty_pop_pages;
        pcpu_chunk_relocate(pcpu_first_chunk, -1);
 
        /* include all regions of the first chunk */
index 4354c14..da75144 100644 (file)
@@ -111,7 +111,7 @@ static int ptdump_pte_entry(pte_t *pte, unsigned long addr,
                            unsigned long next, struct mm_walk *walk)
 {
        struct ptdump_state *st = walk->private;
-       pte_t val = READ_ONCE(*pte);
+       pte_t val = ptep_get(pte);
 
        if (st->effective_prot)
                st->effective_prot(st, 4, pte_val(val));
index 9c2e145..c13c33b 100644 (file)
@@ -147,8 +147,8 @@ void __meminit __shuffle_zone(struct zone *z)
        spin_unlock_irqrestore(&z->lock, flags);
 }
 
-/**
- * shuffle_free_memory - reduce the predictability of the page allocator
+/*
+ * __shuffle_free_memory - reduce the predictability of the page allocator
  * @pgdat: node page data
  */
 void __meminit __shuffle_free_memory(pg_data_t *pgdat)
index f876128..434b4f0 100644 (file)
@@ -890,6 +890,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
        hlist_for_each_entry(vlan, &orig_node->vlan_list, list) {
                tt_vlan->vid = htons(vlan->vid);
                tt_vlan->crc = htonl(vlan->tt.crc);
+               tt_vlan->reserved = 0;
 
                tt_vlan++;
        }
@@ -973,6 +974,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
 
                tt_vlan->vid = htons(vlan->vid);
                tt_vlan->crc = htonl(vlan->tt.crc);
+               tt_vlan->reserved = 0;
 
                tt_vlan++;
        }
index 66e7af1..32bc282 100644 (file)
@@ -105,14 +105,20 @@ static int __net_init broute_net_init(struct net *net)
                                  &net->xt.broute_table);
 }
 
+static void __net_exit broute_net_pre_exit(struct net *net)
+{
+       ebt_unregister_table_pre_exit(net, "broute", &ebt_ops_broute);
+}
+
 static void __net_exit broute_net_exit(struct net *net)
 {
-       ebt_unregister_table(net, net->xt.broute_table, &ebt_ops_broute);
+       ebt_unregister_table(net, net->xt.broute_table);
 }
 
 static struct pernet_operations broute_net_ops = {
        .init = broute_net_init,
        .exit = broute_net_exit,
+       .pre_exit = broute_net_pre_exit,
 };
 
 static int __init ebtable_broute_init(void)
index 78cb9b2..bcf982e 100644 (file)
@@ -99,14 +99,20 @@ static int __net_init frame_filter_net_init(struct net *net)
                                  &net->xt.frame_filter);
 }
 
+static void __net_exit frame_filter_net_pre_exit(struct net *net)
+{
+       ebt_unregister_table_pre_exit(net, "filter", ebt_ops_filter);
+}
+
 static void __net_exit frame_filter_net_exit(struct net *net)
 {
-       ebt_unregister_table(net, net->xt.frame_filter, ebt_ops_filter);
+       ebt_unregister_table(net, net->xt.frame_filter);
 }
 
 static struct pernet_operations frame_filter_net_ops = {
        .init = frame_filter_net_init,
        .exit = frame_filter_net_exit,
+       .pre_exit = frame_filter_net_pre_exit,
 };
 
 static int __init ebtable_filter_init(void)
index 0888936..0d09277 100644 (file)
@@ -99,14 +99,20 @@ static int __net_init frame_nat_net_init(struct net *net)
                                  &net->xt.frame_nat);
 }
 
+static void __net_exit frame_nat_net_pre_exit(struct net *net)
+{
+       ebt_unregister_table_pre_exit(net, "nat", ebt_ops_nat);
+}
+
 static void __net_exit frame_nat_net_exit(struct net *net)
 {
-       ebt_unregister_table(net, net->xt.frame_nat, ebt_ops_nat);
+       ebt_unregister_table(net, net->xt.frame_nat);
 }
 
 static struct pernet_operations frame_nat_net_ops = {
        .init = frame_nat_net_init,
        .exit = frame_nat_net_exit,
+       .pre_exit = frame_nat_net_pre_exit,
 };
 
 static int __init ebtable_nat_init(void)
index ebe33b6..d481ff2 100644 (file)
@@ -1232,10 +1232,34 @@ out:
        return ret;
 }
 
-void ebt_unregister_table(struct net *net, struct ebt_table *table,
-                         const struct nf_hook_ops *ops)
+static struct ebt_table *__ebt_find_table(struct net *net, const char *name)
+{
+       struct ebt_table *t;
+
+       mutex_lock(&ebt_mutex);
+
+       list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
+               if (strcmp(t->name, name) == 0) {
+                       mutex_unlock(&ebt_mutex);
+                       return t;
+               }
+       }
+
+       mutex_unlock(&ebt_mutex);
+       return NULL;
+}
+
+void ebt_unregister_table_pre_exit(struct net *net, const char *name, const struct nf_hook_ops *ops)
+{
+       struct ebt_table *table = __ebt_find_table(net, name);
+
+       if (table)
+               nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+}
+EXPORT_SYMBOL(ebt_unregister_table_pre_exit);
+
+void ebt_unregister_table(struct net *net, struct ebt_table *table)
 {
-       nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
        __ebt_unregister_table(net, table);
 }
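
The ebtables hunk above, together with the ebtable_broute/filter/nat updates, splits table teardown into two pernet phases: .pre_exit looks the table up by name and removes its netfilter hooks, .exit frees the table itself. Namespace cleanup runs every registered .pre_exit callback before any .exit callback (with an RCU grace period between the two phases), so no packet can still be traversing the hooks when the table memory is released. A minimal userspace model of that ordering, with invented names:

#include <stdio.h>

struct pernet_ops_model {
        const char *name;
        void (*pre_exit)(void); /* unhook: stop packets reaching the table */
        void (*exit)(void);     /* free: release the table itself */
};

static void broute_pre_exit(void) { printf("broute: hooks removed\n"); }
static void broute_exit(void)     { printf("broute: table freed\n"); }
static void filter_pre_exit(void) { printf("filter: hooks removed\n"); }
static void filter_exit(void)     { printf("filter: table freed\n"); }

int main(void)
{
        struct pernet_ops_model ops[] = {
                { "broute", broute_pre_exit, broute_exit },
                { "filter", filter_pre_exit, filter_exit },
        };
        size_t i, n = sizeof(ops) / sizeof(ops[0]);

        /* Model of namespace cleanup: all pre_exit callbacks first, then
         * (after a grace period in the real kernel) all exit callbacks. */
        for (i = 0; i < n; i++)
                ops[i].pre_exit();
        for (i = 0; i < n; i++)
                ops[i].exit();
        return 0;
}
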
 
index 0e5c37b..909b9e6 100644 (file)
@@ -86,6 +86,8 @@ MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
 MODULE_ALIAS("can-proto-2");
 
+#define BCM_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
+
 /*
  * easy access to the first 64 bit of can(fd)_frame payload. cp->data is
  * 64 bit aligned so the offset has to be multiples of 8 which is ensured
@@ -1292,7 +1294,7 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
                /* no bound device as default => check msg_name */
                DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
 
-               if (msg->msg_namelen < CAN_REQUIRED_SIZE(*addr, can_ifindex))
+               if (msg->msg_namelen < BCM_MIN_NAMELEN)
                        return -EINVAL;
 
                if (addr->can_family != AF_CAN)
@@ -1534,7 +1536,7 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
        struct net *net = sock_net(sk);
        int ret = 0;
 
-       if (len < CAN_REQUIRED_SIZE(*addr, can_ifindex))
+       if (len < BCM_MIN_NAMELEN)
                return -EINVAL;
 
        lock_sock(sk);
@@ -1616,8 +1618,8 @@ static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
        sock_recv_ts_and_drops(msg, sk, skb);
 
        if (msg->msg_name) {
-               __sockaddr_check_size(sizeof(struct sockaddr_can));
-               msg->msg_namelen = sizeof(struct sockaddr_can);
+               __sockaddr_check_size(BCM_MIN_NAMELEN);
+               msg->msg_namelen = BCM_MIN_NAMELEN;
                memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
        }
 
index 15ea123..9f94ad3 100644 (file)
@@ -77,6 +77,8 @@ MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
 MODULE_ALIAS("can-proto-6");
 
+#define ISOTP_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.tp)
+
 #define SINGLE_MASK(id) (((id) & CAN_EFF_FLAG) ? \
                         (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
                         (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
@@ -986,7 +988,8 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
        sock_recv_timestamp(msg, sk, skb);
 
        if (msg->msg_name) {
-               msg->msg_namelen = sizeof(struct sockaddr_can);
+               __sockaddr_check_size(ISOTP_MIN_NAMELEN);
+               msg->msg_namelen = ISOTP_MIN_NAMELEN;
                memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
        }
 
@@ -1056,7 +1059,7 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
        int notify_enetdown = 0;
        int do_rx_reg = 1;
 
-       if (len < CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.tp))
+       if (len < ISOTP_MIN_NAMELEN)
                return -EINVAL;
 
        /* do not register frame reception for functional addressing */
@@ -1152,13 +1155,13 @@ static int isotp_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
        if (peer)
                return -EOPNOTSUPP;
 
-       memset(addr, 0, sizeof(*addr));
+       memset(addr, 0, ISOTP_MIN_NAMELEN);
        addr->can_family = AF_CAN;
        addr->can_ifindex = so->ifindex;
        addr->can_addr.tp.rx_id = so->rxid;
        addr->can_addr.tp.tx_id = so->txid;
 
-       return sizeof(*addr);
+       return ISOTP_MIN_NAMELEN;
 }
 
 static int isotp_setsockopt(struct socket *sock, int level, int optname,
index 37b47a3..139d947 100644 (file)
@@ -60,6 +60,8 @@ MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
 MODULE_ALIAS("can-proto-1");
 
+#define RAW_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
+
 #define MASK_ALL 0
 
 /* A raw socket has a list of can_filters attached to it, each receiving
@@ -394,7 +396,7 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
        int err = 0;
        int notify_enetdown = 0;
 
-       if (len < CAN_REQUIRED_SIZE(*addr, can_ifindex))
+       if (len < RAW_MIN_NAMELEN)
                return -EINVAL;
        if (addr->can_family != AF_CAN)
                return -EINVAL;
@@ -475,11 +477,11 @@ static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
        if (peer)
                return -EOPNOTSUPP;
 
-       memset(addr, 0, sizeof(*addr));
+       memset(addr, 0, RAW_MIN_NAMELEN);
        addr->can_family  = AF_CAN;
        addr->can_ifindex = ro->ifindex;
 
-       return sizeof(*addr);
+       return RAW_MIN_NAMELEN;
 }
 
 static int raw_setsockopt(struct socket *sock, int level, int optname,
@@ -739,7 +741,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
        if (msg->msg_name) {
                DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
 
-               if (msg->msg_namelen < CAN_REQUIRED_SIZE(*addr, can_ifindex))
+               if (msg->msg_namelen < RAW_MIN_NAMELEN)
                        return -EINVAL;
 
                if (addr->can_family != AF_CAN)
@@ -832,8 +834,8 @@ static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
        sock_recv_ts_and_drops(msg, sk, skb);
 
        if (msg->msg_name) {
-               __sockaddr_check_size(sizeof(struct sockaddr_can));
-               msg->msg_namelen = sizeof(struct sockaddr_can);
+               __sockaddr_check_size(RAW_MIN_NAMELEN);
+               msg->msg_namelen = RAW_MIN_NAMELEN;
                memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
        }
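
The bcm, isotp and raw hunks replace sizeof(struct sockaddr_can) with per-protocol minimum lengths built from CAN_REQUIRED_SIZE: the offset of the last member that protocol actually uses plus that member's size, so only the used prefix of the sockaddr is validated, zeroed and copied. A userspace sketch of the same idea, using a simplified stand-in for sockaddr_can (the real layout and macro live in the kernel's CAN headers):

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in for struct sockaddr_can; not the real uapi layout. */
struct sockaddr_can_demo {
        unsigned short can_family;
        int can_ifindex;
        union {
                struct { unsigned int rx_id, tx_id; } tp; /* ISO-TP addressing */
                struct { unsigned int addr; } other;
        } can_addr;
};

/* Minimum address length: offset of the last needed member plus its size. */
#define REQUIRED_SIZE(type, member) \
        (offsetof(type, member) + sizeof(((type *)0)->member))

int main(void)
{
        printf("raw/bcm min namelen: %zu\n",
               REQUIRED_SIZE(struct sockaddr_can_demo, can_ifindex));
        printf("isotp min namelen:   %zu\n",
               REQUIRED_SIZE(struct sockaddr_can_demo, can_addr.tp));
        printf("full sockaddr size:  %zu\n", sizeof(struct sockaddr_can_demo));
        return 0;
}
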
 
index 0f72ff5..1f79b9a 100644 (file)
@@ -5924,7 +5924,8 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
        NAPI_GRO_CB(skb)->frag0_len = 0;
 
        if (!skb_headlen(skb) && pinfo->nr_frags &&
-           !PageHighMem(skb_frag_page(frag0))) {
+           !PageHighMem(skb_frag_page(frag0)) &&
+           (!NET_IP_ALIGN || !(skb_frag_off(frag0) & 3))) {
                NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
                NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
                                                    skb_frag_size(frag0),
@@ -6992,7 +6993,7 @@ static int napi_thread_wait(struct napi_struct *napi)
 
        set_current_state(TASK_INTERRUPTIBLE);
 
-       while (!kthread_should_stop() && !napi_disable_pending(napi)) {
+       while (!kthread_should_stop()) {
                /* Testing SCHED_THREADED bit here to make sure the current
                 * kthread owns this napi and could poll on this napi.
                 * Testing SCHED bit is not enough because SCHED bit might be
@@ -7010,6 +7011,7 @@ static int napi_thread_wait(struct napi_struct *napi)
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
+
        return -1;
 }
 
index e2982b3..8379719 100644 (file)
@@ -1379,7 +1379,7 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
                         * we can reinject the packet there.
                         */
                        n2 = NULL;
-                       if (dst) {
+                       if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
                                n2 = dst_neigh_lookup_skb(dst, skb);
                                if (n2)
                                        n1 = n2;
index 1bdcb33..3485b16 100644 (file)
@@ -2863,7 +2863,7 @@ static int do_setlink(const struct sk_buff *skb,
 
                        BUG_ON(!(af_ops = rtnl_af_lookup(nla_type(af))));
 
-                       err = af_ops->set_link_af(dev, af);
+                       err = af_ops->set_link_af(dev, af, extack);
                        if (err < 0) {
                                rcu_read_unlock();
                                goto errout;
index 1261512..5def3a2 100644 (file)
@@ -488,6 +488,7 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb
        if (unlikely(!msg))
                return -EAGAIN;
        sk_msg_init(msg);
+       skb_set_owner_r(skb, sk);
        return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
 }
 
@@ -790,7 +791,6 @@ static void sk_psock_tls_verdict_apply(struct sk_buff *skb, struct sock *sk, int
 {
        switch (verdict) {
        case __SK_REDIRECT:
-               skb_set_owner_r(skb, sk);
                sk_psock_skb_redirect(skb);
                break;
        case __SK_PASS:
@@ -808,10 +808,6 @@ int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
        rcu_read_lock();
        prog = READ_ONCE(psock->progs.skb_verdict);
        if (likely(prog)) {
-               /* We skip full set_owner_r here because if we do a SK_PASS
-                * or SK_DROP we can skip skb memory accounting and use the
-                * TLS context.
-                */
                skb->sk = psock->sk;
                tcp_skb_bpf_redirect_clear(skb);
                ret = sk_psock_bpf_run(psock, prog, skb);
@@ -880,12 +876,13 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
                kfree_skb(skb);
                goto out;
        }
-       skb_set_owner_r(skb, sk);
        prog = READ_ONCE(psock->progs.skb_verdict);
        if (likely(prog)) {
+               skb->sk = sk;
                tcp_skb_bpf_redirect_clear(skb);
                ret = sk_psock_bpf_run(psock, prog, skb);
                ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
+               skb->sk = NULL;
        }
        sk_psock_verdict_apply(psock, skb, ret);
 out:
@@ -956,12 +953,13 @@ static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
                kfree_skb(skb);
                goto out;
        }
-       skb_set_owner_r(skb, sk);
        prog = READ_ONCE(psock->progs.skb_verdict);
        if (likely(prog)) {
+               skb->sk = sk;
                tcp_skb_bpf_redirect_clear(skb);
                ret = sk_psock_bpf_run(psock, prog, skb);
                ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
+               skb->sk = NULL;
        }
        sk_psock_verdict_apply(psock, skb, ret);
 out:
index cc31b60..5ec90f9 100644 (file)
@@ -2132,16 +2132,10 @@ void skb_orphan_partial(struct sk_buff *skb)
        if (skb_is_tcp_pure_ack(skb))
                return;
 
-       if (can_skb_orphan_partial(skb)) {
-               struct sock *sk = skb->sk;
-
-               if (refcount_inc_not_zero(&sk->sk_refcnt)) {
-                       WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
-                       skb->destructor = sock_efree;
-               }
-       } else {
+       if (can_skb_orphan_partial(skb))
+               skb_set_owner_sk_safe(skb, skb->sk);
+       else
                skb_orphan(skb);
-       }
 }
 EXPORT_SYMBOL(skb_orphan_partial);
 
index 0535497..858276e 100644 (file)
@@ -350,7 +350,8 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
                /* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
                xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
                page = virt_to_head_page(data);
-               napi_direct &= !xdp_return_frame_no_direct();
+               if (napi_direct && xdp_return_frame_no_direct())
+                       napi_direct = false;
                page_pool_put_full_page(xa->page_pool, page, napi_direct);
                rcu_read_unlock();
                break;
index d142eb2..3c3e56a 100644 (file)
@@ -795,8 +795,14 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
 
        list_for_each_entry(dp, &dst->ports, list) {
                err = dsa_port_setup(dp);
-               if (err)
+               if (err) {
+                       dsa_port_devlink_teardown(dp);
+                       dp->type = DSA_PORT_TYPE_UNUSED;
+                       err = dsa_port_devlink_setup(dp);
+                       if (err)
+                               goto teardown;
                        continue;
+               }
        }
 
        return 0;
index 4b5da89..3296327 100644 (file)
@@ -107,7 +107,7 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
        bool unset_vlan_filtering = br_vlan_enabled(info->br);
        struct dsa_switch_tree *dst = ds->dst;
        struct netlink_ext_ack extack = {0};
-       int err, i;
+       int err, port;
 
        if (dst->index == info->tree_index && ds->index == info->sw_index &&
            ds->ops->port_bridge_join)
@@ -124,13 +124,16 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
         * it. That is a good thing, because that lets us handle it and also
         * handle the case where the switch's vlan_filtering setting is global
         * (not per port). When that happens, the correct moment to trigger the
-        * vlan_filtering callback is only when the last port left this bridge.
+        * vlan_filtering callback is only when the last port leaves the last
+        * VLAN-aware bridge.
         */
        if (unset_vlan_filtering && ds->vlan_filtering_is_global) {
-               for (i = 0; i < ds->num_ports; i++) {
-                       if (i == info->port)
-                               continue;
-                       if (dsa_to_port(ds, i)->bridge_dev == info->br) {
+               for (port = 0; port < ds->num_ports; port++) {
+                       struct net_device *bridge_dev;
+
+                       bridge_dev = dsa_to_port(ds, port)->bridge_dev;
+
+                       if (bridge_dev && br_vlan_enabled(bridge_dev)) {
                                unset_vlan_filtering = false;
                                break;
                        }
index c6a383d..f9dcbad 100644 (file)
@@ -273,6 +273,7 @@ const struct link_mode_info link_mode_params[] = {
        __DEFINE_LINK_MODE_PARAMS(10000, KR, Full),
        [ETHTOOL_LINK_MODE_10000baseR_FEC_BIT] = {
                .speed  = SPEED_10000,
+               .lanes  = 1,
                .duplex = DUPLEX_FULL,
        },
        __DEFINE_LINK_MODE_PARAMS(20000, MLD2, Full),
@@ -562,3 +563,19 @@ void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops)
        rtnl_unlock();
 }
 EXPORT_SYMBOL_GPL(ethtool_set_ethtool_phy_ops);
+
+void
+ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings,
+                             enum ethtool_link_mode_bit_indices link_mode)
+{
+       const struct link_mode_info *link_info;
+
+       if (WARN_ON_ONCE(link_mode >= __ETHTOOL_LINK_MODE_MASK_NBITS))
+               return;
+
+       link_info = &link_mode_params[link_mode];
+       link_ksettings->base.speed = link_info->speed;
+       link_ksettings->lanes = link_info->lanes;
+       link_ksettings->base.duplex = link_info->duplex;
+}
+EXPORT_SYMBOL_GPL(ethtool_params_from_link_mode);
index 901b7de..e10bfcc 100644 (file)
@@ -169,8 +169,8 @@ int ethnl_set_eee(struct sk_buff *skb, struct genl_info *info)
        ethnl_update_bool32(&eee.eee_enabled, tb[ETHTOOL_A_EEE_ENABLED], &mod);
        ethnl_update_bool32(&eee.tx_lpi_enabled,
                            tb[ETHTOOL_A_EEE_TX_LPI_ENABLED], &mod);
-       ethnl_update_bool32(&eee.tx_lpi_timer, tb[ETHTOOL_A_EEE_TX_LPI_TIMER],
-                           &mod);
+       ethnl_update_u32(&eee.tx_lpi_timer, tb[ETHTOOL_A_EEE_TX_LPI_TIMER],
+                        &mod);
        ret = 0;
        if (!mod)
                goto out_ops;
index 24783b7..771688e 100644 (file)
@@ -426,29 +426,13 @@ struct ethtool_link_usettings {
 int __ethtool_get_link_ksettings(struct net_device *dev,
                                 struct ethtool_link_ksettings *link_ksettings)
 {
-       const struct link_mode_info *link_info;
-       int err;
-
        ASSERT_RTNL();
 
        if (!dev->ethtool_ops->get_link_ksettings)
                return -EOPNOTSUPP;
 
        memset(link_ksettings, 0, sizeof(*link_ksettings));
-
-       link_ksettings->link_mode = -1;
-       err = dev->ethtool_ops->get_link_ksettings(dev, link_ksettings);
-       if (err)
-               return err;
-
-       if (link_ksettings->link_mode != -1) {
-               link_info = &link_mode_params[link_ksettings->link_mode];
-               link_ksettings->base.speed = link_info->speed;
-               link_ksettings->lanes = link_info->lanes;
-               link_ksettings->base.duplex = link_info->duplex;
-       }
-
-       return 0;
+       return dev->ethtool_ops->get_link_ksettings(dev, link_ksettings);
 }
 EXPORT_SYMBOL(__ethtool_get_link_ksettings);
 
index 6eabd58..cde9f31 100644 (file)
@@ -36,9 +36,9 @@ static inline int ethnl_strz_size(const char *s)
 
 /**
  * ethnl_put_strz() - put string attribute with fixed size string
- * @skb:     skb with the message
- * @attrype: attribute type
- * @s:       ETH_GSTRING_LEN sized string (may not be null terminated)
+ * @skb:      skb with the message
+ * @attrtype: attribute type
+ * @s:        ETH_GSTRING_LEN sized string (may not be null terminated)
  *
  * Puts an attribute with null terminated string from @s into the message.
  *
index 09998dc..d4ac027 100644 (file)
@@ -38,16 +38,16 @@ static int pause_prepare_data(const struct ethnl_req_info *req_base,
        if (!dev->ethtool_ops->get_pauseparam)
                return -EOPNOTSUPP;
 
+       ethtool_stats_init((u64 *)&data->pausestat,
+                          sizeof(data->pausestat) / 8);
+
        ret = ethnl_ops_begin(dev);
        if (ret < 0)
                return ret;
        dev->ethtool_ops->get_pauseparam(dev, &data->pauseparam);
        if (req_base->flags & ETHTOOL_FLAG_STATS &&
-           dev->ethtool_ops->get_pause_stats) {
-               ethtool_stats_init((u64 *)&data->pausestat,
-                                  sizeof(data->pausestat) / 8);
+           dev->ethtool_ops->get_pause_stats)
                dev->ethtool_ops->get_pause_stats(dev, &data->pausestat);
-       }
        ethnl_ops_complete(dev);
 
        return 0;
index 7444ec6..bfcdc75 100644 (file)
@@ -217,6 +217,7 @@ static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
        master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
        if (master) {
                skb->dev = master->dev;
+               skb_reset_mac_header(skb);
                hsr_forward_skb(skb, master);
        } else {
                atomic_long_inc(&dev->tx_dropped);
index ed82a47..b218e45 100644 (file)
@@ -555,12 +555,6 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
 {
        struct hsr_frame_info frame;
 
-       if (skb_mac_header(skb) != skb->data) {
-               WARN_ONCE(1, "%s:%d: Malformed frame (port_src %s)\n",
-                         __FILE__, __LINE__, port->dev->name);
-               goto out_drop;
-       }
-
        if (fill_frame_info(&frame, skb, port) < 0)
                goto out_drop;
 
index 9c640d6..0c1b077 100644 (file)
@@ -551,9 +551,7 @@ ieee802154_llsec_parse_key_id(struct genl_info *info,
        desc->mode = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]);
 
        if (desc->mode == IEEE802154_SCF_KEY_IMPLICIT) {
-               if (!info->attrs[IEEE802154_ATTR_PAN_ID] &&
-                   !(info->attrs[IEEE802154_ATTR_SHORT_ADDR] ||
-                     info->attrs[IEEE802154_ATTR_HW_ADDR]))
+               if (!info->attrs[IEEE802154_ATTR_PAN_ID])
                        return -EINVAL;
 
                desc->device_addr.pan_id = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_PAN_ID]);
@@ -562,6 +560,9 @@ ieee802154_llsec_parse_key_id(struct genl_info *info,
                        desc->device_addr.mode = IEEE802154_ADDR_SHORT;
                        desc->device_addr.short_addr = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_SHORT_ADDR]);
                } else {
+                       if (!info->attrs[IEEE802154_ATTR_HW_ADDR])
+                               return -EINVAL;
+
                        desc->device_addr.mode = IEEE802154_ADDR_LONG;
                        desc->device_addr.extended_addr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
                }
index 7c5a1aa..05f6bd8 100644 (file)
@@ -820,8 +820,13 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
                goto nla_put_failure;
 
 #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               goto out;
+
        if (nl802154_get_llsec_params(msg, rdev, wpan_dev) < 0)
                goto nla_put_failure;
+
+out:
 #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
 
        genlmsg_end(msg, hdr);
@@ -1384,6 +1389,9 @@ static int nl802154_set_llsec_params(struct sk_buff *skb,
        u32 changed = 0;
        int ret;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
        if (info->attrs[NL802154_ATTR_SEC_ENABLED]) {
                u8 enabled;
 
@@ -1490,6 +1498,11 @@ nl802154_dump_llsec_key(struct sk_buff *skb, struct netlink_callback *cb)
        if (err)
                return err;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
+               err = skb->len;
+               goto out_err;
+       }
+
        if (!wpan_dev->netdev) {
                err = -EINVAL;
                goto out_err;
@@ -1544,7 +1557,11 @@ static int nl802154_add_llsec_key(struct sk_buff *skb, struct genl_info *info)
        struct ieee802154_llsec_key_id id = { };
        u32 commands[NL802154_CMD_FRAME_NR_IDS / 32] = { };
 
-       if (nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
+       if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
+           nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
                return -EINVAL;
 
        if (!attrs[NL802154_KEY_ATTR_USAGE_FRAMES] ||
@@ -1592,7 +1609,11 @@ static int nl802154_del_llsec_key(struct sk_buff *skb, struct genl_info *info)
        struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1];
        struct ieee802154_llsec_key_id id;
 
-       if (nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
+       if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
+           nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
                return -EINVAL;
 
        if (ieee802154_llsec_parse_key_id(attrs[NL802154_KEY_ATTR_ID], &id) < 0)
@@ -1656,6 +1677,11 @@ nl802154_dump_llsec_dev(struct sk_buff *skb, struct netlink_callback *cb)
        if (err)
                return err;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
+               err = skb->len;
+               goto out_err;
+       }
+
        if (!wpan_dev->netdev) {
                err = -EINVAL;
                goto out_err;
@@ -1742,6 +1768,9 @@ static int nl802154_add_llsec_dev(struct sk_buff *skb, struct genl_info *info)
        struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
        struct ieee802154_llsec_device dev_desc;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
        if (ieee802154_llsec_parse_device(info->attrs[NL802154_ATTR_SEC_DEVICE],
                                          &dev_desc) < 0)
                return -EINVAL;
@@ -1757,7 +1786,11 @@ static int nl802154_del_llsec_dev(struct sk_buff *skb, struct genl_info *info)
        struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1];
        __le64 extended_addr;
 
-       if (nla_parse_nested_deprecated(attrs, NL802154_DEV_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVICE], nl802154_dev_policy, info->extack))
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
+       if (!info->attrs[NL802154_ATTR_SEC_DEVICE] ||
+           nla_parse_nested_deprecated(attrs, NL802154_DEV_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVICE], nl802154_dev_policy, info->extack))
                return -EINVAL;
 
        if (!attrs[NL802154_DEV_ATTR_EXTENDED_ADDR])
@@ -1825,6 +1858,11 @@ nl802154_dump_llsec_devkey(struct sk_buff *skb, struct netlink_callback *cb)
        if (err)
                return err;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
+               err = skb->len;
+               goto out_err;
+       }
+
        if (!wpan_dev->netdev) {
                err = -EINVAL;
                goto out_err;
@@ -1882,6 +1920,9 @@ static int nl802154_add_llsec_devkey(struct sk_buff *skb, struct genl_info *info
        struct ieee802154_llsec_device_key key;
        __le64 extended_addr;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
        if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
            nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack) < 0)
                return -EINVAL;
@@ -1913,7 +1954,11 @@ static int nl802154_del_llsec_devkey(struct sk_buff *skb, struct genl_info *info
        struct ieee802154_llsec_device_key key;
        __le64 extended_addr;
 
-       if (nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack))
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
+       if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
+           nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack))
                return -EINVAL;
 
        if (!attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR])
@@ -1986,6 +2031,11 @@ nl802154_dump_llsec_seclevel(struct sk_buff *skb, struct netlink_callback *cb)
        if (err)
                return err;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR) {
+               err = skb->len;
+               goto out_err;
+       }
+
        if (!wpan_dev->netdev) {
                err = -EINVAL;
                goto out_err;
@@ -2070,6 +2120,9 @@ static int nl802154_add_llsec_seclevel(struct sk_buff *skb,
        struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
        struct ieee802154_llsec_seclevel sl;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
        if (llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
                                 &sl) < 0)
                return -EINVAL;
@@ -2085,6 +2138,9 @@ static int nl802154_del_llsec_seclevel(struct sk_buff *skb,
        struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
        struct ieee802154_llsec_seclevel sl;
 
+       if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
+               return -EOPNOTSUPP;
+
        if (!info->attrs[NL802154_ATTR_SEC_LEVEL] ||
            llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
                                 &sl) < 0)
@@ -2098,11 +2154,7 @@ static int nl802154_del_llsec_seclevel(struct sk_buff *skb,
 #define NL802154_FLAG_NEED_NETDEV      0x02
 #define NL802154_FLAG_NEED_RTNL                0x04
 #define NL802154_FLAG_CHECK_NETDEV_UP  0x08
-#define NL802154_FLAG_NEED_NETDEV_UP   (NL802154_FLAG_NEED_NETDEV |\
-                                        NL802154_FLAG_CHECK_NETDEV_UP)
 #define NL802154_FLAG_NEED_WPAN_DEV    0x10
-#define NL802154_FLAG_NEED_WPAN_DEV_UP (NL802154_FLAG_NEED_WPAN_DEV |\
-                                        NL802154_FLAG_CHECK_NETDEV_UP)
 
 static int nl802154_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
                             struct genl_info *info)
index d99e1be..36ed85b 100644 (file)
@@ -141,7 +141,7 @@ static void ah_output_done(struct crypto_async_request *base, int err)
        }
 
        kfree(AH_SKB_CB(skb)->tmp);
-       xfrm_output_resume(skb, err);
+       xfrm_output_resume(skb->sk, skb, err);
 }
 
 static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
index 75f6799..2e35f68 100644 (file)
@@ -1978,7 +1978,8 @@ static int inet_validate_link_af(const struct net_device *dev,
        return 0;
 }
 
-static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
+static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla,
+                           struct netlink_ext_ack *extack)
 {
        struct in_device *in_dev = __in_dev_get_rcu(dev);
        struct nlattr *a, *tb[IFLA_INET_MAX+1];
index a3271ec..4b834bb 100644 (file)
@@ -279,7 +279,7 @@ static void esp_output_done(struct crypto_async_request *base, int err)
                    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
                        esp_output_tail_tcp(x, skb);
                else
-                       xfrm_output_resume(skb, err);
+                       xfrm_output_resume(skb->sk, skb, err);
        }
 }
 
index 601f5fb..33687cf 100644 (file)
@@ -217,10 +217,12 @@ static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
 
        if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
             !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
-               esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
+               esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
+                                           NETIF_F_SCTP_CRC);
        else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
                 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
-               esp_features = features & ~NETIF_F_CSUM_MASK;
+               esp_features = features & ~(NETIF_F_CSUM_MASK |
+                                           NETIF_F_SCTP_CRC);
 
        xo->flags |= XFRM_GSO_SEGMENT;
 
@@ -312,8 +314,17 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_
        ip_hdr(skb)->tot_len = htons(skb->len);
        ip_send_check(ip_hdr(skb));
 
-       if (hw_offload)
+       if (hw_offload) {
+               if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
+                       return -ENOMEM;
+
+               xo = xfrm_offload(skb);
+               if (!xo)
+                       return -EINVAL;
+
+               xo->flags |= XFRM_XMIT;
                return 0;
+       }
 
        err = esp_output_tail(x, skb, &esp);
        if (err)
index eb20708..31c6c6d 100644 (file)
@@ -218,7 +218,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
        }
 
        if (dst->flags & DST_XFRM_QUEUE)
-               goto queued;
+               goto xmit;
 
        if (!vti_state_check(dst->xfrm, parms->iph.daddr, parms->iph.saddr)) {
                dev->stats.tx_carrier_errors++;
@@ -238,6 +238,8 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
        if (skb->len > mtu) {
                skb_dst_update_pmtu_no_confirm(skb, mtu);
                if (skb->protocol == htons(ETH_P_IP)) {
+                       if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
+                               goto xmit;
                        icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                                      htonl(mtu));
                } else {
@@ -251,7 +253,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
                goto tx_error;
        }
 
-queued:
+xmit:
        skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
        skb_dst_set(skb, dst);
        skb->dev = skb_dst(skb)->dev;
index d1e04d2..d6d45d8 100644 (file)
@@ -1193,6 +1193,8 @@ static int translate_compat_table(struct net *net,
        if (!newinfo)
                goto out_unlock;
 
+       memset(newinfo->entries, 0, size);
+
        newinfo->number = compatr->num_entries;
        for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
                newinfo->hook_entry[i] = compatr->hook_entry[i];
@@ -1539,10 +1541,15 @@ out_free:
        return ret;
 }
 
-void arpt_unregister_table(struct net *net, struct xt_table *table,
-                          const struct nf_hook_ops *ops)
+void arpt_unregister_table_pre_exit(struct net *net, struct xt_table *table,
+                                   const struct nf_hook_ops *ops)
 {
        nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+}
+EXPORT_SYMBOL(arpt_unregister_table_pre_exit);
+
+void arpt_unregister_table(struct net *net, struct xt_table *table)
+{
        __arpt_unregister_table(net, table);
 }
 
index c216b9a..6c300ba 100644 (file)
@@ -56,16 +56,24 @@ static int __net_init arptable_filter_table_init(struct net *net)
        return err;
 }
 
+static void __net_exit arptable_filter_net_pre_exit(struct net *net)
+{
+       if (net->ipv4.arptable_filter)
+               arpt_unregister_table_pre_exit(net, net->ipv4.arptable_filter,
+                                              arpfilter_ops);
+}
+
 static void __net_exit arptable_filter_net_exit(struct net *net)
 {
        if (!net->ipv4.arptable_filter)
                return;
-       arpt_unregister_table(net, net->ipv4.arptable_filter, arpfilter_ops);
+       arpt_unregister_table(net, net->ipv4.arptable_filter);
        net->ipv4.arptable_filter = NULL;
 }
 
 static struct pernet_operations arptable_filter_net_ops = {
        .exit = arptable_filter_net_exit,
+       .pre_exit = arptable_filter_net_pre_exit,
 };
 
 static int __init arptable_filter_init(void)
index f15bc21..f77ea0d 100644 (file)
@@ -1428,6 +1428,8 @@ translate_compat_table(struct net *net,
        if (!newinfo)
                goto out_unlock;
 
+       memset(newinfo->entries, 0, size);
+
        newinfo->number = compatr->num_entries;
        for (i = 0; i < NF_INET_NUMHOOKS; i++) {
                newinfo->hook_entry[i] = compatr->hook_entry[i];
index f55095d..60465f0 100644 (file)
@@ -1378,9 +1378,19 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
                if (!table)
                        goto err_alloc;
 
-               /* Update the variables to point into the current struct net */
-               for (i = 0; i < ARRAY_SIZE(ipv4_net_table) - 1; i++)
-                       table[i].data += (void *)net - (void *)&init_net;
+               for (i = 0; i < ARRAY_SIZE(ipv4_net_table) - 1; i++) {
+                       if (table[i].data) {
+                               /* Update the variables to point into
+                                * the current struct net
+                                */
+                               table[i].data += (void *)net - (void *)&init_net;
+                       } else {
+                               /* Entries without data pointer are global;
+                                * Make them read-only in non-init_net ns
+                                */
+                               table[i].mode &= ~0222;
+                       }
+               }
        }
 
        net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
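
The sysctl hunk above keeps rebasing .data pointers from &init_net into the current struct net, but entries whose .data is NULL are global knobs, so their write bits are cleared and only the init namespace may change them. The mode arithmetic is ordinary octal permission masking; a trivial sketch with a made-up starting mode:

#include <stdio.h>

int main(void)
{
        unsigned int mode = 0644;       /* typical rw-r--r-- sysctl entry */

        /* Clearing all write bits mirrors "table[i].mode &= ~0222" above:
         * the entry becomes read-only (0444) outside the init namespace. */
        mode &= ~0222;
        printf("resulting mode: %04o\n", mode);
        return 0;
}
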
index 4a0478b..99d743e 100644 (file)
@@ -2754,6 +2754,10 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
                val = up->gso_size;
                break;
 
+       case UDP_GRO:
+               val = up->gro_enabled;
+               break;
+
        /* The following two cannot be changed on UDP sockets, the return is
         * always 0 (which corresponds to the full checksum coverage of UDP). */
        case UDPLITE_SEND_CSCOV:
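
The udp hunk adds a getsockopt() case for UDP_GRO, so userspace can read the flag back rather than only set it. A hedged userspace sketch (assumes the running kernel carries this change; the SOL_UDP and UDP_GRO fallbacks below are the uapi values):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef SOL_UDP
#define SOL_UDP 17              /* IPPROTO_UDP */
#endif
#ifndef UDP_GRO
#define UDP_GRO 104             /* from include/uapi/linux/udp.h */
#endif

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int on = 1, val = -1;
        socklen_t len = sizeof(val);

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        if (setsockopt(fd, SOL_UDP, UDP_GRO, &on, sizeof(on)) < 0)
                perror("setsockopt(UDP_GRO)");
        /* The read-back below is what the hunk above enables; kernels without
         * it fall through to the default case and return ENOPROTOOPT. */
        if (getsockopt(fd, SOL_UDP, UDP_GRO, &val, &len) < 0)
                perror("getsockopt(UDP_GRO)");
        else
                printf("UDP_GRO = %d\n", val);
        return 0;
}
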
index f2337fb..a9e53f5 100644 (file)
@@ -5669,7 +5669,8 @@ static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
        return 0;
 }
 
-static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
+static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token,
+                            struct netlink_ext_ack *extack)
 {
        struct inet6_ifaddr *ifp;
        struct net_device *dev = idev->dev;
@@ -5680,12 +5681,29 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
 
        if (!token)
                return -EINVAL;
-       if (dev->flags & (IFF_LOOPBACK | IFF_NOARP))
+
+       if (dev->flags & IFF_LOOPBACK) {
+               NL_SET_ERR_MSG_MOD(extack, "Device is loopback");
                return -EINVAL;
-       if (!ipv6_accept_ra(idev))
+       }
+
+       if (dev->flags & IFF_NOARP) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Device does not do neighbour discovery");
+               return -EINVAL;
+       }
+
+       if (!ipv6_accept_ra(idev)) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Router advertisement is disabled on device");
                return -EINVAL;
-       if (idev->cnf.rtr_solicits == 0)
+       }
+
+       if (idev->cnf.rtr_solicits == 0) {
+               NL_SET_ERR_MSG(extack,
+                              "Router solicitation is disabled on device");
                return -EINVAL;
+       }
 
        write_lock_bh(&idev->lock);
 
@@ -5793,7 +5811,8 @@ static int inet6_validate_link_af(const struct net_device *dev,
        return 0;
 }
 
-static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
+static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla,
+                            struct netlink_ext_ack *extack)
 {
        struct inet6_dev *idev = __in6_dev_get(dev);
        struct nlattr *tb[IFLA_INET6_MAX + 1];
@@ -5806,7 +5825,8 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
                BUG();
 
        if (tb[IFLA_INET6_TOKEN]) {
-               err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]));
+               err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]),
+                                       extack);
                if (err)
                        return err;
        }
index 440080d..080ee7f 100644 (file)
@@ -316,7 +316,7 @@ static void ah6_output_done(struct crypto_async_request *base, int err)
        }
 
        kfree(AH_SKB_CB(skb)->tmp);
-       xfrm_output_resume(skb, err);
+       xfrm_output_resume(skb->sk, skb, err);
 }
 
 static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
index 153ad10..727d791 100644 (file)
@@ -314,7 +314,7 @@ static void esp_output_done(struct crypto_async_request *base, int err)
                    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
                        esp_output_tail_tcp(x, skb);
                else
-                       xfrm_output_resume(skb, err);
+                       xfrm_output_resume(skb->sk, skb, err);
        }
 }
 
index 1ca516f..4af56af 100644 (file)
@@ -254,9 +254,11 @@ static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
        skb->encap_hdr_csum = 1;
 
        if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev)
-               esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
+               esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
+                                           NETIF_F_SCTP_CRC);
        else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
-               esp_features = features & ~NETIF_F_CSUM_MASK;
+               esp_features = features & ~(NETIF_F_CSUM_MASK |
+                                           NETIF_F_SCTP_CRC);
 
        xo->flags |= XFRM_GSO_SEGMENT;
 
@@ -346,8 +348,17 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features
 
        ipv6_hdr(skb)->payload_len = htons(len);
 
-       if (hw_offload)
+       if (hw_offload) {
+               if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
+                       return -ENOMEM;
+
+               xo = xfrm_offload(skb);
+               if (!xo)
+                       return -EINVAL;
+
+               xo->flags |= XFRM_XMIT;
                return 0;
+       }
 
        err = esp6_output_tail(x, skb, &esp);
        if (err)
index 3fa0eca..42fe7db 100644 (file)
@@ -2244,6 +2244,16 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct net *net, struct list_head
                        t = rtnl_dereference(t->next);
                }
        }
+
+       t = rtnl_dereference(ip6n->tnls_wc[0]);
+       while (t) {
+               /* If dev is in the same netns, it has already
+                * been added to the list by the previous loop.
+                */
+               if (!net_eq(dev_net(t->dev), net))
+                       unregister_netdevice_queue(t->dev, list);
+               t = rtnl_dereference(t->next);
+       }
 }
 
 static int __net_init ip6_tnl_init_net(struct net *net)
index f10e7a7..e0cc32e 100644 (file)
@@ -494,7 +494,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        }
 
        if (dst->flags & DST_XFRM_QUEUE)
-               goto queued;
+               goto xmit;
 
        x = dst->xfrm;
        if (!vti6_state_check(x, &t->parms.raddr, &t->parms.laddr))
@@ -523,6 +523,8 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 
                        icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                } else {
+                       if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
+                               goto xmit;
                        icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                                      htonl(mtu));
                }
@@ -531,7 +533,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
                goto tx_err_dst_release;
        }
 
-queued:
+xmit:
        skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
        skb_dst_set(skb, dst);
        skb->dev = skb_dst(skb)->dev;
index 2e2119b..eb2b540 100644 (file)
@@ -1443,6 +1443,8 @@ translate_compat_table(struct net *net,
        if (!newinfo)
                goto out_unlock;
 
+       memset(newinfo->entries, 0, size);
+
        newinfo->number = compatr->num_entries;
        for (i = 0; i < NF_INET_NUMHOOKS; i++) {
                newinfo->hook_entry[i] = compatr->hook_entry[i];
index 1f56d9a..bf3646b 100644 (file)
@@ -298,7 +298,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                 */
                v4addr = LOOPBACK4_IPV6;
                if (!(addr_type & IPV6_ADDR_MULTICAST) &&
-                   !sock_net(sk)->ipv6.sysctl.ip_nonlocal_bind) {
+                   !ipv6_can_nonlocal_bind(sock_net(sk), inet)) {
                        err = -EADDRNOTAVAIL;
                        if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
                                           dev, 0)) {
index 1056b02..373d480 100644 (file)
@@ -5209,9 +5209,11 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
                 * nexthops have been replaced by first new, the rest should
                 * be added to it.
                 */
-               cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
-                                                    NLM_F_REPLACE);
-               cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
+               if (cfg->fc_nlinfo.nlh) {
+                       cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
+                                                            NLM_F_REPLACE);
+                       cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
+               }
                nhn++;
        }
 
index 63ccd9f..9fdccf0 100644 (file)
@@ -1867,9 +1867,9 @@ static void __net_exit sit_destroy_tunnels(struct net *net,
                if (dev->rtnl_link_ops == &sit_link_ops)
                        unregister_netdevice_queue(dev, head);
 
-       for (prio = 1; prio < 4; prio++) {
+       for (prio = 0; prio < 4; prio++) {
                int h;
-               for (h = 0; h < IP6_SIT_HASH_SIZE; h++) {
+               for (h = 0; h < (prio ? IP6_SIT_HASH_SIZE : 1); h++) {
                        struct ip_tunnel *t;
 
                        t = rtnl_dereference(sitn->tunnels[prio][h]);
index 68a0de0..860bc35 100644 (file)
@@ -1788,8 +1788,10 @@ static int ieee80211_change_station(struct wiphy *wiphy,
                }
 
                if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
-                   sta->sdata->u.vlan.sta)
+                   sta->sdata->u.vlan.sta) {
+                       ieee80211_clear_fast_rx(sta);
                        RCU_INIT_POINTER(sta->sdata->u.vlan.sta, NULL);
+               }
 
                if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
                        ieee80211_vif_dec_num_mcast(sta->sdata);
index ce4e385..96f487f 100644 (file)
@@ -4707,7 +4707,10 @@ static void ieee80211_sta_conn_mon_timer(struct timer_list *t)
                timeout = sta->rx_stats.last_rx;
        timeout += IEEE80211_CONNECTION_IDLE_TIME;
 
-       if (time_is_before_jiffies(timeout)) {
+       /* If timeout is after now, then update timer to fire at
+        * the later date, but do not actually probe at this time.
+        */
+       if (time_is_after_jiffies(timeout)) {
                mod_timer(&ifmgd->conn_mon_timer, round_jiffies_up(timeout));
                return;
        }
index 5d06de6..3b3bcef 100644 (file)
@@ -3573,7 +3573,7 @@ begin:
            test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags))
                goto out;
 
-       if (vif->txqs_stopped[ieee80211_ac_from_tid(txq->tid)]) {
+       if (vif->txqs_stopped[txq->ac]) {
                set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags);
                goto out;
        }
index 585d331..55550ea 100644 (file)
@@ -152,7 +152,7 @@ err_tfm0:
        crypto_free_sync_skcipher(key->tfm0);
 err_tfm:
        for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
-               if (key->tfm[i])
+               if (!IS_ERR_OR_NULL(key->tfm[i]))
                        crypto_free_aead(key->tfm[i]);
 
        kfree_sensitive(key);
index 1590b9d..4bde960 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/netdevice.h>
 #include <linux/sched/signal.h>
 #include <linux/atomic.h>
-#include <linux/igmp.h>
 #include <net/sock.h>
 #include <net/inet_common.h>
 #include <net/inet_hashtables.h>
@@ -20,7 +19,6 @@
 #include <net/tcp_states.h>
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
 #include <net/transp_v6.h>
-#include <net/addrconf.h>
 #endif
 #include <net/mptcp.h>
 #include <net/xfrm.h>
@@ -2878,6 +2876,48 @@ static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname,
        return ret;
 }
 
+static bool mptcp_unsupported(int level, int optname)
+{
+       if (level == SOL_IP) {
+               switch (optname) {
+               case IP_ADD_MEMBERSHIP:
+               case IP_ADD_SOURCE_MEMBERSHIP:
+               case IP_DROP_MEMBERSHIP:
+               case IP_DROP_SOURCE_MEMBERSHIP:
+               case IP_BLOCK_SOURCE:
+               case IP_UNBLOCK_SOURCE:
+               case MCAST_JOIN_GROUP:
+               case MCAST_LEAVE_GROUP:
+               case MCAST_JOIN_SOURCE_GROUP:
+               case MCAST_LEAVE_SOURCE_GROUP:
+               case MCAST_BLOCK_SOURCE:
+               case MCAST_UNBLOCK_SOURCE:
+               case MCAST_MSFILTER:
+                       return true;
+               }
+               return false;
+       }
+       if (level == SOL_IPV6) {
+               switch (optname) {
+               case IPV6_ADDRFORM:
+               case IPV6_ADD_MEMBERSHIP:
+               case IPV6_DROP_MEMBERSHIP:
+               case IPV6_JOIN_ANYCAST:
+               case IPV6_LEAVE_ANYCAST:
+               case MCAST_JOIN_GROUP:
+               case MCAST_LEAVE_GROUP:
+               case MCAST_JOIN_SOURCE_GROUP:
+               case MCAST_LEAVE_SOURCE_GROUP:
+               case MCAST_BLOCK_SOURCE:
+               case MCAST_UNBLOCK_SOURCE:
+               case MCAST_MSFILTER:
+                       return true;
+               }
+               return false;
+       }
+       return false;
+}
+
 static int mptcp_setsockopt(struct sock *sk, int level, int optname,
                            sockptr_t optval, unsigned int optlen)
 {
@@ -2886,6 +2926,9 @@ static int mptcp_setsockopt(struct sock *sk, int level, int optname,
 
        pr_debug("msk=%p", msk);
 
+       if (mptcp_unsupported(level, optname))
+               return -ENOPROTOOPT;
+
        if (level == SOL_SOCKET)
                return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen);
 
@@ -3419,34 +3462,10 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
        return mask;
 }
 
-static int mptcp_release(struct socket *sock)
-{
-       struct mptcp_subflow_context *subflow;
-       struct sock *sk = sock->sk;
-       struct mptcp_sock *msk;
-
-       if (!sk)
-               return 0;
-
-       lock_sock(sk);
-
-       msk = mptcp_sk(sk);
-
-       mptcp_for_each_subflow(msk, subflow) {
-               struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
-
-               ip_mc_drop_socket(ssk);
-       }
-
-       release_sock(sk);
-
-       return inet_release(sock);
-}
-
 static const struct proto_ops mptcp_stream_ops = {
        .family            = PF_INET,
        .owner             = THIS_MODULE,
-       .release           = mptcp_release,
+       .release           = inet_release,
        .bind              = mptcp_bind,
        .connect           = mptcp_stream_connect,
        .socketpair        = sock_no_socketpair,
@@ -3538,35 +3557,10 @@ void __init mptcp_proto_init(void)
 }
 
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
-static int mptcp6_release(struct socket *sock)
-{
-       struct mptcp_subflow_context *subflow;
-       struct mptcp_sock *msk;
-       struct sock *sk = sock->sk;
-
-       if (!sk)
-               return 0;
-
-       lock_sock(sk);
-
-       msk = mptcp_sk(sk);
-
-       mptcp_for_each_subflow(msk, subflow) {
-               struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
-
-               ip_mc_drop_socket(ssk);
-               ipv6_sock_mc_close(ssk);
-               ipv6_sock_ac_close(ssk);
-       }
-
-       release_sock(sk);
-       return inet6_release(sock);
-}
-
 static const struct proto_ops mptcp_v6_stream_ops = {
        .family            = PF_INET6,
        .owner             = THIS_MODULE,
-       .release           = mptcp6_release,
+       .release           = inet6_release,
        .bind              = mptcp_bind,
        .connect           = mptcp_stream_connect,
        .socketpair        = sock_no_socketpair,
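
The mptcp hunks drop the per-subflow multicast cleanup from the release paths and instead reject multicast and anycast socket options up front: mptcp_unsupported() makes setsockopt() return -ENOPROTOOPT before any subflow is touched. A userspace sketch of the resulting behaviour (IPPROTO_MPTCP is taken from the toolchain headers, with the uapi value 262 as a fallback):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262
#endif

int main(void)
{
        struct ip_mreq mreq;
        int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);

        if (fd < 0) {
                perror("socket(IPPROTO_MPTCP)");        /* kernel without MPTCP */
                return 1;
        }

        memset(&mreq, 0, sizeof(mreq));
        mreq.imr_multiaddr.s_addr = htonl(0xE0000001u); /* 224.0.0.1 */

        /* With the change above this fails cleanly with ENOPROTOOPT instead of
         * the membership landing on individual TCP subflows, which is the
         * state the removed release() code had to clean up. */
        if (setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) < 0)
                printf("IP_ADD_MEMBERSHIP: %s\n", strerror(errno));
        return 0;
}
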
index a9cb355..ffff8da 100644 (file)
@@ -105,13 +105,20 @@ static void ncsi_channel_monitor(struct timer_list *t)
        monitor_state = nc->monitor.state;
        spin_unlock_irqrestore(&nc->lock, flags);
 
-       if (!enabled || chained) {
-               ncsi_stop_channel_monitor(nc);
-               return;
-       }
+       if (!enabled)
+               return;         /* expected race disabling timer */
+       if (WARN_ON_ONCE(chained))
+               goto bad_state;
+
        if (state != NCSI_CHANNEL_INACTIVE &&
            state != NCSI_CHANNEL_ACTIVE) {
-               ncsi_stop_channel_monitor(nc);
+bad_state:
+               netdev_warn(ndp->ndev.dev,
+                           "Bad NCSI monitor state channel %d 0x%x %s queue\n",
+                           nc->id, state, chained ? "on" : "off");
+               spin_lock_irqsave(&nc->lock, flags);
+               nc->monitor.enabled = false;
+               spin_unlock_irqrestore(&nc->lock, flags);
                return;
        }
 
@@ -136,10 +143,9 @@ static void ncsi_channel_monitor(struct timer_list *t)
                ncsi_report_link(ndp, true);
                ndp->flags |= NCSI_DEV_RESHUFFLE;
 
-               ncsi_stop_channel_monitor(nc);
-
                ncm = &nc->modes[NCSI_MODE_LINK];
                spin_lock_irqsave(&nc->lock, flags);
+               nc->monitor.enabled = false;
                nc->state = NCSI_CHANNEL_INVISIBLE;
                ncm->data[2] &= ~0x1;
                spin_unlock_irqrestore(&nc->lock, flags);
index 0ee702d..c6c0cb4 100644 (file)
@@ -266,6 +266,7 @@ static const char* l4proto_name(u16 proto)
        case IPPROTO_GRE: return "gre";
        case IPPROTO_SCTP: return "sctp";
        case IPPROTO_UDPLITE: return "udplite";
+       case IPPROTO_ICMPV6: return "icmpv6";
        }
 
        return "unknown";
index 2a6993f..1c5460e 100644 (file)
@@ -305,12 +305,12 @@ static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
                                     const __be32 *addr, const __be32 *mask)
 {
        struct flow_action_entry *entry;
-       int i;
+       int i, j;
 
-       for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32)) {
+       for (i = 0, j = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32), j++) {
                entry = flow_action_entry_next(flow_rule);
                flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
-                                   offset + i, &addr[i], mask);
+                                   offset + i, &addr[j], mask);
        }
 }
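
The flow-table offload fix above separates the byte offset handed to the mangle action (i, stepping by sizeof(u32)) from the index into the 32-bit words of the IPv6 address (j); the old code reused i for both, so beyond the first word it indexed past the four-element address array. A small userspace sketch of the corrected iteration, with invented helper names:

#include <stdio.h>
#include <stdint.h>

static void mangle_word(unsigned int byte_offset, uint32_t word)
{
        /* Stand-in for flow_offload_mangle(): just show what would be
         * rewritten at which header offset. */
        printf("offset %2u: 0x%08x\n", byte_offset, word);
}

int main(void)
{
        /* 128-bit IPv6 address as four 32-bit words (values arbitrary). */
        uint32_t addr[4] = { 0x20010db8, 0x00000000, 0x00000000, 0x00000001 };
        unsigned int base_offset = 24;  /* e.g. daddr offset in the IPv6 header */
        unsigned int i, j;

        /* i advances in bytes for the offset, j indexes the word array,
         * the split the fix above introduces. */
        for (i = 0, j = 0; i < sizeof(addr); i += sizeof(uint32_t), j++)
                mangle_word(base_offset + i, addr[j]);
        return 0;
}
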
 
index f57f1a6..589d2f6 100644 (file)
@@ -5295,16 +5295,35 @@ err_expr:
        return -ENOMEM;
 }
 
-static void nft_set_elem_expr_setup(const struct nft_set_ext *ext, int i,
-                                   struct nft_expr *expr_array[])
+static int nft_set_elem_expr_setup(struct nft_ctx *ctx,
+                                  const struct nft_set_ext *ext,
+                                  struct nft_expr *expr_array[],
+                                  u32 num_exprs)
 {
        struct nft_set_elem_expr *elem_expr = nft_set_ext_expr(ext);
-       struct nft_expr *expr = nft_setelem_expr_at(elem_expr, elem_expr->size);
+       struct nft_expr *expr;
+       int i, err;
+
+       for (i = 0; i < num_exprs; i++) {
+               expr = nft_setelem_expr_at(elem_expr, elem_expr->size);
+               err = nft_expr_clone(expr, expr_array[i]);
+               if (err < 0)
+                       goto err_elem_expr_setup;
+
+               elem_expr->size += expr_array[i]->ops->size;
+               nft_expr_destroy(ctx, expr_array[i]);
+               expr_array[i] = NULL;
+       }
+
+       return 0;
+
+err_elem_expr_setup:
+       for (; i < num_exprs; i++) {
+               nft_expr_destroy(ctx, expr_array[i]);
+               expr_array[i] = NULL;
+       }
 
-       memcpy(expr, expr_array[i], expr_array[i]->ops->size);
-       elem_expr->size += expr_array[i]->ops->size;
-       kfree(expr_array[i]);
-       expr_array[i] = NULL;
+       return -ENOMEM;
 }
 
 static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
@@ -5556,12 +5575,15 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                *nft_set_ext_obj(ext) = obj;
                obj->use++;
        }
-       for (i = 0; i < num_exprs; i++)
-               nft_set_elem_expr_setup(ext, i, expr_array);
+       err = nft_set_elem_expr_setup(ctx, ext, expr_array, num_exprs);
+       if (err < 0)
+               goto err_elem_expr;
 
        trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);
-       if (trans == NULL)
-               goto err_trans;
+       if (trans == NULL) {
+               err = -ENOMEM;
+               goto err_elem_expr;
+       }
 
        ext->genmask = nft_genmask_cur(ctx->net) | NFT_SET_ELEM_BUSY_MASK;
        err = set->ops->insert(ctx->net, set, &elem, &ext2);
@@ -5605,7 +5627,7 @@ err_set_full:
        set->ops->remove(ctx->net, set, &elem);
 err_element_clash:
        kfree(trans);
-err_trans:
+err_elem_expr:
        if (obj)
                obj->use--;
 
index 0e2c315..82ec27b 100644 (file)
@@ -76,13 +76,13 @@ static int nft_limit_init(struct nft_limit *limit,
                return -EOVERFLOW;
 
        if (pkts) {
-               tokens = div_u64(limit->nsecs, limit->rate) * limit->burst;
+               tokens = div64_u64(limit->nsecs, limit->rate) * limit->burst;
        } else {
                /* The token bucket size limits the number of tokens can be
                 * accumulated. tokens_max specifies the bucket size.
                 * tokens_max = unit * (rate + burst) / rate.
                 */
-               tokens = div_u64(limit->nsecs * (limit->rate + limit->burst),
+               tokens = div64_u64(limit->nsecs * (limit->rate + limit->burst),
                                 limit->rate);
        }
 
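The switch to div64_u64() matters because div_u64() takes a 32-bit divisor, so a user-supplied rate above U32_MAX would be truncated (possibly to zero, which is the divide error this change avoids), while div64_u64() keeps the full 64-bit divisor. A small userspace model of the difference, with local stand-ins for the <linux/math64.h> helpers (only the divisor widths of the real prototypes are modelled):

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins mirroring the <linux/math64.h> signatures. */
static uint64_t div_u64(uint64_t dividend, uint32_t divisor)   { return dividend / divisor; }
static uint64_t div64_u64(uint64_t dividend, uint64_t divisor) { return dividend / divisor; }

int main(void)
{
	uint64_t nsecs = 1000000000ULL;   /* one second worth of tokens */
	uint64_t rate  = 0x100000000ULL;  /* user-supplied rate just above U32_MAX */

	printf("div64_u64: %llu\n", (unsigned long long)div64_u64(nsecs, rate));

	if ((uint32_t)rate == 0)          /* what div_u64() would see as its divisor */
		printf("div_u64 would divide by zero here\n");
	else
		printf("div_u64: %llu\n", (unsigned long long)div_u64(nsecs, rate));
	return 0;
}
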
index 6bd31a7..92e9d4e 100644 (file)
@@ -733,7 +733,7 @@ void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
 {
        const struct xt_match *match = m->u.kernel.match;
        struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
-       int pad, off = xt_compat_match_offset(match);
+       int off = xt_compat_match_offset(match);
        u_int16_t msize = cm->u.user.match_size;
        char name[sizeof(m->u.user.name)];
 
@@ -743,9 +743,6 @@ void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
                match->compat_from_user(m->data, cm->data);
        else
                memcpy(m->data, cm->data, msize - sizeof(*cm));
-       pad = XT_ALIGN(match->matchsize) - match->matchsize;
-       if (pad > 0)
-               memset(m->data + match->matchsize, 0, pad);
 
        msize += off;
        m->u.user.match_size = msize;
@@ -1116,7 +1113,7 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
 {
        const struct xt_target *target = t->u.kernel.target;
        struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
-       int pad, off = xt_compat_target_offset(target);
+       int off = xt_compat_target_offset(target);
        u_int16_t tsize = ct->u.user.target_size;
        char name[sizeof(t->u.user.name)];
 
@@ -1126,9 +1123,6 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
                target->compat_from_user(t->data, ct->data);
        else
                memcpy(t->data, ct->data, tsize - sizeof(*ct));
-       pad = XT_ALIGN(target->targetsize) - target->targetsize;
-       if (pad > 0)
-               memset(t->data + target->targetsize, 0, pad);
 
        tsize += off;
        t->u.user.target_size = tsize;
index dd48893..3a62f97 100644 (file)
@@ -1019,7 +1019,6 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
                        return -EINVAL;
        }
 
-       netlink_lock_table();
        if (nlk->netlink_bind && groups) {
                int group;
 
@@ -1031,13 +1030,14 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
                        if (!err)
                                continue;
                        netlink_undo_bind(group, groups, sk);
-                       goto unlock;
+                       return err;
                }
        }
 
        /* No need for barriers here as we return to user-space without
         * using any of the bound attributes.
         */
+       netlink_lock_table();
        if (!bound) {
                err = nladdr->nl_pid ?
                        netlink_insert(sk, nladdr->nl_pid) :
index d257ed3..a3b46f8 100644 (file)
@@ -108,11 +108,13 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
                                          llcp_sock->service_name_len,
                                          GFP_KERNEL);
        if (!llcp_sock->service_name) {
+               nfc_llcp_local_put(llcp_sock->local);
                ret = -ENOMEM;
                goto put_dev;
        }
        llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
        if (llcp_sock->ssap == LLCP_SAP_MAX) {
+               nfc_llcp_local_put(llcp_sock->local);
                kfree(llcp_sock->service_name);
                llcp_sock->service_name = NULL;
                ret = -EADDRINUSE;
@@ -671,6 +673,10 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
                ret = -EISCONN;
                goto error;
        }
+       if (sk->sk_state == LLCP_CONNECTING) {
+               ret = -EINPROGRESS;
+               goto error;
+       }
 
        dev = nfc_get_device(addr->dev_idx);
        if (dev == NULL) {
@@ -702,6 +708,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
        llcp_sock->local = nfc_llcp_local_get(local);
        llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
        if (llcp_sock->ssap == LLCP_SAP_MAX) {
+               nfc_llcp_local_put(llcp_sock->local);
                ret = -ENOMEM;
                goto put_dev;
        }
@@ -743,9 +750,12 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
 
 sock_unlink:
        nfc_llcp_sock_unlink(&local->connecting_sockets, sk);
+       kfree(llcp_sock->service_name);
+       llcp_sock->service_name = NULL;
 
 sock_llcp_release:
        nfc_llcp_put_ssap(local, llcp_sock->ssap);
+       nfc_llcp_local_put(llcp_sock->local);
 
 put_dev:
        nfc_put_device(dev);
index 71cec03..d217bd9 100644 (file)
@@ -2034,10 +2034,10 @@ static int ovs_ct_limit_del_zone_limit(struct nlattr *nla_zone_limit,
 static int ovs_ct_limit_get_default_limit(struct ovs_ct_limit_info *info,
                                          struct sk_buff *reply)
 {
-       struct ovs_zone_limit zone_limit;
-
-       zone_limit.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE;
-       zone_limit.limit = info->default_limit;
+       struct ovs_zone_limit zone_limit = {
+               .zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE,
+               .limit   = info->default_limit,
+       };
 
        return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
 }
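
The designated initializer works here because any member not named in the initializer list is implicitly zero-initialized, so the reply no longer carries whatever happened to be on the stack in fields the old code never set. A short standalone illustration (the struct below is a stand-in, not the real ovs_zone_limit layout):

#include <stdio.h>

struct reply {                  /* illustrative stand-in */
	int zone_id;
	unsigned int limit;
	unsigned int count;     /* deliberately not named below */
};

int main(void)
{
	struct reply r = {
		.zone_id = -1,
		.limit   = 42,
	};

	/* .count is guaranteed to be zero, not stale stack contents. */
	printf("%d %u %u\n", r.zone_id, r.limit, r.count);
	return 0;
}

Whether padding bytes are also cleared is less clear-cut in the C standard, which is why memset() is sometimes preferred for structures copied to userspace; the unnamed members themselves, however, are reliably zero.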
index dfc820e..1e4fb56 100644 (file)
@@ -271,7 +271,10 @@ static int qrtr_tx_wait(struct qrtr_node *node, int dest_node, int dest_port,
                flow = kzalloc(sizeof(*flow), GFP_KERNEL);
                if (flow) {
                        init_waitqueue_head(&flow->resume_tx);
-                       radix_tree_insert(&node->qrtr_tx_flow, key, flow);
+                       if (radix_tree_insert(&node->qrtr_tx_flow, key, flow)) {
+                               kfree(flow);
+                               flow = NULL;
+                       }
                }
        }
        mutex_unlock(&node->qrtr_tx_lock);
index 071a261..4fc66ff 100644 (file)
@@ -180,6 +180,7 @@ void rds_message_put(struct rds_message *rm)
                rds_message_purge(rm);
 
                kfree(rm);
+               rm = NULL;
        }
 }
 EXPORT_SYMBOL_GPL(rds_message_put);
@@ -347,8 +348,9 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
        rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE);
        rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
        if (IS_ERR(rm->data.op_sg)) {
+               void *err = ERR_CAST(rm->data.op_sg);
                rds_message_put(rm);
-               return ERR_CAST(rm->data.op_sg);
+               return err;
        }
 
        for (i = 0; i < rm->data.op_nents; ++i) {
index 985d0b7..fe5264b 100644 (file)
@@ -665,7 +665,7 @@ static void rds_send_remove_from_sock(struct list_head *messages, int status)
 unlock_and_drop:
                spin_unlock_irqrestore(&rm->m_rs_lock, flags);
                rds_message_put(rm);
-               if (was_on_sock)
+               if (was_on_sock && rm)
                        rds_message_put(rm);
        }
 
index 68d6ef9..ac15a94 100644 (file)
@@ -69,7 +69,7 @@ struct rfkill {
 
 struct rfkill_int_event {
        struct list_head        list;
-       struct rfkill_event     ev;
+       struct rfkill_event_ext ev;
 };
 
 struct rfkill_data {
@@ -253,7 +253,8 @@ static void rfkill_global_led_trigger_unregister(void)
 }
 #endif /* CONFIG_RFKILL_LEDS */
 
-static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
+static void rfkill_fill_event(struct rfkill_event_ext *ev,
+                             struct rfkill *rfkill,
                              enum rfkill_operation op)
 {
        unsigned long flags;
@@ -1237,7 +1238,7 @@ static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
                                size_t count, loff_t *pos)
 {
        struct rfkill *rfkill;
-       struct rfkill_event ev;
+       struct rfkill_event_ext ev;
        int ret;
 
        /* we don't need the 'hard' variable but accept it */
index b919826..f6d5755 100644 (file)
@@ -158,7 +158,7 @@ static int __tcf_action_put(struct tc_action *p, bool bind)
        return 0;
 }
 
-int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
+static int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
 {
        int ret = 0;
 
@@ -184,7 +184,18 @@ int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
 
        return ret;
 }
-EXPORT_SYMBOL(__tcf_idr_release);
+
+int tcf_idr_release(struct tc_action *a, bool bind)
+{
+       const struct tc_action_ops *ops = a->ops;
+       int ret;
+
+       ret = __tcf_idr_release(a, bind, false);
+       if (ret == ACT_P_DELETED)
+               module_put(ops->owner);
+       return ret;
+}
+EXPORT_SYMBOL(tcf_idr_release);
 
 static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
 {
@@ -493,6 +504,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
        }
 
        p->idrinfo = idrinfo;
+       __module_get(ops->owner);
        p->ops = ops;
        *a = p;
        return 0;
@@ -992,7 +1004,8 @@ struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla,
 struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
                                    struct nlattr *nla, struct nlattr *est,
                                    char *name, int ovr, int bind,
-                                   struct tc_action_ops *a_o, bool rtnl_held,
+                                   struct tc_action_ops *a_o, int *init_res,
+                                   bool rtnl_held,
                                    struct netlink_ext_ack *extack)
 {
        struct nla_bitfield32 flags = { 0, 0 };
@@ -1028,6 +1041,7 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
        }
        if (err < 0)
                goto err_out;
+       *init_res = err;
 
        if (!name && tb[TCA_ACT_COOKIE])
                tcf_set_action_cookie(&a->act_cookie, cookie);
@@ -1035,13 +1049,6 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
        if (!name)
                a->hw_stats = hw_stats;
 
-       /* module count goes up only when brand new policy is created
-        * if it exists and is only bound to in a_o->init() then
-        * ACT_P_CREATED is not returned (a zero is).
-        */
-       if (err != ACT_P_CREATED)
-               module_put(a_o->owner);
-
        return a;
 
 err_out:
@@ -1056,7 +1063,7 @@ err_out:
 
 int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
                    struct nlattr *est, char *name, int ovr, int bind,
-                   struct tc_action *actions[], size_t *attr_size,
+                   struct tc_action *actions[], int init_res[], size_t *attr_size,
                    bool rtnl_held, struct netlink_ext_ack *extack)
 {
        struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {};
@@ -1084,7 +1091,8 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
 
        for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
                act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind,
-                                       ops[i - 1], rtnl_held, extack);
+                                       ops[i - 1], &init_res[i - 1], rtnl_held,
+                                       extack);
                if (IS_ERR(act)) {
                        err = PTR_ERR(act);
                        goto err;
@@ -1100,7 +1108,8 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
        tcf_idr_insert_many(actions);
 
        *attr_size = tcf_action_full_attrs_size(sz);
-       return i - 1;
+       err = i - 1;
+       goto err_mod;
 
 err:
        tcf_action_destroy(actions, bind);
@@ -1497,12 +1506,13 @@ static int tcf_action_add(struct net *net, struct nlattr *nla,
                          struct netlink_ext_ack *extack)
 {
        size_t attr_size = 0;
-       int loop, ret;
+       int loop, ret, i;
        struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
+       int init_res[TCA_ACT_MAX_PRIO] = {};
 
        for (loop = 0; loop < 10; loop++) {
                ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0,
-                                     actions, &attr_size, true, extack);
+                                     actions, init_res, &attr_size, true, extack);
                if (ret != -EAGAIN)
                        break;
        }
@@ -1510,8 +1520,12 @@ static int tcf_action_add(struct net *net, struct nlattr *nla,
        if (ret < 0)
                return ret;
        ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
-       if (ovr)
-               tcf_action_put_many(actions);
+
+       /* only put existing actions */
+       for (i = 0; i < TCA_ACT_MAX_PRIO; i++)
+               if (init_res[i] == ACT_P_CREATED)
+                       actions[i] = NULL;
+       tcf_action_put_many(actions);
 
        return ret;
 }
index 13341e7..340d5af 100644 (file)
@@ -646,7 +646,7 @@ static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
        struct net_device *dev = block_cb->indr.dev;
        struct Qdisc *sch = block_cb->indr.sch;
        struct netlink_ext_ack extack = {};
-       struct flow_block_offload bo;
+       struct flow_block_offload bo = {};
 
        tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
                               block_cb->indr.binder_type,
@@ -3040,6 +3040,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
 {
 #ifdef CONFIG_NET_CLS_ACT
        {
+               int init_res[TCA_ACT_MAX_PRIO] = {};
                struct tc_action *act;
                size_t attr_size = 0;
 
@@ -3051,12 +3052,11 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
                                return PTR_ERR(a_o);
                        act = tcf_action_init_1(net, tp, tb[exts->police],
                                                rate_tlv, "police", ovr,
-                                               TCA_ACT_BIND, a_o, rtnl_held,
-                                               extack);
-                       if (IS_ERR(act)) {
-                               module_put(a_o->owner);
+                                               TCA_ACT_BIND, a_o, init_res,
+                                               rtnl_held, extack);
+                       module_put(a_o->owner);
+                       if (IS_ERR(act))
                                return PTR_ERR(act);
-                       }
 
                        act->type = exts->type = TCA_OLD_COMPAT;
                        exts->actions[0] = act;
@@ -3067,8 +3067,8 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
 
                        err = tcf_action_init(net, tp, tb[exts->action],
                                              rate_tlv, NULL, ovr, TCA_ACT_BIND,
-                                             exts->actions, &attr_size,
-                                             rtnl_held, extack);
+                                             exts->actions, init_res,
+                                             &attr_size, rtnl_held, extack);
                        if (err < 0)
                                return err;
                        exts->nr_actions = err;
index 62e12cb..081c11d 100644 (file)
@@ -1675,9 +1675,10 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
                                          cl->parent->common.classid,
                                          NULL);
                if (q->offload) {
-                       if (new_q)
+                       if (new_q) {
                                htb_set_lockdep_class_child(new_q);
-                       htb_parent_to_leaf_offload(sch, dev_queue, new_q);
+                               htb_parent_to_leaf_offload(sch, dev_queue, new_q);
+                       }
                }
        }
 
index 2f1f0a3..6af6b95 100644 (file)
@@ -134,6 +134,9 @@ teql_destroy(struct Qdisc *sch)
        struct teql_sched_data *dat = qdisc_priv(sch);
        struct teql_master *master = dat->m;
 
+       if (!master)
+               return;
+
        prev = master->slaves;
        if (prev) {
                do {
index c3e89c7..bd08807 100644 (file)
@@ -664,8 +664,8 @@ static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp)
        if (!(type & IPV6_ADDR_UNICAST))
                return 0;
 
-       return sp->inet.freebind || net->ipv6.sysctl.ip_nonlocal_bind ||
-               ipv6_chk_addr(net, in6, NULL, 0);
+       return ipv6_can_nonlocal_bind(net, &sp->inet) ||
+              ipv6_chk_addr(net, in6, NULL, 0);
 }
 
 /* This function checks if the address is a valid address to be used for
@@ -954,8 +954,7 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
                        net = sock_net(&opt->inet.sk);
                        rcu_read_lock();
                        dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id);
-                       if (!dev || !(opt->inet.freebind ||
-                                     net->ipv6.sysctl.ip_nonlocal_bind ||
+                       if (!dev || !(ipv6_can_nonlocal_bind(net, &opt->inet) ||
                                      ipv6_chk_addr(net, &addr->v6.sin6_addr,
                                                    dev, 0))) {
                                rcu_read_unlock();
index a710917..b9b3d89 100644 (file)
@@ -1520,11 +1520,9 @@ static void sctp_close(struct sock *sk, long timeout)
 
        /* Supposedly, no process has access to the socket, but
         * the net layers still may.
-        * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
-        * held and that should be grabbed before socket lock.
         */
-       spin_lock_bh(&net->sctp.addr_wq_lock);
-       bh_lock_sock_nested(sk);
+       local_bh_disable();
+       bh_lock_sock(sk);
 
        /* Hold the sock, since sk_common_release() will put sock_put()
         * and we have just a little more cleanup.
@@ -1533,7 +1531,7 @@ static void sctp_close(struct sock *sk, long timeout)
        sk_common_release(sk);
 
        bh_unlock_sock(sk);
-       spin_unlock_bh(&net->sctp.addr_wq_lock);
+       local_bh_enable();
 
        sock_put(sk);
 
@@ -4993,9 +4991,6 @@ static int sctp_init_sock(struct sock *sk)
        sk_sockets_allocated_inc(sk);
        sock_prot_inuse_add(net, sk->sk_prot, 1);
 
-       /* Nothing can fail after this block, otherwise
-        * sctp_destroy_sock() will be called without addr_wq_lock held
-        */
        if (net->sctp.default_auto_asconf) {
                spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
                list_add_tail(&sp->auto_asconf_list,
@@ -5030,7 +5025,9 @@ static void sctp_destroy_sock(struct sock *sk)
 
        if (sp->do_auto_asconf) {
                sp->do_auto_asconf = 0;
+               spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
                list_del(&sp->auto_asconf_list);
+               spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
        }
        sctp_endpoint_free(sp->ep);
        local_bh_disable();
index 6bf4550..57c6a1a 100644 (file)
@@ -154,9 +154,9 @@ struct tipc_media {
  * care of initializing all other fields.
  */
 struct tipc_bearer {
-       void __rcu *media_ptr;                  /* initalized by media */
-       u32 mtu;                                /* initalized by media */
-       struct tipc_media_addr addr;            /* initalized by media */
+       void __rcu *media_ptr;                  /* initialized by media */
+       u32 mtu;                                /* initialized by media */
+       struct tipc_media_addr addr;            /* initialized by media */
        char name[TIPC_MAX_BEARER_NAME];
        struct tipc_media *media;
        struct tipc_media_addr bcast_addr;
index f4fca8f..97710ce 100644 (file)
@@ -1941,12 +1941,13 @@ static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
                        goto rcv;
                if (tipc_aead_clone(&tmp, aead) < 0)
                        goto rcv;
+               WARN_ON(!refcount_inc_not_zero(&tmp->refcnt));
                if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key, false) < 0) {
                        tipc_aead_free(&tmp->rcu);
                        goto rcv;
                }
                tipc_aead_put(aead);
-               aead = tipc_aead_get(tmp);
+               aead = tmp;
        }
 
        if (unlikely(err)) {
index a129f66..faf6bf5 100644 (file)
@@ -89,7 +89,7 @@
  *     - A spin lock to protect the registry of kernel/driver users (reg.c)
  *     - A global spin_lock (tipc_port_lock), which only task is to ensure
  *       consistency where more than one port is involved in an operation,
- *       i.e., whe a port is part of a linked list of ports.
+ *       i.e., when a port is part of a linked list of ports.
  *       There are two such lists; 'port_list', which is used for management,
  *       and 'wait_list', which is used to queue ports during congestion.
  *
index 136338b..e0ee832 100644 (file)
@@ -1734,7 +1734,7 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
 }
 
 /* tipc_node_xmit_skb(): send single buffer to destination
- * Buffers sent via this functon are generally TIPC_SYSTEM_IMPORTANCE
+ * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
  * messages, which will not be rejected
  * The only exception is datagram messages rerouted after secondary
  * lookup, which are rare and safe to dispose of anyway.
index cebcc10..022999e 100644 (file)
@@ -1265,7 +1265,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
                spin_lock_bh(&inputq->lock);
                if (skb_peek(arrvq) == skb) {
                        skb_queue_splice_tail_init(&tmpq, inputq);
-                       kfree_skb(__skb_dequeue(arrvq));
+                       __skb_dequeue(arrvq);
                }
                spin_unlock_bh(&inputq->lock);
                __skb_queue_purge(&tmpq);
index 034af85..b1df42e 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 
 #include <linux/if.h>
@@ -229,9 +229,13 @@ static int validate_beacon_head(const struct nlattr *attr,
        unsigned int len = nla_len(attr);
        const struct element *elem;
        const struct ieee80211_mgmt *mgmt = (void *)data;
-       bool s1g_bcn = ieee80211_is_s1g_beacon(mgmt->frame_control);
        unsigned int fixedlen, hdrlen;
+       bool s1g_bcn;
 
+       if (len < offsetofend(typeof(*mgmt), frame_control))
+               goto err;
+
+       s1g_bcn = ieee80211_is_s1g_beacon(mgmt->frame_control);
        if (s1g_bcn) {
                fixedlen = offsetof(struct ieee80211_ext,
                                    u.s1g_beacon.variable);
@@ -5485,7 +5489,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
                        rdev, info->attrs[NL80211_ATTR_UNSOL_BCAST_PROBE_RESP],
                        &params);
                if (err)
-                       return err;
+                       goto out;
        }
 
        nl80211_calculate_ap_params(&params);
index 019952d..758eb7d 100644 (file)
@@ -2352,14 +2352,16 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
                return NULL;
 
        if (ext) {
-               struct ieee80211_s1g_bcn_compat_ie *compat;
-               u8 *ie;
+               const struct ieee80211_s1g_bcn_compat_ie *compat;
+               const struct element *elem;
 
-               ie = (void *)cfg80211_find_ie(WLAN_EID_S1G_BCN_COMPAT,
-                                             variable, ielen);
-               if (!ie)
+               elem = cfg80211_find_elem(WLAN_EID_S1G_BCN_COMPAT,
+                                         variable, ielen);
+               if (!elem)
+                       return NULL;
+               if (elem->datalen < sizeof(*compat))
                        return NULL;
-               compat = (void *)(ie + 2);
+               compat = (void *)elem->data;
                bssid = ext->u.s1g_beacon.sa;
                capability = le16_to_cpu(compat->compat_info);
                beacon_int = le16_to_cpu(compat->beacon_int);
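
The elem->datalen check above follows the usual pattern of validating a TLV payload's length before overlaying a struct on it. A hedged userspace sketch of that pattern (the struct and names are illustrative, not the real IE layout):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct compat_info {            /* stand-in for the S1G compat IE payload */
	uint16_t compat_info;
	uint16_t beacon_int;
} __attribute__((packed));

/* Accept the payload only if it is long enough for the struct we want
 * to read out of it; otherwise reject it, as the hunk above now does.
 */
static int parse(const uint8_t *data, size_t datalen, struct compat_info *out)
{
	if (datalen < sizeof(*out))
		return -1;
	memcpy(out, data, sizeof(*out));
	return 0;
}

int main(void)
{
	uint8_t short_payload[2] = { 0x34, 0x12 };
	struct compat_info ci;

	printf("short payload accepted? %d\n",
	       parse(short_payload, sizeof(short_payload), &ci) == 0);
	return 0;
}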
index 07756ca..08a70b4 100644 (file)
@@ -529,7 +529,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
                cfg80211_sme_free(wdev);
        }
 
-       if (WARN_ON(wdev->conn))
+       if (wdev->conn)
                return -EINPROGRESS;
 
        wdev->conn = kzalloc(sizeof(*wdev->conn), GFP_KERNEL);
index d8e8a11..a20aec9 100644 (file)
@@ -216,7 +216,7 @@ static struct nlmsghdr *xfrm_nlmsg_put_compat(struct sk_buff *skb,
        case XFRM_MSG_GETSADINFO:
        case XFRM_MSG_GETSPDINFO:
        default:
-               WARN_ONCE(1, "unsupported nlmsg_type %d", nlh_src->nlmsg_type);
+               pr_warn_once("unsupported nlmsg_type %d\n", nlh_src->nlmsg_type);
                return ERR_PTR(-EOPNOTSUPP);
        }
 
@@ -277,7 +277,7 @@ static int xfrm_xlate64_attr(struct sk_buff *dst, const struct nlattr *src)
                return xfrm_nla_cpy(dst, src, nla_len(src));
        default:
                BUILD_BUG_ON(XFRMA_MAX != XFRMA_IF_ID);
-               WARN_ONCE(1, "unsupported nla_type %d", src->nla_type);
+               pr_warn_once("unsupported nla_type %d\n", src->nla_type);
                return -EOPNOTSUPP;
        }
 }
@@ -315,8 +315,10 @@ static int xfrm_alloc_compat(struct sk_buff *skb, const struct nlmsghdr *nlh_src
        struct sk_buff *new = NULL;
        int err;
 
-       if (WARN_ON_ONCE(type >= ARRAY_SIZE(xfrm_msg_min)))
+       if (type >= ARRAY_SIZE(xfrm_msg_min)) {
+               pr_warn_once("unsupported nlmsg_type %d\n", nlh_src->nlmsg_type);
                return -EOPNOTSUPP;
+       }
 
        if (skb_shinfo(skb)->frag_list == NULL) {
                new = alloc_skb(skb->len + skb_tailroom(skb), GFP_ATOMIC);
@@ -378,6 +380,10 @@ static int xfrm_attr_cpy32(void *dst, size_t *pos, const struct nlattr *src,
        struct nlmsghdr *nlmsg = dst;
        struct nlattr *nla;
 
+       /* xfrm_user_rcv_msg_compat() relies on fact that 32-bit messages
+        * have the same len or shorter than 64-bit ones.
+        * 32-bit translation that is bigger than 64-bit original is unexpected.
+        */
        if (WARN_ON_ONCE(copy_len > payload))
                copy_len = payload;
 
index edf1189..6d6917b 100644 (file)
@@ -134,8 +134,6 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
                return skb;
        }
 
-       xo->flags |= XFRM_XMIT;
-
        if (skb_is_gso(skb) && unlikely(x->xso.dev != dev)) {
                struct sk_buff *segs;
 
index 495b1f5..8831f5a 100644 (file)
@@ -306,6 +306,8 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 
                        icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                } else {
+                       if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
+                               goto xmit;
                        icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                                      htonl(mtu));
                }
@@ -314,6 +316,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
                return -EMSGSIZE;
        }
 
+xmit:
        xfrmi_scrub_packet(skb, !net_eq(xi->net, dev_net(dev)));
        skb_dst_set(skb, dst);
        skb->dev = tdev;
index a7ab193..e4cb0ff 100644 (file)
@@ -503,22 +503,22 @@ out:
        return err;
 }
 
-int xfrm_output_resume(struct sk_buff *skb, int err)
+int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err)
 {
        struct net *net = xs_net(skb_dst(skb)->xfrm);
 
        while (likely((err = xfrm_output_one(skb, err)) == 0)) {
                nf_reset_ct(skb);
 
-               err = skb_dst(skb)->ops->local_out(net, skb->sk, skb);
+               err = skb_dst(skb)->ops->local_out(net, sk, skb);
                if (unlikely(err != 1))
                        goto out;
 
                if (!skb_dst(skb)->xfrm)
-                       return dst_output(net, skb->sk, skb);
+                       return dst_output(net, sk, skb);
 
                err = nf_hook(skb_dst(skb)->ops->family,
-                             NF_INET_POST_ROUTING, net, skb->sk, skb,
+                             NF_INET_POST_ROUTING, net, sk, skb,
                              NULL, skb_dst(skb)->dev, xfrm_output2);
                if (unlikely(err != 1))
                        goto out;
@@ -534,7 +534,7 @@ EXPORT_SYMBOL_GPL(xfrm_output_resume);
 
 static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-       return xfrm_output_resume(skb, 1);
+       return xfrm_output_resume(sk, skb, 1);
 }
 
 static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb)
@@ -660,6 +660,12 @@ static int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb)
 {
        int err;
 
+       if (x->outer_mode.encap == XFRM_MODE_BEET &&
+           ip_is_fragment(ip_hdr(skb))) {
+               net_warn_ratelimited("BEET mode doesn't support inner IPv4 fragments\n");
+               return -EAFNOSUPPORT;
+       }
+
        err = xfrm4_tunnel_check_size(skb);
        if (err)
                return err;
@@ -705,8 +711,15 @@ out:
 static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
 {
 #if IS_ENABLED(CONFIG_IPV6)
+       unsigned int ptr = 0;
        int err;
 
+       if (x->outer_mode.encap == XFRM_MODE_BEET &&
+           ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL, NULL) >= 0) {
+               net_warn_ratelimited("BEET mode doesn't support inner IPv6 fragments\n");
+               return -EAFNOSUPPORT;
+       }
+
        err = xfrm6_tunnel_check_size(skb);
        if (err)
                return err;
index d01ca1a..4496f7e 100644 (file)
@@ -44,7 +44,6 @@ static void xfrm_state_gc_task(struct work_struct *work);
  */
 
 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
-static __read_mostly seqcount_t xfrm_state_hash_generation = SEQCNT_ZERO(xfrm_state_hash_generation);
 static struct kmem_cache *xfrm_state_cache __ro_after_init;
 
 static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
@@ -140,7 +139,7 @@ static void xfrm_hash_resize(struct work_struct *work)
        }
 
        spin_lock_bh(&net->xfrm.xfrm_state_lock);
-       write_seqcount_begin(&xfrm_state_hash_generation);
+       write_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
 
        nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
        odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
@@ -156,7 +155,7 @@ static void xfrm_hash_resize(struct work_struct *work)
        rcu_assign_pointer(net->xfrm.state_byspi, nspi);
        net->xfrm.state_hmask = nhashmask;
 
-       write_seqcount_end(&xfrm_state_hash_generation);
+       write_seqcount_end(&net->xfrm.xfrm_state_hash_generation);
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);
 
        osize = (ohashmask + 1) * sizeof(struct hlist_head);
@@ -1063,7 +1062,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
 
        to_put = NULL;
 
-       sequence = read_seqcount_begin(&xfrm_state_hash_generation);
+       sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
 
        rcu_read_lock();
        h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
@@ -1176,7 +1175,7 @@ out:
        if (to_put)
                xfrm_state_put(to_put);
 
-       if (read_seqcount_retry(&xfrm_state_hash_generation, sequence)) {
+       if (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence)) {
                *err = -EAGAIN;
                if (x) {
                        xfrm_state_put(x);
@@ -2666,6 +2665,8 @@ int __net_init xfrm_state_init(struct net *net)
        net->xfrm.state_num = 0;
        INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
        spin_lock_init(&net->xfrm.xfrm_state_lock);
+       seqcount_spinlock_init(&net->xfrm.xfrm_state_hash_generation,
+                              &net->xfrm.xfrm_state_lock);
        return 0;
 
 out_byspi:
index 1e000cc..3d79190 100644 (file)
@@ -2,6 +2,14 @@
 CFLAGS_KASAN_NOSANITIZE := -fno-builtin
 KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET)
 
+cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
+
+ifdef CONFIG_KASAN_STACK
+       stack_enable := 1
+else
+       stack_enable := 0
+endif
+
 ifdef CONFIG_KASAN_GENERIC
 
 ifdef CONFIG_KASAN_INLINE
@@ -12,8 +20,6 @@ endif
 
 CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address
 
-cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
-
 # -fasan-shadow-offset fails without -fsanitize
 CFLAGS_KASAN_SHADOW := $(call cc-option, -fsanitize=kernel-address \
                        -fasan-shadow-offset=$(KASAN_SHADOW_OFFSET), \
@@ -27,7 +33,7 @@ else
        CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \
         $(call cc-param,asan-globals=1) \
         $(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
-        $(call cc-param,asan-stack=$(CONFIG_KASAN_STACK)) \
+        $(call cc-param,asan-stack=$(stack_enable)) \
         $(call cc-param,asan-instrument-allocas=1)
 endif
 
@@ -36,14 +42,14 @@ endif # CONFIG_KASAN_GENERIC
 ifdef CONFIG_KASAN_SW_TAGS
 
 ifdef CONFIG_KASAN_INLINE
-    instrumentation_flags := -mllvm -hwasan-mapping-offset=$(KASAN_SHADOW_OFFSET)
+    instrumentation_flags := $(call cc-param,hwasan-mapping-offset=$(KASAN_SHADOW_OFFSET))
 else
-    instrumentation_flags := -mllvm -hwasan-instrument-with-calls=1
+    instrumentation_flags := $(call cc-param,hwasan-instrument-with-calls=1)
 endif
 
 CFLAGS_KASAN := -fsanitize=kernel-hwaddress \
-               -mllvm -hwasan-instrument-stack=$(CONFIG_KASAN_STACK) \
-               -mllvm -hwasan-use-short-granules=0 \
+               $(call cc-param,hwasan-instrument-stack=$(stack_enable)) \
+               $(call cc-param,hwasan-use-short-granules=0) \
                $(instrumentation_flags)
 
 endif # CONFIG_KASAN_SW_TAGS
index 168cd27..2c52535 100644 (file)
@@ -20,6 +20,7 @@ SECTIONS {
 
        __patchable_function_entries : { *(__patchable_function_entries) }
 
+#ifdef CONFIG_LTO_CLANG
        /*
         * With CONFIG_LTO_CLANG, LLD always enables -fdata-sections and
         * -ffunction-sections, which increases the size of the final module.
@@ -41,6 +42,7 @@ SECTIONS {
        }
 
        .text : { *(.text .text.[0-9a-zA-Z_]*) }
+#endif
 }
 
 /* bring in arch-specific sections */
index 269967c..a56c364 100644 (file)
@@ -64,7 +64,7 @@ choice
        config GCC_PLUGIN_STRUCTLEAK_BYREF
                bool "zero-init structs passed by reference (strong)"
                depends on GCC_PLUGINS
-               depends on !(KASAN && KASAN_STACK=1)
+               depends on !(KASAN && KASAN_STACK)
                select GCC_PLUGIN_STRUCTLEAK
                help
                  Zero-initialize any structures on the stack that may
@@ -82,7 +82,7 @@ choice
        config GCC_PLUGIN_STRUCTLEAK_BYREF_ALL
                bool "zero-init anything passed by reference (very strong)"
                depends on GCC_PLUGINS
-               depends on !(KASAN && KASAN_STACK=1)
+               depends on !(KASAN && KASAN_STACK)
                select GCC_PLUGIN_STRUCTLEAK
                help
                  Zero-initialize any stack variables that may be passed
index 6dcb6aa..75df329 100644 (file)
@@ -109,7 +109,7 @@ static int avtab_insert(struct avtab *h, struct avtab_key *key, struct avtab_dat
        struct avtab_node *prev, *cur, *newnode;
        u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
 
-       if (!h)
+       if (!h || !h->nslot)
                return -EINVAL;
 
        hvalue = avtab_hash(key, h->mask);
@@ -154,7 +154,7 @@ avtab_insert_nonunique(struct avtab *h, struct avtab_key *key, struct avtab_datu
        struct avtab_node *prev, *cur;
        u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
 
-       if (!h)
+       if (!h || !h->nslot)
                return NULL;
        hvalue = avtab_hash(key, h->mask);
        for (prev = NULL, cur = h->htable[hvalue];
@@ -184,7 +184,7 @@ struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *key)
        struct avtab_node *cur;
        u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
 
-       if (!h)
+       if (!h || !h->nslot)
                return NULL;
 
        hvalue = avtab_hash(key, h->mask);
@@ -220,7 +220,7 @@ avtab_search_node(struct avtab *h, struct avtab_key *key)
        struct avtab_node *cur;
        u16 specified = key->specified & ~(AVTAB_ENABLED|AVTAB_ENABLED_OLD);
 
-       if (!h)
+       if (!h || !h->nslot)
                return NULL;
 
        hvalue = avtab_hash(key, h->mask);
@@ -295,6 +295,7 @@ void avtab_destroy(struct avtab *h)
        }
        kvfree(h->htable);
        h->htable = NULL;
+       h->nel = 0;
        h->nslot = 0;
        h->mask = 0;
 }
@@ -303,88 +304,52 @@ void avtab_init(struct avtab *h)
 {
        h->htable = NULL;
        h->nel = 0;
+       h->nslot = 0;
+       h->mask = 0;
 }
 
-int avtab_alloc(struct avtab *h, u32 nrules)
+static int avtab_alloc_common(struct avtab *h, u32 nslot)
 {
-       u32 mask = 0;
-       u32 shift = 0;
-       u32 work = nrules;
-       u32 nslot = 0;
-
-       if (nrules == 0)
-               goto avtab_alloc_out;
-
-       while (work) {
-               work  = work >> 1;
-               shift++;
-       }
-       if (shift > 2)
-               shift = shift - 2;
-       nslot = 1 << shift;
-       if (nslot > MAX_AVTAB_HASH_BUCKETS)
-               nslot = MAX_AVTAB_HASH_BUCKETS;
-       mask = nslot - 1;
+       if (!nslot)
+               return 0;
 
        h->htable = kvcalloc(nslot, sizeof(void *), GFP_KERNEL);
        if (!h->htable)
                return -ENOMEM;
 
- avtab_alloc_out:
-       h->nel = 0;
        h->nslot = nslot;
-       h->mask = mask;
-       pr_debug("SELinux: %d avtab hash slots, %d rules.\n",
-              h->nslot, nrules);
+       h->mask = nslot - 1;
        return 0;
 }
 
-int avtab_duplicate(struct avtab *new, struct avtab *orig)
+int avtab_alloc(struct avtab *h, u32 nrules)
 {
-       int i;
-       struct avtab_node *node, *tmp, *tail;
-
-       memset(new, 0, sizeof(*new));
+       int rc;
+       u32 nslot = 0;
 
-       new->htable = kvcalloc(orig->nslot, sizeof(void *), GFP_KERNEL);
-       if (!new->htable)
-               return -ENOMEM;
-       new->nslot = orig->nslot;
-       new->mask = orig->mask;
-
-       for (i = 0; i < orig->nslot; i++) {
-               tail = NULL;
-               for (node = orig->htable[i]; node; node = node->next) {
-                       tmp = kmem_cache_zalloc(avtab_node_cachep, GFP_KERNEL);
-                       if (!tmp)
-                               goto error;
-                       tmp->key = node->key;
-                       if (tmp->key.specified & AVTAB_XPERMS) {
-                               tmp->datum.u.xperms =
-                                       kmem_cache_zalloc(avtab_xperms_cachep,
-                                                       GFP_KERNEL);
-                               if (!tmp->datum.u.xperms) {
-                                       kmem_cache_free(avtab_node_cachep, tmp);
-                                       goto error;
-                               }
-                               tmp->datum.u.xperms = node->datum.u.xperms;
-                       } else
-                               tmp->datum.u.data = node->datum.u.data;
-
-                       if (tail)
-                               tail->next = tmp;
-                       else
-                               new->htable[i] = tmp;
-
-                       tail = tmp;
-                       new->nel++;
+       if (nrules != 0) {
+               u32 shift = 1;
+               u32 work = nrules >> 3;
+               while (work) {
+                       work >>= 1;
+                       shift++;
                }
+               nslot = 1 << shift;
+               if (nslot > MAX_AVTAB_HASH_BUCKETS)
+                       nslot = MAX_AVTAB_HASH_BUCKETS;
+
+               rc = avtab_alloc_common(h, nslot);
+               if (rc)
+                       return rc;
        }
 
+       pr_debug("SELinux: %d avtab hash slots, %d rules.\n", nslot, nrules);
        return 0;
-error:
-       avtab_destroy(new);
-       return -ENOMEM;
+}
+
+int avtab_alloc_dup(struct avtab *new, const struct avtab *orig)
+{
+       return avtab_alloc_common(new, orig->nslot);
 }
 
 void avtab_hash_eval(struct avtab *h, char *tag)
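
The reworked avtab_alloc() sizes the hash table from the rule count: nslot ends up as a power of two between roughly nrules/4 and nrules/2, capped by MAX_AVTAB_HASH_BUCKETS. A quick userspace rerun of the same arithmetic (cap omitted), useful for sanity-checking the heuristic; this is an editor's sketch, not kernel code:

#include <stdio.h>

static unsigned int nslot_for(unsigned int nrules)
{
	unsigned int shift = 1, work = nrules >> 3;

	if (!nrules)
		return 0;
	while (work) {
		work >>= 1;
		shift++;
	}
	return 1u << shift;   /* the kernel additionally caps this at MAX_AVTAB_HASH_BUCKETS */
}

int main(void)
{
	unsigned int samples[] = { 1, 8, 1000, 100000 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("nrules=%u -> nslot=%u\n", samples[i], nslot_for(samples[i]));
	return 0;
}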
index 4c4445c..f2eeb36 100644 (file)
@@ -89,7 +89,7 @@ struct avtab {
 
 void avtab_init(struct avtab *h);
 int avtab_alloc(struct avtab *, u32);
-int avtab_duplicate(struct avtab *new, struct avtab *orig);
+int avtab_alloc_dup(struct avtab *new, const struct avtab *orig);
 struct avtab_datum *avtab_search(struct avtab *h, struct avtab_key *k);
 void avtab_destroy(struct avtab *h);
 void avtab_hash_eval(struct avtab *h, char *tag);
index 0b32f3a..1ef74c0 100644 (file)
@@ -605,7 +605,6 @@ static int cond_dup_av_list(struct cond_av_list *new,
                        struct cond_av_list *orig,
                        struct avtab *avtab)
 {
-       struct avtab_node *avnode;
        u32 i;
 
        memset(new, 0, sizeof(*new));
@@ -615,10 +614,11 @@ static int cond_dup_av_list(struct cond_av_list *new,
                return -ENOMEM;
 
        for (i = 0; i < orig->len; i++) {
-               avnode = avtab_search_node(avtab, &orig->nodes[i]->key);
-               if (WARN_ON(!avnode))
-                       return -EINVAL;
-               new->nodes[i] = avnode;
+               new->nodes[i] = avtab_insert_nonunique(avtab,
+                                                      &orig->nodes[i]->key,
+                                                      &orig->nodes[i]->datum);
+               if (!new->nodes[i])
+                       return -ENOMEM;
                new->len++;
        }
 
@@ -630,7 +630,7 @@ static int duplicate_policydb_cond_list(struct policydb *newp,
 {
        int rc, i, j;
 
-       rc = avtab_duplicate(&newp->te_cond_avtab, &origp->te_cond_avtab);
+       rc = avtab_alloc_dup(&newp->te_cond_avtab, &origp->te_cond_avtab);
        if (rc)
                return rc;
 
index d91e41d..3016331 100644 (file)
@@ -1552,6 +1552,7 @@ static int security_context_to_sid_core(struct selinux_state *state,
                if (!str)
                        goto out;
        }
+retry:
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -1565,6 +1566,15 @@ static int security_context_to_sid_core(struct selinux_state *state,
        } else if (rc)
                goto out_unlock;
        rc = sidtab_context_to_sid(sidtab, &context, sid);
+       if (rc == -ESTALE) {
+               rcu_read_unlock();
+               if (context.str) {
+                       str = context.str;
+                       context.str = NULL;
+               }
+               context_destroy(&context);
+               goto retry;
+       }
        context_destroy(&context);
 out_unlock:
        rcu_read_unlock();
@@ -1714,7 +1724,7 @@ static int security_compute_sid(struct selinux_state *state,
        struct selinux_policy *policy;
        struct policydb *policydb;
        struct sidtab *sidtab;
-       struct class_datum *cladatum = NULL;
+       struct class_datum *cladatum;
        struct context *scontext, *tcontext, newcontext;
        struct sidtab_entry *sentry, *tentry;
        struct avtab_key avkey;
@@ -1736,6 +1746,8 @@ static int security_compute_sid(struct selinux_state *state,
                goto out;
        }
 
+retry:
+       cladatum = NULL;
        context_init(&newcontext);
 
        rcu_read_lock();
@@ -1880,6 +1892,11 @@ static int security_compute_sid(struct selinux_state *state,
        }
        /* Obtain the sid for the context. */
        rc = sidtab_context_to_sid(sidtab, &newcontext, out_sid);
+       if (rc == -ESTALE) {
+               rcu_read_unlock();
+               context_destroy(&newcontext);
+               goto retry;
+       }
 out_unlock:
        rcu_read_unlock();
        context_destroy(&newcontext);
@@ -2192,6 +2209,7 @@ void selinux_policy_commit(struct selinux_state *state,
                           struct selinux_load_state *load_state)
 {
        struct selinux_policy *oldpolicy, *newpolicy = load_state->policy;
+       unsigned long flags;
        u32 seqno;
 
        oldpolicy = rcu_dereference_protected(state->policy,
@@ -2213,7 +2231,13 @@ void selinux_policy_commit(struct selinux_state *state,
        seqno = newpolicy->latest_granting;
 
        /* Install the new policy. */
-       rcu_assign_pointer(state->policy, newpolicy);
+       if (oldpolicy) {
+               sidtab_freeze_begin(oldpolicy->sidtab, &flags);
+               rcu_assign_pointer(state->policy, newpolicy);
+               sidtab_freeze_end(oldpolicy->sidtab, &flags);
+       } else {
+               rcu_assign_pointer(state->policy, newpolicy);
+       }
 
        /* Load the policycaps from the new policy */
        security_load_policycaps(state, newpolicy);
@@ -2357,13 +2381,15 @@ int security_port_sid(struct selinux_state *state,
        struct policydb *policydb;
        struct sidtab *sidtab;
        struct ocontext *c;
-       int rc = 0;
+       int rc;
 
        if (!selinux_initialized(state)) {
                *out_sid = SECINITSID_PORT;
                return 0;
        }
 
+retry:
+       rc = 0;
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -2382,6 +2408,10 @@ int security_port_sid(struct selinux_state *state,
                if (!c->sid[0]) {
                        rc = sidtab_context_to_sid(sidtab, &c->context[0],
                                                   &c->sid[0]);
+                       if (rc == -ESTALE) {
+                               rcu_read_unlock();
+                               goto retry;
+                       }
                        if (rc)
                                goto out;
                }
@@ -2408,13 +2438,15 @@ int security_ib_pkey_sid(struct selinux_state *state,
        struct policydb *policydb;
        struct sidtab *sidtab;
        struct ocontext *c;
-       int rc = 0;
+       int rc;
 
        if (!selinux_initialized(state)) {
                *out_sid = SECINITSID_UNLABELED;
                return 0;
        }
 
+retry:
+       rc = 0;
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -2435,6 +2467,10 @@ int security_ib_pkey_sid(struct selinux_state *state,
                        rc = sidtab_context_to_sid(sidtab,
                                                   &c->context[0],
                                                   &c->sid[0]);
+                       if (rc == -ESTALE) {
+                               rcu_read_unlock();
+                               goto retry;
+                       }
                        if (rc)
                                goto out;
                }
@@ -2460,13 +2496,15 @@ int security_ib_endport_sid(struct selinux_state *state,
        struct policydb *policydb;
        struct sidtab *sidtab;
        struct ocontext *c;
-       int rc = 0;
+       int rc;
 
        if (!selinux_initialized(state)) {
                *out_sid = SECINITSID_UNLABELED;
                return 0;
        }
 
+retry:
+       rc = 0;
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -2487,6 +2525,10 @@ int security_ib_endport_sid(struct selinux_state *state,
                if (!c->sid[0]) {
                        rc = sidtab_context_to_sid(sidtab, &c->context[0],
                                                   &c->sid[0]);
+                       if (rc == -ESTALE) {
+                               rcu_read_unlock();
+                               goto retry;
+                       }
                        if (rc)
                                goto out;
                }
@@ -2510,7 +2552,7 @@ int security_netif_sid(struct selinux_state *state,
        struct selinux_policy *policy;
        struct policydb *policydb;
        struct sidtab *sidtab;
-       int rc = 0;
+       int rc;
        struct ocontext *c;
 
        if (!selinux_initialized(state)) {
@@ -2518,6 +2560,8 @@ int security_netif_sid(struct selinux_state *state,
                return 0;
        }
 
+retry:
+       rc = 0;
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -2534,10 +2578,18 @@ int security_netif_sid(struct selinux_state *state,
                if (!c->sid[0] || !c->sid[1]) {
                        rc = sidtab_context_to_sid(sidtab, &c->context[0],
                                                   &c->sid[0]);
+                       if (rc == -ESTALE) {
+                               rcu_read_unlock();
+                               goto retry;
+                       }
                        if (rc)
                                goto out;
                        rc = sidtab_context_to_sid(sidtab, &c->context[1],
                                                   &c->sid[1]);
+                       if (rc == -ESTALE) {
+                               rcu_read_unlock();
+                               goto retry;
+                       }
                        if (rc)
                                goto out;
                }
@@ -2587,6 +2639,7 @@ int security_node_sid(struct selinux_state *state,
                return 0;
        }
 
+retry:
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -2635,6 +2688,10 @@ int security_node_sid(struct selinux_state *state,
                        rc = sidtab_context_to_sid(sidtab,
                                                   &c->context[0],
                                                   &c->sid[0]);
+                       if (rc == -ESTALE) {
+                               rcu_read_unlock();
+                               goto retry;
+                       }
                        if (rc)
                                goto out;
                }
@@ -2676,18 +2733,24 @@ int security_get_user_sids(struct selinux_state *state,
        struct sidtab *sidtab;
        struct context *fromcon, usercon;
        u32 *mysids = NULL, *mysids2, sid;
-       u32 mynel = 0, maxnel = SIDS_NEL;
+       u32 i, j, mynel, maxnel = SIDS_NEL;
        struct user_datum *user;
        struct role_datum *role;
        struct ebitmap_node *rnode, *tnode;
-       int rc = 0, i, j;
+       int rc;
 
        *sids = NULL;
        *nel = 0;
 
        if (!selinux_initialized(state))
-               goto out;
+               return 0;
+
+       mysids = kcalloc(maxnel, sizeof(*mysids), GFP_KERNEL);
+       if (!mysids)
+               return -ENOMEM;
 
+retry:
+       mynel = 0;
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -2707,11 +2770,6 @@ int security_get_user_sids(struct selinux_state *state,
 
        usercon.user = user->value;
 
-       rc = -ENOMEM;
-       mysids = kcalloc(maxnel, sizeof(*mysids), GFP_ATOMIC);
-       if (!mysids)
-               goto out_unlock;
-
        ebitmap_for_each_positive_bit(&user->roles, rnode, i) {
                role = policydb->role_val_to_struct[i];
                usercon.role = i + 1;
@@ -2723,6 +2781,10 @@ int security_get_user_sids(struct selinux_state *state,
                                continue;
 
                        rc = sidtab_context_to_sid(sidtab, &usercon, &sid);
+                       if (rc == -ESTALE) {
+                               rcu_read_unlock();
+                               goto retry;
+                       }
                        if (rc)
                                goto out_unlock;
                        if (mynel < maxnel) {
@@ -2745,14 +2807,14 @@ out_unlock:
        rcu_read_unlock();
        if (rc || !mynel) {
                kfree(mysids);
-               goto out;
+               return rc;
        }
 
        rc = -ENOMEM;
        mysids2 = kcalloc(mynel, sizeof(*mysids2), GFP_KERNEL);
        if (!mysids2) {
                kfree(mysids);
-               goto out;
+               return rc;
        }
        for (i = 0, j = 0; i < mynel; i++) {
                struct av_decision dummy_avd;
@@ -2765,12 +2827,10 @@ out_unlock:
                        mysids2[j++] = mysids[i];
                cond_resched();
        }
-       rc = 0;
        kfree(mysids);
        *sids = mysids2;
        *nel = j;
-out:
-       return rc;
+       return 0;
 }
 
 /**
@@ -2783,6 +2843,9 @@ out:
  * Obtain a SID to use for a file in a filesystem that
  * cannot support xattr or use a fixed labeling behavior like
  * transition SIDs or task SIDs.
+ *
+ * WARNING: This function may return -ESTALE, indicating that the caller
+ * must retry the operation after re-acquiring the policy pointer!
  */
 static inline int __security_genfs_sid(struct selinux_policy *policy,
                                       const char *fstype,
@@ -2861,11 +2924,13 @@ int security_genfs_sid(struct selinux_state *state,
                return 0;
        }
 
-       rcu_read_lock();
-       policy = rcu_dereference(state->policy);
-       retval = __security_genfs_sid(policy,
-                               fstype, path, orig_sclass, sid);
-       rcu_read_unlock();
+       do {
+               rcu_read_lock();
+               policy = rcu_dereference(state->policy);
+               retval = __security_genfs_sid(policy, fstype, path,
+                                             orig_sclass, sid);
+               rcu_read_unlock();
+       } while (retval == -ESTALE);
        return retval;
 }
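
The SELinux hunks above all converge on one pattern: take rcu_read_lock(), dereference state->policy, perform the lookup, and if sidtab_context_to_sid() reports -ESTALE (the sidtab was frozen underneath the caller by a policy reload), drop the RCU read side and retry against the newly published policy. A minimal sketch of that retry loop, with a hypothetical do_lookup() standing in for each function's real body:

/* Sketch only; do_lookup() is a placeholder for the per-function work. */
static int lookup_with_retry(struct selinux_state *state, u32 *sid)
{
        struct selinux_policy *policy;
        int rc;

retry:
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        rc = do_lookup(policy, sid);    /* may call sidtab_context_to_sid() */
        rcu_read_unlock();
        if (rc == -ESTALE)
                goto retry;             /* the policy was swapped; use the new one */
        return rc;
}
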
 
@@ -2888,7 +2953,7 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
        struct selinux_policy *policy;
        struct policydb *policydb;
        struct sidtab *sidtab;
-       int rc = 0;
+       int rc;
        struct ocontext *c;
        struct superblock_security_struct *sbsec = sb->s_security;
        const char *fstype = sb->s_type->name;
@@ -2899,6 +2964,8 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
                return 0;
        }
 
+retry:
+       rc = 0;
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -2916,6 +2983,10 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
                if (!c->sid[0]) {
                        rc = sidtab_context_to_sid(sidtab, &c->context[0],
                                                   &c->sid[0]);
+                       if (rc == -ESTALE) {
+                               rcu_read_unlock();
+                               goto retry;
+                       }
                        if (rc)
                                goto out;
                }
@@ -2923,6 +2994,10 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
        } else {
                rc = __security_genfs_sid(policy, fstype, "/",
                                        SECCLASS_DIR, &sbsec->sid);
+               if (rc == -ESTALE) {
+                       rcu_read_unlock();
+                       goto retry;
+               }
                if (rc) {
                        sbsec->behavior = SECURITY_FS_USE_NONE;
                        rc = 0;
@@ -3132,12 +3207,13 @@ int security_sid_mls_copy(struct selinux_state *state,
        u32 len;
        int rc;
 
-       rc = 0;
        if (!selinux_initialized(state)) {
                *new_sid = sid;
-               goto out;
+               return 0;
        }
 
+retry:
+       rc = 0;
        context_init(&newcon);
 
        rcu_read_lock();
@@ -3196,10 +3272,14 @@ int security_sid_mls_copy(struct selinux_state *state,
                }
        }
        rc = sidtab_context_to_sid(sidtab, &newcon, new_sid);
+       if (rc == -ESTALE) {
+               rcu_read_unlock();
+               context_destroy(&newcon);
+               goto retry;
+       }
 out_unlock:
        rcu_read_unlock();
        context_destroy(&newcon);
-out:
        return rc;
 }
 
@@ -3792,6 +3872,8 @@ int security_netlbl_secattr_to_sid(struct selinux_state *state,
                return 0;
        }
 
+retry:
+       rc = 0;
        rcu_read_lock();
        policy = rcu_dereference(state->policy);
        policydb = &policy->policydb;
@@ -3818,23 +3900,24 @@ int security_netlbl_secattr_to_sid(struct selinux_state *state,
                                goto out;
                }
                rc = -EIDRM;
-               if (!mls_context_isvalid(policydb, &ctx_new))
-                       goto out_free;
+               if (!mls_context_isvalid(policydb, &ctx_new)) {
+                       ebitmap_destroy(&ctx_new.range.level[0].cat);
+                       goto out;
+               }
 
                rc = sidtab_context_to_sid(sidtab, &ctx_new, sid);
+               ebitmap_destroy(&ctx_new.range.level[0].cat);
+               if (rc == -ESTALE) {
+                       rcu_read_unlock();
+                       goto retry;
+               }
                if (rc)
-                       goto out_free;
+                       goto out;
 
                security_netlbl_cache_add(secattr, *sid);
-
-               ebitmap_destroy(&ctx_new.range.level[0].cat);
        } else
                *sid = SECSID_NULL;
 
-       rcu_read_unlock();
-       return 0;
-out_free:
-       ebitmap_destroy(&ctx_new.range.level[0].cat);
 out:
        rcu_read_unlock();
        return rc;
index 5ee190b..656d50b 100644 (file)
@@ -39,6 +39,7 @@ int sidtab_init(struct sidtab *s)
        for (i = 0; i < SECINITSID_NUM; i++)
                s->isids[i].set = 0;
 
+       s->frozen = false;
        s->count = 0;
        s->convert = NULL;
        hash_init(s->context_to_sid);
@@ -281,6 +282,15 @@ int sidtab_context_to_sid(struct sidtab *s, struct context *context,
        if (*sid)
                goto out_unlock;
 
+       if (unlikely(s->frozen)) {
+               /*
+                * This sidtab is now frozen - tell the caller to abort and
+                * get the new one.
+                */
+               rc = -ESTALE;
+               goto out_unlock;
+       }
+
        count = s->count;
        convert = s->convert;
 
@@ -474,6 +484,17 @@ void sidtab_cancel_convert(struct sidtab *s)
        spin_unlock_irqrestore(&s->lock, flags);
 }
 
+void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock)
+{
+       spin_lock_irqsave(&s->lock, *flags);
+       s->frozen = true;
+       s->convert = NULL;
+}
+void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock)
+{
+       spin_unlock_irqrestore(&s->lock, *flags);
+}
+
 static void sidtab_destroy_entry(struct sidtab_entry *entry)
 {
        context_destroy(&entry->context);
index 80c744d..4eff0e4 100644 (file)
@@ -86,6 +86,7 @@ struct sidtab {
        u32 count;
        /* access only under spinlock */
        struct sidtab_convert_params *convert;
+       bool frozen;
        spinlock_t lock;
 
 #if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
@@ -125,6 +126,9 @@ int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params);
 
 void sidtab_cancel_convert(struct sidtab *s);
 
+void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock);
+void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock);
+
 int sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *sid);
 
 void sidtab_destroy(struct sidtab *s);
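
The freeze API is what guarantees the -ESTALE retries above terminate: the policy-commit path can publish the new policy while the old sidtab is locked and marked frozen, so any late lookup on the old table bails out and retries against the new one. An illustrative sketch of that pairing (the real call site and exact ordering live in the policy-load code, which is not part of these hunks):

/* Illustrative only; names and ordering are an assumption. */
static void publish_new_policy(struct selinux_state *state,
                               struct selinux_policy *newpolicy,
                               struct sidtab *oldsidtab)
{
        unsigned long flags;

        sidtab_freeze_begin(oldsidtab, &flags); /* lock held, frozen = true */
        rcu_assign_pointer(state->policy, newpolicy);
        sidtab_freeze_end(oldsidtab, &flags);   /* only drops the lock */
}
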
index 478f757..8dc6133 100644 (file)
@@ -613,7 +613,7 @@ static int tomoyo_check_unix_address(struct sockaddr *addr,
 static bool tomoyo_kernel_service(void)
 {
        /* Nothing to do if I am a kernel service. */
-       return (current->flags & (PF_KTHREAD | PF_IO_WORKER)) == PF_KTHREAD;
+       return current->flags & PF_KTHREAD;
 }
 
 /**
index 5263718..80b814b 100644 (file)
@@ -1571,6 +1571,14 @@ static int loopback_mixer_new(struct loopback *loopback, int notify)
                                        return -ENOMEM;
                                kctl->id.device = dev;
                                kctl->id.subdevice = substr;
+
+                               /* Add the control before copying the id so that
+                                * the numid field of the id is set in the copy.
+                                */
+                               err = snd_ctl_add(card, kctl);
+                               if (err < 0)
+                                       return err;
+
                                switch (idx) {
                                case ACTIVE_IDX:
                                        setup->active_id = kctl->id;
@@ -1587,9 +1595,6 @@ static int loopback_mixer_new(struct loopback *loopback, int notify)
                                default:
                                        break;
                                }
-                               err = snd_ctl_add(card, kctl);
-                               if (err < 0)
-                                       return err;
                        }
                }
        }
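
The reordering works because snd_ctl_add() is what assigns a control's numid: copying kctl->id before the add snapshots an id with numid == 0, which later notifications and lookups based on the saved id cannot resolve. In sketch form, the safe ordering is simply:

/* Sketch: insert the control first so kctl->id.numid is populated. */
err = snd_ctl_add(card, kctl);
if (err < 0)
        return err;
setup->active_id = kctl->id;    /* the snapshot now carries a valid numid */
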
index b59b0f3..79ade33 100644 (file)
@@ -989,8 +989,12 @@ static int azx_prepare(struct device *dev)
        struct snd_card *card = dev_get_drvdata(dev);
        struct azx *chip;
 
+       if (!azx_is_pm_ready(card))
+               return 0;
+
        chip = card->private_data;
        chip->pm_prepared = 1;
+       snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
 
        flush_work(&azx_bus(chip)->unsol_work);
 
@@ -1005,7 +1009,11 @@ static void azx_complete(struct device *dev)
        struct snd_card *card = dev_get_drvdata(dev);
        struct azx *chip;
 
+       if (!azx_is_pm_ready(card))
+               return;
+
        chip = card->private_data;
+       snd_power_change_state(card, SNDRV_CTL_POWER_D0);
        chip->pm_prepared = 0;
 }
 
index c20dad4..dfef9c1 100644 (file)
@@ -944,6 +944,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
        SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x8402, "HP ProBook 645 G4", CXT_FIXUP_MUTE_LED_GPIO),
        SND_PCI_QUIRK(0x103c, 0x8427, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
+       SND_PCI_QUIRK(0x103c, 0x844f, "HP ZBook Studio G5", CXT_FIXUP_HP_ZBOOK_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x8456, "HP Z2 G4 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE),
index 316b9b4..a7544b7 100644 (file)
@@ -3927,6 +3927,15 @@ static void alc271_fixup_dmic(struct hda_codec *codec,
                snd_hda_sequence_write(codec, verbs);
 }
 
+/* Fix the speaker amp after resume, etc */
+static void alc269vb_fixup_aspire_e1_coef(struct hda_codec *codec,
+                                         const struct hda_fixup *fix,
+                                         int action)
+{
+       if (action == HDA_FIXUP_ACT_INIT)
+               alc_update_coef_idx(codec, 0x0d, 0x6000, 0x6000);
+}
+
 static void alc269_fixup_pcm_44k(struct hda_codec *codec,
                                 const struct hda_fixup *fix, int action)
 {
@@ -5256,7 +5265,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
        case 0x10ec0274:
        case 0x10ec0294:
                alc_process_coef_fw(codec, coef0274);
-               msleep(80);
+               msleep(850);
                val = alc_read_coef_idx(codec, 0x46);
                is_ctia = (val & 0x00f0) == 0x00f0;
                break;
@@ -5440,6 +5449,7 @@ static void alc_update_headset_jack_cb(struct hda_codec *codec,
                                       struct hda_jack_callback *jack)
 {
        snd_hda_gen_hp_automute(codec, jack);
+       alc_update_headset_mode(codec);
 }
 
 static void alc_probe_headset_mode(struct hda_codec *codec)
@@ -6300,6 +6310,7 @@ enum {
        ALC283_FIXUP_HEADSET_MIC,
        ALC255_FIXUP_MIC_MUTE_LED,
        ALC282_FIXUP_ASPIRE_V5_PINS,
+       ALC269VB_FIXUP_ASPIRE_E1_COEF,
        ALC280_FIXUP_HP_GPIO4,
        ALC286_FIXUP_HP_GPIO_LED,
        ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY,
@@ -6978,6 +6989,10 @@ static const struct hda_fixup alc269_fixups[] = {
                        { },
                },
        },
+       [ALC269VB_FIXUP_ASPIRE_E1_COEF] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc269vb_fixup_aspire_e1_coef,
+       },
        [ALC280_FIXUP_HP_GPIO4] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc280_fixup_hp_gpio4,
@@ -7900,6 +7915,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
        SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
        SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
+       SND_PCI_QUIRK(0x1025, 0x0840, "Acer Aspire E1", ALC269VB_FIXUP_ASPIRE_E1_COEF),
        SND_PCI_QUIRK(0x1025, 0x101c, "Acer Veriton N2510G", ALC269_FIXUP_LIFEBOOK),
        SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC),
@@ -8057,6 +8073,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
                      ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x87f2, "HP ProBook 640 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
@@ -8393,6 +8410,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC283_FIXUP_HEADSET_MIC, .name = "alc283-headset"},
        {.id = ALC255_FIXUP_MIC_MUTE_LED, .name = "alc255-dell-mute"},
        {.id = ALC282_FIXUP_ASPIRE_V5_PINS, .name = "aspire-v5"},
+       {.id = ALC269VB_FIXUP_ASPIRE_E1_COEF, .name = "aspire-e1-coef"},
        {.id = ALC280_FIXUP_HP_GPIO4, .name = "hp-gpio4"},
        {.id = ALC286_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
        {.id = ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY, .name = "hp-gpio2-hotkey"},
index 6e634b4..aa16a23 100644 (file)
@@ -1348,8 +1348,10 @@ static int cygnus_ssp_probe(struct platform_device *pdev)
                                        &cygnus_ssp_dai[active_port_count]);
 
                /* negative is err, 0 is active and good, 1 is disabled */
-               if (err < 0)
+               if (err < 0) {
+                       of_node_put(child_node);
                        return err;
+               }
                else if (!err) {
                        dev_dbg(dev, "Activating DAI: %s\n",
                                cygnus_ssp_dai[active_port_count].name);
index 8c04b3b..7878da8 100644 (file)
@@ -3551,7 +3551,7 @@ static int rx_macro_probe(struct platform_device *pdev)
 
        /* set MCLK and NPL rates */
        clk_set_rate(rx->clks[2].clk, MCLK_FREQ);
-       clk_set_rate(rx->clks[3].clk, MCLK_FREQ);
+       clk_set_rate(rx->clks[3].clk, 2 * MCLK_FREQ);
 
        ret = clk_bulk_prepare_enable(RX_NUM_CLKS_MAX, rx->clks);
        if (ret)
index 36d7a64..e8c6c73 100644 (file)
@@ -1811,7 +1811,7 @@ static int tx_macro_probe(struct platform_device *pdev)
 
        /* set MCLK and NPL rates */
        clk_set_rate(tx->clks[2].clk, MCLK_FREQ);
-       clk_set_rate(tx->clks[3].clk, MCLK_FREQ);
+       clk_set_rate(tx->clks[3].clk, 2 * MCLK_FREQ);
 
        ret = clk_bulk_prepare_enable(TX_NUM_CLKS_MAX, tx->clks);
        if (ret)
index 85f6865..ddb6436 100644 (file)
@@ -446,6 +446,7 @@ static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
        case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
        case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
        case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
+       case MAX98373_R20FF_GLOBAL_SHDN:
        case MAX98373_R21FF_REV_ID:
                return true;
        default:
index d8c4766..f3a1220 100644 (file)
@@ -220,6 +220,7 @@ static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
        case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
        case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
        case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
+       case MAX98373_R20FF_GLOBAL_SHDN:
        case MAX98373_R21FF_REV_ID:
        /* SoundWire Control Port Registers */
        case MAX98373_R0040_SCP_INIT_STAT_1 ... MAX98373_R0070_SCP_FRAME_CTLR:
index 746c829..1346a98 100644 (file)
@@ -28,11 +28,13 @@ static int max98373_dac_event(struct snd_soc_dapm_widget *w,
                regmap_update_bits(max98373->regmap,
                        MAX98373_R20FF_GLOBAL_SHDN,
                        MAX98373_GLOBAL_EN_MASK, 1);
+               usleep_range(30000, 31000);
                break;
        case SND_SOC_DAPM_POST_PMD:
                regmap_update_bits(max98373->regmap,
                        MAX98373_R20FF_GLOBAL_SHDN,
                        MAX98373_GLOBAL_EN_MASK, 0);
+               usleep_range(30000, 31000);
                max98373->tdm_mode = false;
                break;
        default:
index df35151..cda9cd9 100644 (file)
@@ -707,7 +707,13 @@ int wm8960_configure_pll(struct snd_soc_component *component, int freq_in,
        best_freq_out = -EINVAL;
        *sysclk_idx = *dac_idx = *bclk_idx = -1;
 
-       for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
+       /*
+        * Per the datasheet, the PLL performs best when f2 is between
+        * 90MHz and 100MHz. With the desired sysclk output of 11.2896MHz
+        * or 12.288MHz, sysclkdiv = 2 is therefore the best choice, so
+        * search sysclk_divs from 2 down to 1 rather than from 1 to 2.
+        */
+       for (i = ARRAY_SIZE(sysclk_divs) - 1; i >= 0; --i) {
                if (sysclk_divs[i] == -1)
                        continue;
                for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) {
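
The numbers behind that comment, assuming the driver's usual relation f2 = 4 * sysclk * sysclkdiv: reaching 12.288MHz with sysclkdiv = 2 puts f2 at 98.304MHz and 11.2896MHz puts it at 90.3168MHz, both inside the preferred 90-100MHz band, whereas sysclkdiv = 1 would leave f2 around 45-49MHz. A small self-contained check of that arithmetic:

/* Worked check, assuming f2 = 4 * sysclk * sysclkdiv as in the driver. */
static bool f2_in_preferred_band(unsigned int sysclk, unsigned int sysclkdiv)
{
        unsigned long long f2 = 4ULL * sysclk * sysclkdiv;

        return f2 >= 90000000ULL && f2 <= 100000000ULL;
}
/* f2_in_preferred_band(12288000, 2) -> true  (f2 = 98.304 MHz)
 * f2_in_preferred_band(12288000, 1) -> false (f2 = 49.152 MHz)
 * f2_in_preferred_band(11289600, 2) -> true  (f2 = 90.3168 MHz)
 */
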
index 08056fa..a857a62 100644 (file)
@@ -519,11 +519,13 @@ static int fsl_esai_startup(struct snd_pcm_substream *substream,
                                   ESAI_SAICR_SYNC, esai_priv->synchronous ?
                                   ESAI_SAICR_SYNC : 0);
 
-               /* Set a default slot number -- 2 */
+               /* Set slots count */
                regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR,
-                                  ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(2));
+                                  ESAI_xCCR_xDC_MASK,
+                                  ESAI_xCCR_xDC(esai_priv->slots));
                regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
-                                  ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(2));
+                                  ESAI_xCCR_xDC_MASK,
+                                  ESAI_xCCR_xDC(esai_priv->slots));
        }
 
        return 0;
index 9e9b058..4124aa2 100644 (file)
@@ -487,15 +487,15 @@ static struct snd_soc_dai_driver sst_platform_dai[] = {
                .stream_name = "Headset Playback",
                .channels_min = SST_STEREO,
                .channels_max = SST_STEREO,
-               .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
-               .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+               .rates = SNDRV_PCM_RATE_48000,
+               .formats = SNDRV_PCM_FMTBIT_S16_LE,
        },
        .capture = {
                .stream_name = "Headset Capture",
                .channels_min = 1,
                .channels_max = 2,
-               .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
-               .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+               .rates = SNDRV_PCM_RATE_48000,
+               .formats = SNDRV_PCM_FMTBIT_S16_LE,
        },
 },
 {
@@ -505,8 +505,8 @@ static struct snd_soc_dai_driver sst_platform_dai[] = {
                .stream_name = "Deepbuffer Playback",
                .channels_min = SST_STEREO,
                .channels_max = SST_STEREO,
-               .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
-               .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
+               .rates = SNDRV_PCM_RATE_48000,
+               .formats = SNDRV_PCM_FMTBIT_S16_LE,
        },
 },
 {
index 6d8f7d9..4a3d522 100644 (file)
@@ -399,7 +399,13 @@ int snd_sof_device_shutdown(struct device *dev)
 {
        struct snd_sof_dev *sdev = dev_get_drvdata(dev);
 
-       return snd_sof_shutdown(sdev);
+       if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE))
+               cancel_work_sync(&sdev->probe_work);
+
+       if (sdev->fw_state == SOF_FW_BOOT_COMPLETE)
+               return snd_sof_shutdown(sdev);
+
+       return 0;
 }
 EXPORT_SYMBOL(snd_sof_device_shutdown);
 
index fc29b91..c7ed2b3 100644 (file)
@@ -27,9 +27,10 @@ static const struct snd_sof_debugfs_map apl_dsp_debugfs[] = {
 
 /* apollolake ops */
 const struct snd_sof_dsp_ops sof_apl_ops = {
-       /* probe and remove */
+       /* probe/remove/shutdown */
        .probe          = hda_dsp_probe,
        .remove         = hda_dsp_remove,
+       .shutdown       = hda_dsp_shutdown,
 
        /* Register IO */
        .write          = sof_io_write,
index e38db51..821f25f 100644 (file)
@@ -232,9 +232,10 @@ void cnl_ipc_dump(struct snd_sof_dev *sdev)
 
 /* cannonlake ops */
 const struct snd_sof_dsp_ops sof_cnl_ops = {
-       /* probe and remove */
+       /* probe/remove/shutdown */
        .probe          = hda_dsp_probe,
        .remove         = hda_dsp_remove,
+       .shutdown       = hda_dsp_shutdown,
 
        /* Register IO */
        .write          = sof_io_write,
@@ -349,22 +350,6 @@ const struct sof_intel_dsp_desc cnl_chip_info = {
 };
 EXPORT_SYMBOL_NS(cnl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
 
-const struct sof_intel_dsp_desc ehl_chip_info = {
-       /* Elkhartlake */
-       .cores_num = 4,
-       .init_core_mask = 1,
-       .host_managed_cores_mask = BIT(0),
-       .ipc_req = CNL_DSP_REG_HIPCIDR,
-       .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
-       .ipc_ack = CNL_DSP_REG_HIPCIDA,
-       .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
-       .ipc_ctl = CNL_DSP_REG_HIPCCTL,
-       .rom_init_timeout       = 300,
-       .ssp_count = ICL_SSP_COUNT,
-       .ssp_base_offset = CNL_SSP_BASE_OFFSET,
-};
-EXPORT_SYMBOL_NS(ehl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
-
 const struct sof_intel_dsp_desc jsl_chip_info = {
        /* Jasperlake */
        .cores_num = 2,
index c3b757c..736a54b 100644 (file)
@@ -226,10 +226,17 @@ bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev,
 
        val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS);
 
-       is_enable = (val & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) &&
-                   (val & HDA_DSP_ADSPCS_SPA_MASK(core_mask)) &&
-                   !(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
-                   !(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
+#define MASK_IS_EQUAL(v, m, field) ({  \
+       u32 _m = field(m);              \
+       ((v) & _m) == _m;               \
+})
+
+       is_enable = MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_CPA_MASK) &&
+               MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_SPA_MASK) &&
+               !(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
+               !(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
+
+#undef MASK_IS_EQUAL
 
        dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n",
                is_enable, core_mask);
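
The distinction matters for multi-core masks: the old test was true as soon as any requested core had its bit set, while MASK_IS_EQUAL() requires every bit of the mask. A sketch with illustrative bit values:

/* Sketch; 0x3 requests cores 0 and 1, 0x1 means only core 0 reports power. */
static bool enabled_old(u32 val, u32 mask) { return val & mask; }
static bool enabled_new(u32 val, u32 mask) { return (val & mask) == mask; }
/* enabled_old(0x1, 0x3) -> true (wrongly reports both cores up)
 * enabled_new(0x1, 0x3) -> false
 */
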
@@ -885,6 +892,12 @@ int hda_dsp_suspend(struct snd_sof_dev *sdev, u32 target_state)
        return snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
 }
 
+int hda_dsp_shutdown(struct snd_sof_dev *sdev)
+{
+       sdev->system_suspend_target = SOF_SUSPEND_S3;
+       return snd_sof_suspend(sdev->dev);
+}
+
 int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev)
 {
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
index 7c7579d..ae80725 100644 (file)
@@ -517,6 +517,7 @@ int hda_dsp_resume(struct snd_sof_dev *sdev);
 int hda_dsp_runtime_suspend(struct snd_sof_dev *sdev);
 int hda_dsp_runtime_resume(struct snd_sof_dev *sdev);
 int hda_dsp_runtime_idle(struct snd_sof_dev *sdev);
+int hda_dsp_shutdown(struct snd_sof_dev *sdev);
 int hda_dsp_set_hw_params_upon_resume(struct snd_sof_dev *sdev);
 void hda_dsp_dump(struct snd_sof_dev *sdev, u32 flags);
 void hda_ipc_dump(struct snd_sof_dev *sdev);
index e9d5a0a..88a74be 100644 (file)
@@ -26,9 +26,10 @@ static const struct snd_sof_debugfs_map icl_dsp_debugfs[] = {
 
 /* Icelake ops */
 const struct snd_sof_dsp_ops sof_icl_ops = {
-       /* probe and remove */
+       /* probe/remove/shutdown */
        .probe          = hda_dsp_probe,
        .remove         = hda_dsp_remove,
+       .shutdown       = hda_dsp_shutdown,
 
        /* Register IO */
        .write          = sof_io_write,
index 4856074..38bc353 100644 (file)
@@ -65,7 +65,7 @@ static const struct sof_dev_desc ehl_desc = {
        .default_tplg_path = "intel/sof-tplg",
        .default_fw_filename = "sof-ehl.ri",
        .nocodec_tplg_filename = "sof-ehl-nocodec.tplg",
-       .ops = &sof_cnl_ops,
+       .ops = &sof_tgl_ops,
 };
 
 static const struct sof_dev_desc adls_desc = {
index 419f05b..54ba1b8 100644 (file)
@@ -25,7 +25,7 @@ const struct snd_sof_dsp_ops sof_tgl_ops = {
        /* probe/remove/shutdown */
        .probe          = hda_dsp_probe,
        .remove         = hda_dsp_remove,
-       .shutdown       = hda_dsp_remove,
+       .shutdown       = hda_dsp_shutdown,
 
        /* Register IO */
        .write          = sof_io_write,
@@ -156,6 +156,22 @@ const struct sof_intel_dsp_desc tglh_chip_info = {
 };
 EXPORT_SYMBOL_NS(tglh_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
 
+const struct sof_intel_dsp_desc ehl_chip_info = {
+       /* Elkhartlake */
+       .cores_num = 4,
+       .init_core_mask = 1,
+       .host_managed_cores_mask = BIT(0),
+       .ipc_req = CNL_DSP_REG_HIPCIDR,
+       .ipc_req_mask = CNL_DSP_REG_HIPCIDR_BUSY,
+       .ipc_ack = CNL_DSP_REG_HIPCIDA,
+       .ipc_ack_mask = CNL_DSP_REG_HIPCIDA_DONE,
+       .ipc_ctl = CNL_DSP_REG_HIPCCTL,
+       .rom_init_timeout       = 300,
+       .ssp_count = ICL_SSP_COUNT,
+       .ssp_base_offset = CNL_SSP_BASE_OFFSET,
+};
+EXPORT_SYMBOL_NS(ehl_chip_info, SND_SOC_SOF_INTEL_HDA_COMMON);
+
 const struct sof_intel_dsp_desc adls_chip_info = {
        /* Alderlake-S */
        .cores_num = 2,
index 6c13cc8..2173991 100644 (file)
@@ -1364,6 +1364,7 @@ static struct snd_soc_card *sun4i_codec_create_card(struct device *dev)
                return ERR_PTR(-ENOMEM);
 
        card->dev               = dev;
+       card->owner             = THIS_MODULE;
        card->name              = "sun4i-codec";
        card->dapm_widgets      = sun4i_codec_card_dapm_widgets;
        card->num_dapm_widgets  = ARRAY_SIZE(sun4i_codec_card_dapm_widgets);
@@ -1396,6 +1397,7 @@ static struct snd_soc_card *sun6i_codec_create_card(struct device *dev)
                return ERR_PTR(-ENOMEM);
 
        card->dev               = dev;
+       card->owner             = THIS_MODULE;
        card->name              = "A31 Audio Codec";
        card->dapm_widgets      = sun6i_codec_card_dapm_widgets;
        card->num_dapm_widgets  = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
@@ -1449,6 +1451,7 @@ static struct snd_soc_card *sun8i_a23_codec_create_card(struct device *dev)
                return ERR_PTR(-ENOMEM);
 
        card->dev               = dev;
+       card->owner             = THIS_MODULE;
        card->name              = "A23 Audio Codec";
        card->dapm_widgets      = sun6i_codec_card_dapm_widgets;
        card->num_dapm_widgets  = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
@@ -1487,6 +1490,7 @@ static struct snd_soc_card *sun8i_h3_codec_create_card(struct device *dev)
                return ERR_PTR(-ENOMEM);
 
        card->dev               = dev;
+       card->owner             = THIS_MODULE;
        card->name              = "H3 Audio Codec";
        card->dapm_widgets      = sun6i_codec_card_dapm_widgets;
        card->num_dapm_widgets  = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
@@ -1525,6 +1529,7 @@ static struct snd_soc_card *sun8i_v3s_codec_create_card(struct device *dev)
                return ERR_PTR(-ENOMEM);
 
        card->dev               = dev;
+       card->owner             = THIS_MODULE;
        card->name              = "V3s Audio Codec";
        card->dapm_widgets      = sun6i_codec_card_dapm_widgets;
        card->num_dapm_widgets  = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
index d3001fb..176437a 100644 (file)
@@ -1521,6 +1521,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        case USB_ID(0x21b4, 0x0081): /* AudioQuest DragonFly */
        case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */
        case USB_ID(0x413c, 0xa506): /* Dell AE515 sound bar */
+       case USB_ID(0x046d, 0x084c): /* Logitech ConferenceCam Connect */
                return true;
        }
 
index 4d471d9..6fffe56 100644 (file)
@@ -39,9 +39,6 @@
  * sequential memory pages only.
  */
 
-/* XXX From arch/ia64/include/uapi/asm/gcc_intrin.h */
-#define ia64_mf()       asm volatile ("mf" ::: "memory")
-
 #define mb()           ia64_mf()
 #define rmb()          mb()
 #define wmb()          mb()
index 637189e..d30439b 100644 (file)
@@ -9,8 +9,6 @@
 #include "../../../arch/alpha/include/uapi/asm/errno.h"
 #elif defined(__mips__)
 #include "../../../arch/mips/include/uapi/asm/errno.h"
-#elif defined(__ia64__)
-#include "../../../arch/ia64/include/uapi/asm/errno.h"
 #elif defined(__xtensa__)
 #include "../../../arch/xtensa/include/uapi/asm/errno.h"
 #else
index 71aabaf..8f13b84 100644 (file)
@@ -9,6 +9,7 @@ Type=simple
 ExecStart=/usr/bin/kvm_stat -dtcz -s 10 -L /var/log/kvm_stat.csv
 ExecReload=/bin/kill -HUP $MAINPID
 Restart=always
+RestartSec=60s
 SyslogIdentifier=kvm_stat
 SyslogLevel=debug
 
index 8caaafe..e7a8d84 100644 (file)
@@ -227,7 +227,7 @@ static int ringbuf_process_ring(struct ring* r)
                        if ((len & BPF_RINGBUF_DISCARD_BIT) == 0) {
                                sample = (void *)len_ptr + BPF_RINGBUF_HDR_SZ;
                                err = r->sample_cb(r->ctx, sample, len);
-                               if (err) {
+                               if (err < 0) {
                                        /* update consumer pos and bail out */
                                        smp_store_release(r->consumer_pos,
                                                          cons_pos);
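
The relaxed check matters because libbpf ring buffer callbacks may legitimately return positive values; only a negative return is meant to stop consumption and be propagated. A hypothetical callback illustrating that contract:

#include <errno.h>
#include <stddef.h>

/* Hypothetical ring_buffer sample callback: return >= 0 to keep consuming,
 * a negative errno to abort ring_buffer__consume()/poll().
 */
static int handle_sample(void *ctx, void *data, size_t len)
{
        if (!len)
                return -EINVAL;         /* stop and report the error */
        /* ... process 'data' ... */
        return 0;                       /* or any non-negative value: continue */
}
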
index 526fc35..007fe5d 100644 (file)
@@ -59,6 +59,8 @@ struct xsk_umem {
        int fd;
        int refcount;
        struct list_head ctx_list;
+       bool rx_ring_setup_done;
+       bool tx_ring_setup_done;
 };
 
 struct xsk_ctx {
@@ -743,26 +745,30 @@ static struct xsk_ctx *xsk_get_ctx(struct xsk_umem *umem, int ifindex,
        return NULL;
 }
 
-static void xsk_put_ctx(struct xsk_ctx *ctx)
+static void xsk_put_ctx(struct xsk_ctx *ctx, bool unmap)
 {
        struct xsk_umem *umem = ctx->umem;
        struct xdp_mmap_offsets off;
        int err;
 
-       if (--ctx->refcount == 0) {
-               err = xsk_get_mmap_offsets(umem->fd, &off);
-               if (!err) {
-                       munmap(ctx->fill->ring - off.fr.desc,
-                              off.fr.desc + umem->config.fill_size *
-                              sizeof(__u64));
-                       munmap(ctx->comp->ring - off.cr.desc,
-                              off.cr.desc + umem->config.comp_size *
-                              sizeof(__u64));
-               }
+       if (--ctx->refcount)
+               return;
 
-               list_del(&ctx->list);
-               free(ctx);
-       }
+       if (!unmap)
+               goto out_free;
+
+       err = xsk_get_mmap_offsets(umem->fd, &off);
+       if (err)
+               goto out_free;
+
+       munmap(ctx->fill->ring - off.fr.desc, off.fr.desc + umem->config.fill_size *
+              sizeof(__u64));
+       munmap(ctx->comp->ring - off.cr.desc, off.cr.desc + umem->config.comp_size *
+              sizeof(__u64));
+
+out_free:
+       list_del(&ctx->list);
+       free(ctx);
 }
 
 static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
@@ -797,8 +803,6 @@ static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
        memcpy(ctx->ifname, ifname, IFNAMSIZ - 1);
        ctx->ifname[IFNAMSIZ - 1] = '\0';
 
-       umem->fill_save = NULL;
-       umem->comp_save = NULL;
        ctx->fill = fill;
        ctx->comp = comp;
        list_add(&ctx->list, &umem->ctx_list);
@@ -848,6 +852,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
                              struct xsk_ring_cons *comp,
                              const struct xsk_socket_config *usr_config)
 {
+       bool unmap, rx_setup_done = false, tx_setup_done = false;
        void *rx_map = NULL, *tx_map = NULL;
        struct sockaddr_xdp sxdp = {};
        struct xdp_mmap_offsets off;
@@ -858,6 +863,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
        if (!umem || !xsk_ptr || !(rx || tx))
                return -EFAULT;
 
+       unmap = umem->fill_save != fill;
+
        xsk = calloc(1, sizeof(*xsk));
        if (!xsk)
                return -ENOMEM;
@@ -881,6 +888,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
                }
        } else {
                xsk->fd = umem->fd;
+               rx_setup_done = umem->rx_ring_setup_done;
+               tx_setup_done = umem->tx_ring_setup_done;
        }
 
        ctx = xsk_get_ctx(umem, ifindex, queue_id);
@@ -899,7 +908,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
        }
        xsk->ctx = ctx;
 
-       if (rx) {
+       if (rx && !rx_setup_done) {
                err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
                                 &xsk->config.rx_size,
                                 sizeof(xsk->config.rx_size));
@@ -907,8 +916,10 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
                        err = -errno;
                        goto out_put_ctx;
                }
+               if (xsk->fd == umem->fd)
+                       umem->rx_ring_setup_done = true;
        }
-       if (tx) {
+       if (tx && !tx_setup_done) {
                err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING,
                                 &xsk->config.tx_size,
                                 sizeof(xsk->config.tx_size));
@@ -916,6 +927,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
                        err = -errno;
                        goto out_put_ctx;
                }
+               if (xsk->fd == umem->fd)
+                       umem->tx_ring_setup_done = true;
        }
 
        err = xsk_get_mmap_offsets(xsk->fd, &off);
@@ -994,6 +1007,8 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
        }
 
        *xsk_ptr = xsk;
+       umem->fill_save = NULL;
+       umem->comp_save = NULL;
        return 0;
 
 out_mmap_tx:
@@ -1005,7 +1020,7 @@ out_mmap_rx:
                munmap(rx_map, off.rx.desc +
                       xsk->config.rx_size * sizeof(struct xdp_desc));
 out_put_ctx:
-       xsk_put_ctx(ctx);
+       xsk_put_ctx(ctx, unmap);
 out_socket:
        if (--umem->refcount)
                close(xsk->fd);
@@ -1019,6 +1034,9 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
                       struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
                       const struct xsk_socket_config *usr_config)
 {
+       if (!umem)
+               return -EFAULT;
+
        return xsk_socket__create_shared(xsk_ptr, ifname, queue_id, umem,
                                         rx, tx, umem->fill_save,
                                         umem->comp_save, usr_config);
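
For context, the shared-umem flow these changes harden looks roughly like the sketch below: the first xsk_socket__create_shared() call maps the fill/completion rings and records which RX/TX rings were configured on the umem's own fd, so a second socket reusing the same umem (and the error path of that second call) neither repeats the ring setsockopt()s nor unmaps rings it never mapped. All variable names here are illustrative and error handling is trimmed:

/* Sketch: two sockets sharing one initialized umem ('umem', 'cfg',
 * 'ifname' and 'queue_id' are assumed to already exist).
 */
struct xsk_socket *xsk_a, *xsk_b;
struct xsk_ring_cons rx_a, rx_b, comp;
struct xsk_ring_prod tx_a, tx_b, fill;
int err;

err = xsk_socket__create_shared(&xsk_a, ifname, queue_id, umem,
                                &rx_a, &tx_a, &fill, &comp, &cfg);
/* ... */
err = xsk_socket__create_shared(&xsk_b, ifname, queue_id, umem,
                                &rx_b, &tx_b, &fill, &comp, &cfg);
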
@@ -1068,7 +1086,7 @@ void xsk_socket__delete(struct xsk_socket *xsk)
                }
        }
 
-       xsk_put_ctx(ctx);
+       xsk_put_ctx(ctx, true);
 
        umem->refcount--;
        /* Do not close an fd that also has an associated umem connected
index 6fe44d9..ddccc0e 100644 (file)
@@ -906,7 +906,7 @@ int cmd_inject(int argc, const char **argv)
        }
 
        data.path = inject.input_name;
-       inject.session = perf_session__new(&data, true, &inject.tool);
+       inject.session = perf_session__new(&data, inject.output.is_pipe, &inject.tool);
        if (IS_ERR(inject.session))
                return PTR_ERR(inject.session);
 
index f3ac9d4..2e5eff4 100644 (file)
@@ -210,8 +210,10 @@ static int arm_spe_do_get_packet(const unsigned char *buf, size_t len,
 
        if ((hdr & SPE_HEADER0_MASK2) == SPE_HEADER0_EXTENDED) {
                /* 16-bit extended format header */
-               ext_hdr = 1;
+               if (len == 1)
+                       return ARM_SPE_BAD_PACKET;
 
+               ext_hdr = 1;
                hdr = buf[1];
                if (hdr == SPE_HEADER1_ALIGNMENT)
                        return arm_spe_get_alignment(buf, len, packet);
index 423ec69..5ecd4f4 100644 (file)
@@ -201,7 +201,7 @@ static int block_total_cycles_pct_entry(struct perf_hpp_fmt *fmt,
        double ratio = 0.0;
 
        if (block_fmt->total_cycles)
-               ratio = (double)bi->cycles / (double)block_fmt->total_cycles;
+               ratio = (double)bi->cycles_aggr / (double)block_fmt->total_cycles;
 
        return color_pct(hpp, block_fmt->width, 100.0 * ratio);
 }
@@ -216,9 +216,9 @@ static int64_t block_total_cycles_pct_sort(struct perf_hpp_fmt *fmt,
        double l, r;
 
        if (block_fmt->total_cycles) {
-               l = ((double)bi_l->cycles /
+               l = ((double)bi_l->cycles_aggr /
                        (double)block_fmt->total_cycles) * 100000.0;
-               r = ((double)bi_r->cycles /
+               r = ((double)bi_r->cycles_aggr /
                        (double)block_fmt->total_cycles) * 100000.0;
                return (int64_t)l - (int64_t)r;
        }
index 9ea2c0a..185b8c5 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
 /******************************************************************************
  *
- * Module Name: cfsize - Common get file size function
+ * Module Name: cmfsize - Common get file size function
  *
  * Copyright (C) 2000 - 2021, Intel Corp.
  *
index 3b796dd..ca24f68 100644 (file)
@@ -296,21 +296,34 @@ static void *idr_throbber(void *arg)
        return NULL;
 }
 
+/*
+ * There are always either 1 or 2 objects in the IDR.  If we find nothing,
+ * or we find something at an ID we didn't expect, that's a bug.
+ */
 void idr_find_test_1(int anchor_id, int throbber_id)
 {
        pthread_t throbber;
        time_t start = time(NULL);
 
-       pthread_create(&throbber, NULL, idr_throbber, &throbber_id);
-
        BUG_ON(idr_alloc(&find_idr, xa_mk_value(anchor_id), anchor_id,
                                anchor_id + 1, GFP_KERNEL) != anchor_id);
 
+       pthread_create(&throbber, NULL, idr_throbber, &throbber_id);
+
+       rcu_read_lock();
        do {
                int id = 0;
                void *entry = idr_get_next(&find_idr, &id);
-               BUG_ON(entry != xa_mk_value(id));
+               rcu_read_unlock();
+               if ((id != anchor_id && id != throbber_id) ||
+                   entry != xa_mk_value(id)) {
+                       printf("%s(%d, %d): %p at %d\n", __func__, anchor_id,
+                               throbber_id, entry, id);
+                       abort();
+               }
+               rcu_read_lock();
        } while (time(NULL) < start + 11);
+       rcu_read_unlock();
 
        pthread_join(throbber, NULL);
 
@@ -577,6 +590,7 @@ void ida_tests(void)
 
 int __weak main(void)
 {
+       rcu_register_thread();
        radix_tree_init();
        idr_checks();
        ida_tests();
@@ -584,5 +598,6 @@ int __weak main(void)
        rcu_barrier();
        if (nr_allocated)
                printf("nr_allocated = %d\n", nr_allocated);
+       rcu_unregister_thread();
        return 0;
 }
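
Both fixes here follow from the userspace RCU rules the radix-tree test harness inherits from liburcu: a thread must be registered before it enters a read-side critical section, and lockless iteration such as idr_get_next() must run under rcu_read_lock(). A minimal skeleton of those obligations, assuming liburcu's urcu.h:

#include <urcu.h>

int example(void)
{
        rcu_register_thread();          /* required before any rcu_read_lock() */

        rcu_read_lock();
        /* ... lockless lookups such as idr_get_next() ... */
        rcu_read_unlock();

        rcu_unregister_thread();
        return 0;
}
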
diff --git a/tools/testing/radix-tree/linux/compiler_types.h b/tools/testing/radix-tree/linux/compiler_types.h
deleted file mode 100644 (file)
index e69de29..0000000
index 9eae0fb..e00520c 100644 (file)
@@ -224,7 +224,9 @@ void multiorder_checks(void)
 
 int __weak main(void)
 {
+       rcu_register_thread();
        radix_tree_init();
        multiorder_checks();
+       rcu_unregister_thread();
        return 0;
 }
index e61e43e..f20e12c 100644 (file)
@@ -25,11 +25,13 @@ void xarray_tests(void)
 
 int __weak main(void)
 {
+       rcu_register_thread();
        radix_tree_init();
        xarray_tests();
        radix_tree_cpu_dead(1);
        rcu_barrier();
        if (nr_allocated)
                printf("nr_allocated = %d\n", nr_allocated);
+       rcu_unregister_thread();
        return 0;
 }
index 37c5494..e25917f 100644 (file)
@@ -6,6 +6,7 @@
 #include <test_progs.h>
 #include "bpf_dctcp.skel.h"
 #include "bpf_cubic.skel.h"
+#include "bpf_tcp_nogpl.skel.h"
 
 #define min(a, b) ((a) < (b) ? (a) : (b))
 
@@ -227,10 +228,53 @@ static void test_dctcp(void)
        bpf_dctcp__destroy(dctcp_skel);
 }
 
+static char *err_str;
+static bool found;
+
+static int libbpf_debug_print(enum libbpf_print_level level,
+                             const char *format, va_list args)
+{
+       char *log_buf;
+
+       if (level != LIBBPF_WARN ||
+           strcmp(format, "libbpf: \n%s\n")) {
+               vprintf(format, args);
+               return 0;
+       }
+
+       log_buf = va_arg(args, char *);
+       if (!log_buf)
+               goto out;
+       if (err_str && strstr(log_buf, err_str) != NULL)
+               found = true;
+out:
+       printf(format, log_buf);
+       return 0;
+}
+
+static void test_invalid_license(void)
+{
+       libbpf_print_fn_t old_print_fn;
+       struct bpf_tcp_nogpl *skel;
+
+       err_str = "struct ops programs must have a GPL compatible license";
+       found = false;
+       old_print_fn = libbpf_set_print(libbpf_debug_print);
+
+       skel = bpf_tcp_nogpl__open_and_load();
+       ASSERT_NULL(skel, "bpf_tcp_nogpl");
+       ASSERT_EQ(found, true, "expected_err_msg");
+
+       bpf_tcp_nogpl__destroy(skel);
+       libbpf_set_print(old_print_fn);
+}
+
 void test_bpf_tcp_ca(void)
 {
        if (test__start_subtest("dctcp"))
                test_dctcp();
        if (test__start_subtest("cubic"))
                test_cubic();
+       if (test__start_subtest("invalid_license"))
+               test_invalid_license();
 }
diff --git a/tools/testing/selftests/bpf/progs/bpf_tcp_nogpl.c b/tools/testing/selftests/bpf/progs/bpf_tcp_nogpl.c
new file mode 100644 (file)
index 0000000..2ecd833
--- /dev/null
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <linux/types.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_tcp_helpers.h"
+
+char _license[] SEC("license") = "X";
+
+void BPF_STRUCT_OPS(nogpltcp_init, struct sock *sk)
+{
+}
+
+SEC(".struct_ops")
+struct tcp_congestion_ops bpf_nogpltcp = {
+       .init           = (void *)nogpltcp_init,
+       .name           = "bpf_nogpltcp",
+};
index 57ed67b..8a1caf4 100644 (file)
        },
        .fixup_map_hash_8b = { 3 },
        /* not actually fully unbounded, but the bound is very high */
-       .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root",
-       .result_unpriv = REJECT,
        .errstr = "value -4294967168 makes map_value pointer be out of bounds",
        .result = REJECT,
 },
        BPF_EXIT_INSN(),
        },
        .fixup_map_hash_8b = { 3 },
-       /* not actually fully unbounded, but the bound is very high */
-       .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root",
-       .result_unpriv = REJECT,
        .errstr = "value -4294967168 makes map_value pointer be out of bounds",
        .result = REJECT,
 },
index c162498..91869ae 100644 (file)
@@ -6,7 +6,7 @@
                BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
                BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
+       .errstr_unpriv = "R1 has pointer with unsupported alu operation",
        .errstr = "R0 tried to subtract pointer from scalar",
        .result = REJECT,
 },
@@ -21,7 +21,7 @@
                BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
                BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types",
+       .errstr_unpriv = "R1 has pointer with unsupported alu operation",
        .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 1,
                BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
                BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
+       .errstr_unpriv = "R1 has pointer with unsupported alu operation",
        .errstr = "R0 tried to subtract pointer from scalar",
        .result = REJECT,
 },
 {
        "check deducing bounds from const, 4",
        .insns = {
+               BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
                BPF_MOV64_IMM(BPF_REG_0, 0),
                BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
                BPF_EXIT_INSN(),
                BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
                BPF_EXIT_INSN(),
-               BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+               BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_0),
                BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types",
+       .errstr_unpriv = "R6 has pointer with unsupported alu operation",
        .result_unpriv = REJECT,
        .result = ACCEPT,
 },
@@ -61,7 +62,7 @@
                BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
                BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
+       .errstr_unpriv = "R1 has pointer with unsupported alu operation",
        .errstr = "R0 tried to subtract pointer from scalar",
        .result = REJECT,
 },
@@ -74,7 +75,7 @@
                BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
                BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
+       .errstr_unpriv = "R1 has pointer with unsupported alu operation",
        .errstr = "R0 tried to subtract pointer from scalar",
        .result = REJECT,
 },
@@ -88,7 +89,7 @@
                            offsetof(struct __sk_buff, mark)),
                BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types",
+       .errstr_unpriv = "R1 has pointer with unsupported alu operation",
        .errstr = "dereference of modified ctx ptr",
        .result = REJECT,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
                            offsetof(struct __sk_buff, mark)),
                BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
+       .errstr_unpriv = "R1 has pointer with unsupported alu operation",
        .errstr = "dereference of modified ctx ptr",
        .result = REJECT,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
                BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
                BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
+       .errstr_unpriv = "R1 has pointer with unsupported alu operation",
        .errstr = "R0 tried to subtract pointer from scalar",
        .result = REJECT,
 },
index 9baca7a..c2aa6f2 100644 (file)
@@ -19,7 +19,6 @@
        },
        .fixup_map_hash_8b = { 3 },
        .errstr = "unbounded min value",
-       .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
        .result = REJECT,
 },
 {
@@ -43,7 +42,6 @@
        },
        .fixup_map_hash_8b = { 3 },
        .errstr = "unbounded min value",
-       .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
        .result = REJECT,
 },
 {
@@ -69,7 +67,6 @@
        },
        .fixup_map_hash_8b = { 3 },
        .errstr = "unbounded min value",
-       .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
        .result = REJECT,
 },
 {
@@ -94,7 +91,6 @@
        },
        .fixup_map_hash_8b = { 3 },
        .errstr = "unbounded min value",
-       .errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
        .result = REJECT,
 },
 {
        },
        .fixup_map_hash_8b = { 3 },
        .errstr = "unbounded min value",
-       .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
        .result = REJECT,
 },
 {
        },
        .fixup_map_hash_8b = { 3 },
        .errstr = "unbounded min value",
-       .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
        .result = REJECT,
 },
 {
        },
        .fixup_map_hash_8b = { 3 },
        .errstr = "unbounded min value",
-       .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
        .result = REJECT,
 },
 {
        },
        .fixup_map_hash_8b = { 3 },
        .errstr = "unbounded min value",
-       .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
        .result = REJECT,
 },
 {
        },
        .fixup_map_hash_8b = { 3 },
        .errstr = "unbounded min value",
-       .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
        .result = REJECT,
 },
 {
        },
        .fixup_map_hash_8b = { 3 },
        .errstr = "unbounded min value",
-       .errstr_unpriv = "R7 has unknown scalar with mixed signed bounds",
        .result = REJECT,
 },
 {
        },
        .fixup_map_hash_8b = { 4 },
        .errstr = "unbounded min value",
-       .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
        .result = REJECT,
 },
 {
        },
        .fixup_map_hash_8b = { 3 },
        .errstr = "unbounded min value",
-       .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
        .result = REJECT,
-       .result_unpriv = REJECT,
 },
index 6f610cf..1f82021 100644 (file)
@@ -76,7 +76,7 @@
        },
        .fixup_map_hash_16b = { 4 },
        .result_unpriv = REJECT,
-       .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
+       .errstr_unpriv = "R1 has pointer with unsupported alu operation",
        .result = ACCEPT,
 },
 {
@@ -94,6 +94,6 @@
        },
        .fixup_map_hash_16b = { 4 },
        .result_unpriv = REJECT,
-       .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
+       .errstr_unpriv = "R0 has pointer with unsupported alu operation",
        .result = ACCEPT,
 },
index 3e32400..bd436df 100644 (file)
        BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
        BPF_EXIT_INSN(),
        },
-       .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
+       .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
        .result_unpriv = REJECT,
        .result = ACCEPT,
 },
index feb9126..e5913fd 100644 (file)
@@ -21,8 +21,6 @@
        .fixup_map_hash_16b = { 5 },
        .fixup_map_array_48b = { 8 },
        .result = ACCEPT,
-       .result_unpriv = REJECT,
-       .errstr_unpriv = "R1 tried to add from different maps",
        .retval = 1,
 },
 {
        .fixup_map_array_48b = { 1 },
        .result = ACCEPT,
        .result_unpriv = REJECT,
-       .errstr_unpriv = "R2 tried to add from different pointers or scalars",
+       .errstr_unpriv = "R2 tried to add from different maps, paths or scalars",
        .retval = 0,
 },
 {
        .fixup_map_array_48b = { 1 },
        .result = ACCEPT,
        .result_unpriv = REJECT,
-       .errstr_unpriv = "R2 tried to add from different maps, paths, or prohibited types",
+       .errstr_unpriv = "R2 tried to add from different maps, paths or scalars",
        .retval = 0,
 },
 {
index 2f2eeb8..5aadf84 100644 (file)
@@ -108,7 +108,7 @@ static void run_test(uint32_t run)
        kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
        vm_create_irqchip(vm);
 
-       fprintf(stderr, "%s: [%d] start vcpus\n", __func__, run);
+       pr_debug("%s: [%d] start vcpus\n", __func__, run);
        for (i = 0; i < VCPU_NUM; ++i) {
                vm_vcpu_add_default(vm, i, guest_code);
                payloads[i].vm = vm;
@@ -124,7 +124,7 @@ static void run_test(uint32_t run)
                        check_set_affinity(throw_away, &cpu_set);
                }
        }
-       fprintf(stderr, "%s: [%d] all threads launched\n", __func__, run);
+       pr_debug("%s: [%d] all threads launched\n", __func__, run);
        sem_post(sem);
        for (i = 0; i < VCPU_NUM; ++i)
                check_join(threads[i], &b);
@@ -147,16 +147,16 @@ int main(int argc, char **argv)
                if (pid == 0)
                        run_test(i); /* This function always exits */
 
-               fprintf(stderr, "%s: [%d] waiting semaphore\n", __func__, i);
+               pr_debug("%s: [%d] waiting semaphore\n", __func__, i);
                sem_wait(sem);
                r = (rand() % DELAY_US_MAX) + 1;
-               fprintf(stderr, "%s: [%d] waiting %dus\n", __func__, i, r);
+               pr_debug("%s: [%d] waiting %dus\n", __func__, i, r);
                usleep(r);
                r = waitpid(pid, &s, WNOHANG);
                TEST_ASSERT(r != pid,
                            "%s: [%d] child exited unexpectedly status: [%d]",
                            __func__, i, s);
-               fprintf(stderr, "%s: [%d] killing child\n", __func__, i);
+               pr_debug("%s: [%d] killing child\n", __func__, i);
                kill(pid, SIGKILL);
        }
 
index ffbc455..7f1d276 100644 (file)
@@ -80,19 +80,24 @@ static inline void check_tsc_msr_rdtsc(void)
        GUEST_ASSERT(delta_ns * 100 < (t2 - t1) * 100);
 }
 
+static inline u64 get_tscpage_ts(struct ms_hyperv_tsc_page *tsc_page)
+{
+       return mul_u64_u64_shr64(rdtsc(), tsc_page->tsc_scale) + tsc_page->tsc_offset;
+}
+
 static inline void check_tsc_msr_tsc_page(struct ms_hyperv_tsc_page *tsc_page)
 {
        u64 r1, r2, t1, t2;
 
        /* Compare TSC page clocksource with HV_X64_MSR_TIME_REF_COUNT */
-       t1 = mul_u64_u64_shr64(rdtsc(), tsc_page->tsc_scale) + tsc_page->tsc_offset;
+       t1 = get_tscpage_ts(tsc_page);
        r1 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
 
        /* 10 ms tolerance */
        GUEST_ASSERT(r1 >= t1 && r1 - t1 < 100000);
        nop_loop();
 
-       t2 = mul_u64_u64_shr64(rdtsc(), tsc_page->tsc_scale) + tsc_page->tsc_offset;
+       t2 = get_tscpage_ts(tsc_page);
        r2 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
        GUEST_ASSERT(r2 >= t1 && r2 - t2 < 100000);
 }
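
The new helper just centralizes the Hyper-V TSC page formula: reference time (in 100ns units) = ((tsc * tsc_scale) >> 64) + tsc_offset, where the multiply is a full 128-bit product and only the high half is kept. A self-contained sketch of that arithmetic using a 128-bit intermediate:

#include <stdint.h>

/* High 64 bits of the 128-bit product, as mul_u64_u64_shr64() computes. */
static inline uint64_t mul_u64_u64_hi(uint64_t a, uint64_t b)
{
        return (uint64_t)(((unsigned __int128)a * b) >> 64);
}

/* Reference time: ((tsc * scale) >> 64) + offset. */
static inline uint64_t tscpage_time(uint64_t tsc, uint64_t scale, int64_t offset)
{
        return mul_u64_u64_hi(tsc, scale) + offset;
}
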
@@ -130,7 +135,11 @@ static void guest_main(struct ms_hyperv_tsc_page *tsc_page, vm_paddr_t tsc_page_
 
        tsc_offset = tsc_page->tsc_offset;
        /* Call KVM_SET_CLOCK from userspace, check that TSC page was updated */
+
        GUEST_SYNC(7);
+       /* Sanity check the TSC page timestamp; it should be close to 0 */
+       GUEST_ASSERT(get_tscpage_ts(tsc_page) < 100000);
+
        GUEST_ASSERT(tsc_page->tsc_offset != tsc_offset);
 
        nop_loop();
index 0ccb1dd..eb307ca 100755 (executable)
@@ -657,10 +657,21 @@ test_ecn_decap()
 {
        # In accordance with INET_ECN_decapsulate()
        __test_ecn_decap 00 00 0x00
+       __test_ecn_decap 00 01 0x00
+       __test_ecn_decap 00 02 0x00
+       # 00 03 is tested in test_ecn_decap_error()
+       __test_ecn_decap 01 00 0x01
        __test_ecn_decap 01 01 0x01
-       __test_ecn_decap 02 01 0x01
+       __test_ecn_decap 01 02 0x01
        __test_ecn_decap 01 03 0x03
+       __test_ecn_decap 02 00 0x02
+       __test_ecn_decap 02 01 0x01
+       __test_ecn_decap 02 02 0x02
        __test_ecn_decap 02 03 0x03
+       __test_ecn_decap 03 00 0x03
+       __test_ecn_decap 03 01 0x03
+       __test_ecn_decap 03 02 0x03
+       __test_ecn_decap 03 03 0x03
        test_ecn_decap_error
 }