Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
author    Jakub Kicinski <kuba@kernel.org>
          Fri, 29 Jan 2021 01:09:31 +0000 (17:09 -0800)
committer Jakub Kicinski <kuba@kernel.org>
          Fri, 29 Jan 2021 01:09:31 +0000 (17:09 -0800)
drivers/net/can/dev.c
  b552766c872f ("can: dev: prevent potential information leak in can_fill_info()")
  3e77f70e7345 ("can: dev: move driver related infrastructure into separate subdir")
  0a042c6ec991 ("can: dev: move netlink related code into seperate file")

  Code move.

drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
  57ac4a31c483 ("net/mlx5e: Correctly handle changing the number of queues when the interface is down")
  214baf22870c ("net/mlx5e: Support HTB offload")

  Adjacent code changes.

net/switchdev/switchdev.c
  20776b465c0c ("net: switchdev: don't set port_obj_info->handled true when -EOPNOTSUPP")
  ffb68fc58e96 ("net: switchdev: remove the transaction structure from port object notifiers")
  bae33f2b5afe ("net: switchdev: remove the transaction structure from port attributes")

  The transaction parameter gets dropped; otherwise keep the fix.
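  For reference, the intent of the resolution is roughly the sketch below
  (an approximation, not the verbatim resolved hunk; the callback signature
  is abbreviated): keep the -EOPNOTSUPP "handled" fix from 20776b465c0c
  while no longer passing the transaction object removed by
  ffb68fc58e96/bae33f2b5afe.

	if (check_cb(dev)) {
		/* transaction argument no longer passed to the callback */
		err = add_cb(dev, port_obj_info->obj, extack);
		/* only mark the object handled if the driver did not opt out */
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}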

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
463 files changed:
.mailmap
Documentation/ABI/testing/sysfs-class-devlink
Documentation/ABI/testing/sysfs-devices-consumer
Documentation/ABI/testing/sysfs-devices-supplier
Documentation/ABI/testing/sysfs-driver-ufs
Documentation/admin-guide/device-mapper/dm-integrity.rst
Documentation/dev-tools/kasan.rst
Documentation/dev-tools/kunit/usage.rst
Documentation/devicetree/bindings/iio/accel/bosch,bma255.yaml
Documentation/devicetree/bindings/sound/mt8192-mt6359-rt1015-rt5682.yaml
Documentation/networking/ip-sysctl.rst
Documentation/virt/kvm/api.rst
MAINTAINERS
Makefile
arch/arm/boot/dts/imx6q-tbs2910.dts
arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
arch/arm/boot/dts/imx6qdl-sr-som.dtsi
arch/arm/boot/dts/imx7d-flex-concentrator.dts
arch/arm/boot/dts/ste-db8500.dtsi
arch/arm/boot/dts/ste-db8520.dtsi
arch/arm/boot/dts/ste-db9500.dtsi [new file with mode: 0644]
arch/arm/boot/dts/ste-snowball.dts
arch/arm/mach-imx/suspend-imx6.S
arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi
arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
arch/arm64/boot/dts/freescale/imx8mn.dtsi
arch/arm64/boot/dts/freescale/imx8mp.dtsi
arch/arm64/configs/defconfig
arch/arm64/kernel/probes/kprobes.c
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/nvhe/psci-relay.c
arch/arm64/kvm/pmu-emul.c
arch/arm64/kvm/sys_regs.c
arch/arm64/mm/fault.c
arch/ia64/include/uapi/asm/cmpxchg.h
arch/ia64/kernel/time.c
arch/mips/include/asm/highmem.h
arch/openrisc/include/asm/io.h
arch/openrisc/mm/ioremap.c
arch/parisc/Kconfig
arch/parisc/include/asm/irq.h
arch/parisc/kernel/entry.S
arch/powerpc/include/asm/exception-64s.h
arch/powerpc/include/asm/feature-fixups.h
arch/powerpc/include/asm/highmem.h
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/vmlinux.lds.S
arch/powerpc/lib/feature-fixups.c
arch/sh/Kconfig
arch/sh/boards/mach-sh03/rtc.c
arch/sh/configs/landisk_defconfig
arch/sh/configs/microdev_defconfig
arch/sh/configs/sdk7780_defconfig
arch/sh/configs/sdk7786_defconfig
arch/sh/configs/se7750_defconfig
arch/sh/configs/sh03_defconfig
arch/sh/drivers/dma/Kconfig
arch/sh/include/asm/gpio.h
arch/sh/kernel/cpu/sh3/entry.S
arch/sh/mm/Kconfig
arch/sh/mm/asids-debugfs.c
arch/sh/mm/cache-debugfs.c
arch/sh/mm/pmb.c
arch/sparc/include/asm/highmem.h
arch/x86/entry/common.c
arch/x86/include/asm/fpu/api.h
arch/x86/include/asm/idtentry.h
arch/x86/include/asm/intel-family.h
arch/x86/include/asm/msr.h
arch/x86/include/asm/topology.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/mce/core.c
arch/x86/kernel/cpu/topology.c
arch/x86/kernel/fpu/core.c
arch/x86/kernel/setup.c
arch/x86/kernel/sev-es.c
arch/x86/kernel/smpboot.c
arch/x86/kvm/kvm_cache_regs.h
arch/x86/kvm/mmu.h
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/pmu_intel.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/lib/mmx_32.c
arch/x86/xen/enlighten_pv.c
arch/x86/xen/xen-asm.S
drivers/acpi/scan.c
drivers/base/core.c
drivers/base/dd.c
drivers/base/platform.c
drivers/block/xen-blkfront.c
drivers/bus/arm-integrator-lm.c
drivers/clk/imx/Kconfig
drivers/clk/mmp/clk-audio.c
drivers/clk/qcom/gcc-sc7180.c
drivers/clk/qcom/gcc-sm8250.c
drivers/counter/ti-eqep.c
drivers/crypto/marvell/cesa/cesa.h
drivers/firmware/imx/Kconfig
drivers/gpio/Kconfig
drivers/gpio/gpio-mvebu.c
drivers/gpio/gpiolib-cdev.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_gem_vram_helper.c
drivers/gpu/drm/drm_syncobj.c
drivers/gpu/drm/i915/display/intel_ddi.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp.h
drivers/gpu/drm/i915/display/intel_hdcp.c
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/gt/intel_timeline.c
drivers/gpu/drm/i915/i915_pmu.c
drivers/gpu/drm/i915/i915_request.h
drivers/gpu/drm/ttm/ttm_pool.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/hid/hid-multitouch.c
drivers/hid/wacom_sys.c
drivers/hid/wacom_wac.h
drivers/hwtracing/intel_th/pci.c
drivers/hwtracing/stm/heartbeat.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-octeon-core.c
drivers/i2c/busses/i2c-tegra-bpmp.c
drivers/i2c/busses/i2c-tegra.c
drivers/iio/adc/ti_am335x_adc.c
drivers/iio/common/st_sensors/st_sensors_trigger.c
drivers/iio/dac/ad5504.c
drivers/iio/proximity/sx9310.c
drivers/iio/temperature/mlx90632.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/hns/hns_roce_device.h
drivers/infiniband/hw/hns/hns_roce_qp.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
drivers/infiniband/sw/rxe/rxe_net.c
drivers/infiniband/sw/rxe/rxe_resp.c
drivers/irqchip/Kconfig
drivers/irqchip/irq-bcm2836.c
drivers/irqchip/irq-loongson-liointc.c
drivers/irqchip/irq-mips-cpu.c
drivers/irqchip/irq-sl28cpld.c
drivers/lightnvm/core.c
drivers/md/dm-crypt.c
drivers/md/dm-integrity.c
drivers/md/dm-table.c
drivers/md/md.c
drivers/media/cec/platform/Makefile
drivers/media/common/videobuf2/videobuf2-v4l2.c
drivers/media/i2c/ccs-pll.c
drivers/media/i2c/ccs/ccs-data.c
drivers/media/pci/intel/ipu3/ipu3-cio2.c
drivers/media/platform/qcom/venus/core.c
drivers/media/platform/rcar-vin/rcar-core.c
drivers/media/rc/ir-mce_kbd-decoder.c
drivers/media/rc/ite-cir.c
drivers/media/rc/rc-main.c
drivers/media/rc/serial_ir.c
drivers/media/v4l2-core/v4l2-common.c
drivers/misc/cardreader/rtsx_pcr.c
drivers/misc/habanalabs/common/device.c
drivers/misc/habanalabs/common/firmware_if.c
drivers/misc/habanalabs/common/habanalabs.h
drivers/misc/habanalabs/common/habanalabs_ioctl.c
drivers/misc/habanalabs/common/memory.c
drivers/misc/habanalabs/common/mmu.c
drivers/misc/habanalabs/common/mmu_v1.c
drivers/misc/habanalabs/gaudi/gaudi.c
drivers/misc/habanalabs/goya/goya.c
drivers/mmc/core/queue.c
drivers/mmc/host/sdhci-brcmstb.c
drivers/mmc/host/sdhci-of-dwcmshc.c
drivers/mmc/host/sdhci-xenon.c
drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/raw/intel-nand-controller.c
drivers/mtd/nand/raw/nandsim.c
drivers/mtd/nand/raw/omap2.c
drivers/mtd/nand/spi/core.c
drivers/net/can/dev/netlink.c
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/microchip/ksz8795.c
drivers/net/dsa/microchip/ksz_common.c
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_ethtool.c
drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/igc/igc_ethtool.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
drivers/net/ethernet/mellanox/mlx5/core/en/health.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
drivers/net/team/team.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/qmi_wwan.c
drivers/net/wireless/intel/iwlwifi/cfg/22000.c
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
drivers/net/wireless/intel/iwlwifi/fw/acpi.h
drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
drivers/net/wireless/intel/iwlwifi/iwl-io.c
drivers/net/wireless/intel/iwlwifi/iwl-io.h
drivers/net/wireless/intel/iwlwifi/iwl-prph.h
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/intel/iwlwifi/queue/tx.c
drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
drivers/net/wireless/mediatek/mt7601u/dma.c
drivers/nvme/host/core.c
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/target/admin-cmd.c
drivers/phy/ingenic/Makefile
drivers/phy/mediatek/Kconfig
drivers/phy/motorola/phy-cpcap-usb.c
drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
drivers/pinctrl/nomadik/pinctrl-nomadik.c
drivers/pinctrl/pinctrl-ingenic.c
drivers/pinctrl/qcom/pinctrl-msm.c
drivers/pinctrl/qcom/pinctrl-msm.h
drivers/platform/surface/Kconfig
drivers/platform/surface/surface_gpe.c
drivers/platform/x86/amd-pmc.c
drivers/platform/x86/hp-wmi.c
drivers/platform/x86/i2c-multi-instantiate.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/intel-vbtn.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/touchscreen_dmi.c
drivers/regulator/core.c
drivers/scsi/fnic/vnic_dev.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/libfc/fc_exch.c
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/scsi_transport_srp.c
drivers/scsi/ufs/Kconfig
drivers/scsi/ufs/ufshcd.c
drivers/sh/intc/core.c
drivers/sh/intc/virq-debugfs.c
drivers/soc/atmel/soc.c
drivers/soc/imx/Kconfig
drivers/soc/litex/litex_soc_ctrl.c
drivers/spi/spi-altera.c
drivers/spi/spidev.c
drivers/staging/media/hantro/hantro_v4l2.c
drivers/staging/media/sunxi/cedrus/cedrus_h264.c
drivers/staging/rtl8723bs/include/rtw_wifi_regd.h
drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
drivers/staging/rtl8723bs/os_dep/wifi_regd.c
drivers/target/target_core_user.c
drivers/tee/optee/call.c
drivers/thunderbolt/icm.c
drivers/tty/n_tty.c
drivers/tty/serial/mvebu-uart.c
drivers/tty/tty_io.c
drivers/usb/cdns3/cdns3-imx.c
drivers/usb/gadget/udc/aspeed-vhub/epn.c
drivers/usb/gadget/udc/bdc/Kconfig
drivers/usb/gadget/udc/core.c
drivers/usb/gadget/udc/dummy_hcd.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-hub.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci-tegra.c
drivers/xen/xenbus/xenbus_probe.c
fs/btrfs/backref.c
fs/btrfs/block-group.c
fs/btrfs/extent-tree.c
fs/btrfs/send.c
fs/btrfs/transaction.c
fs/btrfs/volumes.c
fs/ceph/mds_client.c
fs/cifs/connect.c
fs/cifs/transport.c
fs/fs-writeback.c
fs/io_uring.c
fs/kernfs/file.c
fs/pipe.c
fs/proc/proc_sysctl.c
fs/udf/super.c
include/dt-bindings/sound/apq8016-lpass.h
include/dt-bindings/sound/qcom,lpass.h [new file with mode: 0644]
include/dt-bindings/sound/sc7180-lpass.h
include/linux/device.h
include/linux/kthread.h
include/linux/ktime.h
include/linux/mlx5/driver.h
include/linux/nvme.h
include/linux/regulator/consumer.h
include/linux/timekeeping32.h [deleted file]
include/linux/tty.h
include/media/v4l2-common.h
include/net/lapb.h
include/net/netfilter/nf_tables.h
include/net/tcp.h
include/sound/pcm.h
include/trace/events/sched.h
include/uapi/linux/mrp_bridge.h
include/uapi/linux/rpl.h
include/uapi/linux/v4l2-subdev.h
include/uapi/rdma/vmw_pvrdma-abi.h
kernel/fork.c
kernel/futex.c
kernel/irq/manage.c
kernel/irq/msi.c
kernel/kthread.c
kernel/locking/lockdep.c
kernel/locking/rtmutex.c
kernel/locking/rtmutex_common.h
kernel/printk/printk.c
kernel/printk/printk_ringbuffer.c
kernel/sched/core.c
kernel/sched/sched.h
kernel/signal.c
kernel/smpboot.c
kernel/time/ntp.c
kernel/time/timekeeping.c
kernel/workqueue.c
lib/Kconfig.ubsan
mm/highmem.c
mm/kasan/hw_tags.c
mm/kasan/init.c
mm/memblock.c
mm/memcontrol.c
mm/memory-failure.c
mm/migrate.c
mm/page_alloc.c
mm/slub.c
net/bridge/br_private_mrp.h
net/ceph/auth_x.c
net/ceph/crypto.c
net/ceph/messenger_v1.c
net/ceph/messenger_v2.c
net/ceph/mon_client.c
net/ceph/osd_client.c
net/decnet/dn_route.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv4/tcp_recovery.c
net/ipv4/tcp_timer.c
net/key/af_key.c
net/lapb/lapb_iface.c
net/lapb/lapb_timer.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/spectmgmt.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_dynset.c
net/nfc/netlink.c
net/nfc/rawsock.c
net/rxrpc/call_accept.c
net/switchdev/switchdev.c
net/wireless/wext-core.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_policy.c
sound/core/pcm_native.c
sound/core/seq/oss/seq_oss_synth.c
sound/hda/intel-dsp-config.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_via.c
sound/soc/amd/renoir/rn-pci-acp3x.c
sound/soc/codecs/ak4458.c
sound/soc/codecs/hdmi-codec.c
sound/soc/codecs/wm_adsp.c
sound/soc/fsl/imx-hdmi.c
sound/soc/intel/boards/sof_sdw.c
sound/soc/intel/skylake/skl-topology.c
sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c
sound/soc/mediatek/mt8192/mt8192-mt6359-rt1015-rt5682.c
sound/soc/qcom/lpass-cpu.c
sound/soc/qcom/lpass-ipq806x.c
sound/soc/qcom/lpass-lpaif-reg.h
sound/soc/qcom/lpass-platform.c
sound/soc/qcom/lpass-sc7180.c
sound/soc/qcom/lpass.h
sound/soc/soc-topology.c
sound/soc/sof/intel/Kconfig
sound/soc/sof/intel/hda-codec.c
sound/soc/sof/intel/hda-dsp.c
sound/soc/sof/intel/hda.h
sound/soc/sof/sof-acpi-dev.c
sound/soc/sof/sof-pci-dev.c
sound/usb/clock.c
sound/usb/endpoint.c
sound/usb/format.c
sound/usb/implicit.c
sound/usb/pcm.c
sound/usb/quirks.c
tools/gpio/gpio-event-mon.c
tools/gpio/gpio-watch.c
tools/lib/perf/evlist.c
tools/objtool/check.c
tools/objtool/elf.c
tools/perf/builtin-script.c
tools/perf/util/metricgroup.c
tools/power/x86/intel-speed-select/isst-config.c
tools/testing/kunit/kunit.py
tools/testing/kunit/kunit_config.py
tools/testing/kunit/kunit_json.py
tools/testing/kunit/kunit_kernel.py
tools/testing/kunit/kunit_parser.py
tools/testing/selftests/net/forwarding/router_mpath_nh.sh
tools/testing/selftests/net/forwarding/router_multipath.sh
tools/testing/selftests/net/xfrm_policy.sh
tools/testing/selftests/powerpc/alignment/alignment_handler.c
tools/testing/selftests/powerpc/mm/pkey_exec_prot.c
tools/testing/selftests/powerpc/mm/pkey_siginfo.c
virt/kvm/kvm_main.c

index b1ab012..cc4e91d 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -9,9 +9,6 @@
 #
 # Please keep this list dictionary sorted.
 #
-# This comment is parsed by git-shortlog:
-# repo-abbrev: /pub/scm/linux/kernel/git/
-#
 Aaron Durbin <adurbin@google.com>
 Adam Oldham <oldhamca@gmail.com>
 Adam Radford <aradford@gmail.com>
index b662f74..8a21ce5 100644 (file)
@@ -5,8 +5,8 @@ Description:
                Provide a place in sysfs for the device link objects in the
                kernel at any given time.  The name of a device link directory,
                denoted as ... above, is of the form <supplier>--<consumer>
-               where <supplier> is the supplier device name and <consumer> is
-               the consumer device name.
+               where <supplier> is the supplier bus:device name and <consumer>
+               is the consumer bus:device name.
 
 What:          /sys/class/devlink/.../auto_remove_on
 Date:          May 2020
index 1f06d74..0809fda 100644 (file)
@@ -4,5 +4,6 @@ Contact:        Saravana Kannan <saravanak@google.com>
 Description:
                The /sys/devices/.../consumer:<consumer> are symlinks to device
                links where this device is the supplier. <consumer> denotes the
-               name of the consumer in that device link. There can be zero or
-               more of these symlinks for a given device.
+               name of the consumer in that device link and is of the form
+               bus:device name. There can be zero or more of these symlinks
+               for a given device.
index a919e0d..207f597 100644 (file)
@@ -4,5 +4,6 @@ Contact:        Saravana Kannan <saravanak@google.com>
 Description:
                The /sys/devices/.../supplier:<supplier> are symlinks to device
                links where this device is the consumer. <supplier> denotes the
-               name of the supplier in that device link. There can be zero or
-               more of these symlinks for a given device.
+               name of the supplier in that device link and is of the form
+               bus:device name. There can be zero or more of these symlinks
+               for a given device.
index adc0d0e..75ccc5c 100644 (file)
@@ -916,21 +916,25 @@ Date:             September 2014
 Contact:       Subhash Jadavani <subhashj@codeaurora.org>
 Description:   This entry could be used to set or show the UFS device
                runtime power management level. The current driver
-               implementation supports 6 levels with next target states:
+               implementation supports 7 levels with next target states:
 
                ==  ====================================================
-               0   an UFS device will stay active, an UIC link will
+               0   UFS device will stay active, UIC link will
                    stay active
-               1   an UFS device will stay active, an UIC link will
+               1   UFS device will stay active, UIC link will
                    hibernate
-               2   an UFS device will moved to sleep, an UIC link will
+               2   UFS device will be moved to sleep, UIC link will
                    stay active
-               3   an UFS device will moved to sleep, an UIC link will
+               3   UFS device will be moved to sleep, UIC link will
                    hibernate
-               4   an UFS device will be powered off, an UIC link will
+               4   UFS device will be powered off, UIC link will
                    hibernate
-               5   an UFS device will be powered off, an UIC link will
+               5   UFS device will be powered off, UIC link will
                    be powered off
+               6   UFS device will be moved to deep sleep, UIC link
+                   will be powered off. Note, deep sleep might not be
+                   supported in which case this value will not be
+                   accepted
                ==  ====================================================
 
 What:          /sys/bus/platform/drivers/ufshcd/*/rpm_target_dev_state
@@ -954,21 +958,25 @@ Date:             September 2014
 Contact:       Subhash Jadavani <subhashj@codeaurora.org>
 Description:   This entry could be used to set or show the UFS device
                system power management level. The current driver
-               implementation supports 6 levels with next target states:
+               implementation supports 7 levels with next target states:
 
                ==  ====================================================
-               0   an UFS device will stay active, an UIC link will
+               0   UFS device will stay active, UIC link will
                    stay active
-               1   an UFS device will stay active, an UIC link will
+               1   UFS device will stay active, UIC link will
                    hibernate
-               2   an UFS device will moved to sleep, an UIC link will
+               2   UFS device will be moved to sleep, UIC link will
                    stay active
-               3   an UFS device will moved to sleep, an UIC link will
+               3   UFS device will be moved to sleep, UIC link will
                    hibernate
-               4   an UFS device will be powered off, an UIC link will
+               4   UFS device will be powered off, UIC link will
                    hibernate
-               5   an UFS device will be powered off, an UIC link will
+               5   UFS device will be powered off, UIC link will
                    be powered off
+               6   UFS device will be moved to deep sleep, UIC link
+                   will be powered off. Note, deep sleep might not be
+                   supported in which case this value will not be
+                   accepted
                ==  ====================================================
 
 What:          /sys/bus/platform/drivers/ufshcd/*/spm_target_dev_state
index 4e6f504..2cc5488 100644 (file)
@@ -177,14 +177,20 @@ bitmap_flush_interval:number
        The bitmap flush interval in milliseconds. The metadata buffers
        are synchronized when this interval expires.
 
+allow_discards
+       Allow block discard requests (a.k.a. TRIM) for the integrity device.
+       Discards are only allowed to devices using internal hash.
+
 fix_padding
        Use a smaller padding of the tag area that is more
        space-efficient. If this option is not present, large padding is
        used - that is for compatibility with older kernels.
 
-allow_discards
-       Allow block discard requests (a.k.a. TRIM) for the integrity device.
-       Discards are only allowed to devices using internal hash.
+legacy_recalculate
+       Allow recalculating of volumes with HMAC keys. This is disabled by
+       default for security reasons - an attacker could modify the volume,
+       set recalc_sector to zero, and the kernel would not detect the
+       modification.
 
 The journal mode (D/J), buffer_sectors, journal_watermark, commit_time and
 allow_discards can be changed when reloading the target (load an inactive
index 0fc3fb1..1651d96 100644 (file)
@@ -160,29 +160,14 @@ intended for use in production as a security mitigation. Therefore it supports
 boot parameters that allow to disable KASAN competely or otherwise control
 particular KASAN features.
 
-The things that can be controlled are:
+- ``kasan=off`` or ``=on`` controls whether KASAN is enabled (default: ``on``).
 
-1. Whether KASAN is enabled at all.
-2. Whether KASAN collects and saves alloc/free stacks.
-3. Whether KASAN panics on a detected bug or not.
+- ``kasan.stacktrace=off`` or ``=on`` disables or enables alloc and free stack
+  traces collection (default: ``on`` for ``CONFIG_DEBUG_KERNEL=y``, otherwise
+  ``off``).
 
-The ``kasan.mode`` boot parameter allows to choose one of three main modes:
-
-- ``kasan.mode=off`` - KASAN is disabled, no tag checks are performed
-- ``kasan.mode=prod`` - only essential production features are enabled
-- ``kasan.mode=full`` - all KASAN features are enabled
-
-The chosen mode provides default control values for the features mentioned
-above. However it's also possible to override the default values by providing:
-
-- ``kasan.stacktrace=off`` or ``=on`` - enable alloc/free stack collection
-                                       (default: ``on`` for ``mode=full``,
-                                        otherwise ``off``)
-- ``kasan.fault=report`` or ``=panic`` - only print KASAN report or also panic
-                                        (default: ``report``)
-
-If ``kasan.mode`` parameter is not provided, it defaults to ``full`` when
-``CONFIG_DEBUG_KERNEL`` is enabled, and to ``prod`` otherwise.
+- ``kasan.fault=report`` or ``=panic`` controls whether to only print a KASAN
+  report or also panic the kernel (default: ``report``).
 
 For developers
 ~~~~~~~~~~~~~~
index d9fdc14..650f995 100644 (file)
@@ -522,6 +522,63 @@ There's more boilerplate involved, but it can:
   * E.g. if we wanted to also test ``sha256sum``, we could add a ``sha256``
     field and reuse ``cases``.
 
+* be converted to a "parameterized test", see below.
+
+Parameterized Testing
+~~~~~~~~~~~~~~~~~~~~~
+
+The table-driven testing pattern is common enough that KUnit has special
+support for it.
+
+Reusing the same ``cases`` array from above, we can write the test as a
+"parameterized test" with the following.
+
+.. code-block:: c
+
+       // This is copy-pasted from above.
+       struct sha1_test_case {
+               const char *str;
+               const char *sha1;
+       };
+       struct sha1_test_case cases[] = {
+               {
+                       .str = "hello world",
+                       .sha1 = "2aae6c35c94fcfb415dbe95f408b9ce91ee846ed",
+               },
+               {
+                       .str = "hello world!",
+                       .sha1 = "430ce34d020724ed75a196dfc2ad67c77772d169",
+               },
+       };
+
+       // Need a helper function to generate a name for each test case.
+       static void case_to_desc(const struct sha1_test_case *t, char *desc)
+       {
+               strcpy(desc, t->str);
+       }
+       // Creates `sha1_gen_params()` to iterate over `cases`.
+       KUNIT_ARRAY_PARAM(sha1, cases, case_to_desc);
+
+       // Looks no different from a normal test.
+       static void sha1_test(struct kunit *test)
+       {
+               // This function can just contain the body of the for-loop.
+               // The former `cases[i]` is accessible under test->param_value.
+               char out[40];
+               struct sha1_test_case *test_param = (struct sha1_test_case *)(test->param_value);
+
+               sha1sum(test_param->str, out);
+               KUNIT_EXPECT_STREQ_MSG(test, (char *)out, test_param->sha1,
+                                     "sha1sum(%s)", test_param->str);
+       }
+
+       // Instead of KUNIT_CASE, we use KUNIT_CASE_PARAM and pass in the
+       // function declared by KUNIT_ARRAY_PARAM.
+       static struct kunit_case sha1_test_cases[] = {
+               KUNIT_CASE_PARAM(sha1_test, sha1_gen_params),
+               {}
+       };
+
 .. _kunit-on-non-uml:
 
 KUnit on non-UML architectures
index 6eef348..c2efbb8 100644 (file)
@@ -16,8 +16,8 @@ description:
 properties:
   compatible:
     enum:
-      - bosch,bmc150
-      - bosch,bmi055
+      - bosch,bmc150_accel
+      - bosch,bmi055_accel
       - bosch,bma255
       - bosch,bma250e
       - bosch,bma222
index bf8c8ba..5465082 100644 (file)
@@ -7,8 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Mediatek MT8192 with MT6359, RT1015 and RT5682 ASoC sound card driver
 
 maintainers:
-   - Jiaxin Yu <jiaxin.yu@mediatek.com>
-   - Shane Chien <shane.chien@mediatek.com>
+  - Jiaxin Yu <jiaxin.yu@mediatek.com>
+  - Shane Chien <shane.chien@mediatek.com>
 
 description:
   This binding describes the MT8192 sound card.
index 0e51ddd..f0353fb 100644 (file)
@@ -1807,12 +1807,24 @@ seg6_flowlabel - INTEGER
 ``conf/default/*``:
        Change the interface-specific default settings.
 
+       These settings would be used during creating new interfaces.
+
 
 ``conf/all/*``:
        Change all the interface-specific settings.
 
        [XXX:  Other special features than forwarding?]
 
+conf/all/disable_ipv6 - BOOLEAN
+       Changing this value is same as changing ``conf/default/disable_ipv6``
+       setting and also all per-interface ``disable_ipv6`` settings to the same
+       value.
+
+       Reading this value does not have any particular meaning. It does not say
+       whether IPv6 support is enabled or disabled. Returned value can be 1
+       also in the case when some interface has ``disable_ipv6`` set to 0 and
+       has configured IPv6 addresses.
+
 conf/all/forwarding - BOOLEAN
        Enable global IPv6 forwarding between all interfaces.
 
index c136e25..99ceb97 100644 (file)
@@ -360,10 +360,9 @@ since the last call to this ioctl.  Bit 0 is the first page in the
 memory slot.  Ensure the entire structure is cleared to avoid padding
 issues.
 
-If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 specifies
-the address space for which you want to return the dirty bitmap.
-They must be less than the value that KVM_CHECK_EXTENSION returns for
-the KVM_CAP_MULTI_ADDRESS_SPACE capability.
+If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of slot field specifies
+the address space for which you want to return the dirty bitmap.  See
+KVM_SET_USER_MEMORY_REGION for details on the usage of slot field.
 
 The bits in the dirty bitmap are cleared before the ioctl returns, unless
 KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is enabled.  For more information,
@@ -1281,6 +1280,9 @@ field userspace_addr, which must point at user addressable memory for
 the entire memory slot size.  Any object may back this memory, including
 anonymous memory, ordinary files, and hugetlbfs.
 
+On architectures that support a form of address tagging, userspace_addr must
+be an untagged address.
+
 It is recommended that the lower 21 bits of guest_phys_addr and userspace_addr
 be identical.  This allows large pages in the guest to be backed by large
 pages in the host.
@@ -1333,7 +1335,7 @@ documentation when it pops into existence).
 
 :Capability: KVM_CAP_ENABLE_CAP_VM
 :Architectures: all
-:Type: vcpu ioctl
+:Type: vm ioctl
 :Parameters: struct kvm_enable_cap (in)
 :Returns: 0 on success; -1 on error
 
@@ -4432,7 +4434,7 @@ to I/O ports.
 :Capability: KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
 :Architectures: x86, arm, arm64, mips
 :Type: vm ioctl
-:Parameters: struct kvm_dirty_log (in)
+:Parameters: struct kvm_clear_dirty_log (in)
 :Returns: 0 on success, -1 on error
 
 ::
@@ -4459,10 +4461,9 @@ in KVM's dirty bitmap, and dirty tracking is re-enabled for that page
 (for example via write-protection, or by clearing the dirty bit in
 a page table entry).
 
-If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 specifies
-the address space for which you want to return the dirty bitmap.
-They must be less than the value that KVM_CHECK_EXTENSION returns for
-the KVM_CAP_MULTI_ADDRESS_SPACE capability.
+If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of slot field specifies
+the address space for which you want to clear the dirty status.  See
+KVM_SET_USER_MEMORY_REGION for details on the usage of slot field.
 
 This ioctl is mostly useful when KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
 is enabled; for more information, see the description of the capability.
index 650deb9..bed957b 100644 (file)
@@ -3247,6 +3247,7 @@ L:        netdev@vger.kernel.org
 S:     Supported
 W:     http://sourceforge.net/projects/bonding/
 F:     drivers/net/bonding/
+F:     include/net/bonding.h
 F:     include/uapi/linux/if_bonding.h
 
 BOSCH SENSORTEC BMA400 ACCELEROMETER IIO DRIVER
@@ -3420,7 +3421,7 @@ F:        Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
 F:     drivers/pci/controller/pcie-brcmstb.c
 F:     drivers/staging/vc04_services
 N:     bcm2711
-N:     bcm2835
+N:     bcm283*
 
 BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
 M:     Florian Fainelli <f.fainelli@gmail.com>
@@ -3899,7 +3900,7 @@ F:        Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
 F:     drivers/mtd/nand/raw/cadence-nand-controller.c
 
 CADENCE USB3 DRD IP DRIVER
-M:     Peter Chen <peter.chen@nxp.com>
+M:     Peter Chen <peter.chen@kernel.org>
 M:     Pawel Laszczak <pawell@cadence.com>
 R:     Roger Quadros <rogerq@kernel.org>
 R:     Aswath Govindraju <a-govindraju@ti.com>
@@ -4184,7 +4185,7 @@ S:        Maintained
 F:     Documentation/translations/zh_CN/
 
 CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
-M:     Peter Chen <Peter.Chen@nxp.com>
+M:     Peter Chen <peter.chen@kernel.org>
 L:     linux-usb@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
@@ -4334,7 +4335,9 @@ W:        https://clangbuiltlinux.github.io/
 B:     https://github.com/ClangBuiltLinux/linux/issues
 C:     irc://chat.freenode.net/clangbuiltlinux
 F:     Documentation/kbuild/llvm.rst
+F:     include/linux/compiler-clang.h
 F:     scripts/clang-tools/
+F:     scripts/clang-version.sh
 F:     scripts/lld-version.sh
 K:     \b(?i:clang|llvm)\b
 
@@ -8454,11 +8457,8 @@ F:       drivers/i3c/
 F:     include/linux/i3c/
 
 IA64 (Itanium) PLATFORM
-M:     Tony Luck <tony.luck@intel.com>
-M:     Fenghua Yu <fenghua.yu@intel.com>
 L:     linux-ia64@vger.kernel.org
-S:     Odd Fixes
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux.git
+S:     Orphan
 F:     Documentation/ia64/
 F:     arch/ia64/
 
@@ -12436,6 +12436,7 @@ F:      tools/testing/selftests/net/ipsec.c
 NETWORKING [IPv4/IPv6]
 M:     "David S. Miller" <davem@davemloft.net>
 M:     Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
+M:     David Ahern <dsahern@kernel.org>
 L:     netdev@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
@@ -14527,10 +14528,18 @@ S:    Supported
 F:     drivers/crypto/qat/
 
 QCOM AUDIO (ASoC) DRIVERS
-M:     Patrick Lai <plai@codeaurora.org>
+M:     Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 M:     Banajit Goswami <bgoswami@codeaurora.org>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Supported
+F:     sound/soc/codecs/lpass-va-macro.c
+F:     sound/soc/codecs/lpass-wsa-macro.*
+F:     sound/soc/codecs/msm8916-wcd-analog.c
+F:     sound/soc/codecs/msm8916-wcd-digital.c
+F:     sound/soc/codecs/wcd9335.*
+F:     sound/soc/codecs/wcd934x.c
+F:     sound/soc/codecs/wcd-clsh-v2.*
+F:     sound/soc/codecs/wsa881x.c
 F:     sound/soc/qcom/
 
 QCOM IPA DRIVER
@@ -16982,7 +16991,7 @@ M:      Olivier Moysan <olivier.moysan@st.com>
 M:     Arnaud Pouliquen <arnaud.pouliquen@st.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Maintained
-F:     Documentation/devicetree/bindings/sound/st,stm32-*.txt
+F:     Documentation/devicetree/bindings/iio/adc/st,stm32-*.yaml
 F:     sound/soc/stm/
 
 STM32 TIMER/LPTIMER DRIVERS
@@ -18435,7 +18444,7 @@ F:      Documentation/usb/ohci.rst
 F:     drivers/usb/host/ohci*
 
 USB OTG FSM (Finite State Machine)
-M:     Peter Chen <Peter.Chen@nxp.com>
+M:     Peter Chen <peter.chen@kernel.org>
 L:     linux-usb@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
index 61357f7..917f536 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
index 861e05d..343364d 100644 (file)
                stdout-path = &uart1;
        };
 
+       aliases {
+               mmc0 = &usdhc2;
+               mmc1 = &usdhc3;
+               mmc2 = &usdhc4;
+               /delete-property/ mmc3;
+       };
+
        memory@10000000 {
                device_type = "memory";
                reg = <0x10000000 0x80000000>;
index 736074f..959d8ac 100644 (file)
 
                        /* VDD_AUD_1P8: Audio codec */
                        reg_aud_1p8v: ldo3 {
-                               regulator-name = "vdd1p8";
+                               regulator-name = "vdd1p8a";
                                regulator-min-microvolt = <1800000>;
                                regulator-max-microvolt = <1800000>;
                                regulator-boot-on;
index d6df598..b167b33 100644 (file)
 
        lcd_backlight: lcd-backlight {
                compatible = "pwm-backlight";
-               pwms = <&pwm4 0 5000000>;
+               pwms = <&pwm4 0 5000000 0>;
                pwm-names = "LCD_BKLT_PWM";
 
                brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
                i2c-gpio,delay-us = <2>; /* ~100 kHz */
                #address-cells = <1>;
                #size-cells = <0>;
-               status = "disabld";
+               status = "disabled";
        };
 
        i2c_cam: i2c-gpio-cam {
                i2c-gpio,delay-us = <2>; /* ~100 kHz */
                #address-cells = <1>;
                #size-cells = <0>;
-               status = "disabld";
+               status = "disabled";
        };
 };
 
index b065778..7e4e5fd 100644 (file)
@@ -53,7 +53,6 @@
 &fec {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_microsom_enet_ar8035>;
-       phy-handle = <&phy>;
        phy-mode = "rgmii-id";
        phy-reset-duration = <2>;
        phy-reset-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
                #address-cells = <1>;
                #size-cells = <0>;
 
-               phy: ethernet-phy@0 {
+               /*
+                * The PHY can appear at either address 0 or 4 due to the
+                * configuration (LED) pin not being pulled sufficiently.
+                */
+               ethernet-phy@0 {
                        reg = <0>;
                        qca,clk-out-frequency = <125000000>;
                };
+
+               ethernet-phy@4 {
+                       reg = <4>;
+                       qca,clk-out-frequency = <125000000>;
+               };
        };
 };
 
index 84b0952..bd6b528 100644 (file)
                compatible = "nxp,pcf2127";
                reg = <0>;
                spi-max-frequency = <2000000>;
+               reset-source;
        };
 };
 
index d309fad..344d298 100644 (file)
                                            200000 0>;
                };
        };
+
+       reserved-memory {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges;
+
+               /* Modem trace memory */
+               ram@06000000 {
+                       reg = <0x06000000 0x00f00000>;
+                       no-map;
+               };
+
+               /* Modem shared memory */
+               ram@06f00000 {
+                       reg = <0x06f00000 0x00100000>;
+                       no-map;
+               };
+
+               /* Modem private memory */
+               ram@07000000 {
+                       reg = <0x07000000 0x01000000>;
+                       no-map;
+               };
+
+               /*
+                * Initial Secure Software ISSW memory
+                *
+                * This is probably only used if the kernel tries
+                * to actually call into trustzone to run secure
+                * applications, which the mainline kernel probably
+                * will not do on this old chipset. But you can never
+                * be too careful, so reserve this memory anyway.
+                */
+               ram@17f00000 {
+                       reg = <0x17f00000 0x00100000>;
+                       no-map;
+               };
+       };
 };
index 48bd872..287804e 100644 (file)
                                            200000 0>;
                };
        };
+
+       reserved-memory {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges;
+
+               /* Modem trace memory */
+               ram@06000000 {
+                       reg = <0x06000000 0x00f00000>;
+                       no-map;
+               };
+
+               /* Modem shared memory */
+               ram@06f00000 {
+                       reg = <0x06f00000 0x00100000>;
+                       no-map;
+               };
+
+               /* Modem private memory */
+               ram@07000000 {
+                       reg = <0x07000000 0x01000000>;
+                       no-map;
+               };
+
+               /*
+                * Initial Secure Software ISSW memory
+                *
+                * This is probably only used if the kernel tries
+                * to actually call into trustzone to run secure
+                * applications, which the mainline kernel probably
+                * will not do on this old chipset. But you can never
+                * be too careful, so reserve this memory anyway.
+                */
+               ram@17f00000 {
+                       reg = <0x17f00000 0x00100000>;
+                       no-map;
+               };
+       };
 };
diff --git a/arch/arm/boot/dts/ste-db9500.dtsi b/arch/arm/boot/dts/ste-db9500.dtsi
new file mode 100644 (file)
index 0000000..0afff70
--- /dev/null
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "ste-dbx5x0.dtsi"
+
+/ {
+       cpus {
+               cpu@300 {
+                       /* cpufreq controls */
+                       operating-points = <1152000 0
+                                           800000 0
+                                           400000 0
+                                           200000 0>;
+               };
+       };
+
+       reserved-memory {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges;
+
+               /*
+                * Initial Secure Software ISSW memory
+                *
+                * This is probably only used if the kernel tries
+                * to actually call into trustzone to run secure
+                * applications, which the mainline kernel probably
+                * will not do on this old chipset. But you can never
+                * be too careful, so reserve this memory anyway.
+                */
+               ram@17f00000 {
+                       reg = <0x17f00000 0x00100000>;
+                       no-map;
+               };
+       };
+};
index be90e73..27d8a07 100644 (file)
@@ -4,7 +4,7 @@
  */
 
 /dts-v1/;
-#include "ste-db8500.dtsi"
+#include "ste-db9500.dtsi"
 #include "ste-href-ab8500.dtsi"
 #include "ste-href-family-pinctrl.dtsi"
 
index 1eabf2d..e06f946 100644 (file)
@@ -67,6 +67,7 @@
 #define MX6Q_CCM_CCR   0x0
 
        .align 3
+       .arm
 
        .macro  sync_l2_cache
 
index aef8f2b..5401a64 100644 (file)
@@ -4,11 +4,16 @@
  */
        usb {
                compatible = "simple-bus";
-               dma-ranges;
                #address-cells = <2>;
                #size-cells = <2>;
                ranges = <0x0 0x0 0x0 0x68500000 0x0 0x00400000>;
 
+               /*
+                * Internally, USB bus to the interconnect can only address up
+                * to 40-bit
+                */
+               dma-ranges = <0 0 0 0 0x100 0x0>;
+
                usbphy0: usb-phy@0 {
                        compatible = "brcm,sr-usb-combo-phy";
                        reg = <0x0 0x00000000 0x0 0x100>;
index 60ff19f..6c8a61c 100644 (file)
        reboot {
                compatible ="syscon-reboot";
                regmap = <&rst>;
-               offset = <0xb0>;
+               offset = <0>;
                mask = <0x02>;
        };
 
index ee17902..2a79e89 100644 (file)
                        #size-cells = <1>;
                        ranges;
 
-                       spba: bus@30000000 {
+                       spba: spba-bus@30000000 {
                                compatible = "fsl,spba-bus", "simple-bus";
                                #address-cells = <1>;
                                #size-cells = <1>;
index ecccfbb..23f5a5e 100644 (file)
                                #gpio-cells = <2>;
                                interrupt-controller;
                                #interrupt-cells = <2>;
-                               gpio-ranges = <&iomuxc 0 56 26>, <&iomuxc 0 144 4>;
+                               gpio-ranges = <&iomuxc 0 56 26>, <&iomuxc 26 144 4>;
                        };
 
                        gpio4: gpio@30230000 {
index 8383016..a0bcf02 100644 (file)
@@ -991,8 +991,6 @@ CONFIG_ARCH_TEGRA_210_SOC=y
 CONFIG_ARCH_TEGRA_186_SOC=y
 CONFIG_ARCH_TEGRA_194_SOC=y
 CONFIG_ARCH_TEGRA_234_SOC=y
-CONFIG_ARCH_K3_AM6_SOC=y
-CONFIG_ARCH_K3_J721E_SOC=y
 CONFIG_TI_SCI_PM_DOMAINS=y
 CONFIG_EXTCON_PTN5150=m
 CONFIG_EXTCON_USB_GPIO=y
index 89c64ad..66aac28 100644 (file)
@@ -352,8 +352,8 @@ kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned int esr)
        unsigned long addr = instruction_pointer(regs);
        struct kprobe *cur = kprobe_running();
 
-       if (cur && (kcb->kprobe_status == KPROBE_HIT_SS)
-           && ((unsigned long)&cur->ainsn.api.insn[1] == addr)) {
+       if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
+           ((unsigned long)&cur->ainsn.api.insn[1] == addr)) {
                kprobes_restore_local_irqflag(kcb, regs);
                post_kprobe_handler(cur, kcb, regs);
 
index 04c4485..fe60d25 100644 (file)
@@ -1396,8 +1396,9 @@ static void cpu_init_hyp_mode(void)
         * Calculate the raw per-cpu offset without a translation from the
         * kernel's mapping to the linear mapping, and store it in tpidr_el2
         * so that we can use adr_l to access per-cpu variables in EL2.
+        * Also drop the KASAN tag which gets in the way...
         */
-       params->tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
+       params->tpidr_el2 = (unsigned long)kasan_reset_tag(this_cpu_ptr_nvhe_sym(__per_cpu_start)) -
                            (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
 
        params->mair_el2 = read_sysreg(mair_el1);
index e394784..8e7128c 100644 (file)
@@ -77,12 +77,6 @@ static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt)
                         cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3));
 }
 
-static __noreturn unsigned long psci_forward_noreturn(struct kvm_cpu_context *host_ctxt)
-{
-       psci_forward(host_ctxt);
-       hyp_panic(); /* unreachable */
-}
-
 static unsigned int find_cpu_id(u64 mpidr)
 {
        unsigned int i;
@@ -251,10 +245,13 @@ static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_
        case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
        case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
                return psci_forward(host_ctxt);
+       /*
+        * SYSTEM_OFF/RESET should not return according to the spec.
+        * Allow it so as to stay robust to broken firmware.
+        */
        case PSCI_0_2_FN_SYSTEM_OFF:
        case PSCI_0_2_FN_SYSTEM_RESET:
-               psci_forward_noreturn(host_ctxt);
-               unreachable();
+               return psci_forward(host_ctxt);
        case PSCI_0_2_FN64_CPU_SUSPEND:
                return psci_cpu_suspend(func_id, host_ctxt);
        case PSCI_0_2_FN64_CPU_ON:
index 4ad66a5..247422a 100644 (file)
@@ -788,7 +788,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 {
        unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
        u64 val, mask = 0;
-       int base, i;
+       int base, i, nr_events;
 
        if (!pmceid1) {
                val = read_sysreg(pmceid0_el0);
@@ -801,13 +801,17 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
        if (!bmap)
                return val;
 
+       nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;
+
        for (i = 0; i < 32; i += 8) {
                u64 byte;
 
                byte = bitmap_get_value8(bmap, base + i);
                mask |= byte << i;
-               byte = bitmap_get_value8(bmap, 0x4000 + base + i);
-               mask |= byte << (32 + i);
+               if (nr_events >= (0x4000 + base + 32)) {
+                       byte = bitmap_get_value8(bmap, 0x4000 + base + i);
+                       mask |= byte << (32 + i);
+               }
        }
 
        return val & mask;
index 42ccc27..7c4f795 100644 (file)
  * 64bit interface.
  */
 
+#define reg_to_encoding(x)                                             \
+       sys_reg((u32)(x)->Op0, (u32)(x)->Op1,                           \
+               (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
+
 static bool read_from_write_only(struct kvm_vcpu *vcpu,
                                 struct sys_reg_params *params,
                                 const struct sys_reg_desc *r)
@@ -273,8 +277,7 @@ static bool trap_loregion(struct kvm_vcpu *vcpu,
                          const struct sys_reg_desc *r)
 {
        u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
-       u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
-                        (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+       u32 sr = reg_to_encoding(r);
 
        if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
                kvm_inject_undefined(vcpu);
@@ -590,6 +593,15 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
        vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
 }
 
+static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
+                                  const struct sys_reg_desc *r)
+{
+       if (kvm_vcpu_has_pmu(vcpu))
+               return 0;
+
+       return REG_HIDDEN;
+}
+
 static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
        u64 pmcr, val;
@@ -613,9 +625,8 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
 {
        u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
-       bool enabled = kvm_vcpu_has_pmu(vcpu);
+       bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
 
-       enabled &= (reg & flags) || vcpu_mode_priv(vcpu);
        if (!enabled)
                kvm_inject_undefined(vcpu);
 
@@ -900,11 +911,6 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                             const struct sys_reg_desc *r)
 {
-       if (!kvm_vcpu_has_pmu(vcpu)) {
-               kvm_inject_undefined(vcpu);
-               return false;
-       }
-
        if (p->is_write) {
                if (!vcpu_mode_priv(vcpu)) {
                        kvm_inject_undefined(vcpu);
@@ -921,10 +927,6 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
        return true;
 }
 
-#define reg_to_encoding(x)                                             \
-       sys_reg((u32)(x)->Op0, (u32)(x)->Op1,                           \
-               (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
-
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)                                     \
        { SYS_DESC(SYS_DBGBVRn_EL1(n)),                                 \
@@ -936,15 +938,18 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
        { SYS_DESC(SYS_DBGWCRn_EL1(n)),                                 \
          trap_wcr, reset_wcr, 0, 0,  get_wcr, set_wcr }
 
+#define PMU_SYS_REG(r)                                         \
+       SYS_DESC(r), .reset = reset_unknown, .visibility = pmu_visibility
+
 /* Macro to expand the PMEVCNTRn_EL0 register */
 #define PMU_PMEVCNTR_EL0(n)                                            \
-       { SYS_DESC(SYS_PMEVCNTRn_EL0(n)),                                       \
-         access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }
+       { PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)),                            \
+         .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
 
 /* Macro to expand the PMEVTYPERn_EL0 register */
 #define PMU_PMEVTYPER_EL0(n)                                           \
-       { SYS_DESC(SYS_PMEVTYPERn_EL0(n)),                                      \
-         access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
+       { PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)),                           \
+         .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
 
 static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                         const struct sys_reg_desc *r)
@@ -1020,8 +1025,7 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
 static u64 read_id_reg(const struct kvm_vcpu *vcpu,
                struct sys_reg_desc const *r, bool raz)
 {
-       u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
-                        (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+       u32 id = reg_to_encoding(r);
        u64 val = raz ? 0 : read_sanitised_ftr_reg(id);
 
        if (id == SYS_ID_AA64PFR0_EL1) {
@@ -1062,8 +1066,7 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
                                  const struct sys_reg_desc *r)
 {
-       u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
-                        (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+       u32 id = reg_to_encoding(r);
 
        switch (id) {
        case SYS_ID_AA64ZFR0_EL1:
@@ -1486,8 +1489,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
        { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
 
-       { SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
-       { SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
+       { PMU_SYS_REG(SYS_PMINTENSET_EL1),
+         .access = access_pminten, .reg = PMINTENSET_EL1 },
+       { PMU_SYS_REG(SYS_PMINTENCLR_EL1),
+         .access = access_pminten, .reg = PMINTENSET_EL1 },
 
        { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
        { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
@@ -1526,23 +1531,36 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
        { SYS_DESC(SYS_CTR_EL0), access_ctr },
 
-       { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
-       { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
-       { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
-       { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
-       { SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
-       { SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
-       { SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
-       { SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
-       { SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
-       { SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
-       { SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
+       { PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
+         .reset = reset_pmcr, .reg = PMCR_EL0 },
+       { PMU_SYS_REG(SYS_PMCNTENSET_EL0),
+         .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
+       { PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
+         .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
+       { PMU_SYS_REG(SYS_PMOVSCLR_EL0),
+         .access = access_pmovs, .reg = PMOVSSET_EL0 },
+       { PMU_SYS_REG(SYS_PMSWINC_EL0),
+         .access = access_pmswinc, .reg = PMSWINC_EL0 },
+       { PMU_SYS_REG(SYS_PMSELR_EL0),
+         .access = access_pmselr, .reg = PMSELR_EL0 },
+       { PMU_SYS_REG(SYS_PMCEID0_EL0),
+         .access = access_pmceid, .reset = NULL },
+       { PMU_SYS_REG(SYS_PMCEID1_EL0),
+         .access = access_pmceid, .reset = NULL },
+       { PMU_SYS_REG(SYS_PMCCNTR_EL0),
+         .access = access_pmu_evcntr, .reg = PMCCNTR_EL0 },
+       { PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
+         .access = access_pmu_evtyper, .reset = NULL },
+       { PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
+         .access = access_pmu_evcntr, .reset = NULL },
        /*
         * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
         * in 32bit mode. Here we choose to reset it as zero for consistency.
         */
-       { SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
-       { SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
+       { PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
+         .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
+       { PMU_SYS_REG(SYS_PMOVSSET_EL0),
+         .access = access_pmovs, .reg = PMOVSSET_EL0 },
 
        { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
        { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
@@ -1694,7 +1712,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
         * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
         * in 32bit mode. Here we choose to reset it as zero for consistency.
         */
-       { SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
+       { PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
+         .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
 
        { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
        { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
index 3c40da4..35d75c6 100644 (file)
@@ -709,10 +709,11 @@ static int do_tag_check_fault(unsigned long far, unsigned int esr,
                              struct pt_regs *regs)
 {
        /*
-        * The architecture specifies that bits 63:60 of FAR_EL1 are UNKNOWN for tag
-        * check faults. Mask them out now so that userspace doesn't see them.
+        * The architecture specifies that bits 63:60 of FAR_EL1 are UNKNOWN
+        * for tag check faults. Set them to the corresponding bits of the
+        * untagged address.
         */
-       far &= (1UL << 60) - 1;
+       far = (__untagged_addr(far) & ~MTE_TAG_MASK) | (far & MTE_TAG_MASK);
        do_bad_area(far, esr, regs);
        return 0;
 }
index d69c979..5d90307 100644 (file)
@@ -54,7 +54,7 @@ extern void ia64_xchg_called_with_bad_pointer(void);
 })
 
 #define xchg(ptr, x)                                                   \
-((__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr))))
+({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
 
 /*
  * Atomic compare and exchange.  Compare OLD with MEM, if identical,
index ed9fc3d..43e8050 100644 (file)
@@ -171,29 +171,34 @@ void vtime_account_hardirq(struct task_struct *tsk)
 static irqreturn_t
 timer_interrupt (int irq, void *dev_id)
 {
-       unsigned long cur_itm, new_itm, ticks;
+       unsigned long new_itm;
 
        if (cpu_is_offline(smp_processor_id())) {
                return IRQ_HANDLED;
        }
 
        new_itm = local_cpu_data->itm_next;
-       cur_itm = ia64_get_itc();
 
-       if (!time_after(cur_itm, new_itm)) {
+       if (!time_after(ia64_get_itc(), new_itm))
                printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
-                      cur_itm, new_itm);
-               ticks = 1;
-       } else {
-               ticks = DIV_ROUND_UP(cur_itm - new_itm,
-                                    local_cpu_data->itm_delta);
-               new_itm += ticks * local_cpu_data->itm_delta;
-       }
+                      ia64_get_itc(), new_itm);
+
+       while (1) {
+               new_itm += local_cpu_data->itm_delta;
+
+               legacy_timer_tick(smp_processor_id() == time_keeper_id);
 
-       if (smp_processor_id() != time_keeper_id)
-               ticks = 0;
+               local_cpu_data->itm_next = new_itm;
 
-       legacy_timer_tick(ticks);
+               if (time_after(new_itm, ia64_get_itc()))
+                       break;
+
+               /*
+                * Allow IPIs to interrupt the timer loop.
+                */
+               local_irq_enable();
+               local_irq_disable();
+       }
 
        do {
                /*
index 19edf8e..292d042 100644 (file)
@@ -51,6 +51,7 @@ extern void kmap_flush_tlb(unsigned long addr);
 
 #define flush_cache_kmaps()    BUG_ON(cpu_has_dc_aliases)
 
+#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev) set_pte(ptep, ptev)
 #define arch_kmap_local_post_map(vaddr, pteval)        local_flush_tlb_one(vaddr)
 #define arch_kmap_local_post_unmap(vaddr)      local_flush_tlb_one(vaddr)
 
index 7d6b4a7..c298061 100644 (file)
@@ -31,7 +31,7 @@
 void __iomem *ioremap(phys_addr_t offset, unsigned long size);
 
 #define iounmap iounmap
-extern void iounmap(void *addr);
+extern void iounmap(void __iomem *addr);
 
 #include <asm-generic/io.h>
 
index 5aed97a..daae13a 100644 (file)
@@ -77,7 +77,7 @@ void __iomem *__ref ioremap(phys_addr_t addr, unsigned long size)
 }
 EXPORT_SYMBOL(ioremap);
 
-void iounmap(void *addr)
+void iounmap(void __iomem *addr)
 {
        /* If the page is from the fixmap pool then we just clear out
         * the fixmap mapping.
index 78b1762..2784621 100644 (file)
@@ -202,9 +202,8 @@ config PREFETCH
        depends on PA8X00 || PA7200
 
 config MLONGCALLS
-       bool "Enable the -mlong-calls compiler option for big kernels"
-       default y if !MODULES || UBSAN || FTRACE
-       default n
+       def_bool y if !MODULES || UBSAN || FTRACE
+       bool "Enable the -mlong-calls compiler option for big kernels" if MODULES && !UBSAN && !FTRACE
        depends on PA8X00
        help
          If you configure the kernel to include many drivers built-in instead
index 959e79c..378f63c 100644 (file)
@@ -47,7 +47,4 @@ extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
 extern int cpu_claim_irq(unsigned int irq, struct irq_chip *, void *);
 extern int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest);
 
-/* soft power switch support (power.c) */
-extern struct tasklet_struct power_tasklet;
-
 #endif /* _ASM_PARISC_IRQ_H */
index beba981..4d37cc9 100644 (file)
@@ -997,10 +997,17 @@ intr_do_preempt:
        bb,<,n  %r20, 31 - PSW_SM_I, intr_restore
        nop
 
+       /* ssm PSW_SM_I done later in intr_restore */
+#ifdef CONFIG_MLONGCALLS
+       ldil    L%intr_restore, %r2
+       load32  preempt_schedule_irq, %r1
+       bv      %r0(%r1)
+       ldo     R%intr_restore(%r2), %r2
+#else
+       ldil    L%intr_restore, %r1
        BL      preempt_schedule_irq, %r2
-       nop
-
-       b,n     intr_restore            /* ssm PSW_SM_I done by intr_restore */
+       ldo     R%intr_restore(%r1), %r2
+#endif
 #endif /* CONFIG_PREEMPTION */
 
        /*
index 1d32b17..c1a8aac 100644 (file)
        nop;                                                            \
        nop;
 
+#define SCV_ENTRY_FLUSH_SLOT                                           \
+       SCV_ENTRY_FLUSH_FIXUP_SECTION;                                  \
+       nop;                                                            \
+       nop;                                                            \
+       nop;
+
 /*
  * r10 must be free to use, r13 must be paca
  */
        STF_ENTRY_BARRIER_SLOT;                                         \
        ENTRY_FLUSH_SLOT
 
+/*
+ * r10, ctr must be free to use, r13 must be paca
+ */
+#define SCV_INTERRUPT_TO_KERNEL                                                \
+       STF_ENTRY_BARRIER_SLOT;                                         \
+       SCV_ENTRY_FLUSH_SLOT
+
 /*
  * Macros for annotating the expected destination of (h)rfid
  *
index f6d2acb..ac605fc 100644 (file)
@@ -240,6 +240,14 @@ label##3:                                          \
        FTR_ENTRY_OFFSET 957b-958b;                     \
        .popsection;
 
+#define SCV_ENTRY_FLUSH_FIXUP_SECTION                  \
+957:                                                   \
+       .pushsection __scv_entry_flush_fixup,"a";       \
+       .align 2;                                       \
+958:                                                   \
+       FTR_ENTRY_OFFSET 957b-958b;                     \
+       .popsection;
+
 #define RFI_FLUSH_FIXUP_SECTION                                \
 951:                                                   \
        .pushsection __rfi_flush_fixup,"a";             \
@@ -273,10 +281,12 @@ label##3:                                         \
 
 extern long stf_barrier_fallback;
 extern long entry_flush_fallback;
+extern long scv_entry_flush_fallback;
 extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
 extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
 extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup;
 extern long __start___entry_flush_fixup, __stop___entry_flush_fixup;
+extern long __start___scv_entry_flush_fixup, __stop___scv_entry_flush_fixup;
 extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
 extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
 extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
index 80a5ae7..c0fcd1b 100644 (file)
@@ -58,6 +58,8 @@ extern pte_t *pkmap_page_table;
 
 #define flush_cache_kmaps()    flush_cache_all()
 
+#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev) \
+       __set_pte_at(mm, vaddr, ptep, ptev, 1)
 #define arch_kmap_local_post_map(vaddr, pteval)        \
        local_flush_tlb_page(NULL, vaddr)
 #define arch_kmap_local_post_unmap(vaddr)      \
index aa1af13..33ddfee 100644 (file)
@@ -75,7 +75,7 @@ BEGIN_FTR_SECTION
        bne     .Ltabort_syscall
 END_FTR_SECTION_IFSET(CPU_FTR_TM)
 #endif
-       INTERRUPT_TO_KERNEL
+       SCV_INTERRUPT_TO_KERNEL
        mr      r10,r1
        ld      r1,PACAKSAVE(r13)
        std     r10,0(r1)
index e02ad6f..6e53f76 100644 (file)
@@ -2993,6 +2993,25 @@ TRAMP_REAL_BEGIN(entry_flush_fallback)
        ld      r11,PACA_EXRFI+EX_R11(r13)
        blr
 
+/*
+ * The SCV entry flush happens with interrupts enabled, so it must disable
+ * interrupts to prevent EXRFI being clobbered by NMIs (e.g., soft_nmi_common).
+ * r10 (containing LR) does not need to be preserved here because scv entry
+ * puts 0 in the pt_regs; CTR can be clobbered for the same reason.
+ */
+TRAMP_REAL_BEGIN(scv_entry_flush_fallback)
+       li      r10,0
+       mtmsrd  r10,1
+       lbz     r10,PACAIRQHAPPENED(r13)
+       ori     r10,r10,PACA_IRQ_HARD_DIS
+       stb     r10,PACAIRQHAPPENED(r13)
+       std     r11,PACA_EXRFI+EX_R11(r13)
+       L1D_DISPLACEMENT_FLUSH
+       ld      r11,PACA_EXRFI+EX_R11(r13)
+       li      r10,MSR_RI
+       mtmsrd  r10,1
+       blr
+
 TRAMP_REAL_BEGIN(rfi_flush_fallback)
        SET_SCRATCH0(r13);
        GET_PACA(r13);
index 4ab426b..72fa3c0 100644 (file)
@@ -145,6 +145,13 @@ SECTIONS
                __stop___entry_flush_fixup = .;
        }
 
+       . = ALIGN(8);
+       __scv_entry_flush_fixup : AT(ADDR(__scv_entry_flush_fixup) - LOAD_OFFSET) {
+               __start___scv_entry_flush_fixup = .;
+               *(__scv_entry_flush_fixup)
+               __stop___scv_entry_flush_fixup = .;
+       }
+
        . = ALIGN(8);
        __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
                __start___stf_exit_barrier_fixup = .;
index 4782105..1fd31b4 100644 (file)
@@ -290,9 +290,6 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
        long *start, *end;
        int i;
 
-       start = PTRRELOC(&__start___entry_flush_fixup);
-       end = PTRRELOC(&__stop___entry_flush_fixup);
-
        instrs[0] = 0x60000000; /* nop */
        instrs[1] = 0x60000000; /* nop */
        instrs[2] = 0x60000000; /* nop */
@@ -312,6 +309,8 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
        if (types & L1D_FLUSH_MTTRIG)
                instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
 
+       start = PTRRELOC(&__start___entry_flush_fixup);
+       end = PTRRELOC(&__stop___entry_flush_fixup);
        for (i = 0; start < end; start++, i++) {
                dest = (void *)start + *start;
 
@@ -328,6 +327,25 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
                patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
        }
 
+       start = PTRRELOC(&__start___scv_entry_flush_fixup);
+       end = PTRRELOC(&__stop___scv_entry_flush_fixup);
+       for (; start < end; start++, i++) {
+               dest = (void *)start + *start;
+
+               pr_devel("patching dest %lx\n", (unsigned long)dest);
+
+               patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+
+               if (types == L1D_FLUSH_FALLBACK)
+                       patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&scv_entry_flush_fallback,
+                                    BRANCH_SET_LINK);
+               else
+                       patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
+
+               patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+       }
+
+
        printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i,
                (types == L1D_FLUSH_NONE)       ? "no" :
                (types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
index 5fa5802..52646f5 100644 (file)
@@ -29,7 +29,6 @@ config SUPERH
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
-       select HAVE_COPY_THREAD_TLS
        select HAVE_DEBUG_BUGVERBOSE
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DYNAMIC_FTRACE
index 8b23ed7..7fb4748 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/sched.h>
 #include <linux/time.h>
 #include <linux/bcd.h>
-#include <linux/rtc.h>
 #include <linux/spinlock.h>
 #include <linux/io.h>
 #include <linux/rtc.h>
index ba6ec04..e6c5ddf 100644 (file)
@@ -27,13 +27,12 @@ CONFIG_NETFILTER=y
 CONFIG_ATALK=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_OFFBOARD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_AEC62XX=y
+CONFIG_ATA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_PATA_ATP867X=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
 CONFIG_SCSI_MULTI_LUN=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=m
index c65667d..e982519 100644 (file)
@@ -20,8 +20,6 @@ CONFIG_IP_PNP=y
 # CONFIG_IPV6 is not set
 # CONFIG_FW_LOADER is not set
 CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
 CONFIG_SMC91X=y
index d10a041..d00376e 100644 (file)
@@ -44,16 +44,14 @@ CONFIG_NET_SCHED=y
 CONFIG_PARPORT=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_PLATFORM=y
-CONFIG_BLK_DEV_GENERIC=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=y
 CONFIG_CHR_DEV_SG=y
 CONFIG_SCSI_SPI_ATTRS=y
 CONFIG_SCSI_FC_ATTRS=y
 CONFIG_ATA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_PATA_PLATFORM=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
 CONFIG_NETDEVICES=y
index 61bec46..4a44cac 100644 (file)
@@ -116,9 +116,6 @@ CONFIG_MTD_UBI_GLUEBI=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=y
 CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_PLATFORM=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=y
 CONFIG_SCSI_MULTI_LUN=y
index 3f1c137..4defc76 100644 (file)
@@ -29,7 +29,6 @@ CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_ROM=y
-CONFIG_IDE=y
 CONFIG_SCSI=y
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
index f0073ed..48b457d 100644 (file)
@@ -39,9 +39,6 @@ CONFIG_IP_PNP_RARP=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_NBD=y
 CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=m
-CONFIG_BLK_DEV_IDETAPE=m
 CONFIG_SCSI=m
 CONFIG_BLK_DEV_SD=m
 CONFIG_BLK_DEV_SR=m
index d0de378..7d54f28 100644 (file)
@@ -63,8 +63,7 @@ config PVR2_DMA
 
 config G2_DMA
        tristate "G2 Bus DMA support"
-       depends on SH_DREAMCAST
-       select SH_DMA_API
+       depends on SH_DREAMCAST && SH_DMA_API
        help
          This enables support for the DMA controller for the Dreamcast's
          G2 bus. Drivers that want this will generally enable this on
index 3519188..d643250 100644 (file)
@@ -16,7 +16,6 @@
 #include <cpu/gpio.h>
 #endif
 
-#define ARCH_NR_GPIOS 512
 #include <asm-generic/gpio.h>
 
 #ifdef CONFIG_GPIOLIB
index 25eb809..e48b3dd 100644 (file)
@@ -14,7 +14,6 @@
 #include <cpu/mmu_context.h>
 #include <asm/page.h>
 #include <asm/cache.h>
-#include <asm/thread_info.h>
 
 ! NOTE:
 ! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
index 703d306..77aa2f8 100644 (file)
@@ -105,7 +105,7 @@ config VSYSCALL
          (the default value) say Y.
 
 config NUMA
-       bool "Non Uniform Memory Access (NUMA) Support"
+       bool "Non-Uniform Memory Access (NUMA) Support"
        depends on MMU && SYS_SUPPORTS_NUMA
        select ARCH_WANT_NUMA_VARIABLE_LOCALITY
        default n
index 4c1ca19..d16d6f5 100644 (file)
@@ -26,7 +26,7 @@
 #include <asm/processor.h>
 #include <asm/mmu_context.h>
 
-static int asids_seq_show(struct seq_file *file, void *iter)
+static int asids_debugfs_show(struct seq_file *file, void *iter)
 {
        struct task_struct *p;
 
@@ -48,18 +48,7 @@ static int asids_seq_show(struct seq_file *file, void *iter)
        return 0;
 }
 
-static int asids_debugfs_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, asids_seq_show, inode->i_private);
-}
-
-static const struct file_operations asids_debugfs_fops = {
-       .owner          = THIS_MODULE,
-       .open           = asids_debugfs_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(asids_debugfs);
 
 static int __init asids_debugfs_init(void)
 {
index 17d7807..b0f1851 100644 (file)
@@ -22,7 +22,7 @@ enum cache_type {
        CACHE_TYPE_UNIFIED,
 };
 
-static int cache_seq_show(struct seq_file *file, void *iter)
+static int cache_debugfs_show(struct seq_file *file, void *iter)
 {
        unsigned int cache_type = (unsigned int)file->private;
        struct cache_info *cache;
@@ -94,18 +94,7 @@ static int cache_seq_show(struct seq_file *file, void *iter)
        return 0;
 }
 
-static int cache_debugfs_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, cache_seq_show, inode->i_private);
-}
-
-static const struct file_operations cache_debugfs_fops = {
-       .owner          = THIS_MODULE,
-       .open           = cache_debugfs_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(cache_debugfs);
 
 static int __init cache_debugfs_init(void)
 {
index b20aba6..68eb7cc 100644 (file)
@@ -812,7 +812,7 @@ bool __in_29bit_mode(void)
         return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
 }
 
-static int pmb_seq_show(struct seq_file *file, void *iter)
+static int pmb_debugfs_show(struct seq_file *file, void *iter)
 {
        int i;
 
@@ -846,18 +846,7 @@ static int pmb_seq_show(struct seq_file *file, void *iter)
        return 0;
 }
 
-static int pmb_debugfs_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, pmb_seq_show, NULL);
-}
-
-static const struct file_operations pmb_debugfs_fops = {
-       .owner          = THIS_MODULE,
-       .open           = pmb_debugfs_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(pmb_debugfs);
 
 static int __init pmb_debugfs_init(void)
 {
index 8751162..c7b2e20 100644 (file)
@@ -50,10 +50,11 @@ extern pte_t *pkmap_page_table;
 
 #define flush_cache_kmaps()    flush_cache_all()
 
-/* FIXME: Use __flush_tlb_one(vaddr) instead of flush_cache_all() -- Anton */
-#define arch_kmap_local_post_map(vaddr, pteval)        flush_cache_all()
-#define arch_kmap_local_post_unmap(vaddr)      flush_cache_all()
-
+/* FIXME: Use __flush_*_one(vaddr) instead of flush_*_all() -- Anton */
+#define arch_kmap_local_pre_map(vaddr, pteval) flush_cache_all()
+#define arch_kmap_local_pre_unmap(vaddr)       flush_cache_all()
+#define arch_kmap_local_post_map(vaddr, pteval)        flush_tlb_all()
+#define arch_kmap_local_post_unmap(vaddr)      flush_tlb_all()
 
 #endif /* __KERNEL__ */
 
index 18d8f17..0904f56 100644 (file)
@@ -73,10 +73,8 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs,
                                                  unsigned int nr)
 {
        if (likely(nr < IA32_NR_syscalls)) {
-               instrumentation_begin();
                nr = array_index_nospec(nr, IA32_NR_syscalls);
                regs->ax = ia32_sys_call_table[nr](regs);
-               instrumentation_end();
        }
 }
 
@@ -91,8 +89,11 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
         * or may not be necessary, but it matches the old asm behavior.
         */
        nr = (unsigned int)syscall_enter_from_user_mode(regs, nr);
+       instrumentation_begin();
 
        do_syscall_32_irqs_on(regs, nr);
+
+       instrumentation_end();
        syscall_exit_to_user_mode(regs);
 }
 
@@ -121,11 +122,12 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
                res = get_user(*(u32 *)&regs->bp,
                       (u32 __user __force *)(unsigned long)(u32)regs->sp);
        }
-       instrumentation_end();
 
        if (res) {
                /* User code screwed up. */
                regs->ax = -EFAULT;
+
+               instrumentation_end();
                syscall_exit_to_user_mode(regs);
                return false;
        }
@@ -135,6 +137,8 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
 
        /* Now this is just like a normal syscall. */
        do_syscall_32_irqs_on(regs, nr);
+
+       instrumentation_end();
        syscall_exit_to_user_mode(regs);
        return true;
 }
index a5aba4a..67a4f1c 100644 (file)
  * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
  * disables preemption so be careful if you intend to use it for long periods
  * of time.
- * If you intend to use the FPU in softirq you need to check first with
+ * If you intend to use the FPU in irq/softirq you need to check first with
  * irq_fpu_usable() if it is possible.
  */
-extern void kernel_fpu_begin(void);
+
+/* Kernel FPU states to initialize in kernel_fpu_begin_mask() */
+#define KFPU_387       _BITUL(0)       /* 387 state will be initialized */
+#define KFPU_MXCSR     _BITUL(1)       /* MXCSR will be initialized */
+
+extern void kernel_fpu_begin_mask(unsigned int kfpu_mask);
 extern void kernel_fpu_end(void);
 extern bool irq_fpu_usable(void);
 extern void fpregs_mark_activate(void);
 
+/* Code that is unaware of kernel_fpu_begin_mask() can use this */
+static inline void kernel_fpu_begin(void)
+{
+       kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
+}
+
 /*
  * Use fpregs_lock() while editing CPU's FPU registers or fpu->state.
  * A context switch will (and softirq might) save CPU's FPU registers to
index 247a60a..f656aab 100644 (file)
@@ -613,6 +613,7 @@ DECLARE_IDTENTRY_VC(X86_TRAP_VC,    exc_vmm_communication);
 
 #ifdef CONFIG_XEN_PV
 DECLARE_IDTENTRY_XENCB(X86_TRAP_OTHER, exc_xen_hypervisor_callback);
+DECLARE_IDTENTRY_RAW(X86_TRAP_OTHER,   exc_xen_unknown_trap);
 #endif
 
 /* Device interrupts common/spurious */
index 5e658ba..9abe842 100644 (file)
@@ -97,6 +97,7 @@
 
 #define        INTEL_FAM6_LAKEFIELD            0x8A
 #define INTEL_FAM6_ALDERLAKE           0x97
+#define INTEL_FAM6_ALDERLAKE_L         0x9A
 
 /* "Small Core" Processors (Atom) */
 
index 0b4920a..e16cccd 100644 (file)
@@ -86,7 +86,7 @@ static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
  * think of extending them - you will be slapped with a stinking trout or a frozen
  * shark will reach you, wherever you are! You've been warned.
  */
-static inline unsigned long long notrace __rdmsr(unsigned int msr)
+static __always_inline unsigned long long __rdmsr(unsigned int msr)
 {
        DECLARE_ARGS(val, low, high);
 
@@ -98,7 +98,7 @@ static inline unsigned long long notrace __rdmsr(unsigned int msr)
        return EAX_EDX_VAL(val, low, high);
 }
 
-static inline void notrace __wrmsr(unsigned int msr, u32 low, u32 high)
+static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high)
 {
        asm volatile("1: wrmsr\n"
                     "2:\n"
index 488a8e8..9239399 100644 (file)
@@ -110,6 +110,8 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
 #define topology_die_id(cpu)                   (cpu_data(cpu).cpu_die_id)
 #define topology_core_id(cpu)                  (cpu_data(cpu).cpu_core_id)
 
+extern unsigned int __max_die_per_package;
+
 #ifdef CONFIG_SMP
 #define topology_die_cpumask(cpu)              (per_cpu(cpu_die_map, cpu))
 #define topology_core_cpumask(cpu)             (per_cpu(cpu_core_map, cpu))
@@ -118,8 +120,6 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
 extern unsigned int __max_logical_packages;
 #define topology_max_packages()                        (__max_logical_packages)
 
-extern unsigned int __max_die_per_package;
-
 static inline int topology_max_die_per_package(void)
 {
        return __max_die_per_package;
index f8ca66f..347a956 100644 (file)
@@ -542,12 +542,12 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
                u32 ecx;
 
                ecx = cpuid_ecx(0x8000001e);
-               nodes_per_socket = ((ecx >> 8) & 7) + 1;
+               __max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1;
        } else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
                u64 value;
 
                rdmsrl(MSR_FAM10H_NODE_ID, value);
-               nodes_per_socket = ((value >> 3) & 7) + 1;
+               __max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1;
        }
 
        if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
index 13d3f1c..e133ce1 100644 (file)
@@ -1992,10 +1992,9 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
         * that out because it's an indirect call. Annotate it.
         */
        instrumentation_begin();
-       trace_hardirqs_off_finish();
+
        machine_check_vector(regs);
-       if (regs->flags & X86_EFLAGS_IF)
-               trace_hardirqs_on_prepare();
+
        instrumentation_end();
        irqentry_nmi_exit(regs, irq_state);
 }
@@ -2004,7 +2003,9 @@ static __always_inline void exc_machine_check_user(struct pt_regs *regs)
 {
        irqentry_enter_from_user_mode(regs);
        instrumentation_begin();
+
        machine_check_vector(regs);
+
        instrumentation_end();
        irqentry_exit_to_user_mode(regs);
 }
index 1068002..8678864 100644 (file)
 #define BITS_SHIFT_NEXT_LEVEL(eax)     ((eax) & 0x1f)
 #define LEVEL_MAX_SIBLINGS(ebx)                ((ebx) & 0xffff)
 
-#ifdef CONFIG_SMP
 unsigned int __max_die_per_package __read_mostly = 1;
 EXPORT_SYMBOL(__max_die_per_package);
 
+#ifdef CONFIG_SMP
 /*
  * Check if the given CPUID extended topology "leaf" is implemented
  */
index eb86a2b..571220a 100644 (file)
@@ -121,7 +121,7 @@ int copy_fpregs_to_fpstate(struct fpu *fpu)
 }
 EXPORT_SYMBOL(copy_fpregs_to_fpstate);
 
-void kernel_fpu_begin(void)
+void kernel_fpu_begin_mask(unsigned int kfpu_mask)
 {
        preempt_disable();
 
@@ -141,13 +141,14 @@ void kernel_fpu_begin(void)
        }
        __cpu_invalidate_fpregs_state();
 
-       if (boot_cpu_has(X86_FEATURE_XMM))
+       /* Put sane initial values into the control registers. */
+       if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
                ldmxcsr(MXCSR_DEFAULT);
 
-       if (boot_cpu_has(X86_FEATURE_FPU))
+       if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
                asm volatile ("fninit");
 }
-EXPORT_SYMBOL_GPL(kernel_fpu_begin);
+EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);
 
 void kernel_fpu_end(void)
 {
index 740f3bd..3412c45 100644 (file)
@@ -660,17 +660,6 @@ static void __init trim_platform_memory_ranges(void)
 
 static void __init trim_bios_range(void)
 {
-       /*
-        * A special case is the first 4Kb of memory;
-        * This is a BIOS owned area, not kernel ram, but generally
-        * not listed as such in the E820 table.
-        *
-        * This typically reserves additional memory (64KiB by default)
-        * since some BIOSes are known to corrupt low memory.  See the
-        * Kconfig help text for X86_RESERVE_LOW.
-        */
-       e820__range_update(0, PAGE_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
-
        /*
         * special case: Some BIOSes report the PC BIOS
         * area (640Kb -> 1Mb) as RAM even though it is not.
@@ -728,6 +717,15 @@ early_param("reservelow", parse_reservelow);
 
 static void __init trim_low_memory_range(void)
 {
+       /*
+        * A special case is the first 4Kb of memory;
+        * This is a BIOS owned area, not kernel ram, but generally
+        * not listed as such in the E820 table.
+        *
+        * This typically reserves additional memory (64KiB by default)
+        * since some BIOSes are known to corrupt low memory.  See the
+        * Kconfig help text for X86_RESERVE_LOW.
+        */
        memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
 }
        
index 0bd1a0f..84c1821 100644 (file)
@@ -225,7 +225,7 @@ static inline u64 sev_es_rd_ghcb_msr(void)
        return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
 }
 
-static inline void sev_es_wr_ghcb_msr(u64 val)
+static __always_inline void sev_es_wr_ghcb_msr(u64 val)
 {
        u32 low, high;
 
@@ -286,6 +286,12 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
        u16 d2;
        u8  d1;
 
+       /* If the instruction ran in kernel mode and the I/O buffer is in kernel space */
+       if (!user_mode(ctxt->regs) && !access_ok(target, size)) {
+               memcpy(dst, buf, size);
+               return ES_OK;
+       }
+
        switch (size) {
        case 1:
                memcpy(&d1, buf, 1);
@@ -335,6 +341,12 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
        u16 d2;
        u8  d1;
 
+       /* If the instruction ran in kernel mode and the I/O buffer is in kernel space */
+       if (!user_mode(ctxt->regs) && !access_ok(s, size)) {
+               memcpy(buf, src, size);
+               return ES_OK;
+       }
+
        switch (size) {
        case 1:
                if (get_user(d1, s))
index 8ca66af..117e24f 100644 (file)
@@ -56,6 +56,7 @@
 #include <linux/numa.h>
 #include <linux/pgtable.h>
 #include <linux/overflow.h>
+#include <linux/syscore_ops.h>
 
 #include <asm/acpi.h>
 #include <asm/desc.h>
@@ -2083,6 +2084,23 @@ static void init_counter_refs(void)
        this_cpu_write(arch_prev_mperf, mperf);
 }
 
+#ifdef CONFIG_PM_SLEEP
+static struct syscore_ops freq_invariance_syscore_ops = {
+       .resume = init_counter_refs,
+};
+
+static void register_freq_invariance_syscore_ops(void)
+{
+       /* Bail out if registered already. */
+       if (freq_invariance_syscore_ops.node.prev)
+               return;
+
+       register_syscore_ops(&freq_invariance_syscore_ops);
+}
+#else
+static inline void register_freq_invariance_syscore_ops(void) {}
+#endif
+
 static void init_freq_invariance(bool secondary, bool cppc_ready)
 {
        bool ret = false;
@@ -2109,6 +2127,7 @@ static void init_freq_invariance(bool secondary, bool cppc_ready)
        if (ret) {
                init_counter_refs();
                static_branch_enable(&arch_scale_freq_key);
+               register_freq_invariance_syscore_ops();
                pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n", arch_max_freq_ratio);
        } else {
                pr_debug("Couldn't determine max cpu frequency, necessary for scale-invariant accounting.\n");
index f15bc16..a889563 100644 (file)
@@ -9,31 +9,6 @@
        (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
         | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)
 
-static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
-                                            enum kvm_reg reg)
-{
-       return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
-}
-
-static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
-                                        enum kvm_reg reg)
-{
-       return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
-}
-
-static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
-                                              enum kvm_reg reg)
-{
-       __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
-}
-
-static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
-                                          enum kvm_reg reg)
-{
-       __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
-       __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
-}
-
 #define BUILD_KVM_GPR_ACCESSORS(lname, uname)                                \
 static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
 {                                                                            \
@@ -43,7 +18,6 @@ static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,              \
                                                unsigned long val)            \
 {                                                                            \
        vcpu->arch.regs[VCPU_REGS_##uname] = val;                             \
-       kvm_register_mark_dirty(vcpu, VCPU_REGS_##uname);                     \
 }
 BUILD_KVM_GPR_ACCESSORS(rax, RAX)
 BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
@@ -63,6 +37,31 @@ BUILD_KVM_GPR_ACCESSORS(r14, R14)
 BUILD_KVM_GPR_ACCESSORS(r15, R15)
 #endif
 
+static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
+                                            enum kvm_reg reg)
+{
+       return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+}
+
+static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
+                                        enum kvm_reg reg)
+{
+       return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
+}
+
+static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
+                                              enum kvm_reg reg)
+{
+       __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+}
+
+static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
+                                          enum kvm_reg reg)
+{
+       __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+       __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
+}
+
 static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
 {
        if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
index 581925e..261be1d 100644 (file)
 #define PT32_ROOT_LEVEL 2
 #define PT32E_ROOT_LEVEL 3
 
-static inline u64 rsvd_bits(int s, int e)
+static __always_inline u64 rsvd_bits(int s, int e)
 {
+       BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);
+
+       if (__builtin_constant_p(e))
+               BUILD_BUG_ON(e > 63);
+       else
+               e &= 63;
+
        if (e < s)
                return 0;
 
index cb4c6ee..7a605ad 100644 (file)
@@ -200,6 +200,9 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
+       if (WARN_ON(!is_guest_mode(vcpu)))
+               return true;
+
        if (!nested_svm_vmrun_msrpm(svm)) {
                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                vcpu->run->internal.suberror =
index c8ffdbc..ac652bc 100644 (file)
@@ -1415,16 +1415,13 @@ static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
         * to be returned:
         *   GPRs RAX, RBX, RCX, RDX
         *
-        * Copy their values to the GHCB if they are dirty.
+        * Copy their values, even if they may not have been written during the
+        * VM-Exit.  It's the guest's responsibility to not consume random data.
         */
-       if (kvm_register_is_dirty(vcpu, VCPU_REGS_RAX))
-               ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
-       if (kvm_register_is_dirty(vcpu, VCPU_REGS_RBX))
-               ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
-       if (kvm_register_is_dirty(vcpu, VCPU_REGS_RCX))
-               ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
-       if (kvm_register_is_dirty(vcpu, VCPU_REGS_RDX))
-               ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
+       ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
+       ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
+       ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
+       ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
 }
 
 static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
index 7ef1717..f923e14 100644 (file)
@@ -3739,6 +3739,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
+       trace_kvm_entry(vcpu);
+
        svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
        svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
        svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
index 0fbb469..f2b9bfb 100644 (file)
@@ -3124,13 +3124,9 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
 {
-       struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       struct kvm_host_map *map;
-       struct page *page;
-       u64 hpa;
 
        /*
         * hv_evmcs may end up being not mapped after migration (when
@@ -3153,6 +3149,17 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
                }
        }
 
+       return true;
+}
+
+static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+{
+       struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       struct kvm_host_map *map;
+       struct page *page;
+       u64 hpa;
+
        if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
                /*
                 * Translate L1 physical address to host physical
@@ -3221,6 +3228,18 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
                exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
        else
                exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
+
+       return true;
+}
+
+static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
+{
+       if (!nested_get_evmcs_page(vcpu))
+               return false;
+
+       if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
+               return false;
+
        return true;
 }
 
@@ -6077,11 +6096,14 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
        if (is_guest_mode(vcpu)) {
                sync_vmcs02_to_vmcs12(vcpu, vmcs12);
                sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
-       } else if (!vmx->nested.need_vmcs12_to_shadow_sync) {
-               if (vmx->nested.hv_evmcs)
-                       copy_enlightened_to_vmcs12(vmx);
-               else if (enable_shadow_vmcs)
-                       copy_shadow_to_vmcs12(vmx);
+       } else  {
+               copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
+               if (!vmx->nested.need_vmcs12_to_shadow_sync) {
+                       if (vmx->nested.hv_evmcs)
+                               copy_enlightened_to_vmcs12(vmx);
+                       else if (enable_shadow_vmcs)
+                               copy_shadow_to_vmcs12(vmx);
+               }
        }
 
        BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
@@ -6602,7 +6624,7 @@ struct kvm_x86_nested_ops vmx_nested_ops = {
        .hv_timer_pending = nested_vmx_preemption_timer_pending,
        .get_state = vmx_get_nested_state,
        .set_state = vmx_set_nested_state,
-       .get_nested_state_pages = nested_get_vmcs12_pages,
+       .get_nested_state_pages = vmx_get_nested_state_pages,
        .write_log_dirty = nested_vmx_write_pml_buffer,
        .enable_evmcs = nested_enable_evmcs,
        .get_evmcs_version = nested_get_evmcs_version,
index a886a47..cdf5f34 100644 (file)
@@ -29,7 +29,7 @@ static struct kvm_event_hw_type_mapping intel_arch_events[] = {
        [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
        [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
        [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
-       [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
+       [7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
 };
 
 /* mapping between fixed pmc index and intel_arch_events array */
@@ -345,7 +345,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 
        pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
                                         x86_pmu.num_counters_gp);
+       eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp);
        pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
+       eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len);
        pmu->available_event_types = ~entry->ebx &
                                        ((1ull << eax.split.mask_length) - 1);
 
@@ -355,6 +357,8 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
                pmu->nr_arch_fixed_counters =
                        min_t(int, edx.split.num_counters_fixed,
                              x86_pmu.num_counters_fixed);
+               edx.split.bit_width_fixed = min_t(int,
+                       edx.split.bit_width_fixed, x86_pmu.bit_width_fixed);
                pmu->counter_bitmask[KVM_PMC_FIXED] =
                        ((u64)1 << edx.split.bit_width_fixed) - 1;
        }
index 2af05d3..cc60b1f 100644 (file)
@@ -6653,6 +6653,8 @@ reenter_guest:
        if (vmx->emulation_required)
                return EXIT_FASTPATH_NONE;
 
+       trace_kvm_entry(vcpu);
+
        if (vmx->ple_window_dirty) {
                vmx->ple_window_dirty = false;
                vmcs_write32(PLE_WINDOW, vmx->ple_window);
index 9a8969a..76bce83 100644 (file)
@@ -105,6 +105,7 @@ static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
 static void process_nmi(struct kvm_vcpu *vcpu);
+static void process_smi(struct kvm_vcpu *vcpu);
 static void enter_smm(struct kvm_vcpu *vcpu);
 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 static void store_regs(struct kvm_vcpu *vcpu);
@@ -4230,6 +4231,9 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 {
        process_nmi(vcpu);
 
+       if (kvm_check_request(KVM_REQ_SMI, vcpu))
+               process_smi(vcpu);
+
        /*
         * In guest mode, payload delivery should be deferred,
         * so that the L1 hypervisor can intercept #PF before
@@ -8802,9 +8806,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
        if (kvm_request_pending(vcpu)) {
                if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
-                       if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
-                               ;
-                       else if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
+                       if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
                                r = 0;
                                goto out;
                        }
@@ -8988,8 +8990,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                kvm_x86_ops.request_immediate_exit(vcpu);
        }
 
-       trace_kvm_entry(vcpu);
-
        fpregs_assert_state_consistent();
        if (test_thread_flag(TIF_NEED_FPU_LOAD))
                switch_fpu_return();
@@ -11556,6 +11556,7 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
 }
 EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
 
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
index 4321fa0..419365c 100644 (file)
 #include <asm/fpu/api.h>
 #include <asm/asm.h>
 
+/*
+ * Use KFPU_387.  MMX instructions are not affected by MXCSR,
+ * but both AMD and Intel documentation states that even integer MMX
+ * operations will result in #MF if an exception is pending in FCW.
+ *
+ * EMMS is not needed afterwards because, after calling kernel_fpu_end(),
+ * any subsequent user of the 387 stack will reinitialize it using
+ * KFPU_387.
+ */
+
 void *_mmx_memcpy(void *to, const void *from, size_t len)
 {
        void *p;
@@ -37,7 +47,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
        p = to;
        i = len >> 6; /* len/64 */
 
-       kernel_fpu_begin();
+       kernel_fpu_begin_mask(KFPU_387);
 
        __asm__ __volatile__ (
                "1: prefetch (%0)\n"            /* This set is 28 bytes */
@@ -127,7 +137,7 @@ static void fast_clear_page(void *page)
 {
        int i;
 
-       kernel_fpu_begin();
+       kernel_fpu_begin_mask(KFPU_387);
 
        __asm__ __volatile__ (
                "  pxor %%mm0, %%mm0\n" : :
@@ -160,7 +170,7 @@ static void fast_copy_page(void *to, void *from)
 {
        int i;
 
-       kernel_fpu_begin();
+       kernel_fpu_begin_mask(KFPU_387);
 
        /*
         * maybe the prefetch stuff can go before the expensive fnsave...
@@ -247,7 +257,7 @@ static void fast_clear_page(void *page)
 {
        int i;
 
-       kernel_fpu_begin();
+       kernel_fpu_begin_mask(KFPU_387);
 
        __asm__ __volatile__ (
                "  pxor %%mm0, %%mm0\n" : :
@@ -282,7 +292,7 @@ static void fast_copy_page(void *to, void *from)
 {
        int i;
 
-       kernel_fpu_begin();
+       kernel_fpu_begin_mask(KFPU_387);
 
        __asm__ __volatile__ (
                "1: prefetch (%0)\n"
index 4409306..9a5a50c 100644 (file)
@@ -583,6 +583,13 @@ DEFINE_IDTENTRY_RAW(xenpv_exc_debug)
                exc_debug(regs);
 }
 
+DEFINE_IDTENTRY_RAW(exc_xen_unknown_trap)
+{
+       /* This should never happen and there is no way to handle it. */
+       pr_err("Unknown trap in Xen PV mode.");
+       BUG();
+}
+
 struct trap_array_entry {
        void (*orig)(void);
        void (*xen)(void);
@@ -631,6 +638,7 @@ static bool __ref get_trap_addr(void **addr, unsigned int ist)
 {
        unsigned int nr;
        bool ist_okay = false;
+       bool found = false;
 
        /*
         * Replace trap handler addresses by Xen specific ones.
@@ -645,6 +653,7 @@ static bool __ref get_trap_addr(void **addr, unsigned int ist)
                if (*addr == entry->orig) {
                        *addr = entry->xen;
                        ist_okay = entry->ist_okay;
+                       found = true;
                        break;
                }
        }
@@ -655,9 +664,13 @@ static bool __ref get_trap_addr(void **addr, unsigned int ist)
                nr = (*addr - (void *)early_idt_handler_array[0]) /
                     EARLY_IDT_HANDLER_SIZE;
                *addr = (void *)xen_early_idt_handler_array[nr];
+               found = true;
        }
 
-       if (WARN_ON(ist != 0 && !ist_okay))
+       if (!found)
+               *addr = (void *)xen_asm_exc_xen_unknown_trap;
+
+       if (WARN_ON(found && ist != 0 && !ist_okay))
                return false;
 
        return true;
index 1cb0e84..53cf8aa 100644 (file)
@@ -178,6 +178,7 @@ xen_pv_trap asm_exc_simd_coprocessor_error
 #ifdef CONFIG_IA32_EMULATION
 xen_pv_trap entry_INT80_compat
 #endif
+xen_pv_trap asm_exc_xen_unknown_trap
 xen_pv_trap asm_exc_xen_hypervisor_callback
 
        __INIT
index 58ff363..1db063b 100644 (file)
@@ -586,6 +586,8 @@ static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device,
        if (!device)
                return -EINVAL;
 
+       *device = NULL;
+
        status = acpi_get_data_full(handle, acpi_scan_drop_device,
                                    (void **)device, callback);
        if (ACPI_FAILURE(status) || !*device) {
index 14f1658..6eb4c7a 100644 (file)
@@ -208,6 +208,16 @@ int device_links_read_lock_held(void)
 #endif
 #endif /* !CONFIG_SRCU */
 
+static bool device_is_ancestor(struct device *dev, struct device *target)
+{
+       while (target->parent) {
+               target = target->parent;
+               if (dev == target)
+                       return true;
+       }
+       return false;
+}
+
 /**
  * device_is_dependent - Check if one device depends on another one
  * @dev: Device to check dependencies for.
@@ -221,7 +231,12 @@ int device_is_dependent(struct device *dev, void *target)
        struct device_link *link;
        int ret;
 
-       if (dev == target)
+       /*
+        * The "ancestors" check is needed to catch the case when the target
+        * device has not been completely initialized yet and it is still
+        * missing from the list of children of its parent device.
+        */
+       if (dev == target || device_is_ancestor(dev, target))
                return 1;
 
        ret = device_for_each_child(dev, target, device_is_dependent);
@@ -456,7 +471,9 @@ static int devlink_add_symlinks(struct device *dev,
        struct device *con = link->consumer;
        char *buf;
 
-       len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
+       len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
+                 strlen(dev_bus_name(con)) + strlen(dev_name(con)));
+       len += strlen(":");
        len += strlen("supplier:") + 1;
        buf = kzalloc(len, GFP_KERNEL);
        if (!buf)
@@ -470,12 +487,12 @@ static int devlink_add_symlinks(struct device *dev,
        if (ret)
                goto err_con;
 
-       snprintf(buf, len, "consumer:%s", dev_name(con));
+       snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
        ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
        if (ret)
                goto err_con_dev;
 
-       snprintf(buf, len, "supplier:%s", dev_name(sup));
+       snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
        ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
        if (ret)
                goto err_sup_dev;
@@ -483,7 +500,7 @@ static int devlink_add_symlinks(struct device *dev,
        goto out;
 
 err_sup_dev:
-       snprintf(buf, len, "consumer:%s", dev_name(con));
+       snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
        sysfs_remove_link(&sup->kobj, buf);
 err_con_dev:
        sysfs_remove_link(&link->link_dev.kobj, "consumer");
@@ -506,7 +523,9 @@ static void devlink_remove_symlinks(struct device *dev,
        sysfs_remove_link(&link->link_dev.kobj, "consumer");
        sysfs_remove_link(&link->link_dev.kobj, "supplier");
 
-       len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
+       len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
+                 strlen(dev_bus_name(con)) + strlen(dev_name(con)));
+       len += strlen(":");
        len += strlen("supplier:") + 1;
        buf = kzalloc(len, GFP_KERNEL);
        if (!buf) {
@@ -514,9 +533,9 @@ static void devlink_remove_symlinks(struct device *dev,
                return;
        }
 
-       snprintf(buf, len, "supplier:%s", dev_name(sup));
+       snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
        sysfs_remove_link(&con->kobj, buf);
-       snprintf(buf, len, "consumer:%s", dev_name(con));
+       snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
        sysfs_remove_link(&sup->kobj, buf);
        kfree(buf);
 }
@@ -737,8 +756,9 @@ struct device_link *device_link_add(struct device *consumer,
 
        link->link_dev.class = &devlink_class;
        device_set_pm_not_required(&link->link_dev);
-       dev_set_name(&link->link_dev, "%s--%s",
-                    dev_name(supplier), dev_name(consumer));
+       dev_set_name(&link->link_dev, "%s:%s--%s:%s",
+                    dev_bus_name(supplier), dev_name(supplier),
+                    dev_bus_name(consumer), dev_name(consumer));
        if (device_register(&link->link_dev)) {
                put_device(consumer);
                put_device(supplier);
@@ -1808,9 +1828,7 @@ const char *dev_driver_string(const struct device *dev)
         * never change once they are set, so they don't need special care.
         */
        drv = READ_ONCE(dev->driver);
-       return drv ? drv->name :
-                       (dev->bus ? dev->bus->name :
-                       (dev->class ? dev->class->name : ""));
+       return drv ? drv->name : dev_bus_name(dev);
 }
 EXPORT_SYMBOL(dev_driver_string);
 
index 2f32f38..9179825 100644 (file)
@@ -370,13 +370,6 @@ static void driver_bound(struct device *dev)
 
        device_pm_check_callbacks(dev);
 
-       /*
-        * Reorder successfully probed devices to the end of the device list.
-        * This ensures that suspend/resume order matches probe order, which
-        * is usually what drivers rely on.
-        */
-       device_pm_move_to_tail(dev);
-
        /*
         * Make sure the device is no longer in one of the deferred lists and
         * kick off retrying all pending devices
@@ -619,6 +612,8 @@ dev_groups_failed:
        else if (drv->remove)
                drv->remove(dev);
 probe_failed:
+       kfree(dev->dma_range_map);
+       dev->dma_range_map = NULL;
        if (dev->bus)
                blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
                                             BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
index 95fd154..8456d83 100644 (file)
@@ -366,6 +366,8 @@ int devm_platform_get_irqs_affinity(struct platform_device *dev,
                return -ERANGE;
 
        nvec = platform_irq_count(dev);
+       if (nvec < 0)
+               return nvec;
 
        if (nvec < minvec)
                return -ENOSPC;
index 5265975..e1c6798 100644 (file)
@@ -945,7 +945,8 @@ static void blkif_set_queue_limits(struct blkfront_info *info)
        if (info->feature_discard) {
                blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
                blk_queue_max_discard_sectors(rq, get_capacity(gd));
-               rq->limits.discard_granularity = info->discard_granularity;
+               rq->limits.discard_granularity = info->discard_granularity ?:
+                                                info->physical_sector_size;
                rq->limits.discard_alignment = info->discard_alignment;
                if (info->feature_secdiscard)
                        blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
@@ -2179,19 +2180,12 @@ static void blkfront_closing(struct blkfront_info *info)
 
 static void blkfront_setup_discard(struct blkfront_info *info)
 {
-       int err;
-       unsigned int discard_granularity;
-       unsigned int discard_alignment;
-
        info->feature_discard = 1;
-       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-               "discard-granularity", "%u", &discard_granularity,
-               "discard-alignment", "%u", &discard_alignment,
-               NULL);
-       if (!err) {
-               info->discard_granularity = discard_granularity;
-               info->discard_alignment = discard_alignment;
-       }
+       info->discard_granularity = xenbus_read_unsigned(info->xbdev->otherend,
+                                                        "discard-granularity",
+                                                        0);
+       info->discard_alignment = xenbus_read_unsigned(info->xbdev->otherend,
+                                                      "discard-alignment", 0);
        info->feature_secdiscard =
                !!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
                                       0);
index 845b6c4..2344d56 100644 (file)
@@ -54,6 +54,7 @@ static int integrator_lm_populate(int num, struct device *dev)
                        ret = of_platform_default_populate(child, NULL, dev);
                        if (ret) {
                                dev_err(dev, "failed to populate module\n");
+                               of_node_put(child);
                                return ret;
                        }
                }
index 3061896..47d9ec3 100644 (file)
@@ -6,8 +6,6 @@ config MXC_CLK
 
 config MXC_CLK_SCU
        tristate
-       depends on ARCH_MXC
-       depends on IMX_SCU && HAVE_ARM_SMCCC
 
 config CLK_IMX1
        def_bool SOC_IMX1
index eea69d4..7aa7f4a 100644 (file)
@@ -392,7 +392,8 @@ static int mmp2_audio_clk_remove(struct platform_device *pdev)
        return 0;
 }
 
-static int __maybe_unused mmp2_audio_clk_suspend(struct device *dev)
+#ifdef CONFIG_PM
+static int mmp2_audio_clk_suspend(struct device *dev)
 {
        struct mmp2_audio_clk *priv = dev_get_drvdata(dev);
 
@@ -404,7 +405,7 @@ static int __maybe_unused mmp2_audio_clk_suspend(struct device *dev)
        return 0;
 }
 
-static int __maybe_unused mmp2_audio_clk_resume(struct device *dev)
+static int mmp2_audio_clk_resume(struct device *dev)
 {
        struct mmp2_audio_clk *priv = dev_get_drvdata(dev);
 
@@ -415,6 +416,7 @@ static int __maybe_unused mmp2_audio_clk_resume(struct device *dev)
 
        return 0;
 }
+#endif
 
 static const struct dev_pm_ops mmp2_audio_clk_pm_ops = {
        SET_RUNTIME_PM_OPS(mmp2_audio_clk_suspend, mmp2_audio_clk_resume, NULL)
index d82d725..b05901b 100644 (file)
@@ -891,21 +891,6 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
        },
 };
 
-static struct clk_branch gcc_camera_ahb_clk = {
-       .halt_reg = 0xb008,
-       .halt_check = BRANCH_HALT,
-       .hwcg_reg = 0xb008,
-       .hwcg_bit = 1,
-       .clkr = {
-               .enable_reg = 0xb008,
-               .enable_mask = BIT(0),
-               .hw.init = &(struct clk_init_data){
-                       .name = "gcc_camera_ahb_clk",
-                       .ops = &clk_branch2_ops,
-               },
-       },
-};
-
 static struct clk_branch gcc_camera_hf_axi_clk = {
        .halt_reg = 0xb020,
        .halt_check = BRANCH_HALT,
@@ -2317,7 +2302,6 @@ static struct clk_regmap *gcc_sc7180_clocks[] = {
        [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
        [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
        [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
-       [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
        [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
        [GCC_CAMERA_THROTTLE_HF_AXI_CLK] = &gcc_camera_throttle_hf_axi_clk.clkr,
        [GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
@@ -2519,11 +2503,12 @@ static int gcc_sc7180_probe(struct platform_device *pdev)
 
        /*
         * Keep the clocks always-ON
-        * GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_DISP_AHB_CLK
-        * GCC_GPU_CFG_AHB_CLK
+        * GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_CAMERA_AHB_CLK,
+        * GCC_DISP_AHB_CLK, GCC_GPU_CFG_AHB_CLK
         */
        regmap_update_bits(regmap, 0x48004, BIT(0), BIT(0));
        regmap_update_bits(regmap, 0x0b004, BIT(0), BIT(0));
+       regmap_update_bits(regmap, 0x0b008, BIT(0), BIT(0));
        regmap_update_bits(regmap, 0x0b00c, BIT(0), BIT(0));
        regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
 
index 6cb6617..ab594a0 100644 (file)
@@ -722,7 +722,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
                .name = "gcc_sdcc2_apps_clk_src",
                .parent_data = gcc_parent_data_4,
                .num_parents = 5,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_floor_ops,
        },
 };
 
@@ -745,7 +745,7 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
                .name = "gcc_sdcc4_apps_clk_src",
                .parent_data = gcc_parent_data_0,
                .num_parents = 3,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_floor_ops,
        },
 };
 
index a60aee1..65df9ef 100644 (file)
@@ -235,36 +235,6 @@ static ssize_t ti_eqep_position_ceiling_write(struct counter_device *counter,
        return len;
 }
 
-static ssize_t ti_eqep_position_floor_read(struct counter_device *counter,
-                                          struct counter_count *count,
-                                          void *ext_priv, char *buf)
-{
-       struct ti_eqep_cnt *priv = counter->priv;
-       u32 qposinit;
-
-       regmap_read(priv->regmap32, QPOSINIT, &qposinit);
-
-       return sprintf(buf, "%u\n", qposinit);
-}
-
-static ssize_t ti_eqep_position_floor_write(struct counter_device *counter,
-                                           struct counter_count *count,
-                                           void *ext_priv, const char *buf,
-                                           size_t len)
-{
-       struct ti_eqep_cnt *priv = counter->priv;
-       int err;
-       u32 res;
-
-       err = kstrtouint(buf, 0, &res);
-       if (err < 0)
-               return err;
-
-       regmap_write(priv->regmap32, QPOSINIT, res);
-
-       return len;
-}
-
 static ssize_t ti_eqep_position_enable_read(struct counter_device *counter,
                                            struct counter_count *count,
                                            void *ext_priv, char *buf)
@@ -301,11 +271,6 @@ static struct counter_count_ext ti_eqep_position_ext[] = {
                .read   = ti_eqep_position_ceiling_read,
                .write  = ti_eqep_position_ceiling_write,
        },
-       {
-               .name   = "floor",
-               .read   = ti_eqep_position_floor_read,
-               .write  = ti_eqep_position_floor_write,
-       },
        {
                .name   = "enable",
                .read   = ti_eqep_position_enable_read,
index fabfaac..fa56b45 100644 (file)
@@ -300,11 +300,11 @@ struct mv_cesa_tdma_desc {
        __le32 byte_cnt;
        union {
                __le32 src;
-               dma_addr_t src_dma;
+               u32 src_dma;
        };
        union {
                __le32 dst;
-               dma_addr_t dst_dma;
+               u32 dst_dma;
        };
        __le32 next_dma;
 
index 1d2e5b8..c027d99 100644 (file)
@@ -13,6 +13,7 @@ config IMX_DSP
 config IMX_SCU
        bool "IMX SCU Protocol driver"
        depends on IMX_MBOX
+       select SOC_BUS
        help
          The System Controller Firmware (SCFW) is a low-level system function
          which runs on a dedicated Cortex-M core to provide power, clock, and
index c70f46e..dea65d8 100644 (file)
@@ -521,7 +521,8 @@ config GPIO_SAMA5D2_PIOBU
 
 config GPIO_SIFIVE
        bool "SiFive GPIO support"
-       depends on OF_GPIO && IRQ_DOMAIN_HIERARCHY
+       depends on OF_GPIO
+       select IRQ_DOMAIN_HIERARCHY
        select GPIO_GENERIC
        select GPIOLIB_IRQCHIP
        select REGMAP_MMIO
@@ -597,6 +598,8 @@ config GPIO_TEGRA
        default ARCH_TEGRA
        depends on ARCH_TEGRA || COMPILE_TEST
        depends on OF_GPIO
+       select GPIOLIB_IRQCHIP
+       select IRQ_DOMAIN_HIERARCHY
        help
          Say yes here to support GPIO pins on NVIDIA Tegra SoCs.
 
index 672681a..a912a8f 100644 (file)
@@ -676,20 +676,17 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
        else
                state->duty_cycle = 1;
 
+       val = (unsigned long long) u; /* on duration */
        regmap_read(mvpwm->regs, mvebu_pwmreg_blink_off_duration(mvpwm), &u);
-       val = (unsigned long long) u * NSEC_PER_SEC;
+       val += (unsigned long long) u; /* period = on + off duration */
+       val *= NSEC_PER_SEC;
        do_div(val, mvpwm->clk_rate);
-       if (val < state->duty_cycle) {
+       if (val > UINT_MAX)
+               state->period = UINT_MAX;
+       else if (val)
+               state->period = val;
+       else
                state->period = 1;
-       } else {
-               val -= state->duty_cycle;
-               if (val > UINT_MAX)
-                       state->period = UINT_MAX;
-               else if (val)
-                       state->period = val;
-               else
-                       state->period = 1;
-       }
 
        regmap_read(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset, &u);
        if (u)
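
Worked example (not part of the applied diff) of the new period math: the on and off counters are summed first and scaled to nanoseconds once, instead of scaling the off time alone and re-adding the duty cycle. Assuming clk_rate = 25 MHz, on = 250 counts, off = 750 counts:

	unsigned long long on = 250, off = 750, clk_rate = 25000000;

	/* period = (on + off) counts converted to ns at the blink clock rate */
	unsigned long long period_ns = (on + off) * 1000000000ULL / clk_rate;
	/* = 1000 * 10^9 / (25 * 10^6) = 40000 ns, then clamped to [1, UINT_MAX] */
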
index 12b679c..1a7b511 100644 (file)
@@ -1979,6 +1979,21 @@ struct gpio_chardev_data {
 #endif
 };
 
+static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
+{
+       struct gpio_device *gdev = cdev->gdev;
+       struct gpiochip_info chipinfo;
+
+       memset(&chipinfo, 0, sizeof(chipinfo));
+
+       strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
+       strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
+       chipinfo.lines = gdev->ngpio;
+       if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
+               return -EFAULT;
+       return 0;
+}
+
 #ifdef CONFIG_GPIO_CDEV_V1
 /*
  * returns 0 if the versions match, else the previously selected ABI version
@@ -1993,6 +2008,41 @@ static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
 
        return abiv;
 }
+
+static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
+                          bool watch)
+{
+       struct gpio_desc *desc;
+       struct gpioline_info lineinfo;
+       struct gpio_v2_line_info lineinfo_v2;
+
+       if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
+               return -EFAULT;
+
+       /* this doubles as a range check on line_offset */
+       desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.line_offset);
+       if (IS_ERR(desc))
+               return PTR_ERR(desc);
+
+       if (watch) {
+               if (lineinfo_ensure_abi_version(cdev, 1))
+                       return -EPERM;
+
+               if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
+                       return -EBUSY;
+       }
+
+       gpio_desc_to_lineinfo(desc, &lineinfo_v2);
+       gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
+
+       if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
+               if (watch)
+                       clear_bit(lineinfo.line_offset, cdev->watched_lines);
+               return -EFAULT;
+       }
+
+       return 0;
+}
 #endif
 
 static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
@@ -2030,6 +2080,22 @@ static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
        return 0;
 }
 
+static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
+{
+       __u32 offset;
+
+       if (copy_from_user(&offset, ip, sizeof(offset)))
+               return -EFAULT;
+
+       if (offset >= cdev->gdev->ngpio)
+               return -EINVAL;
+
+       if (!test_and_clear_bit(offset, cdev->watched_lines))
+               return -EBUSY;
+
+       return 0;
+}
+
 /*
  * gpio_ioctl() - ioctl handler for the GPIO chardev
  */
@@ -2037,80 +2103,24 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        struct gpio_chardev_data *cdev = file->private_data;
        struct gpio_device *gdev = cdev->gdev;
-       struct gpio_chip *gc = gdev->chip;
        void __user *ip = (void __user *)arg;
-       __u32 offset;
 
        /* We fail any subsequent ioctl():s when the chip is gone */
-       if (!gc)
+       if (!gdev->chip)
                return -ENODEV;
 
        /* Fill in the struct and pass to userspace */
        if (cmd == GPIO_GET_CHIPINFO_IOCTL) {
-               struct gpiochip_info chipinfo;
-
-               memset(&chipinfo, 0, sizeof(chipinfo));
-
-               strscpy(chipinfo.name, dev_name(&gdev->dev),
-                       sizeof(chipinfo.name));
-               strscpy(chipinfo.label, gdev->label,
-                       sizeof(chipinfo.label));
-               chipinfo.lines = gdev->ngpio;
-               if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
-                       return -EFAULT;
-               return 0;
+               return chipinfo_get(cdev, ip);
 #ifdef CONFIG_GPIO_CDEV_V1
-       } else if (cmd == GPIO_GET_LINEINFO_IOCTL) {
-               struct gpio_desc *desc;
-               struct gpioline_info lineinfo;
-               struct gpio_v2_line_info lineinfo_v2;
-
-               if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
-                       return -EFAULT;
-
-               /* this doubles as a range check on line_offset */
-               desc = gpiochip_get_desc(gc, lineinfo.line_offset);
-               if (IS_ERR(desc))
-                       return PTR_ERR(desc);
-
-               gpio_desc_to_lineinfo(desc, &lineinfo_v2);
-               gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
-
-               if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
-                       return -EFAULT;
-               return 0;
        } else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) {
                return linehandle_create(gdev, ip);
        } else if (cmd == GPIO_GET_LINEEVENT_IOCTL) {
                return lineevent_create(gdev, ip);
-       } else if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
-               struct gpio_desc *desc;
-               struct gpioline_info lineinfo;
-               struct gpio_v2_line_info lineinfo_v2;
-
-               if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
-                       return -EFAULT;
-
-               /* this doubles as a range check on line_offset */
-               desc = gpiochip_get_desc(gc, lineinfo.line_offset);
-               if (IS_ERR(desc))
-                       return PTR_ERR(desc);
-
-               if (lineinfo_ensure_abi_version(cdev, 1))
-                       return -EPERM;
-
-               if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
-                       return -EBUSY;
-
-               gpio_desc_to_lineinfo(desc, &lineinfo_v2);
-               gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
-
-               if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
-                       clear_bit(lineinfo.line_offset, cdev->watched_lines);
-                       return -EFAULT;
-               }
-
-               return 0;
+       } else if (cmd == GPIO_GET_LINEINFO_IOCTL ||
+                  cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
+               return lineinfo_get_v1(cdev, ip,
+                                      cmd == GPIO_GET_LINEINFO_WATCH_IOCTL);
 #endif /* CONFIG_GPIO_CDEV_V1 */
        } else if (cmd == GPIO_V2_GET_LINEINFO_IOCTL ||
                   cmd == GPIO_V2_GET_LINEINFO_WATCH_IOCTL) {
@@ -2119,16 +2129,7 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        } else if (cmd == GPIO_V2_GET_LINE_IOCTL) {
                return linereq_create(gdev, ip);
        } else if (cmd == GPIO_GET_LINEINFO_UNWATCH_IOCTL) {
-               if (copy_from_user(&offset, ip, sizeof(offset)))
-                       return -EFAULT;
-
-               if (offset >= cdev->gdev->ngpio)
-                       return -EINVAL;
-
-               if (!test_and_clear_bit(offset, cdev->watched_lines))
-                       return -EBUSY;
-
-               return 0;
+               return lineinfo_unwatch(cdev, ip);
        }
        return -EINVAL;
 }
index b02cc2a..b78a634 100644 (file)
@@ -1489,6 +1489,9 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
                type = IRQ_TYPE_NONE;
        }
 
+       if (gc->to_irq)
+               chip_warn(gc, "to_irq is redefined in %s and you shouldn't rely on it\n", __func__);
+
        gc->to_irq = gpiochip_to_irq;
        gc->irq.default_type = type;
        gc->irq.lock_key = lock_key;
index 087afab..cab1eba 100644 (file)
@@ -81,7 +81,6 @@ MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
-MODULE_FIRMWARE("amdgpu/green_sardine_gpu_info.bin");
 
 #define AMDGPU_RESUME_MS               2000
 
index 619d34c..346963e 100644 (file)
 #define mmVGT_ESGS_RING_SIZE_Vangogh_BASE_IDX    1
 #define mmSPI_CONFIG_CNTL_Vangogh                0x2440
 #define mmSPI_CONFIG_CNTL_Vangogh_BASE_IDX       1
+#define mmGCR_GENERAL_CNTL_Vangogh               0x1580
+#define mmGCR_GENERAL_CNTL_Vangogh_BASE_IDX      0
 
 #define mmCP_HYP_PFP_UCODE_ADDR                        0x5814
 #define mmCP_HYP_PFP_UCODE_ADDR_BASE_IDX       1
@@ -3244,7 +3246,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000142),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
index 07104a1..1961745 100644 (file)
@@ -491,12 +491,11 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
 {
        uint32_t def, data, def1, data1;
 
-       def  = data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
+       def  = data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
        def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
 
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
-               data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;
-
+               data &= ~MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
                data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                           DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                           DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
@@ -505,8 +504,7 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                           DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
 
        } else {
-               data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;
-
+               data |= MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
                data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                          DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                          DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
@@ -516,7 +514,7 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
        }
 
        if (def != data)
-               WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
+               WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
        if (def1 != data1)
                WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
 }
@@ -525,17 +523,44 @@ static void
 mmhub_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev,
                                           bool enable)
 {
-       uint32_t def, data;
-
-       def  = data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
-
-       if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
-               data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
-       else
-               data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+       uint32_t def, data, def1, data1, def2, data2;
+
+       def  = data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
+       def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL);
+       def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL);
+
+       if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
+               data &= ~MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
+               data1 &= ~(DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+                       DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+                       DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+                       DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+                       DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+               data2 &= ~(DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+                       DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+                       DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+                       DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+                       DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+       } else {
+               data |= MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
+               data1 |= (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+                       DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+                       DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+                       DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+                       DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+               data2 |= (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+                       DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+                       DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+                       DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+                       DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+       }
 
        if (def != data)
-               WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
+               WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
+       if (def1 != data1)
+               WREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL, data1);
+       if (def2 != data2)
+               WREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL, data2);
 }
 
 static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev,
@@ -554,26 +579,39 @@ static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev,
 
 static void mmhub_v2_3_get_clockgating(struct amdgpu_device *adev, u32 *flags)
 {
-       int data, data1;
+       int data, data1, data2, data3;
 
        if (amdgpu_sriov_vf(adev))
                *flags = 0;
 
-       data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
-       data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
+       data = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
+       data1  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
+       data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL);
+       data3 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL);
 
        /* AMD_CG_SUPPORT_MC_MGCG */
-       if ((data & MM_ATC_L2_MISC_CG__ENABLE_MASK) &&
-           !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+       if (!(data & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                       DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                       DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
                       DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
                       DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
-                      DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
-               *flags |= AMD_CG_SUPPORT_MC_MGCG;
+                      DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK))
+               && !(data1 & MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK)) {
+                       *flags |= AMD_CG_SUPPORT_MC_MGCG;
+       }
 
        /* AMD_CG_SUPPORT_MC_LS */
-       if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
+       if (!(data1 & MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK)
+               && !(data2 & (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+                               DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+                               DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+                               DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+                               DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK))
+               && !(data3 & (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+                               DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+                               DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+                               DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+                               DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK)))
                *flags |= AMD_CG_SUPPORT_MC_LS;
 }
 
index 5b466f4..ab98c25 100644 (file)
@@ -251,6 +251,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
        struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
        bool force_reset = false;
        bool update_uclk = false;
+       bool p_state_change_support;
 
        if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present)
                return;
@@ -291,8 +292,9 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
                clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
 
        clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
-       if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
-               clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support;
+       p_state_change_support = new_clocks->p_state_change_support || (display_count == 0);
+       if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
+               clk_mgr_base->clks.p_state_change_support = p_state_change_support;
 
                /* to disable P-State switching, set UCLK min = max */
                if (!clk_mgr_base->clks.p_state_change_support)
index 1bd1a09..f95bade 100644 (file)
@@ -2399,6 +2399,9 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting
                        initial_link_setting;
        uint32_t link_bw;
 
+       if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
+               return false;
+
        /* search for the minimum link setting that:
         * 1. is supported according to the link training result
         * 2. could support the b/w requested by the timing
@@ -3045,14 +3048,14 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
                for (i = 0; i < MAX_PIPES; i++) {
                        pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
                        if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
-                                       pipe_ctx->stream->link == link)
+                                       pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
                                core_link_disable_stream(pipe_ctx);
                }
 
                for (i = 0; i < MAX_PIPES; i++) {
                        pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
                        if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
-                                       pipe_ctx->stream->link == link)
+                                       pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
                                core_link_enable_stream(link->dc->current_state, pipe_ctx);
                }
 
index cfc130e..017b67b 100644 (file)
@@ -647,8 +647,13 @@ static void power_on_plane(
        if (REG(DC_IP_REQUEST_CNTL)) {
                REG_SET(DC_IP_REQUEST_CNTL, 0,
                                IP_REQUEST_EN, 1);
-               hws->funcs.dpp_pg_control(hws, plane_id, true);
-               hws->funcs.hubp_pg_control(hws, plane_id, true);
+
+               if (hws->funcs.dpp_pg_control)
+                       hws->funcs.dpp_pg_control(hws, plane_id, true);
+
+               if (hws->funcs.hubp_pg_control)
+                       hws->funcs.hubp_pg_control(hws, plane_id, true);
+
                REG_SET(DC_IP_REQUEST_CNTL, 0,
                                IP_REQUEST_EN, 0);
                DC_LOG_DEBUG(
@@ -1082,8 +1087,13 @@ void dcn10_plane_atomic_power_down(struct dc *dc,
        if (REG(DC_IP_REQUEST_CNTL)) {
                REG_SET(DC_IP_REQUEST_CNTL, 0,
                                IP_REQUEST_EN, 1);
-               hws->funcs.dpp_pg_control(hws, dpp->inst, false);
-               hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+
+               if (hws->funcs.dpp_pg_control)
+                       hws->funcs.dpp_pg_control(hws, dpp->inst, false);
+
+               if (hws->funcs.hubp_pg_control)
+                       hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+
                dpp->funcs->dpp_reset(dpp);
                REG_SET(DC_IP_REQUEST_CNTL, 0,
                                IP_REQUEST_EN, 0);
index cb822df..480d928 100644 (file)
@@ -1062,8 +1062,13 @@ static void dcn20_power_on_plane(
        if (REG(DC_IP_REQUEST_CNTL)) {
                REG_SET(DC_IP_REQUEST_CNTL, 0,
                                IP_REQUEST_EN, 1);
-               dcn20_dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
-               dcn20_hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
+
+               if (hws->funcs.dpp_pg_control)
+                       hws->funcs.dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
+
+               if (hws->funcs.hubp_pg_control)
+                       hws->funcs.hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
+
                REG_SET(DC_IP_REQUEST_CNTL, 0,
                                IP_REQUEST_EN, 0);
                DC_LOG_DEBUG(
index e04ecf0..5ed18ca 100644 (file)
@@ -2517,8 +2517,7 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
                 * if this primary pipe has a bottom pipe in prev. state
                 * and if the bottom pipe is still available (which it should be),
                 * pick that pipe as secondary
-                * Same logic applies for ODM pipes. Since mpo is not allowed with odm
-                * check in else case.
+                * Same logic applies for ODM pipes
                 */
                if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) {
                        preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx;
@@ -2526,7 +2525,9 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
                                secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
                                secondary_pipe->pipe_idx = preferred_pipe_idx;
                        }
-               } else if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
+               }
+               if (secondary_pipe == NULL &&
+                               dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
                        preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx;
                        if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
                                secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
index 1c88d2e..b000b43 100644 (file)
@@ -296,7 +296,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
        .num_banks = 8,
        .num_chans = 4,
        .vmm_page_size_bytes = 4096,
-       .dram_clock_change_latency_us = 23.84,
+       .dram_clock_change_latency_us = 11.72,
        .return_bus_width_bytes = 64,
        .dispclk_dppclk_vco_speed_mhz = 3600,
        .xfc_bus_transport_time_us = 4,
index f743685..9a96970 100644 (file)
@@ -1121,7 +1121,7 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
 static int renoir_gfx_state_change_set(struct smu_context *smu, uint32_t state)
 {
 
-       return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GpuChangeState, state, NULL);
+       return 0;
 }
 
 static const struct pptable_funcs renoir_ppt_funcs = {
index ba15070..4a8cbec 100644 (file)
@@ -3021,7 +3021,7 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set,
 
        ret = handle_conflicting_encoders(state, true);
        if (ret)
-               return ret;
+               goto fail;
 
        ret = drm_atomic_commit(state);
 
index 02ca22e..0b232a7 100644 (file)
@@ -387,9 +387,16 @@ static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
        if (gbo->vmap_use_count > 0)
                goto out;
 
-       ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
-       if (ret)
-               return ret;
+       /*
+        * VRAM helpers unmap the BO only on demand. So the previous
+        * page mapping might still be around. Only vmap if there's
+        * no mapping present.
+        */
+       if (dma_buf_map_is_null(&gbo->map)) {
+               ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
+               if (ret)
+                       return ret;
+       }
 
 out:
        ++gbo->vmap_use_count;
@@ -577,6 +584,7 @@ static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
                return;
 
        ttm_bo_vunmap(bo, &gbo->map);
+       dma_buf_map_clear(&gbo->map); /* explicitly clear mapping for next vmap call */
 }
 
 static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo,
index 6e74e67..3491460 100644 (file)
@@ -388,19 +388,18 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
                return -ENOENT;
 
        *fence = drm_syncobj_fence_get(syncobj);
-       drm_syncobj_put(syncobj);
 
        if (*fence) {
                ret = dma_fence_chain_find_seqno(fence, point);
                if (!ret)
-                       return 0;
+                       goto out;
                dma_fence_put(*fence);
        } else {
                ret = -EINVAL;
        }
 
        if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
-               return ret;
+               goto out;
 
        memset(&wait, 0, sizeof(wait));
        wait.task = current;
@@ -432,6 +431,9 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
        if (wait.node.next)
                drm_syncobj_remove_wait(syncobj, &wait);
 
+out:
+       drm_syncobj_put(syncobj);
+
        return ret;
 }
 EXPORT_SYMBOL(drm_syncobj_find_fence);
index 92940a0..d5ace48 100644 (file)
@@ -3725,7 +3725,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
        intel_ddi_init_dp_buf_reg(encoder, crtc_state);
        if (!is_mst)
                intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
-       intel_dp_configure_protocol_converter(intel_dp);
+       intel_dp_configure_protocol_converter(intel_dp, crtc_state);
        intel_dp_sink_set_decompression_state(intel_dp, crtc_state,
                                              true);
        intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
index 37f1a10..09123e8 100644 (file)
@@ -4014,7 +4014,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
        intel_de_posting_read(dev_priv, intel_dp->output_reg);
 }
 
-void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
+                                          const struct intel_crtc_state *crtc_state)
 {
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        u8 tmp;
@@ -4033,8 +4034,8 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
                drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n",
                            enableddisabled(intel_dp->has_hdmi_sink));
 
-       tmp = intel_dp->dfp.ycbcr_444_to_420 ?
-               DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
+       tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
+               intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
 
        if (drm_dp_dpcd_writeb(&intel_dp->aux,
                               DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
@@ -4088,7 +4089,7 @@ static void intel_enable_dp(struct intel_atomic_state *state,
        }
 
        intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
-       intel_dp_configure_protocol_converter(intel_dp);
+       intel_dp_configure_protocol_converter(intel_dp, pipe_config);
        intel_dp_start_link_train(intel_dp, pipe_config);
        intel_dp_stop_link_train(intel_dp, pipe_config);
 
index b871a09..05f7ddf 100644 (file)
@@ -51,7 +51,8 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
 int intel_dp_retrain_link(struct intel_encoder *encoder,
                          struct drm_modeset_acquire_ctx *ctx);
 void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode);
-void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp);
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
+                                          const struct intel_crtc_state *crtc_state);
 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
                                           const struct intel_crtc_state *crtc_state,
                                           bool enable);
index b2a4bbc..b9d8825 100644 (file)
@@ -2210,6 +2210,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
        if (content_protection_type_changed) {
                mutex_lock(&hdcp->mutex);
                hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+               drm_connector_get(&connector->base);
                schedule_work(&hdcp->prop_work);
                mutex_unlock(&hdcp->mutex);
        }
@@ -2221,6 +2222,14 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
                desired_and_not_enabled =
                        hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
                mutex_unlock(&hdcp->mutex);
+               /*
+                * If HDCP is already ENABLED and the CP property is DESIRED, schedule
+                * prop_work to report the correct CP property to user space.
+                */
+               if (!desired_and_not_enabled && !content_protection_type_changed) {
+                       drm_connector_get(&connector->base);
+                       schedule_work(&hdcp->prop_work);
+               }
        }
 
        if (desired_and_not_enabled || content_protection_type_changed)
index a24cc1f..0625cbb 100644 (file)
@@ -134,11 +134,6 @@ static bool remove_signaling_context(struct intel_breadcrumbs *b,
        return true;
 }
 
-static inline bool __request_completed(const struct i915_request *rq)
-{
-       return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
-}
-
 __maybe_unused static bool
 check_signal_order(struct intel_context *ce, struct i915_request *rq)
 {
@@ -257,7 +252,7 @@ static void signal_irq_work(struct irq_work *work)
                list_for_each_entry_rcu(rq, &ce->signals, signal_link) {
                        bool release;
 
-                       if (!__request_completed(rq))
+                       if (!__i915_request_is_complete(rq))
                                break;
 
                        if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL,
@@ -379,7 +374,7 @@ static void insert_breadcrumb(struct i915_request *rq)
         * straight onto a signaled list, and queue the irq worker for
         * its signal completion.
         */
-       if (__request_completed(rq)) {
+       if (__i915_request_is_complete(rq)) {
                if (__signal_request(rq) &&
                    llist_add(&rq->signal_node, &b->signaled_requests))
                        irq_work_queue(&b->irq_work);
index 7614a3d..26c7d0a 100644 (file)
@@ -3988,6 +3988,9 @@ err:
 static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine)
 {
        i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
+
+       /* Called on error unwind, clear all flags to prevent further use */
+       memset(&engine->wa_ctx, 0, sizeof(engine->wa_ctx));
 }
 
 typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
index 7ea94d2..8015964 100644 (file)
@@ -126,6 +126,10 @@ static void __rcu_cacheline_free(struct rcu_head *rcu)
        struct intel_timeline_cacheline *cl =
                container_of(rcu, typeof(*cl), rcu);
 
+       /* Must wait until after all *rq->hwsp are complete before removing */
+       i915_gem_object_unpin_map(cl->hwsp->vma->obj);
+       __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
+
        i915_active_fini(&cl->active);
        kfree(cl);
 }
@@ -133,11 +137,6 @@ static void __rcu_cacheline_free(struct rcu_head *rcu)
 static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
 {
        GEM_BUG_ON(!i915_active_is_idle(&cl->active));
-
-       i915_gem_object_unpin_map(cl->hwsp->vma->obj);
-       i915_vma_put(cl->hwsp->vma);
-       __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
-
        call_rcu(&cl->rcu, __rcu_cacheline_free);
 }
 
@@ -179,7 +178,6 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
                return ERR_CAST(vaddr);
        }
 
-       i915_vma_get(hwsp->vma);
        cl->hwsp = hwsp;
        cl->vaddr = page_pack_bits(vaddr, cacheline);
 
index d76685c..9856479 100644 (file)
@@ -184,13 +184,24 @@ static u64 get_rc6(struct intel_gt *gt)
        return val;
 }
 
-static void park_rc6(struct drm_i915_private *i915)
+static void init_rc6(struct i915_pmu *pmu)
 {
-       struct i915_pmu *pmu = &i915->pmu;
+       struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
+       intel_wakeref_t wakeref;
 
-       if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
+       with_intel_runtime_pm(i915->gt.uncore->rpm, wakeref) {
                pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
+               pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur =
+                                       pmu->sample[__I915_SAMPLE_RC6].cur;
+               pmu->sleep_last = ktime_get();
+       }
+}
 
+static void park_rc6(struct drm_i915_private *i915)
+{
+       struct i915_pmu *pmu = &i915->pmu;
+
+       pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
        pmu->sleep_last = ktime_get();
 }
 
@@ -201,6 +212,7 @@ static u64 get_rc6(struct intel_gt *gt)
        return __get_rc6(gt);
 }
 
+static void init_rc6(struct i915_pmu *pmu) { }
 static void park_rc6(struct drm_i915_private *i915) {}
 
 #endif
@@ -612,10 +624,8 @@ static void i915_pmu_enable(struct perf_event *event)
                container_of(event->pmu, typeof(*i915), pmu.base);
        unsigned int bit = event_enabled_bit(event);
        struct i915_pmu *pmu = &i915->pmu;
-       intel_wakeref_t wakeref;
        unsigned long flags;
 
-       wakeref = intel_runtime_pm_get(&i915->runtime_pm);
        spin_lock_irqsave(&pmu->lock, flags);
 
        /*
@@ -626,13 +636,6 @@ static void i915_pmu_enable(struct perf_event *event)
        GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
        GEM_BUG_ON(pmu->enable_count[bit] == ~0);
 
-       if (pmu->enable_count[bit] == 0 &&
-           config_enabled_mask(I915_PMU_RC6_RESIDENCY) & BIT_ULL(bit)) {
-               pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0;
-               pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
-               pmu->sleep_last = ktime_get();
-       }
-
        pmu->enable |= BIT_ULL(bit);
        pmu->enable_count[bit]++;
 
@@ -673,8 +676,6 @@ static void i915_pmu_enable(struct perf_event *event)
         * an existing non-zero value.
         */
        local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
-
-       intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }
 
 static void i915_pmu_disable(struct perf_event *event)
@@ -1130,6 +1131,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
        hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        pmu->timer.function = i915_sample;
        pmu->cpuhp.cpu = -1;
+       init_rc6(pmu);
 
        if (!is_igp(i915)) {
                pmu->name = kasprintf(GFP_KERNEL,
index 620b6fa..92adfee 100644 (file)
@@ -434,7 +434,7 @@ static inline u32 hwsp_seqno(const struct i915_request *rq)
 
 static inline bool __i915_request_has_started(const struct i915_request *rq)
 {
-       return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
+       return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1);
 }
 
 /**
@@ -465,11 +465,19 @@ static inline bool __i915_request_has_started(const struct i915_request *rq)
  */
 static inline bool i915_request_started(const struct i915_request *rq)
 {
+       bool result;
+
        if (i915_request_signaled(rq))
                return true;
 
-       /* Remember: started but may have since been preempted! */
-       return __i915_request_has_started(rq);
+       result = true;
+       rcu_read_lock(); /* the HWSP may be freed at runtime */
+       if (likely(!i915_request_signaled(rq)))
+               /* Remember: started but may have since been preempted! */
+               result = __i915_request_has_started(rq);
+       rcu_read_unlock();
+
+       return result;
 }
 
 /**
@@ -482,10 +490,16 @@ static inline bool i915_request_started(const struct i915_request *rq)
  */
 static inline bool i915_request_is_running(const struct i915_request *rq)
 {
+       bool result;
+
        if (!i915_request_is_active(rq))
                return false;
 
-       return __i915_request_has_started(rq);
+       rcu_read_lock();
+       result = __i915_request_has_started(rq) && i915_request_is_active(rq);
+       rcu_read_unlock();
+
+       return result;
 }
 
 /**
@@ -509,12 +523,25 @@ static inline bool i915_request_is_ready(const struct i915_request *rq)
        return !list_empty(&rq->sched.link);
 }
 
+static inline bool __i915_request_is_complete(const struct i915_request *rq)
+{
+       return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
+}
+
 static inline bool i915_request_completed(const struct i915_request *rq)
 {
+       bool result;
+
        if (i915_request_signaled(rq))
                return true;
 
-       return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
+       result = true;
+       rcu_read_lock(); /* the HWSP may be freed at runtime */
+       if (likely(!i915_request_signaled(rq)))
+               result = __i915_request_is_complete(rq);
+       rcu_read_unlock();
+
+       return result;
 }
 
 static inline void i915_request_mark_complete(struct i915_request *rq)
index 8cd776a..11e0313 100644 (file)
@@ -79,12 +79,13 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
        struct page *p;
        void *vaddr;
 
-       if (order) {
-               gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+       /* Don't set the __GFP_COMP flag for higher order allocations.
+        * Mapping pages directly into a userspace process and calling
+        * put_page() on a TTM allocated page is illegal.
+        */
+       if (order)
+               gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY |
                        __GFP_KSWAPD_RECLAIM;
-               gfp_flags &= ~__GFP_MOVABLE;
-               gfp_flags &= ~__GFP_COMP;
-       }
 
        if (!pool->use_dma_alloc) {
                p = alloc_pages(gfp_flags, order);
index 5551062..98cab0b 100644 (file)
@@ -1267,6 +1267,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
        card->dai_link = dai_link;
        card->num_links = 1;
        card->name = vc4_hdmi->variant->card_name;
+       card->driver_name = "vc4-hdmi";
        card->dev = dev;
        card->owner = THIS_MODULE;
 
index 0743ef5..8429ebe 100644 (file)
@@ -758,7 +758,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                        MT_STORE_FIELD(inrange_state);
                        return 1;
                case HID_DG_CONFIDENCE:
-                       if (cls->name == MT_CLS_WIN_8 &&
+                       if ((cls->name == MT_CLS_WIN_8 ||
+                            cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT) &&
                                (field->application == HID_DG_TOUCHPAD ||
                                 field->application == HID_DG_TOUCHSCREEN))
                                app->quirks |= MT_QUIRK_CONFIDENCE;
index e8acd23..aa9e488 100644 (file)
@@ -147,9 +147,9 @@ static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
        }
 
        if (flush)
-               wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo);
+               wacom_wac_queue_flush(hdev, wacom_wac->pen_fifo);
        else if (insert)
-               wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo,
+               wacom_wac_queue_insert(hdev, wacom_wac->pen_fifo,
                                       raw_data, report_size);
 
        return insert && !flush;
@@ -1280,7 +1280,7 @@ static void wacom_devm_kfifo_release(struct device *dev, void *res)
 static int wacom_devm_kfifo_alloc(struct wacom *wacom)
 {
        struct wacom_wac *wacom_wac = &wacom->wacom_wac;
-       struct kfifo_rec_ptr_2 *pen_fifo = &wacom_wac->pen_fifo;
+       struct kfifo_rec_ptr_2 *pen_fifo;
        int error;
 
        pen_fifo = devres_alloc(wacom_devm_kfifo_release,
@@ -1297,6 +1297,7 @@ static int wacom_devm_kfifo_alloc(struct wacom *wacom)
        }
 
        devres_add(&wacom->hdev->dev, pen_fifo);
+       wacom_wac->pen_fifo = pen_fifo;
 
        return 0;
 }
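
A sketch of the devres-managed fifo pattern this switches to (the allocation size and local variable names are assumptions for illustration, not the actual driver code):

	struct kfifo_rec_ptr_2 *fifo;
	int error;

	fifo = devres_alloc(wacom_devm_kfifo_release, sizeof(*fifo), GFP_KERNEL);
	if (!fifo)
		return -ENOMEM;

	error = kfifo_alloc(fifo, PAGE_SIZE, GFP_KERNEL);	/* assumed size */
	if (error) {
		devres_free(fifo);
		return error;
	}

	devres_add(&wacom->hdev->dev, fifo);
	wacom_wac->pen_fifo = fifo;	/* publish the pointer only once devres owns it */
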
index da612b6..195910d 100644 (file)
@@ -342,7 +342,7 @@ struct wacom_wac {
        struct input_dev *pen_input;
        struct input_dev *touch_input;
        struct input_dev *pad_input;
-       struct kfifo_rec_ptr_2 pen_fifo;
+       struct kfifo_rec_ptr_2 *pen_fifo;
        int pid;
        int num_contacts_left;
        u8 bt_features;
index 52acd77..251e75c 100644 (file)
@@ -268,6 +268,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7aa6),
                .driver_data = (kernel_ulong_t)&intel_th_2x,
        },
+       {
+               /* Alder Lake-P */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x51a6),
+               .driver_data = (kernel_ulong_t)&intel_th_2x,
+       },
        {
                /* Alder Lake CPU */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
index 3e7df1c..81d7b21 100644 (file)
@@ -64,7 +64,7 @@ static void stm_heartbeat_unlink(struct stm_source_data *data)
 
 static int stm_heartbeat_init(void)
 {
-       int i, ret = -ENOMEM;
+       int i, ret;
 
        if (nr_devs < 0 || nr_devs > STM_HEARTBEAT_MAX)
                return -EINVAL;
@@ -72,8 +72,10 @@ static int stm_heartbeat_init(void)
        for (i = 0; i < nr_devs; i++) {
                stm_heartbeat[i].data.name =
                        kasprintf(GFP_KERNEL, "heartbeat.%d", i);
-               if (!stm_heartbeat[i].data.name)
+               if (!stm_heartbeat[i].data.name) {
+                       ret = -ENOMEM;
                        goto fail_unregister;
+               }
 
                stm_heartbeat[i].data.nr_chans  = 1;
                stm_heartbeat[i].data.link      = stm_heartbeat_link;
index d4d60ad..ab1f39a 100644 (file)
@@ -1013,6 +1013,7 @@ config I2C_SIRF
 config I2C_SPRD
        tristate "Spreadtrum I2C interface"
        depends on I2C=y && (ARCH_SPRD || COMPILE_TEST)
+       depends on COMMON_CLK
        help
          If you say yes to this option, support will be included for the
          Spreadtrum I2C interface.
index b444fbf..a8e8af5 100644 (file)
@@ -241,6 +241,19 @@ static struct imx_i2c_hwdata vf610_i2c_hwdata = {
 
 };
 
+static const struct platform_device_id imx_i2c_devtype[] = {
+       {
+               .name = "imx1-i2c",
+               .driver_data = (kernel_ulong_t)&imx1_i2c_hwdata,
+       }, {
+               .name = "imx21-i2c",
+               .driver_data = (kernel_ulong_t)&imx21_i2c_hwdata,
+       }, {
+               /* sentinel */
+       }
+};
+MODULE_DEVICE_TABLE(platform, imx_i2c_devtype);
+
 static const struct of_device_id i2c_imx_dt_ids[] = {
        { .compatible = "fsl,imx1-i2c", .data = &imx1_i2c_hwdata, },
        { .compatible = "fsl,imx21-i2c", .data = &imx21_i2c_hwdata, },
@@ -1330,7 +1343,11 @@ static int i2c_imx_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        match = device_get_match_data(&pdev->dev);
-       i2c_imx->hwdata = match;
+       if (match)
+               i2c_imx->hwdata = match;
+       else
+               i2c_imx->hwdata = (struct imx_i2c_hwdata *)
+                               platform_get_device_id(pdev)->driver_data;
 
        /* Setup i2c_imx driver structure */
        strlcpy(i2c_imx->adapter.name, pdev->name, sizeof(i2c_imx->adapter.name));
@@ -1498,6 +1515,7 @@ static struct platform_driver i2c_imx_driver = {
                .of_match_table = i2c_imx_dt_ids,
                .acpi_match_table = i2c_imx_acpi_ids,
        },
+       .id_table = imx_i2c_devtype,
 };
 
 static int __init i2c_adap_imx_init(void)
index d960790..845eda7 100644 (file)
@@ -347,7 +347,7 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
                if (result)
                        return result;
                if (recv_len && i == 0) {
-                       if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
+                       if (data[i] > I2C_SMBUS_BLOCK_MAX)
                                return -EPROTO;
                        length += data[i];
                }
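
Illustration only (not part of the applied diff): I2C_SMBUS_BLOCK_MAX is 32, the largest byte count an SMBus block read may announce, so the old "+ 1" form was off by one and accepted a count of 33:

	u8 count = 33;	/* bogus length byte announced by the device */

	bool bad_old = count > I2C_SMBUS_BLOCK_MAX + 1;	/* false: 33 slipped through */
	bool bad_new = count > I2C_SMBUS_BLOCK_MAX;	/* true: 33 is rejected with -EPROTO */
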
index ec7a7e9..c0c7d01 100644 (file)
@@ -80,7 +80,7 @@ static int tegra_bpmp_xlate_flags(u16 flags, u16 *out)
                flags &= ~I2C_M_RECV_LEN;
        }
 
-       return (flags != 0) ? -EINVAL : 0;
+       return 0;
 }
 
 /**
index 6f08c0c..8b113ae 100644 (file)
@@ -326,6 +326,8 @@ static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned int reg)
        /* read back register to make sure that register writes completed */
        if (reg != I2C_TX_FIFO)
                readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+       else if (i2c_dev->is_vi)
+               readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, I2C_INT_STATUS));
 }
 
 static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned int reg)
@@ -339,6 +341,21 @@ static void i2c_writesl(struct tegra_i2c_dev *i2c_dev, void *data,
        writesl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len);
 }
 
+static void i2c_writesl_vi(struct tegra_i2c_dev *i2c_dev, void *data,
+                          unsigned int reg, unsigned int len)
+{
+       u32 *data32 = data;
+
+       /*
+        * The VI I2C controller has a known hardware bug where writes get stuck
+        * when multiple writes to the TX_FIFO register happen back to back.
+        * The recommended software workaround is to read an I2C register after
+        * each TX_FIFO write to flush out the data.
+        */
+       while (len--)
+               i2c_writel(i2c_dev, *data32++, reg);
+}
+
 static void i2c_readsl(struct tegra_i2c_dev *i2c_dev, void *data,
                       unsigned int reg, unsigned int len)
 {
@@ -533,7 +550,7 @@ static int tegra_i2c_poll_register(struct tegra_i2c_dev *i2c_dev,
        void __iomem *addr = i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg);
        u32 val;
 
-       if (!i2c_dev->atomic_mode)
+       if (!i2c_dev->atomic_mode && !in_irq())
                return readl_relaxed_poll_timeout(addr, val, !(val & mask),
                                                  delay_us, timeout_us);
 
@@ -811,7 +828,10 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
                i2c_dev->msg_buf_remaining = buf_remaining;
                i2c_dev->msg_buf = buf + words_to_transfer * BYTES_PER_FIFO_WORD;
 
-               i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+               if (i2c_dev->is_vi)
+                       i2c_writesl_vi(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+               else
+                       i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
 
                buf += words_to_transfer * BYTES_PER_FIFO_WORD;
        }
index b11c8c4..e946903 100644 (file)
@@ -397,16 +397,12 @@ static int tiadc_iio_buffered_hardware_setup(struct device *dev,
        ret = devm_request_threaded_irq(dev, irq, pollfunc_th, pollfunc_bh,
                                flags, indio_dev->name, indio_dev);
        if (ret)
-               goto error_kfifo_free;
+               return ret;
 
        indio_dev->setup_ops = setup_ops;
        indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
 
        return 0;
-
-error_kfifo_free:
-       iio_kfifo_free(indio_dev->buffer);
-       return ret;
 }
 
 static const char * const chan_name_ain[] = {
index 0507283..2dbd264 100644 (file)
  * @sdata: Sensor data.
  *
  * returns:
- * 0 - no new samples available
- * 1 - new samples available
- * negative - error or unknown
+ * false - no new samples available or read error
+ * true - new samples available
  */
-static int st_sensors_new_samples_available(struct iio_dev *indio_dev,
-                                           struct st_sensor_data *sdata)
+static bool st_sensors_new_samples_available(struct iio_dev *indio_dev,
+                                            struct st_sensor_data *sdata)
 {
        int ret, status;
 
        /* How would I know if I can't check it? */
        if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr)
-               return -EINVAL;
+               return true;
 
        /* No scan mask, no interrupt */
        if (!indio_dev->active_scan_mask)
-               return 0;
+               return false;
 
        ret = regmap_read(sdata->regmap,
                          sdata->sensor_settings->drdy_irq.stat_drdy.addr,
                          &status);
        if (ret < 0) {
                dev_err(sdata->dev, "error checking samples available\n");
-               return ret;
+               return false;
        }
 
-       if (status & sdata->sensor_settings->drdy_irq.stat_drdy.mask)
-               return 1;
-
-       return 0;
+       return !!(status & sdata->sensor_settings->drdy_irq.stat_drdy.mask);
 }
 
 /**
@@ -180,9 +176,15 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
 
        /* Tell the interrupt handler that we're dealing with edges */
        if (irq_trig == IRQF_TRIGGER_FALLING ||
-           irq_trig == IRQF_TRIGGER_RISING)
+           irq_trig == IRQF_TRIGGER_RISING) {
+               if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr) {
+                       dev_err(&indio_dev->dev,
+                               "edge IRQ not supported w/o stat register.\n");
+                       err = -EOPNOTSUPP;
+                       goto iio_trigger_free;
+               }
                sdata->edge_irq = true;
-       else
+       } else {
                /*
                 * If we're not using edges (i.e. level interrupts) we
                 * just mask off the IRQ, handle one interrupt, then
@@ -190,6 +192,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
                 * interrupt handler top half again and start over.
                 */
                irq_trig |= IRQF_ONESHOT;
+       }
 
        /*
         * If the interrupt pin is Open Drain, by definition this
index 28921b6..e9297c2 100644 (file)
@@ -187,9 +187,9 @@ static ssize_t ad5504_write_dac_powerdown(struct iio_dev *indio_dev,
                return ret;
 
        if (pwr_down)
-               st->pwr_down_mask |= (1 << chan->channel);
-       else
                st->pwr_down_mask &= ~(1 << chan->channel);
+       else
+               st->pwr_down_mask |= (1 << chan->channel);
 
        ret = ad5504_spi_write(st, AD5504_ADDR_CTRL,
                                AD5504_DAC_PWRDWN_MODE(st->pwr_down_mode) |
index a2f8209..37fd0b6 100644 (file)
@@ -601,7 +601,7 @@ static int sx9310_read_thresh(struct sx9310_data *data,
                return ret;
 
        regval = FIELD_GET(SX9310_REG_PROX_CTRL8_9_PTHRESH_MASK, regval);
-       if (regval > ARRAY_SIZE(sx9310_pthresh_codes))
+       if (regval >= ARRAY_SIZE(sx9310_pthresh_codes))
                return -EINVAL;
 
        *val = sx9310_pthresh_codes[regval];
@@ -1305,7 +1305,8 @@ sx9310_get_default_reg(struct sx9310_data *data, int i,
                if (ret)
                        break;
 
-               pos = min(max(ilog2(pos), 3), 10) - 3;
+               /* Powers of 2, except for a gap between 16 and 64 */
+               pos = clamp(ilog2(pos), 3, 11) - (pos >= 32 ? 4 : 3);
                reg_def->def &= ~SX9310_REG_PROX_CTRL7_AVGPOSFILT_MASK;
                reg_def->def |= FIELD_PREP(SX9310_REG_PROX_CTRL7_AVGPOSFILT_MASK,
                                           pos);
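
The sx9310 hunk above maps a power-of-two averaging filter length to its register code with clamp(ilog2(pos), 3, 11) minus 3 or 4, the extra 1 accounting for the gap between 16 and 64. A standalone sketch that enumerates the mapping, assuming the supported lengths implied by the comment (8, 16, 64, 128, ... 2048):

    #include <stdio.h>

    /* Userspace stand-ins for the kernel's ilog2() and clamp() helpers. */
    static int ilog2_u(unsigned int v)
    {
            int l = -1;

            while (v) {
                    v >>= 1;
                    l++;
            }
            return l;
    }

    static int clamp_int(int v, int lo, int hi)
    {
            return v < lo ? lo : (v > hi ? hi : v);
    }

    int main(void)
    {
            /* Filter lengths: powers of 2 with a gap between 16 and 64. */
            unsigned int lengths[] = { 8, 16, 64, 128, 256, 512, 1024, 2048 };
            unsigned int i;

            for (i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++) {
                    unsigned int pos = lengths[i];
                    int code = clamp_int(ilog2_u(pos), 3, 11) - (pos >= 32 ? 4 : 3);

                    printf("AVGPOSFILT %4u -> register code %d\n", pos, code);
            }
            return 0;
    }

This prints codes 0 through 7, one per supported length.
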
index 503fe54..608ccb1 100644 (file)
@@ -248,6 +248,12 @@ static int mlx90632_set_meas_type(struct regmap *regmap, u8 type)
        if (ret < 0)
                return ret;
 
+       /*
+        * Give the mlx90632 some time to reset properly before sending a new I2C
+        * command; otherwise the following I2C command(s) will not be accepted.
+        */
+       usleep_range(150, 200);
+
        ret = regmap_write_bits(regmap, MLX90632_REG_CONTROL,
                                 (MLX90632_CFG_MTYP_MASK | MLX90632_CFG_PWR_MASK),
                                 (MLX90632_MTYP_STATUS(type) | MLX90632_PWR_STATUS_HALT));
index a740139..d109bb3 100644 (file)
@@ -2474,7 +2474,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
        init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
        init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
-       init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
+       init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
        init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
        init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
        return 0;
index 55d5386..ad82532 100644 (file)
@@ -532,7 +532,7 @@ struct hns_roce_qp_table {
        struct hns_roce_hem_table       sccc_table;
        struct mutex                    scc_mutex;
        struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
-       spinlock_t bank_lock;
+       struct mutex bank_mutex;
 };
 
 struct hns_roce_cq_table {
index d8e2fe5..1116371 100644 (file)
@@ -209,7 +209,7 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 
                hr_qp->doorbell_qpn = 1;
        } else {
-               spin_lock(&qp_table->bank_lock);
+               mutex_lock(&qp_table->bank_mutex);
                bankid = get_least_load_bankid_for_qp(qp_table->bank);
 
                ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
@@ -217,12 +217,12 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
                if (ret) {
                        ibdev_err(&hr_dev->ib_dev,
                                  "failed to alloc QPN, ret = %d\n", ret);
-                       spin_unlock(&qp_table->bank_lock);
+                       mutex_unlock(&qp_table->bank_mutex);
                        return ret;
                }
 
                qp_table->bank[bankid].inuse++;
-               spin_unlock(&qp_table->bank_lock);
+               mutex_unlock(&qp_table->bank_mutex);
 
                hr_qp->doorbell_qpn = (u32)num;
        }
@@ -408,9 +408,9 @@ static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 
        ida_free(&hr_dev->qp_table.bank[bankid].ida, hr_qp->qpn >> 3);
 
-       spin_lock(&hr_dev->qp_table.bank_lock);
+       mutex_lock(&hr_dev->qp_table.bank_mutex);
        hr_dev->qp_table.bank[bankid].inuse--;
-       spin_unlock(&hr_dev->qp_table.bank_lock);
+       mutex_unlock(&hr_dev->qp_table.bank_mutex);
 }
 
 static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
@@ -1371,6 +1371,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
        unsigned int i;
 
        mutex_init(&qp_table->scc_mutex);
+       mutex_init(&qp_table->bank_mutex);
        xa_init(&hr_dev->qp_table_xa);
 
        reserved_from_bot = hr_dev->caps.reserved_qps;
index d26f3f3..aabdc07 100644 (file)
@@ -3311,8 +3311,7 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
        int err;
 
        dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
-       err = register_netdevice_notifier_net(mlx5_core_net(dev->mdev),
-                                             &dev->port[port_num].roce.nb);
+       err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
        if (err) {
                dev->port[port_num].roce.nb.notifier_call = NULL;
                return err;
@@ -3324,8 +3323,7 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
 static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
 {
        if (dev->port[port_num].roce.nb.notifier_call) {
-               unregister_netdevice_notifier_net(mlx5_core_net(dev->mdev),
-                                                 &dev->port[port_num].roce.nb);
+               unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
                dev->port[port_num].roce.nb.notifier_call = NULL;
        }
 }
index e59615a..586b0e5 100644 (file)
@@ -214,7 +214,7 @@ static ssize_t summary_show(struct usnic_ib_qp_grp *qp_grp, char *buf)
        struct usnic_vnic_res *vnic_res;
        int len;
 
-       len = sysfs_emit(buf, "QPN: %d State: (%s) PID: %u VF Idx: %hu ",
+       len = sysfs_emit(buf, "QPN: %d State: (%s) PID: %u VF Idx: %hu",
                         qp_grp->ibqp.qp_num,
                         usnic_ib_qp_grp_state_to_string(qp_grp->state),
                         qp_grp->owner_pid,
@@ -224,14 +224,13 @@ static ssize_t summary_show(struct usnic_ib_qp_grp *qp_grp, char *buf)
                res_chunk = qp_grp->res_chunk_list[i];
                for (j = 0; j < res_chunk->cnt; j++) {
                        vnic_res = res_chunk->res[j];
-                       len += sysfs_emit_at(
-                               buf, len, "%s[%d] ",
+                       len += sysfs_emit_at(buf, len, " %s[%d]",
                                usnic_vnic_res_type_to_str(vnic_res->type),
                                vnic_res->vnic_idx);
                }
        }
 
-       len = sysfs_emit_at(buf, len, "\n");
+       len += sysfs_emit_at(buf, len, "\n");
 
        return len;
 }
index c142f5e..de57f2f 100644 (file)
@@ -509,6 +509,20 @@ static inline int ib_send_flags_to_pvrdma(int flags)
        return flags & PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX);
 }
 
+static inline int pvrdma_network_type_to_ib(enum pvrdma_network_type type)
+{
+       switch (type) {
+       case PVRDMA_NETWORK_ROCE_V1:
+               return RDMA_NETWORK_ROCE_V1;
+       case PVRDMA_NETWORK_IPV4:
+               return RDMA_NETWORK_IPV4;
+       case PVRDMA_NETWORK_IPV6:
+               return RDMA_NETWORK_IPV6;
+       default:
+               return RDMA_NETWORK_IPV6;
+       }
+}
+
 void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst,
                         const struct pvrdma_qp_cap *src);
 void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst,
index a119ac3..6aa40bd 100644 (file)
@@ -367,7 +367,7 @@ retry:
        wc->dlid_path_bits = cqe->dlid_path_bits;
        wc->port_num = cqe->port_num;
        wc->vendor_err = cqe->vendor_err;
-       wc->network_hdr_type = cqe->network_hdr_type;
+       wc->network_hdr_type = pvrdma_network_type_to_ib(cqe->network_hdr_type);
 
        /* Update shared ring state */
        pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);
index c4b06ce..943914c 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/if_arp.h>
 #include <linux/netdevice.h>
 #include <linux/if.h>
+#include <linux/if_vlan.h>
 #include <net/udp_tunnel.h>
 #include <net/sch_generic.h>
 #include <linux/netfilter.h>
@@ -153,9 +154,14 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 {
        struct udphdr *udph;
        struct net_device *ndev = skb->dev;
+       struct net_device *rdev = ndev;
        struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);
        struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
 
+       if (!rxe && is_vlan_dev(rdev)) {
+               rdev = vlan_dev_real_dev(ndev);
+               rxe = rxe_get_dev_from_net(rdev);
+       }
        if (!rxe)
                goto drop;
 
index 5a09808..c7e3b6a 100644 (file)
@@ -872,6 +872,11 @@ static enum resp_states do_complete(struct rxe_qp *qp,
                        else
                                wc->network_hdr_type = RDMA_NETWORK_IPV6;
 
+                       if (is_vlan_dev(skb->dev)) {
+                               wc->wc_flags |= IB_WC_WITH_VLAN;
+                               wc->vlan_id = vlan_dev_vlan_id(skb->dev);
+                       }
+
                        if (pkt->mask & RXE_IMMDT_MASK) {
                                wc->wc_flags |= IB_WC_WITH_IMM;
                                wc->ex.imm_data = immdt_imm(pkt);
index 94920a5..b147f22 100644 (file)
@@ -493,8 +493,9 @@ config TI_SCI_INTA_IRQCHIP
          TI System Controller, say Y here. Otherwise, say N.
 
 config TI_PRUSS_INTC
-       tristate "TI PRU-ICSS Interrupt Controller"
-       depends on ARCH_DAVINCI || SOC_AM33XX || SOC_AM43XX || SOC_DRA7XX || ARCH_KEYSTONE || ARCH_K3
+       tristate
+       depends on TI_PRUSS
+       default TI_PRUSS
        select IRQ_DOMAIN
        help
          This enables support for the PRU-ICSS Local Interrupt Controller
index 5f5eb88..25c9a9c 100644 (file)
@@ -167,7 +167,7 @@ static void bcm2836_arm_irqchip_handle_ipi(struct irq_desc *desc)
        chained_irq_exit(chip, desc);
 }
 
-static void bcm2836_arm_irqchip_ipi_eoi(struct irq_data *d)
+static void bcm2836_arm_irqchip_ipi_ack(struct irq_data *d)
 {
        int cpu = smp_processor_id();
 
@@ -195,7 +195,7 @@ static struct irq_chip bcm2836_arm_irqchip_ipi = {
        .name           = "IPI",
        .irq_mask       = bcm2836_arm_irqchip_dummy_op,
        .irq_unmask     = bcm2836_arm_irqchip_dummy_op,
-       .irq_eoi        = bcm2836_arm_irqchip_ipi_eoi,
+       .irq_ack        = bcm2836_arm_irqchip_ipi_ack,
        .ipi_send_mask  = bcm2836_arm_irqchip_ipi_send_mask,
 };
 
index 9ed1bc4..09b91b8 100644 (file)
@@ -142,8 +142,8 @@ static void liointc_resume(struct irq_chip_generic *gc)
 
 static const char * const parent_names[] = {"int0", "int1", "int2", "int3"};
 
-int __init liointc_of_init(struct device_node *node,
-                               struct device_node *parent)
+static int __init liointc_of_init(struct device_node *node,
+                                 struct device_node *parent)
 {
        struct irq_chip_generic *gc;
        struct irq_domain *domain;
index 95d4fd8..0bbb0b2 100644 (file)
@@ -197,6 +197,13 @@ static int mips_cpu_ipi_alloc(struct irq_domain *domain, unsigned int virq,
                if (ret)
                        return ret;
 
+               ret = irq_domain_set_hwirq_and_chip(domain->parent, virq + i, hwirq,
+                                                   &mips_mt_cpu_irq_controller,
+                                                   NULL);
+
+               if (ret)
+                       return ret;
+
                ret = irq_set_irq_type(virq + i, IRQ_TYPE_LEVEL_HIGH);
                if (ret)
                        return ret;
index 0aa50d0..fbb3544 100644 (file)
@@ -66,7 +66,7 @@ static int sl28cpld_intc_probe(struct platform_device *pdev)
        irqchip->chip.num_regs = 1;
        irqchip->chip.status_base = base + INTC_IP;
        irqchip->chip.mask_base = base + INTC_IE;
-       irqchip->chip.mask_invert = true,
+       irqchip->chip.mask_invert = true;
        irqchip->chip.ack_base = base + INTC_IP;
 
        return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev),
index c1bcac7..28ddcaa 100644 (file)
@@ -844,11 +844,10 @@ static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
        rqd.ppa_addr = generic_to_dev_addr(dev, ppa);
 
        ret = nvm_submit_io_sync_raw(dev, &rqd);
+       __free_page(page);
        if (ret)
                return ret;
 
-       __free_page(page);
-
        return rqd.error;
 }
 
index 8c87471..5a55617 100644 (file)
@@ -1481,9 +1481,9 @@ static int crypt_alloc_req_skcipher(struct crypt_config *cc,
 static int crypt_alloc_req_aead(struct crypt_config *cc,
                                 struct convert_context *ctx)
 {
-       if (!ctx->r.req) {
-               ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
-               if (!ctx->r.req)
+       if (!ctx->r.req_aead) {
+               ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
+               if (!ctx->r.req_aead)
                        return -ENOMEM;
        }
 
index 81df019..b64fede 100644 (file)
@@ -257,8 +257,9 @@ struct dm_integrity_c {
        bool journal_uptodate;
        bool just_formatted;
        bool recalculate_flag;
-       bool fix_padding;
        bool discard;
+       bool fix_padding;
+       bool legacy_recalculate;
 
        struct alg_spec internal_hash_alg;
        struct alg_spec journal_crypt_alg;
@@ -386,6 +387,14 @@ static int dm_integrity_failed(struct dm_integrity_c *ic)
        return READ_ONCE(ic->failed);
 }
 
+static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
+{
+       if ((ic->internal_hash_alg.key || ic->journal_mac_alg.key) &&
+           !ic->legacy_recalculate)
+               return true;
+       return false;
+}
+
 static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
                                          unsigned j, unsigned char seq)
 {
@@ -3140,6 +3149,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
                arg_count += !!ic->journal_crypt_alg.alg_string;
                arg_count += !!ic->journal_mac_alg.alg_string;
                arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
+               arg_count += ic->legacy_recalculate;
                DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
                       ic->tag_size, ic->mode, arg_count);
                if (ic->meta_dev)
@@ -3163,6 +3173,8 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
                }
                if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
                        DMEMIT(" fix_padding");
+               if (ic->legacy_recalculate)
+                       DMEMIT(" legacy_recalculate");
 
 #define EMIT_ALG(a, n)                                                 \
                do {                                                    \
@@ -3792,7 +3804,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
        unsigned extra_args;
        struct dm_arg_set as;
        static const struct dm_arg _args[] = {
-               {0, 15, "Invalid number of feature args"},
+               {0, 16, "Invalid number of feature args"},
        };
        unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
        bool should_write_sb;
@@ -3940,6 +3952,8 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
                        ic->discard = true;
                } else if (!strcmp(opt_string, "fix_padding")) {
                        ic->fix_padding = true;
+               } else if (!strcmp(opt_string, "legacy_recalculate")) {
+                       ic->legacy_recalculate = true;
                } else {
                        r = -EINVAL;
                        ti->error = "Invalid argument";
@@ -4235,6 +4249,20 @@ try_smaller_buffer:
                        r = -ENOMEM;
                        goto bad;
                }
+       } else {
+               if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
+                       ti->error = "Recalculate can only be specified with internal_hash";
+                       r = -EINVAL;
+                       goto bad;
+               }
+       }
+
+       if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
+           le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors &&
+           dm_integrity_disable_recalculate(ic)) {
+               ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\"";
+               r = -EOPNOTSUPP;
+               goto bad;
        }
 
        ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
index 188f412..4acf234 100644 (file)
@@ -363,14 +363,23 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
 {
        int r;
        dev_t dev;
+       unsigned int major, minor;
+       char dummy;
        struct dm_dev_internal *dd;
        struct dm_table *t = ti->table;
 
        BUG_ON(!t);
 
-       dev = dm_get_dev_t(path);
-       if (!dev)
-               return -ENODEV;
+       if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
+               /* Extract the major/minor numbers */
+               dev = MKDEV(major, minor);
+               if (MAJOR(dev) != major || MINOR(dev) != minor)
+                       return -EOVERFLOW;
+       } else {
+               dev = dm_get_dev_t(path);
+               if (!dev)
+                       return -ENODEV;
+       }
 
        dd = find_device(&t->devices, dev);
        if (!dd) {
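
The dm-table hunk above teaches dm_get_device() to accept a bare "major:minor" string, rejecting values that do not survive the round trip through dev_t. A standalone sketch of that parse-and-validate step; the MKDEV/MAJOR/MINOR stand-ins below are simplified for illustration, not the kernel's real macros:

    #include <stdio.h>
    #include <errno.h>

    /* Simplified stand-ins for the kernel's dev_t and MKDEV/MAJOR/MINOR. */
    typedef unsigned int devno_t;
    #define MKDEV_X(ma, mi) (((devno_t)(ma) << 20) | (mi))
    #define MAJOR_X(dev)    ((unsigned int)((dev) >> 20))
    #define MINOR_X(dev)    ((unsigned int)((dev) & 0xfffff))

    /* Parse "major:minor"; 0 on success, -EINVAL/-EOVERFLOW otherwise. */
    static int parse_devno(const char *path, devno_t *out)
    {
            unsigned int major, minor;
            char dummy;

            if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) != 2)
                    return -EINVAL;         /* not a bare "major:minor" string */

            *out = MKDEV_X(major, minor);
            if (MAJOR_X(*out) != major || MINOR_X(*out) != minor)
                    return -EOVERFLOW;      /* numbers do not fit the encoding */

            return 0;
    }

    int main(void)
    {
            devno_t dev;

            printf("\"8:16\"     -> %d\n", parse_devno("8:16", &dev));
            printf("\"/dev/sda\" -> %d\n", parse_devno("/dev/sda", &dev));
            return 0;
    }

A non-numeric path fails the sscanf() match, which in the real code means falling back to the normal device lookup.
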
index ca40942..0438445 100644 (file)
@@ -639,8 +639,10 @@ static void md_submit_flush_data(struct work_struct *ws)
         * could wait for this and below md_handle_request could wait for those
         * bios because of suspend check
         */
+       spin_lock_irq(&mddev->lock);
        mddev->prev_flush_start = mddev->start_flush;
        mddev->flush_bio = NULL;
+       spin_unlock_irq(&mddev->lock);
        wake_up(&mddev->sb_wait);
 
        if (bio->bi_iter.bi_size == 0) {
index 3a94715..ea6f8ee 100644 (file)
@@ -10,5 +10,6 @@ obj-$(CONFIG_CEC_MESON_AO)    += meson/
 obj-$(CONFIG_CEC_SAMSUNG_S5P)  += s5p/
 obj-$(CONFIG_CEC_SECO)         += seco/
 obj-$(CONFIG_CEC_STI)          += sti/
+obj-$(CONFIG_CEC_STM32)                += stm32/
 obj-$(CONFIG_CEC_TEGRA)                += tegra/
 
index 96d3b2b..3f61f58 100644 (file)
@@ -118,8 +118,7 @@ static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
                                return -EINVAL;
                }
        } else {
-               length = (b->memory == VB2_MEMORY_USERPTR ||
-                         b->memory == VB2_MEMORY_DMABUF)
+               length = (b->memory == VB2_MEMORY_USERPTR)
                        ? b->length : vb->planes[0].length;
 
                if (b->bytesused > length)
index eb7b6f0..58ca47e 100644 (file)
@@ -772,14 +772,8 @@ int ccs_pll_calculate(struct device *dev, const struct ccs_pll_limits *lim,
 
        switch (pll->bus_type) {
        case CCS_PLL_BUS_TYPE_CSI2_DPHY:
-               /* CSI transfers 2 bits per clock per lane; thus times 2 */
-               op_sys_clk_freq_hz_sdr = pll->link_freq * 2
-                       * (pll->flags & CCS_PLL_FLAG_LANE_SPEED_MODEL ?
-                          1 : pll->csi2.lanes);
-               break;
        case CCS_PLL_BUS_TYPE_CSI2_CPHY:
-               op_sys_clk_freq_hz_sdr =
-                       pll->link_freq
+               op_sys_clk_freq_hz_sdr = pll->link_freq * 2
                        * (pll->flags & CCS_PLL_FLAG_LANE_SPEED_MODEL ?
                           1 : pll->csi2.lanes);
                break;
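
The ccs-pll hunk above folds the C-PHY case into the D-PHY one, so both bus types compute op_sys_clk_freq_hz_sdr as link_freq * 2, multiplied by the lane count unless the per-lane speed model is in use. A quick numeric sketch with made-up figures:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Made-up example values; real ones come from the sensor's PLL setup. */
            uint64_t link_freq = 400000000ULL;      /* 400 MHz */
            unsigned int lanes = 4;
            int lane_speed_model = 0;               /* 0: clock covers all lanes */

            /* The hunk's formula: link_freq * 2, times lanes unless per-lane. */
            uint64_t op_sys_clk_freq_hz_sdr =
                    link_freq * 2 * (lane_speed_model ? 1 : lanes);

            printf("op_sys_clk_freq_hz_sdr = %llu Hz\n",
                   (unsigned long long)op_sys_clk_freq_hz_sdr);
            return 0;
    }

With these numbers the SDR OP system clock comes out at 3.2 GHz.
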
index 9a6097b..6555bd4 100644 (file)
@@ -152,7 +152,7 @@ static int ccs_data_parse_version(struct bin_container *bin,
        vv->version_major = ((u16)v->static_data_version_major[0] << 8) +
                v->static_data_version_major[1];
        vv->version_minor = ((u16)v->static_data_version_minor[0] << 8) +
-               v->static_data_version_major[1];
+               v->static_data_version_minor[1];
        vv->date_year =  ((u16)v->year[0] << 8) + v->year[1];
        vv->date_month = v->month;
        vv->date_day = v->day;
index 36e354e..6cada8a 100644 (file)
@@ -302,7 +302,7 @@ static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
        if (!q->sensor)
                return -ENODEV;
 
-       freq = v4l2_get_link_rate(q->sensor->ctrl_handler, bpp, lanes);
+       freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes);
        if (freq < 0) {
                dev_err(dev, "error %lld, invalid link_freq\n", freq);
                return freq;
index bdd293f..7233a73 100644 (file)
@@ -349,8 +349,10 @@ static void venus_core_shutdown(struct platform_device *pdev)
 {
        struct venus_core *core = platform_get_drvdata(pdev);
 
+       pm_runtime_get_sync(core->dev);
        venus_shutdown(core);
        venus_firmware_deinit(core);
+       pm_runtime_put_sync(core->dev);
 }
 
 static __maybe_unused int venus_runtime_suspend(struct device *dev)
index 98bff76..e48d666 100644 (file)
@@ -654,7 +654,7 @@ static int rvin_parallel_parse_of(struct rvin_dev *vin)
 out:
        fwnode_handle_put(fwnode);
 
-       return 0;
+       return ret;
 }
 
 static int rvin_parallel_init(struct rvin_dev *vin)
index be8f275..1524dc0 100644 (file)
@@ -320,7 +320,7 @@ again:
                                data->body);
                        spin_lock(&data->keylock);
                        if (scancode) {
-                               delay = nsecs_to_jiffies(dev->timeout) +
+                               delay = usecs_to_jiffies(dev->timeout) +
                                        msecs_to_jiffies(100);
                                mod_timer(&data->rx_timeout, jiffies + delay);
                        } else {
index a905113..0c62295 100644 (file)
@@ -1551,7 +1551,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
        rdev->s_rx_carrier_range = ite_set_rx_carrier_range;
        /* FIFO threshold is 17 bytes, so 17 * 8 samples minimum */
        rdev->min_timeout = 17 * 8 * ITE_BAUDRATE_DIVISOR *
-                           itdev->params.sample_period;
+                           itdev->params.sample_period / 1000;
        rdev->timeout = IR_DEFAULT_TIMEOUT;
        rdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
        rdev->rx_resolution = ITE_BAUDRATE_DIVISOR *
index 1d811e5..1fd62c1 100644 (file)
@@ -737,7 +737,7 @@ static unsigned int repeat_period(int protocol)
 void rc_repeat(struct rc_dev *dev)
 {
        unsigned long flags;
-       unsigned int timeout = nsecs_to_jiffies(dev->timeout) +
+       unsigned int timeout = usecs_to_jiffies(dev->timeout) +
                msecs_to_jiffies(repeat_period(dev->last_protocol));
        struct lirc_scancode sc = {
                .scancode = dev->last_scancode, .rc_proto = dev->last_protocol,
@@ -855,7 +855,7 @@ void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u64 scancode,
        ir_do_keydown(dev, protocol, scancode, keycode, toggle);
 
        if (dev->keypressed) {
-               dev->keyup_jiffies = jiffies + nsecs_to_jiffies(dev->timeout) +
+               dev->keyup_jiffies = jiffies + usecs_to_jiffies(dev->timeout) +
                        msecs_to_jiffies(repeat_period(protocol));
                mod_timer(&dev->timer_keyup, dev->keyup_jiffies);
        }
@@ -1928,6 +1928,8 @@ int rc_register_device(struct rc_dev *dev)
                        goto out_raw;
        }
 
+       dev->registered = true;
+
        rc = device_add(&dev->dev);
        if (rc)
                goto out_rx_free;
@@ -1937,8 +1939,6 @@ int rc_register_device(struct rc_dev *dev)
                 dev->device_name ?: "Unspecified device", path ?: "N/A");
        kfree(path);
 
-       dev->registered = true;
-
        /*
          * once the input device is registered in rc_setup_rx_device,
         * userspace can open the input device and rc_open() will be called
index 8cc28c9..96ae029 100644 (file)
@@ -385,7 +385,7 @@ static irqreturn_t serial_ir_irq_handler(int i, void *blah)
        } while (!(sinp(UART_IIR) & UART_IIR_NO_INT)); /* still pending ? */
 
        mod_timer(&serial_ir.timeout_timer,
-                 jiffies + nsecs_to_jiffies(serial_ir.rcdev->timeout));
+                 jiffies + usecs_to_jiffies(serial_ir.rcdev->timeout));
 
        ir_raw_event_handle(serial_ir.rcdev);
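
Several of the hunks above (ite-cir, rc-main, serial_ir) swap nsecs_to_jiffies(dev->timeout) for usecs_to_jiffies(), which goes along with rc-core keeping the timeout in microseconds rather than nanoseconds. A rough userspace sketch of why the distinction matters, using a made-up HZ value and simplified round-up conversions (the real kernel helpers handle overflow and constant folding differently):

    #include <stdio.h>

    #define HZ 250  /* made-up tick rate; in the kernel this is a build option */

    static unsigned long usecs_to_jiffies_x(unsigned long long us)
    {
            return (unsigned long)((us * HZ + 999999ULL) / 1000000ULL);
    }

    static unsigned long nsecs_to_jiffies_x(unsigned long long ns)
    {
            return (unsigned long)((ns * HZ + 999999999ULL) / 1000000000ULL);
    }

    int main(void)
    {
            unsigned long long timeout_us = 125000; /* 125 ms IR timeout */

            /* Feeding a microsecond value to the nanosecond helper is ~1000x off. */
            printf("usecs helper: %lu jiffies\n", usecs_to_jiffies_x(timeout_us));
            printf("nsecs helper: %lu jiffies\n", nsecs_to_jiffies_x(timeout_us));
            return 0;
    }

With HZ set to 250 this prints 32 jiffies for the correct helper and 1 jiffy for the wrong one.
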
 
index 78007db..133d20e 100644 (file)
@@ -442,7 +442,7 @@ int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
 }
 EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);
 
-s64 v4l2_get_link_rate(struct v4l2_ctrl_handler *handler, unsigned int mul,
+s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
                       unsigned int div)
 {
        struct v4l2_ctrl *ctrl;
@@ -473,4 +473,4 @@ s64 v4l2_get_link_rate(struct v4l2_ctrl_handler *handler, unsigned int mul,
 
        return freq > 0 ? freq : -EINVAL;
 }
-EXPORT_SYMBOL_GPL(v4l2_get_link_rate);
+EXPORT_SYMBOL_GPL(v4l2_get_link_freq);
index 2aa6648..5a491d2 100644 (file)
@@ -1512,6 +1512,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
        struct pcr_handle *handle;
        u32 base, len;
        int ret, i, bar = 0;
+       u8 val;
 
        dev_dbg(&(pcidev->dev),
                ": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
@@ -1577,7 +1578,11 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
        pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
        pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
        pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
-
+       rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
+       if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
+               pcr->aspm_enabled = false;
+       else
+               pcr->aspm_enabled = true;
        pcr->card_inserted = 0;
        pcr->card_removed = 0;
        INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
index 1456eab..69d04ec 100644 (file)
@@ -1037,7 +1037,7 @@ kill_processes:
 
        if (hard_reset) {
                /* Release kernel context */
-               if (hl_ctx_put(hdev->kernel_ctx) == 1)
+               if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1)
                        hdev->kernel_ctx = NULL;
                hl_vm_fini(hdev);
                hl_mmu_fini(hdev);
@@ -1487,6 +1487,15 @@ void hl_device_fini(struct hl_device *hdev)
                }
        }
 
+       /* Disable PCI access from the device F/W so it won't send us additional
+        * interrupts. We disable MSI/MSI-X in the halt_engines function and we
+        * can't have the F/W sending us interrupts after that. We need to
+        * disable the access here because if the device is marked disabled, the
+        * message won't be sent. Also, in case of heartbeat, the device CPU is
+        * marked as disabled so this message won't be sent.
+        */
+       hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);
+
        /* Mark device as disabled */
        hdev->disabled = true;
 
index 20f77f5..c9a1298 100644 (file)
@@ -402,6 +402,10 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
        }
        counters->rx_throughput = result;
 
+       memset(&pkt, 0, sizeof(pkt));
+       pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
+                       CPUCP_PKT_CTL_OPCODE_SHIFT);
+
        /* Fetch PCI tx counter */
        pkt.index = cpu_to_le32(cpucp_pcie_throughput_tx);
        rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
@@ -414,6 +418,7 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
        counters->tx_throughput = result;
 
        /* Fetch PCI replay counter */
+       memset(&pkt, 0, sizeof(pkt));
        pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_REPLAY_CNT_GET <<
                        CPUCP_PKT_CTL_OPCODE_SHIFT);
 
index e0d7f5f..60e16dc 100644 (file)
@@ -2182,6 +2182,7 @@ void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
 int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr);
 int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
                        struct hl_mmu_hop_info *hops);
+bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr);
 
 int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
                                void __iomem *dst, u32 src_offset, u32 size);
index 12efbd9..d25892d 100644 (file)
@@ -133,6 +133,8 @@ static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
 
        hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
                                        &hw_idle.busy_engines_mask_ext, NULL);
+       hw_idle.busy_engines_mask =
+                       lower_32_bits(hw_idle.busy_engines_mask_ext);
 
        return copy_to_user(out, &hw_idle,
                min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
index cbe9da4..5d4fbdc 100644 (file)
@@ -886,8 +886,10 @@ static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
 {
        struct hl_device *hdev = ctx->hdev;
        u64 next_vaddr, i;
+       bool is_host_addr;
        u32 page_size;
 
+       is_host_addr = !hl_is_dram_va(hdev, vaddr);
        page_size = phys_pg_pack->page_size;
        next_vaddr = vaddr;
 
@@ -900,9 +902,13 @@ static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
                /*
                 * unmapping on Palladium can be really long, so avoid a CPU
                 * soft lockup bug by sleeping a little between unmapping pages
+                *
+                * In addition, when unmapping host memory we pass through
+                * the Linux kernel to unpin the pages and that takes a long
+                * time. Therefore, sleep every 32K pages to avoid a soft lockup.
                 */
-               if (hdev->pldm)
-                       usleep_range(500, 1000);
+               if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
+                       usleep_range(50, 200);
        }
 }
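
The habanalabs hunk above yields inside long host-memory unmaps by calling usleep_range() once every 32K pages, via the (i & 0x7FFF) == 0 test. A tiny illustration of that bitmask-based "every Nth iteration" pattern (page count and loop body are invented):

    #include <stdio.h>

    int main(void)
    {
            const unsigned long npages = 200000;    /* invented page count */
            unsigned long i, yields = 0;

            for (i = 0; i < npages; i++) {
                    /* ... unmap page i ... */

                    /* 0x7FFF mask: true once every 32768 iterations, starting at 0. */
                    if ((i & 0x7FFF) == 0)
                            yields++;       /* where the driver sleeps briefly */
            }
            printf("yielded %lu times over %lu pages\n", yields, npages);
            return 0;
    }
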
 
index 33ae953..28a4638 100644 (file)
@@ -9,7 +9,7 @@
 
 #include "habanalabs.h"
 
-static bool is_dram_va(struct hl_device *hdev, u64 virt_addr)
+bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr)
 {
        struct asic_fixed_properties *prop = &hdev->asic_prop;
 
@@ -156,7 +156,7 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
        if (!hdev->mmu_enable)
                return 0;
 
-       is_dram_addr = is_dram_va(hdev, virt_addr);
+       is_dram_addr = hl_is_dram_va(hdev, virt_addr);
 
        if (is_dram_addr)
                mmu_prop = &prop->dmmu;
@@ -236,7 +236,7 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
        if (!hdev->mmu_enable)
                return 0;
 
-       is_dram_addr = is_dram_va(hdev, virt_addr);
+       is_dram_addr = hl_is_dram_va(hdev, virt_addr);
 
        if (is_dram_addr)
                mmu_prop = &prop->dmmu;
index 2ce6ea8..06d8a44 100644 (file)
@@ -467,8 +467,16 @@ static void hl_mmu_v1_fini(struct hl_device *hdev)
 {
        /* MMU H/W fini was already done in device hw_fini() */
 
-       kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
-       gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
+       if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.hr.mmu_shadow_hop0)) {
+               kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
+               gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
+       }
+
+       /* Make sure that if we arrive here again without init having been
+        * called, we won't cause a kernel panic. This can happen, for example,
+        * if we fail at certain points during the hard reset code.
+        */
+       hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
 }
 
 /**
index 8c09e44..b328dda 100644 (file)
@@ -4002,7 +4002,8 @@ static int gaudi_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
                        VM_DONTCOPY | VM_NORESERVE;
 
-       rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
+       rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
+                               (dma_addr - HOST_PHYS_BASE), size);
        if (rc)
                dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
 
index b8b4aa6..63679a7 100644 (file)
@@ -2719,7 +2719,8 @@ static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
                        VM_DONTCOPY | VM_NORESERVE;
 
-       rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
+       rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
+                               (dma_addr - HOST_PHYS_BASE), size);
        if (rc)
                dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
 
index de7cb03..002426e 100644 (file)
@@ -384,8 +384,10 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
                     "merging was advertised but not possible");
        blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));
 
-       if (mmc_card_mmc(card))
+       if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
                block_size = card->ext_csd.data_sector_size;
+               WARN_ON(block_size != 512 && block_size != 4096);
+       }
 
        blk_queue_logical_block_size(mq->queue, block_size);
        /*
index bbf3496..f9780c6 100644 (file)
@@ -314,11 +314,7 @@ err_clk:
 
 static void sdhci_brcmstb_shutdown(struct platform_device *pdev)
 {
-       int ret;
-
-       ret = sdhci_pltfm_unregister(pdev);
-       if (ret)
-               dev_err(&pdev->dev, "failed to shutdown\n");
+       sdhci_pltfm_suspend(&pdev->dev);
 }
 
 MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
index 4b67379..d90020e 100644 (file)
@@ -16,6 +16,8 @@
 
 #include "sdhci-pltfm.h"
 
+#define SDHCI_DWCMSHC_ARG2_STUFF       GENMASK(31, 16)
+
 /* DWCMSHC specific Mode Select value */
 #define DWCMSHC_CTRL_HS400             0x7
 
@@ -49,6 +51,29 @@ static void dwcmshc_adma_write_desc(struct sdhci_host *host, void **desc,
        sdhci_adma_write_desc(host, desc, addr, len, cmd);
 }
 
+static void dwcmshc_check_auto_cmd23(struct mmc_host *mmc,
+                                    struct mmc_request *mrq)
+{
+       struct sdhci_host *host = mmc_priv(mmc);
+
+       /*
+        * Whether or not V4 is enabled, the ARGUMENT2 register is a 32-bit
+        * block count register which doesn't support the stuff bits of the
+        * CMD23 argument on the dwcmshc host controller.
+        */
+       if (mrq->sbc && (mrq->sbc->arg & SDHCI_DWCMSHC_ARG2_STUFF))
+               host->flags &= ~SDHCI_AUTO_CMD23;
+       else
+               host->flags |= SDHCI_AUTO_CMD23;
+}
+
+static void dwcmshc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+       dwcmshc_check_auto_cmd23(mmc, mrq);
+
+       sdhci_request(mmc, mrq);
+}
+
 static void dwcmshc_set_uhs_signaling(struct sdhci_host *host,
                                      unsigned int timing)
 {
@@ -133,6 +158,8 @@ static int dwcmshc_probe(struct platform_device *pdev)
 
        sdhci_get_of_property(pdev);
 
+       host->mmc_host_ops.request = dwcmshc_request;
+
        err = sdhci_add_host(host);
        if (err)
                goto err_clk;
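
The sdhci-of-dwcmshc hunk above disables auto-CMD23 whenever the CMD23 argument has any of bits 31:16 set, since ARGUMENT2 on this controller only carries a 16-bit block count. A standalone sketch of that mask check; the flag bit and the GENMASK stand-in below are simplified, not the real sdhci definitions:

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in for GENMASK(31, 16): the CMD23 "stuff" bits. */
    #define ARG2_STUFF_MASK 0xffff0000u

    #define AUTO_CMD23      (1u << 0)       /* simplified host flag */

    /* Decide whether auto-CMD23 may be used for a given CMD23 argument. */
    static unsigned int adjust_flags(unsigned int flags, uint32_t cmd23_arg)
    {
            if (cmd23_arg & ARG2_STUFF_MASK)
                    flags &= ~AUTO_CMD23;   /* stuff bits set: issue CMD23 manually */
            else
                    flags |= AUTO_CMD23;    /* plain block count: auto-CMD23 is fine */
            return flags;
    }

    int main(void)
    {
            printf("arg 0x00000200 -> flags 0x%x\n", adjust_flags(0, 0x00000200));
            printf("arg 0x80000200 -> flags 0x%x\n", adjust_flags(0, 0x80000200));
            return 0;
    }
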
index c67611f..d19eef5 100644 (file)
@@ -168,7 +168,12 @@ static void xenon_reset_exit(struct sdhci_host *host,
        /* Disable tuning request and auto-retuning again */
        xenon_retune_setup(host);
 
-       xenon_set_acg(host, true);
+       /*
+        * The ACG should be turned off at early init time, in order
+        * to solve possible issues with the 1.8V regulator stabilization.
+        * The feature is enabled at a later stage.
+        */
+       xenon_set_acg(host, false);
 
        xenon_set_sdclk_off_idle(host, sdhc_id, false);
 
index 5cdf05b..3fa8c22 100644 (file)
@@ -1615,7 +1615,7 @@ static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
        /* Extract interleaved payload data and ECC bits */
        for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
                if (buf)
-                       nand_extract_bits(buf, step * eccsize, tmp_buf,
+                       nand_extract_bits(buf, step * eccsize * 8, tmp_buf,
                                          src_bit_off, eccsize * 8);
                src_bit_off += eccsize * 8;
 
index fdb112e..a304fda 100644 (file)
@@ -579,7 +579,7 @@ static int ebu_nand_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct ebu_nand_controller *ebu_host;
        struct nand_chip *nand;
-       struct mtd_info *mtd = NULL;
+       struct mtd_info *mtd;
        struct resource *res;
        char *resname;
        int ret;
@@ -647,12 +647,13 @@ static int ebu_nand_probe(struct platform_device *pdev)
               ebu_host->ebu + EBU_ADDR_SEL(cs));
 
        nand_set_flash_node(&ebu_host->chip, dev->of_node);
+
+       mtd = nand_to_mtd(&ebu_host->chip);
        if (!mtd->name) {
                dev_err(ebu_host->dev, "NAND label property is mandatory\n");
                return -EINVAL;
        }
 
-       mtd = nand_to_mtd(&ebu_host->chip);
        mtd->dev.parent = dev;
        ebu_host->dev = dev;
 
index f2b9250..0750121 100644 (file)
@@ -2210,6 +2210,9 @@ static int ns_attach_chip(struct nand_chip *chip)
 {
        unsigned int eccsteps, eccbytes;
 
+       chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+       chip->ecc.algo = bch ? NAND_ECC_ALGO_BCH : NAND_ECC_ALGO_HAMMING;
+
        if (!bch)
                return 0;
 
@@ -2233,8 +2236,6 @@ static int ns_attach_chip(struct nand_chip *chip)
                return -EINVAL;
        }
 
-       chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
-       chip->ecc.algo = NAND_ECC_ALGO_BCH;
        chip->ecc.size = 512;
        chip->ecc.strength = bch;
        chip->ecc.bytes = eccbytes;
@@ -2273,8 +2274,6 @@ static int __init ns_init_module(void)
        nsmtd       = nand_to_mtd(chip);
        nand_set_controller_data(chip, (void *)ns);
 
-       chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
-       chip->ecc.algo   = NAND_ECC_ALGO_HAMMING;
        /* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
        /* and 'badblocks' parameters to work */
        chip->options   |= NAND_SKIP_BBTSCAN;
index fbb9955..2c3e65c 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/jiffies.h>
 #include <linux/sched.h>
 #include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-bch.h>
 #include <linux/mtd/rawnand.h>
 #include <linux/mtd/partitions.h>
 #include <linux/omap-dma.h>
@@ -1866,18 +1867,19 @@ static const struct mtd_ooblayout_ops omap_ooblayout_ops = {
 static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
                                 struct mtd_oob_region *oobregion)
 {
-       struct nand_chip *chip = mtd_to_nand(mtd);
+       struct nand_device *nand = mtd_to_nanddev(mtd);
+       const struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
        int off = BADBLOCK_MARKER_LENGTH;
 
-       if (section >= chip->ecc.steps)
+       if (section >= engine_conf->nsteps)
                return -ERANGE;
 
        /*
         * When SW correction is employed, one OMAP specific marker byte is
         * reserved after each ECC step.
         */
-       oobregion->offset = off + (section * (chip->ecc.bytes + 1));
-       oobregion->length = chip->ecc.bytes;
+       oobregion->offset = off + (section * (engine_conf->code_size + 1));
+       oobregion->length = engine_conf->code_size;
 
        return 0;
 }
@@ -1885,7 +1887,8 @@ static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
 static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
                                  struct mtd_oob_region *oobregion)
 {
-       struct nand_chip *chip = mtd_to_nand(mtd);
+       struct nand_device *nand = mtd_to_nanddev(mtd);
+       const struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
        int off = BADBLOCK_MARKER_LENGTH;
 
        if (section)
@@ -1895,7 +1898,7 @@ static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
         * When SW correction is employed, one OMAP specific marker byte is
         * reserved after each ECC step.
         */
-       off += ((chip->ecc.bytes + 1) * chip->ecc.steps);
+       off += ((engine_conf->code_size + 1) * engine_conf->nsteps);
        if (off >= mtd->oobsize)
                return -ERANGE;
 
index 8ea545b..61d932c 100644 (file)
@@ -343,6 +343,7 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
                                      const struct nand_page_io_req *req)
 {
        struct nand_device *nand = spinand_to_nand(spinand);
+       struct mtd_info *mtd = spinand_to_mtd(spinand);
        struct spi_mem_dirmap_desc *rdesc;
        unsigned int nbytes = 0;
        void *buf = NULL;
@@ -382,9 +383,16 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
                memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
                       req->datalen);
 
-       if (req->ooblen)
-               memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
-                      req->ooblen);
+       if (req->ooblen) {
+               if (req->mode == MTD_OPS_AUTO_OOB)
+                       mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
+                                                   spinand->oobbuf,
+                                                   req->ooboffs,
+                                                   req->ooblen);
+               else
+                       memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
+                              req->ooblen);
+       }
 
        return 0;
 }
index 3ae884c..867f6be 100644 (file)
@@ -263,7 +263,7 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
 {
        struct can_priv *priv = netdev_priv(dev);
        struct can_ctrlmode cm = {.flags = priv->ctrlmode};
-       struct can_berr_counter bec;
+       struct can_berr_counter bec = { };
        enum can_state state = priv->state;
 
        if (priv->do_get_state)
index d53485c..1857aa9 100644 (file)
@@ -510,15 +510,19 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
        /* Find our integrated MDIO bus node */
        dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
        priv->master_mii_bus = of_mdio_find_bus(dn);
-       if (!priv->master_mii_bus)
+       if (!priv->master_mii_bus) {
+               of_node_put(dn);
                return -EPROBE_DEFER;
+       }
 
        get_device(&priv->master_mii_bus->dev);
        priv->master_mii_dn = dn;
 
        priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
-       if (!priv->slave_mii_bus)
+       if (!priv->slave_mii_bus) {
+               of_node_put(dn);
                return -ENOMEM;
+       }
 
        priv->slave_mii_bus->priv = priv;
        priv->slave_mii_bus->name = "sf2 slave mii";
index 37a7342..c87d445 100644 (file)
@@ -1183,6 +1183,20 @@ static const struct ksz_chip_data ksz8795_switch_chips[] = {
                .port_cnt = 5,          /* total cpu and user ports */
        },
        {
+               /*
+                * WARNING
+                * =======
+                * KSZ8794 is similar to KSZ8795, except the port map
+                * contains a gap between external and CPU ports, the
+                * port map is NOT contiguous. The per-port register
+                * map is shifted accordingly too, i.e. registers at
+                * offset 0x40 are NOT used on KSZ8794 and they ARE
+                * used on KSZ8795 for external port 3.
+                *           external  cpu
+                * KSZ8794   0,1,2      4
+                * KSZ8795   0,1,2,3    4
+                * KSZ8765   0,1,2,3    4
+                */
                .chip_id = 0x8794,
                .dev_name = "KSZ8794",
                .num_vlans = 4096,
@@ -1216,9 +1230,13 @@ static int ksz8795_switch_init(struct ksz_device *dev)
                        dev->num_vlans = chip->num_vlans;
                        dev->num_alus = chip->num_alus;
                        dev->num_statics = chip->num_statics;
-                       dev->port_cnt = chip->port_cnt;
+                       dev->port_cnt = fls(chip->cpu_ports);
+                       dev->cpu_port = fls(chip->cpu_ports) - 1;
+                       dev->phy_port_cnt = dev->port_cnt - 1;
                        dev->cpu_ports = chip->cpu_ports;
-
+                       dev->host_mask = chip->cpu_ports;
+                       dev->port_mask = (BIT(dev->phy_port_cnt) - 1) |
+                                        chip->cpu_ports;
                        break;
                }
        }
@@ -1227,17 +1245,9 @@ static int ksz8795_switch_init(struct ksz_device *dev)
        if (!dev->cpu_ports)
                return -ENODEV;
 
-       dev->port_mask = BIT(dev->port_cnt) - 1;
-       dev->port_mask |= dev->host_mask;
-
        dev->reg_mib_cnt = KSZ8795_COUNTER_NUM;
        dev->mib_cnt = ARRAY_SIZE(mib_names);
 
-       dev->phy_port_cnt = dev->port_cnt - 1;
-
-       dev->cpu_port = dev->port_cnt - 1;
-       dev->host_mask = BIT(dev->cpu_port);
-
        dev->ports = devm_kzalloc(dev->dev,
                                  dev->port_cnt * sizeof(struct ksz_port),
                                  GFP_KERNEL);
index 4e0619c..a7e5ac6 100644 (file)
@@ -385,7 +385,7 @@ int ksz_switch_register(struct ksz_device *dev,
                gpiod_set_value_cansleep(dev->reset_gpio, 1);
                usleep_range(10000, 12000);
                gpiod_set_value_cansleep(dev->reset_gpio, 0);
-               usleep_range(100, 1000);
+               msleep(100);
        }
 
        mutex_init(&dev->dev_mutex);
@@ -419,7 +419,7 @@ int ksz_switch_register(struct ksz_device *dev,
                                if (of_property_read_u32(port, "reg",
                                                         &port_num))
                                        continue;
-                               if (port_num >= dev->port_cnt)
+                               if (!(dev->port_mask & BIT(port_num)))
                                        return -EINVAL;
                                of_get_phy_mode(port,
                                                &dev->ports[port_num].interface);
index e5cfbe1..19dc7dc 100644 (file)
@@ -1158,11 +1158,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
 #endif
        }
        if (!n || !n->dev)
-               goto free_sk;
+               goto free_dst;
 
        ndev = n->dev;
-       if (!ndev)
-               goto free_dst;
        if (is_vlan_dev(ndev))
                ndev = vlan_dev_real_dev(ndev);
 
@@ -1250,7 +1248,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
 free_csk:
        chtls_sock_release(&csk->kref);
 free_dst:
-       neigh_release(n);
+       if (n)
+               neigh_release(n);
        dst_release(dst);
 free_sk:
        inet_csk_prepare_forced_close(newsk);
index c527f4e..0602d5d 100644 (file)
@@ -462,6 +462,11 @@ struct bufdesc_ex {
  */
 #define FEC_QUIRK_CLEAR_SETUP_MII      (1 << 17)
 
+/* Some link partners do not tolerate the momentary reset of the REF_CLK
+ * frequency when the RNCTL register is cleared by hardware reset.
+ */
+#define FEC_QUIRK_NO_HARD_RESET                (1 << 18)
+
 struct bufdesc_prop {
        int qid;
        /* Address of Rx and Tx buffers */
index 04f24c6..9ebdb0e 100644 (file)
@@ -100,7 +100,8 @@ static const struct fec_devinfo fec_imx27_info = {
 static const struct fec_devinfo fec_imx28_info = {
        .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
                  FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
-                 FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII,
+                 FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
+                 FEC_QUIRK_NO_HARD_RESET,
 };
 
 static const struct fec_devinfo fec_imx6q_info = {
@@ -953,7 +954,8 @@ fec_restart(struct net_device *ndev)
         * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
         * instead of reset MAC itself.
         */
-       if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+       if (fep->quirks & FEC_QUIRK_HAS_AVB ||
+           ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
                writel(0, fep->hwp + FEC_ECNTRL);
        } else {
                writel(1, fep->hwp + FEC_ECNTRL);
@@ -2165,9 +2167,9 @@ static int fec_enet_mii_init(struct platform_device *pdev)
        fep->mii_bus->parent = &pdev->dev;
 
        err = of_mdiobus_register(fep->mii_bus, node);
-       of_node_put(node);
        if (err)
                goto err_out_free_mdiobus;
+       of_node_put(node);
 
        mii_cnt++;
 
@@ -2180,6 +2182,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
 err_out_free_mdiobus:
        mdiobus_free(fep->mii_bus);
 err_out:
+       of_node_put(node);
        return err;
 }
 
index 4c4252e..a40af51 100644 (file)
@@ -5017,6 +5017,12 @@ static void ibmvnic_tasklet(struct tasklet_struct *t)
        while (!done) {
                /* Pull all the valid messages off the CRQ */
                while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
+                       /* This barrier makes sure ibmvnic_next_crq()'s
+                        * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
+                        * before ibmvnic_handle_crq()'s
+                        * switch(gen_crq->first) and switch(gen_crq->cmd).
+                        */
+                       dma_rmb();
                        ibmvnic_handle_crq(crq, adapter);
                        crq->generic.first = 0;
                }
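
The ibmvnic hunk above puts a dma_rmb() between reading the CRQ valid/response bit and reading the rest of the entry, so the payload the handler switches on is not observed before the flag that marks it valid. A loose userspace analogue using a C11 acquire fence; the queue entry layout and flag value here are simplified, not the driver's actual types:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CRQ_CMD_RSP 0x80

    /* Simplified stand-in for a device-written queue entry. */
    struct crq_entry {
            _Atomic uint8_t first;  /* valid/response bit, written last by the producer */
            uint8_t cmd;
            uint16_t payload;
    };

    /* Consumer: check the valid bit, fence, then read the payload. */
    static int handle_entry(struct crq_entry *crq)
    {
            if (!(atomic_load_explicit(&crq->first, memory_order_relaxed) & CRQ_CMD_RSP))
                    return 0;       /* nothing new */

            /* Analogous to dma_rmb(): order the flag read before the payload reads. */
            atomic_thread_fence(memory_order_acquire);

            printf("cmd %u payload %u\n", (unsigned)crq->cmd, (unsigned)crq->payload);
            return 1;
    }

    int main(void)
    {
            struct crq_entry e = { .cmd = 3, .payload = 42 };

            /* Producer: fill the payload, then publish the valid bit last. */
            atomic_store_explicit(&e.first, CRQ_CMD_RSP, memory_order_release);
            return !handle_entry(&e);
    }
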
index 21ee564..7efc61a 100644 (file)
@@ -4046,20 +4046,16 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
                goto error_param;
 
        vf = &pf->vf[vf_id];
-       vsi = pf->vsi[vf->lan_vsi_idx];
 
        /* When the VF is resetting wait until it is done.
         * It can take up to 200 milliseconds,
         * but wait for up to 300 milliseconds to be safe.
-        * If the VF is indeed in reset, the vsi pointer has
-        * to show on the newly loaded vsi under pf->vsi[id].
+        * Acquire the VSI pointer only after the VF has been
+        * properly initialized.
         */
        for (i = 0; i < 15; i++) {
-               if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
-                       if (i > 0)
-                               vsi = pf->vsi[vf->lan_vsi_idx];
+               if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
                        break;
-               }
                msleep(20);
        }
        if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
@@ -4068,6 +4064,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
                ret = -EAGAIN;
                goto error_param;
        }
+       vsi = pf->vsi[vf->lan_vsi_idx];
 
        if (is_multicast_ether_addr(mac)) {
                dev_err(&pf->pdev->dev,
index 5672535..fa1e128 100644 (file)
@@ -68,7 +68,9 @@
 #define ICE_INT_NAME_STR_LEN   (IFNAMSIZ + 16)
 #define ICE_AQ_LEN             64
 #define ICE_MBXSQ_LEN          64
-#define ICE_MIN_MSIX           2
+#define ICE_MIN_LAN_TXRX_MSIX  1
+#define ICE_MIN_LAN_OICR_MSIX  1
+#define ICE_MIN_MSIX           (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
 #define ICE_FDIR_MSIX          1
 #define ICE_NO_VSI             0xffff
 #define ICE_VSI_MAP_CONTIG     0
index 9e8e953..69c113a 100644 (file)
@@ -3258,8 +3258,8 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
  */
 static int ice_get_max_txq(struct ice_pf *pf)
 {
-       return min_t(int, num_online_cpus(),
-                    pf->hw.func_caps.common_cap.num_txq);
+       return min3(pf->num_lan_msix, (u16)num_online_cpus(),
+                   (u16)pf->hw.func_caps.common_cap.num_txq);
 }
 
 /**
@@ -3268,8 +3268,8 @@ static int ice_get_max_txq(struct ice_pf *pf)
  */
 static int ice_get_max_rxq(struct ice_pf *pf)
 {
-       return min_t(int, num_online_cpus(),
-                    pf->hw.func_caps.common_cap.num_rxq);
+       return min3(pf->num_lan_msix, (u16)num_online_cpus(),
+                   (u16)pf->hw.func_caps.common_cap.num_rxq);
 }
 
 /**
index 2d27f66..1927295 100644 (file)
@@ -1576,7 +1576,13 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
                       sizeof(struct in6_addr));
                input->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
                input->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass;
-               input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;
+
+               /* if no protocol requested, use IPPROTO_NONE */
+               if (!fsp->m_u.usr_ip6_spec.l4_proto)
+                       input->ip.v6.proto = IPPROTO_NONE;
+               else
+                       input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;
+
                memcpy(input->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
                       sizeof(struct in6_addr));
                memcpy(input->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
index 3df6748..ad9c22a 100644 (file)
@@ -161,8 +161,9 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
 
        switch (vsi->type) {
        case ICE_VSI_PF:
-               vsi->alloc_txq = min_t(int, ice_get_avail_txq_count(pf),
-                                      num_online_cpus());
+               vsi->alloc_txq = min3(pf->num_lan_msix,
+                                     ice_get_avail_txq_count(pf),
+                                     (u16)num_online_cpus());
                if (vsi->req_txq) {
                        vsi->alloc_txq = vsi->req_txq;
                        vsi->num_txq = vsi->req_txq;
@@ -174,8 +175,9 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
                if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
                        vsi->alloc_rxq = 1;
                } else {
-                       vsi->alloc_rxq = min_t(int, ice_get_avail_rxq_count(pf),
-                                              num_online_cpus());
+                       vsi->alloc_rxq = min3(pf->num_lan_msix,
+                                             ice_get_avail_rxq_count(pf),
+                                             (u16)num_online_cpus());
                        if (vsi->req_rxq) {
                                vsi->alloc_rxq = vsi->req_rxq;
                                vsi->num_rxq = vsi->req_rxq;
@@ -184,7 +186,9 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
 
                pf->num_lan_rx = vsi->alloc_rxq;
 
-               vsi->num_q_vectors = max_t(int, vsi->alloc_rxq, vsi->alloc_txq);
+               vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
+                                          max_t(int, vsi->alloc_rxq,
+                                                vsi->alloc_txq));
                break;
        case ICE_VSI_VF:
                vf = &pf->vf[vsi->vf_id];
index 6e251df..06aa375 100644 (file)
@@ -3430,18 +3430,14 @@ static int ice_ena_msix_range(struct ice_pf *pf)
        if (v_actual < v_budget) {
                dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
                         v_budget, v_actual);
-/* 2 vectors each for LAN and RDMA (traffic + OICR), one for flow director */
-#define ICE_MIN_LAN_VECS 2
-#define ICE_MIN_RDMA_VECS 2
-#define ICE_MIN_VECS (ICE_MIN_LAN_VECS + ICE_MIN_RDMA_VECS + 1)
 
-               if (v_actual < ICE_MIN_LAN_VECS) {
+               if (v_actual < ICE_MIN_MSIX) {
                        /* error if we can't get minimum vectors */
                        pci_disable_msix(pf->pdev);
                        err = -ERANGE;
                        goto msix_err;
                } else {
-                       pf->num_lan_msix = ICE_MIN_LAN_VECS;
+                       pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
                }
        }
 
@@ -4884,9 +4880,15 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
                goto err_update_filters;
        }
 
-       /* Add filter for new MAC. If filter exists, just return success */
+       /* Add filter for new MAC. If filter exists, return success */
        status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
        if (status == ICE_ERR_ALREADY_EXISTS) {
+               /* Although this MAC filter is already present in hardware it's
+                * possible in some cases (e.g. bonding) that dev_addr was
+                * modified outside of the driver and needs to be restored back
+                * to this value.
+                */
+               memcpy(netdev->dev_addr, mac, netdev->addr_len);
                netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
                return 0;
        }
index 422f539..2c2de56 100644 (file)
@@ -1924,12 +1924,15 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
                                  ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
                        l4_proto = ip.v4->protocol;
                } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
+                       int ret;
+
                        tunnel |= ICE_TX_CTX_EIPT_IPV6;
                        exthdr = ip.hdr + sizeof(*ip.v6);
                        l4_proto = ip.v6->nexthdr;
-                       if (l4.hdr != exthdr)
-                               ipv6_skip_exthdr(skb, exthdr - skb->data,
-                                                &l4_proto, &frag_off);
+                       ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
+                                              &l4_proto, &frag_off);
+                       if (ret < 0)
+                               return -1;
                }
 
                /* define outer transport */
index 61d331c..831f2f0 100644 (file)
@@ -1675,12 +1675,18 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
        cmd->base.phy_address = hw->phy.addr;
 
        /* advertising link modes */
-       ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
-       ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
-       ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half);
-       ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);
-       ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
-       ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full);
+       if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
+       if (hw->phy.autoneg_advertised & ADVERTISE_10_FULL)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
+       if (hw->phy.autoneg_advertised & ADVERTISE_100_HALF)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half);
+       if (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);
+       if (hw->phy.autoneg_advertised & ADVERTISE_1000_FULL)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
+       if (hw->phy.autoneg_advertised & ADVERTISE_2500_FULL)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full);
 
        /* set autoneg settings */
        if (hw->mac.autoneg == 1) {
@@ -1792,6 +1798,12 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev,
 
        ethtool_convert_link_mode_to_legacy_u32(&advertising,
                                                cmd->link_modes.advertising);
+       /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT.
+        * We have to check this and convert it to ADVERTISE_2500_FULL
+        * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly.
+        */
+       if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full))
+               advertising |= ADVERTISE_2500_FULL;
 
        if (cmd->base.autoneg == AUTONEG_ENABLE) {
                hw->mac.autoneg = 1;
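  The second igc hunk documents a subtle ethtool detail: the legacy u32 advertising word only covers the first 32 link-mode bits, while 2500baseT_Full sits higher up in the ksettings bitmap, so converting the bitmap to u32 silently drops it and the driver re-adds its own 2.5G flag afterwards. A small sketch of why a u32 cannot carry that bit (bit index assumed from current ethtool UAPI headers):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative: link-mode bits >= 32 cannot survive a conversion to a
     * legacy 32-bit advertising mask, so they must be handled separately. */
    int main(void)
    {
            unsigned int bit_2500baseT_Full = 47;  /* assumed UAPI bit index */
            uint64_t modes = 1ULL << bit_2500baseT_Full;
            uint32_t legacy = (uint32_t)modes;     /* high bits are lost */

            printf("kept in u32? %s\n", legacy ? "yes" : "no");
            return 0;
    }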
index bdfa2e2..5ddedc3 100644 (file)
@@ -488,10 +488,11 @@ dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
        dma_addr_t iova;
        u8 *buf;
 
-       buf = napi_alloc_frag(pool->rbsize);
+       buf = napi_alloc_frag(pool->rbsize + OTX2_ALIGN);
        if (unlikely(!buf))
                return -ENOMEM;
 
+       buf = PTR_ALIGN(buf, OTX2_ALIGN);
        iova = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
                                    DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
        if (unlikely(dma_mapping_error(pfvf->dev, iova))) {
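  The octeontx2 hunk over-allocates the receive fragment by one alignment unit and then rounds the pointer up, so the DMA-mapped address is aligned even when napi_alloc_frag() hands back an arbitrary offset. The rounding itself is the usual mask trick; a standalone sketch (the alignment value here is a stand-in, not the driver's OTX2_ALIGN):

    #include <stdint.h>
    #include <stdio.h>

    #define EXAMPLE_ALIGN 128   /* stand-in for the driver's alignment */

    /* Illustrative: round a pointer up to the next EXAMPLE_ALIGN boundary. */
    static void *ptr_align(void *p, uintptr_t align)
    {
            return (void *)(((uintptr_t)p + align - 1) & ~(align - 1));
    }

    int main(void)
    {
            char buf[2 * EXAMPLE_ALIGN];
            void *aligned = ptr_align(buf + 1, EXAMPLE_ALIGN);

            printf("%p -> %p\n", (void *)(buf + 1), aligned);
            return 0;
    }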
index 718f8c0..84e501e 100644 (file)
@@ -273,7 +273,7 @@ int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key
 
        err = devlink_fmsg_binary_pair_nest_start(fmsg, "data");
        if (err)
-               return err;
+               goto free_page;
 
        cmd = mlx5_rsc_dump_cmd_create(mdev, key);
        if (IS_ERR(cmd)) {
index e20c1da..a359c3c 100644 (file)
@@ -167,6 +167,12 @@ static const struct rhashtable_params tuples_nat_ht_params = {
        .min_size = 16 * 1024,
 };
 
+static bool
+mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry)
+{
+       return !!(entry->tuple_nat_node.next);
+}
+
 static int
 mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
 {
@@ -909,13 +915,13 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
 err_insert:
        mlx5_tc_ct_entry_del_rules(ct_priv, entry);
 err_rules:
-       rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
-                              &entry->tuple_nat_node, tuples_nat_ht_params);
+       if (mlx5_tc_ct_entry_has_nat(entry))
+               rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
+                                      &entry->tuple_nat_node, tuples_nat_ht_params);
 err_tuple_nat:
-       if (entry->tuple_node.next)
-               rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
-                                      &entry->tuple_node,
-                                      tuples_ht_params);
+       rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
+                              &entry->tuple_node,
+                              tuples_ht_params);
 err_tuple:
 err_set:
        kfree(entry);
@@ -930,7 +936,7 @@ mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
 {
        mlx5_tc_ct_entry_del_rules(ct_priv, entry);
        mutex_lock(&ct_priv->shared_counter_lock);
-       if (entry->tuple_node.next)
+       if (mlx5_tc_ct_entry_has_nat(entry))
                rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
                                       &entry->tuple_nat_node,
                                       tuples_nat_ht_params);
index 6c5c54b..5cb9365 100644 (file)
@@ -76,7 +76,7 @@ static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
 
 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
 {
-       return NUM_IPSEC_SW_COUNTERS;
+       return priv->ipsec ? NUM_IPSEC_SW_COUNTERS : 0;
 }
 
 static inline MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_sw) {}
@@ -105,7 +105,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
 
 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw)
 {
-       return (mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
+       return (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
 }
 
 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw)
index d20243d..f23c675 100644 (file)
@@ -1151,6 +1151,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
 {
        struct mlx5e_channels new_channels = {};
        bool reset_channels = true;
+       bool opened;
        int err = 0;
 
        mutex_lock(&priv->state_lock);
@@ -1159,22 +1160,24 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
        mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_channels.params,
                                                   trust_state);
 
-       if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
-               priv->channels.params = new_channels.params;
+       opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+       if (!opened)
                reset_channels = false;
-       }
 
        /* Skip if tx_min_inline is the same */
        if (new_channels.params.tx_min_inline_mode ==
            priv->channels.params.tx_min_inline_mode)
                reset_channels = false;
 
-       if (reset_channels)
+       if (reset_channels) {
                err = mlx5e_safe_switch_channels(priv, &new_channels,
                                                 mlx5e_update_trust_state_hw,
                                                 &trust_state);
-       else
+       } else {
                err = mlx5e_update_trust_state_hw(priv, &trust_state);
+               if (!err && !opened)
+                       priv->channels.params = new_channels.params;
+       }
 
        mutex_unlock(&priv->state_lock);
 
index 2e5a069..5e9474d 100644 (file)
@@ -458,12 +458,18 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
                goto out;
        }
 
-       new_channels.params = priv->channels.params;
+       new_channels.params = *cur_params;
        new_channels.params.num_channels = count;
 
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+               struct mlx5e_params old_params;
+
+               old_params = *cur_params;
                *cur_params = new_channels.params;
                err = mlx5e_num_channels_changed(priv);
+               if (err)
+                       *cur_params = old_params;
+
                goto out;
        }
 
index f8619d3..aad3887 100644 (file)
@@ -3685,7 +3685,14 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
        new_channels.params.num_tc = tc ? tc : 1;
 
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+               struct mlx5e_params old_params;
+
+               old_params = priv->channels.params;
                priv->channels.params = new_channels.params;
+               err = mlx5e_num_channels_changed(priv);
+               if (err)
+                       priv->channels.params = old_params;
+
                goto out;
        }
 
@@ -3876,7 +3883,7 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_channels new_channels = {};
-       struct mlx5e_params *old_params;
+       struct mlx5e_params *cur_params;
        int err = 0;
        bool reset;
 
@@ -3889,8 +3896,8 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
                goto out;
        }
 
-       old_params = &priv->channels.params;
-       if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
+       cur_params = &priv->channels.params;
+       if (enable && !MLX5E_GET_PFLAG(cur_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
                netdev_warn(netdev, "can't set LRO with legacy RQ\n");
                err = -EINVAL;
                goto out;
@@ -3898,18 +3905,23 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
 
        reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
 
-       new_channels.params = *old_params;
+       new_channels.params = *cur_params;
        new_channels.params.lro_en = enable;
 
-       if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
-               if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params, NULL) ==
+       if (cur_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
+               if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
                    mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params, NULL))
                        reset = false;
        }
 
        if (!reset) {
-               *old_params = new_channels.params;
+               struct mlx5e_params old_params;
+
+               old_params = *cur_params;
+               *cur_params = new_channels.params;
                err = mlx5e_modify_tirs_lro(priv);
+               if (err)
+                       *cur_params = old_params;
                goto out;
        }
 
@@ -4189,9 +4201,16 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
        }
 
        if (!reset) {
+               unsigned int old_mtu = params->sw_mtu;
+
                params->sw_mtu = new_mtu;
-               if (preactivate)
-                       preactivate(priv, NULL);
+               if (preactivate) {
+                       err = preactivate(priv, NULL);
+                       if (err) {
+                               params->sw_mtu = old_mtu;
+                               goto out;
+                       }
+               }
                netdev->mtu = params->sw_mtu;
                goto out;
        }
@@ -5145,7 +5164,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
            FT_CAP(modify_root) &&
            FT_CAP(identified_miss_table_mode) &&
            FT_CAP(flow_table_modify)) {
-#ifdef CONFIG_MLX5_ESWITCH
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
                netdev->hw_features      |= NETIF_F_HW_TC;
 #endif
 #ifdef CONFIG_MLX5_EN_ARFS
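  The mlx5e hunks above (mlx5e_set_trust_state, mlx5e_ethtool_set_channels, mlx5e_setup_tc_mqprio, set_feature_lro and mlx5e_change_mtu) all apply one fix pattern: when the channels are closed, the new parameters are committed only after the hardware update succeeds, and the saved copy is restored on failure instead of leaving software state that was never applied. A generic sketch of that save/apply/rollback flow, with invented names:

    #include <stdio.h>

    struct params { int num_channels; };

    /* Stand-in for the hardware update; fails for the example's sake. */
    static int apply_to_hw(const struct params *p)
    {
            return p->num_channels > 8 ? -1 : 0;
    }

    /* Illustrative: commit new parameters only if the update succeeds. */
    static int set_params(struct params *cur, struct params next)
    {
            struct params old = *cur;
            int err;

            *cur = next;
            err = apply_to_hw(cur);
            if (err)
                    *cur = old;     /* roll back on failure */
            return err;
    }

    int main(void)
    {
            struct params cur = { 4 };

            set_params(&cur, (struct params){ 16 });
            printf("num_channels=%d\n", cur.num_channels);  /* still 4 */
            return 0;
    }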
index cfa0e85..629ae6c 100644 (file)
@@ -735,7 +735,9 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
 
        netdev->features       |= NETIF_F_NETNS_LOCAL;
 
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
        netdev->hw_features    |= NETIF_F_HW_TC;
+#endif
        netdev->hw_features    |= NETIF_F_SG;
        netdev->hw_features    |= NETIF_F_IP_CSUM;
        netdev->hw_features    |= NETIF_F_IPV6_CSUM;
index 56aa39a..8fd38ad 100644 (file)
@@ -67,6 +67,7 @@
 #include "lib/geneve.h"
 #include "lib/fs_chains.h"
 #include "diag/en_tc_tracepoint.h"
+#include <asm/div64.h>
 
 #define nic_chains(priv) ((priv)->fs.tc.chains)
 #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
@@ -1162,6 +1163,9 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
        struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
        struct mlx5_flow_handle *rule;
 
+       if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
+               return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+
        if (flow_flag_test(flow, CT)) {
                mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
 
@@ -1192,6 +1196,9 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
 {
        flow_flag_clear(flow, OFFLOADED);
 
+       if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
+               goto offload_rule_0;
+
        if (flow_flag_test(flow, CT)) {
                mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
                return;
@@ -1200,6 +1207,7 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
        if (attr->esw_attr->split_count)
                mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
 
+offload_rule_0:
        mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
 }
 
@@ -2263,8 +2271,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
              BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
              BIT(FLOW_DISSECTOR_KEY_MPLS))) {
                NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
-               netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
-                           dissector->used_keys);
+               netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
+                          dissector->used_keys);
                return -EOPNOTSUPP;
        }
 
@@ -5001,13 +5009,13 @@ errout:
        return err;
 }
 
-static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
+static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
                               struct netlink_ext_ack *extack)
 {
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch *esw;
+       u32 rate_mbps = 0;
        u16 vport_num;
-       u32 rate_mbps;
        int err;
 
        vport_num = rpriv->rep->vport;
@@ -5024,7 +5032,11 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
         * Moreover, if rate is non zero we choose to configure to a minimum of
         * 1 mbit/sec.
         */
-       rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
+       if (rate) {
+               rate = (rate * BITS_PER_BYTE) + 500000;
+               rate_mbps = max_t(u32, do_div(rate, 1000000), 1);
+       }
+
        err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
        if (err)
                NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
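  The apply_police_params change widens the rate to 64 bits because a bytes-per-second rate multiplied by eight can overflow 32 bits well below line rate. The conversion the comment describes is bytes/s to Mbit/s, rounded to the nearest megabit and clamped to at least 1 when non-zero; the intended arithmetic, sketched with plain 64-bit division rather than the kernel's do_div() helper:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative: bytes per second -> Mbit/s, rounded, minimum 1 if non-zero. */
    static uint32_t bytes_per_sec_to_mbps(uint64_t rate)
    {
            uint64_t mbps;

            if (!rate)
                    return 0;

            mbps = (rate * 8 + 500000) / 1000000;
            return mbps ? (uint32_t)mbps : 1;
    }

    int main(void)
    {
            printf("%u\n", bytes_per_sec_to_mbps(125000));      /* 1 Mbit/s    */
            printf("%u\n", bytes_per_sec_to_mbps(700000000));   /* 5600 Mbit/s */
            return 0;
    }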
index 3dbd63b..07b4dbd 100644 (file)
@@ -1141,6 +1141,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
 destroy_ft:
        root->cmds->destroy_flow_table(root, ft);
 free_ft:
+       rhltable_destroy(&ft->fgs_hash);
        kfree(ft);
 unlock_root:
        mutex_unlock(&root->chain_lock);
index 3a9fa62..d046db7 100644 (file)
@@ -90,4 +90,9 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
                               u32 key_type, u32 *p_key_id);
 void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
 
+static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
+{
+       return devlink_net(priv_to_devlink(dev));
+}
+
 #endif
index eb956ce..eaa8958 100644 (file)
@@ -58,7 +58,7 @@ struct fw_page {
        struct rb_node          rb_node;
        u64                     addr;
        struct page            *page;
-       u16                     func_id;
+       u32                     function;
        unsigned long           bitmask;
        struct list_head        list;
        unsigned                free_count;
@@ -74,12 +74,17 @@ enum {
        MLX5_NUM_4K_IN_PAGE             = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
 };
 
-static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func_id)
+static u32 get_function(u16 func_id, bool ec_function)
+{
+       return func_id & (ec_function << 16);
+}
+
+static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
 {
        struct rb_root *root;
        int err;
 
-       root = xa_load(&dev->priv.page_root_xa, func_id);
+       root = xa_load(&dev->priv.page_root_xa, function);
        if (root)
                return root;
 
@@ -87,7 +92,7 @@ static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func
        if (!root)
                return ERR_PTR(-ENOMEM);
 
-       err = xa_insert(&dev->priv.page_root_xa, func_id, root, GFP_KERNEL);
+       err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
        if (err) {
                kfree(root);
                return ERR_PTR(err);
@@ -98,7 +103,7 @@ static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func
        return root;
 }
 
-static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
+static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
 {
        struct rb_node *parent = NULL;
        struct rb_root *root;
@@ -107,7 +112,7 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
        struct fw_page *tfp;
        int i;
 
-       root = page_root_per_func_id(dev, func_id);
+       root = page_root_per_function(dev, function);
        if (IS_ERR(root))
                return PTR_ERR(root);
 
@@ -130,7 +135,7 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
 
        nfp->addr = addr;
        nfp->page = page;
-       nfp->func_id = func_id;
+       nfp->function = function;
        nfp->free_count = MLX5_NUM_4K_IN_PAGE;
        for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
                set_bit(i, &nfp->bitmask);
@@ -143,14 +148,14 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
 }
 
 static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
-                                   u32 func_id)
+                                   u32 function)
 {
        struct fw_page *result = NULL;
        struct rb_root *root;
        struct rb_node *tmp;
        struct fw_page *tfp;
 
-       root = xa_load(&dev->priv.page_root_xa, func_id);
+       root = xa_load(&dev->priv.page_root_xa, function);
        if (WARN_ON_ONCE(!root))
                return NULL;
 
@@ -194,14 +199,14 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
        return err;
 }
 
-static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id)
+static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
 {
        struct fw_page *fp = NULL;
        struct fw_page *iter;
        unsigned n;
 
        list_for_each_entry(iter, &dev->priv.free_list, list) {
-               if (iter->func_id != func_id)
+               if (iter->function != function)
                        continue;
                fp = iter;
        }
@@ -231,7 +236,7 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
 {
        struct rb_root *root;
 
-       root = xa_load(&dev->priv.page_root_xa, fwp->func_id);
+       root = xa_load(&dev->priv.page_root_xa, fwp->function);
        if (WARN_ON_ONCE(!root))
                return;
 
@@ -244,12 +249,12 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
        kfree(fwp);
 }
 
-static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
+static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
 {
        struct fw_page *fwp;
        int n;
 
-       fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, func_id);
+       fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
        if (!fwp) {
                mlx5_core_warn_rl(dev, "page not found\n");
                return;
@@ -263,7 +268,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
                list_add(&fwp->list, &dev->priv.free_list);
 }
 
-static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
+static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
 {
        struct device *device = mlx5_core_dma_dev(dev);
        int nid = dev_to_node(device);
@@ -291,7 +296,7 @@ map:
                goto map;
        }
 
-       err = insert_page(dev, addr, page, func_id);
+       err = insert_page(dev, addr, page, function);
        if (err) {
                mlx5_core_err(dev, "failed to track allocated page\n");
                dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
@@ -328,6 +333,7 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
 static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
                      int notify_fail, bool ec_function)
 {
+       u32 function = get_function(func_id, ec_function);
        u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
        int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
        u64 addr;
@@ -345,10 +351,10 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
 
        for (i = 0; i < npages; i++) {
 retry:
-               err = alloc_4k(dev, &addr, func_id);
+               err = alloc_4k(dev, &addr, function);
                if (err) {
                        if (err == -ENOMEM)
-                               err = alloc_system_page(dev, func_id);
+                               err = alloc_system_page(dev, function);
                        if (err)
                                goto out_4k;
 
@@ -384,7 +390,7 @@ retry:
 
 out_4k:
        for (i--; i >= 0; i--)
-               free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), func_id);
+               free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
 out_free:
        kvfree(in);
        if (notify_fail)
@@ -392,14 +398,15 @@ out_free:
        return err;
 }
 
-static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
+static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
                              bool ec_function)
 {
+       u32 function = get_function(func_id, ec_function);
        struct rb_root *root;
        struct rb_node *p;
        int npages = 0;
 
-       root = xa_load(&dev->priv.page_root_xa, func_id);
+       root = xa_load(&dev->priv.page_root_xa, function);
        if (WARN_ON_ONCE(!root))
                return;
 
@@ -446,6 +453,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
        struct rb_root *root;
        struct fw_page *fwp;
        struct rb_node *p;
+       bool ec_function;
        u32 func_id;
        u32 npages;
        u32 i = 0;
@@ -456,8 +464,9 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
        /* No hard feelings, we want our pages back! */
        npages = MLX5_GET(manage_pages_in, in, input_num_entries);
        func_id = MLX5_GET(manage_pages_in, in, function_id);
+       ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);
 
-       root = xa_load(&dev->priv.page_root_xa, func_id);
+       root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
        if (WARN_ON_ONCE(!root))
                return -EEXIST;
 
@@ -473,9 +482,10 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
        return 0;
 }
 
-static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
+static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
                         int *nclaimed, bool ec_function)
 {
+       u32 function = get_function(func_id, ec_function);
        int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
        u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
        int num_claimed;
@@ -514,7 +524,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
        }
 
        for (i = 0; i < num_claimed; i++)
-               free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), func_id);
+               free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);
 
        if (nclaimed)
                *nclaimed = num_claimed;
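  The pagealloc changes re-key the firmware page trees by a composite of the 16-bit function id and the embedded-CPU-function flag, so pages given to a host function and to its embedded CPU function are tracked separately. A generic way to fold such an id and a flag into one 32-bit key is sketched below; this shows the general idea only, not the driver's exact expression:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative: combine a 16-bit function id with an ec_function flag
     * into a single 32-bit lookup key for a per-function tree. */
    static uint32_t page_tree_key(uint16_t func_id, int ec_function)
    {
            return (uint32_t)func_id | ((uint32_t)(ec_function != 0) << 16);
    }

    int main(void)
    {
            printf("0x%05x 0x%05x\n", page_tree_key(5, 0), page_tree_key(5, 1));
            return 0;
    }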
index c6c5826..1892cea 100644 (file)
@@ -157,6 +157,7 @@ mlxsw_sp1_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
 
 static const
 struct mlxsw_sp_span_entry_ops mlxsw_sp1_span_entry_ops_cpu = {
+       .is_static = true,
        .can_handle = mlxsw_sp1_span_cpu_can_handle,
        .parms_set = mlxsw_sp1_span_entry_cpu_parms,
        .configure = mlxsw_sp1_span_entry_cpu_configure,
@@ -214,6 +215,7 @@ mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
 
 static const
 struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
+       .is_static = true,
        .can_handle = mlxsw_sp_port_dev_check,
        .parms_set = mlxsw_sp_span_entry_phys_parms,
        .configure = mlxsw_sp_span_entry_phys_configure,
@@ -721,6 +723,7 @@ mlxsw_sp2_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
 
 static const
 struct mlxsw_sp_span_entry_ops mlxsw_sp2_span_entry_ops_cpu = {
+       .is_static = true,
        .can_handle = mlxsw_sp2_span_cpu_can_handle,
        .parms_set = mlxsw_sp2_span_entry_cpu_parms,
        .configure = mlxsw_sp2_span_entry_cpu_configure,
@@ -1036,6 +1039,9 @@ static void mlxsw_sp_span_respin_work(struct work_struct *work)
                if (!refcount_read(&curr->ref_count))
                        continue;
 
+               if (curr->ops->is_static)
+                       continue;
+
                err = curr->ops->parms_set(mlxsw_sp, curr->to_dev, &sparms);
                if (err)
                        continue;
index d907718..aa1cd40 100644 (file)
@@ -60,6 +60,7 @@ struct mlxsw_sp_span_entry {
 };
 
 struct mlxsw_sp_span_entry_ops {
+       bool is_static;
        bool (*can_handle)(const struct net_device *to_dev);
        int (*parms_set)(struct mlxsw_sp *mlxsw_sp,
                         const struct net_device *to_dev,
index 82b1c7a..ba0e4d2 100644 (file)
@@ -129,7 +129,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
                                if (ret) {
                                        dev_err(&pdev->dev,
                                                "Failed to set tx_clk\n");
-                                       return ret;
+                                       goto err_remove_config_dt;
                                }
                        }
                }
@@ -143,7 +143,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
                        if (ret) {
                                dev_err(&pdev->dev,
                                        "Failed to set clk_ptp_ref\n");
-                               return ret;
+                               goto err_remove_config_dt;
                        }
                }
        }
index 9c272a2..1c9c67b 100644 (file)
@@ -375,6 +375,7 @@ static int ehl_pse0_common_data(struct pci_dev *pdev,
                                struct plat_stmmacenet_data *plat)
 {
        plat->bus_id = 2;
+       plat->addr64 = 32;
        return ehl_common_data(pdev, plat);
 }
 
@@ -406,6 +407,7 @@ static int ehl_pse1_common_data(struct pci_dev *pdev,
                                struct plat_stmmacenet_data *plat)
 {
        plat->bus_id = 3;
+       plat->addr64 = 32;
        return ehl_common_data(pdev, plat);
 }
 
index c19dac2..dd7917c 100644 (file)
@@ -992,7 +992,8 @@ static void __team_compute_features(struct team *team)
        unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
                                        IFF_XMIT_DST_RELEASE_PERM;
 
-       list_for_each_entry(port, &team->port_list, list) {
+       rcu_read_lock();
+       list_for_each_entry_rcu(port, &team->port_list, list) {
                vlan_features = netdev_increment_features(vlan_features,
                                        port->dev->vlan_features,
                                        TEAM_VLAN_FEATURES);
@@ -1006,6 +1007,7 @@ static void __team_compute_features(struct team *team)
                if (port->dev->hard_header_len > max_hard_header_len)
                        max_hard_header_len = port->dev->hard_header_len;
        }
+       rcu_read_unlock();
 
        team->dev->vlan_features = vlan_features;
        team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
@@ -1020,9 +1022,7 @@ static void __team_compute_features(struct team *team)
 
 static void team_compute_features(struct team *team)
 {
-       mutex_lock(&team->lock);
        __team_compute_features(team);
-       mutex_unlock(&team->lock);
        netdev_change_features(team->dev);
 }
 
index 6aaa067..a9b5510 100644 (file)
@@ -968,6 +968,12 @@ static const struct usb_device_id  products[] = {
                                      USB_CDC_SUBCLASS_ETHERNET,
                                      USB_CDC_PROTO_NONE),
        .driver_info = (unsigned long)&wwan_info,
+}, {
+       /* Cinterion PLS83/PLS63 modem by GEMALTO/THALES */
+       USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0069, USB_CLASS_COMM,
+                                     USB_CDC_SUBCLASS_ETHERNET,
+                                     USB_CDC_PROTO_NONE),
+       .driver_info = (unsigned long)&wwan_info,
 }, {
        USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
                        USB_CDC_PROTO_NONE),
index 7ea113f..b96a3dc 100644 (file)
@@ -1303,6 +1303,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)},    /* Olivetti Olicard 160 */
        {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)},    /* Olivetti Olicard 500 */
        {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)},    /* Cinterion PLxx */
+       {QMI_QUIRK_SET_DTR(0x1e2d, 0x006f, 8)}, /* Cinterion PLS83/PLS63 */
        {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)},    /* Cinterion PHxx,PXxx */
        {QMI_FIXED_INTF(0x1e2d, 0x0063, 10)},   /* Cinterion ALASxx (1 RmNet) */
        {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)},    /* Cinterion PHxx,PXxx (2 RmNet) */
index 7220fc8..8280092 100644 (file)
@@ -314,6 +314,7 @@ const struct iwl_cfg_trans_params iwl_ma_trans_cfg = {
 const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
 const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
 const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
+const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
 const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6 AX211 160MHz";
 const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6 AX411 160MHz";
 const char iwl_ma_name[] = "Intel(R) Wi-Fi 6";
@@ -340,6 +341,18 @@ const struct iwl_cfg iwl_qu_b0_hr1_b0 = {
        .num_rbds = IWL_NUM_RBDS_22000_HE,
 };
 
+const struct iwl_cfg iwl_qu_b0_hr_b0 = {
+       .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
+       IWL_DEVICE_22500,
+       /*
+        * This device doesn't support receiving BlockAck with a large bitmap
+        * so we need to restrict the size of transmitted aggregation to the
+        * HT size; mac80211 would otherwise pick the HE max (256) by default.
+        */
+       .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+       .num_rbds = IWL_NUM_RBDS_22000_HE,
+};
+
 const struct iwl_cfg iwl_ax201_cfg_qu_hr = {
        .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
        .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
@@ -366,6 +379,18 @@ const struct iwl_cfg iwl_qu_c0_hr1_b0 = {
        .num_rbds = IWL_NUM_RBDS_22000_HE,
 };
 
+const struct iwl_cfg iwl_qu_c0_hr_b0 = {
+       .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
+       IWL_DEVICE_22500,
+       /*
+        * This device doesn't support receiving BlockAck with a large bitmap
+        * so we need to restrict the size of transmitted aggregation to the
+        * HT size; mac80211 would otherwise pick the HE max (256) by default.
+        */
+       .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+       .num_rbds = IWL_NUM_RBDS_22000_HE,
+};
+
 const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = {
        .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
        .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
index 15248b0..d8b7776 100644 (file)
@@ -80,19 +80,45 @@ static void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
 }
 
 /*
- * Evaluate a DSM with no arguments and a single u8 return value (inside a
- * buffer object), verify and return that value.
+ * Generic function to evaluate a DSM with no arguments
+ * and an integer return value,
+ * (as an integer object or inside a buffer object),
+ * verify and assign the value in the "value" parameter.
+ * return 0 in success and the appropriate errno otherwise.
  */
-int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
+static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func,
+                                   u64 *value, size_t expected_size)
 {
        union acpi_object *obj;
-       int ret;
+       int ret = 0;
 
        obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL);
-       if (IS_ERR(obj))
+       if (IS_ERR(obj)) {
+               IWL_DEBUG_DEV_RADIO(dev,
+                                   "Failed to get  DSM object. func= %d\n",
+                                   func);
                return -ENOENT;
+       }
+
+       if (obj->type == ACPI_TYPE_INTEGER) {
+               *value = obj->integer.value;
+       } else if (obj->type == ACPI_TYPE_BUFFER) {
+               __le64 le_value = 0;
 
-       if (obj->type != ACPI_TYPE_BUFFER) {
+               if (WARN_ON_ONCE(expected_size > sizeof(le_value)))
+                       return -EINVAL;
+
+               /* if the buffer size doesn't match the expected size */
+               if (obj->buffer.length != expected_size)
+                       IWL_DEBUG_DEV_RADIO(dev,
+                                           "ACPI: DSM invalid buffer size, padding or truncating (%d)\n",
+                                           obj->buffer.length);
+
+                /* assuming LE from Intel BIOS spec */
+               memcpy(&le_value, obj->buffer.pointer,
+                      min_t(size_t, expected_size, (size_t)obj->buffer.length));
+               *value = le64_to_cpu(le_value);
+       } else {
                IWL_DEBUG_DEV_RADIO(dev,
                                    "ACPI: DSM method did not return a valid object, type=%d\n",
                                    obj->type);
@@ -100,15 +126,6 @@ int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
                goto out;
        }
 
-       if (obj->buffer.length != sizeof(u8)) {
-               IWL_DEBUG_DEV_RADIO(dev,
-                                   "ACPI: DSM method returned invalid buffer, length=%d\n",
-                                   obj->buffer.length);
-               ret = -EINVAL;
-               goto out;
-       }
-
-       ret = obj->buffer.pointer[0];
        IWL_DEBUG_DEV_RADIO(dev,
                            "ACPI: DSM method evaluated: func=%d, ret=%d\n",
                            func, ret);
@@ -116,6 +133,24 @@ out:
        ACPI_FREE(obj);
        return ret;
 }
+
+/*
+ * Evaluate a DSM with no arguments and a u8 return value,
+ */
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value)
+{
+       int ret;
+       u64 val;
+
+       ret = iwl_acpi_get_dsm_integer(dev, rev, func, &val, sizeof(u8));
+
+       if (ret < 0)
+               return ret;
+
+       /* cast val (u64) to be u8 */
+       *value = (u8)val;
+       return 0;
+}
 IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8);
 
 union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
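  iwl_acpi_get_dsm_integer() above accepts either an ACPI integer object or a buffer object; in the buffer case it copies up to eight bytes into a __le64 and converts, padding or truncating when the BIOS returns a different length. The same little-endian interpretation can be sketched in portable C:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative: read up to 8 little-endian bytes as an unsigned integer,
     * zero-padding short buffers and truncating long ones. */
    static uint64_t le_bytes_to_u64(const uint8_t *buf, size_t len)
    {
            uint64_t v = 0;
            size_t i, n = len < sizeof(v) ? len : sizeof(v);

            for (i = 0; i < n; i++)
                    v |= (uint64_t)buf[i] << (8 * i);
            return v;
    }

    int main(void)
    {
            const uint8_t dsm_reply[] = { 0x02 };   /* a one-byte DSM result */

            printf("%llu\n", (unsigned long long)le_bytes_to_u64(dsm_reply, 1));
            return 0;
    }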
index 042dd24..1cce30d 100644 (file)
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
  * Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 #ifndef __iwl_fw_acpi__
 #define __iwl_fw_acpi__
@@ -99,7 +99,7 @@ struct iwl_fw_runtime;
 
 void *iwl_acpi_get_object(struct device *dev, acpi_string method);
 
-int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func);
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value);
 
 union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
                                         union acpi_object *data,
@@ -159,7 +159,8 @@ static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev,
        return ERR_PTR(-ENOENT);
 }
 
-static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
+static inline
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value)
 {
        return -ENOENT;
 }
index 6d8f7bf..895a907 100644 (file)
@@ -224,40 +224,46 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
 int iwl_pnvm_load(struct iwl_trans *trans,
                  struct iwl_notif_wait_data *notif_wait)
 {
-       const struct firmware *pnvm;
        struct iwl_notification_wait pnvm_wait;
        static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP,
                                                PNVM_INIT_COMPLETE_NTFY) };
-       char pnvm_name[64];
-       int ret;
 
        /* if the SKU_ID is empty, there's nothing to do */
        if (!trans->sku_id[0] && !trans->sku_id[1] && !trans->sku_id[2])
                return 0;
 
-       /* if we already have it, nothing to do either */
-       if (trans->pnvm_loaded)
-               return 0;
+       /* load from disk only if we haven't done it (or tried) before */
+       if (!trans->pnvm_loaded) {
+               const struct firmware *pnvm;
+               char pnvm_name[64];
+               int ret;
+
+               /*
+                * The prefix unfortunately includes a hyphen at the end, so
+                * don't add the dot here...
+                */
+               snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm",
+                        trans->cfg->fw_name_pre);
+
+               /* ...but replace the hyphen with the dot here. */
+               if (strlen(trans->cfg->fw_name_pre) < sizeof(pnvm_name))
+                       pnvm_name[strlen(trans->cfg->fw_name_pre) - 1] = '.';
+
+               ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev);
+               if (ret) {
+                       IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n",
+                                    pnvm_name, ret);
+                       /*
+                        * Pretend we've loaded it - at least we've tried and
+                        * couldn't load it at all, so there's no point in
+                        * trying again over and over.
+                        */
+                       trans->pnvm_loaded = true;
+               } else {
+                       iwl_pnvm_parse(trans, pnvm->data, pnvm->size);
 
-       /*
-        * The prefix unfortunately includes a hyphen at the end, so
-        * don't add the dot here...
-        */
-       snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm",
-                trans->cfg->fw_name_pre);
-
-       /* ...but replace the hyphen with the dot here. */
-       if (strlen(trans->cfg->fw_name_pre) < sizeof(pnvm_name))
-               pnvm_name[strlen(trans->cfg->fw_name_pre) - 1] = '.';
-
-       ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev);
-       if (ret) {
-               IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n",
-                            pnvm_name, ret);
-       } else {
-               iwl_pnvm_parse(trans, pnvm->data, pnvm->size);
-
-               release_firmware(pnvm);
+                       release_firmware(pnvm);
+               }
        }
 
        iwl_init_notification_wait(notif_wait, &pnvm_wait,
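  The PNVM loader builds the firmware file name from the configured prefix, which ends in a hyphen, so it appends "pnvm" and then rewrites that trailing hyphen into a dot (the rest of the hunk just avoids re-probing the filesystem once a load has been attempted). The string handling in isolation, using a made-up prefix rather than a real firmware name:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *fw_name_pre = "example-fw-";   /* assumed prefix */
            char pnvm_name[64];

            /* "%spnvm" first, then turn the prefix's trailing '-' into '.' */
            snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm", fw_name_pre);
            if (strlen(fw_name_pre) < sizeof(pnvm_name))
                    pnvm_name[strlen(fw_name_pre) - 1] = '.';

            printf("%s\n", pnvm_name);   /* example-fw.pnvm */
            return 0;
    }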
index 27cb040..86e1d57 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
 #ifndef __IWL_CONFIG_H__
@@ -445,7 +445,7 @@ struct iwl_cfg {
 #define IWL_CFG_CORES_BT_GNSS          0x5
 
 #define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4)
-#define IWL_SUBDEVICE_NO_160(subdevice)        ((u16)((subdevice) & 0x0100) >> 9)
+#define IWL_SUBDEVICE_NO_160(subdevice)        ((u16)((subdevice) & 0x0200) >> 9)
 #define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10)
 
 struct iwl_dev_info {
@@ -491,6 +491,7 @@ extern const char iwl9260_killer_1550_name[];
 extern const char iwl9560_killer_1550i_name[];
 extern const char iwl9560_killer_1550s_name[];
 extern const char iwl_ax200_name[];
+extern const char iwl_ax203_name[];
 extern const char iwl_ax201_name[];
 extern const char iwl_ax101_name[];
 extern const char iwl_ax200_killer_1650w_name[];
@@ -574,6 +575,8 @@ extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
 extern const struct iwl_cfg iwl_qu_b0_hr1_b0;
 extern const struct iwl_cfg iwl_qu_c0_hr1_b0;
 extern const struct iwl_cfg iwl_quz_a0_hr1_b0;
+extern const struct iwl_cfg iwl_qu_b0_hr_b0;
+extern const struct iwl_cfg iwl_qu_c0_hr_b0;
 extern const struct iwl_cfg iwl_ax200_cfg_cc;
 extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
 extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
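  The iwl-config.h hunk is a one-bit fix: IWL_SUBDEVICE_NO_160() shifts right by 9, so the mask has to select bit 9 (0x0200); masking with 0x0100 picks bit 8 and the shift always yields zero. A two-line demonstration:

    #include <stdio.h>

    int main(void)
    {
            unsigned int subdevice = 0x0200;   /* bit 9 set in the subdevice id */

            printf("old mask -> %u\n", (subdevice & 0x0100) >> 9);  /* 0 */
            printf("new mask -> %u\n", (subdevice & 0x0200) >> 9);  /* 1 */
            return 0;
    }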
index a654147..a80a35a 100644 (file)
@@ -180,13 +180,6 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
        if (le32_to_cpu(tlv->length) < sizeof(*reg))
                return -EINVAL;
 
-       /* For safe using a string from FW make sure we have a
-        * null terminator
-        */
-       reg->name[IWL_FW_INI_MAX_NAME - 1] = 0;
-
-       IWL_DEBUG_FW(trans, "WRT: parsing region: %s\n", reg->name);
-
        if (id >= IWL_FW_INI_MAX_REGION_ID) {
                IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
                return -EINVAL;
index 2ac20d0..2b7ef15 100644 (file)
@@ -150,16 +150,17 @@ u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
 }
 IWL_EXPORT_SYMBOL(iwl_read_prph);
 
-void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
+void iwl_write_prph_delay(struct iwl_trans *trans, u32 ofs, u32 val, u32 delay_ms)
 {
        unsigned long flags;
 
        if (iwl_trans_grab_nic_access(trans, &flags)) {
+               mdelay(delay_ms);
                iwl_write_prph_no_grab(trans, ofs, val);
                iwl_trans_release_nic_access(trans, &flags);
        }
 }
-IWL_EXPORT_SYMBOL(iwl_write_prph);
+IWL_EXPORT_SYMBOL(iwl_write_prph_delay);
 
 int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
                      u32 bits, u32 mask, int timeout)
@@ -219,8 +220,8 @@ IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
 void iwl_force_nmi(struct iwl_trans *trans)
 {
        if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
-               iwl_write_prph(trans, DEVICE_SET_NMI_REG,
-                              DEVICE_SET_NMI_VAL_DRV);
+               iwl_write_prph_delay(trans, DEVICE_SET_NMI_REG,
+                                    DEVICE_SET_NMI_VAL_DRV, 1);
        else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
                iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER,
                                UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER);
index 39bceee..3c21c0e 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2018-2020 Intel Corporation
  */
 #ifndef __iwl_io_h__
 #define __iwl_io_h__
@@ -37,7 +37,13 @@ u32 iwl_read_prph_no_grab(struct iwl_trans *trans, u32 ofs);
 u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs);
 void iwl_write_prph_no_grab(struct iwl_trans *trans, u32 ofs, u32 val);
 void iwl_write_prph64_no_grab(struct iwl_trans *trans, u64 ofs, u64 val);
-void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
+void iwl_write_prph_delay(struct iwl_trans *trans, u32 ofs,
+                         u32 val, u32 delay_ms);
+static inline void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
+{
+       iwl_write_prph_delay(trans, ofs, val, 0);
+}
+
 int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
                      u32 bits, u32 mask, int timeout);
 void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
index 0b03fde..1158e25 100644 (file)
 #define RADIO_RSP_ADDR_POS             (6)
 #define RADIO_RSP_RD_CMD               (3)
 
+/* LTR control (Qu only) */
+#define HPM_MAC_LTR_CSR                        0xa0348c
+#define HPM_MAC_LRT_ENABLE_ALL         0xf
+/* also uses CSR_LTR_* for values */
+#define HPM_UMAC_LTR                   0xa03480
+
 /* FW monitor */
 #define MON_BUFF_SAMPLE_CTL            (0xa03c00)
 #define MON_BUFF_BASE_ADDR             (0xa03c1c)
index a0b7331..64c10ca 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -2032,8 +2032,6 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
 
        mutex_lock(&mvm->mutex);
 
-       clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
-
        /* get the BSS vif pointer again */
        vif = iwl_mvm_get_bss_vif(mvm);
        if (IS_ERR_OR_NULL(vif))
@@ -2148,6 +2146,8 @@ out_iterate:
                        iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
 
 out:
+       clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+
        /* no need to reset the device in unified images, if successful */
        if (unified_image && !ret) {
                /* nothing else to do if we already sent D0I3_END_CMD */
index 573e469..38d0bfb 100644 (file)
@@ -459,7 +459,10 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
        const size_t bufsz = sizeof(buf);
        int pos = 0;
 
+       mutex_lock(&mvm->mutex);
        iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os);
+       mutex_unlock(&mvm->mutex);
+
        do_div(curr_os, NSEC_PER_USEC);
        diff = curr_os - curr_gp2;
        pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff);
index 0637eb1..313e9f1 100644 (file)
@@ -1090,20 +1090,22 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
 
 static u8 iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm)
 {
+       u8 value;
+
        int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0,
-                                     DSM_FUNC_ENABLE_INDONESIA_5G2);
+                                     DSM_FUNC_ENABLE_INDONESIA_5G2, &value);
 
        if (ret < 0)
                IWL_DEBUG_RADIO(mvm,
                                "Failed to evaluate DSM function ENABLE_INDONESIA_5G2, ret=%d\n",
                                ret);
 
-       else if (ret >= DSM_VALUE_INDONESIA_MAX)
+       else if (value >= DSM_VALUE_INDONESIA_MAX)
                IWL_DEBUG_RADIO(mvm,
-                               "DSM function ENABLE_INDONESIA_5G2 return invalid value, ret=%d\n",
-                               ret);
+                               "DSM function ENABLE_INDONESIA_5G2 return invalid value, value=%d\n",
+                               value);
 
-       else if (ret == DSM_VALUE_INDONESIA_ENABLE) {
+       else if (value == DSM_VALUE_INDONESIA_ENABLE) {
                IWL_DEBUG_RADIO(mvm,
                                "Evaluated DSM function ENABLE_INDONESIA_5G2: Enabling 5g2\n");
                return DSM_VALUE_INDONESIA_ENABLE;
@@ -1114,25 +1116,26 @@ static u8 iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm)
 
 static u8 iwl_mvm_eval_dsm_disable_srd(struct iwl_mvm *mvm)
 {
+       u8 value;
        int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0,
-                                     DSM_FUNC_DISABLE_SRD);
+                                     DSM_FUNC_DISABLE_SRD, &value);
 
        if (ret < 0)
                IWL_DEBUG_RADIO(mvm,
                                "Failed to evaluate DSM function DISABLE_SRD, ret=%d\n",
                                ret);
 
-       else if (ret >= DSM_VALUE_SRD_MAX)
+       else if (value >= DSM_VALUE_SRD_MAX)
                IWL_DEBUG_RADIO(mvm,
-                               "DSM function DISABLE_SRD return invalid value, ret=%d\n",
-                               ret);
+                               "DSM function DISABLE_SRD return invalid value, value=%d\n",
+                               value);
 
-       else if (ret == DSM_VALUE_SRD_PASSIVE) {
+       else if (value == DSM_VALUE_SRD_PASSIVE) {
                IWL_DEBUG_RADIO(mvm,
                                "Evaluated DSM function DISABLE_SRD: setting SRD to passive\n");
                return DSM_VALUE_SRD_PASSIVE;
 
-       } else if (ret == DSM_VALUE_SRD_DISABLE) {
+       } else if (value == DSM_VALUE_SRD_DISABLE) {
                IWL_DEBUG_RADIO(mvm,
                                "Evaluated DSM function DISABLE_SRD: disabling SRD\n");
                return DSM_VALUE_SRD_DISABLE;
index 6cce72b..bcbd77e 100644 (file)
@@ -4194,6 +4194,9 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
        iwl_mvm_binding_remove_vif(mvm, vif);
 
 out:
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD) &&
+           switching_chanctx)
+               return;
        mvmvif->phy_ctxt = NULL;
        iwl_mvm_power_update_mac(mvm);
 }
index 98f62d7..61618f6 100644 (file)
@@ -791,6 +791,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        if (!mvm->scan_cmd)
                goto out_free;
 
+       /* invalidate ids to prevent accidental removal of sta_id 0 */
+       mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA;
+       mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA;
+
        /* Set EBS as successful as long as not stated otherwise by the FW. */
        mvm->last_ebs_successful = true;
 
@@ -1205,6 +1209,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk)
        reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
        if (device_reprobe(reprobe->dev))
                dev_err(reprobe->dev, "reprobe failed!\n");
+       put_device(reprobe->dev);
        kfree(reprobe);
        module_put(THIS_MODULE);
 }
@@ -1255,7 +1260,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
                        module_put(THIS_MODULE);
                        return;
                }
-               reprobe->dev = mvm->trans->dev;
+               reprobe->dev = get_device(mvm->trans->dev);
                INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
                schedule_work(&reprobe->work);
        } else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
index dc17441..578c353 100644 (file)
@@ -2057,6 +2057,9 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
        lockdep_assert_held(&mvm->mutex);
 
+       if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
+               return -EINVAL;
+
        iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
        ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
        if (ret)
@@ -2071,6 +2074,9 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
 
        lockdep_assert_held(&mvm->mutex);
 
+       if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
+               return -EINVAL;
+
        iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
        ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
        if (ret)
index a983c21..3712adc 100644 (file)
@@ -773,6 +773,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
 
        next = skb_gso_segment(skb, netdev_flags);
        skb_shinfo(skb)->gso_size = mss;
+       skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
        if (WARN_ON_ONCE(IS_ERR(next)))
                return -EINVAL;
        else if (next)
@@ -795,6 +796,8 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
 
                if (tcp_payload_len > mss) {
                        skb_shinfo(tmp)->gso_size = mss;
+                       skb_shinfo(tmp)->gso_type = ipv4 ? SKB_GSO_TCPV4 :
+                                                          SKB_GSO_TCPV6;
                } else {
                        if (qos) {
                                u8 *qc;
index 36bf414..5b5134d 100644 (file)
@@ -75,6 +75,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
                                 const struct fw_img *fw)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
+                     u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+                                     CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
+                     u32_encode_bits(250,
+                                     CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
+                     CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
+                     u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+                                     CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
+                     u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
        struct iwl_context_info_gen3 *ctxt_info_gen3;
        struct iwl_prph_scratch *prph_scratch;
        struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
@@ -189,8 +198,10 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
        /* Allocate IML */
        iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
                                     &trans_pcie->iml_dma_addr, GFP_KERNEL);
-       if (!iml_img)
-               return -ENOMEM;
+       if (!iml_img) {
+               ret = -ENOMEM;
+               goto err_free_ctxt_info;
+       }
 
        memcpy(iml_img, trans->iml, trans->iml_len);
 
@@ -206,23 +217,19 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
        iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
                    CSR_AUTO_FUNC_BOOT_ENA);
 
-       if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
-               /*
-                * The firmware initializes this again later (to a smaller
-                * value), but for the boot process initialize the LTR to
-                * ~250 usec.
-                */
-               u32 val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
-                         u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
-                                         CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
-                         u32_encode_bits(250,
-                                         CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
-                         CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
-                         u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
-                                         CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
-                         u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
-
-               iwl_write32(trans, CSR_LTR_LONG_VAL_AD, val);
+       /*
+        * To work around hardware latency issues during the boot process,
+        * initialize the LTR to ~250 usec (see ltr_val above).
+        * The firmware initializes this again later (to a smaller value).
+        */
+       if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
+            trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
+           !trans->trans_cfg->integrated) {
+               iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
+       } else if (trans->trans_cfg->integrated &&
+                  trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
+               iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
+               iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
        }
 
        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
@@ -232,6 +239,11 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
 
        return 0;
 
+err_free_ctxt_info:
+       dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
+                         trans_pcie->ctxt_info_gen3,
+                         trans_pcie->ctxt_info_dma_addr);
+       trans_pcie->ctxt_info_gen3 = NULL;
 err_free_prph_info:
        dma_free_coherent(trans->dev,
                          sizeof(*prph_info),
@@ -294,6 +306,9 @@ int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
                return ret;
        }
 
+       if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size))
+               return -EBUSY;
+
        prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
                cpu_to_le64(trans_pcie->pnvm_dram.physical);
        prph_sc_ctrl->pnvm_cfg.pnvm_size =
index 9659826..ed3f5b7 100644 (file)
@@ -910,6 +910,11 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
                      IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
                      IWL_CFG_ANY, IWL_CFG_ANY,
                      iwl_qu_b0_hr1_b0, iwl_ax101_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+                     IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+                     IWL_CFG_ANY, IWL_CFG_ANY,
+                     iwl_qu_b0_hr_b0, iwl_ax203_name),
 
        /* Qu C step */
        _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
@@ -917,6 +922,11 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
                      IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
                      IWL_CFG_ANY, IWL_CFG_ANY,
                      iwl_qu_c0_hr1_b0, iwl_ax101_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+                     IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+                     IWL_CFG_ANY, IWL_CFG_ANY,
+                     iwl_qu_c0_hr_b0, iwl_ax203_name),
 
        /* QuZ */
        _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
index 285e0d5..ab93a84 100644 (file)
@@ -2107,7 +2107,8 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
 
        while (offs < dwords) {
                /* limit the time we spin here under lock to 1/2s */
-               ktime_t timeout = ktime_add_us(ktime_get(), 500 * USEC_PER_MSEC);
+               unsigned long end = jiffies + HZ / 2;
+               bool resched = false;
 
                if (iwl_trans_grab_nic_access(trans, &flags)) {
                        iwl_write32(trans, HBUS_TARG_MEM_RADDR,
@@ -2118,14 +2119,15 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
                                                        HBUS_TARG_MEM_RDAT);
                                offs++;
 
-                               /* calling ktime_get is expensive so
-                                * do it once in 128 reads
-                                */
-                               if (offs % 128 == 0 && ktime_after(ktime_get(),
-                                                                  timeout))
+                               if (time_after(jiffies, end)) {
+                                       resched = true;
                                        break;
+                               }
                        }
                        iwl_trans_release_nic_access(trans, &flags);
+
+                       if (resched)
+                               cond_resched();
                } else {
                        return -EBUSY;
                }
index 5dda001..83f4964 100644 (file)
@@ -201,6 +201,11 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq = trans->txqs.txq[txq_id];
 
+       if (!txq) {
+               IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
+               return;
+       }
+
        spin_lock_bh(&txq->lock);
        while (txq->write_ptr != txq->read_ptr) {
                IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
index 27eea90..7ff1bb0 100644 (file)
@@ -142,26 +142,25 @@ void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
         * idx is bounded by n_window
         */
        int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
+       struct sk_buff *skb;
 
        lockdep_assert_held(&txq->lock);
 
+       if (!txq->entries)
+               return;
+
        iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
                               iwl_txq_get_tfd(trans, txq, idx));
 
-       /* free SKB */
-       if (txq->entries) {
-               struct sk_buff *skb;
-
-               skb = txq->entries[idx].skb;
+       skb = txq->entries[idx].skb;
 
-               /* Can be called from irqs-disabled context
-                * If skb is not NULL, it means that the whole queue is being
-                * freed and that the queue is not empty - free the skb
-                */
-               if (skb) {
-                       iwl_op_mode_free_skb(trans->op_mode, skb);
-                       txq->entries[idx].skb = NULL;
-               }
+       /* Can be called from irqs-disabled context
+        * If skb is not NULL, it means that the whole queue is being
+        * freed and that the queue is not empty - free the skb
+        */
+       if (skb) {
+               iwl_op_mode_free_skb(trans->op_mode, skb);
+               txq->entries[idx].skb = NULL;
        }
 }
 
@@ -841,10 +840,8 @@ void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
                        int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
                        struct sk_buff *skb = txq->entries[idx].skb;
 
-                       if (WARN_ON_ONCE(!skb))
-                               continue;
-
-                       iwl_txq_free_tso_page(trans, skb);
+                       if (!WARN_ON_ONCE(!skb))
+                               iwl_txq_free_tso_page(trans, skb);
                }
                iwl_txq_gen2_free_tfd(trans, txq);
                txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
@@ -1494,28 +1491,28 @@ void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
         */
        int rd_ptr = txq->read_ptr;
        int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
+       struct sk_buff *skb;
 
        lockdep_assert_held(&txq->lock);
 
+       if (!txq->entries)
+               return;
+
        /* We have only q->n_window txq->entries, but we use
         * TFD_QUEUE_SIZE_MAX tfds
         */
        iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
 
        /* free SKB */
-       if (txq->entries) {
-               struct sk_buff *skb;
-
-               skb = txq->entries[idx].skb;
+       skb = txq->entries[idx].skb;
 
-               /* Can be called from irqs-disabled context
-                * If skb is not NULL, it means that the whole queue is being
-                * freed and that the queue is not empty - free the skb
-                */
-               if (skb) {
-                       iwl_op_mode_free_skb(trans->op_mode, skb);
-                       txq->entries[idx].skb = NULL;
-               }
+       /* Can be called from irqs-disabled context
+        * If skb is not NULL, it means that the whole queue is being
+        * freed and that the queue is not empty - free the skb
+        */
+       if (skb) {
+               iwl_op_mode_free_skb(trans->op_mode, skb);
+               txq->entries[idx].skb = NULL;
        }
 }
 
index a44b776..c135478 100644 (file)
@@ -231,7 +231,7 @@ mt7615_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
                        int cmd, int *seq)
 {
        struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
-       enum mt76_txq_id qid;
+       enum mt76_mcuq_id qid;
 
        mt7615_mcu_fill_msg(dev, skb, cmd, seq);
        if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
index 13d77f8..9fb506f 100644 (file)
@@ -83,7 +83,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
 {
        struct mt76_queue *q = &dev->q_rx[qid];
        struct mt76_sdio *sdio = &dev->sdio;
-       int len = 0, err, i, order;
+       int len = 0, err, i;
        struct page *page;
        u8 *buf;
 
@@ -96,8 +96,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
        if (len > sdio->func->cur_blksize)
                len = roundup(len, sdio->func->cur_blksize);
 
-       order = get_order(len);
-       page = __dev_alloc_pages(GFP_KERNEL, order);
+       page = __dev_alloc_pages(GFP_KERNEL, get_order(len));
        if (!page)
                return -ENOMEM;
 
@@ -106,7 +105,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
        err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len);
        if (err < 0) {
                dev_err(dev->dev, "sdio read data failed:%d\n", err);
-               __free_pages(page, order);
+               put_page(page);
                return err;
        }
 
@@ -123,7 +122,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
                if (q->queued + i + 1 == q->ndesc)
                        break;
        }
-       __free_pages(page, order);
+       put_page(page);
 
        spin_lock_bh(&q->lock);
        q->head = (q->head + i) % q->ndesc;
index 5fdd1a6..e211a2b 100644 (file)
@@ -256,7 +256,7 @@ mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
        struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
        struct mt7915_mcu_txd *mcu_txd;
        u8 seq, pkt_fmt, qidx;
-       enum mt76_txq_id txq;
+       enum mt76_mcuq_id qid;
        __le32 *txd;
        u32 val;
 
@@ -268,18 +268,18 @@ mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
                seq = ++dev->mt76.mcu.msg_seq & 0xf;
 
        if (cmd == -MCU_CMD_FW_SCATTER) {
-               txq = MT_MCUQ_FWDL;
+               qid = MT_MCUQ_FWDL;
                goto exit;
        }
 
        mcu_txd = (struct mt7915_mcu_txd *)skb_push(skb, sizeof(*mcu_txd));
 
        if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state)) {
-               txq = MT_MCUQ_WA;
+               qid = MT_MCUQ_WA;
                qidx = MT_TX_MCU_PORT_RX_Q0;
                pkt_fmt = MT_TX_TYPE_CMD;
        } else {
-               txq = MT_MCUQ_WM;
+               qid = MT_MCUQ_WM;
                qidx = MT_TX_MCU_PORT_RX_Q0;
                pkt_fmt = MT_TX_TYPE_CMD;
        }
@@ -326,7 +326,7 @@ exit:
        if (wait_seq)
                *wait_seq = seq;
 
-       return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[txq], skb, 0);
+       return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[qid], skb, 0);
 }
 
 static void
index 5f99054..af7d1ec 100644 (file)
@@ -152,8 +152,7 @@ mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
 
        if (new_p) {
                /* we have one extra ref from the allocator */
-               __free_pages(e->p, MT_RX_ORDER);
-
+               put_page(e->p);
                e->p = new_p;
        }
 }
@@ -310,7 +309,6 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
        }
 
        e = &q->e[q->end];
-       e->skb = skb;
        usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
                          mt7601u_complete_tx, q);
        ret = usb_submit_urb(e->urb, GFP_ATOMIC);
@@ -328,6 +326,7 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
 
        q->end = (q->end + 1) % q->entries;
        q->used++;
+       e->skb = skb;
 
        if (q->used >= q->entries)
                ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
index 200bdd6..8caf9b3 100644 (file)
@@ -1543,8 +1543,21 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
        }
 
        length = (io.nblocks + 1) << ns->lba_shift;
-       meta_len = (io.nblocks + 1) * ns->ms;
-       metadata = nvme_to_user_ptr(io.metadata);
+
+       if ((io.control & NVME_RW_PRINFO_PRACT) &&
+           ns->ms == sizeof(struct t10_pi_tuple)) {
+               /*
+                * Protection information is stripped/inserted by the
+                * controller.
+                */
+               if (nvme_to_user_ptr(io.metadata))
+                       return -EINVAL;
+               meta_len = 0;
+               metadata = NULL;
+       } else {
+               meta_len = (io.nblocks + 1) * ns->ms;
+               metadata = nvme_to_user_ptr(io.metadata);
+       }
 
        if (ns->features & NVME_NS_EXT_LBAS) {
                length += meta_len;
index 50d9a20..856aa31 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/t10-pi.h>
 #include <linux/types.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
 #include <linux/sed-opal.h>
 #include <linux/pci-p2pdma.h>
 
@@ -542,50 +543,71 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
        return true;
 }
 
-static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
 {
-       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
-       dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       dma_addr_t dma_addr = iod->first_dma;
        int i;
 
-       if (iod->dma_len) {
-               dma_unmap_page(dev->dev, dma_addr, iod->dma_len,
-                              rq_dma_dir(req));
-               return;
+       for (i = 0; i < iod->npages; i++) {
+               __le64 *prp_list = nvme_pci_iod_list(req)[i];
+               dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+
+               dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
+               dma_addr = next_dma_addr;
        }
 
-       WARN_ON_ONCE(!iod->nents);
+}
 
-       if (is_pci_p2pdma_page(sg_page(iod->sg)))
-               pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
-                                   rq_dma_dir(req));
-       else
-               dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
+static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
+{
+       const int last_sg = SGES_PER_PAGE - 1;
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       dma_addr_t dma_addr = iod->first_dma;
+       int i;
 
+       for (i = 0; i < iod->npages; i++) {
+               struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i];
+               dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr);
 
-       if (iod->npages == 0)
-               dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
-                       dma_addr);
+               dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
+               dma_addr = next_dma_addr;
+       }
 
-       for (i = 0; i < iod->npages; i++) {
-               void *addr = nvme_pci_iod_list(req)[i];
+}
 
-               if (iod->use_sgl) {
-                       struct nvme_sgl_desc *sg_list = addr;
+static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
+{
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
-                       next_dma_addr =
-                           le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
-               } else {
-                       __le64 *prp_list = addr;
+       if (is_pci_p2pdma_page(sg_page(iod->sg)))
+               pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
+                                   rq_dma_dir(req));
+       else
+               dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
+}
 
-                       next_dma_addr = le64_to_cpu(prp_list[last_prp]);
-               }
+static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+{
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
-               dma_pool_free(dev->prp_page_pool, addr, dma_addr);
-               dma_addr = next_dma_addr;
+       if (iod->dma_len) {
+               dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
+                              rq_dma_dir(req));
+               return;
        }
 
+       WARN_ON_ONCE(!iod->nents);
+
+       nvme_unmap_sg(dev, req);
+       if (iod->npages == 0)
+               dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
+                             iod->first_dma);
+       else if (iod->use_sgl)
+               nvme_free_sgls(dev, req);
+       else
+               nvme_free_prps(dev, req);
        mempool_free(iod->sg, dev->iod_mempool);
 }
 
@@ -661,7 +683,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
                        __le64 *old_prp_list = prp_list;
                        prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
                        if (!prp_list)
-                               return BLK_STS_RESOURCE;
+                               goto free_prps;
                        list[iod->npages++] = prp_list;
                        prp_list[0] = old_prp_list[i - 1];
                        old_prp_list[i - 1] = cpu_to_le64(prp_dma);
@@ -681,14 +703,14 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
                dma_addr = sg_dma_address(sg);
                dma_len = sg_dma_len(sg);
        }
-
 done:
        cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
        cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
-
        return BLK_STS_OK;
-
- bad_sgl:
+free_prps:
+       nvme_free_prps(dev, req);
+       return BLK_STS_RESOURCE;
+bad_sgl:
        WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
                        "Invalid SGL for payload:%d nents:%d\n",
                        blk_rq_payload_bytes(req), iod->nents);
@@ -760,7 +782,7 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
 
                        sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
                        if (!sg_list)
-                               return BLK_STS_RESOURCE;
+                               goto free_sgls;
 
                        i = 0;
                        nvme_pci_iod_list(req)[iod->npages++] = sg_list;
@@ -773,6 +795,9 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
        } while (--entries > 0);
 
        return BLK_STS_OK;
+free_sgls:
+       nvme_free_sgls(dev, req);
+       return BLK_STS_RESOURCE;
 }
 
 static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
@@ -841,7 +866,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
        sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
        iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
        if (!iod->nents)
-               goto out;
+               goto out_free_sg;
 
        if (is_pci_p2pdma_page(sg_page(iod->sg)))
                nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
@@ -850,16 +875,21 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
                nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
                                             rq_dma_dir(req), DMA_ATTR_NO_WARN);
        if (!nr_mapped)
-               goto out;
+               goto out_free_sg;
 
        iod->use_sgl = nvme_pci_use_sgls(dev, req);
        if (iod->use_sgl)
                ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
        else
                ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
-out:
        if (ret != BLK_STS_OK)
-               nvme_unmap_data(dev, req);
+               goto out_unmap_sg;
+       return BLK_STS_OK;
+
+out_unmap_sg:
+       nvme_unmap_sg(dev, req);
+out_free_sg:
+       mempool_free(iod->sg, dev->iod_mempool);
        return ret;
 }
 
@@ -1795,6 +1825,9 @@ static void nvme_map_cmb(struct nvme_dev *dev)
        if (dev->cmb_size)
                return;
 
+       if (NVME_CAP_CMBS(dev->ctrl.cap))
+               writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC);
+
        dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
        if (!dev->cmbsz)
                return;
@@ -1808,6 +1841,16 @@ static void nvme_map_cmb(struct nvme_dev *dev)
        if (offset > bar_size)
                return;
 
+       /*
+        * Tell the controller about the host side address mapping the CMB,
+        * and enable CMB decoding for the NVMe 1.4+ scheme:
+        */
+       if (NVME_CAP_CMBS(dev->ctrl.cap)) {
+               hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE |
+                            (pci_bus_address(pdev, bar) + offset),
+                            dev->bar + NVME_REG_CMBMSC);
+       }
+
        /*
         * Controllers may support a CMB size larger than their BAR,
         * for example, due to being behind a bridge. Reduce the CMB to
index cf6c49d..b7ce4f2 100644 (file)
@@ -97,6 +97,7 @@ struct nvme_rdma_queue {
        struct completion       cm_done;
        bool                    pi_support;
        int                     cq_size;
+       struct mutex            queue_lock;
 };
 
 struct nvme_rdma_ctrl {
@@ -579,6 +580,7 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
        int ret;
 
        queue = &ctrl->queues[idx];
+       mutex_init(&queue->queue_lock);
        queue->ctrl = ctrl;
        if (idx && ctrl->ctrl.max_integrity_segments)
                queue->pi_support = true;
@@ -598,7 +600,8 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
        if (IS_ERR(queue->cm_id)) {
                dev_info(ctrl->ctrl.device,
                        "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id));
-               return PTR_ERR(queue->cm_id);
+               ret = PTR_ERR(queue->cm_id);
+               goto out_destroy_mutex;
        }
 
        if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
@@ -628,6 +631,8 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
 out_destroy_cm_id:
        rdma_destroy_id(queue->cm_id);
        nvme_rdma_destroy_queue_ib(queue);
+out_destroy_mutex:
+       mutex_destroy(&queue->queue_lock);
        return ret;
 }
 
@@ -639,9 +644,10 @@ static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
 
 static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
 {
-       if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
-               return;
-       __nvme_rdma_stop_queue(queue);
+       mutex_lock(&queue->queue_lock);
+       if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
+               __nvme_rdma_stop_queue(queue);
+       mutex_unlock(&queue->queue_lock);
 }
 
 static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
@@ -651,6 +657,7 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
 
        nvme_rdma_destroy_queue_ib(queue);
        rdma_destroy_id(queue->cm_id);
+       mutex_destroy(&queue->queue_lock);
 }
 
 static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
index 2166199..881d28e 100644 (file)
@@ -76,6 +76,7 @@ struct nvme_tcp_queue {
        struct work_struct      io_work;
        int                     io_cpu;
 
+       struct mutex            queue_lock;
        struct mutex            send_mutex;
        struct llist_head       req_list;
        struct list_head        send_list;
@@ -1219,6 +1220,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
 
        sock_release(queue->sock);
        kfree(queue->pdu);
+       mutex_destroy(&queue->queue_lock);
 }
 
 static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
@@ -1380,6 +1382,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
        struct nvme_tcp_queue *queue = &ctrl->queues[qid];
        int ret, rcv_pdu_size;
 
+       mutex_init(&queue->queue_lock);
        queue->ctrl = ctrl;
        init_llist_head(&queue->req_list);
        INIT_LIST_HEAD(&queue->send_list);
@@ -1398,7 +1401,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
        if (ret) {
                dev_err(nctrl->device,
                        "failed to create socket: %d\n", ret);
-               return ret;
+               goto err_destroy_mutex;
        }
 
        /* Single syn retry */
@@ -1507,6 +1510,8 @@ err_crypto:
 err_sock:
        sock_release(queue->sock);
        queue->sock = NULL;
+err_destroy_mutex:
+       mutex_destroy(&queue->queue_lock);
        return ret;
 }
 
@@ -1534,9 +1539,10 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
        struct nvme_tcp_queue *queue = &ctrl->queues[qid];
 
-       if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
-               return;
-       __nvme_tcp_stop_queue(queue);
+       mutex_lock(&queue->queue_lock);
+       if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
+               __nvme_tcp_stop_queue(queue);
+       mutex_unlock(&queue->queue_lock);
 }
 
 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
index 8d90235..dc1ea46 100644 (file)
@@ -487,8 +487,10 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
 
        /* return an all zeroed buffer if we can't find an active namespace */
        ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
-       if (!ns)
+       if (!ns) {
+               status = NVME_SC_INVALID_NS;
                goto done;
+       }
 
        nvmet_ns_revalidate(ns);
 
@@ -541,7 +543,9 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
                id->nsattr |= (1 << 0);
        nvmet_put_namespace(ns);
 done:
-       status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+       if (!status)
+               status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
        kfree(id);
 out:
        nvmet_req_complete(req, status);
index 65d5ea0..1cb158d 100644 (file)
@@ -1,2 +1,2 @@
 # SPDX-License-Identifier: GPL-2.0
-obj-y          += phy-ingenic-usb.o
+obj-$(CONFIG_PHY_INGENIC_USB)          += phy-ingenic-usb.o
index d38def4..55f8e6c 100644 (file)
@@ -49,7 +49,9 @@ config PHY_MTK_HDMI
 
 config PHY_MTK_MIPI_DSI
        tristate "MediaTek MIPI-DSI Driver"
-       depends on ARCH_MEDIATEK && OF
+       depends on ARCH_MEDIATEK || COMPILE_TEST
+       depends on COMMON_CLK
+       depends on OF
        select GENERIC_PHY
        help
          Support MIPI DSI for Mediatek SoCs.
index 442522b..4728e2b 100644 (file)
@@ -662,35 +662,42 @@ static int cpcap_usb_phy_probe(struct platform_device *pdev)
        generic_phy = devm_phy_create(ddata->dev, NULL, &ops);
        if (IS_ERR(generic_phy)) {
                error = PTR_ERR(generic_phy);
-               return PTR_ERR(generic_phy);
+               goto out_reg_disable;
        }
 
        phy_set_drvdata(generic_phy, ddata);
 
        phy_provider = devm_of_phy_provider_register(ddata->dev,
                                                     of_phy_simple_xlate);
-       if (IS_ERR(phy_provider))
-               return PTR_ERR(phy_provider);
+       if (IS_ERR(phy_provider)) {
+               error = PTR_ERR(phy_provider);
+               goto out_reg_disable;
+       }
 
        error = cpcap_usb_init_optional_pins(ddata);
        if (error)
-               return error;
+               goto out_reg_disable;
 
        cpcap_usb_init_optional_gpios(ddata);
 
        error = cpcap_usb_init_iio(ddata);
        if (error)
-               return error;
+               goto out_reg_disable;
 
        error = cpcap_usb_init_interrupts(pdev, ddata);
        if (error)
-               return error;
+               goto out_reg_disable;
 
        usb_add_phy_dev(&ddata->phy);
        atomic_set(&ddata->active, 1);
        schedule_delayed_work(&ddata->detect_work, msecs_to_jiffies(1));
 
        return 0;
+
+out_reg_disable:
+       regulator_disable(ddata->vusb);
+
+       return error;
 }
 
 static int cpcap_usb_phy_remove(struct platform_device *pdev)
index 34803a6..5c1a109 100644 (file)
@@ -347,7 +347,7 @@ FUNC_GROUP_DECL(RMII4, F24, E23, E24, E25, C25, C24, B26, B25, B24);
 
 #define D22 40
 SIG_EXPR_LIST_DECL_SESG(D22, SD1CLK, SD1, SIG_DESC_SET(SCU414, 8));
-SIG_EXPR_LIST_DECL_SEMG(D22, PWM8, PWM8G0, PWM8, SIG_DESC_SET(SCU414, 8));
+SIG_EXPR_LIST_DECL_SEMG(D22, PWM8, PWM8G0, PWM8, SIG_DESC_SET(SCU4B4, 8));
 PIN_DECL_2(D22, GPIOF0, SD1CLK, PWM8);
 GROUP_DECL(PWM8G0, D22);
 
index 7aeb552..72f17f2 100644 (file)
@@ -920,6 +920,10 @@ int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
                        err = hw->soc->bias_set(hw, desc, pullup);
                        if (err)
                                return err;
+               } else if (hw->soc->bias_set_combo) {
+                       err = hw->soc->bias_set_combo(hw, desc, pullup, arg);
+                       if (err)
+                               return err;
                } else {
                        return -ENOTSUPP;
                }
index d4ea108..abfe11c 100644 (file)
@@ -949,7 +949,6 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
        } else {
                int irq = chip->to_irq(chip, offset);
                const int pullidx = pull ? 1 : 0;
-               bool wake;
                int val;
                static const char * const pulls[] = {
                        "none        ",
index 53a6a24..3ea1634 100644 (file)
 #define JZ4740_GPIO_TRIG       0x70
 #define JZ4740_GPIO_FLAG       0x80
 
-#define JZ4760_GPIO_INT                0x10
-#define JZ4760_GPIO_PAT1       0x30
-#define JZ4760_GPIO_PAT0       0x40
-#define JZ4760_GPIO_FLAG       0x50
-#define JZ4760_GPIO_PEN                0x70
+#define JZ4770_GPIO_INT                0x10
+#define JZ4770_GPIO_PAT1       0x30
+#define JZ4770_GPIO_PAT0       0x40
+#define JZ4770_GPIO_FLAG       0x50
+#define JZ4770_GPIO_PEN                0x70
 
 #define X1830_GPIO_PEL                 0x110
 #define X1830_GPIO_PEH                 0x120
@@ -1688,8 +1688,8 @@ static inline bool ingenic_gpio_get_value(struct ingenic_gpio_chip *jzgc,
 static void ingenic_gpio_set_value(struct ingenic_gpio_chip *jzgc,
                                   u8 offset, int value)
 {
-       if (jzgc->jzpc->info->version >= ID_JZ4760)
-               ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_PAT0, offset, !!value);
+       if (jzgc->jzpc->info->version >= ID_JZ4770)
+               ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_PAT0, offset, !!value);
        else
                ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, offset, !!value);
 }
@@ -1718,9 +1718,9 @@ static void irq_set_type(struct ingenic_gpio_chip *jzgc,
                break;
        }
 
-       if (jzgc->jzpc->info->version >= ID_JZ4760) {
-               reg1 = JZ4760_GPIO_PAT1;
-               reg2 = JZ4760_GPIO_PAT0;
+       if (jzgc->jzpc->info->version >= ID_JZ4770) {
+               reg1 = JZ4770_GPIO_PAT1;
+               reg2 = JZ4770_GPIO_PAT0;
        } else {
                reg1 = JZ4740_GPIO_TRIG;
                reg2 = JZ4740_GPIO_DIR;
@@ -1758,8 +1758,8 @@ static void ingenic_gpio_irq_enable(struct irq_data *irqd)
        struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
        int irq = irqd->hwirq;
 
-       if (jzgc->jzpc->info->version >= ID_JZ4760)
-               ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, true);
+       if (jzgc->jzpc->info->version >= ID_JZ4770)
+               ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, true);
        else
                ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, true);
 
@@ -1774,8 +1774,8 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd)
 
        ingenic_gpio_irq_mask(irqd);
 
-       if (jzgc->jzpc->info->version >= ID_JZ4760)
-               ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, false);
+       if (jzgc->jzpc->info->version >= ID_JZ4770)
+               ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, false);
        else
                ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false);
 }
@@ -1799,8 +1799,8 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd)
                        irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_HIGH);
        }
 
-       if (jzgc->jzpc->info->version >= ID_JZ4760)
-               ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_FLAG, irq, false);
+       if (jzgc->jzpc->info->version >= ID_JZ4770)
+               ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_FLAG, irq, false);
        else
                ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, irq, true);
 }
@@ -1856,8 +1856,8 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc)
 
        chained_irq_enter(irq_chip, desc);
 
-       if (jzgc->jzpc->info->version >= ID_JZ4760)
-               flag = ingenic_gpio_read_reg(jzgc, JZ4760_GPIO_FLAG);
+       if (jzgc->jzpc->info->version >= ID_JZ4770)
+               flag = ingenic_gpio_read_reg(jzgc, JZ4770_GPIO_FLAG);
        else
                flag = ingenic_gpio_read_reg(jzgc, JZ4740_GPIO_FLAG);
 
@@ -1938,9 +1938,9 @@ static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
        struct ingenic_pinctrl *jzpc = jzgc->jzpc;
        unsigned int pin = gc->base + offset;
 
-       if (jzpc->info->version >= ID_JZ4760) {
-               if (ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_INT) ||
-                   ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1))
+       if (jzpc->info->version >= ID_JZ4770) {
+               if (ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_INT) ||
+                   ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PAT1))
                        return GPIO_LINE_DIRECTION_IN;
                return GPIO_LINE_DIRECTION_OUT;
        }
@@ -1991,20 +1991,20 @@ static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc,
                        'A' + offt, idx, func);
 
        if (jzpc->info->version >= ID_X1000) {
-               ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+               ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
                ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, false);
-               ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2);
-               ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, func & 0x1);
+               ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, func & 0x2);
+               ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, func & 0x1);
                ingenic_shadow_config_pin_load(jzpc, pin);
-       } else if (jzpc->info->version >= ID_JZ4760) {
-               ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+       } else if (jzpc->info->version >= ID_JZ4770) {
+               ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
                ingenic_config_pin(jzpc, pin, GPIO_MSK, false);
-               ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2);
-               ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, func & 0x1);
+               ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, func & 0x2);
+               ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, func & 0x1);
        } else {
                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, true);
                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_TRIG, func & 0x2);
-               ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func > 0);
+               ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func & 0x1);
        }
 
        return 0;
@@ -2057,14 +2057,14 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
                        'A' + offt, idx, input ? "in" : "out");
 
        if (jzpc->info->version >= ID_X1000) {
-               ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+               ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
                ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, true);
-               ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input);
+               ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
                ingenic_shadow_config_pin_load(jzpc, pin);
-       } else if (jzpc->info->version >= ID_JZ4760) {
-               ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+       } else if (jzpc->info->version >= ID_JZ4770) {
+               ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
                ingenic_config_pin(jzpc, pin, GPIO_MSK, true);
-               ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input);
+               ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
        } else {
                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, false);
                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, !input);
@@ -2091,8 +2091,8 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
        unsigned int offt = pin / PINS_PER_GPIO_CHIP;
        bool pull;
 
-       if (jzpc->info->version >= ID_JZ4760)
-               pull = !ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PEN);
+       if (jzpc->info->version >= ID_JZ4770)
+               pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
        else
                pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
 
@@ -2141,8 +2141,8 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
                                        REG_SET(X1830_GPIO_PEH), bias << idxh);
                }
 
-       } else if (jzpc->info->version >= ID_JZ4760) {
-               ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PEN, !bias);
+       } else if (jzpc->info->version >= ID_JZ4770) {
+               ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PEN, !bias);
        } else {
                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !bias);
        }
@@ -2151,8 +2151,8 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
 static void ingenic_set_output_level(struct ingenic_pinctrl *jzpc,
                                     unsigned int pin, bool high)
 {
-       if (jzpc->info->version >= ID_JZ4760)
-               ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, high);
+       if (jzpc->info->version >= ID_JZ4770)
+               ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, high);
        else
                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DATA, high);
 }
index e051aec..d70caec 100644 (file)
@@ -51,6 +51,7 @@
  * @dual_edge_irqs: Bitmap of irqs that need sw emulated dual edge
  *                  detection.
  * @skip_wake_irqs: Skip IRQs that are handled by wakeup interrupt controller
+ * @disabled_for_mux: These IRQs were disabled because we muxed away.
  * @soc:            Reference to soc_data of platform specific data.
  * @regs:           Base addresses for the TLMM tiles.
  * @phys_base:      Physical base address
@@ -72,6 +73,7 @@ struct msm_pinctrl {
        DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
        DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
        DECLARE_BITMAP(skip_wake_irqs, MAX_NR_GPIO);
+       DECLARE_BITMAP(disabled_for_mux, MAX_NR_GPIO);
 
        const struct msm_pinctrl_soc_data *soc;
        void __iomem *regs[MAX_NR_TILES];
@@ -96,6 +98,14 @@ MSM_ACCESSOR(intr_cfg)
 MSM_ACCESSOR(intr_status)
 MSM_ACCESSOR(intr_target)
 
+static void msm_ack_intr_status(struct msm_pinctrl *pctrl,
+                               const struct msm_pingroup *g)
+{
+       u32 val = g->intr_ack_high ? BIT(g->intr_status_bit) : 0;
+
+       msm_writel_intr_status(val, pctrl, g);
+}
+
 static int msm_get_groups_count(struct pinctrl_dev *pctldev)
 {
        struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
@@ -171,6 +181,10 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
                              unsigned group)
 {
        struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+       struct gpio_chip *gc = &pctrl->chip;
+       unsigned int irq = irq_find_mapping(gc->irq.domain, group);
+       struct irq_data *d = irq_get_irq_data(irq);
+       unsigned int gpio_func = pctrl->soc->gpio_func;
        const struct msm_pingroup *g;
        unsigned long flags;
        u32 val, mask;
@@ -187,6 +201,20 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
        if (WARN_ON(i == g->nfuncs))
                return -EINVAL;
 
+       /*
+        * If a GPIO interrupt is set up on this pin then we need special
+        * handling.  Specifically, the interrupt detection logic will still see
+        * the pin twiddle even when we're muxed away.
+        *
+        * When we see a pin with an interrupt setup on it then we'll disable
+        * When we see a pin with an interrupt set up on it then we'll disable
+        * that disable_irq() refcounts and interrupts are disabled as long as
+        * at least one disable_irq() has been called.
+        */
+       if (d && i != gpio_func &&
+           !test_and_set_bit(d->hwirq, pctrl->disabled_for_mux))
+               disable_irq(irq);
+
        raw_spin_lock_irqsave(&pctrl->lock, flags);
 
        val = msm_readl_ctl(pctrl, g);
@@ -196,6 +224,20 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
 
        raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
+       if (d && i == gpio_func &&
+           test_and_clear_bit(d->hwirq, pctrl->disabled_for_mux)) {
+               /*
+                * Clear interrupts detected while not GPIO since we only
+                * masked things.
+                */
+               if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
+                       irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, false);
+               else
+                       msm_ack_intr_status(pctrl, g);
+
+               enable_irq(irq);
+       }
+
        return 0;
 }
 
@@ -210,8 +252,7 @@ static int msm_pinmux_request_gpio(struct pinctrl_dev *pctldev,
        if (!g->nfuncs)
                return 0;
 
-       /* For now assume function 0 is GPIO because it always is */
-       return msm_pinmux_set_mux(pctldev, g->funcs[0], offset);
+       return msm_pinmux_set_mux(pctldev, g->funcs[pctrl->soc->gpio_func], offset);
 }
 
 static const struct pinmux_ops msm_pinmux_ops = {
@@ -774,7 +815,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
        raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 }
 
-static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
+static void msm_gpio_irq_unmask(struct irq_data *d)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
@@ -792,17 +833,6 @@ static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
 
        raw_spin_lock_irqsave(&pctrl->lock, flags);
 
-       if (status_clear) {
-               /*
-                * clear the interrupt status bit before unmask to avoid
-                * any erroneous interrupts that would have got latched
-                * when the interrupt is not in use.
-                */
-               val = msm_readl_intr_status(pctrl, g);
-               val &= ~BIT(g->intr_status_bit);
-               msm_writel_intr_status(val, pctrl, g);
-       }
-
        val = msm_readl_intr_cfg(pctrl, g);
        val |= BIT(g->intr_raw_status_bit);
        val |= BIT(g->intr_enable_bit);
@@ -822,7 +852,7 @@ static void msm_gpio_irq_enable(struct irq_data *d)
                irq_chip_enable_parent(d);
 
        if (!test_bit(d->hwirq, pctrl->skip_wake_irqs))
-               msm_gpio_irq_clear_unmask(d, true);
+               msm_gpio_irq_unmask(d);
 }
 
 static void msm_gpio_irq_disable(struct irq_data *d)
@@ -837,11 +867,6 @@ static void msm_gpio_irq_disable(struct irq_data *d)
                msm_gpio_irq_mask(d);
 }
 
-static void msm_gpio_irq_unmask(struct irq_data *d)
-{
-       msm_gpio_irq_clear_unmask(d, false);
-}
-
 /**
  * msm_gpio_update_dual_edge_parent() - Prime next edge for IRQs handled by parent.
  * @d: The irq data.
@@ -894,7 +919,6 @@ static void msm_gpio_irq_ack(struct irq_data *d)
        struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
        const struct msm_pingroup *g;
        unsigned long flags;
-       u32 val;
 
        if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) {
                if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
@@ -906,12 +930,7 @@ static void msm_gpio_irq_ack(struct irq_data *d)
 
        raw_spin_lock_irqsave(&pctrl->lock, flags);
 
-       val = msm_readl_intr_status(pctrl, g);
-       if (g->intr_ack_high)
-               val |= BIT(g->intr_status_bit);
-       else
-               val &= ~BIT(g->intr_status_bit);
-       msm_writel_intr_status(val, pctrl, g);
+       msm_ack_intr_status(pctrl, g);
 
        if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
                msm_gpio_update_dual_edge_pos(pctrl, g, d);
@@ -936,6 +955,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
        struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
        const struct msm_pingroup *g;
        unsigned long flags;
+       bool was_enabled;
        u32 val;
 
        if (msm_gpio_needs_dual_edge_parent_workaround(d, type)) {
@@ -997,6 +1017,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
         * could cause the INTR_STATUS to be set for EDGE interrupts.
         */
        val = msm_readl_intr_cfg(pctrl, g);
+       was_enabled = val & BIT(g->intr_raw_status_bit);
        val |= BIT(g->intr_raw_status_bit);
        if (g->intr_detection_width == 2) {
                val &= ~(3 << g->intr_detection_bit);
@@ -1046,6 +1067,14 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
        }
        msm_writel_intr_cfg(val, pctrl, g);
 
+       /*
+        * The first time we set RAW_STATUS_EN it could trigger an interrupt.
+        * Clear the interrupt.  This is safe because we have
+        * IRQCHIP_SET_TYPE_MASKED.
+        */
+       if (!was_enabled)
+               msm_ack_intr_status(pctrl, g);
+
        if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
                msm_gpio_update_dual_edge_pos(pctrl, g, d);
 
@@ -1099,16 +1128,11 @@ static int msm_gpio_irq_reqres(struct irq_data *d)
        }
 
        /*
-        * Clear the interrupt that may be pending before we enable
-        * the line.
-        * This is especially a problem with the GPIOs routed to the
-        * PDC. These GPIOs are direct-connect interrupts to the GIC.
-        * Disabling the interrupt line at the PDC does not prevent
-        * the interrupt from being latched at the GIC. The state at
-        * GIC needs to be cleared before enabling.
+        * The disable / clear-enable workaround we do in msm_pinmux_set_mux()
+        * only works if disable is not lazy since we only clear any bogus
+        * interrupt in hardware. Explicitly mark the interrupt as UNLAZY.
         */
-       if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
-               irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, 0);
+       irq_set_status_flags(d->irq, IRQ_DISABLE_UNLAZY);
 
        return 0;
 out:
index 333f992..e31a516 100644 (file)
@@ -118,6 +118,7 @@ struct msm_gpio_wakeirq_map {
  * @wakeirq_dual_edge_errata: If true then GPIOs using the wakeirq_map need
  *                            to be aware that their parent can't handle dual
  *                            edge interrupts.
+ * @gpio_func: Which function number is GPIO (usually 0).
  */
 struct msm_pinctrl_soc_data {
        const struct pinctrl_pin_desc *pins;
@@ -134,6 +135,7 @@ struct msm_pinctrl_soc_data {
        const struct msm_gpio_wakeirq_map *wakeirq_map;
        unsigned int nwakeirq_map;
        bool wakeirq_dual_edge_errata;
+       unsigned int gpio_func;
 };
 
 extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops;
index 33040b0..2c941cd 100644 (file)
@@ -5,6 +5,7 @@
 
 menuconfig SURFACE_PLATFORMS
        bool "Microsoft Surface Platform-Specific Device Drivers"
+       depends on ACPI
        default y
        help
          Say Y here to get to see options for platform-specific device drivers
@@ -29,20 +30,19 @@ config SURFACE3_WMI
 
 config SURFACE_3_BUTTON
        tristate "Power/home/volume buttons driver for Microsoft Surface 3 tablet"
-       depends on ACPI && KEYBOARD_GPIO && I2C
+       depends on KEYBOARD_GPIO && I2C
        help
          This driver handles the power/home/volume buttons on the Microsoft Surface 3 tablet.
 
 config SURFACE_3_POWER_OPREGION
        tristate "Surface 3 battery platform operation region support"
-       depends on ACPI && I2C
+       depends on I2C
        help
          This driver provides support for ACPI operation
          region of the Surface 3 battery platform driver.
 
 config SURFACE_GPE
        tristate "Surface GPE/Lid Support Driver"
-       depends on ACPI
        depends on DMI
        help
          This driver marks the GPEs related to the ACPI lid device found on
@@ -52,7 +52,7 @@ config SURFACE_GPE
 
 config SURFACE_PRO3_BUTTON
        tristate "Power/home/volume buttons driver for Microsoft Surface Pro 3/4 tablet"
-       depends on ACPI && INPUT
+       depends on INPUT
        help
          This driver handles the power/home/volume buttons on the Microsoft Surface Pro 3/4 tablet.
 
index e49e5d6..86f6991 100644 (file)
@@ -181,12 +181,12 @@ static int surface_lid_enable_wakeup(struct device *dev, bool enable)
        return 0;
 }
 
-static int surface_gpe_suspend(struct device *dev)
+static int __maybe_unused surface_gpe_suspend(struct device *dev)
 {
        return surface_lid_enable_wakeup(dev, true);
 }
 
-static int surface_gpe_resume(struct device *dev)
+static int __maybe_unused surface_gpe_resume(struct device *dev)
 {
        return surface_lid_enable_wakeup(dev, false);
 }
index 0102bf1..ef83425 100644 (file)
@@ -85,7 +85,7 @@ static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u3
        iowrite32(val, dev->regbase + reg_offset);
 }
 
-#if CONFIG_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
 static int smu_fw_info_show(struct seq_file *s, void *unused)
 {
        struct amd_pmc_dev *dev = s->private;
index ecd4779..18bf8ae 100644 (file)
@@ -247,7 +247,8 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command,
        ret = bios_return->return_code;
 
        if (ret) {
-               if (ret != HPWMI_RET_UNKNOWN_CMDTYPE)
+               if (ret != HPWMI_RET_UNKNOWN_COMMAND &&
+                   ret != HPWMI_RET_UNKNOWN_CMDTYPE)
                        pr_warn("query 0x%x returned error 0x%x\n", query, ret);
                goto out_free;
        }
index b457b0b..2cce825 100644 (file)
@@ -164,13 +164,29 @@ static const struct i2c_inst_data bsg2150_data[]  = {
        {}
 };
 
-static const struct i2c_inst_data int3515_data[]  = {
-       { "tps6598x", IRQ_RESOURCE_APIC, 0 },
-       { "tps6598x", IRQ_RESOURCE_APIC, 1 },
-       { "tps6598x", IRQ_RESOURCE_APIC, 2 },
-       { "tps6598x", IRQ_RESOURCE_APIC, 3 },
-       {}
-};
+/*
+ * Device with _HID INT3515 (TI PD controllers) has some unresolved interrupt
+ * issues. The most common problem seen is interrupt flood.
+ *
+ * There are at least two known causes. Firstly, on some boards, the
+ * I2CSerialBus resource index does not match the Interrupt resource, i.e. they
+ * are not one-to-one mapped like in the array below. Secondly, on some boards
+ * the IRQ line from the PD controller is not actually connected at all. But the
+ * interrupt flood is also seen on some boards where those are not a problem, so
+ * there are some other problems as well.
+ *
+ * Because of the issues with the interrupt, the device is disabled for now. If
+ * you wish to debug the issues, uncomment the below, and add an entry for the
+ * INT3515 device to the i2c_multi_instance_ids table.
+ *
+ * static const struct i2c_inst_data int3515_data[]  = {
+ *     { "tps6598x", IRQ_RESOURCE_APIC, 0 },
+ *     { "tps6598x", IRQ_RESOURCE_APIC, 1 },
+ *     { "tps6598x", IRQ_RESOURCE_APIC, 2 },
+ *     { "tps6598x", IRQ_RESOURCE_APIC, 3 },
+ *     { }
+ * };
+ */
 
 /*
  * Note new device-ids must also be added to i2c_multi_instantiate_ids in
@@ -179,7 +195,6 @@ static const struct i2c_inst_data int3515_data[]  = {
 static const struct acpi_device_id i2c_multi_inst_acpi_ids[] = {
        { "BSG1160", (unsigned long)bsg1160_data },
        { "BSG2150", (unsigned long)bsg2150_data },
-       { "INT3515", (unsigned long)int3515_data },
        { }
 };
 MODULE_DEVICE_TABLE(acpi, i2c_multi_inst_acpi_ids);
index 7598cd4..5b81baf 100644 (file)
@@ -92,6 +92,7 @@ struct ideapad_private {
        struct dentry *debug;
        unsigned long cfg;
        bool has_hw_rfkill_switch;
+       bool has_touchpad_switch;
        const char *fnesc_guid;
 };
 
@@ -535,7 +536,9 @@ static umode_t ideapad_is_visible(struct kobject *kobj,
        } else if (attr == &dev_attr_fn_lock.attr) {
                supported = acpi_has_method(priv->adev->handle, "HALS") &&
                        acpi_has_method(priv->adev->handle, "SALS");
-       } else
+       } else if (attr == &dev_attr_touchpad.attr)
+               supported = priv->has_touchpad_switch;
+       else
                supported = true;
 
        return supported ? attr->mode : 0;
@@ -867,6 +870,9 @@ static void ideapad_sync_touchpad_state(struct ideapad_private *priv)
 {
        unsigned long value;
 
+       if (!priv->has_touchpad_switch)
+               return;
+
        /* Without reading from EC touchpad LED doesn't switch state */
        if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) {
                /* Some IdeaPads don't really turn off touchpad - they only
@@ -989,6 +995,9 @@ static int ideapad_acpi_add(struct platform_device *pdev)
        priv->platform_device = pdev;
        priv->has_hw_rfkill_switch = dmi_check_system(hw_rfkill_list);
 
+       /* Most ideapads with ELAN0634 touchpad don't use EC touchpad switch */
+       priv->has_touchpad_switch = !acpi_dev_present("ELAN0634", NULL, -1);
+
        ret = ideapad_sysfs_init(priv);
        if (ret)
                return ret;
@@ -1006,6 +1015,10 @@ static int ideapad_acpi_add(struct platform_device *pdev)
        if (!priv->has_hw_rfkill_switch)
                write_ec_cmd(priv->adev->handle, VPCCMD_W_RF, 1);
 
+       /* The same for Touchpad */
+       if (!priv->has_touchpad_switch)
+               write_ec_cmd(priv->adev->handle, VPCCMD_W_TOUCHPAD, 1);
+
        for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
                if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
                        ideapad_register_rfkill(priv, i);
index 3b49a1f..30a9062 100644 (file)
@@ -207,19 +207,19 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
        {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "HP Stream x360 Convertible PC 11"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"),
                },
        },
        {
                .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"),
+                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"),
                },
        },
        {
                .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"),
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7352"),
                },
        },
        {} /* Array terminator */
index e03df28..f3e8eca 100644 (file)
@@ -8783,6 +8783,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
        TPACPI_Q_LNV3('N', '1', 'T', TPACPI_FAN_2CTL),  /* P71 */
        TPACPI_Q_LNV3('N', '1', 'U', TPACPI_FAN_2CTL),  /* P51 */
        TPACPI_Q_LNV3('N', '2', 'C', TPACPI_FAN_2CTL),  /* P52 / P72 */
+       TPACPI_Q_LNV3('N', '2', 'N', TPACPI_FAN_2CTL),  /* P53 / P73 */
        TPACPI_Q_LNV3('N', '2', 'E', TPACPI_FAN_2CTL),  /* P1 / X1 Extreme (1st gen) */
        TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL),  /* P1 / X1 Extreme (2nd gen) */
        TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL),  /* P1 / X1 Extreme (3nd gen) */
@@ -9951,9 +9952,9 @@ static int tpacpi_proxsensor_init(struct ibm_init_struct *iibm)
        if ((palm_err == -ENODEV) && (lap_err == -ENODEV))
                return 0;
        /* Otherwise, if there was an error return it */
-       if (palm_err && (palm_err != ENODEV))
+       if (palm_err && (palm_err != -ENODEV))
                return palm_err;
-       if (lap_err && (lap_err != ENODEV))
+       if (lap_err && (lap_err != -ENODEV))
                return lap_err;
 
        if (has_palmsensor) {
index 5783139..c4de932 100644 (file)
@@ -263,6 +263,16 @@ static const struct ts_dmi_data digma_citi_e200_data = {
        .properties     = digma_citi_e200_props,
 };
 
+static const struct property_entry estar_beauty_hd_props[] = {
+       PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+       { }
+};
+
+static const struct ts_dmi_data estar_beauty_hd_data = {
+       .acpi_name      = "GDIX1001:00",
+       .properties     = estar_beauty_hd_props,
+};
+
 static const struct property_entry gp_electronic_t701_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
@@ -942,6 +952,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
                },
        },
+       {
+               /* Estar Beauty HD (MID 7316R) */
+               .driver_data = (void *)&estar_beauty_hd_data,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Estar"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"),
+               },
+       },
        {
                /* GP-electronic T701 */
                .driver_data = (void *)&gp_electronic_t701_data,
index ca03d8e..67a768f 100644 (file)
@@ -1813,13 +1813,13 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
 {
        struct regulator_dev *r;
        struct device *dev = rdev->dev.parent;
-       int ret;
+       int ret = 0;
 
        /* No supply to resolve? */
        if (!rdev->supply_name)
                return 0;
 
-       /* Supply already resolved? */
+       /* Supply already resolved? (fast-path without locking contention) */
        if (rdev->supply)
                return 0;
 
@@ -1829,7 +1829,7 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
 
                /* Did the lookup explicitly defer for us? */
                if (ret == -EPROBE_DEFER)
-                       return ret;
+                       goto out;
 
                if (have_full_constraints()) {
                        r = dummy_regulator_rdev;
@@ -1837,15 +1837,18 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
                } else {
                        dev_err(dev, "Failed to resolve %s-supply for %s\n",
                                rdev->supply_name, rdev->desc->name);
-                       return -EPROBE_DEFER;
+                       ret = -EPROBE_DEFER;
+                       goto out;
                }
        }
 
        if (r == rdev) {
                dev_err(dev, "Supply for %s (%s) resolved to itself\n",
                        rdev->desc->name, rdev->supply_name);
-               if (!have_full_constraints())
-                       return -EINVAL;
+               if (!have_full_constraints()) {
+                       ret = -EINVAL;
+                       goto out;
+               }
                r = dummy_regulator_rdev;
                get_device(&r->dev);
        }
@@ -1859,7 +1862,8 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
        if (r->dev.parent && r->dev.parent != rdev->dev.parent) {
                if (!device_is_bound(r->dev.parent)) {
                        put_device(&r->dev);
-                       return -EPROBE_DEFER;
+                       ret = -EPROBE_DEFER;
+                       goto out;
                }
        }
 
@@ -1867,15 +1871,32 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
        ret = regulator_resolve_supply(r);
        if (ret < 0) {
                put_device(&r->dev);
-               return ret;
+               goto out;
+       }
+
+       /*
+        * Recheck rdev->supply with rdev->mutex lock held to avoid a race
+        * between rdev->supply null check and setting rdev->supply in
+        * set_supply() from concurrent tasks.
+        */
+       regulator_lock(rdev);
+
+       /* Supply just resolved by a concurrent task? */
+       if (rdev->supply) {
+               regulator_unlock(rdev);
+               put_device(&r->dev);
+               goto out;
        }
 
        ret = set_supply(rdev, r);
        if (ret < 0) {
+               regulator_unlock(rdev);
                put_device(&r->dev);
-               return ret;
+               goto out;
        }
 
+       regulator_unlock(rdev);
+
        /*
         * In set_machine_constraints() we may have turned this regulator on
         * but we couldn't propagate to the supply if it hadn't been resolved
@@ -1886,11 +1907,12 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
                if (ret < 0) {
                        _regulator_put(rdev->supply);
                        rdev->supply = NULL;
-                       return ret;
+                       goto out;
                }
        }
 
-       return 0;
+out:
+       return ret;
 }
 
 /* Internal regulator request function */
index a2beee6..5988c30 100644 (file)
@@ -444,7 +444,8 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
        fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
        if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone  */
                pr_err("error in devcmd2 init");
-               return -ENODEV;
+               err = -ENODEV;
+               goto err_free_wq;
        }
 
        /*
@@ -460,7 +461,7 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
        err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
                        DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
        if (err)
-               goto err_free_wq;
+               goto err_disable_wq;
 
        vdev->devcmd2->result =
                (struct devcmd2_result *) vdev->devcmd2->results_ring.descs;
@@ -481,8 +482,9 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
 
 err_free_desc_ring:
        vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
-err_free_wq:
+err_disable_wq:
        vnic_wq_disable(&vdev->devcmd2->wq);
+err_free_wq:
        vnic_wq_free(&vdev->devcmd2->wq);
 err_free_devcmd2:
        kfree(vdev->devcmd2);
index 42e4d35..65f168c 100644 (file)
@@ -1744,7 +1744,7 @@ static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
                iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
        }
 
-       vfc_cmd->correlation = cpu_to_be64(evt);
+       vfc_cmd->correlation = cpu_to_be64((u64)evt);
 
        if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
                return ibmvfc_send_event(evt, vhost, 0);
@@ -2418,7 +2418,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
                tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
                evt->sync_iu = &rsp_iu;
 
-               tmf->correlation = cpu_to_be64(evt);
+               tmf->correlation = cpu_to_be64((u64)evt);
 
                init_completion(&evt->comp);
                rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
@@ -3007,8 +3007,10 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
        unsigned long flags = 0;
 
        spin_lock_irqsave(shost->host_lock, flags);
-       if (sdev->type == TYPE_DISK)
+       if (sdev->type == TYPE_DISK) {
                sdev->allow_restart = 1;
+               blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
+       }
        spin_unlock_irqrestore(shost->host_lock, flags);
        return 0;
 }
index d71afae..8410004 100644 (file)
@@ -1623,8 +1623,13 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
                rc = fc_exch_done_locked(ep);
                WARN_ON(fc_seq_exch(sp) != ep);
                spin_unlock_bh(&ep->ex_lock);
-               if (!rc)
+               if (!rc) {
                        fc_exch_delete(ep);
+               } else {
+                       FC_EXCH_DBG(ep, "ep is completed already, "
+                                       "hence skip calling the resp\n");
+                       goto skip_resp;
+               }
        }
 
        /*
@@ -1643,6 +1648,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
        if (!fc_invoke_resp(ep, sp, fp))
                fc_frame_free(fp);
 
+skip_resp:
        fc_exch_release(ep);
        return;
 rel:
@@ -1899,10 +1905,16 @@ static void fc_exch_reset(struct fc_exch *ep)
 
        fc_exch_hold(ep);
 
-       if (!rc)
+       if (!rc) {
                fc_exch_delete(ep);
+       } else {
+               FC_EXCH_DBG(ep, "ep is completed already, "
+                               "hence skip calling the resp\n");
+               goto skip_resp;
+       }
 
        fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
+skip_resp:
        fc_seq_set_resp(sp, NULL, ep->arg);
        fc_exch_release(ep);
 }
index af19209..63a4f48 100644 (file)
@@ -8244,11 +8244,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
                        goto out;
                }
 
+               /* always store 64 bits regardless of addressing */
                sense_ptr = (void *)cmd->frame + ioc->sense_off;
-               if (instance->consistent_mask_64bit)
-                       put_unaligned_le64(sense_handle, sense_ptr);
-               else
-                       put_unaligned_le32(sense_handle, sense_ptr);
+               put_unaligned_le64(sense_handle, sense_ptr);
        }
 
        /*
index cba1cf6..1e939a2 100644 (file)
@@ -541,7 +541,14 @@ int srp_reconnect_rport(struct srp_rport *rport)
        res = mutex_lock_interruptible(&rport->mutex);
        if (res)
                goto out;
-       scsi_target_block(&shost->shost_gendev);
+       if (rport->state != SRP_RPORT_FAIL_FAST)
+               /*
+                * sdev state must be SDEV_TRANSPORT_OFFLINE, transition
+                * to SDEV_BLOCK is illegal. Calling scsi_target_unblock()
+                * later is ok though, scsi_internal_device_unblock_nowait()
+                * treats SDEV_TRANSPORT_OFFLINE like SDEV_BLOCK.
+                */
+               scsi_target_block(&shost->shost_gendev);
        res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
        pr_debug("%s (state %d): transport.reconnect() returned %d\n",
                 dev_name(&shost->shost_gendev), rport->state, res);
index 3f6dfed..b915b38 100644 (file)
@@ -72,6 +72,7 @@ config SCSI_UFS_DWC_TC_PCI
 config SCSI_UFSHCD_PLATFORM
        tristate "Platform bus based UFS Controller support"
        depends on SCSI_UFSHCD
+       depends on HAS_IOMEM
        help
        This selects the UFS host controller support. Select this if
        you have an UFS controller on Platform bus.
index e31d2c5..fb32d12 100644 (file)
@@ -3996,6 +3996,8 @@ int ufshcd_link_recovery(struct ufs_hba *hba)
        if (ret)
                dev_err(hba->dev, "%s: link recovery failed, err %d",
                        __func__, ret);
+       else
+               ufshcd_clear_ua_wluns(hba);
 
        return ret;
 }
@@ -4992,7 +4994,8 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
                break;
        } /* end of switch */
 
-       if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
+       if ((host_byte(result) != DID_OK) &&
+           (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
                ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
        return result;
 }
@@ -6001,6 +6004,9 @@ skip_err_handling:
        ufshcd_scsi_unblock_requests(hba);
        ufshcd_err_handling_unprepare(hba);
        up(&hba->eh_sem);
+
+       if (!err && needs_reset)
+               ufshcd_clear_ua_wluns(hba);
 }
 
 /**
@@ -6295,9 +6301,13 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
                intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
        }
 
-       if (enabled_intr_status && retval == IRQ_NONE) {
-               dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
-                                       __func__, intr_status);
+       if (enabled_intr_status && retval == IRQ_NONE &&
+                               !ufshcd_eh_in_progress(hba)) {
+               dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
+                                       __func__,
+                                       intr_status,
+                                       hba->ufs_stats.last_intr_status,
+                                       enabled_intr_status);
                ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
        }
 
@@ -6341,7 +6351,10 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
         * Even though we use wait_event() which sleeps indefinitely,
         * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
         */
-       req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
+       req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+       if (IS_ERR(req))
+               return PTR_ERR(req);
+
        req->end_io_data = &wait;
        free_slot = req->tag;
        WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
@@ -6938,14 +6951,11 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
        ufshcd_set_clk_freq(hba, true);
 
        err = ufshcd_hba_enable(hba);
-       if (err)
-               goto out;
 
        /* Establish the link again and restore the device */
-       err = ufshcd_probe_hba(hba, false);
        if (!err)
-               ufshcd_clear_ua_wluns(hba);
-out:
+               err = ufshcd_probe_hba(hba, false);
+
        if (err)
                dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
        ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
@@ -7716,6 +7726,8 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
        if (ret)
                goto out;
 
+       ufshcd_clear_ua_wluns(hba);
+
        /* Initialize devfreq after UFS device is detected */
        if (ufshcd_is_clkscaling_supported(hba)) {
                memcpy(&hba->clk_scaling.saved_pwr_info.info,
@@ -7917,8 +7929,6 @@ out:
                pm_runtime_put_sync(hba->dev);
                ufshcd_exit_clk_scaling(hba);
                ufshcd_hba_exit(hba);
-       } else {
-               ufshcd_clear_ua_wluns(hba);
        }
 }
 
@@ -8775,6 +8785,7 @@ enable_gating:
                ufshcd_resume_clkscaling(hba);
        hba->clk_gating.is_suspended = false;
        hba->dev_info.b_rpm_dev_flush_capable = false;
+       ufshcd_clear_ua_wluns(hba);
        ufshcd_release(hba);
 out:
        if (hba->dev_info.b_rpm_dev_flush_capable) {
@@ -8885,6 +8896,8 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
                cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
        }
 
+       ufshcd_clear_ua_wluns(hba);
+
        /* Schedule clock gating in case of no access to UFS device yet */
        ufshcd_release(hba);
 
index f8e070d..a14684f 100644 (file)
@@ -214,7 +214,7 @@ int __init register_intc_controller(struct intc_desc *desc)
                        d->window[k].phys = res->start;
                        d->window[k].size = resource_size(res);
                        d->window[k].virt = ioremap(res->start,
-                                                        resource_size(res));
+                                                   resource_size(res));
                        if (!d->window[k].virt)
                                goto err2;
                }
index 9e62ba9..939915a 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/debugfs.h>
 #include "internals.h"
 
-static int intc_irq_xlate_debug(struct seq_file *m, void *priv)
+static int intc_irq_xlate_show(struct seq_file *m, void *priv)
 {
        int i;
 
@@ -37,17 +37,7 @@ static int intc_irq_xlate_debug(struct seq_file *m, void *priv)
        return 0;
 }
 
-static int intc_irq_xlate_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, intc_irq_xlate_debug, inode->i_private);
-}
-
-static const struct file_operations intc_irq_xlate_fops = {
-       .open = intc_irq_xlate_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(intc_irq_xlate);
 
 static int __init intc_irq_xlate_init(void)
 {
index c4472b6..698d21f 100644 (file)
@@ -271,8 +271,21 @@ struct soc_device * __init at91_soc_init(const struct at91_soc *socs)
        return soc_dev;
 }
 
+static const struct of_device_id at91_soc_allowed_list[] __initconst = {
+       { .compatible = "atmel,at91rm9200", },
+       { .compatible = "atmel,at91sam9", },
+       { .compatible = "atmel,sama5", },
+       { .compatible = "atmel,samv7", },
+       { }
+};
+
 static int __init atmel_soc_device_init(void)
 {
+       struct device_node *np = of_find_node_by_path("/");
+
+       if (!of_match_node(at91_soc_allowed_list, np))
+               return 0;
+
        at91_soc_init(socs);
 
        return 0;
index a9370f4..05812f8 100644 (file)
@@ -13,7 +13,7 @@ config SOC_IMX8M
        depends on ARCH_MXC || COMPILE_TEST
        default ARCH_MXC && ARM64
        select SOC_BUS
-       select ARM_GIC_V3 if ARCH_MXC
+       select ARM_GIC_V3 if ARCH_MXC && ARCH_MULTI_V7
        help
          If you say yes here you get support for the NXP i.MX8M family
          support, it will provide the SoC info like SoC family,
index 1217caf..9b07663 100644 (file)
@@ -140,12 +140,13 @@ struct litex_soc_ctrl_device {
        void __iomem *base;
 };
 
+#ifdef CONFIG_OF
 static const struct of_device_id litex_soc_ctrl_of_match[] = {
        {.compatible = "litex,soc-controller"},
        {},
 };
-
 MODULE_DEVICE_TABLE(of, litex_soc_ctrl_of_match);
+#endif /* CONFIG_OF */
 
 static int litex_soc_ctrl_probe(struct platform_device *pdev)
 {
index cbc4c28..62ea0c9 100644 (file)
@@ -254,7 +254,8 @@ static int altera_spi_probe(struct platform_device *pdev)
                        dev_err(&pdev->dev,
                                "Invalid number of chipselect: %hu\n",
                                pdata->num_chipselect);
-                       return -EINVAL;
+                       err = -EINVAL;
+                       goto exit;
                }
 
                master->num_chipselect = pdata->num_chipselect;
index 859910e..8cb4d92 100644 (file)
@@ -682,6 +682,7 @@ static const struct of_device_id spidev_dt_ids[] = {
        { .compatible = "lwn,bk4" },
        { .compatible = "dh,dhcom-board" },
        { .compatible = "menlo,m53cpld" },
+       { .compatible = "cisco,spi-petra" },
        {},
 };
 MODULE_DEVICE_TABLE(of, spidev_dt_ids);
index b668a82..f5fbdbc 100644 (file)
@@ -367,7 +367,7 @@ hantro_reset_raw_fmt(struct hantro_ctx *ctx)
 
        hantro_reset_fmt(raw_fmt, raw_vpu_fmt);
        raw_fmt->width = encoded_fmt->width;
-       raw_fmt->width = encoded_fmt->width;
+       raw_fmt->height = encoded_fmt->height;
        if (ctx->is_encoder)
                hantro_set_fmt_out(ctx, raw_fmt);
        else
index 781c84a..de7442d 100644 (file)
@@ -203,7 +203,7 @@ static void _cedrus_write_ref_list(struct cedrus_ctx *ctx,
                position = cedrus_buf->codec.h264.position;
 
                sram_array[i] |= position << 1;
-               if (ref_list[i].fields & V4L2_H264_BOTTOM_FIELD_REF)
+               if (ref_list[i].fields == V4L2_H264_BOTTOM_FIELD_REF)
                        sram_array[i] |= BIT(0);
        }
 
index ab5a862..f798b0c 100644 (file)
@@ -20,9 +20,9 @@ enum country_code_type_t {
        COUNTRY_CODE_MAX
 };
 
-int rtw_regd_init(struct adapter *padapter,
-       void (*reg_notifier)(struct wiphy *wiphy,
-               struct regulatory_request *request));
+void rtw_regd_init(struct wiphy *wiphy,
+                  void (*reg_notifier)(struct wiphy *wiphy,
+                                       struct regulatory_request *request));
 void rtw_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
 
 
index bf14172..1103231 100644 (file)
@@ -3211,9 +3211,6 @@ void rtw_cfg80211_init_wiphy(struct adapter *padapter)
                        rtw_cfg80211_init_ht_capab(&bands->ht_cap, NL80211_BAND_2GHZ, rf_type);
        }
 
-       /* init regulary domain */
-       rtw_regd_init(padapter, rtw_reg_notifier);
-
        /* copy mac_addr to wiphy */
        memcpy(wiphy->perm_addr, padapter->eeprompriv.mac_addr, ETH_ALEN);
 
@@ -3328,6 +3325,9 @@ int rtw_wdev_alloc(struct adapter *padapter, struct device *dev)
        *((struct adapter **)wiphy_priv(wiphy)) = padapter;
        rtw_cfg80211_preinit_wiphy(padapter, wiphy);
 
+       /* init regulatory domain */
+       rtw_regd_init(wiphy, rtw_reg_notifier);
+
        ret = wiphy_register(wiphy);
        if (ret < 0) {
                DBG_8192C("Couldn't register wiphy device\n");
index 578b9f7..2833fc6 100644 (file)
@@ -139,15 +139,11 @@ static void _rtw_regd_init_wiphy(struct rtw_regulatory *reg,
        _rtw_reg_apply_flags(wiphy);
 }
 
-int rtw_regd_init(struct adapter *padapter,
-                 void (*reg_notifier)(struct wiphy *wiphy,
-                                      struct regulatory_request *request))
+void rtw_regd_init(struct wiphy *wiphy,
+                  void (*reg_notifier)(struct wiphy *wiphy,
+                                       struct regulatory_request *request))
 {
-       struct wiphy *wiphy = padapter->rtw_wdev->wiphy;
-
        _rtw_regd_init_wiphy(NULL, wiphy, reg_notifier);
-
-       return 0;
 }
 
 void rtw_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
index 6b171ff..a5991df 100644 (file)
@@ -562,8 +562,6 @@ tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
 
 static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
 {
-       if (tcmu_cmd->se_cmd)
-               tcmu_cmd->se_cmd->priv = NULL;
        kfree(tcmu_cmd->dbi);
        kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
 }
@@ -1174,11 +1172,12 @@ tcmu_queue_cmd(struct se_cmd *se_cmd)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
        mutex_lock(&udev->cmdr_lock);
-       se_cmd->priv = tcmu_cmd;
        if (!(se_cmd->transport_state & CMD_T_ABORTED))
                ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
        if (ret < 0)
                tcmu_free_cmd(tcmu_cmd);
+       else
+               se_cmd->priv = tcmu_cmd;
        mutex_unlock(&udev->cmdr_lock);
        return scsi_ret;
 }
@@ -1241,6 +1240,7 @@ tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
 
                list_del_init(&cmd->queue_entry);
                tcmu_free_cmd(cmd);
+               se_cmd->priv = NULL;
                target_complete_cmd(se_cmd, SAM_STAT_TASK_ABORTED);
                unqueued = true;
        }
@@ -1332,6 +1332,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
        }
 
 done:
+       se_cmd->priv = NULL;
        if (read_len_valid) {
                pr_debug("read_len = %d\n", read_len);
                target_complete_cmd_with_length(cmd->se_cmd,
@@ -1478,6 +1479,7 @@ static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
        se_cmd = cmd->se_cmd;
        tcmu_free_cmd(cmd);
 
+       se_cmd->priv = NULL;
        target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
 }
 
@@ -1592,6 +1594,7 @@ static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
                         * removed then LIO core will do the right thing and
                         * fail the retry.
                         */
+                       tcmu_cmd->se_cmd->priv = NULL;
                        target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
                        tcmu_free_cmd(tcmu_cmd);
                        continue;
@@ -1605,6 +1608,7 @@ static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
                         * Ignore scsi_ret for now. target_complete_cmd
                         * drops it.
                         */
+                       tcmu_cmd->se_cmd->priv = NULL;
                        target_complete_cmd(tcmu_cmd->se_cmd,
                                            SAM_STAT_CHECK_CONDITION);
                        tcmu_free_cmd(tcmu_cmd);
@@ -2212,6 +2216,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
                if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
                        WARN_ON(!cmd->se_cmd);
                        list_del_init(&cmd->queue_entry);
+                       cmd->se_cmd->priv = NULL;
                        if (err_level == 1) {
                                /*
                                 * Userspace was not able to start the
index c981757..780d7c4 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/mm.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/tee_drv.h>
 #include <linux/types.h>
@@ -148,7 +149,8 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
                         */
                        optee_cq_wait_for_completion(&optee->call_queue, &w);
                } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
-                       might_sleep();
+                       if (need_resched())
+                               cond_resched();
                        param.a0 = res.a0;
                        param.a1 = res.a1;
                        param.a2 = res.a2;
index 8b7f941..b8c4159 100644 (file)
@@ -2316,7 +2316,7 @@ static int icm_usb4_switch_nvm_authenticate_status(struct tb_switch *sw,
 
        if (auth && auth->reply.route_hi == sw->config.route_hi &&
            auth->reply.route_lo == sw->config.route_lo) {
-               tb_dbg(tb, "NVM_AUTH found for %llx flags 0x%#x status %#x\n",
+               tb_dbg(tb, "NVM_AUTH found for %llx flags %#x status %#x\n",
                       tb_route(sw), auth->reply.hdr.flags, auth->reply.status);
                if (auth->reply.hdr.flags & ICM_FLAGS_ERROR)
                        ret = -EIO;
index 319d68c..219e857 100644 (file)
@@ -2081,9 +2081,6 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
        return 0;
 }
 
-extern ssize_t redirected_tty_write(struct file *, const char __user *,
-                                                       size_t, loff_t *);
-
 /**
  *     job_control             -       check job control
  *     @tty: tty
@@ -2105,7 +2102,7 @@ static int job_control(struct tty_struct *tty, struct file *file)
        /* NOTE: not yet done after every sleep pending a thorough
           check of the logic of this change. -- jlc */
        /* don't stop on /dev/console */
-       if (file->f_op->write == redirected_tty_write)
+       if (file->f_op->write_iter == redirected_tty_write)
                return 0;
 
        return __tty_check_change(tty, SIGTTIN);
@@ -2309,7 +2306,7 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
        ssize_t retval = 0;
 
        /* Job control check -- must be done at start (POSIX.1 7.1.1.4). */
-       if (L_TOSTOP(tty) && file->f_op->write != redirected_tty_write) {
+       if (L_TOSTOP(tty) && file->f_op->write_iter != redirected_tty_write) {
                retval = tty_check_change(tty);
                if (retval)
                        return retval;
index 118b299..e0c00a1 100644 (file)
@@ -648,6 +648,14 @@ static void wait_for_xmitr(struct uart_port *port)
                                  (val & STAT_TX_RDY(port)), 1, 10000);
 }
 
+static void wait_for_xmite(struct uart_port *port)
+{
+       u32 val;
+
+       readl_poll_timeout_atomic(port->membase + UART_STAT, val,
+                                 (val & STAT_TX_EMP), 1, 10000);
+}
+
 static void mvebu_uart_console_putchar(struct uart_port *port, int ch)
 {
        wait_for_xmitr(port);
@@ -675,7 +683,7 @@ static void mvebu_uart_console_write(struct console *co, const char *s,
 
        uart_console_write(port, s, count, mvebu_uart_console_putchar);
 
-       wait_for_xmitr(port);
+       wait_for_xmite(port);
 
        if (ier)
                writel(ier, port->membase + UART_CTRL(port));
index 8034489..48de209 100644 (file)
@@ -143,12 +143,9 @@ LIST_HEAD(tty_drivers);                    /* linked list of tty drivers */
 DEFINE_MUTEX(tty_mutex);
 
 static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
-static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
-ssize_t redirected_tty_write(struct file *, const char __user *,
-                                                       size_t, loff_t *);
+static ssize_t tty_write(struct kiocb *, struct iov_iter *);
 static __poll_t tty_poll(struct file *, poll_table *);
 static int tty_open(struct inode *, struct file *);
-long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 #ifdef CONFIG_COMPAT
 static long tty_compat_ioctl(struct file *file, unsigned int cmd,
                                unsigned long arg);
@@ -438,8 +435,7 @@ static ssize_t hung_up_tty_read(struct file *file, char __user *buf,
        return 0;
 }
 
-static ssize_t hung_up_tty_write(struct file *file, const char __user *buf,
-                                size_t count, loff_t *ppos)
+static ssize_t hung_up_tty_write(struct kiocb *iocb, struct iov_iter *from)
 {
        return -EIO;
 }
@@ -478,7 +474,8 @@ static void tty_show_fdinfo(struct seq_file *m, struct file *file)
 static const struct file_operations tty_fops = {
        .llseek         = no_llseek,
        .read           = tty_read,
-       .write          = tty_write,
+       .write_iter     = tty_write,
+       .splice_write   = iter_file_splice_write,
        .poll           = tty_poll,
        .unlocked_ioctl = tty_ioctl,
        .compat_ioctl   = tty_compat_ioctl,
@@ -491,7 +488,8 @@ static const struct file_operations tty_fops = {
 static const struct file_operations console_fops = {
        .llseek         = no_llseek,
        .read           = tty_read,
-       .write          = redirected_tty_write,
+       .write_iter     = redirected_tty_write,
+       .splice_write   = iter_file_splice_write,
        .poll           = tty_poll,
        .unlocked_ioctl = tty_ioctl,
        .compat_ioctl   = tty_compat_ioctl,
@@ -503,7 +501,7 @@ static const struct file_operations console_fops = {
 static const struct file_operations hung_up_tty_fops = {
        .llseek         = no_llseek,
        .read           = hung_up_tty_read,
-       .write          = hung_up_tty_write,
+       .write_iter     = hung_up_tty_write,
        .poll           = hung_up_tty_poll,
        .unlocked_ioctl = hung_up_tty_ioctl,
        .compat_ioctl   = hung_up_tty_compat_ioctl,
@@ -606,9 +604,9 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
        /* This breaks for file handles being sent over AF_UNIX sockets ? */
        list_for_each_entry(priv, &tty->tty_files, list) {
                filp = priv->file;
-               if (filp->f_op->write == redirected_tty_write)
+               if (filp->f_op->write_iter == redirected_tty_write)
                        cons_filp = filp;
-               if (filp->f_op->write != tty_write)
+               if (filp->f_op->write_iter != tty_write)
                        continue;
                closecount++;
                __tty_fasync(-1, filp, 0);      /* can't block */
@@ -901,9 +899,9 @@ static inline ssize_t do_tty_write(
        ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t),
        struct tty_struct *tty,
        struct file *file,
-       const char __user *buf,
-       size_t count)
+       struct iov_iter *from)
 {
+       size_t count = iov_iter_count(from);
        ssize_t ret, written = 0;
        unsigned int chunk;
 
@@ -955,14 +953,20 @@ static inline ssize_t do_tty_write(
                size_t size = count;
                if (size > chunk)
                        size = chunk;
+
                ret = -EFAULT;
-               if (copy_from_user(tty->write_buf, buf, size))
+               if (copy_from_iter(tty->write_buf, size, from) != size)
                        break;
+
                ret = write(tty, file, tty->write_buf, size);
                if (ret <= 0)
                        break;
+
+               /* FIXME! Have Al check this! */
+               if (ret != size)
+                       iov_iter_revert(from, size-ret);
+
                written += ret;
-               buf += ret;
                count -= ret;
                if (!count)
                        break;
@@ -1022,9 +1026,9 @@ void tty_write_message(struct tty_struct *tty, char *msg)
  *     write method will not be invoked in parallel for each device.
  */
 
-static ssize_t tty_write(struct file *file, const char __user *buf,
-                                               size_t count, loff_t *ppos)
+static ssize_t tty_write(struct kiocb *iocb, struct iov_iter *from)
 {
+       struct file *file = iocb->ki_filp;
        struct tty_struct *tty = file_tty(file);
        struct tty_ldisc *ld;
        ssize_t ret;
@@ -1038,17 +1042,16 @@ static ssize_t tty_write(struct file *file, const char __user *buf,
                tty_err(tty, "missing write_room method\n");
        ld = tty_ldisc_ref_wait(tty);
        if (!ld)
-               return hung_up_tty_write(file, buf, count, ppos);
+               return hung_up_tty_write(iocb, from);
        if (!ld->ops->write)
                ret = -EIO;
        else
-               ret = do_tty_write(ld->ops->write, tty, file, buf, count);
+               ret = do_tty_write(ld->ops->write, tty, file, from);
        tty_ldisc_deref(ld);
        return ret;
 }
 
-ssize_t redirected_tty_write(struct file *file, const char __user *buf,
-                                               size_t count, loff_t *ppos)
+ssize_t redirected_tty_write(struct kiocb *iocb, struct iov_iter *iter)
 {
        struct file *p = NULL;
 
@@ -1059,11 +1062,11 @@ ssize_t redirected_tty_write(struct file *file, const char __user *buf,
 
        if (p) {
                ssize_t res;
-               res = vfs_write(p, buf, count, &p->f_pos);
+               res = vfs_iocb_iter_write(p, iocb, iter);
                fput(p);
                return res;
        }
-       return tty_write(file, buf, count, ppos);
+       return tty_write(iocb, iter);
 }
 
 /*
@@ -2295,7 +2298,7 @@ static int tioccons(struct file *file)
 {
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
-       if (file->f_op->write == redirected_tty_write) {
+       if (file->f_op->write_iter == redirected_tty_write) {
                struct file *f;
                spin_lock(&redirect_lock);
                f = redirect;
index 22a56c4..7990fee 100644 (file)
@@ -185,7 +185,11 @@ static int cdns_imx_probe(struct platform_device *pdev)
        }
 
        data->num_clks = ARRAY_SIZE(imx_cdns3_core_clks);
-       data->clks = (struct clk_bulk_data *)imx_cdns3_core_clks;
+       data->clks = devm_kmemdup(dev, imx_cdns3_core_clks,
+                               sizeof(imx_cdns3_core_clks), GFP_KERNEL);
+       if (!data->clks)
+               return -ENOMEM;
+
        ret = devm_clk_bulk_get(dev, data->num_clks, data->clks);
        if (ret)
                return ret;
@@ -214,20 +218,16 @@ err:
        return ret;
 }
 
-static int cdns_imx_remove_core(struct device *dev, void *data)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-
-       platform_device_unregister(pdev);
-
-       return 0;
-}
-
 static int cdns_imx_remove(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
+       struct cdns_imx *data = dev_get_drvdata(dev);
 
-       device_for_each_child(dev, NULL, cdns_imx_remove_core);
+       pm_runtime_get_sync(dev);
+       of_platform_depopulate(dev);
+       clk_bulk_disable_unprepare(data->num_clks, data->clks);
+       pm_runtime_disable(dev);
+       pm_runtime_put_noidle(dev);
        platform_set_drvdata(pdev, NULL);
 
        return 0;
index 0bd6b20..02d8bfa 100644 (file)
@@ -420,7 +420,10 @@ static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep,
        u32 state, reg, loops;
 
        /* Stop DMA activity */
-       writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+       if (ep->epn.desc_mode)
+               writel(VHUB_EP_DMA_CTRL_RESET, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+       else
+               writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
 
        /* Wait for it to complete */
        for (loops = 0; loops < 1000; loops++) {
index 3e88c76..fb01ff4 100644 (file)
@@ -17,7 +17,7 @@ if USB_BDC_UDC
 comment "Platform Support"
 config USB_BDC_PCI
        tristate "BDC support for PCIe based platforms"
-       depends on USB_PCI
+       depends on USB_PCI && BROKEN
        default USB_BDC_UDC
        help
                Enable support for platforms which have BDC connected through PCIe, such as Lego3 FPGA platform.
index 6a62bbd..ea114f9 100644 (file)
@@ -1529,10 +1529,13 @@ static ssize_t soft_connect_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t n)
 {
        struct usb_udc          *udc = container_of(dev, struct usb_udc, dev);
+       ssize_t                 ret;
 
+       mutex_lock(&udc_lock);
        if (!udc->driver) {
                dev_err(dev, "soft-connect without a gadget driver\n");
-               return -EOPNOTSUPP;
+               ret = -EOPNOTSUPP;
+               goto out;
        }
 
        if (sysfs_streq(buf, "connect")) {
@@ -1543,10 +1546,14 @@ static ssize_t soft_connect_store(struct device *dev,
                usb_gadget_udc_stop(udc);
        } else {
                dev_err(dev, "unsupported command '%s'\n", buf);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
-       return n;
+       ret = n;
+out:
+       mutex_unlock(&udc_lock);
+       return ret;
 }
 static DEVICE_ATTR_WO(soft_connect);
 
index 1a953f4..5706776 100644 (file)
@@ -2270,17 +2270,20 @@ static int dummy_hub_control(
                        }
                        fallthrough;
                case USB_PORT_FEAT_RESET:
+                       if (!(dum_hcd->port_status & USB_PORT_STAT_CONNECTION))
+                               break;
                        /* if it's already enabled, disable */
                        if (hcd->speed == HCD_USB3) {
-                               dum_hcd->port_status = 0;
                                dum_hcd->port_status =
                                        (USB_SS_PORT_STAT_POWER |
                                         USB_PORT_STAT_CONNECTION |
                                         USB_PORT_STAT_RESET);
-                       } else
+                       } else {
                                dum_hcd->port_status &= ~(USB_PORT_STAT_ENABLE
                                        | USB_PORT_STAT_LOW_SPEED
                                        | USB_PORT_STAT_HIGH_SPEED);
+                               dum_hcd->port_status |= USB_PORT_STAT_RESET;
+                       }
                        /*
                         * We want to reset device status. All but the
                         * Self powered feature
@@ -2292,7 +2295,8 @@ static int dummy_hub_control(
                         * interval? Is it still 50msec as for HS?
                         */
                        dum_hcd->re_timeout = jiffies + msecs_to_jiffies(50);
-                       fallthrough;
+                       set_link_state(dum_hcd);
+                       break;
                case USB_PORT_FEAT_C_CONNECTION:
                case USB_PORT_FEAT_C_RESET:
                case USB_PORT_FEAT_C_ENABLE:
index e358ae1..1926b32 100644 (file)
@@ -574,6 +574,7 @@ static int ehci_run (struct usb_hcd *hcd)
        struct ehci_hcd         *ehci = hcd_to_ehci (hcd);
        u32                     temp;
        u32                     hcc_params;
+       int                     rc;
 
        hcd->uses_new_polling = 1;
 
@@ -629,9 +630,20 @@ static int ehci_run (struct usb_hcd *hcd)
        down_write(&ehci_cf_port_reset_rwsem);
        ehci->rh_state = EHCI_RH_RUNNING;
        ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
+
+       /* Wait until HC become operational */
        ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
        msleep(5);
+       rc = ehci_handshake(ehci, &ehci->regs->status, STS_HALT, 0, 100 * 1000);
+
        up_write(&ehci_cf_port_reset_rwsem);
+
+       if (rc) {
+               ehci_err(ehci, "USB %x.%x, controller refused to start: %d\n",
+                        ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), rc);
+               return rc;
+       }
+
        ehci->last_periodic_enable = ktime_get_real();
 
        temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
index 087402a..9f9ab5c 100644 (file)
@@ -345,6 +345,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
 
        unlink_empty_async_suspended(ehci);
 
+       /* Some Synopsys controllers mistakenly leave IAA turned on */
+       ehci_writel(ehci, STS_IAA, &ehci->regs->status);
+
        /* Any IAA cycle that started before the suspend is now invalid */
        end_iaa_cycle(ehci);
        ehci_handle_start_intr_unlinks(ehci);
index 5677b81..cf0c93a 100644 (file)
@@ -2931,6 +2931,8 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
        trb->field[0] = cpu_to_le32(field1);
        trb->field[1] = cpu_to_le32(field2);
        trb->field[2] = cpu_to_le32(field3);
+       /* make sure TRB is fully written before giving it to the controller */
+       wmb();
        trb->field[3] = cpu_to_le32(field4);
 
        trace_xhci_queue_trb(ring, trb);
index 934be16..50bb91b 100644 (file)
@@ -623,6 +623,13 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
                                                                     enable);
                        if (err < 0)
                                break;
+
+                       /*
+                        * wait 500us for LFPS detector to be disabled before
+                        * sending ACK
+                        */
+                       if (!enable)
+                               usleep_range(500, 1000);
                }
 
                if (err < 0) {
index c8f0282..18ffd05 100644 (file)
@@ -714,6 +714,23 @@ static bool xs_hvm_defer_init_for_callback(void)
 #endif
 }
 
+static int xenbus_probe_thread(void *unused)
+{
+       DEFINE_WAIT(w);
+
+       /*
+        * We actually just want to wait for *any* trigger of xb_waitq,
+        * and run xenbus_probe() the moment it occurs.
+        */
+       prepare_to_wait(&xb_waitq, &w, TASK_INTERRUPTIBLE);
+       schedule();
+       finish_wait(&xb_waitq, &w);
+
+       DPRINTK("probing");
+       xenbus_probe();
+       return 0;
+}
+
 static int __init xenbus_probe_initcall(void)
 {
        /*
@@ -725,6 +742,20 @@ static int __init xenbus_probe_initcall(void)
             !xs_hvm_defer_init_for_callback()))
                xenbus_probe();
 
+       /*
+        * For XS_LOCAL, spawn a thread which will wait for xenstored
+        * or a xenstore-stubdom to be started, then probe. It will be
+        * triggered when communication starts happening, by waiting
+        * on xb_waitq.
+        */
+       if (xen_store_domain_type == XS_LOCAL) {
+               struct task_struct *probe_task;
+
+               probe_task = kthread_run(xenbus_probe_thread, NULL,
+                                        "xenbus_probe");
+               if (IS_ERR(probe_task))
+                       return PTR_ERR(probe_task);
+       }
        return 0;
 }
 device_initcall(xenbus_probe_initcall);
index 02d7d7b..9cadacf 100644 (file)
@@ -3117,7 +3117,7 @@ void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
                list_del_init(&lower->list);
                if (lower == node)
                        node = NULL;
-               btrfs_backref_free_node(cache, lower);
+               btrfs_backref_drop_node(cache, lower);
        }
 
        btrfs_backref_cleanup_node(cache, node);
index 52f2198..0886e81 100644 (file)
@@ -2669,7 +2669,8 @@ again:
         * Go through delayed refs for all the stuff we've just kicked off
         * and then loop back (just once)
         */
-       ret = btrfs_run_delayed_refs(trans, 0);
+       if (!ret)
+               ret = btrfs_run_delayed_refs(trans, 0);
        if (!ret && loops == 0) {
                loops++;
                spin_lock(&cur_trans->dirty_bgs_lock);
index d79b836..30b1a63 100644 (file)
@@ -5549,7 +5549,15 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
                                goto out_free;
                        }
 
-                       trans = btrfs_start_transaction(tree_root, 0);
+                      /*
+                       * Use join to avoid potential EINTR from transaction
+                       * start. See wait_reserve_ticket and the whole
+                       * reservation callchain.
+                       */
+                       if (for_reloc)
+                               trans = btrfs_join_transaction(tree_root);
+                       else
+                               trans = btrfs_start_transaction(tree_root, 0);
                        if (IS_ERR(trans)) {
                                err = PTR_ERR(trans);
                                goto out_free;
index ae97f4d..78a3537 100644 (file)
@@ -5512,6 +5512,21 @@ static int clone_range(struct send_ctx *sctx,
                        break;
                offset += clone_len;
                clone_root->offset += clone_len;
+
+               /*
+                * If we are cloning from the file we are currently processing,
+                * and using the send root as the clone root, we must stop once
+                * the current clone offset reaches the current eof of the file
+                * at the receiver, otherwise we would issue an invalid clone
+                * operation (source range going beyond eof) and cause the
+                * receiver to fail. So if we reach the current eof, bail out
+                * and fallback to a regular write.
+                */
+               if (clone_root->root == sctx->send_root &&
+                   clone_root->ino == sctx->cur_ino &&
+                   clone_root->offset >= sctx->cur_inode_next_write_offset)
+                       break;
+
                data_offset += clone_len;
 next:
                path->slots[0]++;
index 8e0f7a1..6af7f2b 100644 (file)
@@ -2264,14 +2264,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
         */
        btrfs_free_log_root_tree(trans, fs_info);
 
-       /*
-        * commit_fs_roots() can call btrfs_save_ino_cache(), which generates
-        * new delayed refs. Must handle them or qgroup can be wrong.
-        */
-       ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
-       if (ret)
-               goto unlock_tree_log;
-
        /*
         * Since fs roots are all committed, we can get a quite accurate
         * new_roots. So let's do quota accounting.
index b62be84..0a6de85 100644 (file)
@@ -4317,6 +4317,8 @@ int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
                btrfs_warn(fs_info,
        "balance: cannot set exclusive op status, resume manually");
 
+       btrfs_release_path(path);
+
        mutex_lock(&fs_info->balance_mutex);
        BUG_ON(fs_info->balance_ctl);
        spin_lock(&fs_info->balance_lock);
index 8405870..d87bd85 100644 (file)
@@ -5038,7 +5038,7 @@ bad:
        return;
 }
 
-static struct ceph_connection *con_get(struct ceph_connection *con)
+static struct ceph_connection *mds_get_con(struct ceph_connection *con)
 {
        struct ceph_mds_session *s = con->private;
 
@@ -5047,7 +5047,7 @@ static struct ceph_connection *con_get(struct ceph_connection *con)
        return NULL;
 }
 
-static void con_put(struct ceph_connection *con)
+static void mds_put_con(struct ceph_connection *con)
 {
        struct ceph_mds_session *s = con->private;
 
@@ -5058,7 +5058,7 @@ static void con_put(struct ceph_connection *con)
  * if the client is unresponsive for long enough, the mds will kill
  * the session entirely.
  */
-static void peer_reset(struct ceph_connection *con)
+static void mds_peer_reset(struct ceph_connection *con)
 {
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
@@ -5067,7 +5067,7 @@ static void peer_reset(struct ceph_connection *con)
        send_mds_reconnect(mdsc, s);
 }
 
-static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
+static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
 {
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
@@ -5125,8 +5125,8 @@ out:
  * Note: returned pointer is the address of a structure that's
  * managed separately.  Caller must *not* attempt to free it.
  */
-static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
-                                       int *proto, int force_new)
+static struct ceph_auth_handshake *
+mds_get_authorizer(struct ceph_connection *con, int *proto, int force_new)
 {
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
@@ -5142,7 +5142,7 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
        return auth;
 }
 
-static int add_authorizer_challenge(struct ceph_connection *con,
+static int mds_add_authorizer_challenge(struct ceph_connection *con,
                                    void *challenge_buf, int challenge_buf_len)
 {
        struct ceph_mds_session *s = con->private;
@@ -5153,7 +5153,7 @@ static int add_authorizer_challenge(struct ceph_connection *con,
                                            challenge_buf, challenge_buf_len);
 }
 
-static int verify_authorizer_reply(struct ceph_connection *con)
+static int mds_verify_authorizer_reply(struct ceph_connection *con)
 {
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
@@ -5165,7 +5165,7 @@ static int verify_authorizer_reply(struct ceph_connection *con)
                NULL, NULL, NULL, NULL);
 }
 
-static int invalidate_authorizer(struct ceph_connection *con)
+static int mds_invalidate_authorizer(struct ceph_connection *con)
 {
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
@@ -5288,15 +5288,15 @@ static int mds_check_message_signature(struct ceph_msg *msg)
 }
 
 static const struct ceph_connection_operations mds_con_ops = {
-       .get = con_get,
-       .put = con_put,
-       .dispatch = dispatch,
-       .get_authorizer = get_authorizer,
-       .add_authorizer_challenge = add_authorizer_challenge,
-       .verify_authorizer_reply = verify_authorizer_reply,
-       .invalidate_authorizer = invalidate_authorizer,
-       .peer_reset = peer_reset,
+       .get = mds_get_con,
+       .put = mds_put_con,
        .alloc_msg = mds_alloc_msg,
+       .dispatch = mds_dispatch,
+       .peer_reset = mds_peer_reset,
+       .get_authorizer = mds_get_authorizer,
+       .add_authorizer_challenge = mds_add_authorizer_challenge,
+       .verify_authorizer_reply = mds_verify_authorizer_reply,
+       .invalidate_authorizer = mds_invalidate_authorizer,
        .sign_message = mds_sign_message,
        .check_message_signature = mds_check_message_signature,
        .get_auth_request = mds_get_auth_request,
index 5d39129..c8ef24b 100644 (file)
@@ -2195,7 +2195,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
        if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
                tcon->nohandlecache = ctx->nohandlecache;
        else
-               tcon->nohandlecache = 1;
+               tcon->nohandlecache = true;
        tcon->nodelete = ctx->nodelete;
        tcon->local_lease = ctx->local_lease;
        INIT_LIST_HEAD(&tcon->pending_opens);
@@ -2628,7 +2628,7 @@ void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
        } else if (ctx)
                tcon->unix_ext = 1; /* Unix Extensions supported */
 
-       if (tcon->unix_ext == 0) {
+       if (!tcon->unix_ext) {
                cifs_dbg(FYI, "Unix extensions disabled so not set on reconnect\n");
                return;
        }
index e9abb41..95ef26b 100644 (file)
@@ -338,7 +338,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
        if (ssocket == NULL)
                return -EAGAIN;
 
-       if (signal_pending(current)) {
+       if (fatal_signal_pending(current)) {
                cifs_dbg(FYI, "signal pending before send request\n");
                return -ERESTARTSYS;
        }
@@ -429,7 +429,7 @@ unmask:
 
        if (signal_pending(current) && (total_len != send_length)) {
                cifs_dbg(FYI, "signal is pending after attempt to send\n");
-               rc = -EINTR;
+               rc = -ERESTARTSYS;
        }
 
        /* uncork it */
index acfb558..c41cb88 100644 (file)
@@ -1474,21 +1474,25 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
        }
 
        /*
-        * Some filesystems may redirty the inode during the writeback
-        * due to delalloc, clear dirty metadata flags right before
-        * write_inode()
+        * If the inode has dirty timestamps and we need to write them, call
+        * mark_inode_dirty_sync() to notify the filesystem about it and to
+        * change I_DIRTY_TIME into I_DIRTY_SYNC.
         */
-       spin_lock(&inode->i_lock);
-
-       dirty = inode->i_state & I_DIRTY;
        if ((inode->i_state & I_DIRTY_TIME) &&
-           ((dirty & I_DIRTY_INODE) ||
-            wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync ||
+           (wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync ||
             time_after(jiffies, inode->dirtied_time_when +
                        dirtytime_expire_interval * HZ))) {
-               dirty |= I_DIRTY_TIME;
                trace_writeback_lazytime(inode);
+               mark_inode_dirty_sync(inode);
        }
+
+       /*
+        * Some filesystems may redirty the inode during the writeback
+        * due to delalloc, clear dirty metadata flags right before
+        * write_inode()
+        */
+       spin_lock(&inode->i_lock);
+       dirty = inode->i_state & I_DIRTY;
        inode->i_state &= ~dirty;
 
        /*
@@ -1509,8 +1513,6 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 
        spin_unlock(&inode->i_lock);
 
-       if (dirty & I_DIRTY_TIME)
-               mark_inode_dirty_sync(inode);
        /* Don't write the inode if only I_DIRTY_PAGES was set */
        if (dirty & ~I_DIRTY_PAGES) {
                int err = write_inode(inode, wbc);
index 985a9e3..c07913e 100644 (file)
@@ -1025,6 +1025,7 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
                             const struct iovec *fast_iov,
                             struct iov_iter *iter, bool force);
+static void io_req_drop_files(struct io_kiocb *req);
 
 static struct kmem_cache *req_cachep;
 
@@ -1048,8 +1049,7 @@ EXPORT_SYMBOL(io_uring_get_socket);
 
 static inline void io_clean_op(struct io_kiocb *req)
 {
-       if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
-                         REQ_F_INFLIGHT))
+       if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
                __io_clean_op(req);
 }
 
@@ -1075,8 +1075,11 @@ static bool io_match_task(struct io_kiocb *head,
                return true;
 
        io_for_each_link(req, head) {
-               if ((req->flags & REQ_F_WORK_INITIALIZED) &&
-                   (req->work.flags & IO_WQ_WORK_FILES) &&
+               if (!(req->flags & REQ_F_WORK_INITIALIZED))
+                       continue;
+               if (req->file && req->file->f_op == &io_uring_fops)
+                       return true;
+               if ((req->work.flags & IO_WQ_WORK_FILES) &&
                    req->work.identity->files == files)
                        return true;
        }
@@ -1394,6 +1397,8 @@ static void io_req_clean_work(struct io_kiocb *req)
                        free_fs_struct(fs);
                req->work.flags &= ~IO_WQ_WORK_FS;
        }
+       if (req->flags & REQ_F_INFLIGHT)
+               io_req_drop_files(req);
 
        io_put_identity(req->task->io_uring, req);
 }
@@ -1503,11 +1508,14 @@ static bool io_grab_identity(struct io_kiocb *req)
                        return false;
                atomic_inc(&id->files->count);
                get_nsproxy(id->nsproxy);
-               req->flags |= REQ_F_INFLIGHT;
 
-               spin_lock_irq(&ctx->inflight_lock);
-               list_add(&req->inflight_entry, &ctx->inflight_list);
-               spin_unlock_irq(&ctx->inflight_lock);
+               if (!(req->flags & REQ_F_INFLIGHT)) {
+                       req->flags |= REQ_F_INFLIGHT;
+
+                       spin_lock_irq(&ctx->inflight_lock);
+                       list_add(&req->inflight_entry, &ctx->inflight_list);
+                       spin_unlock_irq(&ctx->inflight_lock);
+               }
                req->work.flags |= IO_WQ_WORK_FILES;
        }
        if (!(req->work.flags & IO_WQ_WORK_MM) &&
@@ -2270,6 +2278,8 @@ static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
                struct io_uring_task *tctx = rb->task->io_uring;
 
                percpu_counter_sub(&tctx->inflight, rb->task_refs);
+               if (atomic_read(&tctx->in_idle))
+                       wake_up(&tctx->wait);
                put_task_struct_many(rb->task, rb->task_refs);
                rb->task = NULL;
        }
@@ -2288,6 +2298,8 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
                        struct io_uring_task *tctx = rb->task->io_uring;
 
                        percpu_counter_sub(&tctx->inflight, rb->task_refs);
+                       if (atomic_read(&tctx->in_idle))
+                               wake_up(&tctx->wait);
                        put_task_struct_many(rb->task, rb->task_refs);
                }
                rb->task = req->task;
@@ -3548,7 +3560,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 
        /* read it all, or we did blocking attempt. no retry. */
        if (!iov_iter_count(iter) || !force_nonblock ||
-           (req->file->f_flags & O_NONBLOCK))
+           (req->file->f_flags & O_NONBLOCK) || !(req->flags & REQ_F_ISREG))
                goto done;
 
        io_size -= ret;
@@ -4468,7 +4480,6 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
         * io_wq_work.flags, so initialize io_wq_work firstly.
         */
        io_req_init_async(req);
-       req->work.flags |= IO_WQ_WORK_NO_CANCEL;
 
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
@@ -4501,6 +4512,8 @@ static int io_close(struct io_kiocb *req, bool force_nonblock,
 
        /* if the file has a flush method, be safe and punt to async */
        if (close->put_file->f_op->flush && force_nonblock) {
+               /* not safe to cancel at this point */
+               req->work.flags |= IO_WQ_WORK_NO_CANCEL;
                /* was never set, but play safe */
                req->flags &= ~REQ_F_NOWAIT;
                /* avoid grabbing files - we don't need the files */
@@ -6157,8 +6170,10 @@ static void io_req_drop_files(struct io_kiocb *req)
        struct io_uring_task *tctx = req->task->io_uring;
        unsigned long flags;
 
-       put_files_struct(req->work.identity->files);
-       put_nsproxy(req->work.identity->nsproxy);
+       if (req->work.flags & IO_WQ_WORK_FILES) {
+               put_files_struct(req->work.identity->files);
+               put_nsproxy(req->work.identity->nsproxy);
+       }
        spin_lock_irqsave(&ctx->inflight_lock, flags);
        list_del(&req->inflight_entry);
        spin_unlock_irqrestore(&ctx->inflight_lock, flags);
@@ -6225,9 +6240,6 @@ static void __io_clean_op(struct io_kiocb *req)
                }
                req->flags &= ~REQ_F_NEED_CLEANUP;
        }
-
-       if (req->flags & REQ_F_INFLIGHT)
-               io_req_drop_files(req);
 }
 
 static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock,
@@ -6446,6 +6458,15 @@ static struct file *io_file_get(struct io_submit_state *state,
                file = __io_file_get(state, fd);
        }
 
+       if (file && file->f_op == &io_uring_fops) {
+               io_req_init_async(req);
+               req->flags |= REQ_F_INFLIGHT;
+
+               spin_lock_irq(&ctx->inflight_lock);
+               list_add(&req->inflight_entry, &ctx->inflight_list);
+               spin_unlock_irq(&ctx->inflight_lock);
+       }
+
        return file;
 }
 
@@ -8856,8 +8877,7 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
 
                spin_lock_irq(&ctx->inflight_lock);
                list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
-                       if (req->task != task ||
-                           req->work.identity->files != files)
+                       if (!io_match_task(req, task, files))
                                continue;
                        found = true;
                        break;
@@ -8874,6 +8894,7 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
                io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
                io_poll_remove_all(ctx, task, files);
                io_kill_timeouts(ctx, task, files);
+               io_cqring_overflow_flush(ctx, true, task, files);
                /* cancellations _may_ trigger task work */
                io_run_task_work();
                schedule();
@@ -8914,8 +8935,6 @@ static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 
 static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
 {
-       WARN_ON_ONCE(ctx->sqo_task != current);
-
        mutex_lock(&ctx->uring_lock);
        ctx->sqo_dead = 1;
        mutex_unlock(&ctx->uring_lock);
@@ -8937,6 +8956,7 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 
        if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
                /* for SQPOLL only sqo_task has task notes */
+               WARN_ON_ONCE(ctx->sqo_task != current);
                io_disable_sqo_submit(ctx);
                task = ctx->sq_data->thread;
                atomic_inc(&task->io_uring->in_idle);
@@ -9082,6 +9102,10 @@ void __io_uring_task_cancel(void)
        /* make sure overflow events are dropped */
        atomic_inc(&tctx->in_idle);
 
+       /* trigger io_disable_sqo_submit() */
+       if (tctx->sqpoll)
+               __io_uring_files_cancel(NULL);
+
        do {
                /* read completions before cancelations */
                inflight = tctx_inflight(tctx);
@@ -9128,7 +9152,10 @@ static int io_uring_flush(struct file *file, void *data)
 
        if (ctx->flags & IORING_SETUP_SQPOLL) {
                /* there is only one file note, which is owned by sqo_task */
-               WARN_ON_ONCE((ctx->sqo_task == current) ==
+               WARN_ON_ONCE(ctx->sqo_task != current &&
+                            xa_load(&tctx->xa, (unsigned long)file));
+               /* sqo_dead check is for when this happens after cancellation */
+               WARN_ON_ONCE(ctx->sqo_task == current && !ctx->sqo_dead &&
                             !xa_load(&tctx->xa, (unsigned long)file));
 
                io_disable_sqo_submit(ctx);
index f277d02..c757193 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/pagemap.h>
 #include <linux/sched/mm.h>
 #include <linux/fsnotify.h>
+#include <linux/uio.h>
 
 #include "kernfs-internal.h"
 
@@ -180,11 +181,10 @@ static const struct seq_operations kernfs_seq_ops = {
  * it difficult to use seq_file.  Implement simplistic custom buffering for
  * bin files.
  */
-static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
-                                      char __user *user_buf, size_t count,
-                                      loff_t *ppos)
+static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 {
-       ssize_t len = min_t(size_t, count, PAGE_SIZE);
+       struct kernfs_open_file *of = kernfs_of(iocb->ki_filp);
+       ssize_t len = min_t(size_t, iov_iter_count(iter), PAGE_SIZE);
        const struct kernfs_ops *ops;
        char *buf;
 
@@ -210,7 +210,7 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
        of->event = atomic_read(&of->kn->attr.open->event);
        ops = kernfs_ops(of->kn);
        if (ops->read)
-               len = ops->read(of, buf, len, *ppos);
+               len = ops->read(of, buf, len, iocb->ki_pos);
        else
                len = -EINVAL;
 
@@ -220,12 +220,12 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
        if (len < 0)
                goto out_free;
 
-       if (copy_to_user(user_buf, buf, len)) {
+       if (copy_to_iter(buf, len, iter) != len) {
                len = -EFAULT;
                goto out_free;
        }
 
-       *ppos += len;
+       iocb->ki_pos += len;
 
  out_free:
        if (buf == of->prealloc_buf)
@@ -235,31 +235,14 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
        return len;
 }
 
-/**
- * kernfs_fop_read - kernfs vfs read callback
- * @file: file pointer
- * @user_buf: data to write
- * @count: number of bytes
- * @ppos: starting offset
- */
-static ssize_t kernfs_fop_read(struct file *file, char __user *user_buf,
-                              size_t count, loff_t *ppos)
+static ssize_t kernfs_fop_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 {
-       struct kernfs_open_file *of = kernfs_of(file);
-
-       if (of->kn->flags & KERNFS_HAS_SEQ_SHOW)
-               return seq_read(file, user_buf, count, ppos);
-       else
-               return kernfs_file_direct_read(of, user_buf, count, ppos);
+       if (kernfs_of(iocb->ki_filp)->kn->flags & KERNFS_HAS_SEQ_SHOW)
+               return seq_read_iter(iocb, iter);
+       return kernfs_file_read_iter(iocb, iter);
 }
 
-/**
- * kernfs_fop_write - kernfs vfs write callback
- * @file: file pointer
- * @user_buf: data to write
- * @count: number of bytes
- * @ppos: starting offset
- *
+/*
  * Copy data in from userland and pass it to the matching kernfs write
  * operation.
  *
@@ -269,20 +252,18 @@ static ssize_t kernfs_fop_read(struct file *file, char __user *user_buf,
  * modify only the value you're changing, then write the entire buffer
  * back.
  */
-static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
-                               size_t count, loff_t *ppos)
+static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
 {
-       struct kernfs_open_file *of = kernfs_of(file);
+       struct kernfs_open_file *of = kernfs_of(iocb->ki_filp);
+       ssize_t len = iov_iter_count(iter);
        const struct kernfs_ops *ops;
-       ssize_t len;
        char *buf;
 
        if (of->atomic_write_len) {
-               len = count;
                if (len > of->atomic_write_len)
                        return -E2BIG;
        } else {
-               len = min_t(size_t, count, PAGE_SIZE);
+               len = min_t(size_t, len, PAGE_SIZE);
        }
 
        buf = of->prealloc_buf;
@@ -293,7 +274,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
        if (!buf)
                return -ENOMEM;
 
-       if (copy_from_user(buf, user_buf, len)) {
+       if (copy_from_iter(buf, len, iter) != len) {
                len = -EFAULT;
                goto out_free;
        }
@@ -312,7 +293,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
 
        ops = kernfs_ops(of->kn);
        if (ops->write)
-               len = ops->write(of, buf, len, *ppos);
+               len = ops->write(of, buf, len, iocb->ki_pos);
        else
                len = -EINVAL;
 
@@ -320,7 +301,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
        mutex_unlock(&of->mutex);
 
        if (len > 0)
-               *ppos += len;
+               iocb->ki_pos += len;
 
 out_free:
        if (buf == of->prealloc_buf)
@@ -673,7 +654,7 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
 
        /*
         * Write path needs to atomic_write_len outside active reference.
-        * Cache it in open_file.  See kernfs_fop_write() for details.
+        * Cache it in open_file.  See kernfs_fop_write_iter() for details.
         */
        of->atomic_write_len = ops->atomic_write_len;
 
@@ -960,14 +941,16 @@ void kernfs_notify(struct kernfs_node *kn)
 EXPORT_SYMBOL_GPL(kernfs_notify);
 
 const struct file_operations kernfs_file_fops = {
-       .read           = kernfs_fop_read,
-       .write          = kernfs_fop_write,
+       .read_iter      = kernfs_fop_read_iter,
+       .write_iter     = kernfs_fop_write_iter,
        .llseek         = generic_file_llseek,
        .mmap           = kernfs_fop_mmap,
        .open           = kernfs_fop_open,
        .release        = kernfs_fop_release,
        .poll           = kernfs_fop_poll,
        .fsync          = noop_fsync,
+       .splice_read    = generic_file_splice_read,
+       .splice_write   = iter_file_splice_write,
 };
 
 /**
index c5989cf..39c9684 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -1206,6 +1206,7 @@ const struct file_operations pipefifo_fops = {
        .unlocked_ioctl = pipe_ioctl,
        .release        = pipe_release,
        .fasync         = pipe_fasync,
+       .splice_write   = iter_file_splice_write,
 };
 
 /*
index 3178992..d2018f7 100644 (file)
@@ -1770,6 +1770,12 @@ static int process_sysctl_arg(char *param, char *val,
                        return 0;
        }
 
+       if (!val)
+               return -EINVAL;
+       len = strlen(val);
+       if (len == 0)
+               return -EINVAL;
+
        /*
         * To set sysctl options, we use a temporary mount of proc, look up the
         * respective sys/ file and write to it. To avoid mounting it when no
@@ -1811,7 +1817,6 @@ static int process_sysctl_arg(char *param, char *val,
                                file, param, val);
                goto out;
        }
-       len = strlen(val);
        wret = kernel_write(file, val, len, &pos);
        if (wret < 0) {
                err = wret;
index 5bef3a6..d0df217 100644 (file)
@@ -705,6 +705,7 @@ static int udf_check_vsd(struct super_block *sb)
        struct buffer_head *bh = NULL;
        int nsr = 0;
        struct udf_sb_info *sbi;
+       loff_t session_offset;
 
        sbi = UDF_SB(sb);
        if (sb->s_blocksize < sizeof(struct volStructDesc))
@@ -712,7 +713,8 @@ static int udf_check_vsd(struct super_block *sb)
        else
                sectorsize = sb->s_blocksize;
 
-       sector += (((loff_t)sbi->s_session) << sb->s_blocksize_bits);
+       session_offset = (loff_t)sbi->s_session << sb->s_blocksize_bits;
+       sector += session_offset;
 
        udf_debug("Starting at sector %u (%lu byte sectors)\n",
                  (unsigned int)(sector >> sb->s_blocksize_bits),
@@ -757,8 +759,7 @@ static int udf_check_vsd(struct super_block *sb)
 
        if (nsr > 0)
                return 1;
-       else if (!bh && sector - (sbi->s_session << sb->s_blocksize_bits) ==
-                       VSD_FIRST_SECTOR_OFFSET)
+       else if (!bh && sector - session_offset == VSD_FIRST_SECTOR_OFFSET)
                return -1;
        else
                return 0;
index 3c3e16c..dc605c4 100644 (file)
@@ -2,9 +2,8 @@
 #ifndef __DT_APQ8016_LPASS_H
 #define __DT_APQ8016_LPASS_H
 
-#define MI2S_PRIMARY   0
-#define MI2S_SECONDARY 1
-#define MI2S_TERTIARY  2
-#define MI2S_QUATERNARY        3
+#include <dt-bindings/sound/qcom,lpass.h>
+
+/* NOTE: Use qcom,lpass.h to define any AIF ID's for LPASS */
 
 #endif /* __DT_APQ8016_LPASS_H */
diff --git a/include/dt-bindings/sound/qcom,lpass.h b/include/dt-bindings/sound/qcom,lpass.h
new file mode 100644 (file)
index 0000000..7b0b80b
--- /dev/null
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_QCOM_LPASS_H
+#define __DT_QCOM_LPASS_H
+
+#define MI2S_PRIMARY   0
+#define MI2S_SECONDARY 1
+#define MI2S_TERTIARY  2
+#define MI2S_QUATERNARY        3
+#define MI2S_QUINARY   4
+
+#define LPASS_DP_RX    5
+
+#define LPASS_MCLK0    0
+
+#endif /* __DT_QCOM_LPASS_H */
index 56ecaaf..5c1ee8b 100644 (file)
@@ -2,10 +2,8 @@
 #ifndef __DT_SC7180_LPASS_H
 #define __DT_SC7180_LPASS_H
 
-#define MI2S_PRIMARY   0
-#define MI2S_SECONDARY 1
-#define LPASS_DP_RX    2
+#include <dt-bindings/sound/qcom,lpass.h>
 
-#define LPASS_MCLK0    0
+/* NOTE: Use qcom,lpass.h to define any AIF ID's for LPASS */
 
 #endif /* __DT_APQ8016_LPASS_H */
index 89bb8b8..1779f90 100644 (file)
@@ -609,6 +609,18 @@ static inline const char *dev_name(const struct device *dev)
        return kobject_name(&dev->kobj);
 }
 
+/**
+ * dev_bus_name - Return a device's bus/class name, if at all possible
+ * @dev: struct device to get the bus/class name of
+ *
+ * Will return the name of the bus/class the device is attached to.  If it is
+ * not attached to a bus/class, an empty string will be returned.
+ */
+static inline const char *dev_bus_name(const struct device *dev)
+{
+       return dev->bus ? dev->bus->name : (dev->class ? dev->class->name : "");
+}
+
 __printf(2, 3) int dev_set_name(struct device *dev, const char *name, ...);
 
 #ifdef CONFIG_NUMA
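The hunk above adds a dev_bus_name() helper alongside dev_name(). As a rough usage sketch (not part of the patch; the probe function and message below are hypothetical), a driver could log where a device is attached without open-coding the NULL checks on dev->bus and dev->class:

	#include <linux/device.h>

	/* Hypothetical driver snippet: dev_bus_name() falls back to the class
	 * name, or to "" when the device has neither, so the format string
	 * never sees a NULL pointer.
	 */
	static int example_probe(struct device *dev)
	{
		dev_info(dev, "attached via '%s'\n", dev_bus_name(dev));
		return 0;
	}
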
index 65b81e0..2484ed9 100644 (file)
@@ -33,6 +33,9 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
                                          unsigned int cpu,
                                          const char *namefmt);
 
+void kthread_set_per_cpu(struct task_struct *k, int cpu);
+bool kthread_is_per_cpu(struct task_struct *k);
+
 /**
  * kthread_run - create and wake a thread.
  * @threadfn: the function to run until signal_pending(current).
index a12b552..73f20de 100644 (file)
@@ -230,6 +230,5 @@ static inline ktime_t ms_to_ktime(u64 ms)
 }
 
 # include <linux/timekeeping.h>
-# include <linux/timekeeping32.h>
 
 #endif
index b955ed3..88197b8 100644 (file)
@@ -1239,22 +1239,4 @@ static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev)
        return val.vbool;
 }
 
-/**
- * mlx5_core_net - Provide net namespace of the mlx5_core_dev
- * @dev: mlx5 core device
- *
- * mlx5_core_net() returns the net namespace of mlx5 core device.
- * This can be called only in below described limited context.
- * (a) When a devlink instance for mlx5_core is registered and
- *     when devlink reload operation is disabled.
- *     or
- * (b) during devlink reload reload_down() and reload_up callbacks
- *     where it is ensured that devlink instance's net namespace is
- *     stable.
- */
-static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
-{
-       return devlink_net(priv_to_devlink(dev));
-}
-
 #endif /* MLX5_DRIVER_H */
index d925359..bfed36e 100644 (file)
@@ -116,6 +116,9 @@ enum {
        NVME_REG_BPMBL  = 0x0048,       /* Boot Partition Memory Buffer
                                         * Location
                                         */
+       NVME_REG_CMBMSC = 0x0050,       /* Controller Memory Buffer Memory
+                                        * Space Control
+                                        */
        NVME_REG_PMRCAP = 0x0e00,       /* Persistent Memory Capabilities */
        NVME_REG_PMRCTL = 0x0e04,       /* Persistent Memory Region Control */
        NVME_REG_PMRSTS = 0x0e08,       /* Persistent Memory Region Status */
@@ -135,6 +138,7 @@ enum {
 #define NVME_CAP_CSS(cap)      (((cap) >> 37) & 0xff)
 #define NVME_CAP_MPSMIN(cap)   (((cap) >> 48) & 0xf)
 #define NVME_CAP_MPSMAX(cap)   (((cap) >> 52) & 0xf)
+#define NVME_CAP_CMBS(cap)     (((cap) >> 57) & 0x1)
 
 #define NVME_CMB_BIR(cmbloc)   ((cmbloc) & 0x7)
 #define NVME_CMB_OFST(cmbloc)  (((cmbloc) >> 12) & 0xfffff)
@@ -192,6 +196,8 @@ enum {
        NVME_CSTS_SHST_OCCUR    = 1 << 2,
        NVME_CSTS_SHST_CMPLT    = 2 << 2,
        NVME_CSTS_SHST_MASK     = 3 << 2,
+       NVME_CMBMSC_CRE         = 1 << 0,
+       NVME_CMBMSC_CMSE        = 1 << 1,
 };
 
 struct nvme_id_power_state {
index 2024944..20e84a8 100644 (file)
@@ -331,6 +331,12 @@ regulator_get_exclusive(struct device *dev, const char *id)
        return ERR_PTR(-ENODEV);
 }
 
+static inline struct regulator *__must_check
+devm_regulator_get_exclusive(struct device *dev, const char *id)
+{
+       return ERR_PTR(-ENODEV);
+}
+
 static inline struct regulator *__must_check
 regulator_get_optional(struct device *dev, const char *id)
 {
@@ -486,6 +492,11 @@ static inline int regulator_get_voltage(struct regulator *regulator)
        return -EINVAL;
 }
 
+static inline int regulator_sync_voltage(struct regulator *regulator)
+{
+       return -EINVAL;
+}
+
 static inline int regulator_is_supported_voltage(struct regulator *regulator,
                                   int min_uV, int max_uV)
 {
@@ -578,6 +589,25 @@ static inline int devm_regulator_unregister_notifier(struct regulator *regulator
        return 0;
 }
 
+static inline int regulator_suspend_enable(struct regulator_dev *rdev,
+                                          suspend_state_t state)
+{
+       return -EINVAL;
+}
+
+static inline int regulator_suspend_disable(struct regulator_dev *rdev,
+                                           suspend_state_t state)
+{
+       return -EINVAL;
+}
+
+static inline int regulator_set_suspend_voltage(struct regulator *regulator,
+                                               int min_uV, int max_uV,
+                                               suspend_state_t state)
+{
+       return -EINVAL;
+}
+
 static inline void *regulator_get_drvdata(struct regulator *regulator)
 {
        return NULL;
diff --git a/include/linux/timekeeping32.h b/include/linux/timekeeping32.h
deleted file mode 100644 (file)
index 266017f..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef _LINUX_TIMEKEEPING32_H
-#define _LINUX_TIMEKEEPING32_H
-/*
- * These interfaces are all based on the old timespec type
- * and should get replaced with the timespec64 based versions
- * over time so we can remove the file here.
- */
-
-static inline unsigned long get_seconds(void)
-{
-       return ktime_get_real_seconds();
-}
-
-#endif
index c873f47..37803f3 100644 (file)
@@ -421,6 +421,7 @@ extern void tty_kclose(struct tty_struct *tty);
 extern int tty_dev_name_to_number(const char *name, dev_t *number);
 extern int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout);
 extern void tty_ldisc_unlock(struct tty_struct *tty);
+extern ssize_t redirected_tty_write(struct kiocb *, struct iov_iter *);
 #else
 static inline void tty_kref_put(struct tty_struct *tty)
 { }
index be36cbd..3eb2022 100644 (file)
@@ -520,7 +520,7 @@ int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt, u32 pixelformat,
                        u32 width, u32 height);
 
 /**
- * v4l2_get_link_rate - Get link rate from transmitter
+ * v4l2_get_link_freq - Get link rate from transmitter
  *
  * @handler: The transmitter's control handler
  * @mul: The multiplier between pixel rate and link frequency. Bits per pixel on
@@ -537,7 +537,7 @@ int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt, u32 pixelformat,
  *     -ENOENT: Link frequency or pixel rate control not found
  *     -EINVAL: Invalid link frequency value
  */
-s64 v4l2_get_link_rate(struct v4l2_ctrl_handler *handler, unsigned int mul,
+s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
                       unsigned int div);
 
 static inline u64 v4l2_buffer_get_timestamp(const struct v4l2_buffer *buf)
index ccc3d1f..eee7344 100644 (file)
@@ -92,6 +92,7 @@ struct lapb_cb {
        unsigned short          n2, n2count;
        unsigned short          t1, t2;
        struct timer_list       t1timer, t2timer;
+       bool                    t1timer_stop, t2timer_stop;
 
        /* Internal control information */
        struct sk_buff_head     write_queue;
@@ -103,6 +104,7 @@ struct lapb_cb {
        struct lapb_frame       frmr_data;
        unsigned char           frmr_type;
 
+       spinlock_t              lock;
        refcount_t              refcnt;
 };
 
index f4af836..4b6ecf5 100644 (file)
@@ -721,6 +721,8 @@ void *nft_set_elem_init(const struct nft_set *set,
                        const struct nft_set_ext_tmpl *tmpl,
                        const u32 *key, const u32 *key_end, const u32 *data,
                        u64 timeout, u64 expiration, gfp_t gfp);
+int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set,
+                           struct nft_expr *expr_array[]);
 void nft_set_elem_destroy(const struct nft_set *set, void *elem,
                          bool destroy_expr);
 
index 78d13c8..25bbada 100644 (file)
@@ -630,6 +630,7 @@ static inline void tcp_clear_xmit_timers(struct sock *sk)
 
 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
 unsigned int tcp_current_mss(struct sock *sk);
+u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);
 
 /* Bound MSS / TSO packet size with the half of the window */
 static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
@@ -2060,7 +2061,7 @@ void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
 void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
 extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
                                u32 reo_wnd);
-extern void tcp_rack_mark_lost(struct sock *sk);
+extern bool tcp_rack_mark_lost(struct sock *sk);
 extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
                             u64 xmit_time);
 extern void tcp_rack_reo_timeout(struct sock *sk);
index 2336bf9..2e1200d 100644 (file)
@@ -229,7 +229,7 @@ typedef int (*snd_pcm_hw_rule_func_t)(struct snd_pcm_hw_params *params,
 struct snd_pcm_hw_rule {
        unsigned int cond;
        int var;
-       int deps[4];
+       int deps[5];
 
        snd_pcm_hw_rule_func_t func;
        void *private;
index 5039af6..cbe3e15 100644 (file)
@@ -366,7 +366,7 @@ TRACE_EVENT(sched_process_wait,
 );
 
 /*
- * Tracepoint for do_fork:
+ * Tracepoint for kernel_clone:
  */
 TRACE_EVENT(sched_process_fork,
 
index 9744773..bd4424d 100644 (file)
@@ -71,90 +71,4 @@ enum br_mrp_sub_tlv_header_type {
        BR_MRP_SUB_TLV_HEADER_TEST_AUTO_MGR = 0x3,
 };
 
-struct br_mrp_tlv_hdr {
-       __u8 type;
-       __u8 length;
-};
-
-struct br_mrp_sub_tlv_hdr {
-       __u8 type;
-       __u8 length;
-};
-
-struct br_mrp_end_hdr {
-       struct br_mrp_tlv_hdr hdr;
-};
-
-struct br_mrp_common_hdr {
-       __be16 seq_id;
-       __u8 domain[MRP_DOMAIN_UUID_LENGTH];
-};
-
-struct br_mrp_ring_test_hdr {
-       __be16 prio;
-       __u8 sa[ETH_ALEN];
-       __be16 port_role;
-       __be16 state;
-       __be16 transitions;
-       __be32 timestamp;
-};
-
-struct br_mrp_ring_topo_hdr {
-       __be16 prio;
-       __u8 sa[ETH_ALEN];
-       __be16 interval;
-};
-
-struct br_mrp_ring_link_hdr {
-       __u8 sa[ETH_ALEN];
-       __be16 port_role;
-       __be16 interval;
-       __be16 blocked;
-};
-
-struct br_mrp_sub_opt_hdr {
-       __u8 type;
-       __u8 manufacture_data[MRP_MANUFACTURE_DATA_LENGTH];
-};
-
-struct br_mrp_test_mgr_nack_hdr {
-       __be16 prio;
-       __u8 sa[ETH_ALEN];
-       __be16 other_prio;
-       __u8 other_sa[ETH_ALEN];
-};
-
-struct br_mrp_test_prop_hdr {
-       __be16 prio;
-       __u8 sa[ETH_ALEN];
-       __be16 other_prio;
-       __u8 other_sa[ETH_ALEN];
-};
-
-struct br_mrp_oui_hdr {
-       __u8 oui[MRP_OUI_LENGTH];
-};
-
-struct br_mrp_in_test_hdr {
-       __be16 id;
-       __u8 sa[ETH_ALEN];
-       __be16 port_role;
-       __be16 state;
-       __be16 transitions;
-       __be32 timestamp;
-};
-
-struct br_mrp_in_topo_hdr {
-       __u8 sa[ETH_ALEN];
-       __be16 id;
-       __be16 interval;
-};
-
-struct br_mrp_in_link_hdr {
-       __u8 sa[ETH_ALEN];
-       __be16 port_role;
-       __be16 id;
-       __be16 interval;
-};
-
 #endif
index 1dccb55..708addd 100644 (file)
@@ -28,10 +28,10 @@ struct ipv6_rpl_sr_hdr {
                pad:4,
                reserved1:16;
 #elif defined(__BIG_ENDIAN_BITFIELD)
-       __u32   reserved:20,
+       __u32   cmpri:4,
+               cmpre:4,
                pad:4,
-               cmpri:4,
-               cmpre:4;
+               reserved:20;
 #else
 #error  "Please fix <asm/byteorder.h>"
 #endif
index 00850b9..a38454d 100644 (file)
@@ -176,7 +176,7 @@ struct v4l2_subdev_capability {
 };
 
 /* The v4l2 sub-device video device node is registered in read-only mode. */
-#define V4L2_SUBDEV_CAP_RO_SUBDEV              BIT(0)
+#define V4L2_SUBDEV_CAP_RO_SUBDEV              0x00000001
 
 /* Backwards compatibility define --- to be removed */
 #define v4l2_subdev_edid v4l2_edid
index f8b638c..901a4fd 100644 (file)
@@ -133,6 +133,13 @@ enum pvrdma_wc_flags {
        PVRDMA_WC_FLAGS_MAX             = PVRDMA_WC_WITH_NETWORK_HDR_TYPE,
 };
 
+enum pvrdma_network_type {
+       PVRDMA_NETWORK_IB,
+       PVRDMA_NETWORK_ROCE_V1 = PVRDMA_NETWORK_IB,
+       PVRDMA_NETWORK_IPV4,
+       PVRDMA_NETWORK_IPV6
+};
+
 struct pvrdma_alloc_ucontext_resp {
        __u32 qp_tab_size;
        __u32 reserved;
index 37720a6..d66cd10 100644 (file)
@@ -819,9 +819,8 @@ void __init fork_init(void)
        init_task.signal->rlim[RLIMIT_SIGPENDING] =
                init_task.signal->rlim[RLIMIT_NPROC];
 
-       for (i = 0; i < UCOUNT_COUNTS; i++) {
+       for (i = 0; i < UCOUNT_COUNTS; i++)
                init_user_ns.ucount_max[i] = max_threads/2;
-       }
 
 #ifdef CONFIG_VMAP_STACK
        cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
@@ -1654,9 +1653,8 @@ static inline void init_task_pid_links(struct task_struct *task)
 {
        enum pid_type type;
 
-       for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
+       for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type)
                INIT_HLIST_NODE(&task->pid_links[type]);
-       }
 }
 
 static inline void
index c47d101..45a13eb 100644 (file)
@@ -763,6 +763,29 @@ static struct futex_pi_state *alloc_pi_state(void)
        return pi_state;
 }
 
+static void pi_state_update_owner(struct futex_pi_state *pi_state,
+                                 struct task_struct *new_owner)
+{
+       struct task_struct *old_owner = pi_state->owner;
+
+       lockdep_assert_held(&pi_state->pi_mutex.wait_lock);
+
+       if (old_owner) {
+               raw_spin_lock(&old_owner->pi_lock);
+               WARN_ON(list_empty(&pi_state->list));
+               list_del_init(&pi_state->list);
+               raw_spin_unlock(&old_owner->pi_lock);
+       }
+
+       if (new_owner) {
+               raw_spin_lock(&new_owner->pi_lock);
+               WARN_ON(!list_empty(&pi_state->list));
+               list_add(&pi_state->list, &new_owner->pi_state_list);
+               pi_state->owner = new_owner;
+               raw_spin_unlock(&new_owner->pi_lock);
+       }
+}
+
 static void get_pi_state(struct futex_pi_state *pi_state)
 {
        WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount));
@@ -785,17 +808,11 @@ static void put_pi_state(struct futex_pi_state *pi_state)
         * and has cleaned up the pi_state already
         */
        if (pi_state->owner) {
-               struct task_struct *owner;
                unsigned long flags;
 
                raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags);
-               owner = pi_state->owner;
-               if (owner) {
-                       raw_spin_lock(&owner->pi_lock);
-                       list_del_init(&pi_state->list);
-                       raw_spin_unlock(&owner->pi_lock);
-               }
-               rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
+               pi_state_update_owner(pi_state, NULL);
+               rt_mutex_proxy_unlock(&pi_state->pi_mutex);
                raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags);
        }
 
@@ -941,7 +958,8 @@ static inline void exit_pi_state_list(struct task_struct *curr) { }
  *     FUTEX_OWNER_DIED bit. See [4]
  *
  * [10] There is no transient state which leaves owner and user space
- *     TID out of sync.
+ *     TID out of sync. Except one error case where the kernel is denied
+ *     write access to the user address, see fixup_pi_state_owner().
  *
  *
  * Serialization and lifetime rules:
@@ -1521,26 +1539,15 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
                        ret = -EINVAL;
        }
 
-       if (ret)
-               goto out_unlock;
-
-       /*
-        * This is a point of no return; once we modify the uval there is no
-        * going back and subsequent operations must not fail.
-        */
-
-       raw_spin_lock(&pi_state->owner->pi_lock);
-       WARN_ON(list_empty(&pi_state->list));
-       list_del_init(&pi_state->list);
-       raw_spin_unlock(&pi_state->owner->pi_lock);
-
-       raw_spin_lock(&new_owner->pi_lock);
-       WARN_ON(!list_empty(&pi_state->list));
-       list_add(&pi_state->list, &new_owner->pi_state_list);
-       pi_state->owner = new_owner;
-       raw_spin_unlock(&new_owner->pi_lock);
-
-       postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+       if (!ret) {
+               /*
+                * This is a point of no return; once we modified the uval
+                * there is no going back and subsequent operations must
+                * not fail.
+                */
+               pi_state_update_owner(pi_state, new_owner);
+               postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+       }
 
 out_unlock:
        raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
@@ -2323,18 +2330,13 @@ static void unqueue_me_pi(struct futex_q *q)
        spin_unlock(q->lock_ptr);
 }
 
-static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
-                               struct task_struct *argowner)
+static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+                                 struct task_struct *argowner)
 {
        struct futex_pi_state *pi_state = q->pi_state;
-       u32 uval, curval, newval;
        struct task_struct *oldowner, *newowner;
-       u32 newtid;
-       int ret, err = 0;
-
-       lockdep_assert_held(q->lock_ptr);
-
-       raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+       u32 uval, curval, newval, newtid;
+       int err = 0;
 
        oldowner = pi_state->owner;
 
@@ -2368,14 +2370,12 @@ retry:
                         * We raced against a concurrent self; things are
                         * already fixed up. Nothing to do.
                         */
-                       ret = 0;
-                       goto out_unlock;
+                       return 0;
                }
 
                if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
-                       /* We got the lock after all, nothing to fix. */
-                       ret = 0;
-                       goto out_unlock;
+                       /* We got the lock. pi_state is correct. Tell caller. */
+                       return 1;
                }
 
                /*
@@ -2402,8 +2402,7 @@ retry:
                         * We raced against a concurrent self; things are
                         * already fixed up. Nothing to do.
                         */
-                       ret = 0;
-                       goto out_unlock;
+                       return 1;
                }
                newowner = argowner;
        }
@@ -2433,22 +2432,9 @@ retry:
         * We fixed up user space. Now we need to fix the pi_state
         * itself.
         */
-       if (pi_state->owner != NULL) {
-               raw_spin_lock(&pi_state->owner->pi_lock);
-               WARN_ON(list_empty(&pi_state->list));
-               list_del_init(&pi_state->list);
-               raw_spin_unlock(&pi_state->owner->pi_lock);
-       }
+       pi_state_update_owner(pi_state, newowner);
 
-       pi_state->owner = newowner;
-
-       raw_spin_lock(&newowner->pi_lock);
-       WARN_ON(!list_empty(&pi_state->list));
-       list_add(&pi_state->list, &newowner->pi_state_list);
-       raw_spin_unlock(&newowner->pi_lock);
-       raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
-
-       return 0;
+       return argowner == current;
 
        /*
         * In order to reschedule or handle a page fault, we need to drop the
@@ -2469,17 +2455,16 @@ handle_err:
 
        switch (err) {
        case -EFAULT:
-               ret = fault_in_user_writeable(uaddr);
+               err = fault_in_user_writeable(uaddr);
                break;
 
        case -EAGAIN:
                cond_resched();
-               ret = 0;
+               err = 0;
                break;
 
        default:
                WARN_ON_ONCE(1);
-               ret = err;
                break;
        }
 
@@ -2489,17 +2474,44 @@ handle_err:
        /*
         * Check if someone else fixed it for us:
         */
-       if (pi_state->owner != oldowner) {
-               ret = 0;
-               goto out_unlock;
-       }
+       if (pi_state->owner != oldowner)
+               return argowner == current;
 
-       if (ret)
-               goto out_unlock;
+       /* Retry if err was -EAGAIN or the fault in succeeded */
+       if (!err)
+               goto retry;
 
-       goto retry;
+       /*
+        * fault_in_user_writeable() failed so user state is immutable. At
+        * best we can make the kernel state consistent but user state will
+        * be most likely hosed and any subsequent unlock operation will be
+        * rejected due to PI futex rule [10].
+        *
+        * Ensure that the rtmutex owner is also the pi_state owner despite
+        * the user space value claiming something different. There is no
+        * point in unlocking the rtmutex if current is the owner as it
+        * would need to wait until the next waiter has taken the rtmutex
+        * to guarantee consistent state. Keep it simple. Userspace asked
+        * for this wrecked state.
+        *
+        * The rtmutex has an owner - either current or some other
+        * task. See the EAGAIN loop above.
+        */
+       pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex));
 
-out_unlock:
+       return err;
+}
+
+static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+                               struct task_struct *argowner)
+{
+       struct futex_pi_state *pi_state = q->pi_state;
+       int ret;
+
+       lockdep_assert_held(q->lock_ptr);
+
+       raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+       ret = __fixup_pi_state_owner(uaddr, q, argowner);
        raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
        return ret;
 }
@@ -2523,8 +2535,6 @@ static long futex_wait_restart(struct restart_block *restart);
  */
 static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
 {
-       int ret = 0;
-
        if (locked) {
                /*
                 * Got the lock. We might not be the anticipated owner if we
@@ -2535,8 +2545,8 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
                 * stable state, anything else needs more attention.
                 */
                if (q->pi_state->owner != current)
-                       ret = fixup_pi_state_owner(uaddr, q, current);
-               return ret ? ret : locked;
+                       return fixup_pi_state_owner(uaddr, q, current);
+               return 1;
        }
 
        /*
@@ -2547,23 +2557,17 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
         * Another speculative read; pi_state->owner == current is unstable
         * but needs our attention.
         */
-       if (q->pi_state->owner == current) {
-               ret = fixup_pi_state_owner(uaddr, q, NULL);
-               return ret;
-       }
+       if (q->pi_state->owner == current)
+               return fixup_pi_state_owner(uaddr, q, NULL);
 
        /*
         * Paranoia check. If we did not take the lock, then we should not be
-        * the owner of the rt_mutex.
+        * the owner of the rt_mutex. Warn and establish consistent state.
         */
-       if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
-               printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
-                               "pi-state %p\n", ret,
-                               q->pi_state->pi_mutex.owner,
-                               q->pi_state->owner);
-       }
+       if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current))
+               return fixup_pi_state_owner(uaddr, q, current);
 
-       return ret;
+       return 0;
 }
 
 /**
@@ -2771,7 +2775,6 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
                         ktime_t *time, int trylock)
 {
        struct hrtimer_sleeper timeout, *to;
-       struct futex_pi_state *pi_state = NULL;
        struct task_struct *exiting = NULL;
        struct rt_mutex_waiter rt_waiter;
        struct futex_hash_bucket *hb;
@@ -2907,23 +2910,8 @@ no_block:
        if (res)
                ret = (res < 0) ? res : 0;
 
-       /*
-        * If fixup_owner() faulted and was unable to handle the fault, unlock
-        * it and return the fault to userspace.
-        */
-       if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) {
-               pi_state = q.pi_state;
-               get_pi_state(pi_state);
-       }
-
        /* Unqueue and drop the lock */
        unqueue_me_pi(&q);
-
-       if (pi_state) {
-               rt_mutex_futex_unlock(&pi_state->pi_mutex);
-               put_pi_state(pi_state);
-       }
-
        goto out;
 
 out_unlock_put_key:
@@ -3183,7 +3171,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
                                 u32 __user *uaddr2)
 {
        struct hrtimer_sleeper timeout, *to;
-       struct futex_pi_state *pi_state = NULL;
        struct rt_mutex_waiter rt_waiter;
        struct futex_hash_bucket *hb;
        union futex_key key2 = FUTEX_KEY_INIT;
@@ -3261,16 +3248,17 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
                if (q.pi_state && (q.pi_state->owner != current)) {
                        spin_lock(q.lock_ptr);
                        ret = fixup_pi_state_owner(uaddr2, &q, current);
-                       if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
-                               pi_state = q.pi_state;
-                               get_pi_state(pi_state);
-                       }
                        /*
                         * Drop the reference to the pi state which
                         * the requeue_pi() code acquired for us.
                         */
                        put_pi_state(q.pi_state);
                        spin_unlock(q.lock_ptr);
+                       /*
+                        * Adjust the return value. It's either -EFAULT or
+                        * success (1) but the caller expects 0 for success.
+                        */
+                       ret = ret < 0 ? ret : 0;
                }
        } else {
                struct rt_mutex *pi_mutex;
@@ -3301,25 +3289,10 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
                if (res)
                        ret = (res < 0) ? res : 0;
 
-               /*
-                * If fixup_pi_state_owner() faulted and was unable to handle
-                * the fault, unlock the rt_mutex and return the fault to
-                * userspace.
-                */
-               if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
-                       pi_state = q.pi_state;
-                       get_pi_state(pi_state);
-               }
-
                /* Unqueue and drop the lock. */
                unqueue_me_pi(&q);
        }
 
-       if (pi_state) {
-               rt_mutex_futex_unlock(&pi_state->pi_mutex);
-               put_pi_state(pi_state);
-       }
-
        if (ret == -EINTR) {
                /*
                 * We've already been requeued, but cannot restart by calling
index ab8567f..dec3f73 100644 (file)
@@ -2859,3 +2859,4 @@ bool irq_check_status_bit(unsigned int irq, unsigned int bitmask)
        rcu_read_unlock();
        return res;
 }
+EXPORT_SYMBOL_GPL(irq_check_status_bit);
index 2c0c4d6..dc0e2d7 100644 (file)
@@ -402,7 +402,7 @@ int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
        struct msi_domain_ops *ops = info->ops;
        struct irq_data *irq_data;
        struct msi_desc *desc;
-       msi_alloc_info_t arg;
+       msi_alloc_info_t arg = { };
        int i, ret, virq;
        bool can_reserve;
 
index a5eceec..1578973 100644 (file)
@@ -294,7 +294,7 @@ static int kthread(void *_create)
        do_exit(ret);
 }
 
-/* called from do_fork() to get node information for about to be created task */
+/* called from kernel_clone() to get node information for about to be created task */
 int tsk_fork_get_node(struct task_struct *tsk)
 {
 #ifdef CONFIG_NUMA
@@ -493,11 +493,36 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
                return p;
        kthread_bind(p, cpu);
        /* CPU hotplug need to bind once again when unparking the thread. */
-       set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
        to_kthread(p)->cpu = cpu;
        return p;
 }
 
+void kthread_set_per_cpu(struct task_struct *k, int cpu)
+{
+       struct kthread *kthread = to_kthread(k);
+       if (!kthread)
+               return;
+
+       WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
+
+       if (cpu < 0) {
+               clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+               return;
+       }
+
+       kthread->cpu = cpu;
+       set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+}
+
+bool kthread_is_per_cpu(struct task_struct *k)
+{
+       struct kthread *kthread = to_kthread(k);
+       if (!kthread)
+               return false;
+
+       return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+}
+
 /**
  * kthread_unpark - unpark a thread created by kthread_create().
  * @k:         thread created by kthread_create().
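The kthread hunks above move per-CPU marking out of kthread_create_on_cpu() and into an explicit kthread_set_per_cpu()/kthread_is_per_cpu() pair, which the scheduler hot-unplug changes further down query. A minimal usage sketch (the worker function and thread name are illustrative, not taken from the patch):

	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/err.h>

	/* Illustrative only: create a worker bound to @cpu and mark it per-CPU
	 * so the balance_push()/is_cpu_allowed() logic in the scheduler hunks
	 * below leaves it alone while the CPU is inactive but still online.
	 */
	static struct task_struct *start_worker(int (*worker_fn)(void *), unsigned int cpu)
	{
		struct task_struct *t;

		t = kthread_create_on_cpu(worker_fn, NULL, cpu, "worker/%u");
		if (IS_ERR(t))
			return t;

		kthread_set_per_cpu(t, cpu);
		wake_up_process(t);
		return t;
	}
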
index c1418b4..bdaf482 100644 (file)
@@ -79,7 +79,7 @@ module_param(lock_stat, int, 0644);
 DEFINE_PER_CPU(unsigned int, lockdep_recursion);
 EXPORT_PER_CPU_SYMBOL_GPL(lockdep_recursion);
 
-static inline bool lockdep_enabled(void)
+static __always_inline bool lockdep_enabled(void)
 {
        if (!debug_locks)
                return false;
@@ -5271,12 +5271,15 @@ static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie
 /*
  * Check whether we follow the irq-flags state precisely:
  */
-static void check_flags(unsigned long flags)
+static noinstr void check_flags(unsigned long flags)
 {
 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
        if (!debug_locks)
                return;
 
+       /* Get the warning out..  */
+       instrumentation_begin();
+
        if (irqs_disabled_flags(flags)) {
                if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) {
                        printk("possible reason: unannotated irqs-off.\n");
@@ -5304,6 +5307,8 @@ static void check_flags(unsigned long flags)
 
        if (!debug_locks)
                print_irqtrace_events(current);
+
+       instrumentation_end();
 #endif
 }
 
index cfdd5b9..2f8cd61 100644 (file)
@@ -1716,8 +1716,7 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
  * possible because it belongs to the pi_state which is about to be freed
  * and it is no longer visible to other tasks.
  */
-void rt_mutex_proxy_unlock(struct rt_mutex *lock,
-                          struct task_struct *proxy_owner)
+void rt_mutex_proxy_unlock(struct rt_mutex *lock)
 {
        debug_rt_mutex_proxy_unlock(lock);
        rt_mutex_set_owner(lock, NULL);
index d1d62f9..ca6fb48 100644 (file)
@@ -133,8 +133,7 @@ enum rtmutex_chainwalk {
 extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
 extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
                                       struct task_struct *proxy_owner);
-extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
-                                 struct task_struct *proxy_owner);
+extern void rt_mutex_proxy_unlock(struct rt_mutex *lock);
 extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
 extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
                                     struct rt_mutex_waiter *waiter,
index ffdd0dc..5a95c68 100644 (file)
@@ -1291,11 +1291,16 @@ static size_t info_print_prefix(const struct printk_info  *info, bool syslog,
  * done:
  *
  *   - Add prefix for each line.
+ *   - Drop truncated lines that no longer fit into the buffer.
  *   - Add the trailing newline that has been removed in vprintk_store().
- *   - Drop truncated lines that do not longer fit into the buffer.
+ *   - Add a string terminator.
+ *
+ * Since the produced string is always terminated, the maximum possible
+ * return value is @r->text_buf_size - 1;
  *
  * Return: The length of the updated/prepared text, including the added
- * prefixes and the newline. The dropped line(s) are not counted.
+ * prefixes and the newline. The terminator is not counted. The dropped
+ * line(s) are not counted.
  */
 static size_t record_print_text(struct printk_record *r, bool syslog,
                                bool time)
@@ -1338,26 +1343,31 @@ static size_t record_print_text(struct printk_record *r, bool syslog,
 
                /*
                 * Truncate the text if there is not enough space to add the
-                * prefix and a trailing newline.
+                * prefix and a trailing newline and a terminator.
                 */
-               if (len + prefix_len + text_len + 1 > buf_size) {
+               if (len + prefix_len + text_len + 1 + 1 > buf_size) {
                        /* Drop even the current line if no space. */
-                       if (len + prefix_len + line_len + 1 > buf_size)
+                       if (len + prefix_len + line_len + 1 + 1 > buf_size)
                                break;
 
-                       text_len = buf_size - len - prefix_len - 1;
+                       text_len = buf_size - len - prefix_len - 1 - 1;
                        truncated = true;
                }
 
                memmove(text + prefix_len, text, text_len);
                memcpy(text, prefix, prefix_len);
 
+               /*
+                * Increment the prepared length to include the text and
+                * prefix that were just moved+copied. Also increment for the
+                * newline at the end of this line. If this is the last line,
+                * there is no newline, but it will be added immediately below.
+                */
                len += prefix_len + line_len + 1;
-
                if (text_len == line_len) {
                        /*
-                        * Add the trailing newline removed in
-                        * vprintk_store().
+                        * This is the last line. Add the trailing newline
+                        * removed in vprintk_store().
                         */
                        text[prefix_len + line_len] = '\n';
                        break;
@@ -1382,6 +1392,14 @@ static size_t record_print_text(struct printk_record *r, bool syslog,
                text_len -= line_len + 1;
        }
 
+       /*
+        * If a buffer was provided, it will be terminated. Space for the
+        * string terminator is guaranteed to be available. The terminator is
+        * not counted in the return value.
+        */
+       if (buf_size > 0)
+               r->text_buf[len] = 0;
+
        return len;
 }
 
@@ -3427,7 +3445,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
        while (prb_read_valid_info(prb, seq, &info, &line_count)) {
                if (r.info->seq >= dumper->next_seq)
                        break;
-               l += get_record_print_text_size(&info, line_count, true, time);
+               l += get_record_print_text_size(&info, line_count, syslog, time);
                seq = r.info->seq + 1;
        }
 
@@ -3437,7 +3455,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
                                                &info, &line_count)) {
                if (r.info->seq >= dumper->next_seq)
                        break;
-               l -= get_record_print_text_size(&info, line_count, true, time);
+               l -= get_record_print_text_size(&info, line_count, syslog, time);
                seq = r.info->seq + 1;
        }
 
index 6704f06..8a7b736 100644 (file)
@@ -1718,7 +1718,7 @@ static bool copy_data(struct prb_data_ring *data_ring,
 
        /* Caller interested in the line count? */
        if (line_count)
-               *line_count = count_lines(data, data_size);
+               *line_count = count_lines(data, len);
 
        /* Caller interested in the data content? */
        if (!buf || !buf_size)
index 15d2562..ff74fca 100644 (file)
@@ -1796,13 +1796,28 @@ static inline bool rq_has_pinned_tasks(struct rq *rq)
  */
 static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
 {
+       /* When not in the task's cpumask, no point in looking further. */
        if (!cpumask_test_cpu(cpu, p->cpus_ptr))
                return false;
 
-       if (is_per_cpu_kthread(p) || is_migration_disabled(p))
+       /* migrate_disabled() must be allowed to finish. */
+       if (is_migration_disabled(p))
                return cpu_online(cpu);
 
-       return cpu_active(cpu);
+       /* Non kernel threads are not allowed during either online or offline. */
+       if (!(p->flags & PF_KTHREAD))
+               return cpu_active(cpu);
+
+       /* KTHREAD_IS_PER_CPU is always allowed. */
+       if (kthread_is_per_cpu(p))
+               return cpu_online(cpu);
+
+       /* Regular kernel threads don't get to stay during offline. */
+       if (cpu_rq(cpu)->balance_push)
+               return false;
+
+       /* But are allowed during online. */
+       return cpu_online(cpu);
 }
 
 /*
@@ -2327,7 +2342,9 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 
        if (p->flags & PF_KTHREAD || is_migration_disabled(p)) {
                /*
-                * Kernel threads are allowed on online && !active CPUs.
+                * Kernel threads are allowed on online && !active CPUs,
+                * however, during cpu-hot-unplug, even these might get pushed
+                * away if not KTHREAD_IS_PER_CPU.
                 *
                 * Specifically, migration_disabled() tasks must not fail the
                 * cpumask_any_and_distribute() pick below, esp. so on
@@ -2371,16 +2388,6 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 
        __do_set_cpus_allowed(p, new_mask, flags);
 
-       if (p->flags & PF_KTHREAD) {
-               /*
-                * For kernel threads that do indeed end up on online &&
-                * !active we want to ensure they are strict per-CPU threads.
-                */
-               WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
-                       !cpumask_intersects(new_mask, cpu_active_mask) &&
-                       p->nr_cpus_allowed != 1);
-       }
-
        return affine_move_task(rq, p, &rf, dest_cpu, flags);
 
 out:
@@ -3121,6 +3128,13 @@ bool cpus_share_cache(int this_cpu, int that_cpu)
 
 static inline bool ttwu_queue_cond(int cpu, int wake_flags)
 {
+       /*
+        * Do not complicate things with the async wake_list while the CPU is
+        * in hotplug state.
+        */
+       if (!cpu_active(cpu))
+               return false;
+
        /*
         * If the CPU does not share cache, then queue the task on the
         * remote rqs wakelist to avoid accessing remote data.
@@ -7276,8 +7290,14 @@ static void balance_push(struct rq *rq)
        /*
         * Both the cpu-hotplug and stop task are in this case and are
         * required to complete the hotplug process.
+        *
+        * XXX: the idle task does not match kthread_is_per_cpu() due to
+        * histerical raisins.
         */
-       if (is_per_cpu_kthread(push_task) || is_migration_disabled(push_task)) {
+       if (rq->idle == push_task ||
+           ((push_task->flags & PF_KTHREAD) && kthread_is_per_cpu(push_task)) ||
+           is_migration_disabled(push_task)) {
+
                /*
                 * If this is the idle task on the outgoing CPU try to wake
                 * up the hotplug control thread which might wait for the
@@ -7309,7 +7329,7 @@ static void balance_push(struct rq *rq)
        /*
         * At this point need_resched() is true and we'll take the loop in
         * schedule(). The next pick is obviously going to be the stop task
-        * which is_per_cpu_kthread() and will push this task away.
+        * which kthread_is_per_cpu() and will push this task away.
         */
        raw_spin_lock(&rq->lock);
 }
@@ -7320,10 +7340,13 @@ static void balance_push_set(int cpu, bool on)
        struct rq_flags rf;
 
        rq_lock_irqsave(rq, &rf);
-       if (on)
+       rq->balance_push = on;
+       if (on) {
+               WARN_ON_ONCE(rq->balance_callback);
                rq->balance_callback = &balance_push_callback;
-       else
+       } else if (rq->balance_callback == &balance_push_callback) {
                rq->balance_callback = NULL;
+       }
        rq_unlock_irqrestore(rq, &rf);
 }
 
@@ -7441,6 +7464,10 @@ int sched_cpu_activate(unsigned int cpu)
        struct rq *rq = cpu_rq(cpu);
        struct rq_flags rf;
 
+       /*
+        * Make sure that when the hotplug state machine does a roll-back
+        * we clear balance_push. Ideally that would happen earlier...
+        */
        balance_push_set(cpu, false);
 
 #ifdef CONFIG_SCHED_SMT
@@ -7483,17 +7510,27 @@ int sched_cpu_deactivate(unsigned int cpu)
        int ret;
 
        set_cpu_active(cpu, false);
+
+       /*
+        * From this point forward, this CPU will refuse to run any task that
+        * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
+        * push those tasks away until this gets cleared, see
+        * sched_cpu_dying().
+        */
+       balance_push_set(cpu, true);
+
        /*
-        * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
-        * users of this state to go away such that all new such users will
-        * observe it.
+        * We've cleared cpu_active_mask / set balance_push, wait for all
+        * preempt-disabled and RCU users of this state to go away such that
+        * all new such users will observe it.
+        *
+        * Specifically, we rely on ttwu to no longer target this CPU, see
+        * ttwu_queue_cond() and is_cpu_allowed().
         *
         * Do sync before park smpboot threads to take care the rcu boost case.
         */
        synchronize_rcu();
 
-       balance_push_set(cpu, true);
-
        rq_lock_irqsave(rq, &rf);
        if (rq->rd) {
                update_rq_clock(rq);
@@ -7574,6 +7611,25 @@ static void calc_load_migrate(struct rq *rq)
                atomic_long_add(delta, &calc_load_tasks);
 }
 
+static void dump_rq_tasks(struct rq *rq, const char *loglvl)
+{
+       struct task_struct *g, *p;
+       int cpu = cpu_of(rq);
+
+       lockdep_assert_held(&rq->lock);
+
+       printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
+       for_each_process_thread(g, p) {
+               if (task_cpu(p) != cpu)
+                       continue;
+
+               if (!task_on_rq_queued(p))
+                       continue;
+
+               printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
+       }
+}
+
 int sched_cpu_dying(unsigned int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
@@ -7583,9 +7639,18 @@ int sched_cpu_dying(unsigned int cpu)
        sched_tick_stop(cpu);
 
        rq_lock_irqsave(rq, &rf);
-       BUG_ON(rq->nr_running != 1 || rq_has_pinned_tasks(rq));
+       if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
+               WARN(true, "Dying CPU not properly vacated!");
+               dump_rq_tasks(rq, KERN_WARNING);
+       }
        rq_unlock_irqrestore(rq, &rf);
 
+       /*
+        * Now that the CPU is offline, make sure we're welcome
+        * to new tasks once we come back up.
+        */
+       balance_push_set(cpu, false);
+
        calc_load_migrate(rq);
        update_max_interval();
        nohz_balance_exit_idle(rq);
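
Taken together, the kernel/sched/core.c hunks above define the lifetime of the new rq->balance_push flag: sched_cpu_deactivate() sets it before the synchronize_rcu(), so that once the grace period ends is_cpu_allowed() and ttwu_queue_cond() stop routing work to the CPU; balance_push() then evacuates every task that is not the idle task, a KTHREAD_IS_PER_CPU kthread, or migration-disabled; and the flag is cleared again in sched_cpu_activate() on a hotplug rollback, or in sched_cpu_dying() once the CPU is fully offline.
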
index 12ada79..bb09988 100644 (file)
@@ -975,6 +975,7 @@ struct rq {
        unsigned long           cpu_capacity_orig;
 
        struct callback_head    *balance_callback;
+       unsigned char           balance_push;
 
        unsigned char           nohz_idle_balance;
        unsigned char           idle_balance;
index 6b9c431..5ad8566 100644 (file)
@@ -3704,7 +3704,8 @@ static bool access_pidfd_pidns(struct pid *pid)
        return true;
 }
 
-static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
+static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
+               siginfo_t __user *info)
 {
 #ifdef CONFIG_COMPAT
        /*
index 2efe1e2..f25208e 100644 (file)
@@ -188,6 +188,7 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
                kfree(td);
                return PTR_ERR(tsk);
        }
+       kthread_set_per_cpu(tsk, cpu);
        /*
         * Park the thread so that it could start right on the CPU
         * when it is available.
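
The one-line smpboot change is the generic pattern for any owner of a strictly per-CPU kthread: create the thread bound to its CPU, then mark it with kthread_set_per_cpu() so the new hotplug logic (kthread_is_per_cpu() in is_cpu_allowed() and balance_push()) lets it remain on an online-but-!active CPU instead of pushing it away. A minimal sketch, assuming only the helpers visible in this merge; the function name, thread name and error handling are illustrative:

	#include <linux/err.h>
	#include <linux/kthread.h>

	/* Hypothetical caller, for illustration only. */
	static struct task_struct *start_percpu_worker(int (*fn)(void *), void *arg,
						       unsigned int cpu)
	{
		struct task_struct *tsk;

		tsk = kthread_create_on_cpu(fn, arg, cpu, "example/%u");
		if (IS_ERR(tsk))
			return tsk;

		/* Keep the thread on its CPU across hot-unplug preparation:
		 * is_cpu_allowed() treats it as per-CPU and balance_push()
		 * will not evacuate it. */
		kthread_set_per_cpu(tsk, cpu);
		wake_up_process(tsk);
		return tsk;
	}
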
index 7404d38..87389b9 100644 (file)
@@ -498,7 +498,7 @@ out:
 static void sync_hw_clock(struct work_struct *work);
 static DECLARE_WORK(sync_work, sync_hw_clock);
 static struct hrtimer sync_hrtimer;
-#define SYNC_PERIOD_NS (11UL * 60 * NSEC_PER_SEC)
+#define SYNC_PERIOD_NS (11ULL * 60 * NSEC_PER_SEC)
 
 static enum hrtimer_restart sync_timer_callback(struct hrtimer *timer)
 {
@@ -512,7 +512,7 @@ static void sched_sync_hw_clock(unsigned long offset_nsec, bool retry)
        ktime_t exp = ktime_set(ktime_get_real_seconds(), 0);
 
        if (retry)
-               exp = ktime_add_ns(exp, 2 * NSEC_PER_SEC - offset_nsec);
+               exp = ktime_add_ns(exp, 2ULL * NSEC_PER_SEC - offset_nsec);
        else
                exp = ktime_add_ns(exp, SYNC_PERIOD_NS - offset_nsec);
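
The ULL suffixes matter on 32-bit builds: 11 * 60 * NSEC_PER_SEC is 660,000,000,000 ns, well beyond the 4,294,967,295 limit of a 32-bit unsigned long, so the old UL constant wrapped and the RTC sync period came out wrong; the second change keeps the companion expression in 64-bit arithmetic before it is handed to ktime_add_ns().
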
 
index a45cedd..6aee576 100644 (file)
@@ -991,8 +991,7 @@ EXPORT_SYMBOL_GPL(ktime_get_seconds);
 /**
  * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
  *
- * Returns the wall clock seconds since 1970. This replaces the
- * get_seconds() interface which is not y2038 safe on 32bit systems.
+ * Returns the wall clock seconds since 1970.
  *
  * For 64bit systems the fast access to tk->xtime_sec is preserved. On
  * 32bit systems the access must be protected with the sequence
index 9880b6c..894bb88 100644 (file)
@@ -1848,12 +1848,6 @@ static void worker_attach_to_pool(struct worker *worker,
 {
        mutex_lock(&wq_pool_attach_mutex);
 
-       /*
-        * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
-        * online CPUs.  It'll be re-applied when any of the CPUs come up.
-        */
-       set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
-
        /*
         * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
         * stable across this function.  See the comments above the flag
@@ -1861,6 +1855,11 @@ static void worker_attach_to_pool(struct worker *worker,
         */
        if (pool->flags & POOL_DISASSOCIATED)
                worker->flags |= WORKER_UNBOUND;
+       else
+               kthread_set_per_cpu(worker->task, pool->cpu);
+
+       if (worker->rescue_wq)
+               set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
 
        list_add_tail(&worker->node, &pool->workers);
        worker->pool = pool;
@@ -1883,6 +1882,7 @@ static void worker_detach_from_pool(struct worker *worker)
 
        mutex_lock(&wq_pool_attach_mutex);
 
+       kthread_set_per_cpu(worker->task, -1);
        list_del(&worker->node);
        worker->pool = NULL;
 
@@ -4919,8 +4919,10 @@ static void unbind_workers(int cpu)
 
                raw_spin_unlock_irq(&pool->lock);
 
-               for_each_pool_worker(worker, pool)
-                       WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_active_mask) < 0);
+               for_each_pool_worker(worker, pool) {
+                       kthread_set_per_cpu(worker->task, -1);
+                       WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
+               }
 
                mutex_unlock(&wq_pool_attach_mutex);
 
@@ -4972,9 +4974,11 @@ static void rebind_workers(struct worker_pool *pool)
         * of all workers first and then clear UNBOUND.  As we're called
         * from CPU_ONLINE, the following shouldn't fail.
         */
-       for_each_pool_worker(worker, pool)
+       for_each_pool_worker(worker, pool) {
+               kthread_set_per_cpu(worker->task, pool->cpu);
                WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
                                                  pool->attrs->cpumask) < 0);
+       }
 
        raw_spin_lock_irq(&pool->lock);
 
index 8b635fd..3a0b1c9 100644 (file)
@@ -123,6 +123,7 @@ config UBSAN_SIGNED_OVERFLOW
 config UBSAN_UNSIGNED_OVERFLOW
        bool "Perform checking for unsigned arithmetic overflow"
        depends on $(cc-option,-fsanitize=unsigned-integer-overflow)
+       depends on !X86_32 # avoid excessive stack usage on x86-32/clang
        help
          This option enables -fsanitize=unsigned-integer-overflow which checks
          for overflow of any arithmetic operations with unsigned integers. This
index c3a9ea7..874b732 100644 (file)
@@ -473,6 +473,11 @@ static inline void *arch_kmap_local_high_get(struct page *page)
 }
 #endif
 
+#ifndef arch_kmap_local_set_pte
+#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev) \
+       set_pte_at(mm, vaddr, ptep, ptev)
+#endif
+
 /* Unmap a local mapping which was obtained by kmap_high_get() */
 static inline bool kmap_high_unmap_local(unsigned long vaddr)
 {
@@ -515,7 +520,7 @@ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        BUG_ON(!pte_none(*(kmap_pte - idx)));
        pteval = pfn_pte(pfn, prot);
-       set_pte_at(&init_mm, vaddr, kmap_pte - idx, pteval);
+       arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte - idx, pteval);
        arch_kmap_local_post_map(vaddr, pteval);
        current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
        preempt_enable();
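
The new arch_kmap_local_set_pte() hook lets an architecture whose kmap_local path cannot go through set_pte_at() install the temporary PTE itself, with set_pte_at() remaining the default. A hypothetical override, purely illustrative (the macro body is assumed, not taken from this merge):

	/* <arch>/include/asm/highmem.h -- hypothetical example */
	#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev)	\
		set_pte(ptep, ptev)
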
index 55bd6f0..e529428 100644 (file)
 
 #include "kasan.h"
 
-enum kasan_arg_mode {
-       KASAN_ARG_MODE_DEFAULT,
-       KASAN_ARG_MODE_OFF,
-       KASAN_ARG_MODE_PROD,
-       KASAN_ARG_MODE_FULL,
+enum kasan_arg {
+       KASAN_ARG_DEFAULT,
+       KASAN_ARG_OFF,
+       KASAN_ARG_ON,
 };
 
 enum kasan_arg_stacktrace {
@@ -38,7 +37,7 @@ enum kasan_arg_fault {
        KASAN_ARG_FAULT_PANIC,
 };
 
-static enum kasan_arg_mode kasan_arg_mode __ro_after_init;
+static enum kasan_arg kasan_arg __ro_after_init;
 static enum kasan_arg_stacktrace kasan_arg_stacktrace __ro_after_init;
 static enum kasan_arg_fault kasan_arg_fault __ro_after_init;
 
@@ -52,26 +51,24 @@ DEFINE_STATIC_KEY_FALSE(kasan_flag_stacktrace);
 /* Whether panic or disable tag checking on fault. */
 bool kasan_flag_panic __ro_after_init;
 
-/* kasan.mode=off/prod/full */
-static int __init early_kasan_mode(char *arg)
+/* kasan=off/on */
+static int __init early_kasan_flag(char *arg)
 {
        if (!arg)
                return -EINVAL;
 
        if (!strcmp(arg, "off"))
-               kasan_arg_mode = KASAN_ARG_MODE_OFF;
-       else if (!strcmp(arg, "prod"))
-               kasan_arg_mode = KASAN_ARG_MODE_PROD;
-       else if (!strcmp(arg, "full"))
-               kasan_arg_mode = KASAN_ARG_MODE_FULL;
+               kasan_arg = KASAN_ARG_OFF;
+       else if (!strcmp(arg, "on"))
+               kasan_arg = KASAN_ARG_ON;
        else
                return -EINVAL;
 
        return 0;
 }
-early_param("kasan.mode", early_kasan_mode);
+early_param("kasan", early_kasan_flag);
 
-/* kasan.stack=off/on */
+/* kasan.stacktrace=off/on */
 static int __init early_kasan_flag_stacktrace(char *arg)
 {
        if (!arg)
@@ -113,8 +110,8 @@ void kasan_init_hw_tags_cpu(void)
         * as this function is only called for MTE-capable hardware.
         */
 
-       /* If KASAN is disabled, do nothing. */
-       if (kasan_arg_mode == KASAN_ARG_MODE_OFF)
+       /* If KASAN is disabled via command line, don't initialize it. */
+       if (kasan_arg == KASAN_ARG_OFF)
                return;
 
        hw_init_tags(KASAN_TAG_MAX);
@@ -124,43 +121,28 @@ void kasan_init_hw_tags_cpu(void)
 /* kasan_init_hw_tags() is called once on boot CPU. */
 void __init kasan_init_hw_tags(void)
 {
-       /* If hardware doesn't support MTE, do nothing. */
+       /* If hardware doesn't support MTE, don't initialize KASAN. */
        if (!system_supports_mte())
                return;
 
-       /* Choose KASAN mode if kasan boot parameter is not provided. */
-       if (kasan_arg_mode == KASAN_ARG_MODE_DEFAULT) {
-               if (IS_ENABLED(CONFIG_DEBUG_KERNEL))
-                       kasan_arg_mode = KASAN_ARG_MODE_FULL;
-               else
-                       kasan_arg_mode = KASAN_ARG_MODE_PROD;
-       }
-
-       /* Preset parameter values based on the mode. */
-       switch (kasan_arg_mode) {
-       case KASAN_ARG_MODE_DEFAULT:
-               /* Shouldn't happen as per the check above. */
-               WARN_ON(1);
-               return;
-       case KASAN_ARG_MODE_OFF:
-               /* If KASAN is disabled, do nothing. */
+       /* If KASAN is disabled via command line, don't initialize it. */
+       if (kasan_arg == KASAN_ARG_OFF)
                return;
-       case KASAN_ARG_MODE_PROD:
-               static_branch_enable(&kasan_flag_enabled);
-               break;
-       case KASAN_ARG_MODE_FULL:
-               static_branch_enable(&kasan_flag_enabled);
-               static_branch_enable(&kasan_flag_stacktrace);
-               break;
-       }
 
-       /* Now, optionally override the presets. */
+       /* Enable KASAN. */
+       static_branch_enable(&kasan_flag_enabled);
 
        switch (kasan_arg_stacktrace) {
        case KASAN_ARG_STACKTRACE_DEFAULT:
+               /*
+                * Default to enabling stack trace collection for
+                * debug kernels.
+                */
+               if (IS_ENABLED(CONFIG_DEBUG_KERNEL))
+                       static_branch_enable(&kasan_flag_stacktrace);
                break;
        case KASAN_ARG_STACKTRACE_OFF:
-               static_branch_disable(&kasan_flag_stacktrace);
+               /* Do nothing, kasan_flag_stacktrace keeps its default value. */
                break;
        case KASAN_ARG_STACKTRACE_ON:
                static_branch_enable(&kasan_flag_stacktrace);
@@ -169,11 +151,16 @@ void __init kasan_init_hw_tags(void)
 
        switch (kasan_arg_fault) {
        case KASAN_ARG_FAULT_DEFAULT:
+               /*
+                * Default to no panic on report.
+                * Do nothing, kasan_flag_panic keeps its default value.
+                */
                break;
        case KASAN_ARG_FAULT_REPORT:
-               kasan_flag_panic = false;
+               /* Do nothing, kasan_flag_panic keeps its default value. */
                break;
        case KASAN_ARG_FAULT_PANIC:
+               /* Enable panic on report. */
                kasan_flag_panic = true;
                break;
        }
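
After the rename, hardware tag-based KASAN is controlled by kasan=off|on, kasan.stacktrace=off|on and (from the same file, not shown in this hunk) kasan.fault=report|panic, rather than the old kasan.mode=off|prod|full presets. As an illustrative combination, booting with

	kasan=on kasan.stacktrace=on kasan.fault=panic

enables checking, collects stack traces and panics on the first report; left unset, stack-trace collection follows CONFIG_DEBUG_KERNEL and fault handling defaults to report, as the switch statements above show.
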
index 7ca0b92..c4605ac 100644 (file)
@@ -373,9 +373,10 @@ static void kasan_remove_pmd_table(pmd_t *pmd, unsigned long addr,
 
                if (kasan_pte_table(*pmd)) {
                        if (IS_ALIGNED(addr, PMD_SIZE) &&
-                           IS_ALIGNED(next, PMD_SIZE))
+                           IS_ALIGNED(next, PMD_SIZE)) {
                                pmd_clear(pmd);
-                       continue;
+                               continue;
+                       }
                }
                pte = pte_offset_kernel(pmd, addr);
                kasan_remove_pte_table(pte, addr, next);
@@ -398,9 +399,10 @@ static void kasan_remove_pud_table(pud_t *pud, unsigned long addr,
 
                if (kasan_pmd_table(*pud)) {
                        if (IS_ALIGNED(addr, PUD_SIZE) &&
-                           IS_ALIGNED(next, PUD_SIZE))
+                           IS_ALIGNED(next, PUD_SIZE)) {
                                pud_clear(pud);
-                       continue;
+                               continue;
+                       }
                }
                pmd = pmd_offset(pud, addr);
                pmd_base = pmd_offset(pud, 0);
@@ -424,9 +426,10 @@ static void kasan_remove_p4d_table(p4d_t *p4d, unsigned long addr,
 
                if (kasan_pud_table(*p4d)) {
                        if (IS_ALIGNED(addr, P4D_SIZE) &&
-                           IS_ALIGNED(next, P4D_SIZE))
+                           IS_ALIGNED(next, P4D_SIZE)) {
                                p4d_clear(p4d);
-                       continue;
+                               continue;
+                       }
                }
                pud = pud_offset(p4d, addr);
                kasan_remove_pud_table(pud, addr, next);
@@ -457,9 +460,10 @@ void kasan_remove_zero_shadow(void *start, unsigned long size)
 
                if (kasan_p4d_table(*pgd)) {
                        if (IS_ALIGNED(addr, PGDIR_SIZE) &&
-                           IS_ALIGNED(next, PGDIR_SIZE))
+                           IS_ALIGNED(next, PGDIR_SIZE)) {
                                pgd_clear(pgd);
-                       continue;
+                               continue;
+                       }
                }
 
                p4d = p4d_offset(pgd, addr);
@@ -482,7 +486,6 @@ int kasan_add_zero_shadow(void *start, unsigned long size)
 
        ret = kasan_populate_early_shadow(shadow_start, shadow_end);
        if (ret)
-               kasan_remove_zero_shadow(shadow_start,
-                                       size >> KASAN_SHADOW_SCALE_SHIFT);
+               kasan_remove_zero_shadow(start, size);
        return ret;
 }
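
The added braces fix a subtle fall-through: previously the continue sat outside the alignment test, so whenever the entry pointed at a KASAN shadow table the walk skipped it entirely, even if the requested range only partially covered it, leaving lower-level shadow entries in place. Now the entry is skipped only when the whole aligned region can be cleared; otherwise the walk descends. The final hunk makes the error path of kasan_add_zero_shadow() undo its work using the caller's original start/size rather than already-translated shadow addresses.
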
index d24bcfa..1eaaec1 100644 (file)
@@ -1427,7 +1427,7 @@ phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
 }
 
 /**
- * memblock_phys_alloc_try_nid - allocate a memory block from specified MUMA node
+ * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
  * @size: size of memory block to be allocated in bytes
  * @align: alignment of the region and block's size
  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
index 605f671..e2de77b 100644 (file)
@@ -3115,9 +3115,7 @@ void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
                page_counter_uncharge(&memcg->kmem, nr_pages);
 
-       page_counter_uncharge(&memcg->memory, nr_pages);
-       if (do_memsw_account())
-               page_counter_uncharge(&memcg->memsw, nr_pages);
+       refill_stock(memcg, nr_pages);
 }
 
 /**
index 04d9f15..e948163 100644 (file)
@@ -1885,6 +1885,12 @@ static int soft_offline_free_page(struct page *page)
        return rc;
 }
 
+static void put_ref_page(struct page *page)
+{
+       if (page)
+               put_page(page);
+}
+
 /**
  * soft_offline_page - Soft offline a page.
  * @pfn: pfn to soft-offline
@@ -1910,20 +1916,26 @@ static int soft_offline_free_page(struct page *page)
 int soft_offline_page(unsigned long pfn, int flags)
 {
        int ret;
-       struct page *page;
        bool try_again = true;
+       struct page *page, *ref_page = NULL;
+
+       WARN_ON_ONCE(!pfn_valid(pfn) && (flags & MF_COUNT_INCREASED));
 
        if (!pfn_valid(pfn))
                return -ENXIO;
+       if (flags & MF_COUNT_INCREASED)
+               ref_page = pfn_to_page(pfn);
+
        /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
        page = pfn_to_online_page(pfn);
-       if (!page)
+       if (!page) {
+               put_ref_page(ref_page);
                return -EIO;
+       }
 
        if (PageHWPoison(page)) {
                pr_info("%s: %#lx page already poisoned\n", __func__, pfn);
-               if (flags & MF_COUNT_INCREASED)
-                       put_page(page);
+               put_ref_page(ref_page);
                return 0;
        }
 
index ee5e612..c0efe92 100644 (file)
@@ -402,6 +402,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
        struct zone *oldzone, *newzone;
        int dirty;
        int expected_count = expected_page_refs(mapping, page) + extra_count;
+       int nr = thp_nr_pages(page);
 
        if (!mapping) {
                /* Anonymous page without mapping */
@@ -437,7 +438,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
         */
        newpage->index = page->index;
        newpage->mapping = page->mapping;
-       page_ref_add(newpage, thp_nr_pages(page)); /* add cache reference */
+       page_ref_add(newpage, nr); /* add cache reference */
        if (PageSwapBacked(page)) {
                __SetPageSwapBacked(newpage);
                if (PageSwapCache(page)) {
@@ -459,7 +460,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
        if (PageTransHuge(page)) {
                int i;
 
-               for (i = 1; i < HPAGE_PMD_NR; i++) {
+               for (i = 1; i < nr; i++) {
                        xas_next(&xas);
                        xas_store(&xas, newpage);
                }
@@ -470,7 +471,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
         * to one less reference.
         * We know this isn't the last reference.
         */
-       page_ref_unfreeze(page, expected_count - thp_nr_pages(page));
+       page_ref_unfreeze(page, expected_count - nr);
 
        xas_unlock(&xas);
        /* Leave irq disabled to prevent preemption while updating stats */
@@ -493,17 +494,17 @@ int migrate_page_move_mapping(struct address_space *mapping,
                old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
                new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
 
-               __dec_lruvec_state(old_lruvec, NR_FILE_PAGES);
-               __inc_lruvec_state(new_lruvec, NR_FILE_PAGES);
+               __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
+               __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
                if (PageSwapBacked(page) && !PageSwapCache(page)) {
-                       __dec_lruvec_state(old_lruvec, NR_SHMEM);
-                       __inc_lruvec_state(new_lruvec, NR_SHMEM);
+                       __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
+                       __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
                }
                if (dirty && mapping_can_writeback(mapping)) {
-                       __dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
-                       __dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
-                       __inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
-                       __inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
+                       __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
+                       __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
+                       __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
+                       __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
                }
        }
        local_irq_enable();
index 027f648..519a60d 100644 (file)
@@ -1207,8 +1207,10 @@ static void kernel_init_free_pages(struct page *page, int numpages)
        /* s390's use of memset() could override KASAN redzones. */
        kasan_disable_current();
        for (i = 0; i < numpages; i++) {
+               u8 tag = page_kasan_tag(page + i);
                page_kasan_tag_reset(page + i);
                clear_highpage(page + i);
+               page_kasan_tag_set(page + i, tag);
        }
        kasan_enable_current();
 }
index d9e4e10..7ecbbbe 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2791,7 +2791,8 @@ static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
                                                   void *obj)
 {
        if (unlikely(slab_want_init_on_free(s)) && obj)
-               memset((void *)((char *)obj + s->offset), 0, sizeof(void *));
+               memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
+                       0, sizeof(void *));
 }
 
 /*
@@ -2883,7 +2884,7 @@ redo:
                stat(s, ALLOC_FASTPATH);
        }
 
-       maybe_wipe_obj_freeptr(s, kasan_reset_tag(object));
+       maybe_wipe_obj_freeptr(s, object);
 
        if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
                memset(kasan_reset_tag(object), 0, s->object_size);
@@ -3329,7 +3330,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
                int j;
 
                for (j = 0; j < i; j++)
-                       memset(p[j], 0, s->object_size);
+                       memset(kasan_reset_tag(p[j]), 0, s->object_size);
        }
 
        /* memcg and kmem_cache debug support */
@@ -5624,10 +5625,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
 
        s->kobj.kset = kset;
        err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
-       if (err) {
-               kobject_put(&s->kobj);
+       if (err)
                goto out;
-       }
 
        err = sysfs_create_group(&s->kobj, &slab_attr_group);
        if (err)
index 1883118..32a48e5 100644 (file)
@@ -88,4 +88,33 @@ int br_mrp_switchdev_send_in_test(struct net_bridge *br, struct br_mrp *mrp,
 int br_mrp_ring_port_open(struct net_device *dev, u8 loc);
 int br_mrp_in_port_open(struct net_device *dev, u8 loc);
 
+/* MRP protocol data units */
+struct br_mrp_tlv_hdr {
+       __u8 type;
+       __u8 length;
+};
+
+struct br_mrp_common_hdr {
+       __be16 seq_id;
+       __u8 domain[MRP_DOMAIN_UUID_LENGTH];
+};
+
+struct br_mrp_ring_test_hdr {
+       __be16 prio;
+       __u8 sa[ETH_ALEN];
+       __be16 port_role;
+       __be16 state;
+       __be16 transitions;
+       __be32 timestamp;
+} __attribute__((__packed__));
+
+struct br_mrp_in_test_hdr {
+       __be16 id;
+       __u8 sa[ETH_ALEN];
+       __be16 port_role;
+       __be16 state;
+       __be16 transitions;
+       __be32 timestamp;
+} __attribute__((__packed__));
+
 #endif /* _BR_PRIVATE_MRP_H */
index 9815cfe..ca44c32 100644 (file)
@@ -569,6 +569,34 @@ e_range:
        return -ERANGE;
 }
 
+static int decode_con_secret(void **p, void *end, u8 *con_secret,
+                            int *con_secret_len)
+{
+       int len;
+
+       ceph_decode_32_safe(p, end, len, bad);
+       ceph_decode_need(p, end, len, bad);
+
+       dout("%s len %d\n", __func__, len);
+       if (con_secret) {
+               if (len > CEPH_MAX_CON_SECRET_LEN) {
+                       pr_err("connection secret too big %d\n", len);
+                       goto bad_memzero;
+               }
+               memcpy(con_secret, *p, len);
+               *con_secret_len = len;
+       }
+       memzero_explicit(*p, len);
+       *p += len;
+       return 0;
+
+bad_memzero:
+       memzero_explicit(*p, len);
+bad:
+       pr_err("failed to decode connection secret\n");
+       return -EINVAL;
+}
+
 static int handle_auth_session_key(struct ceph_auth_client *ac,
                                   void **p, void *end,
                                   u8 *session_key, int *session_key_len,
@@ -612,17 +640,9 @@ static int handle_auth_session_key(struct ceph_auth_client *ac,
                dout("%s decrypted %d bytes\n", __func__, ret);
                dend = dp + ret;
 
-               ceph_decode_32_safe(&dp, dend, len, e_inval);
-               if (len > CEPH_MAX_CON_SECRET_LEN) {
-                       pr_err("connection secret too big %d\n", len);
-                       return -EINVAL;
-               }
-
-               dout("%s connection secret len %d\n", __func__, len);
-               if (con_secret) {
-                       memcpy(con_secret, dp, len);
-                       *con_secret_len = len;
-               }
+               ret = decode_con_secret(&dp, dend, con_secret, con_secret_len);
+               if (ret)
+                       return ret;
        }
 
        /* service tickets */
@@ -828,7 +848,6 @@ static int decrypt_authorizer_reply(struct ceph_crypto_key *secret,
 {
        void *dp, *dend;
        u8 struct_v;
-       int len;
        int ret;
 
        dp = *p + ceph_x_encrypt_offset();
@@ -843,17 +862,9 @@ static int decrypt_authorizer_reply(struct ceph_crypto_key *secret,
        ceph_decode_64_safe(&dp, dend, *nonce_plus_one, e_inval);
        dout("%s nonce_plus_one %llu\n", __func__, *nonce_plus_one);
        if (struct_v >= 2) {
-               ceph_decode_32_safe(&dp, dend, len, e_inval);
-               if (len > CEPH_MAX_CON_SECRET_LEN) {
-                       pr_err("connection secret too big %d\n", len);
-                       return -EINVAL;
-               }
-
-               dout("%s connection secret len %d\n", __func__, len);
-               if (con_secret) {
-                       memcpy(con_secret, dp, len);
-                       *con_secret_len = len;
-               }
+               ret = decode_con_secret(&dp, dend, con_secret, con_secret_len);
+               if (ret)
+                       return ret;
        }
 
        return 0;
index 4f75df4..92d89b3 100644 (file)
@@ -96,6 +96,7 @@ int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
        key->len = ceph_decode_16(p);
        ceph_decode_need(p, end, key->len, bad);
        ret = set_secret(key, *p);
+       memzero_explicit(*p, key->len);
        *p += key->len;
        return ret;
 
@@ -134,7 +135,7 @@ int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
 void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
 {
        if (key) {
-               kfree(key->key);
+               kfree_sensitive(key->key);
                key->key = NULL;
                if (key->tfm) {
                        crypto_free_sync_skcipher(key->tfm);
index 04f653b..2cb5ffd 100644 (file)
@@ -1100,7 +1100,7 @@ static int read_partial_message(struct ceph_connection *con)
                if (ret < 0)
                        return ret;
 
-               BUG_ON(!con->in_msg ^ skip);
+               BUG_ON((!con->in_msg) ^ skip);
                if (skip) {
                        /* skip this message */
                        dout("alloc_msg said skip message\n");
index c38d8de..cc40ce4 100644 (file)
@@ -689,11 +689,10 @@ static int verify_epilogue_crcs(struct ceph_connection *con, u32 front_crc,
 }
 
 static int setup_crypto(struct ceph_connection *con,
-                       u8 *session_key, int session_key_len,
-                       u8 *con_secret, int con_secret_len)
+                       const u8 *session_key, int session_key_len,
+                       const u8 *con_secret, int con_secret_len)
 {
        unsigned int noio_flag;
-       void *p;
        int ret;
 
        dout("%s con %p con_mode %d session_key_len %d con_secret_len %d\n",
@@ -751,15 +750,14 @@ static int setup_crypto(struct ceph_connection *con,
                return ret;
        }
 
-       p = con_secret;
-       WARN_ON((unsigned long)p & crypto_aead_alignmask(con->v2.gcm_tfm));
-       ret = crypto_aead_setkey(con->v2.gcm_tfm, p, CEPH_GCM_KEY_LEN);
+       WARN_ON((unsigned long)con_secret &
+               crypto_aead_alignmask(con->v2.gcm_tfm));
+       ret = crypto_aead_setkey(con->v2.gcm_tfm, con_secret, CEPH_GCM_KEY_LEN);
        if (ret) {
                pr_err("failed to set gcm key: %d\n", ret);
                return ret;
        }
 
-       p += CEPH_GCM_KEY_LEN;
        WARN_ON(crypto_aead_ivsize(con->v2.gcm_tfm) != CEPH_GCM_IV_LEN);
        ret = crypto_aead_setauthsize(con->v2.gcm_tfm, CEPH_GCM_TAG_LEN);
        if (ret) {
@@ -777,8 +775,11 @@ static int setup_crypto(struct ceph_connection *con,
        aead_request_set_callback(con->v2.gcm_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  crypto_req_done, &con->v2.gcm_wait);
 
-       memcpy(&con->v2.in_gcm_nonce, p, CEPH_GCM_IV_LEN);
-       memcpy(&con->v2.out_gcm_nonce, p + CEPH_GCM_IV_LEN, CEPH_GCM_IV_LEN);
+       memcpy(&con->v2.in_gcm_nonce, con_secret + CEPH_GCM_KEY_LEN,
+              CEPH_GCM_IV_LEN);
+       memcpy(&con->v2.out_gcm_nonce,
+              con_secret + CEPH_GCM_KEY_LEN + CEPH_GCM_IV_LEN,
+              CEPH_GCM_IV_LEN);
        return 0;  /* auth_x, secure mode */
 }
 
@@ -800,7 +801,7 @@ static int hmac_sha256(struct ceph_connection *con, const struct kvec *kvecs,
        desc->tfm = con->v2.hmac_tfm;
        ret = crypto_shash_init(desc);
        if (ret)
-               return ret;
+               goto out;
 
        for (i = 0; i < kvec_cnt; i++) {
                WARN_ON((unsigned long)kvecs[i].iov_base &
@@ -808,15 +809,14 @@ static int hmac_sha256(struct ceph_connection *con, const struct kvec *kvecs,
                ret = crypto_shash_update(desc, kvecs[i].iov_base,
                                          kvecs[i].iov_len);
                if (ret)
-                       return ret;
+                       goto out;
        }
 
        ret = crypto_shash_final(desc, hmac);
-       if (ret)
-               return ret;
 
+out:
        shash_desc_zero(desc);
-       return 0;  /* auth_x, both plain and secure modes */
+       return ret;  /* auth_x, both plain and secure modes */
 }
 
 static void gcm_inc_nonce(struct ceph_gcm_nonce *nonce)
@@ -2072,27 +2072,32 @@ static int process_auth_done(struct ceph_connection *con, void *p, void *end)
        if (con->state != CEPH_CON_S_V2_AUTH) {
                dout("%s con %p state changed to %d\n", __func__, con,
                     con->state);
-               return -EAGAIN;
+               ret = -EAGAIN;
+               goto out;
        }
 
        dout("%s con %p handle_auth_done ret %d\n", __func__, con, ret);
        if (ret)
-               return ret;
+               goto out;
 
        ret = setup_crypto(con, session_key, session_key_len, con_secret,
                           con_secret_len);
        if (ret)
-               return ret;
+               goto out;
 
        reset_out_kvecs(con);
        ret = prepare_auth_signature(con);
        if (ret) {
                pr_err("prepare_auth_signature failed: %d\n", ret);
-               return ret;
+               goto out;
        }
 
        con->state = CEPH_CON_S_V2_AUTH_SIGNATURE;
-       return 0;
+
+out:
+       memzero_explicit(session_key_buf, sizeof(session_key_buf));
+       memzero_explicit(con_secret_buf, sizeof(con_secret_buf));
+       return ret;
 
 bad:
        pr_err("failed to decode auth_done\n");
@@ -3436,6 +3441,8 @@ void ceph_con_v2_reset_protocol(struct ceph_connection *con)
        }
 
        con->v2.con_mode = CEPH_CON_MODE_UNKNOWN;
+       memzero_explicit(&con->v2.in_gcm_nonce, CEPH_GCM_IV_LEN);
+       memzero_explicit(&con->v2.out_gcm_nonce, CEPH_GCM_IV_LEN);
 
        if (con->v2.hmac_tfm) {
                crypto_free_shash(con->v2.hmac_tfm);
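
The offsets used in setup_crypto() above imply the connection secret layout (sketch of the assumed layout, consistent with the setkey() and memcpy() offsets):

	/* con_secret:
	 *   [ AEAD key : CEPH_GCM_KEY_LEN ][ rx nonce : CEPH_GCM_IV_LEN ][ tx nonce : CEPH_GCM_IV_LEN ]
	 */

The surrounding hunks are about not leaving that material behind: process_auth_done() now wipes the session and connection secret buffers on every exit path, hmac_sha256() zeroes its shash descriptor even on error, and the GCM nonces are cleared when the protocol state is reset.
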
index b9d54ed..195ceb8 100644 (file)
@@ -1433,7 +1433,7 @@ static int mon_handle_auth_bad_method(struct ceph_connection *con,
 /*
  * handle incoming message
  */
-static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
+static void mon_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
 {
        struct ceph_mon_client *monc = con->private;
        int type = le16_to_cpu(msg->hdr.type);
@@ -1565,21 +1565,21 @@ static void mon_fault(struct ceph_connection *con)
  * will come from the messenger workqueue, which is drained prior to
  * mon_client destruction.
  */
-static struct ceph_connection *con_get(struct ceph_connection *con)
+static struct ceph_connection *mon_get_con(struct ceph_connection *con)
 {
        return con;
 }
 
-static void con_put(struct ceph_connection *con)
+static void mon_put_con(struct ceph_connection *con)
 {
 }
 
 static const struct ceph_connection_operations mon_con_ops = {
-       .get = con_get,
-       .put = con_put,
-       .dispatch = dispatch,
-       .fault = mon_fault,
+       .get = mon_get_con,
+       .put = mon_put_con,
        .alloc_msg = mon_alloc_msg,
+       .dispatch = mon_dispatch,
+       .fault = mon_fault,
        .get_auth_request = mon_get_auth_request,
        .handle_auth_reply_more = mon_handle_auth_reply_more,
        .handle_auth_done = mon_handle_auth_done,
index 61229c5..ff8624a 100644 (file)
@@ -5412,7 +5412,7 @@ void ceph_osdc_cleanup(void)
 /*
  * handle incoming message
  */
-static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
+static void osd_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
 {
        struct ceph_osd *osd = con->private;
        struct ceph_osd_client *osdc = osd->o_osdc;
@@ -5534,9 +5534,9 @@ static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
        return m;
 }
 
-static struct ceph_msg *alloc_msg(struct ceph_connection *con,
-                                 struct ceph_msg_header *hdr,
-                                 int *skip)
+static struct ceph_msg *osd_alloc_msg(struct ceph_connection *con,
+                                     struct ceph_msg_header *hdr,
+                                     int *skip)
 {
        struct ceph_osd *osd = con->private;
        int type = le16_to_cpu(hdr->type);
@@ -5560,7 +5560,7 @@ static struct ceph_msg *alloc_msg(struct ceph_connection *con,
 /*
  * Wrappers to refcount containing ceph_osd struct
  */
-static struct ceph_connection *get_osd_con(struct ceph_connection *con)
+static struct ceph_connection *osd_get_con(struct ceph_connection *con)
 {
        struct ceph_osd *osd = con->private;
        if (get_osd(osd))
@@ -5568,7 +5568,7 @@ static struct ceph_connection *get_osd_con(struct ceph_connection *con)
        return NULL;
 }
 
-static void put_osd_con(struct ceph_connection *con)
+static void osd_put_con(struct ceph_connection *con)
 {
        struct ceph_osd *osd = con->private;
        put_osd(osd);
@@ -5582,8 +5582,8 @@ static void put_osd_con(struct ceph_connection *con)
  * Note: returned pointer is the address of a structure that's
  * managed separately.  Caller must *not* attempt to free it.
  */
-static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
-                                       int *proto, int force_new)
+static struct ceph_auth_handshake *
+osd_get_authorizer(struct ceph_connection *con, int *proto, int force_new)
 {
        struct ceph_osd *o = con->private;
        struct ceph_osd_client *osdc = o->o_osdc;
@@ -5599,7 +5599,7 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
        return auth;
 }
 
-static int add_authorizer_challenge(struct ceph_connection *con,
+static int osd_add_authorizer_challenge(struct ceph_connection *con,
                                    void *challenge_buf, int challenge_buf_len)
 {
        struct ceph_osd *o = con->private;
@@ -5610,7 +5610,7 @@ static int add_authorizer_challenge(struct ceph_connection *con,
                                            challenge_buf, challenge_buf_len);
 }
 
-static int verify_authorizer_reply(struct ceph_connection *con)
+static int osd_verify_authorizer_reply(struct ceph_connection *con)
 {
        struct ceph_osd *o = con->private;
        struct ceph_osd_client *osdc = o->o_osdc;
@@ -5622,7 +5622,7 @@ static int verify_authorizer_reply(struct ceph_connection *con)
                NULL, NULL, NULL, NULL);
 }
 
-static int invalidate_authorizer(struct ceph_connection *con)
+static int osd_invalidate_authorizer(struct ceph_connection *con)
 {
        struct ceph_osd *o = con->private;
        struct ceph_osd_client *osdc = o->o_osdc;
@@ -5731,18 +5731,18 @@ static int osd_check_message_signature(struct ceph_msg *msg)
 }
 
 static const struct ceph_connection_operations osd_con_ops = {
-       .get = get_osd_con,
-       .put = put_osd_con,
-       .dispatch = dispatch,
-       .get_authorizer = get_authorizer,
-       .add_authorizer_challenge = add_authorizer_challenge,
-       .verify_authorizer_reply = verify_authorizer_reply,
-       .invalidate_authorizer = invalidate_authorizer,
-       .alloc_msg = alloc_msg,
+       .get = osd_get_con,
+       .put = osd_put_con,
+       .alloc_msg = osd_alloc_msg,
+       .dispatch = osd_dispatch,
+       .fault = osd_fault,
        .reencode_message = osd_reencode_message,
+       .get_authorizer = osd_get_authorizer,
+       .add_authorizer_challenge = osd_add_authorizer_challenge,
+       .verify_authorizer_reply = osd_verify_authorizer_reply,
+       .invalidate_authorizer = osd_invalidate_authorizer,
        .sign_message = osd_sign_message,
        .check_message_signature = osd_check_message_signature,
-       .fault = osd_fault,
        .get_auth_request = osd_get_auth_request,
        .handle_auth_reply_more = osd_handle_auth_reply_more,
        .handle_auth_done = osd_handle_auth_done,
index 4cac31d..2193ae5 100644 (file)
@@ -1035,7 +1035,7 @@ source_ok:
                        fld.saddr = dnet_select_source(dev_out, 0,
                                                       RT_SCOPE_HOST);
                        if (!fld.daddr)
-                               goto out;
+                               goto done;
                }
                fld.flowidn_oif = LOOPBACK_IFINDEX;
                res.type = RTN_LOCAL;
index d4f66ab..a8f8f98 100644 (file)
@@ -2859,7 +2859,8 @@ static void tcp_identify_packet_loss(struct sock *sk, int *ack_flag)
        } else if (tcp_is_rack(sk)) {
                u32 prior_retrans = tp->retrans_out;
 
-               tcp_rack_mark_lost(sk);
+               if (tcp_rack_mark_lost(sk))
+                       *ack_flag &= ~FLAG_SET_XMIT_TIMER;
                if (prior_retrans > tp->retrans_out)
                        *ack_flag |= FLAG_LOST_RETRANS;
        }
@@ -3392,8 +3393,8 @@ static void tcp_ack_probe(struct sock *sk)
        } else {
                unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
 
-               tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
-                                    when, TCP_RTO_MAX);
+               when = tcp_clamp_probe0_to_user_timeout(sk, when);
+               tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when, TCP_RTO_MAX);
        }
 }
 
@@ -3816,9 +3817,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
        if (tp->tlp_high_seq)
                tcp_process_tlp_ack(sk, ack, flag);
-       /* If needed, reset TLP/RTO timer; RACK may later override this. */
-       if (flag & FLAG_SET_XMIT_TIMER)
-               tcp_set_xmit_timer(sk);
 
        if (tcp_ack_is_dubious(sk, flag)) {
                if (!(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP))) {
@@ -3831,6 +3829,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
                                      &rexmit);
        }
 
+       /* If needed, reset TLP/RTO timer when RACK doesn't set. */
+       if (flag & FLAG_SET_XMIT_TIMER)
+               tcp_set_xmit_timer(sk);
+
        if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
                sk_dst_confirm(sk);
 
index b0a6b0b..fbf140a 100644 (file)
@@ -4099,6 +4099,8 @@ void tcp_send_probe0(struct sock *sk)
                 */
                timeout = TCP_RESOURCE_PROBE_INTERVAL;
        }
+
+       timeout = tcp_clamp_probe0_to_user_timeout(sk, timeout);
        tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX);
 }
 
index 177307a..6f1b4ac 100644 (file)
@@ -96,13 +96,13 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
        }
 }
 
-void tcp_rack_mark_lost(struct sock *sk)
+bool tcp_rack_mark_lost(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        u32 timeout;
 
        if (!tp->rack.advanced)
-               return;
+               return false;
 
        /* Reset the advanced flag to avoid unnecessary queue scanning */
        tp->rack.advanced = 0;
@@ -112,6 +112,7 @@ void tcp_rack_mark_lost(struct sock *sk)
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
                                          timeout, inet_csk(sk)->icsk_rto);
        }
+       return !!timeout;
 }
 
 /* Record the most recently (re)sent time among the (s)acked packets
index faa9294..4ef0807 100644 (file)
@@ -40,6 +40,24 @@ static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
        return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
 }
 
+u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
+{
+       struct inet_connection_sock *icsk = inet_csk(sk);
+       u32 remaining;
+       s32 elapsed;
+
+       if (!icsk->icsk_user_timeout || !icsk->icsk_probes_tstamp)
+               return when;
+
+       elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp;
+       if (unlikely(elapsed < 0))
+               elapsed = 0;
+       remaining = msecs_to_jiffies(icsk->icsk_user_timeout) - elapsed;
+       remaining = max_t(u32, remaining, TCP_TIMEOUT_MIN);
+
+       return min_t(u32, remaining, when);
+}
+
 /**
  *  tcp_write_err() - close socket and save error info
  *  @sk:  The socket the error has appeared on.
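
A worked example of the new clamping (values illustrative): with TCP_USER_TIMEOUT set to 30,000 ms and icsk_probes_tstamp recorded 25 s ago, the remaining budget is about 5 s, so a zero-window probe whose exponential backoff would otherwise schedule it 60 s out is pulled in to roughly 5 s; the max_t() against TCP_TIMEOUT_MIN only keeps the clamped value from collapsing to zero as the budget runs out.
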
index c12dbc5..ef9b4ac 100644 (file)
@@ -2902,7 +2902,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t)
                        break;
                if (!aalg->pfkey_supported)
                        continue;
-               if (aalg_tmpl_set(t, aalg) && aalg->available)
+               if (aalg_tmpl_set(t, aalg))
                        sz += sizeof(struct sadb_comb);
        }
        return sz + sizeof(struct sadb_prop);
@@ -2920,7 +2920,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
                if (!ealg->pfkey_supported)
                        continue;
 
-               if (!(ealg_tmpl_set(t, ealg) && ealg->available))
+               if (!(ealg_tmpl_set(t, ealg)))
                        continue;
 
                for (k = 1; ; k++) {
@@ -2931,7 +2931,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
                        if (!aalg->pfkey_supported)
                                continue;
 
-                       if (aalg_tmpl_set(t, aalg) && aalg->available)
+                       if (aalg_tmpl_set(t, aalg))
                                sz += sizeof(struct sadb_comb);
                }
        }
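
Dropping the ->available tests presumably keeps this sizing pass consistent with the code that later fills in the sadb_comb entries, which filters only on pfkey_supported; an algorithm whose availability changes between the two passes (for instance when its crypto module loads) could otherwise leave the proposal buffer undersized.
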
index 4096188..0511bbe 100644 (file)
@@ -122,6 +122,8 @@ static struct lapb_cb *lapb_create_cb(void)
 
        timer_setup(&lapb->t1timer, NULL, 0);
        timer_setup(&lapb->t2timer, NULL, 0);
+       lapb->t1timer_stop = true;
+       lapb->t2timer_stop = true;
 
        lapb->t1      = LAPB_DEFAULT_T1;
        lapb->t2      = LAPB_DEFAULT_T2;
@@ -129,6 +131,8 @@ static struct lapb_cb *lapb_create_cb(void)
        lapb->mode    = LAPB_DEFAULT_MODE;
        lapb->window  = LAPB_DEFAULT_WINDOW;
        lapb->state   = LAPB_STATE_0;
+
+       spin_lock_init(&lapb->lock);
        refcount_set(&lapb->refcnt, 1);
 out:
        return lapb;
@@ -178,11 +182,23 @@ int lapb_unregister(struct net_device *dev)
                goto out;
        lapb_put(lapb);
 
+       /* Wait for other refs to "lapb" to drop */
+       while (refcount_read(&lapb->refcnt) > 2)
+               usleep_range(1, 10);
+
+       spin_lock_bh(&lapb->lock);
+
        lapb_stop_t1timer(lapb);
        lapb_stop_t2timer(lapb);
 
        lapb_clear_queues(lapb);
 
+       spin_unlock_bh(&lapb->lock);
+
+       /* Wait for running timers to stop */
+       del_timer_sync(&lapb->t1timer);
+       del_timer_sync(&lapb->t2timer);
+
        __lapb_remove_cb(lapb);
 
        lapb_put(lapb);
@@ -201,6 +217,8 @@ int lapb_getparms(struct net_device *dev, struct lapb_parms_struct *parms)
        if (!lapb)
                goto out;
 
+       spin_lock_bh(&lapb->lock);
+
        parms->t1      = lapb->t1 / HZ;
        parms->t2      = lapb->t2 / HZ;
        parms->n2      = lapb->n2;
@@ -219,6 +237,7 @@ int lapb_getparms(struct net_device *dev, struct lapb_parms_struct *parms)
        else
                parms->t2timer = (lapb->t2timer.expires - jiffies) / HZ;
 
+       spin_unlock_bh(&lapb->lock);
        lapb_put(lapb);
        rc = LAPB_OK;
 out:
@@ -234,6 +253,8 @@ int lapb_setparms(struct net_device *dev, struct lapb_parms_struct *parms)
        if (!lapb)
                goto out;
 
+       spin_lock_bh(&lapb->lock);
+
        rc = LAPB_INVALUE;
        if (parms->t1 < 1 || parms->t2 < 1 || parms->n2 < 1)
                goto out_put;
@@ -256,6 +277,7 @@ int lapb_setparms(struct net_device *dev, struct lapb_parms_struct *parms)
 
        rc = LAPB_OK;
 out_put:
+       spin_unlock_bh(&lapb->lock);
        lapb_put(lapb);
 out:
        return rc;
@@ -270,6 +292,8 @@ int lapb_connect_request(struct net_device *dev)
        if (!lapb)
                goto out;
 
+       spin_lock_bh(&lapb->lock);
+
        rc = LAPB_OK;
        if (lapb->state == LAPB_STATE_1)
                goto out_put;
@@ -285,24 +309,18 @@ int lapb_connect_request(struct net_device *dev)
 
        rc = LAPB_OK;
 out_put:
+       spin_unlock_bh(&lapb->lock);
        lapb_put(lapb);
 out:
        return rc;
 }
 EXPORT_SYMBOL(lapb_connect_request);
 
-int lapb_disconnect_request(struct net_device *dev)
+static int __lapb_disconnect_request(struct lapb_cb *lapb)
 {
-       struct lapb_cb *lapb = lapb_devtostruct(dev);
-       int rc = LAPB_BADTOKEN;
-
-       if (!lapb)
-               goto out;
-
        switch (lapb->state) {
        case LAPB_STATE_0:
-               rc = LAPB_NOTCONNECTED;
-               goto out_put;
+               return LAPB_NOTCONNECTED;
 
        case LAPB_STATE_1:
                lapb_dbg(1, "(%p) S1 TX DISC(1)\n", lapb->dev);
@@ -310,12 +328,10 @@ int lapb_disconnect_request(struct net_device *dev)
                lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND);
                lapb->state = LAPB_STATE_0;
                lapb_start_t1timer(lapb);
-               rc = LAPB_NOTCONNECTED;
-               goto out_put;
+               return LAPB_NOTCONNECTED;
 
        case LAPB_STATE_2:
-               rc = LAPB_OK;
-               goto out_put;
+               return LAPB_OK;
        }
 
        lapb_clear_queues(lapb);
@@ -328,8 +344,22 @@ int lapb_disconnect_request(struct net_device *dev)
        lapb_dbg(1, "(%p) S3 DISC(1)\n", lapb->dev);
        lapb_dbg(0, "(%p) S3 -> S2\n", lapb->dev);
 
-       rc = LAPB_OK;
-out_put:
+       return LAPB_OK;
+}
+
+int lapb_disconnect_request(struct net_device *dev)
+{
+       struct lapb_cb *lapb = lapb_devtostruct(dev);
+       int rc = LAPB_BADTOKEN;
+
+       if (!lapb)
+               goto out;
+
+       spin_lock_bh(&lapb->lock);
+
+       rc = __lapb_disconnect_request(lapb);
+
+       spin_unlock_bh(&lapb->lock);
        lapb_put(lapb);
 out:
        return rc;
@@ -344,6 +374,8 @@ int lapb_data_request(struct net_device *dev, struct sk_buff *skb)
        if (!lapb)
                goto out;
 
+       spin_lock_bh(&lapb->lock);
+
        rc = LAPB_NOTCONNECTED;
        if (lapb->state != LAPB_STATE_3 && lapb->state != LAPB_STATE_4)
                goto out_put;
@@ -352,6 +384,7 @@ int lapb_data_request(struct net_device *dev, struct sk_buff *skb)
        lapb_kick(lapb);
        rc = LAPB_OK;
 out_put:
+       spin_unlock_bh(&lapb->lock);
        lapb_put(lapb);
 out:
        return rc;
@@ -364,7 +397,9 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
        int rc = LAPB_BADTOKEN;
 
        if (lapb) {
+               spin_lock_bh(&lapb->lock);
                lapb_data_input(lapb, skb);
+               spin_unlock_bh(&lapb->lock);
                lapb_put(lapb);
                rc = LAPB_OK;
        }
@@ -435,6 +470,8 @@ static int lapb_device_event(struct notifier_block *this, unsigned long event,
        if (!lapb)
                return NOTIFY_DONE;
 
+       spin_lock_bh(&lapb->lock);
+
        switch (event) {
        case NETDEV_UP:
                lapb_dbg(0, "(%p) Interface up: %s\n", dev, dev->name);
@@ -454,7 +491,7 @@ static int lapb_device_event(struct notifier_block *this, unsigned long event,
                break;
        case NETDEV_GOING_DOWN:
                if (netif_carrier_ok(dev))
-                       lapb_disconnect_request(dev);
+                       __lapb_disconnect_request(lapb);
                break;
        case NETDEV_DOWN:
                lapb_dbg(0, "(%p) Interface down: %s\n", dev, dev->name);
@@ -489,6 +526,7 @@ static int lapb_device_event(struct notifier_block *this, unsigned long event,
                break;
        }
 
+       spin_unlock_bh(&lapb->lock);
        lapb_put(lapb);
        return NOTIFY_DONE;
 }
index baa247f..0230b27 100644 (file)
@@ -40,6 +40,7 @@ void lapb_start_t1timer(struct lapb_cb *lapb)
        lapb->t1timer.function = lapb_t1timer_expiry;
        lapb->t1timer.expires  = jiffies + lapb->t1;
 
+       lapb->t1timer_stop = false;
        add_timer(&lapb->t1timer);
 }
 
@@ -50,16 +51,19 @@ void lapb_start_t2timer(struct lapb_cb *lapb)
        lapb->t2timer.function = lapb_t2timer_expiry;
        lapb->t2timer.expires  = jiffies + lapb->t2;
 
+       lapb->t2timer_stop = false;
        add_timer(&lapb->t2timer);
 }
 
 void lapb_stop_t1timer(struct lapb_cb *lapb)
 {
+       lapb->t1timer_stop = true;
        del_timer(&lapb->t1timer);
 }
 
 void lapb_stop_t2timer(struct lapb_cb *lapb)
 {
+       lapb->t2timer_stop = true;
        del_timer(&lapb->t2timer);
 }
 
@@ -72,16 +76,31 @@ static void lapb_t2timer_expiry(struct timer_list *t)
 {
        struct lapb_cb *lapb = from_timer(lapb, t, t2timer);
 
+       spin_lock_bh(&lapb->lock);
+       if (timer_pending(&lapb->t2timer)) /* A new timer has been set up */
+               goto out;
+       if (lapb->t2timer_stop) /* The timer has been stopped */
+               goto out;
+
        if (lapb->condition & LAPB_ACK_PENDING_CONDITION) {
                lapb->condition &= ~LAPB_ACK_PENDING_CONDITION;
                lapb_timeout_response(lapb);
        }
+
+out:
+       spin_unlock_bh(&lapb->lock);
 }
 
 static void lapb_t1timer_expiry(struct timer_list *t)
 {
        struct lapb_cb *lapb = from_timer(lapb, t, t1timer);
 
+       spin_lock_bh(&lapb->lock);
+       if (timer_pending(&lapb->t1timer)) /* A new timer has been set up */
+               goto out;
+       if (lapb->t1timer_stop) /* The timer has been stopped */
+               goto out;
+
        switch (lapb->state) {
 
                /*
@@ -108,7 +127,7 @@ static void lapb_t1timer_expiry(struct timer_list *t)
                                lapb->state = LAPB_STATE_0;
                                lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
                                lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev);
-                               return;
+                               goto out;
                        } else {
                                lapb->n2count++;
                                if (lapb->mode & LAPB_EXTENDED) {
@@ -132,7 +151,7 @@ static void lapb_t1timer_expiry(struct timer_list *t)
                                lapb->state = LAPB_STATE_0;
                                lapb_disconnect_confirmation(lapb, LAPB_TIMEDOUT);
                                lapb_dbg(0, "(%p) S2 -> S0\n", lapb->dev);
-                               return;
+                               goto out;
                        } else {
                                lapb->n2count++;
                                lapb_dbg(1, "(%p) S2 TX DISC(1)\n", lapb->dev);
@@ -150,7 +169,7 @@ static void lapb_t1timer_expiry(struct timer_list *t)
                                lapb_stop_t2timer(lapb);
                                lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
                                lapb_dbg(0, "(%p) S3 -> S0\n", lapb->dev);
-                               return;
+                               goto out;
                        } else {
                                lapb->n2count++;
                                lapb_requeue_frames(lapb);
@@ -167,7 +186,7 @@ static void lapb_t1timer_expiry(struct timer_list *t)
                                lapb->state = LAPB_STATE_0;
                                lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
                                lapb_dbg(0, "(%p) S4 -> S0\n", lapb->dev);
-                               return;
+                               goto out;
                        } else {
                                lapb->n2count++;
                                lapb_transmit_frmr(lapb);
@@ -176,4 +195,7 @@ static void lapb_t1timer_expiry(struct timer_list *t)
        }
 
        lapb_start_t1timer(lapb);
+
+out:
+       spin_unlock_bh(&lapb->lock);
 }
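
The lapb_timer.c hunks above all apply one pattern: an expiry handler that has already fired can no longer be cancelled by del_timer(), so a per-timer stop flag plus the lapb lock lets the handler notice that it lost a race with a stop or a restart. A minimal sketch of the same pattern, assuming a hypothetical struct foo rather than the real lapb_cb:

    #include <linux/spinlock.h>
    #include <linux/timer.h>
    #include <linux/types.h>

    struct foo {
            spinlock_t lock;
            struct timer_list t1timer;
            bool t1timer_stop;
    };

    static void foo_t1timer_expiry(struct timer_list *t)
    {
            struct foo *f = from_timer(f, t, t1timer);

            spin_lock_bh(&f->lock);
            if (timer_pending(&f->t1timer))   /* re-armed while waiting for the lock */
                    goto out;
            if (f->t1timer_stop)              /* foo_stop_t1timer() ran after the timer fired */
                    goto out;
            /* ... timeout handling, protected by f->lock ... */
    out:
            spin_unlock_bh(&f->lock);
    }

    static void foo_stop_t1timer(struct foo *f)
    {
            f->t1timer_stop = true;           /* seen by an expiry that del_timer() cannot cancel */
            del_timer(&f->t1timer);
    }

The restart path clears the flag before add_timer(), as in the hunks above, so a pending stop does not suppress a later, legitimate expiry.
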
index 982fdc1..ecda126 100644 (file)
@@ -1077,6 +1077,7 @@ enum queue_stop_reason {
        IEEE80211_QUEUE_STOP_REASON_FLUSH,
        IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN,
        IEEE80211_QUEUE_STOP_REASON_RESERVE_TID,
+       IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE,
 
        IEEE80211_QUEUE_STOP_REASONS,
 };
index 4bc350c..b80c9b0 100644 (file)
@@ -1633,6 +1633,10 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
        if (ret)
                return ret;
 
+       ieee80211_stop_vif_queues(local, sdata,
+                                 IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE);
+       synchronize_net();
+
        ieee80211_do_stop(sdata, false);
 
        ieee80211_teardown_sdata(sdata);
@@ -1655,6 +1659,8 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
        err = ieee80211_do_open(&sdata->wdev, false);
        WARN(err, "type change: do_open returned %d", err);
 
+       ieee80211_wake_vif_queues(local, sdata,
+                                 IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE);
        return ret;
 }
 
index ae1cb2c..76747bf 100644 (file)
@@ -133,16 +133,20 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
        }
 
        if (wide_bw_chansw_ie) {
+               u8 new_seg1 = wide_bw_chansw_ie->new_center_freq_seg1;
                struct ieee80211_vht_operation vht_oper = {
                        .chan_width =
                                wide_bw_chansw_ie->new_channel_width,
                        .center_freq_seg0_idx =
                                wide_bw_chansw_ie->new_center_freq_seg0,
-                       .center_freq_seg1_idx =
-                               wide_bw_chansw_ie->new_center_freq_seg1,
+                       .center_freq_seg1_idx = new_seg1,
                        /* .basic_mcs_set doesn't matter */
                };
-               struct ieee80211_ht_operation ht_oper = {};
+               struct ieee80211_ht_operation ht_oper = {
+                       .operation_mode =
+                               cpu_to_le16(new_seg1 <<
+                                           IEEE80211_HT_OP_MODE_CCFS2_SHIFT),
+               };
 
                /* default, for the case of IEEE80211_VHT_CHANWIDTH_USE_HT,
                 * to the previously parsed chandef
index 15c467f..8d3aa97 100644 (file)
@@ -5235,9 +5235,8 @@ static void nf_tables_set_elem_destroy(const struct nft_ctx *ctx,
        kfree(elem);
 }
 
-static int nft_set_elem_expr_clone(const struct nft_ctx *ctx,
-                                  struct nft_set *set,
-                                  struct nft_expr *expr_array[])
+int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set,
+                           struct nft_expr *expr_array[])
 {
        struct nft_expr *expr;
        int err, i, k;
index 0b053f7..d164ef9 100644 (file)
@@ -295,6 +295,12 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
                        err = -EOPNOTSUPP;
                        goto err_expr_free;
                }
+       } else if (set->num_exprs > 0) {
+               err = nft_set_elem_expr_clone(ctx, set, priv->expr_array);
+               if (err < 0)
+                       return err;
+
+               priv->num_exprs = set->num_exprs;
        }
 
        nft_set_ext_prepare(&priv->tmpl);
@@ -306,8 +312,10 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
                nft_dynset_ext_add_expr(priv);
 
        if (set->flags & NFT_SET_TIMEOUT) {
-               if (timeout || set->timeout)
+               if (timeout || set->timeout) {
+                       nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_TIMEOUT);
                        nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_EXPIRATION);
+               }
        }
 
        priv->timeout = timeout;
@@ -376,22 +384,25 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
                         nf_jiffies64_to_msecs(priv->timeout),
                         NFTA_DYNSET_PAD))
                goto nla_put_failure;
-       if (priv->num_exprs == 1) {
-               if (nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr_array[0]))
-                       goto nla_put_failure;
-       } else if (priv->num_exprs > 1) {
-               struct nlattr *nest;
-
-               nest = nla_nest_start_noflag(skb, NFTA_DYNSET_EXPRESSIONS);
-               if (!nest)
-                       goto nla_put_failure;
-
-               for (i = 0; i < priv->num_exprs; i++) {
-                       if (nft_expr_dump(skb, NFTA_LIST_ELEM,
-                                         priv->expr_array[i]))
+       if (priv->set->num_exprs == 0) {
+               if (priv->num_exprs == 1) {
+                       if (nft_expr_dump(skb, NFTA_DYNSET_EXPR,
+                                         priv->expr_array[0]))
                                goto nla_put_failure;
+               } else if (priv->num_exprs > 1) {
+                       struct nlattr *nest;
+
+                       nest = nla_nest_start_noflag(skb, NFTA_DYNSET_EXPRESSIONS);
+                       if (!nest)
+                               goto nla_put_failure;
+
+                       for (i = 0; i < priv->num_exprs; i++) {
+                               if (nft_expr_dump(skb, NFTA_LIST_ELEM,
+                                                 priv->expr_array[i]))
+                                       goto nla_put_failure;
+                       }
+                       nla_nest_end(skb, nest);
                }
-               nla_nest_end(skb, nest);
        }
        if (nla_put_be32(skb, NFTA_DYNSET_FLAGS, htonl(flags)))
                goto nla_put_failure;
index 6409063..722f7ef 100644 (file)
@@ -852,6 +852,7 @@ static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info)
 
        if (!dev->polling) {
                device_unlock(&dev->dev);
+               nfc_put_device(dev);
                return -EINVAL;
        }
 
index 955c195..9c7eb84 100644 (file)
@@ -105,7 +105,7 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr,
        if (addr->target_idx > dev->target_next_idx - 1 ||
            addr->target_idx < dev->target_next_idx - dev->n_targets) {
                rc = -EINVAL;
-               goto error;
+               goto put_dev;
        }
 
        rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol);
index 382add7..1ae90fb 100644 (file)
@@ -197,6 +197,7 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
        tail = b->peer_backlog_tail;
        while (CIRC_CNT(head, tail, size) > 0) {
                struct rxrpc_peer *peer = b->peer_backlog[tail];
+               rxrpc_put_local(peer->local);
                kfree(peer);
                tail = (tail + 1) & (size - 1);
        }
index 855a10f..94113ca 100644 (file)
@@ -388,9 +388,10 @@ static int __switchdev_handle_port_obj_add(struct net_device *dev,
        extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
 
        if (check_cb(dev)) {
-               /* This flag is only checked if the return value is success. */
-               port_obj_info->handled = true;
-               return add_cb(dev, port_obj_info->obj, extack);
+               err = add_cb(dev, port_obj_info->obj, extack);
+               if (err != -EOPNOTSUPP)
+                       port_obj_info->handled = true;
+               return err;
        }
 
        /* Switch ports might be stacked under e.g. a LAG. Ignore the
@@ -441,9 +442,10 @@ static int __switchdev_handle_port_obj_del(struct net_device *dev,
        int err = -EOPNOTSUPP;
 
        if (check_cb(dev)) {
-               /* This flag is only checked if the return value is success. */
-               port_obj_info->handled = true;
-               return del_cb(dev, port_obj_info->obj);
+               err = del_cb(dev, port_obj_info->obj);
+               if (err != -EOPNOTSUPP)
+                       port_obj_info->handled = true;
+               return err;
        }
 
        /* Switch ports might be stacked under e.g. a LAG. Ignore the
@@ -493,8 +495,10 @@ static int __switchdev_handle_port_attr_set(struct net_device *dev,
        int err = -EOPNOTSUPP;
 
        if (check_cb(dev)) {
-               port_attr_info->handled = true;
-               return set_cb(dev, port_attr_info->attr);
+               err = set_cb(dev, port_attr_info->attr);
+               if (err != -EOPNOTSUPP)
+                       port_attr_info->handled = true;
+               return err;
        }
 
        /* Switch ports might be stacked under e.g. a LAG. Ignore the
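
The three switchdev hunks above share one rule: the notification is marked handled unless the callback explicitly returned -EOPNOTSUPP, and the callback's return value is propagated rather than returned unconditionally. A minimal sketch of that dispatch rule, using made-up names (demo_info, demo_dispatch) instead of the real switchdev helpers:

    #include <linux/errno.h>
    #include <linux/types.h>

    struct demo_info {
            const void *obj;
            bool handled;
    };

    static int demo_dispatch(struct demo_info *info, int (*cb)(const void *obj))
    {
            int err = cb(info->obj);

            if (err != -EOPNOTSUPP)
                    info->handled = true;   /* only an explicit "not mine" leaves it unhandled */
            return err;                     /* real errors are reported, not masked */
    }
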
index 69102fd..76a80a4 100644 (file)
@@ -896,8 +896,9 @@ out:
 int call_commit_handler(struct net_device *dev)
 {
 #ifdef CONFIG_WIRELESS_EXT
-       if ((netif_running(dev)) &&
-          (dev->wireless_handlers->standard[0] != NULL))
+       if (netif_running(dev) &&
+           dev->wireless_handlers &&
+           dev->wireless_handlers->standard[0])
                /* Call the commit handler on the driver */
                return dev->wireless_handlers->standard[0](dev, NULL,
                                                           NULL, NULL);
index be6351e..1158cd0 100644 (file)
@@ -660,7 +660,7 @@ resume:
                /* only the first xfrm gets the encap type */
                encap_type = 0;
 
-               if (async && x->repl->recheck(x, skb, seq)) {
+               if (x->repl->recheck(x, skb, seq)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
                        goto drop_unlock;
                }
index d622c25..b74f28c 100644 (file)
@@ -793,15 +793,22 @@ static int xfrm_policy_addr_delta(const xfrm_address_t *a,
                                  const xfrm_address_t *b,
                                  u8 prefixlen, u16 family)
 {
+       u32 ma, mb, mask;
        unsigned int pdw, pbi;
        int delta = 0;
 
        switch (family) {
        case AF_INET:
-               if (sizeof(long) == 4 && prefixlen == 0)
-                       return ntohl(a->a4) - ntohl(b->a4);
-               return (ntohl(a->a4) & ((~0UL << (32 - prefixlen)))) -
-                      (ntohl(b->a4) & ((~0UL << (32 - prefixlen))));
+               if (prefixlen == 0)
+                       return 0;
+               mask = ~0U << (32 - prefixlen);
+               ma = ntohl(a->a4) & mask;
+               mb = ntohl(b->a4) & mask;
+               if (ma < mb)
+                       delta = -1;
+               else if (ma > mb)
+                       delta = 1;
+               break;
        case AF_INET6:
                pdw = prefixlen >> 5;
                pbi = prefixlen & 0x1f;
@@ -812,10 +819,13 @@ static int xfrm_policy_addr_delta(const xfrm_address_t *a,
                                return delta;
                }
                if (pbi) {
-                       u32 mask = ~0u << (32 - pbi);
-
-                       delta = (ntohl(a->a6[pdw]) & mask) -
-                               (ntohl(b->a6[pdw]) & mask);
+                       mask = ~0U << (32 - pbi);
+                       ma = ntohl(a->a6[pdw]) & mask;
+                       mb = ntohl(b->a6[pdw]) & mask;
+                       if (ma < mb)
+                               delta = -1;
+                       else if (ma > mb)
+                               delta = 1;
                }
                break;
        default:
@@ -3078,8 +3088,8 @@ struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
                xflo.flags = flags;
 
                /* To accelerate a bit...  */
-               if ((dst_orig->flags & DST_NOXFRM) ||
-                   !net->xfrm.policy_count[XFRM_POLICY_OUT])
+               if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
+                              !net->xfrm.policy_count[XFRM_POLICY_OUT]))
                        goto nopol;
 
                xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
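
The xfrm_policy_addr_delta() change above stops deriving the comparison result from a subtraction, whose value can wrap and flip sign once the masked addresses differ by more than INT_MAX, and instead compares the masked prefixes explicitly. A standalone sketch of the idea, with prefix_cmp() as a made-up name:

    #include <stdint.h>
    #include <stdio.h>

    /* Three-way compare of two IPv4 addresses (host byte order) under a prefix
     * length, avoiding the wrap/truncation hazard of "return a - b".
     */
    static int prefix_cmp(uint32_t a, uint32_t b, unsigned int prefixlen)
    {
            uint32_t mask, ma, mb;

            if (prefixlen == 0)
                    return 0;               /* /0: everything compares equal */
            mask = ~0U << (32 - prefixlen);
            ma = a & mask;
            mb = b & mask;
            if (ma < mb)
                    return -1;
            if (ma > mb)
                    return 1;
            return 0;
    }

    int main(void)
    {
            /* 10.0.0.0 vs 192.168.0.0 under /8: the masked values differ by
             * more than INT_MAX, so a subtraction truncated to int reports the
             * wrong sign; the explicit compare returns -1 as expected.
             */
            printf("%d\n", prefix_cmp(0x0a000000u, 0xc0a80000u, 8));
            return 0;
    }
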
index 9f3f8e9..c4aac70 100644 (file)
@@ -382,8 +382,8 @@ retry:
                        continue;
 
                /*
-                * The 'deps' array includes maximum three dependencies
-                * to SNDRV_PCM_HW_PARAM_XXXs for this rule. The fourth
+                * The 'deps' array includes a maximum of four dependencies
+                * to SNDRV_PCM_HW_PARAM_XXXs for this rule. The fifth
                 * member of this array is a sentinel and should be
                 * negative value.
                 *
index 11554d0..1b8409e 100644 (file)
@@ -611,7 +611,8 @@ snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_in
 
        if (info->is_midi) {
                struct midi_info minf;
-               snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf);
+               if (snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf))
+                       return -ENXIO;
                inf->synth_type = SYNTH_TYPE_MIDI;
                inf->synth_subtype = 0;
                inf->nr_voices = 16;
index 6a0d070..c456861 100644 (file)
@@ -307,6 +307,10 @@ static const struct config_entry config_table[] = {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0xa0c8,
        },
+       {
+               .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+               .device = 0x43c8,
+       },
 #endif
 
 /* Elkhart Lake */
index 687216e..eec1775 100644 (file)
@@ -2934,7 +2934,7 @@ static void hda_call_codec_resume(struct hda_codec *codec)
        snd_hdac_leave_pm(&codec->core);
 }
 
-static int hda_codec_suspend(struct device *dev)
+static int hda_codec_runtime_suspend(struct device *dev)
 {
        struct hda_codec *codec = dev_to_hda_codec(dev);
        unsigned int state;
@@ -2953,7 +2953,7 @@ static int hda_codec_suspend(struct device *dev)
        return 0;
 }
 
-static int hda_codec_resume(struct device *dev)
+static int hda_codec_runtime_resume(struct device *dev)
 {
        struct hda_codec *codec = dev_to_hda_codec(dev);
 
@@ -2968,16 +2968,6 @@ static int hda_codec_resume(struct device *dev)
        return 0;
 }
 
-static int hda_codec_runtime_suspend(struct device *dev)
-{
-       return hda_codec_suspend(dev);
-}
-
-static int hda_codec_runtime_resume(struct device *dev)
-{
-       return hda_codec_resume(dev);
-}
-
 #endif /* CONFIG_PM */
 
 #ifdef CONFIG_PM_SLEEP
@@ -2998,31 +2988,31 @@ static void hda_codec_pm_complete(struct device *dev)
 static int hda_codec_pm_suspend(struct device *dev)
 {
        dev->power.power_state = PMSG_SUSPEND;
-       return hda_codec_suspend(dev);
+       return pm_runtime_force_suspend(dev);
 }
 
 static int hda_codec_pm_resume(struct device *dev)
 {
        dev->power.power_state = PMSG_RESUME;
-       return hda_codec_resume(dev);
+       return pm_runtime_force_resume(dev);
 }
 
 static int hda_codec_pm_freeze(struct device *dev)
 {
        dev->power.power_state = PMSG_FREEZE;
-       return hda_codec_suspend(dev);
+       return pm_runtime_force_suspend(dev);
 }
 
 static int hda_codec_pm_thaw(struct device *dev)
 {
        dev->power.power_state = PMSG_THAW;
-       return hda_codec_resume(dev);
+       return pm_runtime_force_resume(dev);
 }
 
 static int hda_codec_pm_restore(struct device *dev)
 {
        dev->power.power_state = PMSG_RESTORE;
-       return hda_codec_resume(dev);
+       return pm_runtime_force_resume(dev);
 }
 #endif /* CONFIG_PM_SLEEP */
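
The hda_codec PM rework above drops the thin suspend/resume wrappers and points the system-sleep and hibernation callbacks at pm_runtime_force_suspend()/pm_runtime_force_resume(), so the device is powered down once through the runtime-PM path. A minimal sketch of a driver wired that way (the demo_* names are made up; the real code above additionally records dev->power.power_state first):

    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    static int demo_runtime_suspend(struct device *dev)
    {
            /* ... save state, power the codec down ... */
            return 0;
    }

    static int demo_runtime_resume(struct device *dev)
    {
            /* ... power up, restore state ... */
            return 0;
    }

    static const struct dev_pm_ops demo_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                    pm_runtime_force_resume)
            SET_RUNTIME_PM_OPS(demo_runtime_suspend, demo_runtime_resume, NULL)
    };
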
 
index e4dd2ff..5a50d3a 100644 (file)
@@ -2484,6 +2484,9 @@ static const struct pci_device_id azx_ids[] = {
        /* CometLake-S */
        { PCI_DEVICE(0x8086, 0xa3f0),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* CometLake-R */
+       { PCI_DEVICE(0x8086, 0xf0c8),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Icelake */
        { PCI_DEVICE(0x8086, 0x34c8),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
@@ -2507,6 +2510,9 @@ static const struct pci_device_id azx_ids[] = {
        /* Alderlake-S */
        { PCI_DEVICE(0x8086, 0x7ad0),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* Alderlake-P */
+       { PCI_DEVICE(0x8086, 0x51c8),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Elkhart Lake */
        { PCI_DEVICE(0x8086, 0x4b55),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
index 74d246a..97adff0 100644 (file)
@@ -4346,6 +4346,7 @@ HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI",       patch_i915_icl_hdmi),
 HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI",  patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x80862814, "DG1 HDMI",        patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x80862815, "Alderlake HDMI",  patch_i915_tgl_hdmi),
+HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x80862816, "Rocketlake HDMI", patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi),
 HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI",        patch_i915_icl_hdmi),
index dd82ff2..2906455 100644 (file)
@@ -6371,6 +6371,7 @@ enum {
        ALC256_FIXUP_HP_HEADSET_MIC,
        ALC236_FIXUP_DELL_AIO_HEADSET_MIC,
        ALC282_FIXUP_ACER_DISABLE_LINEOUT,
+       ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -7808,6 +7809,12 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_HEADSET_MODE
        },
+       [ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc269_fixup_limit_int_mic_boost,
+               .chained = true,
+               .chain_id = ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7826,6 +7833,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
+       SND_PCI_QUIRK(0x1025, 0x1094, "Acer Aspire E5-575T", ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x1166, "Acer Veriton N4640G", ALC269_FIXUP_LIFEBOOK),
@@ -7998,6 +8006,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x194e, "ASUS UX563FD", ALC294_FIXUP_ASUS_HPE),
+       SND_PCI_QUIRK(0x1043, 0x1982, "ASUS B1400CEPE", ALC256_FIXUP_ASUS_HPE),
        SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE),
        SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
index 0ab40a8..a5c1a2c 100644 (file)
@@ -113,6 +113,7 @@ static struct via_spec *via_new_spec(struct hda_codec *codec)
                spec->codec_type = VT1708S;
        spec->gen.indep_hp = 1;
        spec->gen.keep_eapd_on = 1;
+       spec->gen.dac_min_mute = 1;
        spec->gen.pcm_playback_hook = via_playback_pcm_hook;
        spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO;
        codec->power_save_node = 1;
@@ -1042,7 +1043,7 @@ static const struct hda_fixup via_fixups[] = {
 static const struct snd_pci_quirk vt2002p_fixups[] = {
        SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75),
        SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST),
-       SND_PCI_QUIRK(0x1558, 0x3501, "Clevo W35xSS_370SS", VIA_FIXUP_POWER_SAVE),
+       SND_PCI_QUIRK_VENDOR(0x1558, "Clevo", VIA_FIXUP_POWER_SAVE),
        {}
 };
 
index deca8c7..050a61f 100644 (file)
@@ -165,10 +165,24 @@ static int rn_acp_deinit(void __iomem *acp_base)
 
 static const struct dmi_system_id rn_acp_quirk_table[] = {
        {
-               /* Lenovo IdeaPad Flex 5 14ARE05, IdeaPad 5 15ARE05 */
+               /* Lenovo IdeaPad S340-14API */
                .matches = {
                        DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
-                       DMI_EXACT_MATCH(DMI_BOARD_NAME, "LNVNB161216"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "81NB"),
+               }
+       },
+       {
+               /* Lenovo IdeaPad Flex 5 14ARE05 */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "81X2"),
+               }
+       },
+       {
+               /* Lenovo IdeaPad 5 15ARE05 */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "81YQ"),
                }
        },
        {
index 1010c9e..472caad 100644 (file)
@@ -595,18 +595,10 @@ static struct snd_soc_dai_driver ak4497_dai = {
        .ops = &ak4458_dai_ops,
 };
 
-static void ak4458_power_off(struct ak4458_priv *ak4458)
+static void ak4458_reset(struct ak4458_priv *ak4458, bool active)
 {
        if (ak4458->reset_gpiod) {
-               gpiod_set_value_cansleep(ak4458->reset_gpiod, 0);
-               usleep_range(1000, 2000);
-       }
-}
-
-static void ak4458_power_on(struct ak4458_priv *ak4458)
-{
-       if (ak4458->reset_gpiod) {
-               gpiod_set_value_cansleep(ak4458->reset_gpiod, 1);
+               gpiod_set_value_cansleep(ak4458->reset_gpiod, active);
                usleep_range(1000, 2000);
        }
 }
@@ -620,7 +612,7 @@ static int ak4458_init(struct snd_soc_component *component)
        if (ak4458->mute_gpiod)
                gpiod_set_value_cansleep(ak4458->mute_gpiod, 1);
 
-       ak4458_power_on(ak4458);
+       ak4458_reset(ak4458, false);
 
        ret = snd_soc_component_update_bits(component, AK4458_00_CONTROL1,
                            0x80, 0x80);   /* ACKS bit = 1; 10000000 */
@@ -650,7 +642,7 @@ static void ak4458_remove(struct snd_soc_component *component)
 {
        struct ak4458_priv *ak4458 = snd_soc_component_get_drvdata(component);
 
-       ak4458_power_off(ak4458);
+       ak4458_reset(ak4458, true);
 }
 
 #ifdef CONFIG_PM
@@ -660,7 +652,7 @@ static int __maybe_unused ak4458_runtime_suspend(struct device *dev)
 
        regcache_cache_only(ak4458->regmap, true);
 
-       ak4458_power_off(ak4458);
+       ak4458_reset(ak4458, true);
 
        if (ak4458->mute_gpiod)
                gpiod_set_value_cansleep(ak4458->mute_gpiod, 0);
@@ -685,8 +677,8 @@ static int __maybe_unused ak4458_runtime_resume(struct device *dev)
        if (ak4458->mute_gpiod)
                gpiod_set_value_cansleep(ak4458->mute_gpiod, 1);
 
-       ak4458_power_off(ak4458);
-       ak4458_power_on(ak4458);
+       ak4458_reset(ak4458, true);
+       ak4458_reset(ak4458, false);
 
        regcache_cache_only(ak4458->regmap, false);
        regcache_mark_dirty(ak4458->regmap);
index d5fcc4d..0f3ac22 100644 (file)
@@ -717,7 +717,7 @@ static int hdmi_codec_set_jack(struct snd_soc_component *component,
                               void *data)
 {
        struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
-       int ret = -EOPNOTSUPP;
+       int ret = -ENOTSUPP;
 
        if (hcp->hcd.ops->hook_plugged_cb) {
                hcp->jack = jack;
index dec8716..985b2dc 100644 (file)
@@ -2031,11 +2031,14 @@ static struct wm_coeff_ctl *wm_adsp_get_ctl(struct wm_adsp *dsp,
                                             unsigned int alg)
 {
        struct wm_coeff_ctl *pos, *rslt = NULL;
+       const char *fw_txt = wm_adsp_fw_text[dsp->fw];
 
        list_for_each_entry(pos, &dsp->ctl_list, list) {
                if (!pos->subname)
                        continue;
                if (strncmp(pos->subname, name, pos->subname_len) == 0 &&
+                   strncmp(pos->fw_name, fw_txt,
+                           SNDRV_CTL_ELEM_ID_NAME_MAXLEN) == 0 &&
                                pos->alg_region.alg == alg &&
                                pos->alg_region.type == type) {
                        rslt = pos;
index ede4a9a..dbbb761 100644 (file)
@@ -90,7 +90,7 @@ static int imx_hdmi_init(struct snd_soc_pcm_runtime *rtd)
        }
 
        ret = snd_soc_component_set_jack(component, &data->hdmi_jack, NULL);
-       if (ret && ret != -EOPNOTSUPP) {
+       if (ret && ret != -ENOTSUPP) {
                dev_err(card->dev, "Can't set HDMI Jack %d\n", ret);
                return ret;
        }
index ca96890..6d0d6ef 100644 (file)
@@ -67,6 +67,16 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
                .driver_data = (void *)(SOF_RT711_JD_SRC_JD2 |
                                        SOF_RT715_DAI_ID_FIX),
        },
+       {
+               .callback = sof_sdw_quirk_cb,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A5E")
+               },
+               .driver_data = (void *)(SOF_RT711_JD_SRC_JD2 |
+                                       SOF_RT715_DAI_ID_FIX |
+                                       SOF_SDW_FOUR_SPK),
+       },
        {
                .callback = sof_sdw_quirk_cb,
                .matches = {
index ae466cd..b824086 100644 (file)
@@ -3619,19 +3619,20 @@ static void skl_tplg_complete(struct snd_soc_component *component)
 
        list_for_each_entry(dobj, &component->dobj_list, list) {
                struct snd_kcontrol *kcontrol = dobj->control.kcontrol;
-               struct soc_enum *se =
-                       (struct soc_enum *)kcontrol->private_value;
-               char **texts = dobj->control.dtexts;
+               struct soc_enum *se;
+               char **texts;
                char chan_text[4];
 
-               if (dobj->type != SND_SOC_DOBJ_ENUM ||
-                   dobj->control.kcontrol->put !=
-                   skl_tplg_multi_config_set_dmic)
+               if (dobj->type != SND_SOC_DOBJ_ENUM || !kcontrol ||
+                   kcontrol->put != skl_tplg_multi_config_set_dmic)
                        continue;
+
+               se = (struct soc_enum *)kcontrol->private_value;
+               texts = dobj->control.dtexts;
                sprintf(chan_text, "c%d", mach->mach_params.dmic_num);
 
                for (i = 0; i < se->items; i++) {
-                       struct snd_ctl_elem_value val;
+                       struct snd_ctl_elem_value val = {};
 
                        if (strstr(texts[i], chan_text)) {
                                val.value.enumerated.item[0] = i;
index 078e58f..cfbd0c6 100644 (file)
@@ -532,6 +532,7 @@ static struct snd_soc_dai_link mt8183_da7219_dai_links[] = {
                .dpcm_playback = 1,
                .ignore_suspend = 1,
                .be_hw_params_fixup = mt8183_i2s_hw_params_fixup,
+               .ignore = 1,
                .init = mt8183_da7219_max98357_hdmi_init,
                SND_SOC_DAILINK_REG(tdm),
        },
@@ -754,8 +755,10 @@ static int mt8183_da7219_max98357_dev_probe(struct platform_device *pdev)
                        }
                }
 
-               if (hdmi_codec && strcmp(dai_link->name, "TDM") == 0)
+               if (hdmi_codec && strcmp(dai_link->name, "TDM") == 0) {
                        dai_link->codecs->of_node = hdmi_codec;
+                       dai_link->ignore = 0;
+               }
 
                if (!dai_link->platforms->name)
                        dai_link->platforms->of_node = platform_node;
index 8c83408..1ce3edd 100644 (file)
@@ -515,6 +515,7 @@ static struct snd_soc_dai_link mt8183_mt6358_ts3a227_dai_links[] = {
                .ignore_suspend = 1,
                .be_hw_params_fixup = mt8183_i2s_hw_params_fixup,
                .ops = &mt8183_mt6358_tdm_ops,
+               .ignore = 1,
                .init = mt8183_mt6358_ts3a227_max98357_hdmi_init,
                SND_SOC_DAILINK_REG(tdm),
        },
@@ -661,8 +662,10 @@ mt8183_mt6358_ts3a227_max98357_dev_probe(struct platform_device *pdev)
                                                    SND_SOC_DAIFMT_CBM_CFM;
                }
 
-               if (hdmi_codec && strcmp(dai_link->name, "TDM") == 0)
+               if (hdmi_codec && strcmp(dai_link->name, "TDM") == 0) {
                        dai_link->codecs->of_node = hdmi_codec;
+                       dai_link->ignore = 0;
+               }
 
                if (!dai_link->platforms->name)
                        dai_link->platforms->of_node = platform_node;
index 716fbb4..ae2c748 100644 (file)
@@ -401,6 +401,53 @@ static const struct snd_soc_ops mt8192_mt6359_rt1015_rt5682_capture1_ops = {
        .startup = mt8192_mt6359_rt1015_rt5682_cap1_startup,
 };
 
+static int
+mt8192_mt6359_rt5682_startup(struct snd_pcm_substream *substream)
+{
+       static const unsigned int channels[] = {
+               1, 2
+       };
+       static const struct snd_pcm_hw_constraint_list constraints_channels = {
+               .count = ARRAY_SIZE(channels),
+               .list = channels,
+               .mask = 0,
+       };
+       static const unsigned int rates[] = {
+               48000
+       };
+       static const struct snd_pcm_hw_constraint_list constraints_rates = {
+               .count = ARRAY_SIZE(rates),
+               .list  = rates,
+               .mask = 0,
+       };
+
+       struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       int ret;
+
+       ret = snd_pcm_hw_constraint_list(runtime, 0,
+                                        SNDRV_PCM_HW_PARAM_CHANNELS,
+                                        &constraints_channels);
+       if (ret < 0) {
+               dev_err(rtd->dev, "hw_constraint_list channels failed\n");
+               return ret;
+       }
+
+       ret = snd_pcm_hw_constraint_list(runtime, 0,
+                                        SNDRV_PCM_HW_PARAM_RATE,
+                                        &constraints_rates);
+       if (ret < 0) {
+               dev_err(rtd->dev, "hw_constraint_list rate failed\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+static const struct snd_soc_ops mt8192_mt6359_rt5682_ops = {
+       .startup = mt8192_mt6359_rt5682_startup,
+};
+
 /* FE */
 SND_SOC_DAILINK_DEFS(playback1,
                     DAILINK_COMP_ARRAY(COMP_CPU("DL1")),
@@ -648,6 +695,7 @@ static struct snd_soc_dai_link mt8192_mt6359_dai_links[] = {
                            SND_SOC_DPCM_TRIGGER_PRE},
                .dynamic = 1,
                .dpcm_playback = 1,
+               .ops = &mt8192_mt6359_rt5682_ops,
                SND_SOC_DAILINK_REG(playback3),
        },
        {
@@ -721,6 +769,7 @@ static struct snd_soc_dai_link mt8192_mt6359_dai_links[] = {
                            SND_SOC_DPCM_TRIGGER_PRE},
                .dynamic = 1,
                .dpcm_capture = 1,
+               .ops = &mt8192_mt6359_rt5682_ops,
                SND_SOC_DAILINK_REG(capture2),
        },
        {
index c5e99c2..66b8343 100644 (file)
@@ -344,8 +344,30 @@ int asoc_qcom_lpass_cpu_dai_probe(struct snd_soc_dai *dai)
 }
 EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_dai_probe);
 
+static int asoc_qcom_of_xlate_dai_name(struct snd_soc_component *component,
+                                  struct of_phandle_args *args,
+                                  const char **dai_name)
+{
+       struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
+       struct lpass_variant *variant = drvdata->variant;
+       int id = args->args[0];
+       int ret = -EINVAL;
+       int i;
+
+       for (i = 0; i  < variant->num_dai; i++) {
+               if (variant->dai_driver[i].id == id) {
+                       *dai_name = variant->dai_driver[i].name;
+                       ret = 0;
+                       break;
+               }
+       }
+
+       return ret;
+}
+
 static const struct snd_soc_component_driver lpass_cpu_comp_driver = {
        .name = "lpass-cpu",
+       .of_xlate_dai_name = asoc_qcom_of_xlate_dai_name,
 };
 
 static bool lpass_cpu_regmap_writeable(struct device *dev, unsigned int reg)
index 92f98b4..ef8a798 100644 (file)
@@ -131,7 +131,7 @@ static struct lpass_variant ipq806x_data = {
        .micmode                = REG_FIELD_ID(0x0010, 4, 7, 5, 0x4),
        .micmono                = REG_FIELD_ID(0x0010, 3, 3, 5, 0x4),
        .wssrc                  = REG_FIELD_ID(0x0010, 2, 2, 5, 0x4),
-       .bitwidth               = REG_FIELD_ID(0x0010, 0, 0, 5, 0x4),
+       .bitwidth               = REG_FIELD_ID(0x0010, 0, 1, 5, 0x4),
 
        .rdma_dyncclk           = REG_FIELD_ID(0x6000, 12, 12, 4, 0x1000),
        .rdma_bursten           = REG_FIELD_ID(0x6000, 11, 11, 4, 0x1000),
index 4055428..baf72f1 100644 (file)
 #define        LPAIF_WRDMAPERCNT_REG(v, chan)  LPAIF_WRDMA_REG_ADDR(v, 0x14, (chan))
 
 #define LPAIF_INTFDMA_REG(v, chan, reg, dai_id)  \
-               ((v->dai_driver[dai_id].id ==  LPASS_DP_RX) ? \
+       ((dai_id ==  LPASS_DP_RX) ? \
                LPAIF_HDMI_RDMA##reg##_REG(v, chan) : \
                 LPAIF_RDMA##reg##_REG(v, chan))
 
index d1c2485..0074b7f 100644 (file)
@@ -257,6 +257,9 @@ static int lpass_platform_pcmops_hw_params(struct snd_soc_component *component,
                break;
        case MI2S_PRIMARY:
        case MI2S_SECONDARY:
+       case MI2S_TERTIARY:
+       case MI2S_QUATERNARY:
+       case MI2S_QUINARY:
                ret = regmap_fields_write(dmactl->intf, id,
                                                LPAIF_DMACTL_AUDINTF(dma_port));
                if (ret) {
@@ -507,6 +510,9 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
                        break;
                case MI2S_PRIMARY:
                case MI2S_SECONDARY:
+               case MI2S_TERTIARY:
+               case MI2S_QUATERNARY:
+               case MI2S_QUINARY:
                        reg_irqclr = LPAIF_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
                        val_irqclr = LPAIF_IRQ_ALL(ch);
 
@@ -559,6 +565,9 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
                        break;
                case MI2S_PRIMARY:
                case MI2S_SECONDARY:
+               case MI2S_TERTIARY:
+               case MI2S_QUATERNARY:
+               case MI2S_QUINARY:
                        reg_irqen = LPAIF_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST);
                        val_mask = LPAIF_IRQ_ALL(ch);
                        val_irqen = 0;
@@ -655,6 +664,9 @@ static irqreturn_t lpass_dma_interrupt_handler(
        break;
        case MI2S_PRIMARY:
        case MI2S_SECONDARY:
+       case MI2S_TERTIARY:
+       case MI2S_QUATERNARY:
+       case MI2S_QUINARY:
                map = drvdata->lpaif_map;
                reg = LPAIF_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
                val = 0;
index 85db650..735c9da 100644 (file)
@@ -20,7 +20,7 @@
 #include "lpass.h"
 
 static struct snd_soc_dai_driver sc7180_lpass_cpu_dai_driver[] = {
-       [MI2S_PRIMARY] = {
+       {
                .id = MI2S_PRIMARY,
                .name = "Primary MI2S",
                .playback = {
@@ -44,9 +44,7 @@ static struct snd_soc_dai_driver sc7180_lpass_cpu_dai_driver[] = {
                },
                .probe  = &asoc_qcom_lpass_cpu_dai_probe,
                .ops    = &asoc_qcom_lpass_cpu_dai_ops,
-       },
-
-       [MI2S_SECONDARY] = {
+       }, {
                .id = MI2S_SECONDARY,
                .name = "Secondary MI2S",
                .playback = {
@@ -60,8 +58,7 @@ static struct snd_soc_dai_driver sc7180_lpass_cpu_dai_driver[] = {
                },
                .probe  = &asoc_qcom_lpass_cpu_dai_probe,
                .ops    = &asoc_qcom_lpass_cpu_dai_ops,
-       },
-       [LPASS_DP_RX] = {
+       }, {
                .id = LPASS_DP_RX,
                .name = "Hdmi",
                .playback = {
@@ -174,7 +171,7 @@ static struct lpass_variant sc7180_data = {
        .rdma_channels          = 5,
        .hdmi_rdma_reg_base             = 0x64000,
        .hdmi_rdma_reg_stride   = 0x1000,
-       .hdmi_rdma_channels             = 4,
+       .hdmi_rdma_channels             = 3,
        .dmactl_audif_start     = 1,
        .wrdma_reg_base         = 0x18000,
        .wrdma_reg_stride       = 0x1000,
index 0195372..2d68af0 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/compiler.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
-#include <dt-bindings/sound/sc7180-lpass.h>
+#include <dt-bindings/sound/qcom,lpass.h>
 #include "lpass-hdmi.h"
 
 #define LPASS_AHBIX_CLOCK_FREQUENCY            131072000
index 950c450..22e7b4c 100644 (file)
@@ -447,7 +447,7 @@ static void remove_dai(struct snd_soc_component *comp,
 {
        struct snd_soc_dai_driver *dai_drv =
                container_of(dobj, struct snd_soc_dai_driver, dobj);
-       struct snd_soc_dai *dai;
+       struct snd_soc_dai *dai, *_dai;
 
        if (pass != SOC_TPLG_PASS_PCM_DAI)
                return;
@@ -455,9 +455,9 @@ static void remove_dai(struct snd_soc_component *comp,
        if (dobj->ops && dobj->ops->dai_unload)
                dobj->ops->dai_unload(comp, dobj);
 
-       for_each_component_dais(comp, dai)
+       for_each_component_dais_safe(comp, dai, _dai)
                if (dai->driver == dai_drv)
-                       dai->driver = NULL;
+                       snd_soc_unregister_dai(dai);
 
        list_del(&dobj->list);
 }
@@ -902,7 +902,7 @@ static int soc_tplg_denum_create_values(struct soc_tplg *tplg, struct soc_enum *
                return -EINVAL;
 
        se->dobj.control.dvalues = devm_kcalloc(tplg->dev, le32_to_cpu(ec->items),
-                                          sizeof(u32),
+                                          sizeof(*se->dobj.control.dvalues),
                                           GFP_KERNEL);
        if (!se->dobj.control.dvalues)
                return -ENOMEM;
@@ -1742,7 +1742,7 @@ static int soc_tplg_dai_create(struct soc_tplg *tplg,
        list_add(&dai_drv->dobj.list, &tplg->comp->dobj_list);
 
        /* register the DAI to the component */
-       dai = devm_snd_soc_register_dai(tplg->dev, tplg->comp, dai_drv, false);
+       dai = snd_soc_register_dai(tplg->comp, dai_drv, false);
        if (!dai)
                return -ENOMEM;
 
@@ -1750,6 +1750,7 @@ static int soc_tplg_dai_create(struct soc_tplg *tplg,
        ret = snd_soc_dapm_new_dai_widgets(dapm, dai);
        if (ret != 0) {
                dev_err(dai->dev, "Failed to create DAI widgets %d\n", ret);
+               snd_soc_unregister_dai(dai);
                return ret;
        }
 
index d306c37..4797a1c 100644 (file)
@@ -355,7 +355,7 @@ config SND_SOC_SOF_HDA
 
 config SND_SOC_SOF_INTEL_SOUNDWIRE_LINK
        bool "SOF support for SoundWire"
-       depends on SOUNDWIRE && ACPI
+       depends on ACPI
        help
          This adds support for SoundWire with Sound Open Firmware
          for Intel(R) platforms.
@@ -371,6 +371,7 @@ config SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
 
 config SND_SOC_SOF_INTEL_SOUNDWIRE
        tristate
+       select SOUNDWIRE
        select SOUNDWIRE_INTEL
        help
          This option is not user-selectable but automagically handled by
index 6875fa5..6744318 100644 (file)
@@ -63,16 +63,18 @@ static int hda_codec_load_module(struct hda_codec *codec)
 }
 
 /* enable controller wake up event for all codecs with jack connectors */
-void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev)
+void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev, bool enable)
 {
        struct hda_bus *hbus = sof_to_hbus(sdev);
        struct hdac_bus *bus = sof_to_bus(sdev);
        struct hda_codec *codec;
        unsigned int mask = 0;
 
-       list_for_each_codec(codec, hbus)
-               if (codec->jacktbl.used)
-                       mask |= BIT(codec->core.addr);
+       if (enable) {
+               list_for_each_codec(codec, hbus)
+                       if (codec->jacktbl.used)
+                               mask |= BIT(codec->core.addr);
+       }
 
        snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, mask);
 }
@@ -81,23 +83,18 @@ void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev)
 void hda_codec_jack_check(struct snd_sof_dev *sdev)
 {
        struct hda_bus *hbus = sof_to_hbus(sdev);
-       struct hdac_bus *bus = sof_to_bus(sdev);
        struct hda_codec *codec;
 
-       /* disable controller Wake Up event*/
-       snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, 0);
-
        list_for_each_codec(codec, hbus)
                /*
                 * Wake up all jack-detecting codecs regardless whether an event
                 * has been recorded in STATESTS
                 */
                if (codec->jacktbl.used)
-                       schedule_delayed_work(&codec->jackpoll_work,
-                                             codec->jackpoll_interval);
+                       pm_request_resume(&codec->core.dev);
 }
 #else
-void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev) {}
+void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev, bool enable) {}
 void hda_codec_jack_check(struct snd_sof_dev *sdev) {}
 #endif /* CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC */
 EXPORT_SYMBOL_NS(hda_codec_jack_wake_enable, SND_SOC_SOF_HDA_AUDIO_CODEC);
@@ -156,7 +153,8 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
                if (!hdev->bus->audio_component) {
                        dev_dbg(sdev->dev,
                                "iDisp hw present but no driver\n");
-                       goto error;
+                       ret = -ENOENT;
+                       goto out;
                }
                hda_priv->need_display_power = true;
        }
@@ -173,24 +171,23 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
                 * other return codes without modification
                 */
                if (ret == 0)
-                       goto error;
+                       ret = -ENOENT;
        }
 
-       return ret;
-
-error:
-       snd_hdac_ext_bus_device_exit(hdev);
-       return -ENOENT;
-
+out:
+       if (ret < 0) {
+               snd_hdac_device_unregister(hdev);
+               put_device(&hdev->dev);
+       }
 #else
        hdev = devm_kzalloc(sdev->dev, sizeof(*hdev), GFP_KERNEL);
        if (!hdev)
                return -ENOMEM;
 
        ret = snd_hdac_ext_bus_device_init(&hbus->core, address, hdev, HDA_DEV_ASOC);
+#endif
 
        return ret;
-#endif
 }
 
 /* Codec initialization */
index 2b00115..1c5e05b 100644 (file)
@@ -617,7 +617,7 @@ static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
 
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
        if (runtime_suspend)
-               hda_codec_jack_wake_enable(sdev);
+               hda_codec_jack_wake_enable(sdev, true);
 
        /* power down all hda link */
        snd_hdac_ext_bus_link_power_down_all(bus);
@@ -683,8 +683,11 @@ static int hda_resume(struct snd_sof_dev *sdev, bool runtime_resume)
 
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
        /* check jack status */
-       if (runtime_resume)
-               hda_codec_jack_check(sdev);
+       if (runtime_resume) {
+               hda_codec_jack_wake_enable(sdev, false);
+               if (sdev->system_suspend_target == SOF_SUSPEND_NONE)
+                       hda_codec_jack_check(sdev);
+       }
 
        /* turn off the links that were off before suspend */
        list_for_each_entry(hlink, &bus->hlink_list, list) {
index 9ec8ae0..a3b6f3e 100644 (file)
@@ -650,7 +650,7 @@ void sof_hda_bus_init(struct hdac_bus *bus, struct device *dev);
  */
 void hda_codec_probe_bus(struct snd_sof_dev *sdev,
                         bool hda_codec_use_common_hdmi);
-void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev);
+void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev, bool enable);
 void hda_codec_jack_check(struct snd_sof_dev *sdev);
 
 #endif /* CONFIG_SND_SOC_SOF_HDA */
index 2a369c2..cc2e257 100644 (file)
@@ -131,12 +131,13 @@ static int sof_acpi_probe(struct platform_device *pdev)
        if (!id)
                return -ENODEV;
 
-       ret = snd_intel_acpi_dsp_driver_probe(dev, id->id);
-       if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_SOF) {
-               dev_dbg(dev, "SOF ACPI driver not selected, aborting probe\n");
-               return -ENODEV;
+       if (IS_REACHABLE(CONFIG_SND_INTEL_DSP_CONFIG)) {
+               ret = snd_intel_acpi_dsp_driver_probe(dev, id->id);
+               if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_SOF) {
+                       dev_dbg(dev, "SOF ACPI driver not selected, aborting probe\n");
+                       return -ENODEV;
+               }
        }
-
        dev_dbg(dev, "ACPI DSP detected");
 
        sof_pdata = devm_kzalloc(dev, sizeof(*sof_pdata), GFP_KERNEL);
index 63b989e..215711a 100644 (file)
@@ -344,10 +344,12 @@ static int sof_pci_probe(struct pci_dev *pci,
        const struct snd_sof_dsp_ops *ops;
        int ret;
 
-       ret = snd_intel_dsp_driver_probe(pci);
-       if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_SOF) {
-               dev_dbg(&pci->dev, "SOF PCI driver not selected, aborting probe\n");
-               return -ENODEV;
+       if (IS_REACHABLE(CONFIG_SND_INTEL_DSP_CONFIG)) {
+               ret = snd_intel_dsp_driver_probe(pci);
+               if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_SOF) {
+                       dev_dbg(&pci->dev, "SOF PCI driver not selected, aborting probe\n");
+                       return -ENODEV;
+               }
        }
        dev_dbg(&pci->dev, "PCI DSP detected");
 
index 31051f2..dc68ed6 100644 (file)
@@ -485,18 +485,9 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip,
                              const struct audioformat *fmt, int rate)
 {
        struct usb_device *dev = chip->dev;
-       struct usb_host_interface *alts;
-       unsigned int ep;
        unsigned char data[3];
        int err, crate;
 
-       alts = snd_usb_get_host_interface(chip, fmt->iface, fmt->altsetting);
-       if (!alts)
-               return -EINVAL;
-       if (get_iface_desc(alts)->bNumEndpoints < 1)
-               return -EINVAL;
-       ep = get_endpoint(alts, 0)->bEndpointAddress;
-
        /* if endpoint doesn't have sampling rate control, bail out */
        if (!(fmt->attributes & UAC_EP_CS_ATTR_SAMPLE_RATE))
                return 0;
@@ -506,11 +497,11 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip,
        data[2] = rate >> 16;
        err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC_SET_CUR,
                              USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_OUT,
-                             UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep,
-                             data, sizeof(data));
+                             UAC_EP_CS_ATTR_SAMPLE_RATE << 8,
+                             fmt->endpoint, data, sizeof(data));
        if (err < 0) {
                dev_err(&dev->dev, "%d:%d: cannot set freq %d to ep %#x\n",
-                       fmt->iface, fmt->altsetting, rate, ep);
+                       fmt->iface, fmt->altsetting, rate, fmt->endpoint);
                return err;
        }
 
@@ -524,11 +515,11 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip,
 
        err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC_GET_CUR,
                              USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_IN,
-                             UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep,
-                             data, sizeof(data));
+                             UAC_EP_CS_ATTR_SAMPLE_RATE << 8,
+                             fmt->endpoint, data, sizeof(data));
        if (err < 0) {
                dev_err(&dev->dev, "%d:%d: cannot get freq at ep %#x\n",
-                       fmt->iface, fmt->altsetting, ep);
+                       fmt->iface, fmt->altsetting, fmt->endpoint);
                chip->sample_rate_read_error++;
                return 0; /* some devices don't support reading */
        }
index fe73fe3..8e56882 100644 (file)
@@ -1252,6 +1252,15 @@ int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
 
        /* If the interface has been already set up, just set EP parameters */
        if (!ep->iface_ref->need_setup) {
+               /* sample rate setup of UAC1 is per endpoint, and we need
+                * to update at each EP configuration
+                */
+               if (ep->cur_audiofmt->protocol == UAC_VERSION_1) {
+                       err = snd_usb_init_sample_rate(chip, ep->cur_audiofmt,
+                                                      ep->cur_rate);
+                       if (err < 0)
+                               goto unlock;
+               }
                err = snd_usb_endpoint_set_params(chip, ep);
                if (err < 0)
                        goto unlock;
index 9ebc5d2..e6ff317 100644 (file)
@@ -466,6 +466,17 @@ static int validate_sample_rate_table_v2v3(struct snd_usb_audio *chip,
        unsigned int nr_rates;
        int i, err;
 
+       /* Performing the rate verification may lead to unexpected USB bus
+        * behavior afterwards for some unknown reason.  Do this only for
+        * known devices.
+        */
+       switch (USB_ID_VENDOR(chip->usb_id)) {
+       case 0x07fd: /* MOTU */
+               break;
+       default:
+               return 0; /* don't perform the validation as default */
+       }
+
        table = kcalloc(fp->nr_rates, sizeof(*table), GFP_KERNEL);
        if (!table)
                return -ENOMEM;
index 1ac2cc6..521cc84 100644 (file)
@@ -175,11 +175,13 @@ static int add_roland_implicit_fb(struct snd_usb_audio *chip,
                                       ifnum, alts);
 }
 
-/* Pioneer devices: playback and capture streams sharing the same iface/altset
+/* Playback and capture EPs on Pioneer devices share the same iface/altset,
+ * but they don't seem to work well with the implicit fb mode, hence we
+ * just return as if the sync were already set up.
  */
-static int add_pioneer_implicit_fb(struct snd_usb_audio *chip,
-                                  struct audioformat *fmt,
-                                  struct usb_host_interface *alts)
+static int skip_pioneer_sync_ep(struct snd_usb_audio *chip,
+                               struct audioformat *fmt,
+                               struct usb_host_interface *alts)
 {
        struct usb_endpoint_descriptor *epd;
 
@@ -194,8 +196,7 @@ static int add_pioneer_implicit_fb(struct snd_usb_audio *chip,
             (epd->bmAttributes & USB_ENDPOINT_USAGE_MASK) !=
             USB_ENDPOINT_USAGE_IMPLICIT_FB))
                return 0;
-       return add_implicit_fb_sync_ep(chip, fmt, epd->bEndpointAddress, 1,
-                                      alts->desc.bInterfaceNumber, alts);
+       return 1; /* don't handle with the implicit fb, just skip sync EP */
 }
 
 static int __add_generic_implicit_fb(struct snd_usb_audio *chip,
@@ -298,11 +299,11 @@ static int audioformat_implicit_fb_quirk(struct snd_usb_audio *chip,
                        return 1;
        }
 
-       /* Pioneer devices implicit feedback with vendor spec class */
+       /* Pioneer devices with vendor spec class */
        if (attr == USB_ENDPOINT_SYNC_ASYNC &&
            alts->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC &&
            USB_ID_VENDOR(chip->usb_id) == 0x2b73 /* Pioneer */) {
-               if (add_pioneer_implicit_fb(chip, fmt, alts))
+               if (skip_pioneer_sync_ep(chip, fmt, alts))
                        return 1;
        }
 
index 5607990..078bb4c 100644 (file)
@@ -663,7 +663,7 @@ static int hw_check_valid_format(struct snd_usb_substream *subs,
        check_fmts.bits[1] = (u32)(fp->formats >> 32);
        snd_mask_intersect(&check_fmts, fmts);
        if (snd_mask_empty(&check_fmts)) {
-               hwc_debug("   > check: no supported format %d\n", fp->format);
+               hwc_debug("   > check: no supported format 0x%llx\n", fp->formats);
                return 0;
        }
        /* check the channels */
@@ -775,24 +775,11 @@ static int hw_rule_channels(struct snd_pcm_hw_params *params,
        return apply_hw_params_minmax(it, rmin, rmax);
 }
 
-static int hw_rule_format(struct snd_pcm_hw_params *params,
-                         struct snd_pcm_hw_rule *rule)
+static int apply_hw_params_format_bits(struct snd_mask *fmt, u64 fbits)
 {
-       struct snd_usb_substream *subs = rule->private;
-       const struct audioformat *fp;
-       struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
-       u64 fbits;
        u32 oldbits[2];
        int changed;
 
-       hwc_debug("hw_rule_format: %x:%x\n", fmt->bits[0], fmt->bits[1]);
-       fbits = 0;
-       list_for_each_entry(fp, &subs->fmt_list, list) {
-               if (!hw_check_valid_format(subs, params, fp))
-                       continue;
-               fbits |= fp->formats;
-       }
-
        oldbits[0] = fmt->bits[0];
        oldbits[1] = fmt->bits[1];
        fmt->bits[0] &= (u32)fbits;
@@ -806,6 +793,24 @@ static int hw_rule_format(struct snd_pcm_hw_params *params,
        return changed;
 }
 
+static int hw_rule_format(struct snd_pcm_hw_params *params,
+                         struct snd_pcm_hw_rule *rule)
+{
+       struct snd_usb_substream *subs = rule->private;
+       const struct audioformat *fp;
+       struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+       u64 fbits;
+
+       hwc_debug("hw_rule_format: %x:%x\n", fmt->bits[0], fmt->bits[1]);
+       fbits = 0;
+       list_for_each_entry(fp, &subs->fmt_list, list) {
+               if (!hw_check_valid_format(subs, params, fp))
+                       continue;
+               fbits |= fp->formats;
+       }
+       return apply_hw_params_format_bits(fmt, fbits);
+}
+
 static int hw_rule_period_time(struct snd_pcm_hw_params *params,
                               struct snd_pcm_hw_rule *rule)
 {
@@ -833,64 +838,92 @@ static int hw_rule_period_time(struct snd_pcm_hw_params *params,
        return apply_hw_params_minmax(it, pmin, UINT_MAX);
 }
 
-/* apply PCM hw constraints from the concurrent sync EP */
-static int apply_hw_constraint_from_sync(struct snd_pcm_runtime *runtime,
-                                        struct snd_usb_substream *subs)
+/* get the EP or the sync EP for implicit fb when it's already set up */
+static const struct snd_usb_endpoint *
+get_sync_ep_from_substream(struct snd_usb_substream *subs)
 {
        struct snd_usb_audio *chip = subs->stream->chip;
-       struct snd_usb_endpoint *ep;
        const struct audioformat *fp;
-       int err;
+       const struct snd_usb_endpoint *ep;
 
        list_for_each_entry(fp, &subs->fmt_list, list) {
                ep = snd_usb_get_endpoint(chip, fp->endpoint);
                if (ep && ep->cur_rate)
-                       goto found;
+                       return ep;
                if (!fp->implicit_fb)
                        continue;
                /* for the implicit fb, check the sync ep as well */
                ep = snd_usb_get_endpoint(chip, fp->sync_ep);
                if (ep && ep->cur_rate)
-                       goto found;
+                       return ep;
        }
-       return 0;
+       return NULL;
+}
 
- found:
-       if (!find_format(&subs->fmt_list, ep->cur_format, ep->cur_rate,
-                        ep->cur_channels, false, NULL)) {
-               usb_audio_dbg(chip, "EP 0x%x being used, but not applicable\n",
-                             ep->ep_num);
+/* additional hw constraints for implicit feedback mode */
+static int hw_rule_format_implicit_fb(struct snd_pcm_hw_params *params,
+                                     struct snd_pcm_hw_rule *rule)
+{
+       struct snd_usb_substream *subs = rule->private;
+       const struct snd_usb_endpoint *ep;
+       struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+
+       ep = get_sync_ep_from_substream(subs);
+       if (!ep)
                return 0;
-       }
 
-       usb_audio_dbg(chip, "EP 0x%x being used, using fixed params:\n",
-                     ep->ep_num);
-       usb_audio_dbg(chip, "rate=%d, period_size=%d, periods=%d\n",
-                     ep->cur_rate, ep->cur_period_frames,
-                     ep->cur_buffer_periods);
+       hwc_debug("applying %s\n", __func__);
+       return apply_hw_params_format_bits(fmt, pcm_format_to_bits(ep->cur_format));
+}
 
-       runtime->hw.formats = subs->formats;
-       runtime->hw.rate_min = runtime->hw.rate_max = ep->cur_rate;
-       runtime->hw.rates = SNDRV_PCM_RATE_KNOT;
-       runtime->hw.periods_min = runtime->hw.periods_max =
-               ep->cur_buffer_periods;
+static int hw_rule_rate_implicit_fb(struct snd_pcm_hw_params *params,
+                                   struct snd_pcm_hw_rule *rule)
+{
+       struct snd_usb_substream *subs = rule->private;
+       const struct snd_usb_endpoint *ep;
+       struct snd_interval *it;
 
-       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
-                                 hw_rule_channels, subs,
-                                 SNDRV_PCM_HW_PARAM_FORMAT,
-                                 SNDRV_PCM_HW_PARAM_RATE,
-                                 -1);
-       if (err < 0)
-               return err;
+       ep = get_sync_ep_from_substream(subs);
+       if (!ep)
+               return 0;
 
-       err = snd_pcm_hw_constraint_minmax(runtime,
-                                          SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
-                                          ep->cur_period_frames,
-                                          ep->cur_period_frames);
-       if (err < 0)
-               return err;
+       hwc_debug("applying %s\n", __func__);
+       it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+       return apply_hw_params_minmax(it, ep->cur_rate, ep->cur_rate);
+}
 
-       return 1; /* notify the finding */
+static int hw_rule_period_size_implicit_fb(struct snd_pcm_hw_params *params,
+                                          struct snd_pcm_hw_rule *rule)
+{
+       struct snd_usb_substream *subs = rule->private;
+       const struct snd_usb_endpoint *ep;
+       struct snd_interval *it;
+
+       ep = get_sync_ep_from_substream(subs);
+       if (!ep)
+               return 0;
+
+       hwc_debug("applying %s\n", __func__);
+       it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
+       return apply_hw_params_minmax(it, ep->cur_period_frames,
+                                     ep->cur_period_frames);
+}
+
+static int hw_rule_periods_implicit_fb(struct snd_pcm_hw_params *params,
+                                      struct snd_pcm_hw_rule *rule)
+{
+       struct snd_usb_substream *subs = rule->private;
+       const struct snd_usb_endpoint *ep;
+       struct snd_interval *it;
+
+       ep = get_sync_ep_from_substream(subs);
+       if (!ep)
+               return 0;
+
+       hwc_debug("applying %s\n", __func__);
+       it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIODS);
+       return apply_hw_params_minmax(it, ep->cur_buffer_periods,
+                                     ep->cur_buffer_periods);
 }
 
 /*
@@ -899,20 +932,11 @@ static int apply_hw_constraint_from_sync(struct snd_pcm_runtime *runtime,
 
 static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substream *subs)
 {
-       struct snd_usb_audio *chip = subs->stream->chip;
        const struct audioformat *fp;
        unsigned int pt, ptmin;
        int param_period_time_if_needed = -1;
        int err;
 
-       mutex_lock(&chip->mutex);
-       err = apply_hw_constraint_from_sync(runtime, subs);
-       mutex_unlock(&chip->mutex);
-       if (err < 0)
-               return err;
-       if (err > 0) /* found the matching? */
-               goto add_extra_rules;
-
        runtime->hw.formats = subs->formats;
 
        runtime->hw.rate_min = 0x7fffffff;
@@ -957,6 +981,7 @@ static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substre
 
        err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
                                  hw_rule_rate, subs,
+                                 SNDRV_PCM_HW_PARAM_RATE,
                                  SNDRV_PCM_HW_PARAM_FORMAT,
                                  SNDRV_PCM_HW_PARAM_CHANNELS,
                                  param_period_time_if_needed,
@@ -964,9 +989,9 @@ static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substre
        if (err < 0)
                return err;
 
-add_extra_rules:
        err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
                                  hw_rule_channels, subs,
+                                 SNDRV_PCM_HW_PARAM_CHANNELS,
                                  SNDRV_PCM_HW_PARAM_FORMAT,
                                  SNDRV_PCM_HW_PARAM_RATE,
                                  param_period_time_if_needed,
@@ -975,6 +1000,7 @@ add_extra_rules:
                return err;
        err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
                                  hw_rule_format, subs,
+                                 SNDRV_PCM_HW_PARAM_FORMAT,
                                  SNDRV_PCM_HW_PARAM_RATE,
                                  SNDRV_PCM_HW_PARAM_CHANNELS,
                                  param_period_time_if_needed,
@@ -993,6 +1019,28 @@ add_extra_rules:
                        return err;
        }
 
+       /* additional hw constraints for implicit fb */
+       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
+                                 hw_rule_format_implicit_fb, subs,
+                                 SNDRV_PCM_HW_PARAM_FORMAT, -1);
+       if (err < 0)
+               return err;
+       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+                                 hw_rule_rate_implicit_fb, subs,
+                                 SNDRV_PCM_HW_PARAM_RATE, -1);
+       if (err < 0)
+               return err;
+       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+                                 hw_rule_period_size_implicit_fb, subs,
+                                 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
+       if (err < 0)
+               return err;
+       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS,
+                                 hw_rule_periods_implicit_fb, subs,
+                                 SNDRV_PCM_HW_PARAM_PERIODS, -1);
+       if (err < 0)
+               return err;
+
        return 0;
 }
 
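The implicit-feedback rules added above all follow one pattern: look up the endpoint that is already configured, then clamp a single hw_params interval (rate, period size, periods) to that endpoint's current value. Below is a minimal stand-alone sketch of just the clamp step; struct interval and clamp_interval() are illustrative names, not the kernel's snd_interval helpers.

#include <stdio.h>

/* simplified stand-in for a [min, max] hw_params interval */
struct interval {
	unsigned int min;
	unsigned int max;
};

/* clamp the interval to [lo, hi]; returns 1 if it changed, 0 if not,
 * -1 if the result would be empty (an illustrative convention) */
static int clamp_interval(struct interval *it, unsigned int lo, unsigned int hi)
{
	struct interval old = *it;

	if (it->min < lo)
		it->min = lo;
	if (it->max > hi)
		it->max = hi;
	if (it->min > it->max)
		return -1;
	return old.min != it->min || old.max != it->max;
}

int main(void)
{
	struct interval rate = { 8000, 192000 };

	/* e.g. a sync endpoint already running at 48000 Hz */
	int changed = clamp_interval(&rate, 48000, 48000);

	printf("rate %u..%u (changed=%d)\n", rate.min, rate.max, changed);
	return 0;
}
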
index 89e1726..e196e36 100644 (file)
@@ -1470,30 +1470,6 @@ static void set_format_emu_quirk(struct snd_usb_substream *subs,
        subs->pkt_offset_adj = (emu_samplerate_id >= EMU_QUIRK_SR_176400HZ) ? 4 : 0;
 }
 
-
-/*
- * Pioneer DJ DJM-900NXS2
- * Device needs to know the sample rate each time substream is started
- */
-static int pioneer_djm_set_format_quirk(struct snd_usb_substream *subs)
-{
-       unsigned int cur_rate = subs->data_endpoint->cur_rate;
-       /* Convert sample rate value to little endian */
-       u8 sr[3];
-
-       sr[0] = cur_rate & 0xff;
-       sr[1] = (cur_rate >> 8) & 0xff;
-       sr[2] = (cur_rate >> 16) & 0xff;
-
-       /* Configure device */
-       usb_set_interface(subs->dev, 0, 1);
-       snd_usb_ctl_msg(subs->stream->chip->dev,
-               usb_rcvctrlpipe(subs->stream->chip->dev, 0),
-               0x01, 0x22, 0x0100, 0x0082, &sr, 0x0003);
-
-       return 0;
-}
-
 void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
                              const struct audioformat *fmt)
 {
@@ -1504,10 +1480,6 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
        case USB_ID(0x041e, 0x3f19): /* E-Mu 0204 USB */
                set_format_emu_quirk(subs, fmt);
                break;
-       case USB_ID(0x2b73, 0x000a): /* Pioneer DJ DJM-900NXS2 */
-       case USB_ID(0x2b73, 0x0017): /* Pioneer DJ DJM-250MK2 */
-               pioneer_djm_set_format_quirk(subs);
-               break;
        case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */
                subs->stream_offset_adj = 2;
                break;
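
The removed Pioneer DJ quirk packed the current sample rate into a 3-byte little-endian buffer before sending it in a control message. A stand-alone sketch of that byte packing; pack_rate_le24() is an illustrative name, not the driver's function.

#include <stdio.h>
#include <stdint.h>

/* pack a sample rate into a 3-byte little-endian buffer */
static void pack_rate_le24(unsigned int rate, uint8_t sr[3])
{
	sr[0] = rate & 0xff;
	sr[1] = (rate >> 8) & 0xff;
	sr[2] = (rate >> 16) & 0xff;
}

int main(void)
{
	uint8_t sr[3];

	pack_rate_le24(96000, sr);                        /* 96000 == 0x017700 */
	printf("%02x %02x %02x\n", sr[0], sr[1], sr[2]);  /* 00 77 01 */
	return 0;
}
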
index cacd66a..a2b233f 100644 (file)
@@ -107,8 +107,8 @@ int monitor_device(const char *device_name,
                        ret = -EIO;
                        break;
                }
-               fprintf(stdout, "GPIO EVENT at %llu on line %d (%d|%d) ",
-                       event.timestamp_ns, event.offset, event.line_seqno,
+               fprintf(stdout, "GPIO EVENT at %" PRIu64 " on line %d (%d|%d) ",
+                       (uint64_t)event.timestamp_ns, event.offset, event.line_seqno,
                        event.seqno);
                switch (event.id) {
                case GPIO_V2_LINE_EVENT_RISING_EDGE:
index f229ec6..41e76d2 100644 (file)
@@ -10,6 +10,7 @@
 #include <ctype.h>
 #include <errno.h>
 #include <fcntl.h>
+#include <inttypes.h>
 #include <linux/gpio.h>
 #include <poll.h>
 #include <stdbool.h>
@@ -86,8 +87,8 @@ int main(int argc, char **argv)
                                return EXIT_FAILURE;
                        }
 
-                       printf("line %u: %s at %llu\n",
-                              chg.info.offset, event, chg.timestamp_ns);
+                       printf("line %u: %s at %" PRIu64 "\n",
+                              chg.info.offset, event, (uint64_t)chg.timestamp_ns);
                }
        }
 
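Both GPIO tool hunks switch from the %llu conversion to the <inttypes.h> PRIu64 macro, since the printf format of a 64-bit kernel field is not the same on every ABI. A minimal stand-alone example of the portable form, assuming an arbitrary timestamp value:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* a kernel-provided 64-bit timestamp; uint64_t stands in for it here */
	uint64_t timestamp_ns = 1611878400000000000ULL;

	/* PRIu64 expands to the right conversion for this platform's uint64_t */
	printf("GPIO EVENT at %" PRIu64 " ns\n", timestamp_ns);
	return 0;
}
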
index cfcdbd7..17465d4 100644 (file)
@@ -367,21 +367,13 @@ static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, boo
        return map;
 }
 
-static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
-                                    struct perf_evsel *evsel, int idx, int cpu,
-                                    int thread)
+static void perf_evsel__set_sid_idx(struct perf_evsel *evsel, int idx, int cpu, int thread)
 {
        struct perf_sample_id *sid = SID(evsel, cpu, thread);
 
        sid->idx = idx;
-       if (evlist->cpus && cpu >= 0)
-               sid->cpu = evlist->cpus->map[cpu];
-       else
-               sid->cpu = -1;
-       if (!evsel->system_wide && evlist->threads && thread >= 0)
-               sid->tid = perf_thread_map__pid(evlist->threads, thread);
-       else
-               sid->tid = -1;
+       sid->cpu = perf_cpu_map__cpu(evsel->cpus, cpu);
+       sid->tid = perf_thread_map__pid(evsel->threads, thread);
 }
 
 static struct perf_mmap*
@@ -500,8 +492,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
                        if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
                                                   fd) < 0)
                                return -1;
-                       perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
-                                                thread);
+                       perf_evsel__set_sid_idx(evsel, idx, cpu, thread);
                }
        }
 
index 5f8d3ee..4bd3031 100644 (file)
@@ -2928,14 +2928,10 @@ int check(struct objtool_file *file)
        warnings += ret;
 
 out:
-       if (ret < 0) {
-               /*
-                *  Fatal error.  The binary is corrupt or otherwise broken in
-                *  some way, or objtool itself is broken.  Fail the kernel
-                *  build.
-                */
-               return ret;
-       }
-
+       /*
+        *  For now, don't fail the kernel build on fatal warnings.  These
+        *  errors are still fairly common due to the growing matrix of
+        *  supported toolchains and their recent pace of change.
+        */
        return 0;
 }
index be89c74..d8421e1 100644 (file)
@@ -380,8 +380,11 @@ static int read_symbols(struct elf *elf)
 
        symtab = find_section_by_name(elf, ".symtab");
        if (!symtab) {
-               WARN("missing symbol table");
-               return -1;
+               /*
+                * A missing symbol table is actually possible if it's an empty
+                * .o file.  This can happen for thunk_64.o.
+                */
+               return 0;
        }
 
        symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
@@ -448,6 +451,13 @@ static int read_symbols(struct elf *elf)
                list_add(&sym->list, entry);
                elf_hash_add(elf->symbol_hash, &sym->hash, sym->idx);
                elf_hash_add(elf->symbol_name_hash, &sym->name_hash, str_hash(sym->name));
+
+               /*
+                * Don't store empty STT_NOTYPE symbols in the rbtree.  They
+                * can exist within a function, confusing the sorting.
+                */
+               if (!sym->len)
+                       rb_erase(&sym->node, &sym->sec->symbol_tree);
        }
 
        if (stats)
index edacfa9..42dad4a 100644 (file)
@@ -186,6 +186,7 @@ struct output_option {
 
 enum {
        OUTPUT_TYPE_SYNTH = PERF_TYPE_MAX,
+       OUTPUT_TYPE_OTHER,
        OUTPUT_TYPE_MAX
 };
 
@@ -283,6 +284,18 @@ static struct {
 
                .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
        },
+
+       [OUTPUT_TYPE_OTHER] = {
+               .user_set = false,
+
+               .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
+                             PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
+                             PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
+                             PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
+                             PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
+
+               .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
+       },
 };
 
 struct evsel_script {
@@ -343,8 +356,11 @@ static inline int output_type(unsigned int type)
        case PERF_TYPE_SYNTH:
                return OUTPUT_TYPE_SYNTH;
        default:
-               return type;
+               if (type < PERF_TYPE_MAX)
+                       return type;
        }
+
+       return OUTPUT_TYPE_OTHER;
 }
 
 static inline unsigned int attr_type(unsigned int type)
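
The new OUTPUT_TYPE_OTHER entry gives events from unrecognized attr types a default field set instead of indexing the output table out of range. A simplified stand-alone sketch of that map-with-fallback idea; the enum values here are illustrative, not perf's.

#include <stdio.h>

enum { TYPE_HARDWARE, TYPE_SOFTWARE, TYPE_MAX, TYPE_OTHER, TYPE_COUNT };

/* map an arbitrary attr type to a valid table index,
 * falling back to TYPE_OTHER for anything unknown */
static int output_type(unsigned int type)
{
	if (type < TYPE_MAX)
		return type;
	return TYPE_OTHER;
}

int main(void)
{
	printf("%d %d\n", output_type(TYPE_SOFTWARE), output_type(12345));
	return 0;
}
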
index ee94d3e..e6d3452 100644 (file)
@@ -162,6 +162,14 @@ static bool contains_event(struct evsel **metric_events, int num_events,
        return false;
 }
 
+static bool evsel_same_pmu(struct evsel *ev1, struct evsel *ev2)
+{
+       if (!ev1->pmu_name || !ev2->pmu_name)
+               return false;
+
+       return !strcmp(ev1->pmu_name, ev2->pmu_name);
+}
+
 /**
  * Find a group of events in perf_evlist that correspond to those from a parsed
  * metric expression. Note, as find_evsel_group is called in the same order as
@@ -280,8 +288,7 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
                         */
                        if (!has_constraint &&
                            ev->leader != metric_events[i]->leader &&
-                           !strcmp(ev->leader->pmu_name,
-                                   metric_events[i]->leader->pmu_name))
+                           evsel_same_pmu(ev->leader, metric_events[i]->leader))
                                break;
                        if (!strcmp(metric_events[i]->name, ev->name)) {
                                set_bit(ev->idx, evlist_used);
@@ -766,7 +773,6 @@ int __weak arch_get_runtimeparam(struct pmu_event *pe __maybe_unused)
 struct metricgroup_add_iter_data {
        struct list_head *metric_list;
        const char *metric;
-       struct metric **m;
        struct expr_ids *ids;
        int *ret;
        bool *has_match;
@@ -1058,12 +1064,13 @@ static int metricgroup__add_metric_sys_event_iter(struct pmu_event *pe,
                                                  void *data)
 {
        struct metricgroup_add_iter_data *d = data;
+       struct metric *m = NULL;
        int ret;
 
        if (!match_pe_metric(pe, d->metric))
                return 0;
 
-       ret = add_metric(d->metric_list, pe, d->metric_no_group, d->m, NULL, d->ids);
+       ret = add_metric(d->metric_list, pe, d->metric_no_group, &m, NULL, d->ids);
        if (ret)
                return ret;
 
@@ -1114,7 +1121,6 @@ static int metricgroup__add_metric(const char *metric, bool metric_no_group,
                                .metric_list = &list,
                                .metric = metric,
                                .metric_no_group = metric_no_group,
-                               .m = &m,
                                .ids = &ids,
                                .has_match = &has_match,
                                .ret = &ret,
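
evsel_same_pmu() avoids the strcmp() on a possibly-NULL pmu_name that the old inline comparison could hit. The same NULL-safe comparison as a stand-alone sketch; same_pmu() is an illustrative name.

#include <stdio.h>
#include <string.h>

/* compare two possibly-NULL PMU names; NULL never matches anything */
static int same_pmu(const char *a, const char *b)
{
	if (!a || !b)
		return 0;
	return strcmp(a, b) == 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       same_pmu("cpu", "cpu"),     /* 1 */
	       same_pmu("cpu", "uncore"),  /* 0 */
	       same_pmu(NULL, "cpu"));     /* 0, no crash */
	return 0;
}
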
index 5390158..09cb3a6 100644 (file)
@@ -1249,6 +1249,8 @@ static void dump_isst_config(int arg)
        isst_ctdp_display_information_end(outf);
 }
 
+static void adjust_scaling_max_from_base_freq(int cpu);
+
 static void set_tdp_level_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
                                  void *arg4)
 {
@@ -1267,6 +1269,9 @@ static void set_tdp_level_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
                        int pkg_id = get_physical_package_id(cpu);
                        int die_id = get_physical_die_id(cpu);
 
+                       /* Wait for updated base frequencies */
+                       usleep(2000);
+
                        fprintf(stderr, "Option is set to online/offline\n");
                        ctdp_level.core_cpumask_size =
                                alloc_cpu_set(&ctdp_level.core_cpumask);
@@ -1283,6 +1288,7 @@ static void set_tdp_level_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
                                        if (CPU_ISSET_S(i, ctdp_level.core_cpumask_size, ctdp_level.core_cpumask)) {
                                                fprintf(stderr, "online cpu %d\n", i);
                                                set_cpu_online_offline(i, 1);
+                                               adjust_scaling_max_from_base_freq(i);
                                        } else {
                                                fprintf(stderr, "offline cpu %d\n", i);
                                                set_cpu_online_offline(i, 0);
@@ -1440,6 +1446,31 @@ static int set_cpufreq_scaling_min_max(int cpu, int max, int freq)
        return 0;
 }
 
+static int no_turbo(void)
+{
+       return parse_int_file(0, "/sys/devices/system/cpu/intel_pstate/no_turbo");
+}
+
+static void adjust_scaling_max_from_base_freq(int cpu)
+{
+       int base_freq, scaling_max_freq;
+
+       scaling_max_freq = parse_int_file(0, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_max_freq", cpu);
+       base_freq = get_cpufreq_base_freq(cpu);
+       if (scaling_max_freq < base_freq || no_turbo())
+               set_cpufreq_scaling_min_max(cpu, 1, base_freq);
+}
+
+static void adjust_scaling_min_from_base_freq(int cpu)
+{
+       int base_freq, scaling_min_freq;
+
+       scaling_min_freq = parse_int_file(0, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_min_freq", cpu);
+       base_freq = get_cpufreq_base_freq(cpu);
+       if (scaling_min_freq < base_freq)
+               set_cpufreq_scaling_min_max(cpu, 0, base_freq);
+}
+
 static int set_clx_pbf_cpufreq_scaling_min_max(int cpu)
 {
        struct isst_pkg_ctdp_level_info *ctdp_level;
@@ -1537,6 +1568,7 @@ static void set_scaling_min_to_cpuinfo_max(int cpu)
                        continue;
 
                set_cpufreq_scaling_min_max_from_cpuinfo(i, 1, 0);
+               adjust_scaling_min_from_base_freq(i);
        }
 }
 
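The new adjust_scaling_{min,max}_from_base_freq() helpers read the cpufreq base frequency from sysfs and raise scaling_min/max_freq when they ended up below it. A hedged user-space sketch of that comparison, assuming the usual intel_pstate sysfs layout; read_int_file() is a hypothetical helper, not the tool's parse_int_file().

#include <stdio.h>

/* hypothetical helper: read a single integer from a sysfs file */
static int read_int_file(const char *path)
{
	FILE *f = fopen(path, "r");
	int val = -1;

	if (!f)
		return -1;
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	int base = read_int_file(
		"/sys/devices/system/cpu/cpu0/cpufreq/base_frequency");
	int max = read_int_file(
		"/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq");

	if (base > 0 && max > 0 && max < base)
		printf("scaling_max_freq %d below base %d, would raise it\n",
		       max, base);
	return 0;
}
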
index 21516e2..e808a47 100755 (executable)
@@ -43,9 +43,9 @@ class KunitStatus(Enum):
        BUILD_FAILURE = auto()
        TEST_FAILURE = auto()
 
-def get_kernel_root_path():
-       parts = sys.argv[0] if not __file__ else __file__
-       parts = os.path.realpath(parts).split('tools/testing/kunit')
+def get_kernel_root_path() -> str:
+       path = sys.argv[0] if not __file__ else __file__
+       parts = os.path.realpath(path).split('tools/testing/kunit')
        if len(parts) != 2:
                sys.exit(1)
        return parts[0]
@@ -171,7 +171,7 @@ def run_tests(linux: kunit_kernel.LinuxSourceTree,
                                exec_result.elapsed_time))
        return parse_result
 
-def add_common_opts(parser):
+def add_common_opts(parser) -> None:
        parser.add_argument('--build_dir',
                            help='As in the make command, it specifies the build '
                            'directory.',
@@ -183,13 +183,13 @@ def add_common_opts(parser):
                            help='Run all KUnit tests through allyesconfig',
                            action='store_true')
 
-def add_build_opts(parser):
+def add_build_opts(parser) -> None:
        parser.add_argument('--jobs',
                            help='As in the make command, "Specifies  the number of '
                            'jobs (commands) to run simultaneously."',
                            type=int, default=8, metavar='jobs')
 
-def add_exec_opts(parser):
+def add_exec_opts(parser) -> None:
        parser.add_argument('--timeout',
                            help='maximum number of seconds to allow for all tests '
                            'to run. This does not include time taken to build the '
@@ -198,7 +198,7 @@ def add_exec_opts(parser):
                            default=300,
                            metavar='timeout')
 
-def add_parse_opts(parser):
+def add_parse_opts(parser) -> None:
        parser.add_argument('--raw_output', help='don\'t format output from kernel',
                            action='store_true')
        parser.add_argument('--json',
@@ -256,10 +256,7 @@ def main(argv, linux=None):
                        os.mkdir(cli_args.build_dir)
 
                if not linux:
-                       linux = kunit_kernel.LinuxSourceTree()
-
-               linux.create_kunitconfig(cli_args.build_dir)
-               linux.read_kunitconfig(cli_args.build_dir)
+                       linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir)
 
                request = KunitRequest(cli_args.raw_output,
                                       cli_args.timeout,
@@ -277,10 +274,7 @@ def main(argv, linux=None):
                        os.mkdir(cli_args.build_dir)
 
                if not linux:
-                       linux = kunit_kernel.LinuxSourceTree()
-
-               linux.create_kunitconfig(cli_args.build_dir)
-               linux.read_kunitconfig(cli_args.build_dir)
+                       linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir)
 
                request = KunitConfigRequest(cli_args.build_dir,
                                             cli_args.make_options)
@@ -292,10 +286,7 @@ def main(argv, linux=None):
                        sys.exit(1)
        elif cli_args.subcommand == 'build':
                if not linux:
-                       linux = kunit_kernel.LinuxSourceTree()
-
-               linux.create_kunitconfig(cli_args.build_dir)
-               linux.read_kunitconfig(cli_args.build_dir)
+                       linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir)
 
                request = KunitBuildRequest(cli_args.jobs,
                                            cli_args.build_dir,
@@ -309,10 +300,7 @@ def main(argv, linux=None):
                        sys.exit(1)
        elif cli_args.subcommand == 'exec':
                if not linux:
-                       linux = kunit_kernel.LinuxSourceTree()
-
-               linux.create_kunitconfig(cli_args.build_dir)
-               linux.read_kunitconfig(cli_args.build_dir)
+                       linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir)
 
                exec_request = KunitExecRequest(cli_args.timeout,
                                                cli_args.build_dir,
index 02ffc3a..bdd6023 100644 (file)
@@ -8,6 +8,7 @@
 
 import collections
 import re
+from typing import List, Set
 
 CONFIG_IS_NOT_SET_PATTERN = r'^# CONFIG_(\w+) is not set$'
 CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+|".*")$'
@@ -30,10 +31,10 @@ class KconfigParseError(Exception):
 class Kconfig(object):
        """Represents defconfig or .config specified using the Kconfig language."""
 
-       def __init__(self):
-               self._entries = []
+       def __init__(self) -> None:
+               self._entries = []  # type: List[KconfigEntry]
 
-       def entries(self):
+       def entries(self) -> Set[KconfigEntry]:
                return set(self._entries)
 
        def add_entry(self, entry: KconfigEntry) -> None:
index 624b31b..f5cca5c 100644 (file)
@@ -13,7 +13,7 @@ import kunit_parser
 
 from kunit_parser import TestStatus
 
-def get_json_result(test_result, def_config, build_dir, json_path):
+def get_json_result(test_result, def_config, build_dir, json_path) -> str:
        sub_groups = []
 
        # Each test suite is mapped to a KernelCI sub_group
index 698358c..2076a5a 100644 (file)
@@ -11,6 +11,7 @@ import subprocess
 import os
 import shutil
 import signal
+from typing import Iterator
 
 from contextlib import ExitStack
 
@@ -39,7 +40,7 @@ class BuildError(Exception):
 class LinuxSourceTreeOperations(object):
        """An abstraction over command line operations performed on a source tree."""
 
-       def make_mrproper(self):
+       def make_mrproper(self) -> None:
                try:
                        subprocess.check_output(['make', 'mrproper'], stderr=subprocess.STDOUT)
                except OSError as e:
@@ -47,7 +48,7 @@ class LinuxSourceTreeOperations(object):
                except subprocess.CalledProcessError as e:
                        raise ConfigError(e.output.decode())
 
-       def make_olddefconfig(self, build_dir, make_options):
+       def make_olddefconfig(self, build_dir, make_options) -> None:
                command = ['make', 'ARCH=um', 'olddefconfig']
                if make_options:
                        command.extend(make_options)
@@ -60,7 +61,7 @@ class LinuxSourceTreeOperations(object):
                except subprocess.CalledProcessError as e:
                        raise ConfigError(e.output.decode())
 
-       def make_allyesconfig(self, build_dir, make_options):
+       def make_allyesconfig(self, build_dir, make_options) -> None:
                kunit_parser.print_with_timestamp(
                        'Enabling all CONFIGs for UML...')
                command = ['make', 'ARCH=um', 'allyesconfig']
@@ -82,7 +83,7 @@ class LinuxSourceTreeOperations(object):
                kunit_parser.print_with_timestamp(
                        'Starting Kernel with all configs takes a few minutes...')
 
-       def make(self, jobs, build_dir, make_options):
+       def make(self, jobs, build_dir, make_options) -> None:
                command = ['make', 'ARCH=um', '--jobs=' + str(jobs)]
                if make_options:
                        command.extend(make_options)
@@ -100,7 +101,7 @@ class LinuxSourceTreeOperations(object):
                if stderr:  # likely only due to build warnings
                        print(stderr.decode())
 
-       def linux_bin(self, params, timeout, build_dir):
+       def linux_bin(self, params, timeout, build_dir) -> None:
                """Runs the Linux UML binary. Must be named 'linux'."""
                linux_bin = get_file_path(build_dir, 'linux')
                outfile = get_outfile_path(build_dir)
@@ -110,41 +111,42 @@ class LinuxSourceTreeOperations(object):
                                                   stderr=subprocess.STDOUT)
                        process.wait(timeout)
 
-def get_kconfig_path(build_dir):
+def get_kconfig_path(build_dir) -> str:
        return get_file_path(build_dir, KCONFIG_PATH)
 
-def get_kunitconfig_path(build_dir):
+def get_kunitconfig_path(build_dir) -> str:
        return get_file_path(build_dir, KUNITCONFIG_PATH)
 
-def get_outfile_path(build_dir):
+def get_outfile_path(build_dir) -> str:
        return get_file_path(build_dir, OUTFILE_PATH)
 
 class LinuxSourceTree(object):
        """Represents a Linux kernel source tree with KUnit tests."""
 
-       def __init__(self):
-               self._ops = LinuxSourceTreeOperations()
+       def __init__(self, build_dir: str, load_config=True, defconfig=DEFAULT_KUNITCONFIG_PATH) -> None:
                signal.signal(signal.SIGINT, self.signal_handler)
 
-       def clean(self):
-               try:
-                       self._ops.make_mrproper()
-               except ConfigError as e:
-                       logging.error(e)
-                       return False
-               return True
+               self._ops = LinuxSourceTreeOperations()
+
+               if not load_config:
+                       return
 
-       def create_kunitconfig(self, build_dir, defconfig=DEFAULT_KUNITCONFIG_PATH):
                kunitconfig_path = get_kunitconfig_path(build_dir)
                if not os.path.exists(kunitconfig_path):
                        shutil.copyfile(defconfig, kunitconfig_path)
 
-       def read_kunitconfig(self, build_dir):
-               kunitconfig_path = get_kunitconfig_path(build_dir)
                self._kconfig = kunit_config.Kconfig()
                self._kconfig.read_from_file(kunitconfig_path)
 
-       def validate_config(self, build_dir):
+       def clean(self) -> bool:
+               try:
+                       self._ops.make_mrproper()
+               except ConfigError as e:
+                       logging.error(e)
+                       return False
+               return True
+
+       def validate_config(self, build_dir) -> bool:
                kconfig_path = get_kconfig_path(build_dir)
                validated_kconfig = kunit_config.Kconfig()
                validated_kconfig.read_from_file(kconfig_path)
@@ -158,7 +160,7 @@ class LinuxSourceTree(object):
                        return False
                return True
 
-       def build_config(self, build_dir, make_options):
+       def build_config(self, build_dir, make_options) -> bool:
                kconfig_path = get_kconfig_path(build_dir)
                if build_dir and not os.path.exists(build_dir):
                        os.mkdir(build_dir)
@@ -170,7 +172,7 @@ class LinuxSourceTree(object):
                        return False
                return self.validate_config(build_dir)
 
-       def build_reconfig(self, build_dir, make_options):
+       def build_reconfig(self, build_dir, make_options) -> bool:
                """Creates a new .config if it is not a subset of the .kunitconfig."""
                kconfig_path = get_kconfig_path(build_dir)
                if os.path.exists(kconfig_path):
@@ -186,7 +188,7 @@ class LinuxSourceTree(object):
                        print('Generating .config ...')
                        return self.build_config(build_dir, make_options)
 
-       def build_um_kernel(self, alltests, jobs, build_dir, make_options):
+       def build_um_kernel(self, alltests, jobs, build_dir, make_options) -> bool:
                try:
                        if alltests:
                                self._ops.make_allyesconfig(build_dir, make_options)
@@ -197,7 +199,7 @@ class LinuxSourceTree(object):
                        return False
                return self.validate_config(build_dir)
 
-       def run_kernel(self, args=[], build_dir='', timeout=None):
+       def run_kernel(self, args=[], build_dir='', timeout=None) -> Iterator[str]:
                args.extend(['mem=1G', 'console=tty'])
                self._ops.linux_bin(args, timeout, build_dir)
                outfile = get_outfile_path(build_dir)
@@ -206,6 +208,6 @@ class LinuxSourceTree(object):
                        for line in file:
                                yield line
 
-       def signal_handler(self, sig, frame):
+       def signal_handler(self, sig, frame) -> None:
                logging.error('Build interruption occurred. Cleaning console.')
                subprocess.call(['stty', 'sane'])
index 6614ec4..e8bcc13 100644 (file)
@@ -12,32 +12,32 @@ from collections import namedtuple
 from datetime import datetime
 from enum import Enum, auto
 from functools import reduce
-from typing import List, Optional, Tuple
+from typing import Iterable, Iterator, List, Optional, Tuple
 
 TestResult = namedtuple('TestResult', ['status','suites','log'])
 
 class TestSuite(object):
-       def __init__(self):
-               self.status = None
-               self.name = None
-               self.cases = []
+       def __init__(self) -> None:
+               self.status = TestStatus.SUCCESS
+               self.name = ''
+               self.cases = []  # type: List[TestCase]
 
-       def __str__(self):
-               return 'TestSuite(' + self.status + ',' + self.name + ',' + str(self.cases) + ')'
+       def __str__(self) -> str:
+               return 'TestSuite(' + str(self.status) + ',' + self.name + ',' + str(self.cases) + ')'
 
-       def __repr__(self):
+       def __repr__(self) -> str:
                return str(self)
 
 class TestCase(object):
-       def __init__(self):
-               self.status = None
+       def __init__(self) -> None:
+               self.status = TestStatus.SUCCESS
                self.name = ''
-               self.log = []
+               self.log = []  # type: List[str]
 
-       def __str__(self):
-               return 'TestCase(' + self.status + ',' + self.name + ',' + str(self.log) + ')'
+       def __str__(self) -> str:
+               return 'TestCase(' + str(self.status) + ',' + self.name + ',' + str(self.log) + ')'
 
-       def __repr__(self):
+       def __repr__(self) -> str:
                return str(self)
 
 class TestStatus(Enum):
@@ -51,7 +51,7 @@ kunit_start_re = re.compile(r'TAP version [0-9]+$')
 kunit_end_re = re.compile('(List of all partitions:|'
                          'Kernel panic - not syncing: VFS:)')
 
-def isolate_kunit_output(kernel_output):
+def isolate_kunit_output(kernel_output) -> Iterator[str]:
        started = False
        for line in kernel_output:
                line = line.rstrip()  # line always has a trailing \n
@@ -64,7 +64,7 @@ def isolate_kunit_output(kernel_output):
                elif started:
                        yield line[prefix_len:] if prefix_len > 0 else line
 
-def raw_output(kernel_output):
+def raw_output(kernel_output) -> None:
        for line in kernel_output:
                print(line.rstrip())
 
@@ -72,36 +72,36 @@ DIVIDER = '=' * 60
 
 RESET = '\033[0;0m'
 
-def red(text):
+def red(text) -> str:
        return '\033[1;31m' + text + RESET
 
-def yellow(text):
+def yellow(text) -> str:
        return '\033[1;33m' + text + RESET
 
-def green(text):
+def green(text) -> str:
        return '\033[1;32m' + text + RESET
 
-def print_with_timestamp(message):
+def print_with_timestamp(message) -> None:
        print('[%s] %s' % (datetime.now().strftime('%H:%M:%S'), message))
 
-def format_suite_divider(message):
+def format_suite_divider(message) -> str:
        return '======== ' + message + ' ========'
 
-def print_suite_divider(message):
+def print_suite_divider(message) -> None:
        print_with_timestamp(DIVIDER)
        print_with_timestamp(format_suite_divider(message))
 
-def print_log(log):
+def print_log(log) -> None:
        for m in log:
                print_with_timestamp(m)
 
 TAP_ENTRIES = re.compile(r'^(TAP|[\s]*ok|[\s]*not ok|[\s]*[0-9]+\.\.[0-9]+|[\s]*#).*$')
 
-def consume_non_diagnositic(lines: List[str]) -> None:
+def consume_non_diagnostic(lines: List[str]) -> None:
        while lines and not TAP_ENTRIES.match(lines[0]):
                lines.pop(0)
 
-def save_non_diagnositic(lines: List[str], test_case: TestCase) -> None:
+def save_non_diagnostic(lines: List[str], test_case: TestCase) -> None:
        while lines and not TAP_ENTRIES.match(lines[0]):
                test_case.log.append(lines[0])
                lines.pop(0)
@@ -113,7 +113,7 @@ OK_NOT_OK_SUBTEST = re.compile(r'^[\s]+(ok|not ok) [0-9]+ - (.*)$')
 OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) ([0-9]+) - (.*)$')
 
 def parse_ok_not_ok_test_case(lines: List[str], test_case: TestCase) -> bool:
-       save_non_diagnositic(lines, test_case)
+       save_non_diagnostic(lines, test_case)
        if not lines:
                test_case.status = TestStatus.TEST_CRASHED
                return True
@@ -139,7 +139,7 @@ SUBTEST_DIAGNOSTIC = re.compile(r'^[\s]+# (.*)$')
 DIAGNOSTIC_CRASH_MESSAGE = re.compile(r'^[\s]+# .*?: kunit test case crashed!$')
 
 def parse_diagnostic(lines: List[str], test_case: TestCase) -> bool:
-       save_non_diagnositic(lines, test_case)
+       save_non_diagnostic(lines, test_case)
        if not lines:
                return False
        line = lines[0]
@@ -155,7 +155,7 @@ def parse_diagnostic(lines: List[str], test_case: TestCase) -> bool:
 
 def parse_test_case(lines: List[str]) -> Optional[TestCase]:
        test_case = TestCase()
-       save_non_diagnositic(lines, test_case)
+       save_non_diagnostic(lines, test_case)
        while parse_diagnostic(lines, test_case):
                pass
        if parse_ok_not_ok_test_case(lines, test_case):
@@ -166,7 +166,7 @@ def parse_test_case(lines: List[str]) -> Optional[TestCase]:
 SUBTEST_HEADER = re.compile(r'^[\s]+# Subtest: (.*)$')
 
 def parse_subtest_header(lines: List[str]) -> Optional[str]:
-       consume_non_diagnositic(lines)
+       consume_non_diagnostic(lines)
        if not lines:
                return None
        match = SUBTEST_HEADER.match(lines[0])
@@ -179,7 +179,7 @@ def parse_subtest_header(lines: List[str]) -> Optional[str]:
 SUBTEST_PLAN = re.compile(r'[\s]+[0-9]+\.\.([0-9]+)')
 
 def parse_subtest_plan(lines: List[str]) -> Optional[int]:
-       consume_non_diagnositic(lines)
+       consume_non_diagnostic(lines)
        match = SUBTEST_PLAN.match(lines[0])
        if match:
                lines.pop(0)
@@ -202,7 +202,7 @@ def max_status(left: TestStatus, right: TestStatus) -> TestStatus:
 def parse_ok_not_ok_test_suite(lines: List[str],
                               test_suite: TestSuite,
                               expected_suite_index: int) -> bool:
-       consume_non_diagnositic(lines)
+       consume_non_diagnostic(lines)
        if not lines:
                test_suite.status = TestStatus.TEST_CRASHED
                return False
@@ -224,18 +224,17 @@ def parse_ok_not_ok_test_suite(lines: List[str],
        else:
                return False
 
-def bubble_up_errors(to_status, status_container_list) -> TestStatus:
-       status_list = map(to_status, status_container_list)
-       return reduce(max_status, status_list, TestStatus.SUCCESS)
+def bubble_up_errors(statuses: Iterable[TestStatus]) -> TestStatus:
+       return reduce(max_status, statuses, TestStatus.SUCCESS)
 
 def bubble_up_test_case_errors(test_suite: TestSuite) -> TestStatus:
-       max_test_case_status = bubble_up_errors(lambda x: x.status, test_suite.cases)
+       max_test_case_status = bubble_up_errors(x.status for x in test_suite.cases)
        return max_status(max_test_case_status, test_suite.status)
 
 def parse_test_suite(lines: List[str], expected_suite_index: int) -> Optional[TestSuite]:
        if not lines:
                return None
-       consume_non_diagnositic(lines)
+       consume_non_diagnostic(lines)
        test_suite = TestSuite()
        test_suite.status = TestStatus.SUCCESS
        name = parse_subtest_header(lines)
@@ -264,7 +263,7 @@ def parse_test_suite(lines: List[str], expected_suite_index: int) -> Optional[Te
 TAP_HEADER = re.compile(r'^TAP version 14$')
 
 def parse_tap_header(lines: List[str]) -> bool:
-       consume_non_diagnositic(lines)
+       consume_non_diagnostic(lines)
        if TAP_HEADER.match(lines[0]):
                lines.pop(0)
                return True
@@ -274,7 +273,7 @@ def parse_tap_header(lines: List[str]) -> bool:
 TEST_PLAN = re.compile(r'[0-9]+\.\.([0-9]+)')
 
 def parse_test_plan(lines: List[str]) -> Optional[int]:
-       consume_non_diagnositic(lines)
+       consume_non_diagnostic(lines)
        match = TEST_PLAN.match(lines[0])
        if match:
                lines.pop(0)
@@ -282,11 +281,11 @@ def parse_test_plan(lines: List[str]) -> Optional[int]:
        else:
                return None
 
-def bubble_up_suite_errors(test_suite_list: List[TestSuite]) -> TestStatus:
-       return bubble_up_errors(lambda x: x.status, test_suite_list)
+def bubble_up_suite_errors(test_suites: Iterable[TestSuite]) -> TestStatus:
+       return bubble_up_errors(x.status for x in test_suites)
 
 def parse_test_result(lines: List[str]) -> TestResult:
-       consume_non_diagnositic(lines)
+       consume_non_diagnostic(lines)
        if not lines or not parse_tap_header(lines):
                return TestResult(TestStatus.NO_TESTS, [], lines)
        expected_test_suite_num = parse_test_plan(lines)
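
The kunit_parser change rewrites bubble_up_errors() to fold max_status() directly over an iterable of statuses rather than over containers plus a lambda. The same worst-status fold, sketched stand-alone in C with an illustrative status ordering:

#include <stdio.h>

/* status values ordered so that a larger value is "worse" (illustrative) */
enum status { SUCCESS, SKIPPED, FAILURE, CRASHED };

static enum status max_status(enum status a, enum status b)
{
	return a > b ? a : b;
}

/* fold max_status over a list of case statuses, starting from SUCCESS */
static enum status bubble_up(const enum status *cases, int n)
{
	enum status worst = SUCCESS;

	for (int i = 0; i < n; i++)
		worst = max_status(worst, cases[i]);
	return worst;
}

int main(void)
{
	enum status cases[] = { SUCCESS, FAILURE, SUCCESS };

	printf("worst=%d\n", bubble_up(cases, 3));  /* 2 == FAILURE */
	return 0;
}
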
index 388e449..76efb1f 100755 (executable)
@@ -203,7 +203,7 @@ multipath4_test()
        t0_rp12=$(link_stats_tx_packets_get $rp12)
        t0_rp13=$(link_stats_tx_packets_get $rp13)
 
-       ip vrf exec vrf-h1 $MZ -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
+       ip vrf exec vrf-h1 $MZ $h1 -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
                -d 1msec -t udp "sp=1024,dp=0-32768"
 
        t1_rp12=$(link_stats_tx_packets_get $rp12)
index 79a2099..464821c 100755 (executable)
@@ -178,7 +178,7 @@ multipath4_test()
        t0_rp12=$(link_stats_tx_packets_get $rp12)
        t0_rp13=$(link_stats_tx_packets_get $rp13)
 
-       ip vrf exec vrf-h1 $MZ -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
+       ip vrf exec vrf-h1 $MZ $h1 -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
               -d 1msec -t udp "sp=1024,dp=0-32768"
 
        t1_rp12=$(link_stats_tx_packets_get $rp12)
index 7a1bf94..bdf450e 100755 (executable)
@@ -202,7 +202,7 @@ check_xfrm() {
        # 1: iptables -m policy rule count != 0
        rval=$1
        ip=$2
-       lret=0
+       local lret=0
 
        ip netns exec ns1 ping -q -c 1 10.0.2.$ip > /dev/null
 
@@ -287,6 +287,47 @@ check_hthresh_repeat()
        return 0
 }
 
+# insert non-overlapping policies in a random order and check that
+# all of them can be fetched using the traffic selectors.
+check_random_order()
+{
+       local ns=$1
+       local log=$2
+
+       for i in $(seq 100); do
+               ip -net $ns xfrm policy flush
+               for j in $(seq 0 16 255 | sort -R); do
+                       ip -net $ns xfrm policy add dst $j.0.0.0/24 dir out priority 10 action allow
+               done
+               for j in $(seq 0 16 255); do
+                       if ! ip -net $ns xfrm policy get dst $j.0.0.0/24 dir out > /dev/null; then
+                               echo "FAIL: $log" 1>&2
+                               return 1
+                       fi
+               done
+       done
+
+       for i in $(seq 100); do
+               ip -net $ns xfrm policy flush
+               for j in $(seq 0 16 255 | sort -R); do
+                       local addr=$(printf "e000:0000:%02x00::/56" $j)
+                       ip -net $ns xfrm policy add dst $addr dir out priority 10 action allow
+               done
+               for j in $(seq 0 16 255); do
+                       local addr=$(printf "e000:0000:%02x00::/56" $j)
+                       if ! ip -net $ns xfrm policy get dst $addr dir out > /dev/null; then
+                               echo "FAIL: $log" 1>&2
+                               return 1
+                       fi
+               done
+       done
+
+       ip -net $ns xfrm policy flush
+
+       echo "PASS: $log"
+       return 0
+}
+
 #check for needed privileges
 if [ "$(id -u)" -ne 0 ];then
        echo "SKIP: Need root privileges"
@@ -438,6 +479,8 @@ check_exceptions "exceptions and block policies after htresh change to normal"
 
 check_hthresh_repeat "policies with repeated htresh change"
 
+check_random_order ns3 "policies inserted in random order"
+
 for i in 1 2 3 4;do ip netns del ns$i;done
 
 exit $ret
index cb53a8b..c25cf7c 100644 (file)
@@ -443,7 +443,6 @@ int test_alignment_handler_integer(void)
        LOAD_DFORM_TEST(ldu);
        LOAD_XFORM_TEST(ldx);
        LOAD_XFORM_TEST(ldux);
-       LOAD_DFORM_TEST(lmw);
        STORE_DFORM_TEST(stb);
        STORE_XFORM_TEST(stbx);
        STORE_DFORM_TEST(stbu);
@@ -462,7 +461,11 @@ int test_alignment_handler_integer(void)
        STORE_XFORM_TEST(stdx);
        STORE_DFORM_TEST(stdu);
        STORE_XFORM_TEST(stdux);
+
+#ifdef __BIG_ENDIAN__
+       LOAD_DFORM_TEST(lmw);
        STORE_DFORM_TEST(stmw);
+#endif
 
        return rc;
 }
index 9e5c7f3..0af4f02 100644 (file)
@@ -290,5 +290,5 @@ static int test(void)
 
 int main(void)
 {
-       test_harness(test, "pkey_exec_prot");
+       return test_harness(test, "pkey_exec_prot");
 }
index 4f815d7..2db76e5 100644 (file)
@@ -329,5 +329,5 @@ static int test(void)
 
 int main(void)
 {
-       test_harness(test, "pkey_siginfo");
+       return test_harness(test, "pkey_siginfo");
 }
index fa9e361..8367d88 100644 (file)
@@ -1292,6 +1292,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
                return -EINVAL;
        /* We can read the guest memory with __xxx_user() later on. */
        if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
+           (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
             !access_ok((void __user *)(unsigned long)mem->userspace_addr,
                        mem->memory_size))
                return -EINVAL;
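
The added memslot check rejects user-space addresses that still carry tag bits, so the address that was range-checked here is the same one accessed later. A stand-alone sketch of the idea, assuming arm64-style top-byte tags; untag() is illustrative and not the kernel's arch-specific untagged_addr().

#include <stdint.h>
#include <stdio.h>

/* illustrative top-byte-ignore style untagging: clear bits 63..56 of a VA */
static uint64_t untag(uint64_t addr)
{
	return addr & ~(0xffULL << 56);
}

int main(void)
{
	uint64_t tagged = 0x1200007f12345000ULL;   /* tag 0x12 in the top byte */

	/* the new check: reject the memslot if the address carries tag bits */
	if (tagged != untag(tagged))
		printf("address 0x%llx is tagged, would be rejected\n",
		       (unsigned long long)tagged);
	return 0;
}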