Merge tag 'char-misc-5.7-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh...
author	Linus Torvalds <torvalds@linux-foundation.org>
	Fri, 8 May 2020 16:11:53 +0000 (09:11 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Fri, 8 May 2020 16:11:53 +0000 (09:11 -0700)
Pull char/misc driver fixes from Greg KH:
 "Here are some small driver fixes for 5.7-rc5 that resolve a number of
  minor reported issues:

   - mhi bus driver fixes found as people actually use the code

   - phy driver fixes and compat string additions

   - MOST driver fix due to link order changing when the core moved out
     of staging

   - mei driver fix

   - interconnect build warning fix

  All of these have been in linux-next for a while with no reported
  issues"

* tag 'char-misc-5.7-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc:
  bus: mhi: core: Fix channel device name conflict
  bus: mhi: core: Fix typo in comment
  bus: mhi: core: Offload register accesses to the controller
  bus: mhi: core: Remove link_status() callback
  bus: mhi: core: Make sure to powerdown if mhi_sync_power_up fails
  bus: mhi: Fix parsing of mhi_flags
  mei: me: disable mei interface on LBG servers.
  phy: qualcomm: usb-hs-28nm: Prepare clocks in init
  MAINTAINERS: Add Vinod Koul as Generic PHY co-maintainer
  interconnect: qcom: Move the static keyword to the front of declaration
  most: core: use function subsys_initcall()
  bus: mhi: core: Fix a NULL vs IS_ERR check in mhi_create_devices()
  phy: qcom-qusb2: Re add "qcom,sdm845-qusb2-phy" compat string
  phy: tegra: Select USB_COMMON for usb_get_maximum_speed()

400 files changed:
Documentation/admin-guide/device-mapper/dm-integrity.rst
Documentation/devicetree/bindings/dma/socionext,uniphier-xdmac.yaml
Documentation/networking/devlink/ice.rst
Documentation/virt/kvm/index.rst
Documentation/virt/kvm/running-nested-guests.rst [new file with mode: 0644]
MAINTAINERS
Makefile
arch/arm/crypto/chacha-glue.c
arch/arm/crypto/nhpoly1305-neon-glue.c
arch/arm/crypto/poly1305-glue.c
arch/arm64/crypto/chacha-neon-glue.c
arch/arm64/crypto/nhpoly1305-neon-glue.c
arch/arm64/crypto/poly1305-glue.c
arch/arm64/kernel/vdso/Makefile
arch/arm64/kvm/guest.c
arch/arm64/kvm/hyp/entry.S
arch/arm64/kvm/hyp/hyp-entry.S
arch/arm64/kvm/hyp/sysreg-sr.c
arch/arm64/mm/hugetlbpage.c
arch/powerpc/kvm/powerpc.c
arch/riscv/Kconfig
arch/riscv/kernel/sbi.c
arch/riscv/kernel/stacktrace.c
arch/riscv/kernel/vdso/Makefile
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/priv.c
arch/s390/lib/uaccess.c
arch/s390/mm/pgalloc.c
arch/x86/crypto/blake2s-glue.c
arch/x86/crypto/chacha_glue.c
arch/x86/crypto/nhpoly1305-avx2-glue.c
arch/x86/crypto/nhpoly1305-sse2-glue.c
arch/x86/crypto/poly1305_glue.c
arch/x86/hyperv/hv_init.c
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/mshyperv.h
arch/x86/kvm/ioapic.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmenter.S
arch/x86/kvm/x86.c
block/partitions/core.c
drivers/acpi/device_pm.c
drivers/amba/bus.c
drivers/base/component.c
drivers/base/core.c
drivers/base/dd.c
drivers/base/platform.c
drivers/block/virtio_blk.c
drivers/cpufreq/intel_pstate.c
drivers/crypto/caam/caamalg.c
drivers/crypto/caam/caamhash.c
drivers/crypto/caam/caampkc.c
drivers/crypto/chelsio/chcr_ktls.c
drivers/dma-buf/dma-buf.c
drivers/dma/Kconfig
drivers/dma/dmaengine.c
drivers/dma/dmatest.c
drivers/dma/mmp_tdma.c
drivers/dma/pch_dma.c
drivers/dma/tegra20-apb-dma.c
drivers/dma/ti/k3-psil.c
drivers/dma/xilinx/xilinx_dma.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
drivers/gpu/drm/amd/display/dc/os_types.h
drivers/gpu/drm/amd/powerplay/amd_powerplay.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_hdcp.c
drivers/gpu/drm/i915/gem/i915_gem_tiling.c
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
drivers/gpu/drm/i915/gt/intel_timeline.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/ingenic/ingenic-drm.c
drivers/gpu/drm/qxl/qxl_cmd.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/qxl/qxl_draw.c
drivers/gpu/drm/qxl/qxl_image.c
drivers/gpu/drm/qxl/qxl_ioctl.c
drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
drivers/gpu/drm/virtio/virtgpu_drv.h
drivers/gpu/drm/virtio/virtgpu_gem.c
drivers/gpu/drm/virtio/virtgpu_ioctl.c
drivers/gpu/drm/virtio/virtgpu_kms.c
drivers/hid/Kconfig
drivers/hid/hid-alps.c
drivers/hid/hid-ids.h
drivers/hid/hid-lg-g15.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-quirks.c
drivers/hid/i2c-hid/i2c-hid-core.c
drivers/hid/usbhid/hid-core.c
drivers/hid/usbhid/usbhid.h
drivers/hid/wacom_sys.c
drivers/hid/wacom_wac.c
drivers/hv/hv.c
drivers/hv/hv_trace.h
drivers/hv/vmbus_drv.c
drivers/i2c/busses/i2c-amd-mp2-pci.c
drivers/i2c/busses/i2c-aspeed.c
drivers/i2c/busses/i2c-bcm-iproc.c
drivers/i2c/busses/i2c-tegra.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/rdma_core.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/i40iw/i40iw_ctrl.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/sw/rdmavt/cq.c
drivers/infiniband/sw/rdmavt/mmap.c
drivers/infiniband/sw/rdmavt/qp.c
drivers/infiniband/sw/rdmavt/srq.c
drivers/infiniband/sw/siw/siw_qp_tx.c
drivers/iommu/Kconfig
drivers/iommu/amd_iommu_init.c
drivers/iommu/intel-iommu.c
drivers/iommu/iommu.c
drivers/iommu/qcom_iommu.c
drivers/md/dm-mpath.c
drivers/md/dm-verity-fec.c
drivers/md/dm-writecache.c
drivers/mmc/core/mmc_ops.c
drivers/mmc/host/cqhci.c
drivers/mmc/host/meson-mx-sdio.c
drivers/mmc/host/sdhci-msm.c
drivers/mmc/host/sdhci-pci-core.c
drivers/mmc/host/sdhci-xenon.c
drivers/net/dsa/mv88e6xxx/Kconfig
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/ocelot/felix.c
drivers/net/dsa/ocelot/felix.h
drivers/net/dsa/ocelot/felix_vsc9959.c
drivers/net/dsa/sja1105/Kconfig
drivers/net/dsa/sja1105/sja1105_ptp.c
drivers/net/ethernet/amazon/ena/ena_netdev.h
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
drivers/net/ethernet/broadcom/bgmac-platform.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
drivers/net/ethernet/cadence/Kconfig
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/cavium/Kconfig
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
drivers/net/ethernet/moxa/moxart_ether.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/mscc/ocelot_regs.c
drivers/net/ethernet/natsemi/jazzsonic.c
drivers/net/ethernet/netronome/nfp/abm/main.c
drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/stmicro/stmmac/dwmac5.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/ti/Kconfig
drivers/net/ethernet/ti/am65-cpsw-nuss.c
drivers/net/ethernet/toshiba/tc35815.c
drivers/net/gtp.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/ipa/gsi.c
drivers/net/ipa/gsi_reg.h
drivers/net/ipa/ipa_endpoint.c
drivers/net/macsec.c
drivers/net/phy/dp83640.c
drivers/net/phy/dp83822.c
drivers/net/phy/dp83tc811.c
drivers/net/phy/marvell10g.c
drivers/net/usb/qmi_wwan.c
drivers/net/wireguard/queueing.c
drivers/net/wireguard/receive.c
drivers/net/wireguard/selftest/ratelimiter.c
drivers/net/wireguard/send.c
drivers/net/wireguard/socket.c
drivers/nvme/host/core.c
drivers/platform/chrome/cros_ec_sensorhub.c
drivers/platform/chrome/cros_ec_sensorhub_ring.c
drivers/platform/x86/asus-nb-wmi.c
drivers/platform/x86/intel-uncore-frequency.c
drivers/platform/x86/intel_pmc_core.c
drivers/platform/x86/intel_pmc_core.h
drivers/platform/x86/surface3_power.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/xiaomi-wmi.c
drivers/regulator/core.c
drivers/s390/net/qeth_core_main.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi_lib.c
drivers/staging/gasket/gasket_core.c
drivers/staging/ks7010/TODO
drivers/target/target_core_iblock.c
drivers/thunderbolt/usb4.c
drivers/tty/hvc/Kconfig
drivers/tty/serial/Kconfig
drivers/tty/serial/bcm63xx_uart.c
drivers/tty/serial/xilinx_uartps.c
drivers/tty/vt/vt.c
drivers/usb/chipidea/ci_hdrc_msm.c
drivers/usb/core/devio.c
drivers/usb/core/message.c
drivers/usb/serial/garmin_gps.c
drivers/usb/serial/qcserial.c
drivers/usb/storage/unusual_uas.h
drivers/usb/typec/mux/intel_pmc_mux.c
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/vsock.c
fs/btrfs/backref.c
fs/btrfs/block-group.c
fs/btrfs/discard.h
fs/btrfs/disk-io.c
fs/btrfs/relocation.c
fs/btrfs/transaction.c
fs/btrfs/tree-log.c
fs/configfs/dir.c
fs/coredump.c
fs/eventpoll.c
fs/io_uring.c
fs/ioctl.c
fs/iomap/fiemap.c
fs/nfs/nfs3acl.c
fs/nfs/nfs4proc.c
fs/nfs/pnfs.c
fs/nfs/super.c
fs/ocfs2/dlmfs/dlmfs.c
fs/pnode.c
fs/super.c
include/linux/amba/bus.h
include/linux/dma-buf.h
include/linux/dmaengine.h
include/linux/fs.h
include/linux/lsm_hook_defs.h
include/linux/nfs_xdr.h
include/linux/platform_data/cros_ec_sensorhub.h
include/linux/platform_device.h
include/linux/sunrpc/clnt.h
include/linux/tcp.h
include/linux/tty.h
include/linux/virtio_net.h
include/linux/virtio_vsock.h
include/net/flow_offload.h
include/net/inet_ecn.h
include/net/ip6_fib.h
include/net/mptcp.h
include/net/net_namespace.h
include/net/sch_generic.h
include/soc/mscc/ocelot.h
include/trace/events/gpu_mem.h
include/trace/events/rpcrdma.h
include/uapi/drm/amdgpu_drm.h
include/uapi/linux/bpf.h
include/uapi/linux/dlm_device.h
include/uapi/linux/dma-buf.h
include/uapi/linux/fiemap.h
include/uapi/linux/hyperv.h
include/uapi/linux/if_arcnet.h
include/uapi/linux/mmc/ioctl.h
include/uapi/linux/net_dropmon.h
include/uapi/linux/netfilter_bridge/ebt_among.h
include/uapi/scsi/scsi_bsg_fc.h
init/main.c
ipc/mqueue.c
kernel/kcov.c
kernel/power/hibernate.c
kernel/trace/preemptirq_delay_test.c
kernel/trace/trace.c
kernel/trace/trace_boot.c
kernel/trace/trace_kprobe.c
kernel/umh.c
lib/Kconfig.ubsan
lib/kunit/test.c
mm/memcontrol.c
mm/page_alloc.c
mm/percpu.c
mm/slub.c
mm/vmscan.c
net/atm/common.c
net/atm/lec.c
net/batman-adv/bat_v_ogm.c
net/batman-adv/network-coding.c
net/batman-adv/sysfs.c
net/bridge/br_netlink.c
net/core/devlink.c
net/core/drop_monitor.c
net/core/neighbour.c
net/core/sock.c
net/dsa/dsa2.c
net/dsa/master.c
net/dsa/slave.c
net/hsr/hsr_slave.c
net/ipv4/tcp_input.c
net/ipv6/route.c
net/ipv6/seg6.c
net/mptcp/options.c
net/mptcp/protocol.c
net/mptcp/protocol.h
net/mptcp/subflow.c
net/netfilter/nf_nat_proto.c
net/netfilter/nfnetlink_osf.c
net/sched/cls_api.c
net/sched/sch_choke.c
net/sched/sch_fq_codel.c
net/sched/sch_sfq.c
net/sched/sch_skbprio.c
net/sunrpc/clnt.c
net/sunrpc/xprtrdma/rpc_rdma.c
net/sunrpc/xprtrdma/verbs.c
net/tipc/topsrv.c
net/tls/tls_sw.c
net/vmw_vsock/virtio_transport_common.c
net/x25/x25_subr.c
samples/trace_events/trace-events-sample.h
scripts/decodecode
scripts/gcc-plugins/Makefile
scripts/gcc-plugins/gcc-common.h
scripts/gcc-plugins/stackleak_plugin.c
scripts/gdb/linux/rbtree.py
scripts/kallsyms.c
security/selinux/hooks.c
security/selinux/ss/conditional.c
sound/core/oss/pcm_plugin.c
sound/isa/opti9xx/miro.c
sound/isa/opti9xx/opti92x-ad1848.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/usb/line6/podhd.c
sound/usb/quirks.c
tools/bootconfig/main.c
tools/testing/selftests/filesystems/epoll/epoll_wakeup_test.c
tools/testing/selftests/ftrace/ftracetest
tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter-stack.tc
tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter.tc
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-notrace-pid.tc
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-stacktrace.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_mod_trace.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_profiler.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_stack_tracer.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc
tools/testing/selftests/ftrace/test.d/functions
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_ftrace.tc
tools/testing/selftests/gpio/Makefile
tools/testing/selftests/intel_pstate/Makefile
tools/testing/selftests/kselftest_deps.sh [new file with mode: 0755]
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/include/evmcs.h
tools/testing/selftests/kvm/lib/x86_64/vmx.c
tools/testing/selftests/memfd/Makefile
tools/testing/selftests/net/tcp_mmap.c
tools/testing/selftests/wireguard/netns.sh
tools/testing/selftests/wireguard/qemu/arch/powerpc64le.config
tools/testing/selftests/wireguard/qemu/debug.config
virt/kvm/arm/hyp/aarch32.c
virt/kvm/arm/psci.c
virt/kvm/arm/vgic/vgic-init.c
virt/kvm/arm/vgic/vgic-its.c
virt/kvm/arm/vgic/vgic-mmio-v2.c
virt/kvm/arm/vgic/vgic-mmio-v3.c
virt/kvm/arm/vgic/vgic-mmio.c
virt/kvm/arm/vgic/vgic-mmio.h

index c00f9f1..8439d2a 100644 (file)
@@ -182,12 +182,15 @@ fix_padding
        space-efficient. If this option is not present, large padding is
        used - that is for compatibility with older kernels.
 
-
-The journal mode (D/J), buffer_sectors, journal_watermark, commit_time can
-be changed when reloading the target (load an inactive table and swap the
-tables with suspend and resume). The other arguments should not be changed
-when reloading the target because the layout of disk data depend on them
-and the reloaded target would be non-functional.
+allow_discards
+       Allow block discard requests (a.k.a. TRIM) for the integrity device.
+       Discards are only allowed to devices using internal hash.
+
+The journal mode (D/J), buffer_sectors, journal_watermark, commit_time and
+allow_discards can be changed when reloading the target (load an inactive
+table and swap the tables with suspend and resume). The other arguments
+should not be changed when reloading the target because the layout of disk
+data depends on them and the reloaded target would be non-functional.
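+
+As an illustration, discards could be enabled on an already-running
+target by loading an updated table and swapping it in (the device path,
+size, and hash below are example values; internal hash is assumed,
+since discards require it)::
+
+    dmsetup reload integ --table \
+        "0 2097152 integrity /dev/sdb 0 4 J 2 internal_hash:crc32c allow_discards"
+    dmsetup suspend integ
+    dmsetup resume integ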
 
 
 The layout of the formatted block device:
index 86cfb59..371f187 100644 (file)
@@ -22,9 +22,7 @@ properties:
     const: socionext,uniphier-xdmac
 
   reg:
-    items:
-      - description: XDMAC base register region (offset and length)
-      - description: XDMAC extension register region (offset and length)
+    maxItems: 1
 
   interrupts:
     maxItems: 1
@@ -49,12 +47,13 @@ required:
   - reg
   - interrupts
   - "#dma-cells"
+  - dma-channels
 
 examples:
   - |
     xdmac: dma-controller@5fc10000 {
         compatible = "socionext,uniphier-xdmac";
-        reg = <0x5fc10000 0x1000>, <0x5fc20000 0x800>;
+        reg = <0x5fc10000 0x5300>;
         interrupts = <0 188 4>;
         #dma-cells = <2>;
         dma-channels = <16>;
index 5b58fc4..4574352 100644 (file)
@@ -61,8 +61,8 @@ The ``ice`` driver reports the following versions
       - running
       - ICE OS Default Package
       - The name of the DDP package that is active in the device. The DDP
-        package is loaded by the driver during initialization. Each varation
-        of DDP package shall have a unique name.
+        package is loaded by the driver during initialization. Each
+        variation of the DDP package has a unique name.
     * - ``fw.app``
       - running
       - 1.3.1.0
index dcc2526..b6833c7 100644 (file)
@@ -28,3 +28,5 @@ KVM
    arm/index
 
    devices/index
+
+   running-nested-guests
diff --git a/Documentation/virt/kvm/running-nested-guests.rst b/Documentation/virt/kvm/running-nested-guests.rst
new file mode 100644 (file)
index 0000000..d0a1fc7
--- /dev/null
@@ -0,0 +1,276 @@
+==============================
+Running nested guests with KVM
+==============================
+
+A nested guest is a guest that runs inside another guest (the guest
+hypervisor can be KVM-based or a different hypervisor).  The
+straightforward example is a KVM guest that in turn runs on a KVM guest
+(the rest of this document is built on this example)::
+
+              .----------------.  .----------------.
+              |                |  |                |
+              |      L2        |  |      L2        |
+              | (Nested Guest) |  | (Nested Guest) |
+              |                |  |                |
+              |----------------'--'----------------|
+              |                                    |
+              |       L1 (Guest Hypervisor)        |
+              |          KVM (/dev/kvm)            |
+              |                                    |
+      .------------------------------------------------------.
+      |                 L0 (Host Hypervisor)                 |
+      |                    KVM (/dev/kvm)                    |
+      |------------------------------------------------------|
+      |        Hardware (with virtualization extensions)     |
+      '------------------------------------------------------'
+
+Terminology:
+
+- L0 – level-0; the bare metal host, running KVM
+
+- L1 – level-1 guest; a VM running on L0; also called the "guest
+  hypervisor", as it itself is capable of running KVM.
+
+- L2 – level-2 guest; a VM running on L1, this is the "nested guest"
+
+.. note:: The above diagram is modelled after the x86 architecture;
+          s390x, ppc64 and other architectures are likely to have
+          a different design for nesting.
+
+          For example, s390x always has an LPAR (LogicalPARtition)
+          hypervisor running on bare metal, adding another layer and
+          resulting in at least four levels in a nested setup — L0 (bare
+          metal, running the LPAR hypervisor), L1 (host hypervisor), L2
+          (guest hypervisor), L3 (nested guest).
+
+          This document will stick with the three-level terminology (L0,
+          L1, and L2) for all architectures; and will largely focus on
+          x86.
+
+
+Use Cases
+---------
+
+There are several scenarios where nested KVM can be useful, to name a
+few:
+
+- As a developer, you want to test your software on different operating
+  systems (OSes).  Instead of renting multiple VMs from a Cloud
+  Provider, using nested KVM lets you rent a large enough "guest
+  hypervisor" (level-1 guest).  This in turn allows you to create
+  multiple nested guests (level-2 guests), running different OSes, on
+  which you can develop and test your software.
+
+- Live migration of "guest hypervisors" and their nested guests, for
+  load balancing, disaster recovery, etc.
+
+- VM image creation tools (e.g. ``virt-install``) often run their own
+  VM, and users expect these to work inside a VM.
+
+- Some OSes use virtualization internally for security (e.g. to let
+  applications run safely in isolation).
+
+
+Enabling "nested" (x86)
+-----------------------
+
+From Linux kernel v4.19 onwards, the ``nested`` KVM parameter is enabled
+by default for Intel and AMD.  (Though your Linux distribution might
+override this default.)
+
+If you are running a Linux kernel older than v4.19, enable nesting by
+setting the ``nested`` KVM module parameter to ``Y`` or ``1``.  To
+persist this setting across reboots, you can add it in a config file, as
+shown below:
+
+1. On the bare metal host (L0), list the kernel modules and ensure that
+   the KVM modules are loaded::
+
+    $ lsmod | grep -i kvm
+    kvm_intel             133627  0
+    kvm                   435079  1 kvm_intel
+
+2. Show information for the ``kvm_intel`` module::
+
+    $ modinfo kvm_intel | grep -i nested
+    parm:           nested:bool
+
+3. For the nested KVM configuration to persist across reboots, place the
+   below in ``/etc/modprobe.d/kvm_intel.conf`` (create the file if it
+   doesn't exist)::
+
+    $ cat /etc/modprobe.d/kvm_intel.conf
+    options kvm-intel nested=y
+
+4. Unload and reload the KVM Intel module::
+
+    $ sudo rmmod kvm-intel
+    $ sudo modprobe kvm-intel
+
+5. Verify that the ``nested`` parameter for KVM is enabled::
+
+    $ cat /sys/module/kvm_intel/parameters/nested
+    Y
+
+For AMD hosts, the process is the same as above, except that the module
+name is ``kvm-amd``.
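+
+For example, the equivalent persistent configuration on an AMD host
+could look like this (the file name is illustrative)::
+
+    $ cat /etc/modprobe.d/kvm_amd.conf
+    options kvm-amd nested=1
+
+    $ cat /sys/module/kvm_amd/parameters/nested
+    1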
+
+
+Additional nested-related kernel parameters (x86)
+-------------------------------------------------
+
+If your hardware is sufficiently advanced (an Intel Haswell processor or
+newer, which has more recent hardware virt extensions), the following
+additional features will also be enabled by default on your bare metal
+host (L0): "Shadow VMCS (Virtual Machine Control Structure)" and APIC
+virtualization.  Parameters for Intel hosts::
+
+    $ cat /sys/module/kvm_intel/parameters/enable_shadow_vmcs
+    Y
+
+    $ cat /sys/module/kvm_intel/parameters/enable_apicv
+    Y
+
+    $ cat /sys/module/kvm_intel/parameters/ept
+    Y
+
+.. note:: If you suspect your L2 (i.e. nested guest) is running slower,
+          ensure the above are enabled (particularly
+          ``enable_shadow_vmcs`` and ``ept``).
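+
+Should any of them read ``N``, they can be re-enabled with module
+options, analogous to ``nested`` above (an illustrative sketch)::
+
+    $ cat /etc/modprobe.d/kvm_intel.conf
+    options kvm-intel nested=y enable_shadow_vmcs=y enable_apicv=y ept=y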
+
+
+Starting a nested guest (x86)
+-----------------------------
+
+Once your bare metal host (L0) is configured for nesting, you should be
+able to start an L1 guest with::
+
+    $ qemu-kvm -cpu host [...]
+
+The above will pass through the host CPU's capabilities as-is to the
+guest; or, for better live migration compatibility, use a named CPU
+model supported by QEMU, e.g.::
+
+    $ qemu-kvm -cpu Haswell-noTSX-IBRS,vmx=on
+
+The guest hypervisor will then be capable of running a nested guest
+with accelerated KVM.
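+
+Inside the L1 guest, a quick way to confirm that accelerated KVM is
+actually available before starting L2 (an illustrative check)::
+
+    $ ls /dev/kvm
+    /dev/kvm
+    $ grep -c -w -E 'vmx|svm' /proc/cpuinfo
+    2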
+
+
+Enabling "nested" (s390x)
+-------------------------
+
+1. On the host hypervisor (L0), enable the ``nested`` parameter on
+   s390x::
+
+    $ rmmod kvm
+    $ modprobe kvm nested=1
+
+.. note:: On s390x, the kernel parameter ``hpage`` is mutually exclusive
+          with the ``nested`` parameter — i.e. to be able to enable
+          ``nested``, the ``hpage`` parameter *must* be disabled.
+
+2. The guest hypervisor (L1) must be provided with the ``sie`` CPU
+   feature — with QEMU, this can be done by using "host passthrough"
+   (via the command-line ``-cpu host``).
+
+3. Now the KVM module can be loaded in the L1 (guest hypervisor)::
+
+    $ modprobe kvm
+
+
+Live migration with nested KVM
+------------------------------
+
+Migrating an L1 guest, with a *live* nested guest in it, to another
+bare metal host works as of Linux kernel 5.3 and QEMU 4.2.0 for
+Intel x86 systems, and even on older versions for s390x.
+
+On AMD systems, once an L1 guest has started an L2 guest, the L1 guest
+should no longer be migrated or saved (refer to QEMU documentation on
+"savevm"/"loadvm") until the L2 guest shuts down.  Attempting to migrate
+or save-and-load an L1 guest while an L2 guest is running will result in
+undefined behavior.  You might see a ``kernel BUG!`` entry in ``dmesg``, a
+kernel 'oops', or an outright kernel panic.  Such a migrated or loaded L1
+guest can no longer be considered stable or secure, and must be restarted.
+Migrating an L1 guest merely configured to support nesting, while not
+actually running L2 guests, is expected to function normally even on AMD
+systems but may fail once guests are started.
+
+Migrating an L2 guest is always expected to succeed, so all the following
+scenarios should work even on AMD systems:
+
+- Migrating a nested guest (L2) to another L1 guest on the *same* bare
+  metal host.
+
+- Migrating a nested guest (L2) to another L1 guest on a *different*
+  bare metal host.
+
+- Migrating a nested guest (L2) to a bare metal host.
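+
+For instance, with libvirt an L2 guest can be live-migrated like any
+other guest (the domain and host names are placeholders)::
+
+    $ virsh migrate --live l2-guest qemu+ssh://other-host/system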
+
+Reporting bugs from nested setups
+-----------------------------------
+
+Debugging "nested" problems can involve sifting through log files across
+L0, L1 and L2; this can result in tedious back-n-forth between the bug
+reporter and the bug fixer.
+
+- Mention that you are in a "nested" setup.  If you are running any kind
+  of "nesting" at all, say so.  Unfortunately, this needs to be called
+  out because when reporting bugs, people tend to forget to even
+  *mention* that they're using nested virtualization.
+
+- Ensure you are actually running KVM on KVM.  Sometimes people do not
+  have KVM enabled for their guest hypervisor (L1), which results in
+  them running with pure emulation (what QEMU calls "TCG") while they
+  think they're running nested KVM.  This confuses "nested virt" (which
+  could also mean QEMU on KVM) with "nested KVM" (KVM on KVM).
+
+Information to collect (generic)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The following is not an exhaustive list, but a very good starting point:
+
+  - Kernel, libvirt, and QEMU version from L0
+
+  - Kernel, libvirt, and QEMU version from L1
+
+  - QEMU command-line of L1 -- when using libvirt, you'll find it here:
+    ``/var/log/libvirt/qemu/instance.log``
+
+  - QEMU command-line of L2 -- as above, when using libvirt, get the
+    complete libvirt-generated QEMU command-line
+
+  - ``cat /proc/cpuinfo`` from L0
+
+  - ``cat /proc/cpuinfo`` from L1
+
+  - ``lscpu`` from L0
+
+  - ``lscpu`` from L1
+
+  - Full ``dmesg`` output from L0
+
+  - Full ``dmesg`` output from L1
+
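+As a convenience, the version details can be captured in one go on each
+level (the commands are illustrative; adjust the binary names to your
+distribution)::
+
+    $ uname -r > versions.txt
+    $ qemu-system-x86_64 --version >> versions.txt
+    $ libvirtd --version >> versions.txt
+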
+x86-specific info to collect
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Both of the below commands, ``x86info`` and ``dmidecode``, should be
+available on most Linux distributions under those names:
+
+  - Output of: ``x86info -a`` from L0
+
+  - Output of: ``x86info -a`` from L1
+
+  - Output of: ``dmidecode`` from L0
+
+  - Output of: ``dmidecode`` from L1
+
+s390x-specific info to collect
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Along with the generic details mentioned earlier, the following is
+also recommended:
+
+  - ``/proc/sysinfo`` from L1; this will also include the info from L0
index c8205a1..3e5d116 100644 (file)
@@ -3657,7 +3657,7 @@ L:        linux-btrfs@vger.kernel.org
 S:     Maintained
 W:     http://btrfs.wiki.kernel.org/
 Q:     http://patchwork.kernel.org/project/linux-btrfs/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux.git
 F:     Documentation/filesystems/btrfs.rst
 F:     fs/btrfs/
 F:     include/linux/btrfs*
@@ -5935,9 +5935,9 @@ F:        lib/dynamic_debug.c
 DYNAMIC INTERRUPT MODERATION
 M:     Tal Gilboa <talgi@mellanox.com>
 S:     Maintained
+F:     Documentation/networking/net_dim.rst
 F:     include/linux/dim.h
 F:     lib/dim/
-F:     Documentation/networking/net_dim.rst
 
 DZ DECSTATION DZ11 SERIAL DRIVER
 M:     "Maciej W. Rozycki" <macro@linux-mips.org>
@@ -7747,11 +7747,6 @@ L:       platform-driver-x86@vger.kernel.org
 S:     Orphan
 F:     drivers/platform/x86/tc1100-wmi.c
 
-HP100: Driver for HP 10/100 Mbit/s Voice Grade Network Adapter Series
-M:     Jaroslav Kysela <perex@perex.cz>
-S:     Obsolete
-F:     drivers/staging/hp/hp100.*
-
 HPET:  High Precision Event Timers driver
 M:     Clemens Ladisch <clemens@ladisch.de>
 S:     Maintained
index 679f302..3512f7b 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 7
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
index 6fdb0ac..59da6c0 100644 (file)
@@ -91,9 +91,17 @@ void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
                return;
        }
 
-       kernel_neon_begin();
-       chacha_doneon(state, dst, src, bytes, nrounds);
-       kernel_neon_end();
+       do {
+               unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
+
+               kernel_neon_begin();
+               chacha_doneon(state, dst, src, todo, nrounds);
+               kernel_neon_end();
+
+               bytes -= todo;
+               src += todo;
+               dst += todo;
+       } while (bytes);
 }
 EXPORT_SYMBOL(chacha_crypt_arch);
 
index ae5aefc..ffa8d73 100644 (file)
@@ -30,7 +30,7 @@ static int nhpoly1305_neon_update(struct shash_desc *desc,
                return crypto_nhpoly1305_update(desc, src, srclen);
 
        do {
-               unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
+               unsigned int n = min_t(unsigned int, srclen, SZ_4K);
 
                kernel_neon_begin();
                crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon);
index ceec04e..13cfef4 100644 (file)
@@ -160,13 +160,20 @@ void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
                unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE);
 
                if (static_branch_likely(&have_neon) && do_neon) {
-                       kernel_neon_begin();
-                       poly1305_blocks_neon(&dctx->h, src, len, 1);
-                       kernel_neon_end();
+                       do {
+                               unsigned int todo = min_t(unsigned int, len, SZ_4K);
+
+                               kernel_neon_begin();
+                               poly1305_blocks_neon(&dctx->h, src, todo, 1);
+                               kernel_neon_end();
+
+                               len -= todo;
+                               src += todo;
+                       } while (len);
                } else {
                        poly1305_blocks_arm(&dctx->h, src, len, 1);
+                       src += len;
                }
-               src += len;
                nbytes %= POLY1305_BLOCK_SIZE;
        }
 
index 37ca3e8..af2bbca 100644 (file)
@@ -87,9 +87,17 @@ void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
            !crypto_simd_usable())
                return chacha_crypt_generic(state, dst, src, bytes, nrounds);
 
-       kernel_neon_begin();
-       chacha_doneon(state, dst, src, bytes, nrounds);
-       kernel_neon_end();
+       do {
+               unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
+
+               kernel_neon_begin();
+               chacha_doneon(state, dst, src, todo, nrounds);
+               kernel_neon_end();
+
+               bytes -= todo;
+               src += todo;
+               dst += todo;
+       } while (bytes);
 }
 EXPORT_SYMBOL(chacha_crypt_arch);
 
index 895d372..c5405e6 100644 (file)
@@ -30,7 +30,7 @@ static int nhpoly1305_neon_update(struct shash_desc *desc,
                return crypto_nhpoly1305_update(desc, src, srclen);
 
        do {
-               unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
+               unsigned int n = min_t(unsigned int, srclen, SZ_4K);
 
                kernel_neon_begin();
                crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon);
index e97b092..f33ada7 100644 (file)
@@ -143,13 +143,20 @@ void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
                unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE);
 
                if (static_branch_likely(&have_neon) && crypto_simd_usable()) {
-                       kernel_neon_begin();
-                       poly1305_blocks_neon(&dctx->h, src, len, 1);
-                       kernel_neon_end();
+                       do {
+                               unsigned int todo = min_t(unsigned int, len, SZ_4K);
+
+                               kernel_neon_begin();
+                               poly1305_blocks_neon(&dctx->h, src, todo, 1);
+                               kernel_neon_end();
+
+                               len -= todo;
+                               src += todo;
+                       } while (len);
                } else {
                        poly1305_blocks(&dctx->h, src, len, 1);
+                       src += len;
                }
-               src += len;
                nbytes %= POLY1305_BLOCK_SIZE;
        }
 
index dd2514b..3862cad 100644 (file)
@@ -32,7 +32,7 @@ UBSAN_SANITIZE                        := n
 OBJECT_FILES_NON_STANDARD      := y
 KCOV_INSTRUMENT                        := n
 
-CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny
+CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny -fasynchronous-unwind-tables
 
 ifneq ($(c-gettimeofday-y),)
   CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
index 23ebe51..50a279d 100644 (file)
@@ -200,6 +200,13 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        }
 
        memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
+
+       if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
+               int i;
+
+               for (i = 0; i < 16; i++)
+                       *vcpu_reg32(vcpu, i) = (u32)*vcpu_reg32(vcpu, i);
+       }
 out:
        return err;
 }
index d22d053..90186cf 100644 (file)
@@ -18,6 +18,7 @@
 
 #define CPU_GP_REG_OFFSET(x)   (CPU_GP_REGS + x)
 #define CPU_XREG_OFFSET(x)     CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
+#define CPU_SP_EL0_OFFSET      (CPU_XREG_OFFSET(30) + 8)
 
        .text
        .pushsection    .hyp.text, "ax"
        ldp     x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
 .endm
 
+.macro save_sp_el0 ctxt, tmp
+       mrs     \tmp,   sp_el0
+       str     \tmp,   [\ctxt, #CPU_SP_EL0_OFFSET]
+.endm
+
+.macro restore_sp_el0 ctxt, tmp
+       ldr     \tmp,     [\ctxt, #CPU_SP_EL0_OFFSET]
+       msr     sp_el0, \tmp
+.endm
+
 /*
  * u64 __guest_enter(struct kvm_vcpu *vcpu,
  *                  struct kvm_cpu_context *host_ctxt);
@@ -60,6 +71,9 @@ SYM_FUNC_START(__guest_enter)
        // Store the host regs
        save_callee_saved_regs x1
 
+       // Save the host's sp_el0
+       save_sp_el0     x1, x2
+
        // Now the host state is stored if we have a pending RAS SError it must
        // affect the host. If any asynchronous exception is pending we defer
        // the guest entry. The DSB isn't necessary before v8.2 as any SError
@@ -83,6 +97,9 @@ alternative_else_nop_endif
        // when this feature is enabled for kernel code.
        ptrauth_switch_to_guest x29, x0, x1, x2
 
+       // Restore the guest's sp_el0
+       restore_sp_el0 x29, x0
+
        // Restore guest regs x0-x17
        ldp     x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
        ldp     x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
@@ -130,6 +147,9 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
        // Store the guest regs x18-x29, lr
        save_callee_saved_regs x1
 
+       // Store the guest's sp_el0
+       save_sp_el0     x1, x2
+
        get_host_ctxt   x2, x3
 
        // Macro ptrauth_switch_to_guest format:
@@ -139,6 +159,9 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
        // when this feature is enabled for kernel code.
        ptrauth_switch_to_host x1, x2, x3, x4, x5
 
+       // Restore the host's sp_el0
+       restore_sp_el0 x2, x3
+
        // Now restore the host regs
        restore_callee_saved_regs x2
 
index c2a13ab..9c5cfb0 100644 (file)
@@ -198,7 +198,6 @@ SYM_CODE_END(__hyp_panic)
 .macro invalid_vector  label, target = __hyp_panic
        .align  2
 SYM_CODE_START(\label)
-\label:
        b \target
 SYM_CODE_END(\label)
 .endm
index 75b1925..6d2df9f 100644 (file)
@@ -15,8 +15,9 @@
 /*
  * Non-VHE: Both host and guest must save everything.
  *
- * VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and pstate,
- * which are handled as part of the el2 return state) on every switch.
+ * VHE: Host and guest must save mdscr_el1 and sp_el0 (and the PC and
+ * pstate, which are handled as part of the el2 return state) on every
+ * switch (sp_el0 is being dealt with in the assembly code).
  * tpidr_el0 and tpidrro_el0 only need to be switched when going
  * to host userspace or a different VCPU.  EL1 registers only need to be
  * switched when potentially going to run a different VCPU.  The latter two
 static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
 {
        ctxt->sys_regs[MDSCR_EL1]       = read_sysreg(mdscr_el1);
-
-       /*
-        * The host arm64 Linux uses sp_el0 to point to 'current' and it must
-        * therefore be saved/restored on every entry/exit to/from the guest.
-        */
-       ctxt->gp_regs.regs.sp           = read_sysreg(sp_el0);
 }
 
 static void __hyp_text __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
@@ -99,12 +94,6 @@ NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);
 static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
 {
        write_sysreg(ctxt->sys_regs[MDSCR_EL1],   mdscr_el1);
-
-       /*
-        * The host arm64 Linux uses sp_el0 to point to 'current' and it must
-        * therefore be saved/restored on every entry/exit to/from the guest.
-        */
-       write_sysreg(ctxt->gp_regs.regs.sp,       sp_el0);
 }
 
 static void __hyp_text __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
index bbeb6a5..0be3355 100644 (file)
@@ -230,6 +230,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
                ptep = (pte_t *)pudp;
        } else if (sz == (CONT_PTE_SIZE)) {
                pmdp = pmd_alloc(mm, pudp, addr);
+               if (!pmdp)
+                       return NULL;
 
                WARN_ON(addr & (sz - 1));
                /*
index e15166b..ad2f172 100644 (file)
@@ -521,6 +521,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_IMMEDIATE_EXIT:
+       case KVM_CAP_SET_GUEST_DEBUG:
                r = 1;
                break;
        case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
index 62f7bfe..74f82cf 100644 (file)
@@ -60,7 +60,7 @@ config RISCV
        select ARCH_HAS_GIGANTIC_PAGE
        select ARCH_HAS_SET_DIRECT_MAP
        select ARCH_HAS_SET_MEMORY
-       select ARCH_HAS_STRICT_KERNEL_RWX
+       select ARCH_HAS_STRICT_KERNEL_RWX if MMU
        select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
        select SPARSEMEM_STATIC if 32BIT
        select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
index 7c24da5..f383ef5 100644 (file)
@@ -102,7 +102,7 @@ void sbi_shutdown(void)
 {
        sbi_ecall(SBI_EXT_0_1_SHUTDOWN, 0, 0, 0, 0, 0, 0, 0);
 }
-EXPORT_SYMBOL(sbi_set_timer);
+EXPORT_SYMBOL(sbi_shutdown);
 
 /**
  * sbi_clear_ipi() - Clear any pending IPIs for the calling hart.
@@ -113,7 +113,7 @@ void sbi_clear_ipi(void)
 {
        sbi_ecall(SBI_EXT_0_1_CLEAR_IPI, 0, 0, 0, 0, 0, 0, 0);
 }
-EXPORT_SYMBOL(sbi_shutdown);
+EXPORT_SYMBOL(sbi_clear_ipi);
 
 /**
  * sbi_set_timer_v01() - Program the timer for next timer event.
@@ -167,6 +167,11 @@ static int __sbi_rfence_v01(int fid, const unsigned long *hart_mask,
 
        return result;
 }
+
+static void sbi_set_power_off(void)
+{
+       pm_power_off = sbi_shutdown;
+}
 #else
 static void __sbi_set_timer_v01(uint64_t stime_value)
 {
@@ -191,6 +196,8 @@ static int __sbi_rfence_v01(int fid, const unsigned long *hart_mask,
 
        return 0;
 }
+
+static void sbi_set_power_off(void) {}
 #endif /* CONFIG_RISCV_SBI_V01 */
 
 static void __sbi_set_timer_v02(uint64_t stime_value)
@@ -540,16 +547,12 @@ static inline long sbi_get_firmware_version(void)
        return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION);
 }
 
-static void sbi_power_off(void)
-{
-       sbi_shutdown();
-}
 
 int __init sbi_init(void)
 {
        int ret;
 
-       pm_power_off = sbi_power_off;
+       sbi_set_power_off();
        ret = sbi_get_spec_version();
        if (ret > 0)
                sbi_spec_version = ret;
index 02087fe..6c85487 100644 (file)
@@ -12,6 +12,8 @@
 #include <linux/stacktrace.h>
 #include <linux/ftrace.h>
 
+register unsigned long sp_in_global __asm__("sp");
+
 #ifdef CONFIG_FRAME_POINTER
 
 struct stackframe {
@@ -19,8 +21,6 @@ struct stackframe {
        unsigned long ra;
 };
 
-register unsigned long sp_in_global __asm__("sp");
-
 void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
                             bool (*fn)(unsigned long, void *), void *arg)
 {
index 33b16f4..a4ee3a0 100644 (file)
@@ -33,15 +33,15 @@ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
        $(call if_changed,vdsold)
 
 # We also create a special relocatable object that should mirror the symbol
-# table and layout of the linked DSO.  With ld -R we can then refer to
-# these symbols in the kernel code rather than hand-coded addresses.
+# table and layout of the linked DSO. With ld --just-symbols we can then
+# refer to these symbols in the kernel code rather than hand-coded addresses.
 
 SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
        -Wl,--build-id -Wl,--hash-style=both
 $(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
        $(call if_changed,vdsold)
 
-LDFLAGS_vdso-syms.o := -r -R
+LDFLAGS_vdso-syms.o := -r --just-symbols
 $(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE
        $(call if_changed,ld)
 
index 5dcf9ff..d05bb04 100644 (file)
@@ -545,6 +545,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_S390_AIS:
        case KVM_CAP_S390_AIS_MIGRATION:
        case KVM_CAP_S390_VCPU_RESETS:
+       case KVM_CAP_SET_GUEST_DEBUG:
                r = 1;
                break;
        case KVM_CAP_S390_HPAGE_1M:
index 69a824f..8938936 100644 (file)
@@ -626,10 +626,12 @@ static int handle_pqap(struct kvm_vcpu *vcpu)
         * available for the guest are AQIC and TAPQ with the t bit set
         * since we do not set IC.3 (FIII) we currently will only intercept
         * the AQIC function code.
+        * Note: running nested under z/VM can result in intercepts for other
+        * function codes, e.g. PQAP(QCI). We do not support this and bail out.
         */
        reg0 = vcpu->run->s.regs.gprs[0];
        fc = (reg0 >> 24) & 0xff;
-       if (WARN_ON_ONCE(fc != 0x03))
+       if (fc != 0x03)
                return -EOPNOTSUPP;
 
        /* PQAP instruction is allowed for guest kernel only */
index c4f8039..0267405 100644 (file)
@@ -64,10 +64,13 @@ mm_segment_t enable_sacf_uaccess(void)
 {
        mm_segment_t old_fs;
        unsigned long asce, cr;
+       unsigned long flags;
 
        old_fs = current->thread.mm_segment;
        if (old_fs & 1)
                return old_fs;
+       /* protect against a concurrent page table upgrade */
+       local_irq_save(flags);
        current->thread.mm_segment |= 1;
        asce = S390_lowcore.kernel_asce;
        if (likely(old_fs == USER_DS)) {
@@ -83,6 +86,7 @@ mm_segment_t enable_sacf_uaccess(void)
                __ctl_load(asce, 7, 7);
                set_cpu_flag(CIF_ASCE_SECONDARY);
        }
+       local_irq_restore(flags);
        return old_fs;
 }
 EXPORT_SYMBOL(enable_sacf_uaccess);
index 498c98a..fff169d 100644 (file)
@@ -70,8 +70,20 @@ static void __crst_table_upgrade(void *arg)
 {
        struct mm_struct *mm = arg;
 
-       if (current->active_mm == mm)
-               set_user_asce(mm);
+       /* we must change all active ASCEs to avoid the creation of new TLBs */
+       if (current->active_mm == mm) {
+               S390_lowcore.user_asce = mm->context.asce;
+               if (current->thread.mm_segment == USER_DS) {
+                       __ctl_load(S390_lowcore.user_asce, 1, 1);
+                       /* Mark user-ASCE present in CR1 */
+                       clear_cpu_flag(CIF_ASCE_PRIMARY);
+               }
+               if (current->thread.mm_segment == USER_DS_SACF) {
+                       __ctl_load(S390_lowcore.user_asce, 7, 7);
+                       /* enable_sacf_uaccess does all or nothing */
+                       WARN_ON(!test_cpu_flag(CIF_ASCE_SECONDARY));
+               }
+       }
        __tlb_flush_local();
 }
 
index 06ef2d4..6737bce 100644 (file)
@@ -32,16 +32,16 @@ void blake2s_compress_arch(struct blake2s_state *state,
                           const u32 inc)
 {
        /* SIMD disables preemption, so relax after processing each page. */
-       BUILD_BUG_ON(PAGE_SIZE / BLAKE2S_BLOCK_SIZE < 8);
+       BUILD_BUG_ON(SZ_4K / BLAKE2S_BLOCK_SIZE < 8);
 
        if (!static_branch_likely(&blake2s_use_ssse3) || !crypto_simd_usable()) {
                blake2s_compress_generic(state, block, nblocks, inc);
                return;
        }
 
-       for (;;) {
+       do {
                const size_t blocks = min_t(size_t, nblocks,
-                                           PAGE_SIZE / BLAKE2S_BLOCK_SIZE);
+                                           SZ_4K / BLAKE2S_BLOCK_SIZE);
 
                kernel_fpu_begin();
                if (IS_ENABLED(CONFIG_AS_AVX512) &&
@@ -52,10 +52,8 @@ void blake2s_compress_arch(struct blake2s_state *state,
                kernel_fpu_end();
 
                nblocks -= blocks;
-               if (!nblocks)
-                       break;
                block += blocks * BLAKE2S_BLOCK_SIZE;
-       }
+       } while (nblocks);
 }
 EXPORT_SYMBOL(blake2s_compress_arch);
 
index b412c21..2225009 100644 (file)
@@ -153,9 +153,17 @@ void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
            bytes <= CHACHA_BLOCK_SIZE)
                return chacha_crypt_generic(state, dst, src, bytes, nrounds);
 
-       kernel_fpu_begin();
-       chacha_dosimd(state, dst, src, bytes, nrounds);
-       kernel_fpu_end();
+       do {
+               unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
+
+               kernel_fpu_begin();
+               chacha_dosimd(state, dst, src, todo, nrounds);
+               kernel_fpu_end();
+
+               bytes -= todo;
+               src += todo;
+               dst += todo;
+       } while (bytes);
 }
 EXPORT_SYMBOL(chacha_crypt_arch);
 
index f7567cb..80fcb85 100644 (file)
@@ -29,7 +29,7 @@ static int nhpoly1305_avx2_update(struct shash_desc *desc,
                return crypto_nhpoly1305_update(desc, src, srclen);
 
        do {
-               unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
+               unsigned int n = min_t(unsigned int, srclen, SZ_4K);
 
                kernel_fpu_begin();
                crypto_nhpoly1305_update_helper(desc, src, n, _nh_avx2);
index a661ede..cc6b7c1 100644 (file)
@@ -29,7 +29,7 @@ static int nhpoly1305_sse2_update(struct shash_desc *desc,
                return crypto_nhpoly1305_update(desc, src, srclen);
 
        do {
-               unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
+               unsigned int n = min_t(unsigned int, srclen, SZ_4K);
 
                kernel_fpu_begin();
                crypto_nhpoly1305_update_helper(desc, src, n, _nh_sse2);
index 6dfec19..dfe921e 100644 (file)
@@ -91,8 +91,8 @@ static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len,
        struct poly1305_arch_internal *state = ctx;
 
        /* SIMD disables preemption, so relax after processing each page. */
-       BUILD_BUG_ON(PAGE_SIZE < POLY1305_BLOCK_SIZE ||
-                    PAGE_SIZE % POLY1305_BLOCK_SIZE);
+       BUILD_BUG_ON(SZ_4K < POLY1305_BLOCK_SIZE ||
+                    SZ_4K % POLY1305_BLOCK_SIZE);
 
        if (!static_branch_likely(&poly1305_use_avx) ||
            (len < (POLY1305_BLOCK_SIZE * 18) && !state->is_base2_26) ||
@@ -102,8 +102,8 @@ static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len,
                return;
        }
 
-       for (;;) {
-               const size_t bytes = min_t(size_t, len, PAGE_SIZE);
+       do {
+               const size_t bytes = min_t(size_t, len, SZ_4K);
 
                kernel_fpu_begin();
                if (IS_ENABLED(CONFIG_AS_AVX512) && static_branch_likely(&poly1305_use_avx512))
@@ -113,11 +113,10 @@ static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len,
                else
                        poly1305_blocks_avx(ctx, inp, bytes, padbit);
                kernel_fpu_end();
+
                len -= bytes;
-               if (!len)
-                       break;
                inp += bytes;
-       }
+       } while (len);
 }
 
 static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
index 624f5d9..fd51bac 100644 (file)
@@ -73,7 +73,8 @@ static int hv_cpu_init(unsigned int cpu)
        struct page *pg;
 
        input_arg = (void **)this_cpu_ptr(hyperv_pcpu_input_arg);
-       pg = alloc_page(GFP_KERNEL);
+       /* hv_cpu_init() can be called with IRQs disabled from hv_resume() */
+       pg = alloc_page(irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
        if (unlikely(!pg))
                return -ENOMEM;
        *input_arg = page_address(pg);
@@ -254,6 +255,7 @@ static int __init hv_pci_init(void)
 static int hv_suspend(void)
 {
        union hv_x64_msr_hypercall_contents hypercall_msr;
+       int ret;
 
        /*
         * Reset the hypercall page as it is going to be invalidated
@@ -270,12 +272,17 @@ static int hv_suspend(void)
        hypercall_msr.enable = 0;
        wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
 
-       return 0;
+       ret = hv_cpu_die(0);
+       return ret;
 }
 
 static void hv_resume(void)
 {
        union hv_x64_msr_hypercall_contents hypercall_msr;
+       int ret;
+
+       ret = hv_cpu_init(0);
+       WARN_ON(ret);
 
        /* Re-enable the hypercall page */
        rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
@@ -288,6 +295,7 @@ static void hv_resume(void)
        hv_hypercall_pg_saved = NULL;
 }
 
+/* Note: when the ops are called, only CPU0 is online and IRQs are disabled. */
 static struct syscore_ops hv_syscore_ops = {
        .suspend        = hv_suspend,
        .resume         = hv_resume,
index 42a2d0d..0dea9f1 100644 (file)
@@ -1663,8 +1663,8 @@ void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
 static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
 {
        /* We can only post Fixed and LowPrio IRQs */
-       return (irq->delivery_mode == dest_Fixed ||
-               irq->delivery_mode == dest_LowestPrio);
+       return (irq->delivery_mode == APIC_DM_FIXED ||
+               irq->delivery_mode == APIC_DM_LOWEST);
 }
 
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
index 1c42ecb..d30805e 100644 (file)
@@ -35,6 +35,8 @@ typedef int (*hyperv_fill_flush_list_func)(
        rdmsrl(HV_X64_MSR_SINT0 + int_num, val)
 #define hv_set_synint_state(int_num, val) \
        wrmsrl(HV_X64_MSR_SINT0 + int_num, val)
+#define hv_recommend_using_aeoi() \
+       (!(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED))
 
 #define hv_get_crash_ctl(val) \
        rdmsrl(HV_X64_MSR_CRASH_CTL, val)
index 750ff0b..d057376 100644 (file)
@@ -225,12 +225,12 @@ static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
        }
 
        /*
-        * AMD SVM AVIC accelerate EOI write and do not trap,
-        * in-kernel IOAPIC will not be able to receive the EOI.
-        * In this case, we do lazy update of the pending EOI when
-        * trying to set IOAPIC irq.
+        * AMD SVM AVIC accelerates EOI write iff the interrupt is edge
+        * triggered, in which case the in-kernel IOAPIC will not be able
+        * to receive the EOI.  In this case, we do a lazy update of the
+        * pending EOI when trying to set IOAPIC irq.
         */
-       if (kvm_apicv_activated(ioapic->kvm))
+       if (edge && kvm_apicv_activated(ioapic->kvm))
                ioapic_lazy_update_eoi(ioapic, irq);
 
        /*
index cf912b4..89f7f3a 100644 (file)
@@ -345,7 +345,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
                return NULL;
 
        /* Pin the user virtual address. */
-       npinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
+       npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
        if (npinned != npages) {
                pr_err("SEV: Failure locking %lu pages.\n", npages);
                goto err;
index 2f379ba..38f6aee 100644 (file)
@@ -1752,6 +1752,8 @@ static int db_interception(struct vcpu_svm *svm)
        if (svm->vcpu.guest_debug &
            (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
                kvm_run->exit_reason = KVM_EXIT_DEBUG;
+               kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6;
+               kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7;
                kvm_run->debug.arch.pc =
                        svm->vmcb->save.cs.base + svm->vmcb->save.rip;
                kvm_run->debug.arch.exception = DB_VECTOR;
index fd78ffb..e44f33c 100644 (file)
@@ -5165,7 +5165,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
         */
                break;
        default:
-               BUG_ON(1);
+               BUG();
                break;
        }
 
index 87f3f24..51d1a82 100644 (file)
@@ -82,6 +82,9 @@ SYM_FUNC_START(vmx_vmexit)
        /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
        FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
 
+       /* Clear RFLAGS.CF and RFLAGS.ZF to preserve VM-Exit, i.e. !VM-Fail. */
+       or $1, %_ASM_AX
+
        pop %_ASM_AX
 .Lvmexit_skip_rsb:
 #endif
index c5835f9..d786c7d 100644 (file)
@@ -926,19 +926,6 @@ EXPORT_SYMBOL_GPL(kvm_set_xcr);
        __reserved_bits;                                \
 })
 
-static u64 kvm_host_cr4_reserved_bits(struct cpuinfo_x86 *c)
-{
-       u64 reserved_bits = __cr4_reserved_bits(cpu_has, c);
-
-       if (kvm_cpu_cap_has(X86_FEATURE_LA57))
-               reserved_bits &= ~X86_CR4_LA57;
-
-       if (kvm_cpu_cap_has(X86_FEATURE_UMIP))
-               reserved_bits &= ~X86_CR4_UMIP;
-
-       return reserved_bits;
-}
-
 static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
        if (cr4 & cr4_reserved_bits)
@@ -3385,6 +3372,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_GET_MSR_FEATURES:
        case KVM_CAP_MSR_PLATFORM_INFO:
        case KVM_CAP_EXCEPTION_PAYLOAD:
+       case KVM_CAP_SET_GUEST_DEBUG:
                r = 1;
                break;
        case KVM_CAP_SYNC_REGS:
@@ -9675,7 +9663,9 @@ int kvm_arch_hardware_setup(void *opaque)
        if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
                supported_xss = 0;
 
-       cr4_reserved_bits = kvm_host_cr4_reserved_bits(&boot_cpu_data);
+#define __kvm_cpu_cap_has(UNUSED_, f) kvm_cpu_cap_has(f)
+       cr4_reserved_bits = __cr4_reserved_bits(__kvm_cpu_cap_has, UNUSED_);
+#undef __kvm_cpu_cap_has
 
        if (kvm_has_tsc_control) {
                /*
@@ -9707,7 +9697,8 @@ int kvm_arch_check_processor_compat(void *opaque)
 
        WARN_ON(!irqs_disabled());
 
-       if (kvm_host_cr4_reserved_bits(c) != cr4_reserved_bits)
+       if (__cr4_reserved_bits(cpu_has, c) !=
+           __cr4_reserved_bits(cpu_has, &boot_cpu_data))
                return -EIO;
 
        return ops->check_processor_compatibility();
index bc1ded1..9ef48a8 100644 (file)
@@ -496,7 +496,7 @@ int blk_drop_partitions(struct gendisk *disk, struct block_device *bdev)
 
        if (!disk_part_scan_enabled(disk))
                return 0;
-       if (bdev->bd_part_count || bdev->bd_openers > 1)
+       if (bdev->bd_part_count)
                return -EBUSY;
        res = invalidate_partition(disk, 0);
        if (res)
index b2263ec..5832bc1 100644 (file)
@@ -273,13 +273,13 @@ int acpi_device_set_power(struct acpi_device *device, int state)
  end:
        if (result) {
                dev_warn(&device->dev, "Failed to change power state to %s\n",
-                        acpi_power_state_string(state));
+                        acpi_power_state_string(target_state));
        } else {
                device->power.state = target_state;
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Device [%s] transitioned to %s\n",
                                  device->pnp.bus_id,
-                                 acpi_power_state_string(state)));
+                                 acpi_power_state_string(target_state)));
        }
 
        return result;
index fe15236..8558b62 100644 (file)
@@ -645,6 +645,7 @@ static void amba_device_initialize(struct amba_device *dev, const char *name)
        dev->dev.release = amba_device_release;
        dev->dev.bus = &amba_bustype;
        dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
+       dev->dev.dma_parms = &dev->dma_parms;
        dev->res.name = dev_name(&dev->dev);
 }
 
index e977041..dcfbe72 100644 (file)
@@ -256,7 +256,8 @@ static int try_to_bring_up_master(struct master *master,
        ret = master->ops->bind(master->dev);
        if (ret < 0) {
                devres_release_group(master->dev, NULL);
-               dev_info(master->dev, "master bind failed: %d\n", ret);
+               if (ret != -EPROBE_DEFER)
+                       dev_info(master->dev, "master bind failed: %d\n", ret);
                return ret;
        }
 
@@ -611,8 +612,9 @@ static int component_bind(struct component *component, struct master *master,
                devres_release_group(component->dev, NULL);
                devres_release_group(master->dev, NULL);
 
-               dev_err(master->dev, "failed to bind %s (ops %ps): %d\n",
-                       dev_name(component->dev), component->ops, ret);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(master->dev, "failed to bind %s (ops %ps): %d\n",
+                               dev_name(component->dev), component->ops, ret);
        }
 
        return ret;
index 139cdf7..073045c 100644 (file)
@@ -2370,6 +2370,11 @@ u32 fw_devlink_get_flags(void)
        return fw_devlink_flags;
 }
 
+static bool fw_devlink_is_permissive(void)
+{
+       return fw_devlink_flags == DL_FLAG_SYNC_STATE_ONLY;
+}
+
 /**
  * device_add - add device to device hierarchy.
  * @dev: device.
@@ -2524,7 +2529,7 @@ int device_add(struct device *dev)
        if (fw_devlink_flags && is_fwnode_dev &&
            fwnode_has_op(dev->fwnode, add_links)) {
                fw_ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
-               if (fw_ret == -ENODEV)
+               if (fw_ret == -ENODEV && !fw_devlink_is_permissive())
                        device_link_wait_for_mandatory_supplier(dev);
                else if (fw_ret)
                        device_link_wait_for_optional_supplier(dev);
index 06ec0e8..94037be 100644 (file)
@@ -224,17 +224,9 @@ static int deferred_devs_show(struct seq_file *s, void *data)
 }
 DEFINE_SHOW_ATTRIBUTE(deferred_devs);
 
-#ifdef CONFIG_MODULES
-/*
- * In the case of modules, set the default probe timeout to
- * 30 seconds to give userland some time to load needed modules
- */
-int driver_deferred_probe_timeout = 30;
-#else
-/* In the case of !modules, no probe timeout needed */
-int driver_deferred_probe_timeout = -1;
-#endif
+int driver_deferred_probe_timeout;
 EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout);
+static DECLARE_WAIT_QUEUE_HEAD(probe_timeout_waitqueue);
 
 static int __init deferred_probe_timeout_setup(char *str)
 {
@@ -266,8 +258,8 @@ int driver_deferred_probe_check_state(struct device *dev)
                return -ENODEV;
        }
 
-       if (!driver_deferred_probe_timeout) {
-               dev_WARN(dev, "deferred probe timeout, ignoring dependency");
+       if (!driver_deferred_probe_timeout && initcalls_done) {
+               dev_warn(dev, "deferred probe timeout, ignoring dependency");
                return -ETIMEDOUT;
        }
 
@@ -284,6 +276,7 @@ static void deferred_probe_timeout_work_func(struct work_struct *work)
 
        list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe)
                dev_info(private->device, "deferred probe pending");
+       wake_up(&probe_timeout_waitqueue);
 }
 static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
 
@@ -658,6 +651,9 @@ int driver_probe_done(void)
  */
 void wait_for_device_probe(void)
 {
+       /* wait for probe timeout */
+       wait_event(probe_timeout_waitqueue, !driver_deferred_probe_timeout);
+
        /* wait for the deferred probe workqueue to finish */
        flush_work(&deferred_probe_work);
 
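With the default timeout now 0, wait_for_device_probe() gains an explicit
wait on probe_timeout_waitqueue, which deferred_probe_timeout_work_func()
wakes once the countdown hits zero. A userspace sketch of the same
wait-until-flag-clears shape, with pthreads standing in for wait_event() and
wake_up() (illustrative only; build with -pthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int probe_timeout = 10;  /* driver_deferred_probe_timeout stand-in */

    static void *timeout_worker(void *arg)
    {
            (void)arg;
            sleep(1);                       /* the delayed work firing */
            pthread_mutex_lock(&lock);
            probe_timeout = 0;
            pthread_cond_broadcast(&cond);  /* wake_up(&probe_timeout_waitqueue) */
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, timeout_worker, NULL);
            pthread_mutex_lock(&lock);
            while (probe_timeout)   /* wait_event(wq, !driver_deferred_probe_timeout) */
                    pthread_cond_wait(&cond, &lock);
            pthread_mutex_unlock(&lock);
            puts("probe wait finished");
            return pthread_join(t, NULL);
    }
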
index 5255550..b27d0f6 100644 (file)
@@ -380,6 +380,8 @@ struct platform_object {
  */
 static void setup_pdev_dma_masks(struct platform_device *pdev)
 {
+       pdev->dev.dma_parms = &pdev->dma_parms;
+
        if (!pdev->dev.coherent_dma_mask)
                pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
        if (!pdev->dev.dma_mask) {
index 93468b7..9d21bf0 100644 (file)
@@ -33,6 +33,15 @@ struct virtio_blk_vq {
 } ____cacheline_aligned_in_smp;
 
 struct virtio_blk {
+       /*
+        * This mutex must be held by anything that may run after
+        * virtblk_remove() sets vblk->vdev to NULL.
+        *
+        * blk-mq, virtqueue processing, and sysfs attribute code paths are
+        * shut down before vblk->vdev is set to NULL and therefore do not need
+        * to hold this mutex.
+        */
+       struct mutex vdev_mutex;
        struct virtio_device *vdev;
 
        /* The disk structure for the kernel. */
@@ -44,6 +53,13 @@ struct virtio_blk {
        /* Process context for config space updates */
        struct work_struct config_work;
 
+       /*
+        * Tracks references from block_device_operations open/release and
+        * virtio_driver probe/remove so this object can be freed once no
+        * longer in use.
+        */
+       refcount_t refs;
+
        /* What host tells us, plus 2 for header & tailer. */
        unsigned int sg_elems;
 
@@ -295,10 +311,55 @@ out:
        return err;
 }
 
+static void virtblk_get(struct virtio_blk *vblk)
+{
+       refcount_inc(&vblk->refs);
+}
+
+static void virtblk_put(struct virtio_blk *vblk)
+{
+       if (refcount_dec_and_test(&vblk->refs)) {
+               ida_simple_remove(&vd_index_ida, vblk->index);
+               mutex_destroy(&vblk->vdev_mutex);
+               kfree(vblk);
+       }
+}
+
+static int virtblk_open(struct block_device *bd, fmode_t mode)
+{
+       struct virtio_blk *vblk = bd->bd_disk->private_data;
+       int ret = 0;
+
+       mutex_lock(&vblk->vdev_mutex);
+
+       if (vblk->vdev)
+               virtblk_get(vblk);
+       else
+               ret = -ENXIO;
+
+       mutex_unlock(&vblk->vdev_mutex);
+       return ret;
+}
+
+static void virtblk_release(struct gendisk *disk, fmode_t mode)
+{
+       struct virtio_blk *vblk = disk->private_data;
+
+       virtblk_put(vblk);
+}
+
 /* We provide getgeo only to please some old bootloader/partitioning tools */
 static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
 {
        struct virtio_blk *vblk = bd->bd_disk->private_data;
+       int ret = 0;
+
+       mutex_lock(&vblk->vdev_mutex);
+
+       if (!vblk->vdev) {
+               ret = -ENXIO;
+               goto out;
+       }
 
        /* see if the host passed in geometry config */
        if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
@@ -314,11 +375,15 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
                geo->sectors = 1 << 5;
                geo->cylinders = get_capacity(bd->bd_disk) >> 11;
        }
-       return 0;
+out:
+       mutex_unlock(&vblk->vdev_mutex);
+       return ret;
 }
 
 static const struct block_device_operations virtblk_fops = {
        .owner  = THIS_MODULE,
+       .open = virtblk_open,
+       .release = virtblk_release,
        .getgeo = virtblk_getgeo,
 };
 
@@ -655,6 +720,10 @@ static int virtblk_probe(struct virtio_device *vdev)
                goto out_free_index;
        }
 
+       /* This reference is dropped in virtblk_remove(). */
+       refcount_set(&vblk->refs, 1);
+       mutex_init(&vblk->vdev_mutex);
+
        vblk->vdev = vdev;
        vblk->sg_elems = sg_elems;
 
@@ -820,8 +889,6 @@ out:
 static void virtblk_remove(struct virtio_device *vdev)
 {
        struct virtio_blk *vblk = vdev->priv;
-       int index = vblk->index;
-       int refc;
 
        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);
@@ -831,18 +898,21 @@ static void virtblk_remove(struct virtio_device *vdev)
 
        blk_mq_free_tag_set(&vblk->tag_set);
 
+       mutex_lock(&vblk->vdev_mutex);
+
        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);
 
-       refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref);
+       /* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
+       vblk->vdev = NULL;
+
        put_disk(vblk->disk);
        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);
-       kfree(vblk);
 
-       /* Only free device id if we don't have any users */
-       if (refc == 1)
-               ida_simple_remove(&vd_index_ida, index);
+       mutex_unlock(&vblk->vdev_mutex);
+
+       virtblk_put(vblk);
 }
 
 #ifdef CONFIG_PM_SLEEP
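
The heart of this change is a lifetime fix: open() can race with hot-unplug,
so ownership is split into one reference held from probe to remove plus one
per opener, and vdev is only ever dereferenced under vdev_mutex. A compressed
userspace sketch of that discipline (C11 atomics standing in for refcount_t;
field names mirror the driver, everything else is illustrative):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct vblk {
            pthread_mutex_t vdev_mutex;
            void *vdev;             /* NULL once "remove" has run */
            atomic_int refs;
    };

    static void vblk_put(struct vblk *v)
    {
            if (atomic_fetch_sub(&v->refs, 1) == 1) {  /* refcount_dec_and_test() */
                    pthread_mutex_destroy(&v->vdev_mutex);
                    free(v);
            }
    }

    static int vblk_open(struct vblk *v)
    {
            int ret = 0;

            pthread_mutex_lock(&v->vdev_mutex);
            if (v->vdev)
                    atomic_fetch_add(&v->refs, 1);  /* virtblk_get() */
            else
                    ret = -1;                       /* -ENXIO: device is gone */
            pthread_mutex_unlock(&v->vdev_mutex);
            return ret;
    }

    static void vblk_remove(struct vblk *v)
    {
            pthread_mutex_lock(&v->vdev_mutex);
            v->vdev = NULL;         /* openers now fail instead of dereferencing */
            pthread_mutex_unlock(&v->vdev_mutex);
            vblk_put(v);            /* drop the probe-time reference */
    }

    int main(void)
    {
            struct vblk *v = calloc(1, sizeof(*v));

            pthread_mutex_init(&v->vdev_mutex, NULL);
            v->vdev = (void *)0x1;
            atomic_init(&v->refs, 1);       /* probe's reference */

            if (vblk_open(v) == 0) {        /* an opener holds the object */
                    vblk_remove(v);         /* hot-unplug under the opener */
                    vblk_put(v);            /* release(): the last put frees */
            }
            return 0;
    }
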
index 4d1e25d..4d3429b 100644 (file)
@@ -1059,7 +1059,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
 
        update_turbo_state();
        if (global.turbo_disabled) {
-               pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
+               pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
                mutex_unlock(&intel_pstate_limits_lock);
                mutex_unlock(&intel_pstate_driver_lock);
                return -EPERM;
index b7bb7c3..b2f9882 100644 (file)
@@ -963,10 +963,12 @@ static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err,
        struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
        struct aead_edesc *edesc;
        int ecode = 0;
+       bool has_bklog;
 
        dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 
        edesc = rctx->edesc;
+       has_bklog = edesc->bklog;
 
        if (err)
                ecode = caam_jr_strstatus(jrdev, err);
@@ -979,7 +981,7 @@ static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err,
         * If no backlog flag, the completion of the request is done
         * by CAAM, not crypto engine.
         */
-       if (!edesc->bklog)
+       if (!has_bklog)
                aead_request_complete(req, ecode);
        else
                crypto_finalize_aead_request(jrp->engine, req, ecode);
@@ -995,10 +997,12 @@ static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
        struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
        int ivsize = crypto_skcipher_ivsize(skcipher);
        int ecode = 0;
+       bool has_bklog;
 
        dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 
        edesc = rctx->edesc;
+       has_bklog = edesc->bklog;
        if (err)
                ecode = caam_jr_strstatus(jrdev, err);
 
@@ -1028,7 +1032,7 @@ static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
         * If no backlog flag, the completion of the request is done
         * by CAAM, not crypto engine.
         */
-       if (!edesc->bklog)
+       if (!has_bklog)
                skcipher_request_complete(req, ecode);
        else
                crypto_finalize_skcipher_request(jrp->engine, req, ecode);
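
The same one-line pattern lands in four completion callbacks across caamalg,
caamhash and caampkc: the finalize/complete helpers may free the extended
descriptor, so edesc->bklog has to be snapshotted into a local before the
unmap/complete path runs. The use-after-free shape, reduced to a runnable
toy:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct edesc { bool bklog; };

    static void done_callback(struct edesc *edesc)
    {
            bool has_bklog = edesc->bklog;  /* snapshot first */

            free(edesc);                    /* edesc is dangling from here on */

            if (!has_bklog)                 /* decided from the snapshot, not edesc */
                    puts("completed directly");
            else
                    puts("finalized via crypto engine");
    }

    int main(void)
    {
            struct edesc *e = malloc(sizeof(*e));

            e->bklog = false;
            done_callback(e);
            return 0;
    }
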
@@ -1711,7 +1715,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 
        if (ivsize || mapped_dst_nents > 1)
                sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx +
-                                   mapped_dst_nents);
+                                   mapped_dst_nents - 1 + !!ivsize);
 
        if (sec4_sg_bytes) {
                edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
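
Why the new index expression: sg_to_sec4_set_last() takes a pointer to the
entry to mark, entries are zero-based, and in this layout an output-IV entry
(when ivsize != 0) follows the destination entries. The last entry therefore
sits at

    dst_sg_idx + (mapped_dst_nents + !!ivsize) - 1
        = dst_sg_idx + mapped_dst_nents - 1 + !!ivsize

while the old dst_sg_idx + mapped_dst_nents pointed one entry past the end
whenever no IV entry was appended.
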
index 943bc02..27ff4a3 100644 (file)
@@ -583,10 +583,12 @@ static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
        struct caam_hash_state *state = ahash_request_ctx(req);
        struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        int ecode = 0;
+       bool has_bklog;
 
        dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 
        edesc = state->edesc;
+       has_bklog = edesc->bklog;
 
        if (err)
                ecode = caam_jr_strstatus(jrdev, err);
@@ -603,7 +605,7 @@ static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
         * If no backlog flag, the completion of the request is done
         * by CAAM, not crypto engine.
         */
-       if (!edesc->bklog)
+       if (!has_bklog)
                req->base.complete(&req->base, ecode);
        else
                crypto_finalize_hash_request(jrp->engine, req, ecode);
@@ -632,10 +634,12 @@ static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
        struct caam_hash_state *state = ahash_request_ctx(req);
        int digestsize = crypto_ahash_digestsize(ahash);
        int ecode = 0;
+       bool has_bklog;
 
        dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 
        edesc = state->edesc;
+       has_bklog = edesc->bklog;
        if (err)
                ecode = caam_jr_strstatus(jrdev, err);
 
@@ -663,7 +667,7 @@ static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
         * If no backlog flag, the completion of the request is done
         * by CAAM, not crypto engine.
         */
-       if (!edesc->bklog)
+       if (!has_bklog)
                req->base.complete(&req->base, ecode);
        else
                crypto_finalize_hash_request(jrp->engine, req, ecode);
index 4fcae37..2e44d68 100644 (file)
@@ -121,11 +121,13 @@ static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
        struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
        struct rsa_edesc *edesc;
        int ecode = 0;
+       bool has_bklog;
 
        if (err)
                ecode = caam_jr_strstatus(dev, err);
 
        edesc = req_ctx->edesc;
+       has_bklog = edesc->bklog;
 
        rsa_pub_unmap(dev, edesc, req);
        rsa_io_unmap(dev, edesc, req);
@@ -135,7 +137,7 @@ static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
         * If no backlog flag, the completion of the request is done
         * by CAAM, not crypto engine.
         */
-       if (!edesc->bklog)
+       if (!has_bklog)
                akcipher_request_complete(req, ecode);
        else
                crypto_finalize_akcipher_request(jrp->engine, req, ecode);
@@ -152,11 +154,13 @@ static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
        struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
        struct rsa_edesc *edesc;
        int ecode = 0;
+       bool has_bklog;
 
        if (err)
                ecode = caam_jr_strstatus(dev, err);
 
        edesc = req_ctx->edesc;
+       has_bklog = edesc->bklog;
 
        switch (key->priv_form) {
        case FORM1:
@@ -176,7 +180,7 @@ static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
         * If no backlog flag, the completion of the request is done
         * by CAAM, not crypto engine.
         */
-       if (!edesc->bklog)
+       if (!has_bklog)
                akcipher_request_complete(req, ecode);
        else
                crypto_finalize_akcipher_request(jrp->engine, req, ecode);
index e92b352..43d9e24 100644 (file)
@@ -673,41 +673,14 @@ int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
        return 0;
 }
 
-/*
- * chcr_write_cpl_set_tcb_ulp: update tcb values.
- * TCB is responsible to create tcp headers, so all the related values
- * should be correctly updated.
- * @tx_info - driver specific tls info.
- * @q - tx queue on which packet is going out.
- * @tid - TCB identifier.
- * @pos - current index where should we start writing.
- * @word - TCB word.
- * @mask - TCB word related mask.
- * @val - TCB word related value.
- * @reply - set 1 if looking for TP response.
- * return - next position to write.
- */
-static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
-                                       struct sge_eth_txq *q, u32 tid,
-                                       void *pos, u16 word, u64 mask,
+static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
+                                       u32 tid, void *pos, u16 word, u64 mask,
                                        u64 val, u32 reply)
 {
        struct cpl_set_tcb_field_core *cpl;
        struct ulptx_idata *idata;
        struct ulp_txpkt *txpkt;
-       void *save_pos = NULL;
-       u8 buf[48] = {0};
-       int left;
 
-       left = (void *)q->q.stat - pos;
-       if (unlikely(left < CHCR_SET_TCB_FIELD_LEN)) {
-               if (!left) {
-                       pos = q->q.desc;
-               } else {
-                       save_pos = pos;
-                       pos = buf;
-               }
-       }
        /* ULP_TXPKT */
        txpkt = pos;
        txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
@@ -732,18 +705,54 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
        idata = (struct ulptx_idata *)(cpl + 1);
        idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
        idata->len = htonl(0);
+       pos = idata + 1;
 
-       if (save_pos) {
-               pos = chcr_copy_to_txd(buf, &q->q, save_pos,
-                                      CHCR_SET_TCB_FIELD_LEN);
-       } else {
-               /* check again if we are at the end of the queue */
-               if (left == CHCR_SET_TCB_FIELD_LEN)
+       return pos;
+}
+
+/*
+ * chcr_write_cpl_set_tcb_ulp: update TCB values.
+ * The TCB is responsible for creating the TCP headers, so all the related
+ * values must be kept up to date.
+ * @tx_info - driver-specific TLS info.
+ * @q - TX queue on which the packet is going out.
+ * @tid - TCB identifier.
+ * @pos - current index at which to start writing.
+ * @word - TCB word.
+ * @mask - mask for the TCB word.
+ * @val - value for the TCB word.
+ * @reply - set to 1 if a TP response is expected.
+ * return - next position to write.
+ */
+static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
+                                       struct sge_eth_txq *q, u32 tid,
+                                       void *pos, u16 word, u64 mask,
+                                       u64 val, u32 reply)
+{
+       int left = (void *)q->q.stat - pos;
+
+       if (unlikely(left < CHCR_SET_TCB_FIELD_LEN)) {
+               if (!left) {
                        pos = q->q.desc;
-               else
-                       pos = idata + 1;
+               } else {
+                       u8 buf[48] = {0};
+
+                       __chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word,
+                                                    mask, val, reply);
+
+                       return chcr_copy_to_txd(buf, &q->q, pos,
+                                               CHCR_SET_TCB_FIELD_LEN);
+               }
        }
 
+       pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word,
+                                          mask, val, reply);
+
+       /* check again if we are at the end of the queue */
+       if (left == CHCR_SET_TCB_FIELD_LEN)
+               pos = q->q.desc;
+
        return pos;
 }
 
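The restructuring above inverts the old control flow: packet construction now
lives entirely in __chcr_write_cpl_set_tcb_ulp(), and the wrapper decides up
front whether to emit in place, wrap to the start of the descriptor ring, or
build into the 48-byte stack buffer and copy it across the ring boundary with
chcr_copy_to_txd(). That removes the save_pos bookkeeping that previously
threaded the stack-buffer case back through the common path.
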
index ccc9eda..07df88f 100644 (file)
@@ -388,7 +388,8 @@ static long dma_buf_ioctl(struct file *file,
 
                return ret;
 
-       case DMA_BUF_SET_NAME:
+       case DMA_BUF_SET_NAME_A:
+       case DMA_BUF_SET_NAME_B:
                return dma_buf_set_name(dmabuf, (const char __user *)arg);
 
        default:
@@ -655,8 +656,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
  * calls attach() of dma_buf_ops to allow device-specific attach functionality
  * @dmabuf:            [in]    buffer to attach device to.
  * @dev:               [in]    device to be attached.
- * @importer_ops       [in]    importer operations for the attachment
- * @importer_priv      [in]    importer private pointer for the attachment
+ * @importer_ops:      [in]    importer operations for the attachment
+ * @importer_priv:     [in]    importer private pointer for the attachment
  *
  * Returns struct dma_buf_attachment pointer for this attachment. Attachments
  * must be cleaned up by calling dma_buf_detach().
index 0924836..023db68 100644 (file)
@@ -241,7 +241,8 @@ config FSL_RAID
 
 config HISI_DMA
        tristate "HiSilicon DMA Engine support"
-       depends on ARM64 || (COMPILE_TEST && PCI_MSI)
+       depends on ARM64 || COMPILE_TEST
+       depends on PCI_MSI
        select DMA_ENGINE
        select DMA_VIRTUAL_CHANNELS
        help
index 4830ba6..d31076d 100644 (file)
@@ -232,10 +232,6 @@ static void chan_dev_release(struct device *dev)
        struct dma_chan_dev *chan_dev;
 
        chan_dev = container_of(dev, typeof(*chan_dev), device);
-       if (atomic_dec_and_test(chan_dev->idr_ref)) {
-               ida_free(&dma_ida, chan_dev->dev_id);
-               kfree(chan_dev->idr_ref);
-       }
        kfree(chan_dev);
 }
 
@@ -1043,27 +1039,9 @@ static int get_dma_id(struct dma_device *device)
 }
 
 static int __dma_async_device_channel_register(struct dma_device *device,
-                                              struct dma_chan *chan,
-                                              int chan_id)
+                                              struct dma_chan *chan)
 {
        int rc = 0;
-       int chancnt = device->chancnt;
-       atomic_t *idr_ref;
-       struct dma_chan *tchan;
-
-       tchan = list_first_entry_or_null(&device->channels,
-                                        struct dma_chan, device_node);
-       if (!tchan)
-               return -ENODEV;
-
-       if (tchan->dev) {
-               idr_ref = tchan->dev->idr_ref;
-       } else {
-               idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
-               if (!idr_ref)
-                       return -ENOMEM;
-               atomic_set(idr_ref, 0);
-       }
 
        chan->local = alloc_percpu(typeof(*chan->local));
        if (!chan->local)
@@ -1079,29 +1057,36 @@ static int __dma_async_device_channel_register(struct dma_device *device,
         * When the chan_id is a negative value, we are dynamically adding
         * the channel. Otherwise we are static enumerating.
         */
-       chan->chan_id = chan_id < 0 ? chancnt : chan_id;
+       mutex_lock(&device->chan_mutex);
+       chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
+       mutex_unlock(&device->chan_mutex);
+       if (chan->chan_id < 0) {
+               pr_err("%s: unable to alloc ida for chan: %d\n",
+                      __func__, chan->chan_id);
+               goto err_out;
+       }
+
        chan->dev->device.class = &dma_devclass;
        chan->dev->device.parent = device->dev;
        chan->dev->chan = chan;
-       chan->dev->idr_ref = idr_ref;
        chan->dev->dev_id = device->dev_id;
-       atomic_inc(idr_ref);
        dev_set_name(&chan->dev->device, "dma%dchan%d",
                     device->dev_id, chan->chan_id);
-
        rc = device_register(&chan->dev->device);
        if (rc)
-               goto err_out;
+               goto err_out_ida;
        chan->client_count = 0;
-       device->chancnt = chan->chan_id + 1;
+       device->chancnt++;
 
        return 0;
 
+ err_out_ida:
+       mutex_lock(&device->chan_mutex);
+       ida_free(&device->chan_ida, chan->chan_id);
+       mutex_unlock(&device->chan_mutex);
  err_out:
        free_percpu(chan->local);
        kfree(chan->dev);
-       if (atomic_dec_return(idr_ref) == 0)
-               kfree(idr_ref);
        return rc;
 }
 
@@ -1110,7 +1095,7 @@ int dma_async_device_channel_register(struct dma_device *device,
 {
        int rc;
 
-       rc = __dma_async_device_channel_register(device, chan, -1);
+       rc = __dma_async_device_channel_register(device, chan);
        if (rc < 0)
                return rc;
 
@@ -1130,6 +1115,9 @@ static void __dma_async_device_channel_unregister(struct dma_device *device,
        device->chancnt--;
        chan->dev->chan = NULL;
        mutex_unlock(&dma_list_mutex);
+       mutex_lock(&device->chan_mutex);
+       ida_free(&device->chan_ida, chan->chan_id);
+       mutex_unlock(&device->chan_mutex);
        device_unregister(&chan->dev->device);
        free_percpu(chan->local);
 }
@@ -1152,7 +1140,7 @@ EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
  */
 int dma_async_device_register(struct dma_device *device)
 {
-       int rc, i = 0;
+       int rc;
        struct dma_chan* chan;
 
        if (!device)
@@ -1257,9 +1245,12 @@ int dma_async_device_register(struct dma_device *device)
        if (rc != 0)
                return rc;
 
+       mutex_init(&device->chan_mutex);
+       ida_init(&device->chan_ida);
+
        /* represent channels in sysfs. Probably want devs too */
        list_for_each_entry(chan, &device->channels, device_node) {
-               rc = __dma_async_device_channel_register(device, chan, i++);
+               rc = __dma_async_device_channel_register(device, chan);
                if (rc < 0)
                        goto err_out;
        }
@@ -1334,6 +1325,7 @@ void dma_async_device_unregister(struct dma_device *device)
         */
        dma_cap_set(DMA_PRIVATE, device->cap_mask);
        dma_channel_rebalance();
+       ida_free(&dma_ida, device->dev_id);
        dma_device_put(device);
        mutex_unlock(&dma_list_mutex);
 }
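
The old scheme derived chan_id from chancnt (device->chancnt = chan_id + 1)
and refcounted a shared idr_ref to decide when to return the device ID; both
fall apart once channels come and go dynamically. An IDA hands out the
smallest free ID and recycles freed ones. A toy stand-in for that allocator
behavior (the real ida_alloc()/ida_free() are tree-based and not limited to
64 IDs):

    #include <stdio.h>

    /* Toy IDA: smallest free ID wins, frees are recycled. */
    struct ida { unsigned long used; };

    static int ida_alloc(struct ida *ida)
    {
            for (int id = 0; id < 64; id++)
                    if (!(ida->used & (1UL << id))) {
                            ida->used |= 1UL << id;
                            return id;
                    }
            return -1;
    }

    static void ida_free(struct ida *ida, int id)
    {
            ida->used &= ~(1UL << id);
    }

    int main(void)
    {
            struct ida chan_ida = { 0 };
            int a = ida_alloc(&chan_ida);   /* 0 */
            int b = ida_alloc(&chan_ida);   /* 1 */

            ida_free(&chan_ida, a);         /* channel 0 unregisters */
            printf("%d %d %d\n", a, b, ida_alloc(&chan_ida));  /* 0 1 0 */
            return 0;
    }
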
index a2cadfa..364dd34 100644 (file)
@@ -240,7 +240,7 @@ static bool is_threaded_test_run(struct dmatest_info *info)
                struct dmatest_thread *thread;
 
                list_for_each_entry(thread, &dtc->threads, node) {
-                       if (!thread->done)
+                       if (!thread->done && !thread->pending)
                                return true;
                }
        }
@@ -662,8 +662,8 @@ static int dmatest_func(void *data)
                flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
 
        ktime = ktime_get();
-       while (!kthread_should_stop()
-              && !(params->iterations && total_tests >= params->iterations)) {
+       while (!(kthread_should_stop() ||
+              (params->iterations && total_tests >= params->iterations))) {
                struct dma_async_tx_descriptor *tx = NULL;
                struct dmaengine_unmap_data *um;
                dma_addr_t *dsts;
index 10117f2..d683232 100644 (file)
@@ -363,6 +363,8 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
                gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
                                size);
        tdmac->desc_arr = NULL;
+       if (tdmac->status == DMA_ERROR)
+               tdmac->status = DMA_COMPLETE;
 
        return;
 }
@@ -443,7 +445,8 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
        if (!desc)
                goto err_out;
 
-       mmp_tdma_config_write(chan, direction, &tdmac->slave_config);
+       if (mmp_tdma_config_write(chan, direction, &tdmac->slave_config))
+               goto err_out;
 
        while (buf < buf_len) {
                desc = &tdmac->desc_arr[i];
index 581e7a2..a3b0b4c 100644 (file)
@@ -865,6 +865,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
        }
 
        pci_set_master(pdev);
+       pd->dma.dev = &pdev->dev;
 
        err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
        if (err) {
@@ -880,7 +881,6 @@ static int pch_dma_probe(struct pci_dev *pdev,
                goto err_free_irq;
        }
 
-       pd->dma.dev = &pdev->dev;
 
        INIT_LIST_HEAD(&pd->dma.channels);
 
index f6a2f42..b9f0d96 100644 (file)
@@ -816,6 +816,13 @@ static bool tegra_dma_eoc_interrupt_deasserted(struct tegra_dma_channel *tdc)
 static void tegra_dma_synchronize(struct dma_chan *dc)
 {
        struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+       int err;
+
+       err = pm_runtime_get_sync(tdc->tdma->dev);
+       if (err < 0) {
+               dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
+               return;
+       }
 
        /*
         * CPU, which handles interrupt, could be busy in
@@ -825,6 +832,8 @@ static void tegra_dma_synchronize(struct dma_chan *dc)
        wait_event(tdc->wq, tegra_dma_eoc_interrupt_deasserted(tdc));
 
        tasklet_kill(&tdc->tasklet);
+
+       pm_runtime_put(tdc->tdma->dev);
 }
 
 static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
index d7b9650..fb7c815 100644 (file)
@@ -27,6 +27,7 @@ struct psil_endpoint_config *psil_get_ep_config(u32 thread_id)
                        soc_ep_map = &j721e_ep_map;
                } else {
                        pr_err("PSIL: No compatible machine found for map\n");
+                       mutex_unlock(&ep_map_mutex);
                        return ERR_PTR(-ENOTSUPP);
                }
                pr_debug("%s: Using map for %s\n", __func__, soc_ep_map->name);
index aecd5a3..5429497 100644 (file)
@@ -1230,16 +1230,16 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
                return ret;
 
        spin_lock_irqsave(&chan->lock, flags);
-
-       desc = list_last_entry(&chan->active_list,
-                              struct xilinx_dma_tx_descriptor, node);
-       /*
-        * VDMA and simple mode do not support residue reporting, so the
-        * residue field will always be 0.
-        */
-       if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
-               residue = xilinx_dma_get_residue(chan, desc);
-
+       if (!list_empty(&chan->active_list)) {
+               desc = list_last_entry(&chan->active_list,
+                                      struct xilinx_dma_tx_descriptor, node);
+               /*
+                * VDMA and simple mode do not support residue reporting, so the
+                * residue field will always be 0.
+                */
+               if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
+                       residue = xilinx_dma_get_residue(chan, desc);
+       }
        spin_unlock_irqrestore(&chan->lock, flags);
 
        dma_set_residue(txstate, residue);
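
The guard matters because list_last_entry() performs no emptiness check: on
an empty list, head->prev points back at the head itself, so the "entry" it
returns is really the list head reinterpreted as a descriptor. A reduced
demonstration with the kernel macros re-declared locally (the bogus pointer
is only printed, never dereferenced):

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };
    struct desc { int id; struct list_head node; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_last_entry(head, type, member) \
            container_of((head)->prev, type, member)

    int main(void)
    {
            struct list_head active = { &active, &active }; /* empty list */

            /* head->prev == head, so this "descriptor" is the list head. */
            struct desc *d = list_last_entry(&active, struct desc, node);

            printf("bogus entry %p vs list head %p\n", (void *)d, (void *)&active);
            return 0;
    }
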
index f84f9e3..affde2d 100644 (file)
@@ -3372,15 +3372,12 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
                }
        }
 
-       amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
-       amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
-
-       amdgpu_amdkfd_suspend(adev, !fbcon);
-
        amdgpu_ras_suspend(adev);
 
        r = amdgpu_device_ip_suspend_phase1(adev);
 
+       amdgpu_amdkfd_suspend(adev, !fbcon);
+
        /* evict vram memory */
        amdgpu_bo_evict_vram(adev);
 
index 8ea86ff..466bfe5 100644 (file)
  * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
  * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
  * - 3.36.0 - Allow reading more status registers on si/cik
+ * - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness
  */
 #define KMS_DRIVER_MAJOR       3
-#define KMS_DRIVER_MINOR       36
+#define KMS_DRIVER_MINOR       37
 #define KMS_DRIVER_PATCHLEVEL  0
 
 int amdgpu_vram_limit = 0;
index 074a9a0..a5b60c9 100644 (file)
 #define SDMA_OP_AQL_COPY  0
 #define SDMA_OP_AQL_BARRIER_OR  0
 
+#define SDMA_GCR_RANGE_IS_PA           (1 << 18)
+#define SDMA_GCR_SEQ(x)                        (((x) & 0x3) << 16)
+#define SDMA_GCR_GL2_WB                        (1 << 15)
+#define SDMA_GCR_GL2_INV               (1 << 14)
+#define SDMA_GCR_GL2_DISCARD           (1 << 13)
+#define SDMA_GCR_GL2_RANGE(x)          (((x) & 0x3) << 11)
+#define SDMA_GCR_GL2_US                        (1 << 10)
+#define SDMA_GCR_GL1_INV               (1 << 9)
+#define SDMA_GCR_GLV_INV               (1 << 8)
+#define SDMA_GCR_GLK_INV               (1 << 7)
+#define SDMA_GCR_GLK_WB                        (1 << 6)
+#define SDMA_GCR_GLM_INV               (1 << 5)
+#define SDMA_GCR_GLM_WB                        (1 << 4)
+#define SDMA_GCR_GL1_RANGE(x)          (((x) & 0x3) << 2)
+#define SDMA_GCR_GLI_INV(x)            (((x) & 0x3) << 0)
+
 /*define for op field*/
 #define SDMA_PKT_HEADER_op_offset 0
 #define SDMA_PKT_HEADER_op_mask   0x000000FF
index ebfd2cd..d2840c2 100644 (file)
@@ -382,6 +382,18 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
        unsigned vmid = AMDGPU_JOB_GET_VMID(job);
        uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
 
+       /* Invalidate L2, because if we don't do it, we might get stale cache
+        * lines from previous IBs.
+        */
+       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
+       amdgpu_ring_write(ring, 0);
+       amdgpu_ring_write(ring, (SDMA_GCR_GL2_INV |
+                                SDMA_GCR_GL2_WB |
+                                SDMA_GCR_GLM_INV |
+                                SDMA_GCR_GLM_WB) << 16);
+       amdgpu_ring_write(ring, 0xffffff80);
+       amdgpu_ring_write(ring, 0xffff);
+
        /* An IB packet must end on a 8 DW boundary--the next dword
         * must be on a 8-dword boundary. Our IB packet below is 6
         * dwords long, thus add x number of NOPs, such that, in
@@ -1595,7 +1607,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
                10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
-       .emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */
+       .emit_ib_size = 5 + 7 + 6, /* sdma_v5_0_ring_emit_ib */
        .emit_ib = sdma_v5_0_ring_emit_ib,
        .emit_fence = sdma_v5_0_ring_emit_fence,
        .emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync,
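
The arithmetic behind the emit_ib_size bump: the GCR_REQ packet added above
is five dwords (the opcode header plus four payload writes), so the worst
case per emitted IB grows from 7 + 6 = 13 to 5 + 7 + 6 = 18 dwords, with the
existing 7 + 6 still covering the alignment NOPs and the IB packet itself.
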
index c5ba5d4..9c83c13 100644 (file)
@@ -2008,17 +2008,22 @@ void amdgpu_dm_update_connector_after_detect(
                dc_sink_retain(aconnector->dc_sink);
                if (sink->dc_edid.length == 0) {
                        aconnector->edid = NULL;
-                       drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
+                       if (aconnector->dc_link->aux_mode) {
+                               drm_dp_cec_unset_edid(
+                                       &aconnector->dm_dp_aux.aux);
+                       }
                } else {
                        aconnector->edid =
-                               (struct edid *) sink->dc_edid.raw_edid;
-
+                               (struct edid *)sink->dc_edid.raw_edid;
 
                        drm_connector_update_edid_property(connector,
-                                       aconnector->edid);
-                       drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
-                                           aconnector->edid);
+                                                          aconnector->edid);
+
+                       if (aconnector->dc_link->aux_mode)
+                               drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
+                                                   aconnector->edid);
                }
+
                amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
                update_connector_ext_caps(aconnector);
        } else {
@@ -3340,7 +3345,8 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
                          const union dc_tiling_info *tiling_info,
                          const uint64_t info,
                          struct dc_plane_dcc_param *dcc,
-                         struct dc_plane_address *address)
+                         struct dc_plane_address *address,
+                         bool force_disable_dcc)
 {
        struct dc *dc = adev->dm.dc;
        struct dc_dcc_surface_param input;
@@ -3352,6 +3358,9 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
        memset(&input, 0, sizeof(input));
        memset(&output, 0, sizeof(output));
 
+       if (force_disable_dcc)
+               return 0;
+
        if (!offset)
                return 0;
 
@@ -3401,7 +3410,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
                             union dc_tiling_info *tiling_info,
                             struct plane_size *plane_size,
                             struct dc_plane_dcc_param *dcc,
-                            struct dc_plane_address *address)
+                            struct dc_plane_address *address,
+                            bool force_disable_dcc)
 {
        const struct drm_framebuffer *fb = &afb->base;
        int ret;
@@ -3507,7 +3517,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
 
                ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
                                                plane_size, tiling_info,
-                                               tiling_flags, dcc, address);
+                                               tiling_flags, dcc, address,
+                                               force_disable_dcc);
                if (ret)
                        return ret;
        }
@@ -3599,7 +3610,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
                            const struct drm_plane_state *plane_state,
                            const uint64_t tiling_flags,
                            struct dc_plane_info *plane_info,
-                           struct dc_plane_address *address)
+                           struct dc_plane_address *address,
+                           bool force_disable_dcc)
 {
        const struct drm_framebuffer *fb = plane_state->fb;
        const struct amdgpu_framebuffer *afb =
@@ -3681,7 +3693,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
                                           plane_info->rotation, tiling_flags,
                                           &plane_info->tiling_info,
                                           &plane_info->plane_size,
-                                          &plane_info->dcc, address);
+                                          &plane_info->dcc, address,
+                                          force_disable_dcc);
        if (ret)
                return ret;
 
@@ -3704,6 +3717,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
        struct dc_plane_info plane_info;
        uint64_t tiling_flags;
        int ret;
+       bool force_disable_dcc = false;
 
        ret = fill_dc_scaling_info(plane_state, &scaling_info);
        if (ret)
@@ -3718,9 +3732,11 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
        if (ret)
                return ret;
 
+       force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
        ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
                                          &plane_info,
-                                         &dc_plane_state->address);
+                                         &dc_plane_state->address,
+                                         force_disable_dcc);
        if (ret)
                return ret;
 
@@ -5342,6 +5358,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
        uint64_t tiling_flags;
        uint32_t domain;
        int r;
+       bool force_disable_dcc = false;
 
        dm_plane_state_old = to_dm_plane_state(plane->state);
        dm_plane_state_new = to_dm_plane_state(new_state);
@@ -5400,11 +5417,13 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
                        dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
                struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
 
+               force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
                fill_plane_buffer_attributes(
                        adev, afb, plane_state->format, plane_state->rotation,
                        tiling_flags, &plane_state->tiling_info,
                        &plane_state->plane_size, &plane_state->dcc,
-                       &plane_state->address);
+                       &plane_state->address,
+                       force_disable_dcc);
        }
 
        return 0;
@@ -6676,7 +6695,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                fill_dc_plane_info_and_addr(
                        dm->adev, new_plane_state, tiling_flags,
                        &bundle->plane_infos[planes_count],
-                       &bundle->flip_addrs[planes_count].address);
+                       &bundle->flip_addrs[planes_count].address,
+                       false);
+
+               DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
+                                new_plane_state->plane->index,
+                                bundle->plane_infos[planes_count].dcc.enable);
 
                bundle->surface_updates[planes_count].plane_info =
                        &bundle->plane_infos[planes_count];
@@ -8096,7 +8120,8 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
                                ret = fill_dc_plane_info_and_addr(
                                        dm->adev, new_plane_state, tiling_flags,
                                        plane_info,
-                                       &flip_addr->address);
+                                       &flip_addr->address,
+                                       false);
                                if (ret)
                                        goto cleanup;
 
index 8489f1e..47431ca 100644 (file)
@@ -834,11 +834,10 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
 static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
 {
        int i;
-       int count = 0;
-       struct pipe_ctx *pipe;
        PERF_TRACE();
        for (i = 0; i < MAX_PIPES; i++) {
-               pipe = &context->res_ctx.pipe_ctx[i];
+               int count = 0;
+               struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 
                if (!pipe->plane_state)
                        continue;
index 7cbb1ef..27a7d2a 100644 (file)
@@ -2908,6 +2908,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
                                        sizeof(hpd_irq_dpcd_data),
                                        "Status: ");
 
+               for (i = 0; i < MAX_PIPES; i++) {
+                       pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+                       if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+                               link->dc->hwss.blank_stream(pipe_ctx);
+               }
+
                for (i = 0; i < MAX_PIPES; i++) {
                        pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
                        if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
@@ -2927,6 +2933,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
                if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
                        dc_link_reallocate_mst_payload(link);
 
+               for (i = 0; i < MAX_PIPES; i++) {
+                       pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+                       if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+                               link->dc->hwss.unblank_stream(pipe_ctx, &previous_link_settings);
+               }
+
                status = false;
                if (out_link_loss)
                        *out_link_loss = true;
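
The two added loops bracket the existing per-pipe handling: every pipe whose
stream drives this link is blanked via hwss.blank_stream() before the link
is serviced, and unblanked with previous_link_settings once the MST payloads
have been reallocated, so no stream is left scanning out over a link that is
mid-recovery.
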
@@ -4227,6 +4239,21 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
 void dpcd_set_source_specific_data(struct dc_link *link)
 {
        const uint32_t post_oui_delay = 30; // 30ms
+       uint8_t dspc = 0;
+       enum dc_status ret = DC_ERROR_UNEXPECTED;
+
+       ret = core_link_read_dpcd(link, DP_DOWN_STREAM_PORT_COUNT, &dspc,
+                                 sizeof(dspc));
+
+       if (ret != DC_OK) {
+               DC_LOG_ERROR("Error in DP aux read transaction,"
+                            " not writing source specific data\n");
+               return;
+       }
+
+       /* Return if OUI unsupported */
+       if (!(dspc & DP_OUI_SUPPORT))
+               return;
 
        if (!link->dc->vendor_signature.is_valid) {
                struct dpcd_amd_signature amd_signature;
index 6ddbb00..4f0e720 100644 (file)
@@ -231,34 +231,6 @@ struct dc_stream_status *dc_stream_get_status(
        return dc_stream_get_status_from_state(dc->current_state, stream);
 }
 
-static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
-{
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-       unsigned int vupdate_line;
-       unsigned int lines_to_vupdate, us_to_vupdate, vpos, nvpos;
-       struct dc_stream_state *stream = pipe_ctx->stream;
-       unsigned int us_per_line;
-
-       if (stream->ctx->asic_id.chip_family == FAMILY_RV &&
-                       ASICREV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
-
-               vupdate_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
-               if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos))
-                       return;
-
-               if (vpos >= vupdate_line)
-                       return;
-
-               us_per_line = stream->timing.h_total * 10000 / stream->timing.pix_clk_100hz;
-               lines_to_vupdate = vupdate_line - vpos;
-               us_to_vupdate = lines_to_vupdate * us_per_line;
-
-               /* 70 us is a conservative estimate of cursor update time*/
-               if (us_to_vupdate < 70)
-                       udelay(us_to_vupdate);
-       }
-#endif
-}
 
 /**
  * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
@@ -298,9 +270,7 @@ bool dc_stream_set_cursor_attributes(
 
                if (!pipe_to_program) {
                        pipe_to_program = pipe_ctx;
-
-                       delay_cursor_until_vupdate(pipe_ctx, dc);
-                       dc->hwss.pipe_control_lock(dc, pipe_to_program, true);
+                       dc->hwss.cursor_lock(dc, pipe_to_program, true);
                }
 
                dc->hwss.set_cursor_attribute(pipe_ctx);
@@ -309,7 +279,7 @@ bool dc_stream_set_cursor_attributes(
        }
 
        if (pipe_to_program)
-               dc->hwss.pipe_control_lock(dc, pipe_to_program, false);
+               dc->hwss.cursor_lock(dc, pipe_to_program, false);
 
        return true;
 }
@@ -349,16 +319,14 @@ bool dc_stream_set_cursor_position(
 
                if (!pipe_to_program) {
                        pipe_to_program = pipe_ctx;
-
-                       delay_cursor_until_vupdate(pipe_ctx, dc);
-                       dc->hwss.pipe_control_lock(dc, pipe_to_program, true);
+                       dc->hwss.cursor_lock(dc, pipe_to_program, true);
                }
 
                dc->hwss.set_cursor_position(pipe_ctx);
        }
 
        if (pipe_to_program)
-               dc->hwss.pipe_control_lock(dc, pipe_to_program, false);
+               dc->hwss.cursor_lock(dc, pipe_to_program, false);
 
        return true;
 }
index c279982..1052759 100644 (file)
@@ -2757,6 +2757,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
        .disable_plane = dce110_power_down_fe,
        .pipe_control_lock = dce_pipe_control_lock,
        .interdependent_update_lock = NULL,
+       .cursor_lock = dce_pipe_control_lock,
        .prepare_bandwidth = dce110_prepare_bandwidth,
        .optimize_bandwidth = dce110_optimize_bandwidth,
        .set_drr = set_drr,
index b035754..085c1a3 100644 (file)
@@ -1625,6 +1625,16 @@ void dcn10_pipe_control_lock(
                hws->funcs.verify_allow_pstate_change_high(dc);
 }
 
+void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
+{
+       /* Cursor lock is per MPCC tree, so we only need to lock one pipe per stream */
+       if (!pipe || pipe->top_pipe)
+               return;
+
+       dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
+                       pipe->stream_res.opp->inst, lock);
+}
+
 static bool wait_for_reset_trigger_to_occur(
        struct dc_context *dc_ctx,
        struct timing_generator *tg)
index 16a50e0..af51424 100644 (file)
@@ -49,6 +49,7 @@ void dcn10_pipe_control_lock(
        struct dc *dc,
        struct pipe_ctx *pipe,
        bool lock);
+void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock);
 void dcn10_blank_pixel_data(
                struct dc *dc,
                struct pipe_ctx *pipe_ctx,
index dd02d39..700509b 100644 (file)
@@ -50,6 +50,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
        .disable_audio_stream = dce110_disable_audio_stream,
        .disable_plane = dcn10_disable_plane,
        .pipe_control_lock = dcn10_pipe_control_lock,
+       .cursor_lock = dcn10_cursor_lock,
        .interdependent_update_lock = dcn10_lock_all_pipes,
        .prepare_bandwidth = dcn10_prepare_bandwidth,
        .optimize_bandwidth = dcn10_optimize_bandwidth,
index 04f8634..3fcd408 100644 (file)
@@ -223,6 +223,9 @@ struct mpcc *mpc1_insert_plane(
        REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, dpp_id);
        REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, tree->opp_id);
 
+       /* Configure VUPDATE lock set for this MPCC to map to the OPP */
+       REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, tree->opp_id);
+
        /* update mpc tree mux setting */
        if (tree->opp_list == insert_above_mpcc) {
                /* insert the toppest mpcc */
@@ -318,6 +321,7 @@ void mpc1_remove_mpcc(
                REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
                REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
                REG_SET(MPCC_OPP_ID[mpcc_id],  0, MPCC_OPP_ID,  0xf);
+               REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
 
                /* mark this mpcc as not in use */
                mpc10->mpcc_in_use_mask &= ~(1 << mpcc_id);
@@ -328,6 +332,7 @@ void mpc1_remove_mpcc(
                REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
                REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
                REG_SET(MPCC_OPP_ID[mpcc_id],  0, MPCC_OPP_ID,  0xf);
+               REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
        }
 }
 
@@ -361,6 +366,7 @@ void mpc1_mpc_init(struct mpc *mpc)
                REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
                REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
                REG_SET(MPCC_OPP_ID[mpcc_id],  0, MPCC_OPP_ID,  0xf);
+               REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
 
                mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id);
        }
@@ -381,6 +387,7 @@ void mpc1_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id)
        REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
        REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
        REG_SET(MPCC_OPP_ID[mpcc_id],  0, MPCC_OPP_ID,  0xf);
+       REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
 
        mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id);
 
@@ -453,6 +460,13 @@ void mpc1_read_mpcc_state(
                        MPCC_BUSY, &s->busy);
 }
 
+void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock)
+{
+       struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+
+       REG_SET(CUR[opp_id], 0, CUR_VUPDATE_LOCK_SET, lock ? 1 : 0);
+}
+
 static const struct mpc_funcs dcn10_mpc_funcs = {
        .read_mpcc_state = mpc1_read_mpcc_state,
        .insert_plane = mpc1_insert_plane,
@@ -464,6 +478,7 @@ static const struct mpc_funcs dcn10_mpc_funcs = {
        .assert_mpcc_idle_before_connect = mpc1_assert_mpcc_idle_before_connect,
        .init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw,
        .update_blending = mpc1_update_blending,
+       .cursor_lock = mpc1_cursor_lock,
        .set_denorm = NULL,
        .set_denorm_clamp = NULL,
        .set_output_csc = NULL,
index 962a68e..66a4719 100644 (file)
        SRII(MPCC_BG_G_Y, MPCC, inst),\
        SRII(MPCC_BG_R_CR, MPCC, inst),\
        SRII(MPCC_BG_B_CB, MPCC, inst),\
-       SRII(MPCC_BG_B_CB, MPCC, inst),\
-       SRII(MPCC_SM_CONTROL, MPCC, inst)
+       SRII(MPCC_SM_CONTROL, MPCC, inst),\
+       SRII(MPCC_UPDATE_LOCK_SEL, MPCC, inst)
 
 #define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst) \
-       SRII(MUX, MPC_OUT, inst)
+       SRII(MUX, MPC_OUT, inst),\
+       VUPDATE_SRII(CUR, VUPDATE_LOCK_SET, inst)
 
 #define MPC_COMMON_REG_VARIABLE_LIST \
        uint32_t MPCC_TOP_SEL[MAX_MPCC]; \
@@ -55,7 +56,9 @@
        uint32_t MPCC_BG_R_CR[MAX_MPCC]; \
        uint32_t MPCC_BG_B_CB[MAX_MPCC]; \
        uint32_t MPCC_SM_CONTROL[MAX_MPCC]; \
-       uint32_t MUX[MAX_OPP];
+       uint32_t MUX[MAX_OPP]; \
+       uint32_t MPCC_UPDATE_LOCK_SEL[MAX_MPCC]; \
+       uint32_t CUR[MAX_OPP];
 
 #define MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
        SF(MPCC0_MPCC_TOP_SEL, MPCC_TOP_SEL, mask_sh),\
@@ -78,7 +81,8 @@
        SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FIELD_ALT, mask_sh),\
        SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_FRAME_POL, mask_sh),\
        SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_TOP_POL, mask_sh),\
-       SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh)
+       SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh),\
+       SF(MPCC0_MPCC_UPDATE_LOCK_SEL, MPCC_UPDATE_LOCK_SEL, mask_sh)
 
 #define MPC_REG_FIELD_LIST(type) \
        type MPCC_TOP_SEL;\
        type MPCC_SM_FIELD_ALT;\
        type MPCC_SM_FORCE_NEXT_FRAME_POL;\
        type MPCC_SM_FORCE_NEXT_TOP_POL;\
-       type MPC_OUT_MUX;
+       type MPC_OUT_MUX;\
+       type MPCC_UPDATE_LOCK_SEL;\
+       type CUR_VUPDATE_LOCK_SET;
 
 struct dcn_mpc_registers {
        MPC_COMMON_REG_VARIABLE_LIST
@@ -192,4 +198,6 @@ void mpc1_read_mpcc_state(
                int mpcc_inst,
                struct mpcc_state *s);
 
+void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock);
+
 #endif
index 07265ca..ba849aa 100644 (file)
@@ -181,6 +181,14 @@ enum dcn10_clk_src_array_id {
        .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
                                        mm ## block ## id ## _ ## reg_name
 
+#define VUPDATE_SRII(reg_name, block, id)\
+       .reg_name[id] = BASE(mm ## reg_name ## 0 ## _ ## block ## id ## _BASE_IDX) + \
+                                       mm ## reg_name ## 0 ## _ ## block ## id
+
+/* set field/register/bitfield name */
+#define SFRB(field_name, reg_name, bitfield, post_fix)\
+       .field_name = reg_name ## __ ## bitfield ## post_fix
+
 /* NBIO */
 #define NBIO_BASE_INNER(seg) \
        NBIF_BASE__INST0_SEG ## seg
@@ -419,11 +427,13 @@ static const struct dcn_mpc_registers mpc_regs = {
 };
 
 static const struct dcn_mpc_shift mpc_shift = {
-       MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
+       MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT),\
+       SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, __SHIFT)
 };
 
 static const struct dcn_mpc_mask mpc_mask = {
-       MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),
+       MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),\
+       SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, _MASK)
 };
 
 #define tg_regs(id)\
index 22f421e..a023a4d 100644 (file)
@@ -2294,7 +2294,8 @@ void dcn20_fpga_init_hw(struct dc *dc)
 
        REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
        REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
-       REG_WRITE(REFCLK_CNTL, 0);
+       if (REG(REFCLK_CNTL))
+               REG_WRITE(REFCLK_CNTL, 0);
        //
 
 
index 1e73357..6a21228 100644 (file)
@@ -52,6 +52,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
        .disable_plane = dcn20_disable_plane,
        .pipe_control_lock = dcn20_pipe_control_lock,
        .interdependent_update_lock = dcn10_lock_all_pipes,
+       .cursor_lock = dcn10_cursor_lock,
        .prepare_bandwidth = dcn20_prepare_bandwidth,
        .optimize_bandwidth = dcn20_optimize_bandwidth,
        .update_bandwidth = dcn20_update_bandwidth,
index de9c857..570dfd9 100644 (file)
@@ -545,6 +545,7 @@ const struct mpc_funcs dcn20_mpc_funcs = {
        .mpc_init = mpc1_mpc_init,
        .mpc_init_single_inst = mpc1_mpc_init_single_inst,
        .update_blending = mpc2_update_blending,
+       .cursor_lock = mpc1_cursor_lock,
        .get_mpcc_for_dpp = mpc2_get_mpcc_for_dpp,
        .wait_for_idle = mpc2_assert_idle_mpcc,
        .assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect,
index c78fd51..496658f 100644 (file)
        SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MAX_G_Y, mask_sh),\
        SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MIN_G_Y, mask_sh),\
        SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\
-       SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh)
+       SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh),\
+       SF(CUR_VUPDATE_LOCK_SET0, CUR_VUPDATE_LOCK_SET, mask_sh)
 
 /*
  *     DCN2 MPC_OCSC debug status register:
index 5cdbba0..e4348e3 100644 (file)
@@ -508,6 +508,10 @@ enum dcn20_clk_src_array_id {
        .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
                                        mm ## block ## id ## _ ## reg_name
 
+#define VUPDATE_SRII(reg_name, block, id)\
+       .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+                                       mm ## reg_name ## _ ## block ## id
+
 /* NBIO */
 #define NBIO_BASE_INNER(seg) \
        NBIO_BASE__INST0_SEG ## seg
@@ -3064,25 +3068,32 @@ validate_out:
        return out;
 }
 
-
-bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
-               bool fast_validate)
+/*
+ * This must be noinline to ensure anything that deals with FP registers
+ * is contained within this call; previously, compiling with hard-float
+ * would result in FP instructions being emitted outside the boundaries
+ * of the DC_FP_START/END macros, which makes sense as the compiler has
+ * no idea about what is wrapped and what is not.
+ *
+ * This is largely just a workaround to avoid breakage introduced with 5.6;
+ * ideally, all FP-using code should be moved into its own file, only that
+ * file should be compiled with hard-float, and everything exported from
+ * there should be strictly wrapped with DC_FP_START/END.
+ */
+static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
+               struct dc_state *context, bool fast_validate)
 {
        bool voltage_supported = false;
        bool full_pstate_supported = false;
        bool dummy_pstate_supported = false;
        double p_state_latency_us;
 
-       DC_FP_START();
        p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
        context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support =
                dc->debug.disable_dram_clock_change_vactive_support;
 
        if (fast_validate) {
-               voltage_supported = dcn20_validate_bandwidth_internal(dc, context, true);
-
-               DC_FP_END();
-               return voltage_supported;
+               return dcn20_validate_bandwidth_internal(dc, context, true);
        }
 
        // Best case, we support full UCLK switch latency
@@ -3111,7 +3122,15 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
 
 restore_dml_state:
        context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us;
+       return voltage_supported;
+}
 
+bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
+               bool fast_validate)
+{
+       bool voltage_supported = false;
+
+       DC_FP_START();
+       voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate);
        DC_FP_END();
        return voltage_supported;
 }
index b9ff976..707ce0f 100644 (file)
@@ -53,6 +53,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
        .disable_plane = dcn20_disable_plane,
        .pipe_control_lock = dcn20_pipe_control_lock,
        .interdependent_update_lock = dcn10_lock_all_pipes,
+       .cursor_lock = dcn10_cursor_lock,
        .prepare_bandwidth = dcn20_prepare_bandwidth,
        .optimize_bandwidth = dcn20_optimize_bandwidth,
        .update_bandwidth = dcn20_update_bandwidth,
index b25484a..a721bb4 100644 (file)
@@ -284,7 +284,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
        .dram_channel_width_bytes = 4,
        .fabric_datapath_to_dcn_data_return_bytes = 32,
        .dcn_downspread_percent = 0.5,
-       .downspread_percent = 0.5,
+       .downspread_percent = 0.38,
        .dram_page_open_time_ns = 50.0,
        .dram_rw_turnaround_time_ns = 17.5,
        .dram_return_buffer_per_channel_bytes = 8192,
@@ -340,6 +340,10 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
        .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
                                        mm ## block ## id ## _ ## reg_name
 
+#define VUPDATE_SRII(reg_name, block, id)\
+       .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+                                       mm ## reg_name ## _ ## block ## id
+
 /* NBIO */
 #define NBIO_BASE_INNER(seg) \
        NBIF0_BASE__INST0_SEG ## seg
@@ -1374,64 +1378,49 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
 {
        struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
        struct clk_limit_table *clk_table = &bw_params->clk_table;
-       unsigned int i, j, k;
-       int closest_clk_lvl;
+       struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
+       unsigned int i, closest_clk_lvl;
+       int j;
 
        // Default clock levels are used for diags, which may lead to overclocking.
-       if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && !IS_DIAG_DC(dc->ctx->dce_environment)) {
+       if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
                dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
                dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
                dcn2_1_soc.num_chans = bw_params->num_channels;
 
-               /* Vmin: leave lowest DCN clocks, override with dcfclk, fclk, memclk from fuse */
-               dcn2_1_soc.clock_limits[0].state = 0;
-               dcn2_1_soc.clock_limits[0].dcfclk_mhz = clk_table->entries[0].dcfclk_mhz;
-               dcn2_1_soc.clock_limits[0].fabricclk_mhz = clk_table->entries[0].fclk_mhz;
-               dcn2_1_soc.clock_limits[0].socclk_mhz = clk_table->entries[0].socclk_mhz;
-               dcn2_1_soc.clock_limits[0].dram_speed_mts = clk_table->entries[0].memclk_mhz * 2;
-
-               /*
-                * Other levels: find closest DCN clocks that fit the given clock limit using dcfclk
-                * as indicator
-                */
-
-               closest_clk_lvl = -1;
-               /* index currently being filled */
-               k = 1;
-               for (i = 1; i < clk_table->num_entries; i++) {
-                       /* loop backwards, skip duplicate state*/
-                       for (j = dcn2_1_soc.num_states - 1; j >= k; j--) {
+               ASSERT(clk_table->num_entries);
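+               /*
+                * Build each state from the clk table entry, taking the
+                * remaining DCN clocks from the closest existing level.
+                */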
+               for (i = 0; i < clk_table->num_entries; i++) {
+                       /* loop backwards*/
+                       for (closest_clk_lvl = 0, j = dcn2_1_soc.num_states - 1; j >= 0; j--) {
                                if ((unsigned int) dcn2_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
                                        closest_clk_lvl = j;
                                        break;
                                }
                        }
 
-                       /* if found a lvl that fits, use the DCN clks from it, if not, go to next clk limit*/
-                       if (closest_clk_lvl != -1) {
-                               dcn2_1_soc.clock_limits[k].state = i;
-                               dcn2_1_soc.clock_limits[k].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
-                               dcn2_1_soc.clock_limits[k].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
-                               dcn2_1_soc.clock_limits[k].socclk_mhz = clk_table->entries[i].socclk_mhz;
-                               dcn2_1_soc.clock_limits[k].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
-
-                               dcn2_1_soc.clock_limits[k].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
-                               dcn2_1_soc.clock_limits[k].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
-                               dcn2_1_soc.clock_limits[k].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
-                               dcn2_1_soc.clock_limits[k].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
-                               dcn2_1_soc.clock_limits[k].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
-                               dcn2_1_soc.clock_limits[k].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
-                               dcn2_1_soc.clock_limits[k].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
-                               k++;
-                       }
+                       clock_limits[i].state = i;
+                       clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+                       clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+                       clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+                       clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+
+                       clock_limits[i].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+                       clock_limits[i].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+                       clock_limits[i].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+                       clock_limits[i].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+                       clock_limits[i].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+                       clock_limits[i].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+                       clock_limits[i].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+               }
+               for (i = 0; i < clk_table->num_entries; i++)
+                       dcn2_1_soc.clock_limits[i] = clock_limits[i];
+               if (clk_table->num_entries) {
+                       dcn2_1_soc.num_states = clk_table->num_entries;
+                       /* duplicate last level */
+                       dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1];
+                       dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states;
                }
-               dcn2_1_soc.num_states = k;
        }
 
-       /* duplicate last level */
-       dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1];
-       dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states;
-
        dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
 }
 
index a38baa7..b8ec08e 100644 (file)
@@ -1200,7 +1200,7 @@ static void dml_rq_dlg_get_dlg_params(
        min_hratio_fact_l = 1.0;
        min_hratio_fact_c = 1.0;
 
-       if (htaps_l <= 1)
+       if (hratio_l <= 1)
                min_hratio_fact_l = 2.0;
        else if (htaps_l <= 6) {
                if ((hratio_l * 2.0) > 4.0)
@@ -1216,7 +1216,7 @@ static void dml_rq_dlg_get_dlg_params(
 
        hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz;
 
-       if (htaps_c <= 1)
+       if (hratio_c <= 1)
                min_hratio_fact_c = 2.0;
        else if (htaps_c <= 6) {
                if ((hratio_c * 2.0) > 4.0)
@@ -1522,8 +1522,8 @@ static void dml_rq_dlg_get_dlg_params(
 
        disp_dlg_regs->refcyc_per_vm_group_vblank   = get_refcyc_per_vm_group_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
        disp_dlg_regs->refcyc_per_vm_group_flip     = get_refcyc_per_vm_group_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
-       disp_dlg_regs->refcyc_per_vm_req_vblank     = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
-       disp_dlg_regs->refcyc_per_vm_req_flip       = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
+       disp_dlg_regs->refcyc_per_vm_req_vblank     = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10);
+       disp_dlg_regs->refcyc_per_vm_req_flip       = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10);
 
        // Clamp to max for now
        if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int)dml_pow(2, 23))
index 094afc4..50ee8aa 100644 (file)
@@ -210,6 +210,22 @@ struct mpc_funcs {
                struct mpcc_blnd_cfg *blnd_cfg,
                int mpcc_id);
 
+       /*
+        * Lock cursor updates for the specified OPP. The OPP defines the
+        * set of MPCCs that are locked together for cursor updates.
+        *
+        * Parameters:
+        * [in] mpc    - MPC context
+        * [in] opp_id - the OPP to lock cursor updates on
+        * [in] lock   - lock/unlock the OPP
+        *
+        * Return: void
+        */
+       void (*cursor_lock)(
+                       struct mpc *mpc,
+                       int opp_id,
+                       bool lock);
+
        struct mpcc* (*get_mpcc_for_dpp)(
                        struct mpc_tree *tree,
                        int dpp_id);
index d4c1fb2..e57467d 100644 (file)
@@ -86,6 +86,7 @@ struct hw_sequencer_funcs {
                        struct dc_state *context, bool lock);
        void (*set_flip_control_gsl)(struct pipe_ctx *pipe_ctx,
                        bool flip_immediate);
+       void (*cursor_lock)(struct dc *dc, struct pipe_ctx *pipe, bool lock);
 
        /* Timing Related */
        void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes,
index c34eba1..6d7bca5 100644 (file)
 #define ASSERT(expr) ASSERT_CRITICAL(expr)
 
 #else
-#define ASSERT(expr) WARN_ON(!(expr))
+#define ASSERT(expr) WARN_ON_ONCE(!(expr))
 #endif
 
 #define BREAK_TO_DEBUGGER() ASSERT(0)
index 2a12614..e4e5a53 100644 (file)
@@ -1435,7 +1435,8 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
        if (!hwmgr)
                return -EINVAL;
 
-       if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_capability)
+       if (!(hwmgr->not_vf && amdgpu_dpm) ||
+               !hwmgr->hwmgr_func->get_asic_baco_capability)
                return 0;
 
        mutex_lock(&hwmgr->smu_lock);
@@ -1452,8 +1453,7 @@ static int pp_get_asic_baco_state(void *handle, int *state)
        if (!hwmgr)
                return -EINVAL;
 
-       if (!(hwmgr->not_vf && amdgpu_dpm) ||
-               !hwmgr->hwmgr_func->get_asic_baco_state)
+       if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
                return 0;
 
        mutex_lock(&hwmgr->smu_lock);
@@ -1470,7 +1470,8 @@ static int pp_set_asic_baco_state(void *handle, int state)
        if (!hwmgr)
                return -EINVAL;
 
-       if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_asic_baco_state)
+       if (!(hwmgr->not_vf && amdgpu_dpm) ||
+               !hwmgr->hwmgr_func->set_asic_baco_state)
                return 0;
 
        mutex_lock(&hwmgr->smu_lock);
index 283615e..9d89ebf 100644 (file)
@@ -3442,8 +3442,12 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
        drm_dp_queue_down_tx(mgr, txmsg);
 
        ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
-       if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
-               ret = -EIO;
+       if (ret > 0) {
+               if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
+                       ret = -EIO;
+               else
+                       ret = size;
+       }
 
        kfree(txmsg);
 fail_put:
index 1164511..4ede08a 100644 (file)
@@ -5111,7 +5111,7 @@ static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *d
        struct drm_display_mode *mode;
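+       /* the stored DispID field is (pixel clock / 10 kHz) - 1, hence the + 1 */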
        unsigned pixel_clock = (timings->pixel_clock[0] |
                                (timings->pixel_clock[1] << 8) |
-                               (timings->pixel_clock[2] << 16));
+                               (timings->pixel_clock[2] << 16)) + 1;
        unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
        unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1;
        unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1;
index 7f386ad..910108c 100644 (file)
@@ -241,8 +241,12 @@ static int drm_hdcp_request_srm(struct drm_device *drm_dev,
 
        ret = request_firmware_direct(&fw, (const char *)fw_name,
                                      drm_dev->dev);
-       if (ret < 0)
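+       /* a missing SRM firmware file is not an error: report no revoked KSVs */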
+       if (ret < 0) {
+               *revoked_ksv_cnt = 0;
+               *revoked_ksv_list = NULL;
+               ret = 0;
                goto exit;
+       }
 
        if (fw->size && fw->data)
                ret = drm_hdcp_srm_update(fw->data, fw->size, revoked_ksv_list,
@@ -287,6 +291,8 @@ int drm_hdcp_check_ksvs_revoked(struct drm_device *drm_dev, u8 *ksvs,
 
        ret = drm_hdcp_request_srm(drm_dev, &revoked_ksv_list,
                                   &revoked_ksv_cnt);
+       if (ret)
+               return ret;
 
        /* revoked_ksv_cnt will be zero when above function failed */
        for (i = 0; i < revoked_ksv_cnt; i++)
index 37f77ae..0158e49 100644 (file)
@@ -182,21 +182,35 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
                              int tiling_mode, unsigned int stride)
 {
        struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
-       struct i915_vma *vma;
+       struct i915_vma *vma, *vn;
+       LIST_HEAD(unbind);
        int ret = 0;
 
        if (tiling_mode == I915_TILING_NONE)
                return 0;
 
        mutex_lock(&ggtt->vm.mutex);
+
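+       /* collect the vmas to unbind under the vma lock; unbind after dropping it */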
+       spin_lock(&obj->vma.lock);
        for_each_ggtt_vma(vma, obj) {
+               GEM_BUG_ON(vma->vm != &ggtt->vm);
+
                if (i915_vma_fence_prepare(vma, tiling_mode, stride))
                        continue;
 
+               list_move(&vma->vm_link, &unbind);
+       }
+       spin_unlock(&obj->vma.lock);
+
+       list_for_each_entry_safe(vma, vn, &unbind, vm_link) {
                ret = __i915_vma_unbind(vma);
-               if (ret)
+               if (ret) {
+                       /* Restore the remaining vma on an error */
+                       list_splice(&unbind, &ggtt->vm.bound_list);
                        break;
+               }
        }
+
        mutex_unlock(&ggtt->vm.mutex);
 
        return ret;
@@ -268,6 +282,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
        }
        mutex_unlock(&obj->mm.lock);
 
+       spin_lock(&obj->vma.lock);
        for_each_ggtt_vma(vma, obj) {
                vma->fence_size =
                        i915_gem_fence_size(i915, vma->size, tiling, stride);
@@ -278,6 +293,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
                if (vma->fence)
                        vma->fence->dirty = true;
        }
+       spin_unlock(&obj->vma.lock);
 
        obj->tiling_and_stride = tiling | stride;
        i915_gem_object_unlock(obj);
index 2d0fd50..d4f94ca 100644 (file)
@@ -1477,8 +1477,10 @@ static int igt_ppgtt_pin_update(void *arg)
                unsigned int page_size = BIT(first);
 
                obj = i915_gem_object_create_internal(dev_priv, page_size);
-               if (IS_ERR(obj))
-                       return PTR_ERR(obj);
+               if (IS_ERR(obj)) {
+                       err = PTR_ERR(obj);
+                       goto out_vm;
+               }
 
                vma = i915_vma_instance(obj, vm, NULL);
                if (IS_ERR(vma)) {
@@ -1531,8 +1533,10 @@ static int igt_ppgtt_pin_update(void *arg)
        }
 
        obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE);
-       if (IS_ERR(obj))
-               return PTR_ERR(obj);
+       if (IS_ERR(obj)) {
+               err = PTR_ERR(obj);
+               goto out_vm;
+       }
 
        vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(vma)) {
index 91debbc..08b56d7 100644 (file)
@@ -521,6 +521,8 @@ int intel_timeline_read_hwsp(struct i915_request *from,
 
        rcu_read_lock();
        cl = rcu_dereference(from->hwsp_cacheline);
+       if (i915_request_completed(from)) /* confirm cacheline is valid */
+               goto unlock;
        if (unlikely(!i915_active_acquire_if_busy(&cl->active)))
                goto unlock; /* seqno wrapped and completed! */
        if (unlikely(i915_request_completed(from)))
index 9f0653c..d91557d 100644 (file)
@@ -3358,7 +3358,8 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 {
        struct intel_uncore *uncore = &dev_priv->uncore;
 
-       u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
+       u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
+               GEN8_PIPE_CDCLK_CRC_DONE;
        u32 de_pipe_enables;
        u32 de_port_masked = GEN8_AUX_CHANNEL_A;
        u32 de_port_enables;
@@ -3369,13 +3370,10 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
                de_misc_masked |= GEN8_DE_MISC_GSE;
 
        if (INTEL_GEN(dev_priv) >= 9) {
-               de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
                de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
                                  GEN9_AUX_CHANNEL_D;
                if (IS_GEN9_LP(dev_priv))
                        de_port_masked |= BXT_DE_PORT_GMBUS;
-       } else {
-               de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
        }
 
        if (INTEL_GEN(dev_priv) >= 11)
index 08699fa..82e3bc2 100644 (file)
@@ -158,16 +158,18 @@ vma_create(struct drm_i915_gem_object *obj,
 
        GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
 
+       spin_lock(&obj->vma.lock);
+
        if (i915_is_ggtt(vm)) {
                if (unlikely(overflows_type(vma->size, u32)))
-                       goto err_vma;
+                       goto err_unlock;
 
                vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
                                                      i915_gem_object_get_tiling(obj),
                                                      i915_gem_object_get_stride(obj));
                if (unlikely(vma->fence_size < vma->size || /* overflow */
                             vma->fence_size > vm->total))
-                       goto err_vma;
+                       goto err_unlock;
 
                GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
 
@@ -179,8 +181,6 @@ vma_create(struct drm_i915_gem_object *obj,
                __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
        }
 
-       spin_lock(&obj->vma.lock);
-
        rb = NULL;
        p = &obj->vma.tree.rb_node;
        while (*p) {
@@ -225,6 +225,8 @@ vma_create(struct drm_i915_gem_object *obj,
 
        return vma;
 
+err_unlock:
+       spin_unlock(&obj->vma.lock);
 err_vma:
        i915_vma_free(vma);
        return ERR_PTR(-E2BIG);
index 9dfe7cb..1754c05 100644 (file)
@@ -843,6 +843,7 @@ static const struct of_device_id ingenic_drm_of_match[] = {
        { .compatible = "ingenic,jz4770-lcd", .data = &jz4770_soc_info },
        { /* sentinel */ },
 };
+MODULE_DEVICE_TABLE(of, ingenic_drm_of_match);
 
 static struct platform_driver ingenic_drm_driver = {
        .driver = {
index d1086b2..05863b2 100644 (file)
@@ -480,9 +480,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
                return ret;
 
        ret = qxl_release_reserve_list(release, true);
-       if (ret)
+       if (ret) {
+               qxl_release_free(qdev, release);
                return ret;
-
+       }
        cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
        cmd->type = QXL_SURFACE_CMD_CREATE;
        cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
@@ -499,8 +500,8 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
        /* no need to add a release to the fence for this surface bo,
           since it is only released when we ask to destroy the surface
           and it would never signal otherwise */
-       qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
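+       /*
+        * Fence the BOs before pushing: once the command is on the ring,
+        * the host may process and free the release at any time.
+        */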
        qxl_release_fence_buffer_objects(release);
+       qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
 
        surf->hw_surf_alloc = true;
        spin_lock(&qdev->surf_id_idr_lock);
@@ -542,9 +543,8 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
        cmd->surface_id = id;
        qxl_release_unmap(qdev, release, &cmd->release_info);
 
-       qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
-
        qxl_release_fence_buffer_objects(release);
+       qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
 
        return 0;
 }
index 09583a0..91f398d 100644 (file)
@@ -510,8 +510,8 @@ static int qxl_primary_apply_cursor(struct drm_plane *plane)
        cmd->u.set.visible = 1;
        qxl_release_unmap(qdev, release, &cmd->release_info);
 
-       qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
        qxl_release_fence_buffer_objects(release);
+       qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
 
        return ret;
 
@@ -652,8 +652,8 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
        cmd->u.position.y = plane->state->crtc_y + fb->hot_y;
 
        qxl_release_unmap(qdev, release, &cmd->release_info);
-       qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
        qxl_release_fence_buffer_objects(release);
+       qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
 
        if (old_cursor_bo != NULL)
                qxl_bo_unpin(old_cursor_bo);
@@ -700,8 +700,8 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane,
        cmd->type = QXL_CURSOR_HIDE;
        qxl_release_unmap(qdev, release, &cmd->release_info);
 
-       qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
        qxl_release_fence_buffer_objects(release);
+       qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
 }
 
 static void qxl_update_dumb_head(struct qxl_device *qdev,
index 5bebf1e..3599db0 100644 (file)
@@ -209,9 +209,10 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
                goto out_release_backoff;
 
        rects = drawable_set_clipping(qdev, num_clips, clips_bo);
-       if (!rects)
+       if (!rects) {
+               ret = -EINVAL;
                goto out_release_backoff;
-
+       }
        drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
 
        drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
@@ -242,8 +243,8 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
        }
        qxl_bo_kunmap(clips_bo);
 
-       qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
        qxl_release_fence_buffer_objects(release);
+       qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
 
 out_release_backoff:
        if (ret)
index 43688ec..60ab715 100644 (file)
@@ -212,7 +212,8 @@ qxl_image_init_helper(struct qxl_device *qdev,
                break;
        default:
                DRM_ERROR("unsupported image bit depth\n");
-               return -EINVAL; /* TODO: cleanup */
+               qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
+               return -EINVAL;
        }
        image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
        image->u.bitmap.x = width;
index 8117a45..72f3f1b 100644 (file)
@@ -261,11 +261,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
                        apply_surf_reloc(qdev, &reloc_info[i]);
        }
 
+       qxl_release_fence_buffer_objects(release);
        ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
-       if (ret)
-               qxl_release_backoff_reserve_list(release);
-       else
-               qxl_release_fence_buffer_objects(release);
 
 out_free_bos:
 out_free_release:
index 0599397..3eb89f1 100644 (file)
@@ -717,7 +717,7 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
        struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
        struct sun6i_dsi *dsi = encoder_to_sun6i_dsi(encoder);
        struct mipi_dsi_device *device = dsi->device;
-       union phy_configure_opts opts = { };
+       union phy_configure_opts opts = { };
        struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
        u16 delay;
        int err;
index c1824bd..7879ff5 100644 (file)
@@ -221,6 +221,7 @@ struct virtio_gpu_fpriv {
 /* virtio_ioctl.c */
 #define DRM_VIRTIO_NUM_IOCTLS 10
 extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
+void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
 
 /* virtio_kms.c */
 int virtio_gpu_init(struct drm_device *dev);
index 0d6152c..f0d5a89 100644 (file)
@@ -39,6 +39,9 @@ int virtio_gpu_gem_create(struct drm_file *file,
        int ret;
        u32 handle;
 
+       if (vgdev->has_virgl_3d)
+               virtio_gpu_create_context(dev, file);
+
        ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
        if (ret < 0)
                return ret;
index 3f60bf2..512daff 100644 (file)
@@ -34,8 +34,7 @@
 
 #include "virtgpu_drv.h"
 
-static void virtio_gpu_create_context(struct drm_device *dev,
-                                     struct drm_file *file)
+void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
 {
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
index f4ea4ce..0a5c8cf 100644 (file)
@@ -53,14 +53,6 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
                      events_clear, &events_clear);
 }
 
-static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
-                                     uint32_t ctx_id)
-{
-       virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
-       virtio_gpu_notify(vgdev);
-       ida_free(&vgdev->ctx_id_ida, ctx_id - 1);
-}
-
 static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
                               void (*work_func)(struct work_struct *work))
 {
@@ -275,14 +267,17 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
 void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
 {
        struct virtio_gpu_device *vgdev = dev->dev_private;
-       struct virtio_gpu_fpriv *vfpriv;
+       struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
 
        if (!vgdev->has_virgl_3d)
                return;
 
-       vfpriv = file->driver_priv;
+       if (vfpriv->context_created) {
+               virtio_gpu_cmd_context_destroy(vgdev, vfpriv->ctx_id);
+               virtio_gpu_notify(vgdev);
+       }
 
-       virtio_gpu_context_destroy(vgdev, vfpriv->ctx_id);
+       ida_free(&vgdev->ctx_id_ida, vfpriv->ctx_id - 1);
        mutex_destroy(&vfpriv->context_lock);
        kfree(vfpriv);
        file->driver_priv = NULL;
index 7c89edb..34f0737 100644 (file)
@@ -1155,6 +1155,7 @@ config HID_ALPS
 config HID_MCP2221
        tristate "Microchip MCP2221 HID USB-to-I2C/SMbus host support"
        depends on USB_HID && I2C
+       depends on GPIOLIB
        ---help---
        Provides I2C and SMBUS host adapter functionality over USB-HID
        through MCP2221 device.
index fa70415..b2ad319 100644 (file)
@@ -802,6 +802,7 @@ static int alps_probe(struct hid_device *hdev, const struct hid_device_id *id)
                break;
        case HID_DEVICE_ID_ALPS_U1_DUAL:
        case HID_DEVICE_ID_ALPS_U1:
+       case HID_DEVICE_ID_ALPS_U1_UNICORN_LEGACY:
                data->dev_type = U1;
                break;
        default:
index b18b131..1c71a1a 100644 (file)
 #define HID_DEVICE_ID_ALPS_U1_DUAL_PTP 0x121F
 #define HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP    0x1220
 #define HID_DEVICE_ID_ALPS_U1          0x1215
+#define HID_DEVICE_ID_ALPS_U1_UNICORN_LEGACY         0x121E
 #define HID_DEVICE_ID_ALPS_T4_BTNLESS  0x120C
 #define HID_DEVICE_ID_ALPS_1222                0x1222
 
-
 #define USB_VENDOR_ID_AMI              0x046b
 #define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE      0xff10
 
 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7349      0x7349
 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7      0x73f7
 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001      0xa001
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002      0xc002
 
 #define USB_VENDOR_ID_ELAN             0x04f3
 #define USB_DEVICE_ID_TOSHIBA_CLICK_L9W        0x0401
 #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2      0xc218
 #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2    0xc219
 #define USB_DEVICE_ID_LOGITECH_G15_LCD         0xc222
+#define USB_DEVICE_ID_LOGITECH_G11             0xc225
 #define USB_DEVICE_ID_LOGITECH_G15_V2_LCD      0xc227
 #define USB_DEVICE_ID_LOGITECH_G510            0xc22d
 #define USB_DEVICE_ID_LOGITECH_G510_USB_AUDIO  0xc22e
 #define USB_DEVICE_ID_SYMBOL_SCANNER_2 0x1300
 #define USB_DEVICE_ID_SYMBOL_SCANNER_3 0x1200
 
+#define I2C_VENDOR_ID_SYNAPTICS     0x06cb
+#define I2C_PRODUCT_ID_SYNAPTICS_SYNA2393   0x7a13
+
 #define USB_VENDOR_ID_SYNAPTICS                0x06cb
 #define USB_DEVICE_ID_SYNAPTICS_TP     0x0001
 #define USB_DEVICE_ID_SYNAPTICS_INT_TP 0x0002
 #define USB_DEVICE_ID_SYNAPTICS_LTS2   0x1d10
 #define USB_DEVICE_ID_SYNAPTICS_HD     0x0ac3
 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD        0x1ac3
+#define USB_DEVICE_ID_SYNAPTICS_DELL_K12A      0x2819
 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012       0x2968
 #define USB_DEVICE_ID_SYNAPTICS_TP_V103        0x5710
 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5   0x81a7
index ad4b541..ef0cbcd 100644 (file)
@@ -872,6 +872,10 @@ error_hw_stop:
 }
 
 static const struct hid_device_id lg_g15_devices[] = {
+       /* The G11 is a G15 without the LCD, treat it as a G15 */
+       { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+               USB_DEVICE_ID_LOGITECH_G11),
+               .driver_data = LG_G15 },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
                         USB_DEVICE_ID_LOGITECH_G15_LCD),
                .driver_data = LG_G15 },
index 362805d..03c720b 100644 (file)
@@ -1922,6 +1922,9 @@ static const struct hid_device_id mt_devices[] = {
        { .driver_data = MT_CLS_EGALAX_SERIAL,
                MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
                        USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
+       { .driver_data = MT_CLS_EGALAX,
+               MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
+                       USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002) },
 
        /* Elitegroup panel */
        { .driver_data = MT_CLS_SERIAL,
index ebec818..e4cb543 100644 (file)
@@ -163,6 +163,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103), HID_QUIRK_NO_INIT_REPORTS },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DELL_K12A), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD), HID_QUIRK_BADPAD },
        { HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET },
index 009000c..294c84e 100644 (file)
@@ -177,6 +177,8 @@ static const struct i2c_hid_quirks {
                 I2C_HID_QUIRK_BOGUS_IRQ },
        { USB_VENDOR_ID_ALPS_JP, HID_ANY_ID,
                 I2C_HID_QUIRK_RESET_ON_RESUME },
+       { I2C_VENDOR_ID_SYNAPTICS, I2C_PRODUCT_ID_SYNAPTICS_SYNA2393,
+                I2C_HID_QUIRK_RESET_ON_RESUME },
        { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
                I2C_HID_QUIRK_BAD_INPUT_SIZE },
        { 0, 0 }
index c7bc9db..17a638f 100644 (file)
@@ -682,16 +682,21 @@ static int usbhid_open(struct hid_device *hid)
        struct usbhid_device *usbhid = hid->driver_data;
        int res;
 
+       mutex_lock(&usbhid->mutex);
+
        set_bit(HID_OPENED, &usbhid->iofl);
 
-       if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
-               return 0;
+       if (hid->quirks & HID_QUIRK_ALWAYS_POLL) {
+               res = 0;
+               goto Done;
+       }
 
        res = usb_autopm_get_interface(usbhid->intf);
        /* the device must be awake to reliably request remote wakeup */
        if (res < 0) {
                clear_bit(HID_OPENED, &usbhid->iofl);
-               return -EIO;
+               res = -EIO;
+               goto Done;
        }
 
        usbhid->intf->needs_remote_wakeup = 1;
@@ -725,6 +730,9 @@ static int usbhid_open(struct hid_device *hid)
                msleep(50);
 
        clear_bit(HID_RESUME_RUNNING, &usbhid->iofl);
+
+ Done:
+       mutex_unlock(&usbhid->mutex);
        return res;
 }
 
@@ -732,6 +740,8 @@ static void usbhid_close(struct hid_device *hid)
 {
        struct usbhid_device *usbhid = hid->driver_data;
 
+       mutex_lock(&usbhid->mutex);
+
        /*
         * Make sure we don't restart data acquisition due to
         * a resumption we no longer care about by avoiding racing
@@ -743,12 +753,13 @@ static void usbhid_close(struct hid_device *hid)
                clear_bit(HID_IN_POLLING, &usbhid->iofl);
        spin_unlock_irq(&usbhid->lock);
 
-       if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
-               return;
+       if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) {
+               hid_cancel_delayed_stuff(usbhid);
+               usb_kill_urb(usbhid->urbin);
+               usbhid->intf->needs_remote_wakeup = 0;
+       }
 
-       hid_cancel_delayed_stuff(usbhid);
-       usb_kill_urb(usbhid->urbin);
-       usbhid->intf->needs_remote_wakeup = 0;
+       mutex_unlock(&usbhid->mutex);
 }
 
 /*
@@ -1057,6 +1068,8 @@ static int usbhid_start(struct hid_device *hid)
        unsigned int n, insize = 0;
        int ret;
 
+       mutex_lock(&usbhid->mutex);
+
        clear_bit(HID_DISCONNECTED, &usbhid->iofl);
 
        usbhid->bufsize = HID_MIN_BUFFER_SIZE;
@@ -1177,6 +1190,8 @@ static int usbhid_start(struct hid_device *hid)
                usbhid_set_leds(hid);
                device_set_wakeup_enable(&dev->dev, 1);
        }
+
+       mutex_unlock(&usbhid->mutex);
        return 0;
 
 fail:
@@ -1187,6 +1202,7 @@ fail:
        usbhid->urbout = NULL;
        usbhid->urbctrl = NULL;
        hid_free_buffers(dev, hid);
+       mutex_unlock(&usbhid->mutex);
        return ret;
 }
 
@@ -1202,6 +1218,8 @@ static void usbhid_stop(struct hid_device *hid)
                usbhid->intf->needs_remote_wakeup = 0;
        }
 
+       mutex_lock(&usbhid->mutex);
+
        clear_bit(HID_STARTED, &usbhid->iofl);
        spin_lock_irq(&usbhid->lock);   /* Sync with error and led handlers */
        set_bit(HID_DISCONNECTED, &usbhid->iofl);
@@ -1222,6 +1240,8 @@ static void usbhid_stop(struct hid_device *hid)
        usbhid->urbout = NULL;
 
        hid_free_buffers(hid_to_usb_dev(hid), hid);
+
+       mutex_unlock(&usbhid->mutex);
 }
 
 static int usbhid_power(struct hid_device *hid, int lvl)
@@ -1382,6 +1402,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
        INIT_WORK(&usbhid->reset_work, hid_reset);
        timer_setup(&usbhid->io_retry, hid_retry_timeout, 0);
        spin_lock_init(&usbhid->lock);
+       mutex_init(&usbhid->mutex);
 
        ret = hid_add_device(hid);
        if (ret) {
index 8620408..75fe85d 100644 (file)
@@ -80,6 +80,7 @@ struct usbhid_device {
        dma_addr_t outbuf_dma;                                          /* Output buffer dma */
        unsigned long last_out;                                                 /* record of last output for timeouts */
 
+       struct mutex mutex;                                             /* start/stop/open/close */
        spinlock_t lock;                                                /* fifo spinlock */
        unsigned long iofl;                                             /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */
        struct timer_list io_retry;                                     /* Retry timer */
index 5ded94b..cd71e71 100644 (file)
@@ -319,9 +319,11 @@ static void wacom_feature_mapping(struct hid_device *hdev,
                        data[0] = field->report->id;
                        ret = wacom_get_report(hdev, HID_FEATURE_REPORT,
                                               data, n, WAC_CMD_RETRIES);
-                       if (ret == n) {
+                       if (ret == n && features->type == HID_GENERIC) {
                                ret = hid_report_raw_event(hdev,
                                        HID_FEATURE_REPORT, data, n, 0);
+                       } else if (ret == 2 && features->type != HID_GENERIC) {
+                               features->touch_max = data[1];
                        } else {
                                features->touch_max = 16;
                                hid_warn(hdev, "wacom_feature_mapping: "
index d99a9d4..1c96809 100644 (file)
@@ -1427,11 +1427,13 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
 {
        struct input_dev *pad_input = wacom->pad_input;
        unsigned char *data = wacom->data;
+       int nbuttons = wacom->features.numbered_buttons;
 
-       int buttons = data[282] | ((data[281] & 0x40) << 2);
+       int expresskeys = data[282];
+       int center = (data[281] & 0x40) >> 6;
        int ring = data[285] & 0x7F;
        bool ringstatus = data[285] & 0x80;
-       bool prox = buttons || ringstatus;
+       bool prox = expresskeys || center || ringstatus;
 
        /* Fix touchring data: userspace expects 0 at left and increasing clockwise */
        ring = 71 - ring;
@@ -1439,7 +1441,8 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
        if (ring > 71)
                ring -= 72;
 
-       wacom_report_numbered_buttons(pad_input, 9, buttons);
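+       /* the center button is reported as the highest numbered button */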
+       wacom_report_numbered_buttons(pad_input, nbuttons,
+                                      expresskeys | (center << (nbuttons - 1)));
 
        input_report_abs(pad_input, ABS_WHEEL, ringstatus ? ring : 0);
 
@@ -2637,9 +2640,25 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
                        case HID_DG_TIPSWITCH:
                                hid_data->last_slot_field = equivalent_usage;
                                break;
+                       case HID_DG_CONTACTCOUNT:
+                               hid_data->cc_report = report->id;
+                               hid_data->cc_index = i;
+                               hid_data->cc_value_index = j;
+                               break;
                        }
                }
        }
+
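+       /* prefer the contact count reported by the device; fall back to touch_max */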
+       if (hid_data->cc_report != 0 &&
+           hid_data->cc_index >= 0) {
+               struct hid_field *field = report->field[hid_data->cc_index];
+               int value = field->value[hid_data->cc_value_index];
+
+               if (value)
+                       hid_data->num_expected = value;
+       } else {
+               hid_data->num_expected = wacom_wac->features.touch_max;
+       }
 }
 
 static void wacom_wac_finger_report(struct hid_device *hdev,
@@ -2649,7 +2668,6 @@ static void wacom_wac_finger_report(struct hid_device *hdev,
        struct wacom_wac *wacom_wac = &wacom->wacom_wac;
        struct input_dev *input = wacom_wac->touch_input;
        unsigned touch_max = wacom_wac->features.touch_max;
-       struct hid_data *hid_data = &wacom_wac->hid_data;
 
        /* If more packets of data are expected, give us a chance to
         * process them rather than immediately syncing a partial
@@ -2663,7 +2681,6 @@ static void wacom_wac_finger_report(struct hid_device *hdev,
 
        input_sync(input);
        wacom_wac->hid_data.num_received = 0;
-       hid_data->num_expected = 0;
 
        /* keep touch state for pen event */
        wacom_wac->shared->touch_down = wacom_wac_finger_count_touches(wacom_wac);
@@ -2738,73 +2755,12 @@ static void wacom_report_events(struct hid_device *hdev,
        }
 }
 
-static void wacom_set_num_expected(struct hid_device *hdev,
-                                  struct hid_report *report,
-                                  int collection_index,
-                                  struct hid_field *field,
-                                  int field_index)
-{
-       struct wacom *wacom = hid_get_drvdata(hdev);
-       struct wacom_wac *wacom_wac = &wacom->wacom_wac;
-       struct hid_data *hid_data = &wacom_wac->hid_data;
-       unsigned int original_collection_level =
-               hdev->collection[collection_index].level;
-       bool end_collection = false;
-       int i;
-
-       if (hid_data->num_expected)
-               return;
-
-       // find the contact count value for this segment
-       for (i = field_index; i < report->maxfield && !end_collection; i++) {
-               struct hid_field *field = report->field[i];
-               unsigned int field_level =
-                       hdev->collection[field->usage[0].collection_index].level;
-               unsigned int j;
-
-               if (field_level != original_collection_level)
-                       continue;
-
-               for (j = 0; j < field->maxusage; j++) {
-                       struct hid_usage *usage = &field->usage[j];
-
-                       if (usage->collection_index != collection_index) {
-                               end_collection = true;
-                               break;
-                       }
-                       if (wacom_equivalent_usage(usage->hid) == HID_DG_CONTACTCOUNT) {
-                               hid_data->cc_report = report->id;
-                               hid_data->cc_index = i;
-                               hid_data->cc_value_index = j;
-
-                               if (hid_data->cc_report != 0 &&
-                                   hid_data->cc_index >= 0) {
-
-                                       struct hid_field *field =
-                                               report->field[hid_data->cc_index];
-                                       int value =
-                                               field->value[hid_data->cc_value_index];
-
-                                       if (value)
-                                               hid_data->num_expected = value;
-                               }
-                       }
-               }
-       }
-
-       if (hid_data->cc_report == 0 || hid_data->cc_index < 0)
-               hid_data->num_expected = wacom_wac->features.touch_max;
-}
-
 static int wacom_wac_collection(struct hid_device *hdev, struct hid_report *report,
                         int collection_index, struct hid_field *field,
                         int field_index)
 {
        struct wacom *wacom = hid_get_drvdata(hdev);
 
-       if (WACOM_FINGER_FIELD(field))
-               wacom_set_num_expected(hdev, report, collection_index, field,
-                                      field_index);
        wacom_report_events(hdev, report, collection_index, field_index);
 
        /*
index 6098e0c..533c8b8 100644 (file)
@@ -184,11 +184,7 @@ void hv_synic_enable_regs(unsigned int cpu)
 
        shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
        shared_sint.masked = false;
-       if (ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED)
-               shared_sint.auto_eoi = false;
-       else
-               shared_sint.auto_eoi = true;
-
+       shared_sint.auto_eoi = hv_recommend_using_aeoi();
        hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
 
        /* Enable the global synic bit */
index e70783e..f9d14db 100644 (file)
@@ -286,8 +286,8 @@ TRACE_EVENT(vmbus_send_tl_connect_request,
                    __field(int, ret)
                    ),
            TP_fast_assign(
-                   memcpy(__entry->guest_id, &msg->guest_endpoint_id.b, 16);
-                   memcpy(__entry->host_id, &msg->host_service_id.b, 16);
+                   export_guid(__entry->guest_id, &msg->guest_endpoint_id);
+                   export_guid(__entry->host_id, &msg->host_service_id);
                    __entry->ret = ret;
                    ),
            TP_printk("sending guest_endpoint_id %pUl, host_service_id %pUl, "
index a68bce4..e06c6b9 100644 (file)
@@ -978,6 +978,9 @@ static int vmbus_resume(struct device *child_device)
 
        return drv->resume(dev);
 }
+#else
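+/* stubs so the dev_pm_ops initializers below build without CONFIG_PM_SLEEP */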
+#define vmbus_suspend NULL
+#define vmbus_resume NULL
 #endif /* CONFIG_PM_SLEEP */
 
 /*
@@ -997,11 +1000,22 @@ static void vmbus_device_release(struct device *device)
 }
 
 /*
- * Note: we must use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS rather than
- * SET_SYSTEM_SLEEP_PM_OPS: see the comment before vmbus_bus_pm.
+ * Note: we must use the "noirq" ops: see the comment before vmbus_bus_pm.
+ *
+ * suspend_noirq/resume_noirq are set to NULL to support Suspend-to-Idle: we
+ * shouldn't suspend the vmbus devices upon Suspend-to-Idle, otherwise there
+ * is no way to wake up a Generation-2 VM.
+ *
+ * The other 4 ops are for hibernation.
  */
+
 static const struct dev_pm_ops vmbus_pm = {
-       SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(vmbus_suspend, vmbus_resume)
+       .suspend_noirq  = NULL,
+       .resume_noirq   = NULL,
+       .freeze_noirq   = vmbus_suspend,
+       .thaw_noirq     = vmbus_resume,
+       .poweroff_noirq = vmbus_suspend,
+       .restore_noirq  = vmbus_resume,
 };
 
 /* The one and only one */
@@ -2281,6 +2295,9 @@ static int vmbus_bus_resume(struct device *dev)
 
        return 0;
 }
+#else
+#define vmbus_bus_suspend NULL
+#define vmbus_bus_resume NULL
 #endif /* CONFIG_PM_SLEEP */
 
 static const struct acpi_device_id vmbus_acpi_device_ids[] = {
@@ -2291,16 +2308,24 @@ static const struct acpi_device_id vmbus_acpi_device_ids[] = {
 MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
 
 /*
- * Note: we must use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS rather than
- * SET_SYSTEM_SLEEP_PM_OPS, otherwise NIC SR-IOV can not work, because the
- * "pci_dev_pm_ops" uses the "noirq" callbacks: in the resume path, the
- * pci "noirq" restore callback runs before "non-noirq" callbacks (see
+ * Note: we must use the "noirq" ops, otherwise hibernation cannot work with
+ * PCI device assignment, because "pci_dev_pm_ops" uses the "noirq" ops: in
+ * the resume path, the pci "noirq" restore op runs before the "non-noirq" ops (see
  * resume_target_kernel() -> dpm_resume_start(), and hibernation_restore() ->
  * dpm_resume_end()). This means vmbus_bus_resume() and the pci-hyperv's
- * resume callback must also run via the "noirq" callbacks.
+ * resume callback must also run via the "noirq" ops.
+ *
+ * Set suspend_noirq/resume_noirq to NULL for Suspend-to-Idle: see the comment
+ * earlier in this file before vmbus_pm.
  */
+
 static const struct dev_pm_ops vmbus_bus_pm = {
-       SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(vmbus_bus_suspend, vmbus_bus_resume)
+       .suspend_noirq  = NULL,
+       .resume_noirq   = NULL,
+       .freeze_noirq   = vmbus_bus_suspend,
+       .thaw_noirq     = vmbus_bus_resume,
+       .poweroff_noirq = vmbus_bus_suspend,
+       .restore_noirq  = vmbus_bus_resume
 };
 
 static struct acpi_driver vmbus_acpi_driver = {
index 5e4800d..cd3fd5e 100644 (file)
@@ -349,12 +349,12 @@ static int amd_mp2_pci_probe(struct pci_dev *pci_dev,
        if (!privdata)
                return -ENOMEM;
 
+       privdata->pci_dev = pci_dev;
        rc = amd_mp2_pci_init(privdata, pci_dev);
        if (rc)
                return rc;
 
        mutex_init(&privdata->c2p_lock);
-       privdata->pci_dev = pci_dev;
 
        pm_runtime_set_autosuspend_delay(&pci_dev->dev, 1000);
        pm_runtime_use_autosuspend(&pci_dev->dev);
index 07c1993..f51702d 100644 (file)
@@ -603,6 +603,7 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
        /* Ack all interrupts except for Rx done */
        writel(irq_received & ~ASPEED_I2CD_INTR_RX_DONE,
               bus->base + ASPEED_I2C_INTR_STS_REG);
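+       /* read back to ensure the ack write has reached the device */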
+       readl(bus->base + ASPEED_I2C_INTR_STS_REG);
        irq_remaining = irq_received;
 
 #if IS_ENABLED(CONFIG_I2C_SLAVE)
@@ -645,9 +646,11 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
                        irq_received, irq_handled);
 
        /* Ack Rx done */
-       if (irq_received & ASPEED_I2CD_INTR_RX_DONE)
+       if (irq_received & ASPEED_I2CD_INTR_RX_DONE) {
                writel(ASPEED_I2CD_INTR_RX_DONE,
                       bus->base + ASPEED_I2C_INTR_STS_REG);
+               readl(bus->base + ASPEED_I2C_INTR_STS_REG);
+       }
        spin_unlock(&bus->lock);
        return irq_remaining ? IRQ_NONE : IRQ_HANDLED;
 }
index 44be092..d091a12 100644 (file)
@@ -360,6 +360,9 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
                        value = (u8)((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK);
                        i2c_slave_event(iproc_i2c->slave,
                                        I2C_SLAVE_WRITE_RECEIVED, &value);
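+                       /* RX_END means the master ended the write with a STOP */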
+                       if (rx_status == I2C_SLAVE_RX_END)
+                               i2c_slave_event(iproc_i2c->slave,
+                                               I2C_SLAVE_STOP, &value);
                }
        } else if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
                /* Master read other than start */
index 8280ac7..4c4d17d 100644 (file)
@@ -996,13 +996,14 @@ tegra_i2c_poll_completion_timeout(struct tegra_i2c_dev *i2c_dev,
        do {
                u32 status = i2c_readl(i2c_dev, I2C_INT_STATUS);
 
-               if (status)
+               if (status) {
                        tegra_i2c_isr(i2c_dev->irq, i2c_dev);
 
-               if (completion_done(complete)) {
-                       s64 delta = ktime_ms_delta(ktimeout, ktime);
+                       if (completion_done(complete)) {
+                               s64 delta = ktime_ms_delta(ktimeout, ktime);
 
-                       return msecs_to_jiffies(delta) ?: 1;
+                               return msecs_to_jiffies(delta) ?: 1;
+                       }
                }
 
                ktime = ktime_get();
@@ -1029,18 +1030,14 @@ tegra_i2c_wait_completion_timeout(struct tegra_i2c_dev *i2c_dev,
                disable_irq(i2c_dev->irq);
 
                /*
-                * Under some rare circumstances (like running KASAN +
-                * NFS root) CPU, which handles interrupt, may stuck in
-                * uninterruptible state for a significant time.  In this
-                * case we will get timeout if I2C transfer is running on
-                * a sibling CPU, despite of IRQ being raised.
-                *
-                * In order to handle this rare condition, the IRQ status
-                * needs to be checked after timeout.
+                * There is a chance that completion may happen after IRQ
+                * synchronization, which is done by disable_irq().
                 */
-               if (ret == 0)
-                       ret = tegra_i2c_poll_completion_timeout(i2c_dev,
-                                                               complete, 0);
+               if (ret == 0 && completion_done(complete)) {
+                       dev_warn(i2c_dev->dev,
+                                "completion done after timeout\n");
+                       ret = 1;
+               }
        }
 
        return ret;
@@ -1219,15 +1216,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
                time_left = tegra_i2c_wait_completion_timeout(
                                i2c_dev, &i2c_dev->dma_complete, xfer_time);
 
-               /*
-                * Synchronize DMA first, since dmaengine_terminate_sync()
-                * performs synchronization after the transfer's termination
-                * and we want to get a completion if transfer succeeded.
-                */
-               dmaengine_synchronize(i2c_dev->msg_read ?
-                                     i2c_dev->rx_dma_chan :
-                                     i2c_dev->tx_dma_chan);
-
                dmaengine_terminate_sync(i2c_dev->msg_read ?
                                         i2c_dev->rx_dma_chan :
                                         i2c_dev->tx_dma_chan);
index 4794113..17f14e0 100644 (file)
@@ -862,7 +862,7 @@ static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
 
        ret = xa_alloc_cyclic_irq(&cm.local_id_table, &id, NULL, xa_limit_32b,
                                  &cm.local_id_next, GFP_KERNEL);
-       if (ret)
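+       /* xa_alloc_cyclic_irq() returns 1 if the ID wrapped; only negatives are errors */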
+       if (ret < 0)
                goto error;
        cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
 
@@ -1828,11 +1828,9 @@ static void cm_format_mra(struct cm_mra_msg *mra_msg,
 
 static void cm_format_rej(struct cm_rej_msg *rej_msg,
                          struct cm_id_private *cm_id_priv,
-                         enum ib_cm_rej_reason reason,
-                         void *ari,
-                         u8 ari_length,
-                         const void *private_data,
-                         u8 private_data_len)
+                         enum ib_cm_rej_reason reason, void *ari,
+                         u8 ari_length, const void *private_data,
+                         u8 private_data_len, enum ib_cm_state state)
 {
        lockdep_assert_held(&cm_id_priv->lock);
 
@@ -1840,7 +1838,7 @@ static void cm_format_rej(struct cm_rej_msg *rej_msg,
        IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
                be32_to_cpu(cm_id_priv->id.remote_id));
 
-       switch(cm_id_priv->id.state) {
+       switch (state) {
        case IB_CM_REQ_RCVD:
                IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0));
                IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
@@ -1905,8 +1903,9 @@ static void cm_dup_req_handler(struct cm_work *work,
                              cm_id_priv->private_data_len);
                break;
        case IB_CM_TIMEWAIT:
-               cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
-                             IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
+               cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv,
+                             IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0,
+                             IB_CM_TIMEWAIT);
                break;
        default:
                goto unlock;
@@ -2904,6 +2903,7 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
                              u8 ari_length, const void *private_data,
                              u8 private_data_len)
 {
+       enum ib_cm_state state = cm_id_priv->id.state;
        struct ib_mad_send_buf *msg;
        int ret;
 
@@ -2913,7 +2913,7 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
            (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
                return -EINVAL;
 
-       switch (cm_id_priv->id.state) {
+       switch (state) {
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
        case IB_CM_REQ_RCVD:
@@ -2925,7 +2925,8 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
                if (ret)
                        return ret;
                cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
-                             ari, ari_length, private_data, private_data_len);
+                             ari, ari_length, private_data, private_data_len,
+                             state);
                break;
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
@@ -2934,7 +2935,8 @@ static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
                if (ret)
                        return ret;
                cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
-                             ari, ari_length, private_data, private_data_len);
+                             ari, ari_length, private_data, private_data_len,
+                             state);
                break;
        default:
                pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
index 5128cb1..177333d 100644 (file)
@@ -360,7 +360,7 @@ lookup_get_fd_uobject(const struct uverbs_api_object *obj,
         * uverbs_uobject_fd_release(), and the caller is expected to ensure
         * that release is never done while a call to lookup is possible.
         */
-       if (f->f_op != fd_type->fops) {
+       if (f->f_op != fd_type->fops || uobject->ufile != ufile) {
                fput(f);
                return ERR_PTR(-EBADF);
        }
@@ -474,16 +474,15 @@ alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
        filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL,
                                  fd_type->flags);
        if (IS_ERR(filp)) {
+               uverbs_uobject_put(uobj);
                uobj = ERR_CAST(filp);
-               goto err_uobj;
+               goto err_fd;
        }
        uobj->object = filp;
 
        uobj->id = new_fd;
        return uobj;
 
-err_uobj:
-       uverbs_uobject_put(uobj);
 err_fd:
        put_unused_fd(new_fd);
        return uobj;
@@ -679,7 +678,6 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj,
                             enum rdma_lookup_mode mode)
 {
        assert_uverbs_usecnt(uobj, mode);
-       uobj->uapi_object->type_class->lookup_put(uobj, mode);
        /*
         * In order to unlock an object, either decrease its usecnt for
         * read access or zero it in case of exclusive access. See
@@ -696,6 +694,7 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj,
                break;
        }
 
+       uobj->uapi_object->type_class->lookup_put(uobj, mode);
        /* Pairs with the kref obtained by type->lookup_get */
        uverbs_uobject_put(uobj);
 }
index 2d4083b..17fc25d 100644 (file)
@@ -820,6 +820,10 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
                        ret = mmget_not_zero(mm);
                        if (!ret) {
                                list_del_init(&priv->list);
+                               if (priv->entry) {
+                                       rdma_user_mmap_entry_put(priv->entry);
+                                       priv->entry = NULL;
+                               }
                                mm = NULL;
                                continue;
                        }
index e8b4b37..688f196 100644 (file)
@@ -1046,7 +1046,7 @@ i40iw_sc_query_rdma_features(struct i40iw_sc_cqp *cqp,
        u64 header;
 
        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
-       if (wqe)
+       if (!wqe)
                return I40IW_ERR_RING_FULL;
 
        set_64bit_val(wqe, 32, feat_mem->pa);
index a66518a..275722c 100644 (file)
@@ -1499,8 +1499,9 @@ static int __mlx4_ib_create_default_rules(
        int i;
 
        for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
+               union ib_flow_spec ib_spec = {};
                int ret;
-               union ib_flow_spec ib_spec;
+
                switch (pdefault_rules->rules_create_list[i]) {
                case 0:
                        /* no rule */
index 1456db4..2210759 100644 (file)
@@ -5558,7 +5558,9 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
        rdma_ah_set_path_bits(ah_attr, path->grh_mlid & 0x7f);
        rdma_ah_set_static_rate(ah_attr,
                                path->static_rate ? path->static_rate - 5 : 0);
-       if (path->grh_mlid & (1 << 7)) {
+
+       if (path->grh_mlid & (1 << 7) ||
+           ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
                u32 tc_fl = be32_to_cpu(path->tclass_flowlabel);
 
                rdma_ah_set_grh(ah_attr, NULL,
index 5724cbb..04d2e72 100644 (file)
@@ -248,8 +248,8 @@ int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                cq->ip = rvt_create_mmap_info(rdi, sz, udata, u_wc);
-               if (!cq->ip) {
-                       err = -ENOMEM;
+               if (IS_ERR(cq->ip)) {
+                       err = PTR_ERR(cq->ip);
                        goto bail_wc;
                }
 
index 652f4a7..37853aa 100644 (file)
@@ -154,7 +154,7 @@ done:
  * @udata: user data (must be valid!)
  * @obj: opaque pointer to a cq, wq etc
  *
- * Return: rvt_mmap struct on success
+ * Return: rvt_mmap struct on success, ERR_PTR on failure
  */
 struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,
                                           struct ib_udata *udata, void *obj)
@@ -166,7 +166,7 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,
 
        ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node);
        if (!ip)
-               return ip;
+               return ERR_PTR(-ENOMEM);
 
        size = PAGE_ALIGN(size);
 
index 0e1b291..500a7ee 100644 (file)
@@ -1244,8 +1244,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
 
                        qp->ip = rvt_create_mmap_info(rdi, s, udata,
                                                      qp->r_rq.wq);
-                       if (!qp->ip) {
-                               ret = ERR_PTR(-ENOMEM);
+                       if (IS_ERR(qp->ip)) {
+                               ret = ERR_CAST(qp->ip);
                                goto bail_qpn;
                        }
 
index 24fef02..f547c11 100644 (file)
@@ -111,8 +111,8 @@ int rvt_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *srq_init_attr,
                u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz;
 
                srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);
-               if (!srq->ip) {
-                       ret = -ENOMEM;
+               if (IS_ERR(srq->ip)) {
+                       ret = PTR_ERR(srq->ip);
                        goto bail_wq;
                }
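
The four rvt hunks above are one idiom applied across callers: rvt_create_mmap_info() now returns ERR_PTR(-errno) instead of NULL, so each caller can propagate the precise error via PTR_ERR() or ERR_CAST() rather than assuming -ENOMEM. A minimal standalone sketch of the kernel pattern (the foo_* names are illustrative):

    #include <linux/err.h>
    #include <linux/slab.h>

    struct foo {
            int payload;
    };

    static struct foo *foo_create(void)
    {
            struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

            if (!f)
                    return ERR_PTR(-ENOMEM);        /* errno encoded in the pointer */
            return f;
    }

    static int foo_use(void)
    {
            struct foo *f = foo_create();

            if (IS_ERR(f))
                    return PTR_ERR(f);      /* recover the exact errno */

            kfree(f);
            return 0;
    }

ERR_CAST() performs the same propagation when the caller itself returns a pointer of a different type, which is what the rvt_create_qp() hunk uses.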
 
index ae92c80..9f53aa4 100644 (file)
@@ -920,20 +920,27 @@ static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
 {
        struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr;
        struct siw_device *sdev = to_siw_dev(pd->device);
-       struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey  >> 8);
+       struct siw_mem *mem;
        int rv = 0;
 
        siw_dbg_pd(pd, "STag 0x%08x\n", sqe->rkey);
 
-       if (unlikely(!mem || !base_mr)) {
+       if (unlikely(!base_mr)) {
                pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);
                return -EINVAL;
        }
+
        if (unlikely(base_mr->rkey >> 8 != sqe->rkey  >> 8)) {
                pr_warn("siw: fastreg: STag 0x%08x: bad MR\n", sqe->rkey);
-               rv = -EINVAL;
-               goto out;
+               return -EINVAL;
        }
+
+       mem = siw_mem_id2obj(sdev, sqe->rkey  >> 8);
+       if (unlikely(!mem)) {
+               pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);
+               return -EINVAL;
+       }
+
        if (unlikely(mem->pd != pd)) {
                pr_warn("siw: fastreg: PD mismatch\n");
                rv = -EINVAL;
index 58b4a4d..2ab07ce 100644 (file)
@@ -362,7 +362,7 @@ config IPMMU_VMSA
 
 config SPAPR_TCE_IOMMU
        bool "sPAPR TCE IOMMU Support"
-       depends on PPC_POWERNV || PPC_PSERIES || (PPC && COMPILE_TEST)
+       depends on PPC_POWERNV || PPC_PSERIES
        select IOMMU_API
        help
          Enables bits of IOMMU API required by VFIO. The iommu_ops
@@ -457,7 +457,7 @@ config S390_AP_IOMMU
 
 config MTK_IOMMU
        bool "MTK IOMMU Support"
-       depends on ARM || ARM64 || COMPILE_TEST
+       depends on HAS_DMA
        depends on ARCH_MEDIATEK || COMPILE_TEST
        select ARM_DMA_USE_IOMMU
        select IOMMU_API
index 6be3853..2b9a67e 100644 (file)
@@ -2936,7 +2936,7 @@ static int __init parse_amd_iommu_intr(char *str)
 {
        for (; *str; ++str) {
                if (strncmp(str, "legacy", 6) == 0) {
-                       amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+                       amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
                        break;
                }
                if (strncmp(str, "vapic", 5) == 0) {
index ef0a524..0182cff 100644 (file)
@@ -371,11 +371,11 @@ int dmar_disabled = 0;
 int dmar_disabled = 1;
 #endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */
 
-#ifdef INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
+#ifdef CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
 int intel_iommu_sm = 1;
 #else
 int intel_iommu_sm;
-#endif /* INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */
+#endif /* CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */
 
 int intel_iommu_enabled = 0;
 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
index 2b47141..7b37542 100644 (file)
@@ -170,6 +170,7 @@ static struct dev_iommu *dev_iommu_get(struct device *dev)
 
 static void dev_iommu_free(struct device *dev)
 {
+       iommu_fwspec_free(dev);
        kfree(dev->iommu);
        dev->iommu = NULL;
 }
@@ -1428,7 +1429,7 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
 
        return group;
 }
-EXPORT_SYMBOL(iommu_group_get_for_dev);
+EXPORT_SYMBOL_GPL(iommu_group_get_for_dev);
 
 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
 {
index 0e2a964..5b3b270 100644 (file)
@@ -824,8 +824,11 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
        qcom_iommu->dev = dev;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (res)
+       if (res) {
                qcom_iommu->local_base = devm_ioremap_resource(dev, res);
+               if (IS_ERR(qcom_iommu->local_base))
+                       return PTR_ERR(qcom_iommu->local_base);
+       }
 
        qcom_iommu->iface_clk = devm_clk_get(dev, "iface");
        if (IS_ERR(qcom_iommu->iface_clk)) {
index 58fd137..3e50009 100644 (file)
@@ -585,10 +585,12 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
 
        /* Do we need to select a new pgpath? */
        pgpath = READ_ONCE(m->current_pgpath);
-       queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
-       if (!pgpath || !queue_io)
+       if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
                pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
 
+       /* MPATHF_QUEUE_IO might have been cleared by choose_pgpath. */
+       queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
+
        if ((pgpath && queue_io) ||
            (!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
                /* Queue for the daemon to resubmit */
index 49147e6..fb41b4f 100644 (file)
@@ -435,7 +435,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
        fio->level++;
 
        if (type == DM_VERITY_BLOCK_TYPE_METADATA)
-               block += v->data_blocks;
+               block = block - v->hash_start + v->data_blocks;
 
        /*
         * For RS(M, N), the continuous FEC data is divided into blocks of N
index 114927d..613c171 100644 (file)
@@ -931,6 +931,24 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
        return 0;
 }
 
+static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors)
+{
+       struct dm_io_region region;
+       struct dm_io_request req;
+
+       region.bdev = wc->ssd_dev->bdev;
+       region.sector = wc->start_sector;
+       region.count = n_sectors;
+       req.bi_op = REQ_OP_READ;
+       req.bi_op_flags = REQ_SYNC;
+       req.mem.type = DM_IO_VMA;
+       req.mem.ptr.vma = (char *)wc->memory_map;
+       req.client = wc->dm_io;
+       req.notify.fn = NULL;
+
+       return dm_io(&req, 1, &region, NULL);
+}
+
 static void writecache_resume(struct dm_target *ti)
 {
        struct dm_writecache *wc = ti->private;
@@ -941,8 +959,18 @@ static void writecache_resume(struct dm_target *ti)
 
        wc_lock(wc);
 
-       if (WC_MODE_PMEM(wc))
+       if (WC_MODE_PMEM(wc)) {
                persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
+       } else {
+               r = writecache_read_metadata(wc, wc->metadata_sectors);
+               if (r) {
+                       size_t sb_entries_offset;
+                       writecache_error(wc, r, "unable to read metadata: %d", r);
+                       sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
+                       memset((char *)wc->memory_map + sb_entries_offset, -1,
+                              (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset);
+               }
+       }
 
        wc->tree = RB_ROOT;
        INIT_LIST_HEAD(&wc->lru);
@@ -2102,6 +2130,12 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
                ti->error = "Invalid block size";
                goto bad;
        }
+       if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) ||
+           wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) {
+               r = -EINVAL;
+               ti->error = "Block size is smaller than device logical block size";
+               goto bad;
+       }
        wc->block_size_bits = __ffs(wc->block_size);
 
        wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
@@ -2200,8 +2234,6 @@ invalid_optional:
                        goto bad;
                }
        } else {
-               struct dm_io_region region;
-               struct dm_io_request req;
                size_t n_blocks, n_metadata_blocks;
                uint64_t n_bitmap_bits;
 
@@ -2258,19 +2290,9 @@ invalid_optional:
                        goto bad;
                }
 
-               region.bdev = wc->ssd_dev->bdev;
-               region.sector = wc->start_sector;
-               region.count = wc->metadata_sectors;
-               req.bi_op = REQ_OP_READ;
-               req.bi_op_flags = REQ_SYNC;
-               req.mem.type = DM_IO_VMA;
-               req.mem.ptr.vma = (char *)wc->memory_map;
-               req.client = wc->dm_io;
-               req.notify.fn = NULL;
-
-               r = dm_io(&req, 1, &region, NULL);
+               r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT);
                if (r) {
-                       ti->error = "Unable to read metadata";
+                       ti->error = "Unable to read first block of metadata";
                        goto bad;
                }
        }
index 5bd0ab8..baa6314 100644 (file)
@@ -878,7 +878,7 @@ static int mmc_send_hpi_cmd(struct mmc_card *card)
  *     Issued High Priority Interrupt, and check for card status
  *     until out-of prg-state.
  */
-int mmc_interrupt_hpi(struct mmc_card *card)
+static int mmc_interrupt_hpi(struct mmc_card *card)
 {
        int err;
        u32 status;
index c2239ee..75934f3 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/delay.h>
 #include <linux/highmem.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
@@ -349,12 +350,16 @@ static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
 /* CQHCI is idle and should halt immediately, so set a small timeout */
 #define CQHCI_OFF_TIMEOUT 100
 
+static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
+{
+       return cqhci_readl(cq_host, CQHCI_CTL);
+}
+
 static void cqhci_off(struct mmc_host *mmc)
 {
        struct cqhci_host *cq_host = mmc->cqe_private;
-       ktime_t timeout;
-       bool timed_out;
        u32 reg;
+       int err;
 
        if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
                return;
@@ -364,15 +369,9 @@ static void cqhci_off(struct mmc_host *mmc)
 
        cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);
 
-       timeout = ktime_add_us(ktime_get(), CQHCI_OFF_TIMEOUT);
-       while (1) {
-               timed_out = ktime_compare(ktime_get(), timeout) > 0;
-               reg = cqhci_readl(cq_host, CQHCI_CTL);
-               if ((reg & CQHCI_HALT) || timed_out)
-                       break;
-       }
-
-       if (timed_out)
+       err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
+                                reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
+       if (err < 0)
                pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
        else
                pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));
index 8b038e7..2e58743 100644 (file)
@@ -357,14 +357,6 @@ static void meson_mx_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
                meson_mx_mmc_start_cmd(mmc, mrq->cmd);
 }
 
-static int meson_mx_mmc_card_busy(struct mmc_host *mmc)
-{
-       struct meson_mx_mmc_host *host = mmc_priv(mmc);
-       u32 irqc = readl(host->base + MESON_MX_SDIO_IRQC);
-
-       return !!(irqc & MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK);
-}
-
 static void meson_mx_mmc_read_response(struct mmc_host *mmc,
                                       struct mmc_command *cmd)
 {
@@ -506,7 +498,6 @@ static void meson_mx_mmc_timeout(struct timer_list *t)
 static struct mmc_host_ops meson_mx_mmc_ops = {
        .request                = meson_mx_mmc_request,
        .set_ios                = meson_mx_mmc_set_ios,
-       .card_busy              = meson_mx_mmc_card_busy,
        .get_cd                 = mmc_gpio_get_cd,
        .get_ro                 = mmc_gpio_get_ro,
 };
@@ -570,7 +561,7 @@ static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
        mmc->f_max = clk_round_rate(host->cfg_div_clk,
                                    clk_get_rate(host->parent_clk));
 
-       mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
+       mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY;
        mmc->ops = &meson_mx_mmc_ops;
 
        ret = mmc_of_parse(mmc);
index 09ff731..a8bcb3f 100644 (file)
@@ -2087,6 +2087,8 @@ static int sdhci_msm_probe(struct platform_device *pdev)
                goto clk_disable;
        }
 
+       msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
+
        pm_runtime_get_noresume(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
index 525de24..2527244 100644 (file)
@@ -601,6 +601,9 @@ static int intel_select_drive_strength(struct mmc_card *card,
        struct sdhci_pci_slot *slot = sdhci_priv(host);
        struct intel_host *intel_host = sdhci_pci_priv(slot);
 
+       if (!(mmc_driver_type_mask(intel_host->drv_strength) & card_drv))
+               return 0;
+
        return intel_host->drv_strength;
 }
 
index 1dea1ba..4703cd5 100644 (file)
@@ -235,6 +235,16 @@ static void xenon_voltage_switch(struct sdhci_host *host)
 {
        /* Wait for 5ms after set 1.8V signal enable bit */
        usleep_range(5000, 5500);
+
+       /*
+        * For some reason the controller's Host Control2 register reports
+        * the bit representing 1.8V signaling as 0 when read after it was
+        * written as 1. A subsequent read reports 1.
+        *
+        * Since this may cause some issues, do a dummy read of the Host
+        * Control2 register here to circumvent this.
+        */
+       sdhci_readw(host, SDHCI_HOST_CONTROL2);
 }
 
 static const struct sdhci_ops sdhci_xenon_ops = {
index 6435020..51185e4 100644 (file)
@@ -24,8 +24,8 @@ config NET_DSA_MV88E6XXX_PTP
        bool "PTP support for Marvell 88E6xxx"
        default n
        depends on NET_DSA_MV88E6XXX_GLOBAL2
+       depends on PTP_1588_CLOCK
        imply NETWORK_PHY_TIMESTAMPING
-       imply PTP_1588_CLOCK
        help
          Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch
          chips that support it.
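
Several hunks in this pull make the same Kconfig change (here and for SJA1105, MACB, Cavium PTP and TI CPTS below): "imply PTP_1588_CLOCK" still allows the PTP core to be =m while the driver is =y, leaving the built-in driver with unresolved ptp_*() symbols at link time; "depends on PTP_1588_CLOCK" restricts the driver to a compatible tristate instead.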
index dd8a566..2b4a723 100644 (file)
@@ -3962,7 +3962,6 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
        .serdes_get_stats = mv88e6390_serdes_get_stats,
        .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
        .serdes_get_regs = mv88e6390_serdes_get_regs,
-       .phylink_validate = mv88e6390_phylink_validate,
        .gpio_ops = &mv88e6352_gpio_ops,
        .phylink_validate = mv88e6390_phylink_validate,
 };
@@ -4021,7 +4020,6 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
        .serdes_get_stats = mv88e6390_serdes_get_stats,
        .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
        .serdes_get_regs = mv88e6390_serdes_get_regs,
-       .phylink_validate = mv88e6390_phylink_validate,
        .gpio_ops = &mv88e6352_gpio_ops,
        .phylink_validate = mv88e6390x_phylink_validate,
 };
@@ -4079,7 +4077,6 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
        .serdes_get_stats = mv88e6390_serdes_get_stats,
        .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
        .serdes_get_regs = mv88e6390_serdes_get_regs,
-       .phylink_validate = mv88e6390_phylink_validate,
        .avb_ops = &mv88e6390_avb_ops,
        .ptp_ops = &mv88e6352_ptp_ops,
        .phylink_validate = mv88e6390_phylink_validate,
@@ -4235,7 +4232,6 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
        .serdes_get_stats = mv88e6390_serdes_get_stats,
        .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
        .serdes_get_regs = mv88e6390_serdes_get_regs,
-       .phylink_validate = mv88e6390_phylink_validate,
        .gpio_ops = &mv88e6352_gpio_ops,
        .avb_ops = &mv88e6390_avb_ops,
        .ptp_ops = &mv88e6352_ptp_ops,
index d0a3764..e2c6bf0 100644 (file)
@@ -400,6 +400,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
        ocelot->stats_layout    = felix->info->stats_layout;
        ocelot->num_stats       = felix->info->num_stats;
        ocelot->shared_queue_sz = felix->info->shared_queue_sz;
+       ocelot->num_mact_rows   = felix->info->num_mact_rows;
        ocelot->vcap_is2_keys   = felix->info->vcap_is2_keys;
        ocelot->vcap_is2_actions= felix->info->vcap_is2_actions;
        ocelot->vcap            = felix->info->vcap;
index 82d46f2..9af1065 100644 (file)
@@ -15,6 +15,7 @@ struct felix_info {
        const u32 *const                *map;
        const struct ocelot_ops         *ops;
        int                             shared_queue_sz;
+       int                             num_mact_rows;
        const struct ocelot_stat_layout *stats_layout;
        unsigned int                    num_stats;
        int                             num_ports;
index b4078f3..8bf395f 100644 (file)
@@ -1220,6 +1220,7 @@ struct felix_info felix_info_vsc9959 = {
        .vcap_is2_actions       = vsc9959_vcap_is2_actions,
        .vcap                   = vsc9959_vcap_props,
        .shared_queue_sz        = 128 * 1024,
+       .num_mact_rows          = 2048,
        .num_ports              = 6,
        .switch_pci_bar         = 4,
        .imdio_pci_bar          = 0,
index 0fe1ae1..68c3086 100644 (file)
@@ -20,6 +20,7 @@ tristate "NXP SJA1105 Ethernet switch family support"
 config NET_DSA_SJA1105_PTP
        bool "Support for the PTP clock on the NXP SJA1105 Ethernet switch"
        depends on NET_DSA_SJA1105
+       depends on PTP_1588_CLOCK
        help
          This enables support for timestamping and PTP clock manipulations in
          the SJA1105 DSA driver.
index a22f8e3..bc0e47c 100644 (file)
 
 /* PTPSYNCTS has no interrupt or update mechanism, because the intended
  * hardware use case is for the timestamp to be collected synchronously,
- * immediately after the CAS_MASTER SJA1105 switch has triggered a CASSYNC
- * pulse on the PTP_CLK pin. When used as a generic extts source, it needs
- * polling and a comparison with the old value. The polling interval is just
- * the Nyquist rate of a canonical PPS input (e.g. from a GPS module).
- * Anything of higher frequency than 1 Hz will be lost, since there is no
- * timestamp FIFO.
+ * immediately after the CAS_MASTER SJA1105 switch has performed a CASSYNC
+ * one-shot toggle (no return to level) on the PTP_CLK pin. When used as a
+ * generic extts source, the PTPSYNCTS register needs polling and a comparison
+ * with the old value. The polling interval is configured as the Nyquist rate
+ * of a signal with 50% duty cycle and 1Hz frequency, which is sadly all that
+ * this hardware can do (but may be enough for some setups). Anything of higher
+ * frequency than 1 Hz will be lost, since there is no timestamp FIFO.
  */
-#define SJA1105_EXTTS_INTERVAL         (HZ / 2)
+#define SJA1105_EXTTS_INTERVAL         (HZ / 4)
 
 /*            This range is actually +/- SJA1105_MAX_ADJ_PPB
  *            divided by 1000 (ppb -> ppm) and with a 16-bit
@@ -754,7 +755,16 @@ static int sja1105_extts_enable(struct sja1105_private *priv,
                return -EOPNOTSUPP;
 
        /* Reject requests with unsupported flags */
-       if (extts->flags)
+       if (extts->flags & ~(PTP_ENABLE_FEATURE |
+                            PTP_RISING_EDGE |
+                            PTP_FALLING_EDGE |
+                            PTP_STRICT_FLAGS))
+               return -EOPNOTSUPP;
+
+       /* We can only enable time stamping on both edges, sadly. */
+       if ((extts->flags & PTP_STRICT_FLAGS) &&
+           (extts->flags & PTP_ENABLE_FEATURE) &&
+           (extts->flags & PTP_EXTTS_EDGES) != PTP_EXTTS_EDGES)
                return -EOPNOTSUPP;
 
        rc = sja1105_change_ptp_clk_pin_func(priv, PTP_PF_EXTTS);
index 97dfd0c..9e1860d 100644 (file)
@@ -69,7 +69,7 @@
  * 16kB.
  */
 #if PAGE_SIZE > SZ_16K
-#define ENA_PAGE_SIZE SZ_16K
+#define ENA_PAGE_SIZE (_AC(SZ_16K, UL))
 #else
 #define ENA_PAGE_SIZE PAGE_SIZE
 #endif
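
This works because _AC() (from <uapi/linux/const.h>) pastes a type suffix onto a constant in C while leaving it bare when included from assembler sources, so arithmetic against ENA_PAGE_SIZE is done at unsigned long width. Roughly, simplified from the real header:

    /* Simplified view of <uapi/linux/const.h>. */
    #ifdef __ASSEMBLY__
    #define _AC(X, Y)       X               /* assembler: no C suffixes */
    #else
    #define __AC(X, Y)      (X##Y)          /* paste the suffix */
    #define _AC(X, Y)       __AC(X, Y)      /* indirection expands SZ_16K first */
    #endif

    /* _AC(SZ_16K, UL) therefore expands to (0x00004000UL). */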
index 2edf137..8a70ffe 100644 (file)
@@ -57,7 +57,7 @@ static const struct aq_board_revision_s hw_atl_boards[] = {
        { AQ_DEVICE_ID_D108,    AQ_HWREV_2,     &hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, },
        { AQ_DEVICE_ID_D109,    AQ_HWREV_2,     &hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, },
 
-       { AQ_DEVICE_ID_AQC100,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
+       { AQ_DEVICE_ID_AQC100,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100, },
        { AQ_DEVICE_ID_AQC107,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
        { AQ_DEVICE_ID_AQC108,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, },
        { AQ_DEVICE_ID_AQC109,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, },
index a5d1a6c..6795b6d 100644 (file)
@@ -172,6 +172,7 @@ static int bgmac_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
        struct bgmac *bgmac;
+       struct resource *regs;
        const u8 *mac_addr;
 
        bgmac = bgmac_alloc(&pdev->dev);
@@ -206,16 +207,21 @@ static int bgmac_probe(struct platform_device *pdev)
        if (IS_ERR(bgmac->plat.base))
                return PTR_ERR(bgmac->plat.base);
 
-       bgmac->plat.idm_base =
-               devm_platform_ioremap_resource_byname(pdev, "idm_base");
-       if (IS_ERR(bgmac->plat.idm_base))
-               return PTR_ERR(bgmac->plat.idm_base);
-       bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
+       regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base");
+       if (regs) {
+               bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
+               if (IS_ERR(bgmac->plat.idm_base))
+                       return PTR_ERR(bgmac->plat.idm_base);
+               bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
+       }
 
-       bgmac->plat.nicpm_base =
-               devm_platform_ioremap_resource_byname(pdev, "nicpm_base");
-       if (IS_ERR(bgmac->plat.nicpm_base))
-               return PTR_ERR(bgmac->plat.nicpm_base);
+       regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base");
+       if (regs) {
+               bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev,
+                                                              regs);
+               if (IS_ERR(bgmac->plat.nicpm_base))
+                       return PTR_ERR(bgmac->plat.nicpm_base);
+       }
 
        bgmac->read = platform_bgmac_read;
        bgmac->write = platform_bgmac_write;
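
The shape being restored here is the optional-resource idiom: devm_platform_ioremap_resource_byname() treats a missing entry as a hard error, so for regions that may legitimately be absent the lookup and the mapping must stay separate. A sketch with hypothetical names:

    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>

    static int map_optional_region(struct platform_device *pdev,
                                   const char *name, void __iomem **out)
    {
            struct resource *res;

            res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
            if (!res)
                    return 0;       /* optional region: absence is fine */

            *out = devm_ioremap_resource(&pdev->dev, res);
            return PTR_ERR_OR_ZERO(*out);   /* present but unmappable: fail */
    }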
index fead64f..d1a8371 100644 (file)
@@ -6642,7 +6642,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
        int rc;
 
        if (!mem_size)
-               return 0;
+               return -EINVAL;
 
        ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
        if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
@@ -9780,6 +9780,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
                                           netdev_features_t features)
 {
        struct bnxt *bp = netdev_priv(dev);
+       netdev_features_t vlan_features;
 
        if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
                features &= ~NETIF_F_NTUPLE;
@@ -9796,12 +9797,14 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
        /* Both CTAG and STAG VLAN accelaration on the RX side have to be
         * turned on or off together.
         */
-       if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
-           (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
+       vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX |
+                                   NETIF_F_HW_VLAN_STAG_RX);
+       if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX |
+                             NETIF_F_HW_VLAN_STAG_RX)) {
                if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
                        features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
                                      NETIF_F_HW_VLAN_STAG_RX);
-               else
+               else if (vlan_features)
                        features |= NETIF_F_HW_VLAN_CTAG_RX |
                                    NETIF_F_HW_VLAN_STAG_RX;
        }
@@ -12212,12 +12215,15 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
                bnxt_ulp_start(bp, err);
        }
 
-       if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
-               dev_close(netdev);
+       if (result != PCI_ERS_RESULT_RECOVERED) {
+               if (netif_running(netdev))
+                       dev_close(netdev);
+               pci_disable_device(pdev);
+       }
 
        rtnl_unlock();
 
-       return PCI_ERS_RESULT_RECOVERED;
+       return result;
 }
 
 /**
index f2caa27..f6a3250 100644 (file)
@@ -1066,7 +1066,6 @@ struct bnxt_vf_info {
 #define BNXT_VF_LINK_FORCED    0x4
 #define BNXT_VF_LINK_UP                0x8
 #define BNXT_VF_TRUST          0x10
-       u32     func_flags; /* func cfg flags */
        u32     min_tx_rate;
        u32     max_tx_rate;
        void    *hwrm_cmd_req_addr;
index 95f893f..d5c8bd4 100644 (file)
@@ -43,7 +43,7 @@ static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
 #define BNXT_NVM_CFG_VER_BITS          24
 #define BNXT_NVM_CFG_VER_BYTES         4
 
-#define BNXT_MSIX_VEC_MAX      1280
+#define BNXT_MSIX_VEC_MAX      512
 #define BNXT_MSIX_VEC_MIN_MAX  128
 
 enum bnxt_nvm_dir_type {
index 6ea3df6..cea2f99 100644 (file)
@@ -85,11 +85,10 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
        if (old_setting == setting)
                return 0;
 
-       func_flags = vf->func_flags;
        if (setting)
-               func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
+               func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
        else
-               func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
+               func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
        /*TODO: if the driver supports VLAN filter on guest VLAN,
         * the spoof check should also include vlan anti-spoofing
         */
@@ -98,7 +97,6 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
        req.flags = cpu_to_le32(func_flags);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
-               vf->func_flags = func_flags;
                if (setting)
                        vf->flags |= BNXT_VF_SPOOFCHK;
                else
@@ -228,7 +226,6 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
        memcpy(vf->mac_addr, mac, ETH_ALEN);
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
-       req.flags = cpu_to_le32(vf->func_flags);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -266,7 +263,6 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
 
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
-       req.flags = cpu_to_le32(vf->func_flags);
        req.dflt_vlan = cpu_to_le16(vlan_tag);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -305,7 +301,6 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
                return 0;
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
-       req.flags = cpu_to_le32(vf->func_flags);
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
        req.max_bw = cpu_to_le32(max_tx_rate);
        req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
@@ -477,7 +472,6 @@ static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
        vf = &bp->pf.vf[vf_id];
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
        req.fid = cpu_to_le16(vf->fw_fid);
-       req.flags = cpu_to_le32(vf->func_flags);
 
        if (is_valid_ether_addr(vf->mac_addr)) {
                req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
index 53b50c2..2c4c12b 100644 (file)
@@ -35,8 +35,8 @@ config MACB
 config MACB_USE_HWSTAMP
        bool "Use IEEE 1588 hwstamp"
        depends on MACB
+       depends on PTP_1588_CLOCK
        default y
-       imply PTP_1588_CLOCK
        ---help---
          Enable IEEE 1588 Precision Time Protocol (PTP) support for MACB.
 
index a0e8c5b..36290a8 100644 (file)
@@ -334,8 +334,10 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
        int status;
 
        status = pm_runtime_get_sync(&bp->pdev->dev);
-       if (status < 0)
+       if (status < 0) {
+               pm_runtime_put_noidle(&bp->pdev->dev);
                goto mdio_pm_exit;
+       }
 
        status = macb_mdio_wait_for_idle(bp);
        if (status < 0)
@@ -386,8 +388,10 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
        int status;
 
        status = pm_runtime_get_sync(&bp->pdev->dev);
-       if (status < 0)
+       if (status < 0) {
+               pm_runtime_put_noidle(&bp->pdev->dev);
                goto mdio_pm_exit;
+       }
 
        status = macb_mdio_wait_for_idle(bp);
        if (status < 0)
@@ -3816,8 +3820,10 @@ static int at91ether_open(struct net_device *dev)
        int ret;
 
        ret = pm_runtime_get_sync(&lp->pdev->dev);
-       if (ret < 0)
+       if (ret < 0) {
+               pm_runtime_put_noidle(&lp->pdev->dev);
                return ret;
+       }
 
        /* Clear internal statistics */
        ctl = macb_readl(lp, NCR);
@@ -4172,15 +4178,9 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
 
 static int fu540_c000_init(struct platform_device *pdev)
 {
-       struct resource *res;
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       if (!res)
-               return -ENODEV;
-
-       mgmt->reg = ioremap(res->start, resource_size(res));
-       if (!mgmt->reg)
-               return -ENOMEM;
+       mgmt->reg = devm_platform_ioremap_resource(pdev, 1);
+       if (IS_ERR(mgmt->reg))
+               return PTR_ERR(mgmt->reg);
 
        return macb_init(pdev);
 }
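
All three macb hunks fix the same leak: pm_runtime_get_sync() raises the device's usage count even when the resume fails, so every error path must drop that reference, and pm_runtime_put_noidle() does so without scheduling a suspend. The recurring shape, sketched:

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    static int do_hw_access(struct device *dev)
    {
            int ret;

            ret = pm_runtime_get_sync(dev);
            if (ret < 0) {
                    /* The usage count was bumped even though resume failed. */
                    pm_runtime_put_noidle(dev);
                    return ret;
            }

            /* ... touch the hardware ... */

            pm_runtime_put(dev);
            return 0;
    }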
index 6a700d3..4520e7e 100644 (file)
@@ -54,7 +54,7 @@ config        THUNDER_NIC_RGX
 config CAVIUM_PTP
        tristate "Cavium PTP coprocessor as PTP clock"
        depends on 64BIT && PCI
-       imply PTP_1588_CLOCK
+       depends on PTP_1588_CLOCK
        ---help---
          This driver adds support for the Precision Time Protocol Clocks and
          Timestamping coprocessor (PTP) found on Cavium processors.
index f5dd34d..6516c45 100644 (file)
@@ -2207,6 +2207,9 @@ static void ethofld_hard_xmit(struct net_device *dev,
        if (unlikely(skip_eotx_wr)) {
                start = (u64 *)wr;
                eosw_txq->state = next_state;
+               eosw_txq->cred -= wrlen16;
+               eosw_txq->ncompl++;
+               eosw_txq->last_compl = 0;
                goto write_wr_headers;
        }
 
@@ -2365,6 +2368,34 @@ netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return cxgb4_eth_xmit(skb, dev);
 }
 
+static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq)
+{
+       int pktcount = eosw_txq->pidx - eosw_txq->last_pidx;
+       int pidx = eosw_txq->pidx;
+       struct sk_buff *skb;
+
+       if (!pktcount)
+               return;
+
+       if (pktcount < 0)
+               pktcount += eosw_txq->ndesc;
+
+       while (pktcount--) {
+               pidx--;
+               if (pidx < 0)
+                       pidx += eosw_txq->ndesc;
+
+               skb = eosw_txq->desc[pidx].skb;
+               if (skb) {
+                       dev_consume_skb_any(skb);
+                       eosw_txq->desc[pidx].skb = NULL;
+                       eosw_txq->inuse--;
+               }
+       }
+
+       eosw_txq->pidx = eosw_txq->last_pidx + 1;
+}
+
 /**
  * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
  * @dev - netdevice
@@ -2440,9 +2471,11 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
                                            FW_FLOWC_MNEM_EOSTATE_CLOSING :
                                            FW_FLOWC_MNEM_EOSTATE_ESTABLISHED);
 
-       eosw_txq->cred -= len16;
-       eosw_txq->ncompl++;
-       eosw_txq->last_compl = 0;
+       /* Free up any pending skbs to ensure there's room for
+        * termination FLOWC.
+        */
+       if (tc == FW_SCHED_CLS_NONE)
+               eosw_txq_flush_pending_skbs(eosw_txq);
 
        ret = eosw_txq_enqueue(eosw_txq, skb);
        if (ret) {
@@ -2695,6 +2728,7 @@ static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr)
  *     is ever running at a time ...
  */
 static void service_ofldq(struct sge_uld_txq *q)
+       __must_hold(&q->sendq.lock)
 {
        u64 *pos, *before, *end;
        int credits;
index ebc635f..15f37c5 100644 (file)
@@ -74,8 +74,8 @@ err_pci_mem_reg:
        pci_disable_device(pdev);
 err_pci_enable:
 err_mdiobus_alloc:
-       iounmap(port_regs);
 err_hw_alloc:
+       iounmap(port_regs);
 err_ioremap:
        return err;
 }
index 4bd3324..3de549c 100644 (file)
@@ -2189,7 +2189,8 @@ static void __ibmvnic_reset(struct work_struct *work)
                                rc = do_hard_reset(adapter, rwi, reset_state);
                                rtnl_unlock();
                        }
-               } else {
+               } else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
+                               adapter->from_passive_init)) {
                        rc = do_reset(adapter, rwi, reset_state);
                }
                kfree(rwi);
index 8972cdd..7352244 100644 (file)
@@ -1428,6 +1428,9 @@ int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
        struct mvpp2_ethtool_fs *efs;
        int ret;
 
+       if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
+               return -EINVAL;
+
        efs = port->rfs_rules[info->fs.location];
        if (!efs)
                return -EINVAL;
index 1fa60e9..2b5dad2 100644 (file)
@@ -4329,6 +4329,8 @@ static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
 
        if (!mvpp22_rss_is_supported())
                return -EOPNOTSUPP;
+       if (rss_context >= MVPP22_N_RSS_TABLES)
+               return -EINVAL;
 
        if (hfunc)
                *hfunc = ETH_RSS_HASH_CRC32;
index 5716c3d..c72c4e1 100644 (file)
@@ -2550,6 +2550,7 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
 
                if (!err || err == -ENOSPC) {
                        priv->def_counter[port] = idx;
+                       err = 0;
                } else if (err == -ENOENT) {
                        err = 0;
                        continue;
@@ -2600,7 +2601,8 @@ int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage)
                                   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
                if (!err)
                        *idx = get_param_l(&out_param);
-
+               if (WARN_ON(err == -ENOSPC))
+                       err = -EINVAL;
                return err;
        }
        return __mlx4_counter_alloc(dev, idx);
index 34cba97..cede5bd 100644 (file)
@@ -888,7 +888,6 @@ static void cmd_work_handler(struct work_struct *work)
        }
 
        cmd->ent_arr[ent->idx] = ent;
-       set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
        lay = get_inst(cmd, ent->idx);
        ent->lay = lay;
        memset(lay, 0, sizeof(*lay));
@@ -910,6 +909,7 @@ static void cmd_work_handler(struct work_struct *work)
 
        if (ent->callback)
                schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
+       set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
 
        /* Skip sending command to fw if internal error */
        if (pci_channel_offline(dev->pdev) ||
@@ -922,6 +922,10 @@ static void cmd_work_handler(struct work_struct *work)
                MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
 
                mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
+               /* no doorbell, no need to keep the entry */
+               free_ent(cmd, ent->idx);
+               if (ent->callback)
+                       free_cmd(ent);
                return;
        }
 
index 55457f2..f372e94 100644 (file)
@@ -1773,19 +1773,14 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
 
 static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
 {
-       int err = mlx5e_init_rep_rx(priv);
-
-       if (err)
-               return err;
-
        mlx5e_create_q_counters(priv);
-       return 0;
+       return mlx5e_init_rep_rx(priv);
 }
 
 static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
 {
-       mlx5e_destroy_q_counters(priv);
        mlx5e_cleanup_rep_rx(priv);
+       mlx5e_destroy_q_counters(priv);
 }
 
 static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
index b2e38e0..5d9def1 100644 (file)
@@ -1550,9 +1550,9 @@ static int esw_create_restore_table(struct mlx5_eswitch *esw)
                                           MLX5_FLOW_NAMESPACE_KERNEL, 1,
                                           modact);
        if (IS_ERR(mod_hdr)) {
+               err = PTR_ERR(mod_hdr);
                esw_warn(dev, "Failed to create restore mod header, err: %d\n",
                         err);
-               err = PTR_ERR(mod_hdr);
                goto err_mod_hdr;
        }
 
@@ -2219,10 +2219,12 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
                total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev);
 
        memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
+       mutex_init(&esw->fdb_table.offloads.vports.lock);
+       hash_init(esw->fdb_table.offloads.vports.table);
 
        err = esw_create_uplink_offloads_acl_tables(esw);
        if (err)
-               return err;
+               goto create_acl_err;
 
        err = esw_create_offloads_table(esw, total_vports);
        if (err)
@@ -2240,9 +2242,6 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
        if (err)
                goto create_fg_err;
 
-       mutex_init(&esw->fdb_table.offloads.vports.lock);
-       hash_init(esw->fdb_table.offloads.vports.table);
-
        return 0;
 
 create_fg_err:
@@ -2253,18 +2252,19 @@ create_restore_err:
        esw_destroy_offloads_table(esw);
 create_offloads_err:
        esw_destroy_uplink_offloads_acl_tables(esw);
-
+create_acl_err:
+       mutex_destroy(&esw->fdb_table.offloads.vports.lock);
        return err;
 }
 
 static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
 {
-       mutex_destroy(&esw->fdb_table.offloads.vports.lock);
        esw_destroy_vport_rx_group(esw);
        esw_destroy_offloads_fdb_tables(esw);
        esw_destroy_restore_table(esw);
        esw_destroy_offloads_table(esw);
        esw_destroy_uplink_offloads_acl_tables(esw);
+       mutex_destroy(&esw->fdb_table.offloads.vports.lock);
 }
 
 static void
@@ -2377,9 +2377,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
 err_vports:
        esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
 err_uplink:
-       esw_set_passing_vport_metadata(esw, false);
-err_steering_init:
        esw_offloads_steering_cleanup(esw);
+err_steering_init:
+       esw_set_passing_vport_metadata(esw, false);
 err_vport_metadata:
        mlx5_rdma_disable_roce(esw->dev);
        mutex_destroy(&esw->offloads.termtbl_mutex);
index c0ab9cf..18719ac 100644 (file)
@@ -695,6 +695,12 @@ static void dr_cq_event(struct mlx5_core_cq *mcq,
        pr_info("CQ event %u on CQ #%u\n", event, mcq->cqn);
 }
 
+static void dr_cq_complete(struct mlx5_core_cq *mcq,
+                          struct mlx5_eqe *eqe)
+{
+       pr_err("CQ completion CQ: #%u\n", mcq->cqn);
+}
+
 static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
                                      struct mlx5_uars_page *uar,
                                      size_t ncqe)
@@ -756,6 +762,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
        mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas);
 
        cq->mcq.event = dr_cq_event;
+       cq->mcq.comp  = dr_cq_complete;
 
        err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
        kvfree(in);
@@ -767,7 +774,12 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
        cq->mcq.set_ci_db = cq->wq_ctrl.db.db;
        cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;
        *cq->mcq.set_ci_db = 0;
-       *cq->mcq.arm_db = 0;
+
+       /* Set a non-zero value to prevent the HW from running db-recovery
+        * on a CQ that is used in polling mode.
+        */
+       *cq->mcq.arm_db = cpu_to_be32(2 << 28);
+
        cq->mcq.vector = 0;
        cq->mcq.irqn = irqn;
        cq->mcq.uar = uar;
index 430da69..a6e30e0 100644 (file)
@@ -986,8 +986,9 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
                                unsigned int priority,
                                struct mlxsw_afk_element_usage *elusage)
 {
+       struct mlxsw_sp_acl_tcam_vchunk *vchunk, *vchunk2;
        struct mlxsw_sp_acl_tcam_vregion *vregion;
-       struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+       struct list_head *pos;
        int err;
 
        if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
@@ -1025,7 +1026,14 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
        }
 
        mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
-       list_add_tail(&vchunk->list, &vregion->vchunk_list);
+
+       /* Position the vchunk inside the list according to priority */
+       list_for_each(pos, &vregion->vchunk_list) {
+               vchunk2 = list_entry(pos, typeof(*vchunk2), list);
+               if (vchunk2->priority > priority)
+                       break;
+       }
+       list_add_tail(&vchunk->list, pos);
        mutex_unlock(&vregion->lock);
 
        return vchunk;
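
The subtlety here is that list_add_tail(new, pos) links new immediately before pos, so breaking at the first entry with a higher priority yields an ascending-priority list, and when the loop runs to completion pos is the list head, degenerating into a plain tail append (the old behaviour). A standalone sketch with illustrative types:

    #include <linux/list.h>

    struct item {
            struct list_head node;
            unsigned int priority;
    };

    static void insert_sorted(struct list_head *head, struct item *new)
    {
            struct list_head *pos;
            struct item *it;

            list_for_each(pos, head) {
                    it = list_entry(pos, struct item, node);
                    if (it->priority > new->priority)
                            break;
            }
            /* list_add_tail() inserts just before @pos. */
            list_add_tail(&new->node, pos);
    }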
index 51117a5..890b078 100644 (file)
@@ -36,7 +36,8 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
                err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
                if (err)
                        return err;
-       } else if (act->hw_stats != FLOW_ACTION_HW_STATS_DISABLED) {
+       } else if (act->hw_stats != FLOW_ACTION_HW_STATS_DISABLED &&
+                  act->hw_stats != FLOW_ACTION_HW_STATS_DONT_CARE) {
                NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
                return -EOPNOTSUPP;
        }
index e165175..f70bb81 100644 (file)
@@ -564,7 +564,7 @@ static int moxart_remove(struct platform_device *pdev)
        struct net_device *ndev = platform_get_drvdata(pdev);
 
        unregister_netdev(ndev);
-       free_irq(ndev->irq, ndev);
+       devm_free_irq(&pdev->dev, ndev->irq, ndev);
        moxart_mac_free_memory(ndev);
        free_netdev(ndev);
 
index a8c48a4..02350c3 100644 (file)
@@ -1031,10 +1031,8 @@ int ocelot_fdb_dump(struct ocelot *ocelot, int port,
 {
        int i, j;
 
-       /* Loop through all the mac tables entries. There are 1024 rows of 4
-        * entries.
-        */
-       for (i = 0; i < 1024; i++) {
+       /* Loop through all the MAC table entries. */
+       for (i = 0; i < ocelot->num_mact_rows; i++) {
                for (j = 0; j < 4; j++) {
                        struct ocelot_mact_entry entry;
                        bool is_static;
@@ -1453,8 +1451,15 @@ static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port,
 
 void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs)
 {
-       ocelot_write(ocelot, ANA_AUTOAGE_AGE_PERIOD(msecs / 2),
-                    ANA_AUTOAGE);
+       unsigned int age_period = ANA_AUTOAGE_AGE_PERIOD(msecs / 2000);
+
+       /* Setting AGE_PERIOD to zero effectively disables automatic aging,
+        * which is clearly not what our intention is. So avoid that.
+        */
+       if (!age_period)
+               age_period = 1;
+
+       ocelot_rmw(ocelot, age_period, ANA_AUTOAGE_AGE_PERIOD_M, ANA_AUTOAGE);
 }
 EXPORT_SYMBOL(ocelot_set_ageing_time);
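
The unit fix: ANA_AUTOAGE_AGE_PERIOD evidently holds seconds, and an entry is aged out only after two periods without activity, hence msecs / 2000; the old msecs / 2 programmed values roughly a thousand times too large. With the default bridge ageing time of 300000 ms this now yields AGE_PERIOD = 150, and switching to ocelot_rmw() with the field mask avoids clobbering neighbouring bits in ANA_AUTOAGE.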
 
index b88b589..7d4fd1b 100644 (file)
@@ -431,6 +431,7 @@ int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
        ocelot->stats_layout = ocelot_stats_layout;
        ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout);
        ocelot->shared_queue_sz = 224 * 1024;
+       ocelot->num_mact_rows = 1024;
        ocelot->ops = ops;
 
        ret = ocelot_regfields_init(ocelot, ocelot_regfields);
index bfa0c0d..8b018ed 100644 (file)
@@ -208,11 +208,13 @@ static int jazz_sonic_probe(struct platform_device *pdev)
 
        err = register_netdev(dev);
        if (err)
-               goto out1;
+               goto undo_probe1;
 
        return 0;
 
-out1:
+undo_probe1:
+       dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+                         lp->descriptors, lp->descriptors_laddr);
        release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
 out:
        free_netdev(dev);
index 9183b3e..354efff 100644 (file)
@@ -283,6 +283,7 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn,
        if (!nfp_nsp_has_hwinfo_lookup(nsp)) {
                nfp_warn(pf->cpp, "NSP doesn't support PF MAC generation\n");
                eth_hw_addr_random(nn->dp.netdev);
+               nfp_nsp_close(nsp);
                return;
        }
 
index 5f8fc58..11621cc 100644 (file)
@@ -170,8 +170,7 @@ void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
        debugfs_create_x64("base_pa", 0400, cq_dentry, &cq->base_pa);
        debugfs_create_u32("num_descs", 0400, cq_dentry, &cq->num_descs);
        debugfs_create_u32("desc_size", 0400, cq_dentry, &cq->desc_size);
-       debugfs_create_u8("done_color", 0400, cq_dentry,
-                         (u8 *)&cq->done_color);
+       debugfs_create_bool("done_color", 0400, cq_dentry, &cq->done_color);
 
        debugfs_create_file("tail", 0400, cq_dentry, cq, &cq_tail_fops);
 
index 5acf4f4..d5293bf 100644 (file)
@@ -2101,6 +2101,7 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
                ionic_txrx_free(lif);
        }
        ionic_lifs_deinit(ionic);
+       ionic_reset(ionic);
        ionic_qcqs_free(lif);
 
        dev_info(ionic->dev, "FW Down: LIFs stopped\n");
@@ -2116,6 +2117,7 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
 
        dev_info(ionic->dev, "FW Up: restarting LIFs\n");
 
+       ionic_init_devinfo(ionic);
        err = ionic_qcqs_alloc(lif);
        if (err)
                goto err_out;
@@ -2549,8 +2551,6 @@ int ionic_lifs_register(struct ionic *ionic)
                dev_err(ionic->dev, "Cannot register net device, aborting\n");
                return err;
        }
-
-       ionic_link_status_check_request(ionic->master_lif);
        ionic->master_lif->registered = true;
 
        return 0;
index 494c859..67ba67e 100644 (file)
@@ -624,7 +624,7 @@ int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
                total_offset += offset;
        }
 
-       total_ctr = cfg->ctr[0] + cfg->ctr[1] * 1000000000;
+       total_ctr = cfg->ctr[0] + cfg->ctr[1] * 1000000000ULL;
        total_ctr += total_offset;
 
        ctr_low = do_div(total_ctr, 1000000000);
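
The suffix matters because cfg->ctr[1] and the bare constant are both 32-bit here, so the multiplication wraps before the result is widened into the 64-bit total: any seconds value of 5 or more (5 x 10^9 > 2^32) overflows. A worked illustration:

    u32 secs = 5;
    u64 bad  = secs * 1000000000;           /* 32-bit multiply wraps to 705032704 */
    u64 good = secs * 1000000000ULL;        /* widened first: 5000000000 */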
index 565da64..a999d6b 100644 (file)
@@ -4060,7 +4060,7 @@ static int stmmac_set_features(struct net_device *netdev,
 /**
  *  stmmac_interrupt - main ISR
  *  @irq: interrupt number.
- *  @dev_id: to pass the net device pointer.
+ *  @dev_id: to pass the net device pointer (must be valid).
  *  Description: this is the main driver interrupt service routine.
  *  It can call:
  *  o DMA service routine (to manage incoming frame reception and transmission
@@ -4084,11 +4084,6 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
        if (priv->irq_wake)
                pm_wakeup_event(priv->device, 0);
 
-       if (unlikely(!dev)) {
-               netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
-               return IRQ_NONE;
-       }
-
        /* Check if adapter is up */
        if (test_bit(STMMAC_DOWN, &priv->state))
                return IRQ_HANDLED;
@@ -4991,7 +4986,7 @@ int stmmac_dvr_probe(struct device *device,
                                                 priv->plat->bsp_priv);
 
                if (ret < 0)
-                       return ret;
+                       goto error_serdes_powerup;
        }
 
 #ifdef CONFIG_DEBUG_FS
@@ -5000,6 +4995,8 @@ int stmmac_dvr_probe(struct device *device,
 
        return ret;
 
+error_serdes_powerup:
+       unregister_netdev(ndev);
 error_netdev_register:
        phylink_destroy(priv->phylink);
 error_phy_setup:
index 89cec77..8e34878 100644 (file)
@@ -90,9 +90,8 @@ config TI_CPTS
 config TI_CPTS_MOD
        tristate
        depends on TI_CPTS
+       depends on PTP_1588_CLOCK
        default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y || TI_CPSW_SWITCHDEV=y
-       select NET_PTP_CLASSIFY
-       imply PTP_1588_CLOCK
        default m
 
 config TI_K3_AM65_CPSW_NUSS
index 2bf5673..2517ffb 100644 (file)
@@ -1719,7 +1719,8 @@ static int am65_cpsw_nuss_ndev_add_napi_2g(struct am65_cpsw_common *common)
 
                ret = devm_request_irq(dev, tx_chn->irq,
                                       am65_cpsw_nuss_tx_irq,
-                                      0, tx_chn->tx_chn_name, tx_chn);
+                                      IRQF_TRIGGER_HIGH,
+                                      tx_chn->tx_chn_name, tx_chn);
                if (ret) {
                        dev_err(dev, "failure requesting tx%u irq %u, %d\n",
                                tx_chn->id, tx_chn->irq, ret);
@@ -1744,7 +1745,7 @@ static int am65_cpsw_nuss_ndev_reg_2g(struct am65_cpsw_common *common)
 
        ret = devm_request_irq(dev, common->rx_chns.irq,
                               am65_cpsw_nuss_rx_irq,
-                              0, dev_name(dev), common);
+                              IRQF_TRIGGER_HIGH, dev_name(dev), common);
        if (ret) {
                dev_err(dev, "failure requesting rx irq %u, %d\n",
                        common->rx_chns.irq, ret);
index b50c3ec..6bcda20 100644 (file)
@@ -643,7 +643,7 @@ static int tc_mii_probe(struct net_device *dev)
                linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask);
                linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
        }
-       linkmode_and(phydev->supported, phydev->supported, mask);
+       linkmode_andnot(phydev->supported, phydev->supported, mask);
        linkmode_copy(phydev->advertising, phydev->supported);
 
        lp->link = 0;
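
The mask built above collects the half-duplex modes that should be removed,
so combining it with linkmode_and() kept exactly the wrong set;
linkmode_andnot() clears the masked bits instead (dst = src & ~mask). A
minimal sketch of the corrected semantics:

  #include <linux/ethtool.h>
  #include <linux/linkmode.h>
  #include <linux/phy.h>

  /* Strip half-duplex modes from a PHY's supported set. */
  static void example_strip_half_duplex(struct phy_device *phydev)
  {
          __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

          linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask);
          linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
          linkmode_andnot(phydev->supported, phydev->supported, mask);
  }
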
index 672cd2c..21640a0 100644 (file)
@@ -1169,11 +1169,11 @@ out_unlock:
 static struct genl_family gtp_genl_family;
 
 static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
-                             u32 type, struct pdp_ctx *pctx)
+                             int flags, u32 type, struct pdp_ctx *pctx)
 {
        void *genlh;
 
-       genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, 0,
+       genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, flags,
                            type);
        if (genlh == NULL)
                goto nlmsg_failure;
@@ -1227,8 +1227,8 @@ static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
                goto err_unlock;
        }
 
-       err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid,
-                                info->snd_seq, info->nlhdr->nlmsg_type, pctx);
+       err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid, info->snd_seq,
+                                0, info->nlhdr->nlmsg_type, pctx);
        if (err < 0)
                goto err_unlock_free;
 
@@ -1271,6 +1271,7 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb,
                                    gtp_genl_fill_info(skb,
                                            NETLINK_CB(cb->skb).portid,
                                            cb->nlh->nlmsg_seq,
+                                           NLM_F_MULTI,
                                            cb->nlh->nlmsg_type, pctx)) {
                                        cb->args[0] = i;
                                        cb->args[1] = j;
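
Replies built from a netlink dump callback are part of a multipart message
and must carry NLM_F_MULTI (the dump machinery terminates the stream with a
separate NLMSG_DONE), which is why the fill helper grows a flags argument.
A minimal sketch, assuming a genl family example_family registered
elsewhere:

  #include <net/genetlink.h>

  static struct genl_family example_family;   /* assumed registered */

  /* Dump callers pass NLM_F_MULTI; do/get callers pass 0. */
  static int example_fill(struct sk_buff *skb, u32 portid, u32 seq,
                          int flags, u8 cmd)
  {
          void *hdr = genlmsg_put(skb, portid, seq, &example_family,
                                  flags, cmd);

          if (!hdr)
                  return -EMSGSIZE;
          /* ... nla_put() attributes here ... */
          genlmsg_end(skb, hdr);
          return 0;
  }
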
index d8e86bd..ebcfbae 100644 (file)
@@ -707,7 +707,8 @@ no_memory:
        goto drop;
 }
 
-static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t netvsc_start_xmit(struct sk_buff *skb,
+                                    struct net_device *ndev)
 {
        return netvsc_xmit(skb, ndev, false);
 }
index 845478a..b671bea 100644 (file)
@@ -1041,6 +1041,7 @@ static void gsi_isr_gp_int1(struct gsi *gsi)
 
        complete(&gsi->completion);
 }
+
 /* Inter-EE interrupt handler */
 static void gsi_isr_glob_ee(struct gsi *gsi)
 {
@@ -1493,6 +1494,12 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
        struct completion *completion = &gsi->completion;
        u32 val;
 
+       /* First zero the result code field */
+       val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
+       val &= ~GENERIC_EE_RESULT_FMASK;
+       iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
+
+       /* Now issue the command */
        val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
        val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
        val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
@@ -1798,9 +1805,9 @@ static int gsi_channel_init_one(struct gsi *gsi,
 
        /* Worst case we need an event for every outstanding TRE */
        if (data->channel.tre_count > data->channel.event_count) {
-               dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
-                       data->channel_id, data->channel.tre_count);
                tre_count = data->channel.event_count;
+               dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
+                        data->channel_id, tre_count);
        } else {
                tre_count = data->channel.tre_count;
        }
index 7613b9c..acc9e74 100644 (file)
 #define INTER_EE_RESULT_FMASK          GENMASK(2, 0)
 #define GENERIC_EE_RESULT_FMASK                GENMASK(7, 5)
 #define GENERIC_EE_SUCCESS_FVAL                        1
+#define GENERIC_EE_INCORRECT_DIRECTION_FVAL    3
+#define GENERIC_EE_INCORRECT_CHANNEL_FVAL      5
 #define GENERIC_EE_NO_RESOURCES_FVAL           7
 #define USB_MAX_PACKET_FMASK           GENMASK(15, 15) /* 0: HS; 1: SS */
 #define MHI_BASE_CHANNEL_FMASK         GENMASK(31, 24)
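
The new FVAL codes live in the 3-bit result field selected by
GENERIC_EE_RESULT_FMASK; GENMASK() plus the bitfield helpers take care of
the masking and shifting. A short sketch of decoding such a field:

  #include <linux/bitfield.h>
  #include <linux/bits.h>

  #define EXAMPLE_RESULT_FMASK    GENMASK(7, 5)

  /* u32_get_bits() shifts the masked field down to bit 0: a scratch
   * value with bits 7:5 set to 0b111 decodes to 7 (NO_RESOURCES). */
  static u32 example_result(u32 scratch)
  {
          return u32_get_bits(scratch, EXAMPLE_RESULT_FMASK);
  }
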
index 6de03be..a21534f 100644 (file)
@@ -1283,7 +1283,7 @@ static int ipa_endpoint_stop_rx_dma(struct ipa *ipa)
  */
 int ipa_endpoint_stop(struct ipa_endpoint *endpoint)
 {
-       u32 retries = endpoint->toward_ipa ? 0 : IPA_ENDPOINT_STOP_RX_RETRIES;
+       u32 retries = IPA_ENDPOINT_STOP_RX_RETRIES;
        int ret;
 
        do {
@@ -1291,12 +1291,9 @@ int ipa_endpoint_stop(struct ipa_endpoint *endpoint)
                struct gsi *gsi = &ipa->gsi;
 
                ret = gsi_channel_stop(gsi, endpoint->channel_id);
-               if (ret != -EAGAIN)
+               if (ret != -EAGAIN || endpoint->toward_ipa)
                        break;
 
-               if (endpoint->toward_ipa)
-                       continue;
-
                /* For IPA v3.5.1, send a DMA read task and check again */
                if (ipa->version == IPA_VERSION_3_5_1) {
                        ret = ipa_endpoint_stop_rx_dma(ipa);
index 758baf7..d0d31cb 100644 (file)
@@ -1305,7 +1305,8 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
        struct crypto_aead *tfm;
        int ret;
 
-       tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
+       /* Pick a sync gcm(aes) cipher to ensure order is preserved. */
+       tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
 
        if (IS_ERR(tfm))
                return tfm;
@@ -2640,11 +2641,12 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
        if (ret)
                goto rollback;
 
-       rtnl_unlock();
        /* Force features update, since they are different for SW MACSec and
         * HW offloading cases.
         */
        netdev_update_features(dev);
+
+       rtnl_unlock();
        return 0;
 
 rollback:
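
Passing CRYPTO_ALG_ASYNC in the mask argument, with the bit clear in the
type argument, restricts the allocation to synchronous gcm(aes)
implementations, which is what preserves completion order here. A sketch of
the allocation pattern; callers still check the result with IS_ERR():

  #include <crypto/aead.h>

  /* type = 0 and mask = CRYPTO_ALG_ASYNC means "the ASYNC flag must be
   * clear", i.e. only synchronous implementations are eligible. */
  static struct crypto_aead *example_alloc_sync_gcm(void)
  {
          return crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
  }
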
index 415c273..ecbd5e0 100644 (file)
@@ -1120,7 +1120,7 @@ static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus)
                goto out;
        }
        dp83640_clock_init(clock, bus);
-       list_add_tail(&phyter_clocks, &clock->list);
+       list_add_tail(&clock->list, &phyter_clocks);
 out:
        mutex_unlock(&phyter_clocks_lock);
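
list_add_tail() takes the new entry first and the list head second; with
the arguments swapped, the list head itself was spliced onto the new
element, corrupting the global clock list. A minimal sketch of the correct
call:

  #include <linux/list.h>

  static LIST_HEAD(example_clocks);

  struct example_clock {
          struct list_head list;
  };

  /* New entry first, list head second. */
  static void example_register(struct example_clock *clock)
  {
          list_add_tail(&clock->list, &example_clocks);
  }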
 
index fe9aa3a..1dd19d0 100644 (file)
@@ -137,19 +137,18 @@ static int dp83822_set_wol(struct phy_device *phydev,
                        value &= ~DP83822_WOL_SECURE_ON;
                }
 
-               value |= (DP83822_WOL_EN | DP83822_WOL_INDICATION_SEL |
-                         DP83822_WOL_CLR_INDICATION);
-               phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG,
-                             value);
+               /* Clear any pending WoL interrupt */
+               phy_read(phydev, MII_DP83822_MISR2);
+
+               value |= DP83822_WOL_EN | DP83822_WOL_INDICATION_SEL |
+                        DP83822_WOL_CLR_INDICATION;
+
+               return phy_write_mmd(phydev, DP83822_DEVADDR,
+                                    MII_DP83822_WOL_CFG, value);
        } else {
-               value = phy_read_mmd(phydev, DP83822_DEVADDR,
-                                    MII_DP83822_WOL_CFG);
-               value &= ~DP83822_WOL_EN;
-               phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG,
-                             value);
+               return phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
+                                         MII_DP83822_WOL_CFG, DP83822_WOL_EN);
        }
-
-       return 0;
 }
 
 static void dp83822_get_wol(struct phy_device *phydev,
@@ -258,12 +257,11 @@ static int dp83822_config_intr(struct phy_device *phydev)
 
 static int dp83822_config_init(struct phy_device *phydev)
 {
-       int value;
-
-       value = DP83822_WOL_MAGIC_EN | DP83822_WOL_SECURE_ON | DP83822_WOL_EN;
+       int value = DP83822_WOL_EN | DP83822_WOL_MAGIC_EN |
+                   DP83822_WOL_SECURE_ON;
 
-       return phy_write_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG,
-             value);
+       return phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
+                                 MII_DP83822_WOL_CFG, value);
 }
 
 static int dp83822_phy_reset(struct phy_device *phydev)
index 06f0883..d737253 100644 (file)
@@ -139,16 +139,19 @@ static int dp83811_set_wol(struct phy_device *phydev,
                        value &= ~DP83811_WOL_SECURE_ON;
                }
 
-               value |= (DP83811_WOL_EN | DP83811_WOL_INDICATION_SEL |
-                         DP83811_WOL_CLR_INDICATION);
-               phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
-                             value);
+               /* Clear any pending WoL interrupt */
+               phy_read(phydev, MII_DP83811_INT_STAT1);
+
+               value |= DP83811_WOL_EN | DP83811_WOL_INDICATION_SEL |
+                        DP83811_WOL_CLR_INDICATION;
+
+               return phy_write_mmd(phydev, DP83811_DEVADDR,
+                                    MII_DP83811_WOL_CFG, value);
        } else {
-               phy_clear_bits_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
-                                  DP83811_WOL_EN);
+               return phy_clear_bits_mmd(phydev, DP83811_DEVADDR,
+                                         MII_DP83811_WOL_CFG, DP83811_WOL_EN);
        }
 
-       return 0;
 }
 
 static void dp83811_get_wol(struct phy_device *phydev,
@@ -292,8 +295,8 @@ static int dp83811_config_init(struct phy_device *phydev)
 
        value = DP83811_WOL_MAGIC_EN | DP83811_WOL_SECURE_ON | DP83811_WOL_EN;
 
-       return phy_write_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
-             value);
+       return phy_clear_bits_mmd(phydev, DP83811_DEVADDR, MII_DP83811_WOL_CFG,
+                                 value);
 }
 
 static int dp83811_phy_reset(struct phy_device *phydev)
index ff12492..1f1a01c 100644 (file)
@@ -66,6 +66,9 @@ enum {
        MV_PCS_CSSR1_SPD2_2500  = 0x0004,
        MV_PCS_CSSR1_SPD2_10000 = 0x0000,
 
+       /* Temperature read register (88E2110 only) */
+       MV_PCS_TEMP             = 0x8042,
+
        /* These registers appear at 0x800X and 0xa00X - the 0xa00X control
         * registers appear to set themselves to the 0x800X when AN is
         * restarted, but status registers appear readable from either.
@@ -77,6 +80,7 @@ enum {
        MV_V2_PORT_CTRL         = 0xf001,
        MV_V2_PORT_CTRL_SWRST   = BIT(15),
        MV_V2_PORT_CTRL_PWRDOWN = BIT(11),
+       /* Temperature control/read registers (88X3310 only) */
        MV_V2_TEMP_CTRL         = 0xf08a,
        MV_V2_TEMP_CTRL_MASK    = 0xc000,
        MV_V2_TEMP_CTRL_SAMPLE  = 0x0000,
@@ -104,6 +108,24 @@ static umode_t mv3310_hwmon_is_visible(const void *data,
        return 0;
 }
 
+static int mv3310_hwmon_read_temp_reg(struct phy_device *phydev)
+{
+       return phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP);
+}
+
+static int mv2110_hwmon_read_temp_reg(struct phy_device *phydev)
+{
+       return phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_TEMP);
+}
+
+static int mv10g_hwmon_read_temp_reg(struct phy_device *phydev)
+{
+       if (phydev->drv->phy_id == MARVELL_PHY_ID_88X3310)
+               return mv3310_hwmon_read_temp_reg(phydev);
+       else /* MARVELL_PHY_ID_88E2110 */
+               return mv2110_hwmon_read_temp_reg(phydev);
+}
+
 static int mv3310_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
                             u32 attr, int channel, long *value)
 {
@@ -116,7 +138,7 @@ static int mv3310_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
        }
 
        if (type == hwmon_temp && attr == hwmon_temp_input) {
-               temp = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP);
+               temp = mv10g_hwmon_read_temp_reg(phydev);
                if (temp < 0)
                        return temp;
 
@@ -169,6 +191,9 @@ static int mv3310_hwmon_config(struct phy_device *phydev, bool enable)
        u16 val;
        int ret;
 
+       if (phydev->drv->phy_id != MARVELL_PHY_ID_88X3310)
+               return 0;
+
        ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, MV_V2_TEMP,
                            MV_V2_TEMP_UNKNOWN);
        if (ret < 0)
index 6c738a2..4bb8552 100644 (file)
@@ -1359,6 +1359,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x413c, 0x81b3, 8)},    /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
        {QMI_FIXED_INTF(0x413c, 0x81b6, 8)},    /* Dell Wireless 5811e */
        {QMI_FIXED_INTF(0x413c, 0x81b6, 10)},   /* Dell Wireless 5811e */
+       {QMI_FIXED_INTF(0x413c, 0x81cc, 8)},    /* Dell Wireless 5816e */
        {QMI_FIXED_INTF(0x413c, 0x81d7, 0)},    /* Dell Wireless 5821e */
        {QMI_FIXED_INTF(0x413c, 0x81d7, 1)},    /* Dell Wireless 5821e preproduction config */
        {QMI_FIXED_INTF(0x413c, 0x81e0, 0)},    /* Dell Wireless 5821e with eSIM support*/
index 5c964fc..71b8e80 100644 (file)
@@ -35,8 +35,10 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
                if (multicore) {
                        queue->worker = wg_packet_percpu_multicore_worker_alloc(
                                function, queue);
-                       if (!queue->worker)
+                       if (!queue->worker) {
+                               ptr_ring_cleanup(&queue->ring, NULL);
                                return -ENOMEM;
+                       }
                } else {
                        INIT_WORK(&queue->work, function);
                }
index da3b782..3bb5b9a 100644 (file)
@@ -226,21 +226,20 @@ void wg_packet_handshake_receive_worker(struct work_struct *work)
 static void keep_key_fresh(struct wg_peer *peer)
 {
        struct noise_keypair *keypair;
-       bool send = false;
+       bool send;
 
        if (peer->sent_lastminute_handshake)
                return;
 
        rcu_read_lock_bh();
        keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
-       if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) &&
-           keypair->i_am_the_initiator &&
-           unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
-                       REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT)))
-               send = true;
+       send = keypair && READ_ONCE(keypair->sending.is_valid) &&
+              keypair->i_am_the_initiator &&
+              wg_birthdate_has_expired(keypair->sending.birthdate,
+                       REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT);
        rcu_read_unlock_bh();
 
-       if (send) {
+       if (unlikely(send)) {
                peer->sent_lastminute_handshake = true;
                wg_packet_send_queued_handshake_initiation(peer, false);
        }
@@ -393,13 +392,11 @@ static void wg_packet_consume_data_done(struct wg_peer *peer,
                len = ntohs(ip_hdr(skb)->tot_len);
                if (unlikely(len < sizeof(struct iphdr)))
                        goto dishonest_packet_size;
-               if (INET_ECN_is_ce(PACKET_CB(skb)->ds))
-                       IP_ECN_set_ce(ip_hdr(skb));
+               INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ip_hdr(skb)->tos);
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                len = ntohs(ipv6_hdr(skb)->payload_len) +
                      sizeof(struct ipv6hdr);
-               if (INET_ECN_is_ce(PACKET_CB(skb)->ds))
-                       IP6_ECN_set_ce(skb, ipv6_hdr(skb));
+               INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ipv6_get_dsfield(ipv6_hdr(skb)));
        } else {
                goto dishonest_packet_type;
        }
@@ -518,6 +515,8 @@ void wg_packet_decrypt_worker(struct work_struct *work)
                                &PACKET_CB(skb)->keypair->receiving)) ?
                                PACKET_STATE_CRYPTED : PACKET_STATE_DEAD;
                wg_queue_enqueue_per_peer_napi(skb, state);
+               if (need_resched())
+                       cond_resched();
        }
 }
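
The decrypt worker can stay busy for a long time under load, so it now
yields between packets; the need_resched() guard merely skips the call when
no reschedule is pending. A minimal sketch of the pattern, with placeholder
queue helpers:

  #include <linux/sched.h>
  #include <linux/skbuff.h>

  /* Placeholders standing in for the real queue operations. */
  extern struct sk_buff *example_dequeue(void);
  extern void example_process(struct sk_buff *skb);

  static void example_worker(void)
  {
          struct sk_buff *skb;

          while ((skb = example_dequeue()) != NULL) {
                  example_process(skb);
                  /* Yield so one busy queue cannot monopolize a CPU. */
                  if (need_resched())
                          cond_resched();
          }
  }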
 
index bcd6462..007cd44 100644 (file)
@@ -120,9 +120,9 @@ bool __init wg_ratelimiter_selftest(void)
        enum { TRIALS_BEFORE_GIVING_UP = 5000 };
        bool success = false;
        int test = 0, trials;
-       struct sk_buff *skb4, *skb6;
+       struct sk_buff *skb4, *skb6 = NULL;
        struct iphdr *hdr4;
-       struct ipv6hdr *hdr6;
+       struct ipv6hdr *hdr6 = NULL;
 
        if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN))
                return true;
index 7348c10..6687db6 100644 (file)
@@ -124,20 +124,17 @@ void wg_packet_send_handshake_cookie(struct wg_device *wg,
 static void keep_key_fresh(struct wg_peer *peer)
 {
        struct noise_keypair *keypair;
-       bool send = false;
+       bool send;
 
        rcu_read_lock_bh();
        keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
-       if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) &&
-           (unlikely(atomic64_read(&keypair->sending.counter.counter) >
-                     REKEY_AFTER_MESSAGES) ||
-            (keypair->i_am_the_initiator &&
-             unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
-                                               REKEY_AFTER_TIME)))))
-               send = true;
+       send = keypair && READ_ONCE(keypair->sending.is_valid) &&
+              (atomic64_read(&keypair->sending.counter.counter) > REKEY_AFTER_MESSAGES ||
+               (keypair->i_am_the_initiator &&
+                wg_birthdate_has_expired(keypair->sending.birthdate, REKEY_AFTER_TIME)));
        rcu_read_unlock_bh();
 
-       if (send)
+       if (unlikely(send))
                wg_packet_send_queued_handshake_initiation(peer, false);
 }
 
@@ -281,6 +278,8 @@ void wg_packet_tx_worker(struct work_struct *work)
 
                wg_noise_keypair_put(keypair, false);
                wg_peer_put(peer);
+               if (need_resched())
+                       cond_resched();
        }
 }
 
@@ -304,7 +303,8 @@ void wg_packet_encrypt_worker(struct work_struct *work)
                }
                wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first,
                                          state);
-
+               if (need_resched())
+                       cond_resched();
        }
 }
 
index b0d6541..f901802 100644 (file)
@@ -76,12 +76,6 @@ static int send4(struct wg_device *wg, struct sk_buff *skb,
                        net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
                                            wg->dev->name, &endpoint->addr, ret);
                        goto err;
-               } else if (unlikely(rt->dst.dev == skb->dev)) {
-                       ip_rt_put(rt);
-                       ret = -ELOOP;
-                       net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n",
-                                           wg->dev->name, &endpoint->addr);
-                       goto err;
                }
                if (cache)
                        dst_cache_set_ip4(cache, &rt->dst, fl.saddr);
@@ -149,12 +143,6 @@ static int send6(struct wg_device *wg, struct sk_buff *skb,
                        net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
                                            wg->dev->name, &endpoint->addr, ret);
                        goto err;
-               } else if (unlikely(dst->dev == skb->dev)) {
-                       dst_release(dst);
-                       ret = -ELOOP;
-                       net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n",
-                                           wg->dev->name, &endpoint->addr);
-                       goto err;
                }
                if (cache)
                        dst_cache_set_ip6(cache, dst, &fl.saddr);
index 91c1bd6..f2adea9 100644 (file)
@@ -3642,6 +3642,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
        return;
  out_put_disk:
+       /* prevent double queue cleanup */
+       ns->disk->queue = NULL;
        put_disk(ns->disk);
  out_unlink_ns:
        mutex_lock(&ctrl->subsys->lock);
index b7f2c00..9c4af76 100644 (file)
@@ -52,28 +52,15 @@ static int cros_ec_sensorhub_register(struct device *dev,
        int sensor_type[MOTIONSENSE_TYPE_MAX] = { 0 };
        struct cros_ec_command *msg = sensorhub->msg;
        struct cros_ec_dev *ec = sensorhub->ec;
-       int ret, i, sensor_num;
+       int ret, i;
        char *name;
 
-       sensor_num = cros_ec_get_sensor_count(ec);
-       if (sensor_num < 0) {
-               dev_err(dev,
-                       "Unable to retrieve sensor information (err:%d)\n",
-                       sensor_num);
-               return sensor_num;
-       }
-
-       sensorhub->sensor_num = sensor_num;
-       if (sensor_num == 0) {
-               dev_err(dev, "Zero sensors reported.\n");
-               return -EINVAL;
-       }
 
        msg->version = 1;
        msg->insize = sizeof(struct ec_response_motion_sense);
        msg->outsize = sizeof(struct ec_params_motion_sense);
 
-       for (i = 0; i < sensor_num; i++) {
+       for (i = 0; i < sensorhub->sensor_num; i++) {
                sensorhub->params->cmd = MOTIONSENSE_CMD_INFO;
                sensorhub->params->info.sensor_num = i;
 
@@ -140,8 +127,7 @@ static int cros_ec_sensorhub_probe(struct platform_device *pdev)
        struct cros_ec_dev *ec = dev_get_drvdata(dev->parent);
        struct cros_ec_sensorhub *data;
        struct cros_ec_command *msg;
-       int ret;
-       int i;
+       int ret, i, sensor_num;
 
        msg = devm_kzalloc(dev, sizeof(struct cros_ec_command) +
                           max((u16)sizeof(struct ec_params_motion_sense),
@@ -166,10 +152,52 @@ static int cros_ec_sensorhub_probe(struct platform_device *pdev)
        dev_set_drvdata(dev, data);
 
        /* Check whether this EC is a sensor hub. */
-       if (cros_ec_check_features(data->ec, EC_FEATURE_MOTION_SENSE)) {
+       if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE)) {
+               sensor_num = cros_ec_get_sensor_count(ec);
+               if (sensor_num < 0) {
+                       dev_err(dev,
+                               "Unable to retrieve sensor information (err:%d)\n",
+                               sensor_num);
+                       return sensor_num;
+               }
+               if (sensor_num == 0) {
+                       dev_err(dev, "Zero sensors reported.\n");
+                       return -EINVAL;
+               }
+               data->sensor_num = sensor_num;
+
+               /*
+                * Prepare the ring handler before enumerating the
+                * sensors.
+                */
+               if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
+                       ret = cros_ec_sensorhub_ring_allocate(data);
+                       if (ret)
+                               return ret;
+               }
+
+               /* Enumerate the sensors. */
                ret = cros_ec_sensorhub_register(dev, data);
                if (ret)
                        return ret;
+
+               /*
+                * When the EC does not have a FIFO, the sensors will query
+                * their data themselves via sysfs or a software trigger.
+                */
+               if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
+                       ret = cros_ec_sensorhub_ring_add(data);
+                       if (ret)
+                               return ret;
+                       /*
+                        * The msg and its data are not under the control of the
+                        * ring handler.
+                        */
+                       return devm_add_action_or_reset(dev,
+                                       cros_ec_sensorhub_ring_remove,
+                                       data);
+               }
+
        } else {
                /*
                 * If the device has sensors but does not claim to
@@ -184,22 +212,6 @@ static int cros_ec_sensorhub_probe(struct platform_device *pdev)
                }
        }
 
-       /*
-        * If the EC does not have a FIFO, the sensors will query their data
-        * themselves via sysfs or a software trigger.
-        */
-       if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
-               ret = cros_ec_sensorhub_ring_add(data);
-               if (ret)
-                       return ret;
-               /*
-                * The msg and its data is not under the control of the ring
-                * handler.
-                */
-               return devm_add_action_or_reset(dev,
-                                               cros_ec_sensorhub_ring_remove,
-                                               data);
-       }
 
        return 0;
 }
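
devm_add_action_or_reset() is what lets the reordered probe return early
without leaking: the action runs automatically when the device is unbound,
or immediately if registering the action itself fails. A small sketch with
a placeholder teardown body:

  #include <linux/device.h>

  /* Release whatever was set up outside devres control. */
  static void example_teardown(void *data)
  {
          /* e.g. unregister a notifier, free a ring buffer, ... */
  }

  static int example_probe_tail(struct device *dev, void *data)
  {
          /* Runs example_teardown(data) on unbind, or right now on error. */
          return devm_add_action_or_reset(dev, example_teardown, data);
  }
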
index c48e5b3..24e48d9 100644 (file)
@@ -957,17 +957,15 @@ static int cros_ec_sensorhub_event(struct notifier_block *nb,
 }
 
 /**
- * cros_ec_sensorhub_ring_add() - Add the FIFO functionality if the EC
- *                               supports it.
+ * cros_ec_sensorhub_ring_allocate() - Prepare the FIFO functionality if the EC
+ *                                    supports it.
  *
  * @sensorhub : Sensor Hub object.
  *
  * Return: 0 on success.
  */
-int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub)
+int cros_ec_sensorhub_ring_allocate(struct cros_ec_sensorhub *sensorhub)
 {
-       struct cros_ec_dev *ec = sensorhub->ec;
-       int ret;
        int fifo_info_length =
                sizeof(struct ec_response_motion_sense_fifo_info) +
                sizeof(u16) * sensorhub->sensor_num;
@@ -978,6 +976,49 @@ int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub)
        if (!sensorhub->fifo_info)
                return -ENOMEM;
 
+       /*
+        * Allocate the callback area based on the number of sensors.
+        */
+       sensorhub->push_data = devm_kcalloc(sensorhub->dev,
+                       sensorhub->sensor_num,
+                       sizeof(*sensorhub->push_data),
+                       GFP_KERNEL);
+       if (!sensorhub->push_data)
+               return -ENOMEM;
+
+       sensorhub->tight_timestamps = cros_ec_check_features(
+                       sensorhub->ec,
+                       EC_FEATURE_MOTION_SENSE_TIGHT_TIMESTAMPS);
+
+       if (sensorhub->tight_timestamps) {
+               sensorhub->batch_state = devm_kcalloc(sensorhub->dev,
+                               sensorhub->sensor_num,
+                               sizeof(*sensorhub->batch_state),
+                               GFP_KERNEL);
+               if (!sensorhub->batch_state)
+                       return -ENOMEM;
+       }
+
+       return 0;
+}
+
+/**
+ * cros_ec_sensorhub_ring_add() - Add the FIFO functionality if the EC
+ *                               supports it.
+ *
+ * @sensorhub : Sensor Hub object.
+ *
+ * Return: 0 on success.
+ */
+int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub)
+{
+       struct cros_ec_dev *ec = sensorhub->ec;
+       int ret;
+       int fifo_info_length =
+               sizeof(struct ec_response_motion_sense_fifo_info) +
+               sizeof(u16) * sensorhub->sensor_num;
+
        /* Retrieve FIFO information */
        sensorhub->msg->version = 2;
        sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_INFO;
@@ -998,31 +1039,9 @@ int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub)
        if (!sensorhub->ring)
                return -ENOMEM;
 
-       /*
-        * Allocate the callback area based on the number of sensors.
-        */
-       sensorhub->push_data = devm_kcalloc(
-                       sensorhub->dev, sensorhub->sensor_num,
-                       sizeof(*sensorhub->push_data),
-                       GFP_KERNEL);
-       if (!sensorhub->push_data)
-               return -ENOMEM;
-
        sensorhub->fifo_timestamp[CROS_EC_SENSOR_LAST_TS] =
                cros_ec_get_time_ns();
 
-       sensorhub->tight_timestamps = cros_ec_check_features(
-                       ec, EC_FEATURE_MOTION_SENSE_TIGHT_TIMESTAMPS);
-
-       if (sensorhub->tight_timestamps) {
-               sensorhub->batch_state = devm_kcalloc(sensorhub->dev,
-                               sensorhub->sensor_num,
-                               sizeof(*sensorhub->batch_state),
-                               GFP_KERNEL);
-               if (!sensorhub->batch_state)
-                       return -ENOMEM;
-       }
-
        /* Register the notifier that will act as a top half interrupt. */
        sensorhub->notifier.notifier_call = cros_ec_sensorhub_event;
        ret = blocking_notifier_chain_register(&ec->ec_dev->event_notifier,
index 6f12747..c4404d9 100644 (file)
@@ -515,9 +515,33 @@ static struct asus_wmi_driver asus_nb_wmi_driver = {
        .detect_quirks = asus_nb_wmi_quirks,
 };
 
+static const struct dmi_system_id asus_nb_wmi_blacklist[] __initconst = {
+       {
+               /*
+                * asus-nb-wm adds no functionality. The T100TA has a detachable
+                * USB kbd, so no hotkeys and it has no WMI rfkill; and loading
+                * asus-nb-wm causes the camera LED to turn and _stay_ on.
+                */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"),
+               },
+       },
+       {
+               /* The Asus T200TA has the same issue as the T100TA */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T200TA"),
+               },
+       },
+       {} /* Terminating entry */
+};
 
 static int __init asus_nb_wmi_init(void)
 {
+       if (dmi_check_system(asus_nb_wmi_blacklist))
+               return -ENODEV;
+
        return asus_wmi_register_driver(&asus_nb_wmi_driver);
 }
 
index b96d172..12d5ab7 100644 (file)
@@ -53,7 +53,7 @@ static int uncore_max_entries __read_mostly;
 /* Storage for uncore data for all instances */
 static struct uncore_data *uncore_instances;
 /* Root of the all uncore sysfs kobjs */
-struct kobject *uncore_root_kobj;
+static struct kobject *uncore_root_kobj;
 /* Stores the CPU mask of the target CPUs to use during uncore read/write */
 static cpumask_t uncore_cpu_mask;
 /* CPU online callback register instance */
index d2a5d4c..7c8bdab 100644 (file)
@@ -255,7 +255,7 @@ static const struct pmc_bit_map *ext_cnp_pfear_map[] = {
 };
 
 static const struct pmc_bit_map icl_pfear_map[] = {
-       /* Ice Lake generation onwards only */
+       /* Ice Lake and Jasper Lake generation onwards only */
        {"RES_65",              BIT(0)},
        {"RES_66",              BIT(1)},
        {"RES_67",              BIT(2)},
@@ -274,7 +274,7 @@ static const struct pmc_bit_map *ext_icl_pfear_map[] = {
 };
 
 static const struct pmc_bit_map tgl_pfear_map[] = {
-       /* Tiger Lake, Elkhart Lake and Jasper Lake generation onwards only */
+       /* Tiger Lake and Elkhart Lake generation onwards only */
        {"PSF9",                BIT(0)},
        {"RES_66",              BIT(1)},
        {"RES_67",              BIT(2)},
@@ -692,7 +692,6 @@ static void pmc_core_lpm_display(struct pmc_dev *pmcdev, struct device *dev,
        kfree(lpm_regs);
 }
 
-#if IS_ENABLED(CONFIG_DEBUG_FS)
 static bool slps0_dbg_latch;
 
 static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
@@ -1133,15 +1132,6 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
                                    &pmc_core_substate_l_sts_regs_fops);
        }
 }
-#else
-static inline void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
-{
-}
-
-static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
-{
-}
-#endif /* CONFIG_DEBUG_FS */
 
 static const struct x86_cpu_id intel_pmc_core_ids[] = {
        X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L,           &spt_reg_map),
@@ -1156,7 +1146,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
        X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,         &tgl_reg_map),
        X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,           &tgl_reg_map),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT,        &tgl_reg_map),
-       X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,      &tgl_reg_map),
+       X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,      &icl_reg_map),
        {}
 };
 
@@ -1260,13 +1250,11 @@ static int pmc_core_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-
 static bool warn_on_s0ix_failures;
 module_param(warn_on_s0ix_failures, bool, 0644);
 MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures");
 
-static int pmc_core_suspend(struct device *dev)
+static __maybe_unused int pmc_core_suspend(struct device *dev)
 {
        struct pmc_dev *pmcdev = dev_get_drvdata(dev);
 
@@ -1318,7 +1306,7 @@ static inline bool pmc_core_is_s0ix_failed(struct pmc_dev *pmcdev)
        return false;
 }
 
-static int pmc_core_resume(struct device *dev)
+static __maybe_unused int pmc_core_resume(struct device *dev)
 {
        struct pmc_dev *pmcdev = dev_get_drvdata(dev);
        const struct pmc_bit_map **maps = pmcdev->map->lpm_sts;
@@ -1348,8 +1336,6 @@ static int pmc_core_resume(struct device *dev)
        return 0;
 }
 
-#endif
-
 static const struct dev_pm_ops pmc_core_pm_ops = {
        SET_LATE_SYSTEM_SLEEP_PM_OPS(pmc_core_suspend, pmc_core_resume)
 };
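
Marking the callbacks __maybe_unused and letting SET_LATE_SYSTEM_SLEEP_PM_OPS
compile the pointers away replaces the #ifdef CONFIG_PM_SLEEP blocks without
triggering unused-function warnings. A minimal sketch of the idiom:

  #include <linux/device.h>
  #include <linux/pm.h>

  static __maybe_unused int example_suspend(struct device *dev)
  {
          return 0;
  }

  static __maybe_unused int example_resume(struct device *dev)
  {
          return 0;
  }

  /* The macro expands to nothing when CONFIG_PM_SLEEP is off, and
   * __maybe_unused keeps the then-unreferenced callbacks warning-free. */
  static const struct dev_pm_ops example_pm_ops = {
          SET_LATE_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
  };
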
index 0d50b24..5eae55d 100644 (file)
@@ -282,9 +282,7 @@ struct pmc_dev {
        u32 base_addr;
        void __iomem *regbase;
        const struct pmc_reg_map *map;
-#if IS_ENABLED(CONFIG_DEBUG_FS)
        struct dentry *dbgfs_dir;
-#endif /* CONFIG_DEBUG_FS */
        int pmc_xram_read_bit;
        struct mutex lock; /* generic mutex lock for PMC Core */
 
index 946ac2d..cc4f9cb 100644 (file)
@@ -522,8 +522,8 @@ static int mshw0011_probe(struct i2c_client *client)
        strlcpy(board_info.type, "MSHW0011-bat0", I2C_NAME_SIZE);
 
        bat0 = i2c_acpi_new_device(dev, 1, &board_info);
-       if (!bat0)
-               return -ENOMEM;
+       if (IS_ERR(bat0))
+               return PTR_ERR(bat0);
 
        data->bat0 = bat0;
        i2c_set_clientdata(bat0, data);
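
i2c_acpi_new_device() reports failure through ERR_PTR() rather than NULL,
so the old NULL check could never fire. The usual shape for
ERR_PTR-returning APIs, as an illustrative sketch:

  #include <linux/err.h>
  #include <linux/i2c.h>

  /* ERR_PTR-returning APIs are checked with IS_ERR()/PTR_ERR(),
   * never against NULL. */
  static int example_check(struct i2c_client *client)
  {
          if (IS_ERR(client))
                  return PTR_ERR(client);
          return 0;
  }
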
index 8eaadba..0f70448 100644 (file)
@@ -9548,7 +9548,7 @@ static ssize_t tpacpi_battery_store(int what,
                if (!battery_info.batteries[battery].start_support)
                        return -ENODEV;
                /* valid values are [0, 99] */
-               if (value < 0 || value > 99)
+               if (value > 99)
                        return -EINVAL;
                if (value > battery_info.batteries[battery].charge_stop)
                        return -EINVAL;
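
Here value is unsigned, so the removed "value < 0" arm was dead code that
static checkers flag; only the upper bound is meaningful. A one-line sketch
of the reduced range check:

  #include <linux/errno.h>

  /* For an unsigned value the range [0, 99] needs only the upper-bound
   * test; "value < 0" is always false. */
  static int example_validate(unsigned long value)
  {
          return value > 99 ? -EINVAL : 0;
  }
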
index 601cbb2..54a2546 100644 (file)
@@ -23,7 +23,7 @@ struct xiaomi_wmi {
        unsigned int key_code;
 };
 
-int xiaomi_wmi_probe(struct wmi_device *wdev, const void *context)
+static int xiaomi_wmi_probe(struct wmi_device *wdev, const void *context)
 {
        struct xiaomi_wmi *data;
 
@@ -48,7 +48,7 @@ int xiaomi_wmi_probe(struct wmi_device *wdev, const void *context)
        return input_register_device(data->input_dev);
 }
 
-void xiaomi_wmi_notify(struct wmi_device *wdev, union acpi_object *dummy)
+static void xiaomi_wmi_notify(struct wmi_device *wdev, union acpi_object *dummy)
 {
        struct xiaomi_wmi *data;
 
index c340505..7486f6e 100644 (file)
@@ -5754,10 +5754,6 @@ static DECLARE_DELAYED_WORK(regulator_init_complete_work,
 
 static int __init regulator_init_complete(void)
 {
-       int delay = driver_deferred_probe_timeout;
-
-       if (delay < 0)
-               delay = 0;
        /*
         * Since DT doesn't provide an idiomatic mechanism for
         * enabling full constraints and since it's much more natural
@@ -5768,17 +5764,18 @@ static int __init regulator_init_complete(void)
                has_full_constraints = true;
 
        /*
-        * If driver_deferred_probe_timeout is set, we punt
-        * completion for that many seconds since systems like
-        * distros will load many drivers from userspace so consumers
-        * might not always be ready yet, this is particularly an
-        * issue with laptops where this might bounce the display off
-        * then on.  Ideally we'd get a notification from userspace
-        * when this happens but we don't so just wait a bit and hope
-        * we waited long enough.  It'd be better if we'd only do
-        * this on systems that need it.
+        * We punt completion for an arbitrary amount of time since
+        * systems like distros will load many drivers from userspace
+        * so consumers might not always be ready yet, this is
+        * particularly an issue with laptops where this might bounce
+        * the display off then on.  Ideally we'd get a notification
+        * from userspace when this happens but we don't so just wait
+        * a bit and hope we waited long enough.  It'd be better if
+        * we'd only do this on systems that need it, and a kernel
+        * command line option might be useful.
         */
-       schedule_delayed_work(&regulator_init_complete_work, delay * HZ);
+       schedule_delayed_work(&regulator_init_complete_work,
+                             msecs_to_jiffies(30000));
 
        return 0;
 }
index f768946..569966b 100644 (file)
@@ -6717,17 +6717,17 @@ int qeth_stop(struct net_device *dev)
                unsigned int i;
 
                /* Quiesce the NAPI instances: */
-               qeth_for_each_output_queue(card, queue, i) {
+               qeth_for_each_output_queue(card, queue, i)
                        napi_disable(&queue->napi);
-                       del_timer_sync(&queue->timer);
-               }
 
                /* Stop .ndo_start_xmit, might still access queue->napi. */
                netif_tx_disable(dev);
 
-               /* Queues may get re-allocated, so remove the NAPIs here. */
-               qeth_for_each_output_queue(card, queue, i)
+               qeth_for_each_output_queue(card, queue, i) {
+                       del_timer_sync(&queue->timer);
+                       /* Queues may get re-allocated, so remove the NAPIs. */
                        netif_napi_del(&queue->napi);
+               }
        } else {
                netif_tx_disable(dev);
        }
index d190db5..1d9a486 100644 (file)
@@ -3732,6 +3732,13 @@ qla2x00_remove_one(struct pci_dev *pdev)
        }
        qla2x00_wait_for_hba_ready(base_vha);
 
+       /*
+        * If the UNLOADING flag is already set, then continue the unload
+        * where it was set first.
+        */
+       if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
+               return;
+
        if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
            IS_QLA28XX(ha)) {
                if (ha->flags.fw_started)
@@ -3750,15 +3757,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
 
        qla2x00_wait_for_sess_deletion(base_vha);
 
-       /*
-        * if UNLOAD flag is already set, then continue unload,
-        * where it was set first.
-        */
-       if (test_bit(UNLOADING, &base_vha->dpc_flags))
-               return;
-
-       set_bit(UNLOADING, &base_vha->dpc_flags);
-
        qla_nvme_delete(base_vha);
 
        dma_free_coherent(&ha->pdev->dev,
@@ -4864,6 +4862,9 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
        struct qla_work_evt *e;
        uint8_t bail;
 
+       if (test_bit(UNLOADING, &vha->dpc_flags))
+               return NULL;
+
        QLA_VHA_MARK_BUSY(vha, bail);
        if (bail)
                return NULL;
@@ -6628,13 +6629,6 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
        struct pci_dev *pdev = ha->pdev;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
-       /*
-        * if UNLOAD flag is already set, then continue unload,
-        * where it was set first.
-        */
-       if (test_bit(UNLOADING, &base_vha->dpc_flags))
-               return;
-
        ql_log(ql_log_warn, base_vha, 0x015b,
            "Disabling adapter.\n");
 
@@ -6645,9 +6639,14 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
                return;
        }
 
-       qla2x00_wait_for_sess_deletion(base_vha);
+       /*
+        * If the UNLOADING flag is already set, then continue the unload
+        * where it was set first.
+        */
+       if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
+               return;
 
-       set_bit(UNLOADING, &base_vha->dpc_flags);
+       qla2x00_wait_for_sess_deletion(base_vha);
 
        qla2x00_delete_all_vps(ha, base_vha);
 
index 47835c4..06c260f 100644 (file)
@@ -2284,6 +2284,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
                switch (oldstate) {
                case SDEV_RUNNING:
                case SDEV_CREATED_BLOCK:
+               case SDEV_QUIESCE:
                case SDEV_OFFLINE:
                        break;
                default:
index 8e0575f..67325fb 100644 (file)
@@ -925,6 +925,10 @@ do_map_region(const struct gasket_dev *gasket_dev, struct vm_area_struct *vma,
                gasket_get_bar_index(gasket_dev,
                                     (vma->vm_pgoff << PAGE_SHIFT) +
                                     driver_desc->legacy_mmap_address_offset);
+
+       if (bar_index < 0)
+               return DO_MAP_REGION_INVALID;
+
        phys_base = gasket_dev->bar_data[bar_index].phys_base + phys_offset;
        while (mapped_bytes < map_length) {
                /*
index 87a6dac..ab6f391 100644 (file)
@@ -30,5 +30,4 @@ Now the TODOs:
 
 Please send any patches to:
 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Wolfram Sang <wsa@the-dreams.de>
 Linux Driver Project Developer List <driverdev-devel@linuxdriverproject.org>
index 51ffd5c..1c181d3 100644 (file)
@@ -432,7 +432,7 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
                                target_to_linux_sector(dev, cmd->t_task_lba),
                                target_to_linux_sector(dev,
                                        sbc_get_write_same_sectors(cmd)),
-                               GFP_KERNEL, false);
+                               GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
        if (ret)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
index 3d084ce..50c7534 100644 (file)
@@ -182,6 +182,9 @@ static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status)
                return ret;
 
        ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
+       if (ret)
+               return ret;
+
        if (val & ROUTER_CS_26_ONS)
                return -EOPNOTSUPP;
 
index 31b7e1b..d1b27b0 100644 (file)
@@ -88,7 +88,7 @@ config HVC_DCC
 
 config HVC_RISCV_SBI
        bool "RISC-V SBI console support"
-       depends on RISCV_SBI
+       depends on RISCV_SBI_V01
        select HVC_DRIVER
        help
          This enables support for console output via RISC-V SBI calls, which
index 0aea76c..adf9e80 100644 (file)
@@ -86,7 +86,7 @@ config SERIAL_EARLYCON_ARM_SEMIHOST
 
 config SERIAL_EARLYCON_RISCV_SBI
        bool "Early console using RISC-V SBI"
-       depends on RISCV_SBI
+       depends on RISCV_SBI_V01
        select SERIAL_CORE
        select SERIAL_CORE_CONSOLE
        select SERIAL_EARLYCON
index ed0aa5c..5674da2 100644 (file)
@@ -843,10 +843,8 @@ static int bcm_uart_probe(struct platform_device *pdev)
        if (IS_ERR(clk) && pdev->dev.of_node)
                clk = of_clk_get(pdev->dev.of_node, 0);
 
-       if (IS_ERR(clk)) {
-               clk_put(clk);
+       if (IS_ERR(clk))
                return -ENODEV;
-       }
 
        port->iotype = UPIO_MEM;
        port->irq = res_irq->start;
index ac137b6..35e9e8f 100644 (file)
@@ -1459,6 +1459,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
                cdns_uart_uart_driver.nr = CDNS_UART_NR_PORTS;
 #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
                cdns_uart_uart_driver.cons = &cdns_uart_console;
+               cdns_uart_console.index = id;
 #endif
 
                rc = uart_register_driver(&cdns_uart_uart_driver);
index e5ffed7..48a8199 100644 (file)
@@ -365,9 +365,14 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows)
        return uniscr;
 }
 
+static void vc_uniscr_free(struct uni_screen *uniscr)
+{
+       vfree(uniscr);
+}
+
 static void vc_uniscr_set(struct vc_data *vc, struct uni_screen *new_uniscr)
 {
-       vfree(vc->vc_uni_screen);
+       vc_uniscr_free(vc->vc_uni_screen);
        vc->vc_uni_screen = new_uniscr;
 }
 
@@ -1230,7 +1235,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
        err = resize_screen(vc, new_cols, new_rows, user);
        if (err) {
                kfree(newscreen);
-               kfree(new_uniscr);
+               vc_uniscr_free(new_uniscr);
                return err;
        }
 
index af648ba..4610545 100644 (file)
@@ -114,7 +114,7 @@ static int ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
                        hw_write_id_reg(ci, HS_PHY_GENCONFIG_2,
                                        HS_PHY_ULPI_TX_PKT_EN_CLR_FIX, 0);
 
-               if (!IS_ERR(ci->platdata->vbus_extcon.edev)) {
+               if (!IS_ERR(ci->platdata->vbus_extcon.edev) || ci->role_switch) {
                        hw_write_id_reg(ci, HS_PHY_GENCONFIG_2,
                                        HS_PHY_SESS_VLD_CTRL_EN,
                                        HS_PHY_SESS_VLD_CTRL_EN);
index 6833c91..b9db981 100644 (file)
@@ -217,6 +217,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct usb_memory *usbm = NULL;
        struct usb_dev_state *ps = file->private_data;
+       struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus);
        size_t size = vma->vm_end - vma->vm_start;
        void *mem;
        unsigned long flags;
@@ -250,9 +251,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
        usbm->vma_use_count = 1;
        INIT_LIST_HEAD(&usbm->memlist);
 
-       if (remap_pfn_range(vma, vma->vm_start,
-                       virt_to_phys(usbm->mem) >> PAGE_SHIFT,
-                       size, vma->vm_page_prot) < 0) {
+       if (dma_mmap_coherent(hcd->self.sysdev, vma, mem, dma_handle, size)) {
                dec_usb_memory_use_count(usbm, &usbm->vma_use_count);
                return -EAGAIN;
        }
index a48678a..6197938 100644 (file)
@@ -1144,11 +1144,11 @@ void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr,
 
        if (usb_endpoint_out(epaddr)) {
                ep = dev->ep_out[epnum];
-               if (reset_hardware)
+               if (reset_hardware && epnum != 0)
                        dev->ep_out[epnum] = NULL;
        } else {
                ep = dev->ep_in[epnum];
-               if (reset_hardware)
+               if (reset_hardware && epnum != 0)
                        dev->ep_in[epnum] = NULL;
        }
        if (ep) {
index ffd9841..d63072f 100644 (file)
@@ -1138,8 +1138,8 @@ static void garmin_read_process(struct garmin_data *garmin_data_p,
                   send it directly to the tty port */
                if (garmin_data_p->flags & FLAGS_QUEUING) {
                        pkt_add(garmin_data_p, data, data_length);
-               } else if (bulk_data ||
-                          getLayerId(data) == GARMIN_LAYERID_APPL) {
+               } else if (bulk_data || (data_length >= sizeof(u32) &&
+                               getLayerId(data) == GARMIN_LAYERID_APPL)) {
 
                        spin_lock_irqsave(&garmin_data_p->lock, flags);
                        garmin_data_p->flags |= APP_RESP_SEEN;
index 613f91a..ce0401d 100644 (file)
@@ -173,6 +173,7 @@ static const struct usb_device_id id_table[] = {
        {DEVICE_SWI(0x413c, 0x81b3)},   /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
        {DEVICE_SWI(0x413c, 0x81b5)},   /* Dell Wireless 5811e QDL */
        {DEVICE_SWI(0x413c, 0x81b6)},   /* Dell Wireless 5811e QDL */
+       {DEVICE_SWI(0x413c, 0x81cc)},   /* Dell Wireless 5816e */
        {DEVICE_SWI(0x413c, 0x81cf)},   /* Dell Wireless 5819 */
        {DEVICE_SWI(0x413c, 0x81d0)},   /* Dell Wireless 5819 */
        {DEVICE_SWI(0x413c, 0x81d1)},   /* Dell Wireless 5818 */
index 1b23741..37157ed 100644 (file)
  * and don't forget to CC: the USB development list <linux-usb@vger.kernel.org>
  */
 
+/* Reported-by: Julian Groß <julian.g@posteo.de> */
+UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999,
+               "LaCie",
+               "2Big Quadra USB3",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_REPORT_OPCODES),
+
 /*
  * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
  * commands in UAS mode.  Observed with the 1.28 firmware; are there others?
index f5c5e0a..67c5139 100644 (file)
@@ -157,6 +157,10 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state)
        req.mode_data |= (state->mode - TYPEC_STATE_MODAL) <<
                         PMC_USB_ALTMODE_DP_MODE_SHIFT;
 
+       if (data->status & DP_STATUS_HPD_STATE)
+               req.mode_data |= PMC_USB_DP_HPD_LVL <<
+                                PMC_USB_ALTMODE_DP_MODE_SHIFT;
+
        return pmc_usb_command(port, (void *)&req, sizeof(req));
 }
 
@@ -298,11 +302,11 @@ static int pmc_usb_register_port(struct pmc_usb *pmc, int index,
        struct typec_mux_desc mux_desc = { };
        int ret;
 
-       ret = fwnode_property_read_u8(fwnode, "usb2-port", &port->usb2_port);
+       ret = fwnode_property_read_u8(fwnode, "usb2-port-number", &port->usb2_port);
        if (ret)
                return ret;
 
-       ret = fwnode_property_read_u8(fwnode, "usb3-port", &port->usb3_port);
+       ret = fwnode_property_read_u8(fwnode, "usb3-port-number", &port->usb3_port);
        if (ret)
                return ret;
 
index 85b32c3..cc1d647 100644 (file)
@@ -342,8 +342,8 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
        vma = find_vma_intersection(mm, vaddr, vaddr + 1);
 
        if (vma && vma->vm_flags & VM_PFNMAP) {
-               *pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
-               if (is_invalid_reserved_pfn(*pfn))
+               if (!follow_pfn(vma, vaddr, pfn) &&
+                   is_invalid_reserved_pfn(*pfn))
                        ret = 0;
        }
 done:
@@ -555,7 +555,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
                        continue;
                }
 
-               remote_vaddr = dma->vaddr + iova - dma->iova;
+               remote_vaddr = dma->vaddr + (iova - dma->iova);
                ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
                                             do_accounting);
                if (ret)
@@ -2345,10 +2345,10 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
        vaddr = dma->vaddr + offset;
 
        if (write)
-               *copied = __copy_to_user((void __user *)vaddr, data,
+               *copied = copy_to_user((void __user *)vaddr, data,
                                         count) ? 0 : count;
        else
-               *copied = __copy_from_user(data, (void __user *)vaddr,
+               *copied = copy_from_user(data, (void __user *)vaddr,
                                           count) ? 0 : count;
        if (kthread)
                unuse_mm(mm);
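
The double-underscore uaccess variants skip the access_ok() range check and
are only safe when the address was validated earlier; these vaddrs derive
from user-controlled DMA mappings, so the checked copy_to_user() and
copy_from_user() are required. A sketch of the checked pattern:

  #include <linux/uaccess.h>

  /* copy_to_user() validates the user pointer itself; the bare
   * __copy_to_user() must never see an unchecked address. */
  static int example_write_user(void __user *uptr, const void *kbuf,
                                size_t len)
  {
          if (copy_to_user(uptr, kbuf, len))
                  return -EFAULT;
          return 0;
  }
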
index e36aaf9..fb4e944 100644 (file)
@@ -181,14 +181,14 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
                        break;
                }
 
-               vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
-               added = true;
-
-               /* Deliver to monitoring devices all correctly transmitted
-                * packets.
+               /* Deliver to monitoring devices all packets that we
+                * will transmit.
                 */
                virtio_transport_deliver_tap_pkt(pkt);
 
+               vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
+               added = true;
+
                pkt->off += payload_len;
                total_len += payload_len;
 
@@ -196,6 +196,12 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
                 * to send it with the next available buffer.
                 */
                if (pkt->off < pkt->len) {
+                       /* We are queueing the same virtio_vsock_pkt to handle
+                        * the remaining bytes, and we want to deliver it
+                        * to monitoring devices in the next iteration.
+                        */
+                       pkt->tap_delivered = false;
+
                        spin_lock_bh(&vsock->send_pkt_list_lock);
                        list_add(&pkt->list, &vsock->send_pkt_list);
                        spin_unlock_bh(&vsock->send_pkt_list_lock);
@@ -543,6 +549,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
                mutex_unlock(&vq->mutex);
        }
 
+       /* Some packets may have been queued before the device was started,
+        * let's kick the send worker to send them.
+        */
+       vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
+
        mutex_unlock(&vsock->dev.mutex);
        return 0;
 
index 9c380e7..0cc0257 100644 (file)
@@ -391,7 +391,7 @@ static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
        struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct prelim_ref *ref = NULL;
-       struct prelim_ref target = {0};
+       struct prelim_ref target = {};
        int result;
 
        target.parent = bytenr;
index 47f66c6..696f471 100644 (file)
@@ -916,7 +916,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
-               goto out;
+               goto out_put_group;
        }
 
        /*
@@ -954,7 +954,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                ret = btrfs_orphan_add(trans, BTRFS_I(inode));
                if (ret) {
                        btrfs_add_delayed_iput(inode);
-                       goto out;
+                       goto out_put_group;
                }
                clear_nlink(inode);
                /* One for the block groups ref */
@@ -977,13 +977,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 
        ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
        if (ret < 0)
-               goto out;
+               goto out_put_group;
        if (ret > 0)
                btrfs_release_path(path);
        if (ret == 0) {
                ret = btrfs_del_item(trans, tree_root, path);
                if (ret)
-                       goto out;
+                       goto out_put_group;
                btrfs_release_path(path);
        }
 
@@ -1102,9 +1102,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 
        ret = remove_block_group_free_space(trans, block_group);
        if (ret)
-               goto out;
+               goto out_put_group;
 
-       btrfs_put_block_group(block_group);
+       /* Once for the block groups rbtree */
        btrfs_put_block_group(block_group);
 
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
@@ -1127,6 +1127,10 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                /* once for the tree */
                free_extent_map(em);
        }
+
+out_put_group:
+       /* Once for the lookup reference */
+       btrfs_put_block_group(block_group);
 out:
        if (remove_rsv)
                btrfs_delayed_refs_rsv_release(fs_info, 1);
@@ -1288,11 +1292,15 @@ static bool clean_pinned_extents(struct btrfs_trans_handle *trans,
        if (ret)
                goto err;
        mutex_unlock(&fs_info->unused_bg_unpin_mutex);
+       if (prev_trans)
+               btrfs_put_transaction(prev_trans);
 
        return true;
 
 err:
        mutex_unlock(&fs_info->unused_bg_unpin_mutex);
+       if (prev_trans)
+               btrfs_put_transaction(prev_trans);
        btrfs_dec_block_group_ro(bg);
        return false;
 }
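
The fix above releases prev_trans on both the success and the error exit. A minimal sketch of the underlying discipline (obj_get/obj_put are hypothetical names): every path out of the function drops the temporary reference exactly once.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj { atomic_int refs; };

static struct obj *obj_get(struct obj *o)
{
        atomic_fetch_add(&o->refs, 1);
        return o;
}

static void obj_put(struct obj *o)
{
        if (atomic_fetch_sub(&o->refs, 1) == 1)   /* last reference */
                free(o);
}

static int do_work(struct obj *o, int fail)
{
        struct obj *ref = obj_get(o);   /* temporary reference for this call */
        int ret = 0;

        if (fail) {
                ret = -1;
                goto out;               /* error path still reaches the put */
        }
        /* ... use ref ... */
out:
        obj_put(ref);                   /* exactly one put on every path */
        return ret;
}

int main(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        atomic_init(&o->refs, 1);
        do_work(o, 0);
        do_work(o, 1);
        obj_put(o);
        return 0;
}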
index 21a1577..353228d 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 
 #ifndef BTRFS_DISCARD_H
 #define BTRFS_DISCARD_H
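
The hunk above switches the identifier to a block comment because discard.h is a header: per the kernel's license-rules documentation, .c files carry the SPDX tag as a C99 // comment on the first line, while headers use the /* */ form. A minimal header following that convention:

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef EXAMPLE_H
#define EXAMPLE_H

int example_fn(void);

#endif /* EXAMPLE_H */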
index a6cb5cb..d10c7be 100644 (file)
@@ -2036,9 +2036,6 @@ void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
                for (i = 0; i < ret; i++)
                        btrfs_drop_and_free_fs_root(fs_info, gang[i]);
        }
-
-       if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
-               btrfs_free_log_root_tree(NULL, fs_info);
 }
 
 static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
@@ -3888,7 +3885,7 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
        spin_unlock(&fs_info->fs_roots_radix_lock);
 
        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
-               btrfs_free_log(NULL, root);
+               ASSERT(root->log_root == NULL);
                if (root->reloc_root) {
                        btrfs_put_root(root->reloc_root);
                        root->reloc_root = NULL;
@@ -4211,6 +4208,36 @@ static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
        up_write(&fs_info->cleanup_work_sem);
 }
 
+static void btrfs_drop_all_logs(struct btrfs_fs_info *fs_info)
+{
+       struct btrfs_root *gang[8];
+       u64 root_objectid = 0;
+       int ret;
+
+       spin_lock(&fs_info->fs_roots_radix_lock);
+       while ((ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
+                                            (void **)gang, root_objectid,
+                                            ARRAY_SIZE(gang))) != 0) {
+               int i;
+
+               for (i = 0; i < ret; i++)
+                       gang[i] = btrfs_grab_root(gang[i]);
+               spin_unlock(&fs_info->fs_roots_radix_lock);
+
+               for (i = 0; i < ret; i++) {
+                       if (!gang[i])
+                               continue;
+                       root_objectid = gang[i]->root_key.objectid;
+                       btrfs_free_log(NULL, gang[i]);
+                       btrfs_put_root(gang[i]);
+               }
+               root_objectid++;
+               spin_lock(&fs_info->fs_roots_radix_lock);
+       }
+       spin_unlock(&fs_info->fs_roots_radix_lock);
+       btrfs_free_log_root_tree(NULL, fs_info);
+}
+
 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
 {
        struct btrfs_ordered_extent *ordered;
@@ -4603,6 +4630,7 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
        btrfs_destroy_delayed_inodes(fs_info);
        btrfs_assert_delayed_root_empty(fs_info);
        btrfs_destroy_all_delalloc_inodes(fs_info);
+       btrfs_drop_all_logs(fs_info);
        mutex_unlock(&fs_info->transaction_kthread_mutex);
 
        return 0;
index d35936c..03bc713 100644 (file)
@@ -4559,6 +4559,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
                if (IS_ERR(fs_root)) {
                        err = PTR_ERR(fs_root);
                        list_add_tail(&reloc_root->root_list, &reloc_roots);
+                       btrfs_end_transaction(trans);
                        goto out_unset;
                }
 
index 8cede6e..2d54981 100644 (file)
@@ -662,10 +662,19 @@ again:
        }
 
 got_it:
-       btrfs_record_root_in_trans(h, root);
-
        if (!current->journal_info)
                current->journal_info = h;
+
+       /*
+        * btrfs_record_root_in_trans() needs to alloc new extents, and may
+        * call btrfs_join_transaction() while we're also starting a
+        * transaction.
+        *
+        * Thus it needs to be called after current->journal_info is
+        * initialized, or we can deadlock.
+        */
+       btrfs_record_root_in_trans(h, root);
+
        return h;
 
 join_fail:
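
The ordering matters because the helper can re-enter the transaction code, which keys off current->journal_info to detect an already-running transaction. A rough userspace analogy using a thread-local marker (all names illustrative):

#include <stdio.h>

static _Thread_local void *journal_info;  /* stands in for current->journal_info */

static void helper(void)
{
        /* A re-entrant call joins the transaction it finds, instead of
         * trying to start a second one and deadlocking. */
        if (journal_info)
                printf("joining existing transaction %p\n", journal_info);
}

static void start_transaction(void)
{
        static int handle;

        journal_info = &handle;   /* publish BEFORE anything that may re-enter */
        helper();
        journal_info = NULL;
}

int main(void)
{
        start_transaction();
        return 0;
}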
index ec36a7c..02ebdd9 100644 (file)
@@ -4226,6 +4226,9 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
        const u64 ino = btrfs_ino(inode);
        struct btrfs_path *dst_path = NULL;
        bool dropped_extents = false;
+       u64 truncate_offset = i_size;
+       struct extent_buffer *leaf;
+       int slot;
        int ins_nr = 0;
        int start_slot;
        int ret;
@@ -4240,9 +4243,43 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
        if (ret < 0)
                goto out;
 
+       /*
+        * We must check if there is a prealloc extent that starts before the
+        * i_size and crosses the i_size boundary. This ensures we later
+        * truncate down to the end of that extent and not to i_size, as
+        * otherwise we would end up losing part of the prealloc extent after
+        * a log replay and with an implicit hole if there is another prealloc
+        * extent that starts at an offset beyond i_size.
+        */
+       ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
+       if (ret < 0)
+               goto out;
+
+       if (ret == 0) {
+               struct btrfs_file_extent_item *ei;
+
+               leaf = path->nodes[0];
+               slot = path->slots[0];
+               ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
+
+               if (btrfs_file_extent_type(leaf, ei) ==
+                   BTRFS_FILE_EXTENT_PREALLOC) {
+                       u64 extent_end;
+
+                       btrfs_item_key_to_cpu(leaf, &key, slot);
+                       extent_end = key.offset +
+                               btrfs_file_extent_num_bytes(leaf, ei);
+
+                       if (extent_end > i_size)
+                               truncate_offset = extent_end;
+               }
+       } else {
+               ret = 0;
+       }
+
        while (true) {
-               struct extent_buffer *leaf = path->nodes[0];
-               int slot = path->slots[0];
+               leaf = path->nodes[0];
+               slot = path->slots[0];
 
                if (slot >= btrfs_header_nritems(leaf)) {
                        if (ins_nr > 0) {
@@ -4280,7 +4317,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
                                ret = btrfs_truncate_inode_items(trans,
                                                         root->log_root,
                                                         &inode->vfs_inode,
-                                                        i_size,
+                                                        truncate_offset,
                                                         BTRFS_EXTENT_DATA_KEY);
                        } while (ret == -EAGAIN);
                        if (ret)
index cf7b7e1..cb73365 100644 (file)
@@ -1519,6 +1519,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
                spin_lock(&configfs_dirent_lock);
                configfs_detach_rollback(dentry);
                spin_unlock(&configfs_dirent_lock);
+               config_item_put(parent_item);
                return -EINTR;
        }
        frag->frag_dead = true;
index 408418e..478a0d8 100644 (file)
@@ -788,6 +788,14 @@ void do_coredump(const kernel_siginfo_t *siginfo)
        if (displaced)
                put_files_struct(displaced);
        if (!dump_interrupted()) {
+               /*
+                * With the usermode helper disabled via
+                * CONFIG_STATIC_USERMODEHELPER_PATH="", cprm.file is NULL.
+                */
+               if (!cprm.file) {
+                       pr_info("Core dump to |%s disabled\n", cn.corename);
+                       goto close_fail;
+               }
                file_start_write(cprm.file);
                core_dumped = binfmt->core_dump(&cprm);
                file_end_write(cprm.file);
index 8c59664..aba03ee 100644 (file)
@@ -1171,6 +1171,10 @@ static inline bool chain_epi_lockless(struct epitem *epi)
 {
        struct eventpoll *ep = epi->ep;
 
+       /* Fast preliminary check */
+       if (epi->next != EP_UNACTIVE_PTR)
+               return false;
+
        /* Check that the same epi has not been just chained from another CPU */
        if (cmpxchg(&epi->next, EP_UNACTIVE_PTR, NULL) != EP_UNACTIVE_PTR)
                return false;
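
The added load is a classic test-before-test-and-set: a plain read filters out the common already-claimed case so the cache line is not bounced by a failing cmpxchg. A minimal stdatomic sketch of the same shape (UNCLAIMED plays the role of EP_UNACTIVE_PTR):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define UNCLAIMED ((void *)-1L)

static bool try_claim(_Atomic(void *) *slot)
{
        void *expected = UNCLAIMED;

        /* Fast preliminary check: cheap shared read, no exclusive
         * cache-line ownership is taken. */
        if (atomic_load_explicit(slot, memory_order_relaxed) != UNCLAIMED)
                return false;

        /* Slow path: actually race for the slot. */
        return atomic_compare_exchange_strong(slot, &expected, NULL);
}

int main(void)
{
        _Atomic(void *) slot = UNCLAIMED;

        printf("first:  %d\n", try_claim(&slot));  /* 1 */
        printf("second: %d\n", try_claim(&slot));  /* 0, fast check fails */
        return 0;
}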
@@ -1237,16 +1241,12 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
         * chained in ep->ovflist and requeued later on.
         */
        if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {
-               if (epi->next == EP_UNACTIVE_PTR &&
-                   chain_epi_lockless(epi))
+               if (chain_epi_lockless(epi))
+                       ep_pm_stay_awake_rcu(epi);
+       } else if (!ep_is_linked(epi)) {
+               /* In the usual case, add event to ready list. */
+               if (list_add_tail_lockless(&epi->rdllink, &ep->rdllist))
                        ep_pm_stay_awake_rcu(epi);
-               goto out_unlock;
-       }
-
-       /* If this file is already in the ready list we exit soon */
-       if (!ep_is_linked(epi) &&
-           list_add_tail_lockless(&epi->rdllink, &ep->rdllist)) {
-               ep_pm_stay_awake_rcu(epi);
        }
 
        /*
@@ -1822,7 +1822,6 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 {
        int res = 0, eavail, timed_out = 0;
        u64 slack = 0;
-       bool waiter = false;
        wait_queue_entry_t wait;
        ktime_t expires, *to = NULL;
 
@@ -1867,21 +1866,23 @@ fetch_events:
         */
        ep_reset_busy_poll_napi_id(ep);
 
-       /*
-        * We don't have any available event to return to the caller.  We need
-        * to sleep here, and we will be woken by ep_poll_callback() when events
-        * become available.
-        */
-       if (!waiter) {
-               waiter = true;
-               init_waitqueue_entry(&wait, current);
-
+       do {
+               /*
+                * Internally init_wait() uses autoremove_wake_function(),
+                * thus the wait entry is removed from the wait queue on
+                * each wakeup. Why is this important? With several waiters,
+                * each new wakeup hits the next waiter, giving it the
+                * chance to harvest new events; otherwise a wakeup can be
+                * lost. This is also good performance-wise: on the normal
+                * wakeup path there is no need to call __remove_wait_queue()
+                * explicitly, so ep->lock, which would stall event
+                * delivery, is not taken.
+                */
+               init_wait(&wait);
                write_lock_irq(&ep->lock);
                __add_wait_queue_exclusive(&ep->wq, &wait);
                write_unlock_irq(&ep->lock);
-       }
 
-       for (;;) {
                /*
                 * We don't want to sleep if the ep_poll_callback() sends us
                 * a wakeup in between. That's why we set the task state
@@ -1911,10 +1912,20 @@ fetch_events:
                        timed_out = 1;
                        break;
                }
-       }
+
+               /* We were woken up, thus go and try to harvest some events */
+               eavail = 1;
+
+       } while (0);
 
        __set_current_state(TASK_RUNNING);
 
+       if (!list_empty_careful(&wait.entry)) {
+               write_lock_irq(&ep->lock);
+               __remove_wait_queue(&ep->wq, &wait);
+               write_unlock_irq(&ep->lock);
+       }
+
 send_events:
        /*
         * Try to transfer events to user space. In case we get 0 events and
@@ -1925,12 +1936,6 @@ send_events:
            !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
                goto fetch_events;
 
-       if (waiter) {
-               write_lock_irq(&ep->lock);
-               __remove_wait_queue(&ep->wq, &wait);
-               write_unlock_irq(&ep->lock);
-       }
-
        return res;
 }
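
The rewrite leans on the fact that init_wait() installs autoremove_wake_function(), so a woken entry unhooks itself from the queue and must be re-armed on each loop iteration. The usual shape of that pattern in kernel code looks like the sketch below (sketch only, not buildable outside a kernel tree; `condition` and `wq` are placeholders):

/* Sketch: wait on a queue with a self-removing entry. */
DEFINE_WAIT(wait);                 /* uses autoremove_wake_function() */

while (!condition) {
        /* re-arm every iteration: a wakeup removed us from the queue */
        prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
        if (condition)
                break;
        schedule();
}
finish_wait(&wq, &wait);           /* no-op if the wakeup already removed us */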
 
index c687f57..0b91b06 100644 (file)
@@ -524,6 +524,7 @@ enum {
        REQ_F_OVERFLOW_BIT,
        REQ_F_POLLED_BIT,
        REQ_F_BUFFER_SELECTED_BIT,
+       REQ_F_NO_FILE_TABLE_BIT,
 
        /* not a real bit, just to check we're not overflowing the space */
        __REQ_F_LAST_BIT,
@@ -577,6 +578,8 @@ enum {
        REQ_F_POLLED            = BIT(REQ_F_POLLED_BIT),
        /* buffer already selected */
        REQ_F_BUFFER_SELECTED   = BIT(REQ_F_BUFFER_SELECTED_BIT),
+       /* doesn't need file table for this request */
+       REQ_F_NO_FILE_TABLE     = BIT(REQ_F_NO_FILE_TABLE_BIT),
 };
 
 struct async_poll {
@@ -799,6 +802,7 @@ static const struct io_op_def io_op_defs[] = {
                .needs_file             = 1,
                .fd_non_neg             = 1,
                .needs_fs               = 1,
+               .file_table             = 1,
        },
        [IORING_OP_READ] = {
                .needs_mm               = 1,
@@ -1291,7 +1295,7 @@ static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
        struct io_kiocb *req;
 
        req = ctx->fallback_req;
-       if (!test_and_set_bit_lock(0, (unsigned long *) ctx->fallback_req))
+       if (!test_and_set_bit_lock(0, (unsigned long *) &ctx->fallback_req))
                return req;
 
        return NULL;
@@ -1378,7 +1382,7 @@ static void __io_free_req(struct io_kiocb *req)
        if (likely(!io_is_fallback_req(req)))
                kmem_cache_free(req_cachep, req);
        else
-               clear_bit_unlock(0, (unsigned long *) req->ctx->fallback_req);
+               clear_bit_unlock(0, (unsigned long *) &req->ctx->fallback_req);
 }
 
 struct req_batch {
@@ -2034,7 +2038,7 @@ static struct file *__io_file_get(struct io_submit_state *state, int fd)
  * any file. For now, just ensure that anything potentially problematic is done
  * inline.
  */
-static bool io_file_supports_async(struct file *file)
+static bool io_file_supports_async(struct file *file, int rw)
 {
        umode_t mode = file_inode(file)->i_mode;
 
@@ -2043,7 +2047,13 @@ static bool io_file_supports_async(struct file *file)
        if (S_ISREG(mode) && file->f_op != &io_uring_fops)
                return true;
 
-       return false;
+       if (!(file->f_mode & FMODE_NOWAIT))
+               return false;
+
+       if (rw == READ)
+               return file->f_op->read_iter != NULL;
+
+       return file->f_op->write_iter != NULL;
 }
 
 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
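
FMODE_NOWAIT is the same capability that backs RWF_NOWAIT from userspace: a file that sets it promises that a read/write can fail with -EAGAIN instead of blocking, which is exactly what io_uring's inline submission needs. A small userspace probe of the flag's effect (Linux 4.14+, glibc 2.26+ for preadv2):

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        char buf[4096];
        struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
        int fd = open(argc > 1 ? argv[1] : "/etc/hostname", O_RDONLY);
        ssize_t n;

        if (fd < 0)
                return 1;
        /* Ask for a non-blocking read; EAGAIN means "would block". */
        n = preadv2(fd, &iov, 1, 0, RWF_NOWAIT);
        if (n < 0 && errno == EAGAIN)
                printf("would block, submit to a worker instead\n");
        else
                printf("read %zd bytes without blocking\n", n);
        close(fd);
        return 0;
}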
@@ -2571,7 +2581,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock)
         * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
         * we know to async punt it even if it was opened O_NONBLOCK
         */
-       if (force_nonblock && !io_file_supports_async(req->file))
+       if (force_nonblock && !io_file_supports_async(req->file, READ))
                goto copy_iov;
 
        iov_count = iov_iter_count(&iter);
@@ -2594,7 +2604,8 @@ copy_iov:
                        if (ret)
                                goto out_free;
                        /* any defer here is final, must blocking retry */
-                       if (!(req->flags & REQ_F_NOWAIT))
+                       if (!(req->flags & REQ_F_NOWAIT) &&
+                           !file_can_poll(req->file))
                                req->flags |= REQ_F_MUST_PUNT;
                        return -EAGAIN;
                }
@@ -2662,7 +2673,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock)
         * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
         * we know to async punt it even if it was opened O_NONBLOCK
         */
-       if (force_nonblock && !io_file_supports_async(req->file))
+       if (force_nonblock && !io_file_supports_async(req->file, WRITE))
                goto copy_iov;
 
        /* file path doesn't support NOWAIT for non-direct_IO */
@@ -2716,7 +2727,8 @@ copy_iov:
                        if (ret)
                                goto out_free;
                        /* any defer here is final, must blocking retry */
-                       req->flags |= REQ_F_MUST_PUNT;
+                       if (!file_can_poll(req->file))
+                               req->flags |= REQ_F_MUST_PUNT;
                        return -EAGAIN;
                }
        }
@@ -2756,15 +2768,6 @@ static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        return 0;
 }
 
-static bool io_splice_punt(struct file *file)
-{
-       if (get_pipe_info(file))
-               return false;
-       if (!io_file_supports_async(file))
-               return true;
-       return !(file->f_flags & O_NONBLOCK);
-}
-
 static int io_splice(struct io_kiocb *req, bool force_nonblock)
 {
        struct io_splice *sp = &req->splice;
@@ -2774,11 +2777,8 @@ static int io_splice(struct io_kiocb *req, bool force_nonblock)
        loff_t *poff_in, *poff_out;
        long ret;
 
-       if (force_nonblock) {
-               if (io_splice_punt(in) || io_splice_punt(out))
-                       return -EAGAIN;
-               flags |= SPLICE_F_NONBLOCK;
-       }
+       if (force_nonblock)
+               return -EAGAIN;
 
        poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
        poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
@@ -3355,8 +3355,12 @@ static int io_statx(struct io_kiocb *req, bool force_nonblock)
        struct kstat stat;
        int ret;
 
-       if (force_nonblock)
+       if (force_nonblock) {
+               /* only need file table for an actual valid fd */
+               if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
+                       req->flags |= REQ_F_NO_FILE_TABLE;
                return -EAGAIN;
+       }
 
        if (vfs_stat_set_lookup_flags(&lookup_flags, ctx->how.flags))
                return -EINVAL;
@@ -3502,7 +3506,7 @@ static void io_sync_file_range_finish(struct io_wq_work **workptr)
        if (io_req_cancelled(req))
                return;
        __io_sync_file_range(req);
-       io_put_req(req); /* put submission ref */
+       io_steal_work(req, workptr);
 }
 
 static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
@@ -5015,7 +5019,7 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        int ret;
 
        /* Still need defer if there is pending req in defer list. */
-       if (!req_need_defer(req) && list_empty(&ctx->defer_list))
+       if (!req_need_defer(req) && list_empty_careful(&ctx->defer_list))
                return 0;
 
        if (!req->io && io_alloc_async_ctx(req))
@@ -5429,7 +5433,7 @@ static int io_grab_files(struct io_kiocb *req)
        int ret = -EBADF;
        struct io_ring_ctx *ctx = req->ctx;
 
-       if (req->work.files)
+       if (req->work.files || (req->flags & REQ_F_NO_FILE_TABLE))
                return 0;
        if (!ctx->ring_file)
                return -EBADF;
@@ -7327,7 +7331,7 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
         * it could cause shutdown to hang.
         */
        while (ctx->sqo_thread && !wq_has_sleeper(&ctx->sqo_wait))
-               cpu_relax();
+               cond_resched();
 
        io_kill_timeouts(ctx);
        io_poll_remove_all(ctx);
index 282d45b..5e80b40 100644 (file)
@@ -55,6 +55,7 @@ EXPORT_SYMBOL(vfs_ioctl);
 static int ioctl_fibmap(struct file *filp, int __user *p)
 {
        struct inode *inode = file_inode(filp);
+       struct super_block *sb = inode->i_sb;
        int error, ur_block;
        sector_t block;
 
@@ -71,6 +72,13 @@ static int ioctl_fibmap(struct file *filp, int __user *p)
        block = ur_block;
        error = bmap(inode, &block);
 
+       if (block > INT_MAX) {
+               error = -ERANGE;
+               pr_warn_ratelimited("[%s/%d] FS: %s File: %pD4 would truncate fibmap result\n",
+                                   current->comm, task_pid_nr(current),
+                                   sb->s_id, filp);
+       }
+
        if (error)
                ur_block = 0;
        else
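
The overflow being guarded against is baked into the ioctl's ABI: FIBMAP exchanges the block number through a plain int, so any physical block above INT_MAX simply cannot be represented. A minimal caller (typically needs CAP_SYS_RAWIO):

#include <fcntl.h>
#include <linux/fs.h>     /* FIBMAP */
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        int fd, block = 0;     /* in: logical block, out: physical block */

        if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
                return 1;
        /* The result is squeezed into an int: hence the -ERANGE check. */
        if (ioctl(fd, FIBMAP, &block) == 0)
                printf("logical block 0 -> physical block %d\n", block);
        else
                perror("FIBMAP");
        close(fd);
        return 0;
}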
index bccf305..d55e8f4 100644 (file)
@@ -117,10 +117,7 @@ iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length,
 
        if (iomap->type == IOMAP_MAPPED) {
                addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits;
-               if (addr > INT_MAX)
-                       WARN(1, "would truncate bmap result\n");
-               else
-                       *bno = addr;
+               *bno = addr;
        }
        return 0;
 }
index c5c3fc6..26c94b3 100644 (file)
@@ -253,37 +253,45 @@ int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
 
 int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 {
-       struct posix_acl *alloc = NULL, *dfacl = NULL;
+       struct posix_acl *orig = acl, *dfacl = NULL, *alloc;
        int status;
 
        if (S_ISDIR(inode->i_mode)) {
                switch(type) {
                case ACL_TYPE_ACCESS:
-                       alloc = dfacl = get_acl(inode, ACL_TYPE_DEFAULT);
+                       alloc = get_acl(inode, ACL_TYPE_DEFAULT);
                        if (IS_ERR(alloc))
                                goto fail;
+                       dfacl = alloc;
                        break;
 
                case ACL_TYPE_DEFAULT:
-                       dfacl = acl;
-                       alloc = acl = get_acl(inode, ACL_TYPE_ACCESS);
+                       alloc = get_acl(inode, ACL_TYPE_ACCESS);
                        if (IS_ERR(alloc))
                                goto fail;
+                       dfacl = acl;
+                       acl = alloc;
                        break;
                }
        }
 
        if (acl == NULL) {
-               alloc = acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
+               alloc = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
                if (IS_ERR(alloc))
                        goto fail;
+               acl = alloc;
        }
        status = __nfs3_proc_setacls(inode, acl, dfacl);
-       posix_acl_release(alloc);
+out:
+       if (acl != orig)
+               posix_acl_release(acl);
+       if (dfacl != orig)
+               posix_acl_release(dfacl);
        return status;
 
 fail:
-       return PTR_ERR(alloc);
+       status = PTR_ERR(alloc);
+       goto out;
 }
 
 const struct xattr_handler *nfs3_xattr_handlers[] = {
index 512afb1..a0c1e65 100644 (file)
@@ -7891,6 +7891,7 @@ static void
 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
 {
        struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp;
+       struct nfs41_bind_conn_to_session_res *res = task->tk_msg.rpc_resp;
        struct nfs_client *clp = args->client;
 
        switch (task->tk_status) {
@@ -7899,6 +7900,12 @@ nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
                nfs4_schedule_session_recovery(clp->cl_session,
                                task->tk_status);
        }
+       if (args->dir == NFS4_CDFC4_FORE_OR_BOTH &&
+                       res->dir != NFS4_CDFS4_BOTH) {
+               rpc_task_close_connection(task);
+               if (args->retries++ < MAX_BIND_CONN_TO_SESSION_RETRIES)
+                       rpc_restart_call(task);
+       }
 }
 
 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = {
@@ -7921,6 +7928,7 @@ int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt,
        struct nfs41_bind_conn_to_session_args args = {
                .client = clp,
                .dir = NFS4_CDFC4_FORE_OR_BOTH,
+               .retries = 0,
        };
        struct nfs41_bind_conn_to_session_res res;
        struct rpc_message msg = {
@@ -9191,8 +9199,7 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
        nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0);
 
        task = rpc_run_task(&task_setup_data);
-       if (IS_ERR(task))
-               return ERR_CAST(task);
+
        status = rpc_wait_for_completion_task(task);
        if (status != 0)
                goto out;
index b8d78f3..dd2e14f 100644 (file)
@@ -1332,13 +1332,15 @@ _pnfs_return_layout(struct inode *ino)
                        !valid_layout) {
                spin_unlock(&ino->i_lock);
                dprintk("NFS: %s no layout segments to return\n", __func__);
-               goto out_put_layout_hdr;
+               goto out_wait_layoutreturn;
        }
 
        send = pnfs_prepare_layoutreturn(lo, &stateid, &cred, NULL);
        spin_unlock(&ino->i_lock);
        if (send)
                status = pnfs_send_layoutreturn(lo, &stateid, &cred, IOMODE_ANY, true);
+out_wait_layoutreturn:
+       wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN, TASK_UNINTERRUPTIBLE);
 out_put_layout_hdr:
        pnfs_free_lseg_list(&tmp_list);
        pnfs_put_layout_hdr(lo);
@@ -1456,18 +1458,15 @@ retry:
        /* lo ref dropped in pnfs_roc_release() */
        layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &lc_cred, &iomode);
        /* If the creds don't match, we can't compound the layoutreturn */
-       if (!layoutreturn)
+       if (!layoutreturn || cred_fscmp(cred, lc_cred) != 0)
                goto out_noroc;
-       if (cred_fscmp(cred, lc_cred) != 0)
-               goto out_noroc_put_cred;
 
        roc = layoutreturn;
        pnfs_init_layoutreturn_args(args, lo, &stateid, iomode);
        res->lrs_present = 0;
        layoutreturn = false;
-
-out_noroc_put_cred:
        put_cred(lc_cred);
+
 out_noroc:
        spin_unlock(&ino->i_lock);
        rcu_read_unlock();
index 59ef3b1..bdb6d0c 100644 (file)
@@ -185,7 +185,7 @@ static int __nfs_list_for_each_server(struct list_head *head,
 
        rcu_read_lock();
        list_for_each_entry_rcu(server, head, client_link) {
-               if (!nfs_sb_active(server->super))
+               if (!(server->super && nfs_sb_active(server->super)))
                        continue;
                rcu_read_unlock();
                if (last)
index 8e4f1ac..1de77f1 100644 (file)
@@ -275,7 +275,6 @@ static ssize_t dlmfs_file_write(struct file *filp,
                                loff_t *ppos)
 {
        int bytes_left;
-       ssize_t writelen;
        char *lvb_buf;
        struct inode *inode = file_inode(filp);
 
@@ -285,32 +284,30 @@ static ssize_t dlmfs_file_write(struct file *filp,
        if (*ppos >= i_size_read(inode))
                return -ENOSPC;
 
+       /* don't write past the lvb */
+       if (count > i_size_read(inode) - *ppos)
+               count = i_size_read(inode) - *ppos;
+
        if (!count)
                return 0;
 
        if (!access_ok(buf, count))
                return -EFAULT;
 
-       /* don't write past the lvb */
-       if ((count + *ppos) > i_size_read(inode))
-               writelen = i_size_read(inode) - *ppos;
-       else
-               writelen = count - *ppos;
-
-       lvb_buf = kmalloc(writelen, GFP_NOFS);
+       lvb_buf = kmalloc(count, GFP_NOFS);
        if (!lvb_buf)
                return -ENOMEM;
 
-       bytes_left = copy_from_user(lvb_buf, buf, writelen);
-       writelen -= bytes_left;
-       if (writelen)
-               user_dlm_write_lvb(inode, lvb_buf, writelen);
+       bytes_left = copy_from_user(lvb_buf, buf, count);
+       count -= bytes_left;
+       if (count)
+               user_dlm_write_lvb(inode, lvb_buf, count);
 
        kfree(lvb_buf);
 
-       *ppos = *ppos + writelen;
-       mlog(0, "wrote %zd bytes\n", writelen);
-       return writelen;
+       *ppos = *ppos + count;
+       mlog(0, "wrote %zu bytes\n", count);
+       return count;
 }
 
 static void dlmfs_init_once(void *foo)
index 49f6d7f..1106137 100644 (file)
@@ -261,14 +261,13 @@ static int propagate_one(struct mount *m)
        child = copy_tree(last_source, last_source->mnt.mnt_root, type);
        if (IS_ERR(child))
                return PTR_ERR(child);
+       read_seqlock_excl(&mount_lock);
        mnt_set_mountpoint(m, mp, child);
+       if (m->mnt_master != dest_master)
+               SET_MNT_MARK(m->mnt_master);
+       read_sequnlock_excl(&mount_lock);
        last_dest = m;
        last_source = child;
-       if (m->mnt_master != dest_master) {
-               read_seqlock_excl(&mount_lock);
-               SET_MNT_MARK(m->mnt_master);
-               read_sequnlock_excl(&mount_lock);
-       }
        hlist_add_head(&child->mnt_hash, list);
        return count_mounts(m->mnt_ns, child);
 }
index cd35253..a288cd6 100644 (file)
@@ -1302,8 +1302,8 @@ int get_tree_bdev(struct fs_context *fc,
        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (bdev->bd_fsfreeze_count > 0) {
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
-               blkdev_put(bdev, mode);
                warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
+               blkdev_put(bdev, mode);
                return -EBUSY;
        }
 
index 26f0ecf..0bbfd64 100644 (file)
@@ -65,6 +65,7 @@ struct amba_device {
        struct device           dev;
        struct resource         res;
        struct clk              *pclk;
+       struct device_dma_parameters dma_parms;
        unsigned int            periphid;
        unsigned int            cid;
        struct amba_cs_uci_id   uci;
index 1ade486..57bcef6 100644 (file)
@@ -329,13 +329,12 @@ struct dma_buf {
 
 /**
  * struct dma_buf_attach_ops - importer operations for an attachment
- * @move_notify: [optional] notification that the DMA-buf is moving
  *
  * Attachment operations implemented by the importer.
  */
 struct dma_buf_attach_ops {
        /**
-        * @move_notify
+        * @move_notify: [optional] notification that the DMA-buf is moving
         *
         * If this callback is provided the framework can avoid pinning the
         * backing store while mappings exist.
index 21065c0..e1c0333 100644 (file)
@@ -83,9 +83,9 @@ enum dma_transfer_direction {
 /**
  * Interleaved Transfer Request
  * ----------------------------
- * A chunk is collection of contiguous bytes to be transfered.
+ * A chunk is a collection of contiguous bytes to be transferred.
 * The gap (in bytes) between two chunks is called the inter-chunk-gap (ICG).
- * ICGs may or maynot change between chunks.
+ * ICGs may or may not change between chunks.
  * A FRAME is the smallest series of contiguous {chunk,icg} pairs,
  *  that when repeated an integral number of times, specifies the transfer.
 * A transfer template is a specification of a Frame, the number of times
@@ -341,13 +341,11 @@ struct dma_chan {
  * @chan: driver channel device
  * @device: sysfs device
  * @dev_id: parent dma_device dev_id
- * @idr_ref: reference count to gate release of dma_device dev_id
  */
 struct dma_chan_dev {
        struct dma_chan *chan;
        struct device device;
        int dev_id;
-       atomic_t *idr_ref;
 };
 
 /**
@@ -835,6 +833,8 @@ struct dma_device {
        int dev_id;
        struct device *dev;
        struct module *owner;
+       struct ida chan_ida;
+       struct mutex chan_mutex;        /* to protect chan_ida */
 
        u32 src_addr_widths;
        u32 dst_addr_widths;
@@ -1069,7 +1069,7 @@ static inline int dmaengine_terminate_all(struct dma_chan *chan)
  * dmaengine_synchronize() needs to be called before it is safe to free
  * any memory that is accessed by previously submitted descriptors or before
  * freeing any resources accessed from within the completion callback of any
- * perviously submitted descriptors.
+ * previously submitted descriptors.
  *
  * This function can be called from atomic context as well as from within a
  * complete callback of a descriptor submitted on the same channel.
@@ -1091,7 +1091,7 @@ static inline int dmaengine_terminate_async(struct dma_chan *chan)
  *
 * Synchronizes the DMA channel termination to the current context. When this
  * function returns it is guaranteed that all transfers for previously issued
- * descriptors have stopped and and it is safe to free the memory assoicated
+ * descriptors have stopped and it is safe to free the memory associated
  * with them. Furthermore it is guaranteed that all complete callback functions
  * for a previously submitted descriptor have finished running and it is safe to
  * free resources accessed from within the complete callbacks.
index 4f6f59b..45cc10c 100644 (file)
@@ -983,7 +983,7 @@ struct file_handle {
        __u32 handle_bytes;
        int handle_type;
        /* file identifier */
-       unsigned char f_handle[0];
+       unsigned char f_handle[];
 };
 
 static inline struct file *get_file(struct file *f)
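
f_handle is a flexible array member, so callers size the allocation at runtime; the C99 `[]` spelling and the older GNU `[0]` form have the same layout, but `[]` lets the compiler diagnose misuse, which is why in-kernel headers move to it (the uapi hunks further down go the other way, apparently because some userspace toolchains reject the C99 form). Typical usage of this struct via name_to_handle_at():

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
        int mount_id;
        /* Allocate header plus MAX_HANDLE_SZ bytes for the trailing array. */
        struct file_handle *fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);

        if (!fh || argc < 2)
                return 1;
        fh->handle_bytes = MAX_HANDLE_SZ;   /* in: capacity, out: actual size */
        if (name_to_handle_at(AT_FDCWD, argv[1], fh, &mount_id, 0) == 0)
                printf("handle: %u bytes, type %d\n", fh->handle_bytes,
                       fh->handle_type);
        free(fh);
        return 0;
}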
index 9cd4455..1bdd027 100644 (file)
@@ -55,7 +55,7 @@ LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, struct linux_binprm *bprm)
 LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, struct linux_binprm *bprm)
 LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc,
         struct fs_context *src_sc)
-LSM_HOOK(int, 0, fs_context_parse_param, struct fs_context *fc,
+LSM_HOOK(int, -ENOPARAM, fs_context_parse_param, struct fs_context *fc,
         struct fs_parameter *param)
 LSM_HOOK(int, 0, sb_alloc_security, struct super_block *sb)
 LSM_HOOK(void, LSM_RET_VOID, sb_free_security, struct super_block *sb)
index 4402304..e5f3e7d 100644 (file)
@@ -1317,11 +1317,13 @@ struct nfs41_impl_id {
        struct nfstime4                 date;
 };
 
+#define MAX_BIND_CONN_TO_SESSION_RETRIES 3
 struct nfs41_bind_conn_to_session_args {
        struct nfs_client               *client;
        struct nfs4_sessionid           sessionid;
        u32                             dir;
        bool                            use_conn_in_rdma_mode;
+       int                             retries;
 };
 
 struct nfs41_bind_conn_to_session_res {
index c588be8..0ecce6a 100644 (file)
@@ -185,6 +185,7 @@ int cros_ec_sensorhub_register_push_data(struct cros_ec_sensorhub *sensorhub,
 void cros_ec_sensorhub_unregister_push_data(struct cros_ec_sensorhub *sensorhub,
                                            u8 sensor_num);
 
+int cros_ec_sensorhub_ring_allocate(struct cros_ec_sensorhub *sensorhub);
 int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub);
 void cros_ec_sensorhub_ring_remove(void *arg);
 int cros_ec_sensorhub_ring_fifo_enable(struct cros_ec_sensorhub *sensorhub,
index bdc3575..77a2aad 100644 (file)
@@ -25,6 +25,7 @@ struct platform_device {
        bool            id_auto;
        struct device   dev;
        u64             platform_dma_mask;
+       struct device_dma_parameters dma_parms;
        u32             num_resources;
        struct resource *resource;
 
index ca7e108..02e7a58 100644 (file)
@@ -71,7 +71,13 @@ struct rpc_clnt {
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
        struct dentry           *cl_debugfs;    /* debugfs directory */
 #endif
-       struct rpc_xprt_iter    cl_xpi;
+       /* cl_work is only needed once cl_xpi is no longer used,
+        * and the two are of similar size.
+        */
+       union {
+               struct rpc_xprt_iter    cl_xpi;
+               struct work_struct      cl_work;
+       };
        const struct cred       *cl_cred;
 };
 
@@ -236,4 +242,9 @@ static inline int rpc_reply_expected(struct rpc_task *task)
                (task->tk_msg.rpc_proc->p_decode != NULL);
 }
 
+static inline void rpc_task_close_connection(struct rpc_task *task)
+{
+       if (task->tk_xprt)
+               xprt_force_disconnect(task->tk_xprt);
+}
 #endif /* _LINUX_SUNRPC_CLNT_H */
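
The cl_xpi/cl_work overlay above works because the two members have disjoint lifetimes and comparable sizes. A compile-time check makes the size assumption explicit; the struct layouts below are illustrative stand-ins, not the real rpc types:

#include <stdio.h>

struct xprt_iter { void *cursor; unsigned long state[3]; };
struct work      { void (*fn)(struct work *); void *list[2]; };

struct client {
        /* work is only needed once iter is no longer used */
        union {
                struct xprt_iter iter;
                struct work      work;
        };
};

/* Fail the build if the overlay stops paying for itself. */
_Static_assert(sizeof(struct work) <= 2 * sizeof(struct xprt_iter),
               "union members diverged in size");

int main(void)
{
        printf("client size: %zu\n", sizeof(struct client));
        return 0;
}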
index 421c99c..4f8159e 100644 (file)
@@ -78,47 +78,6 @@ struct tcp_sack_block {
 #define TCP_SACK_SEEN     (1 << 0)   /*1 = peer is SACK capable, */
 #define TCP_DSACK_SEEN    (1 << 2)   /*1 = DSACK was received from peer*/
 
-#if IS_ENABLED(CONFIG_MPTCP)
-struct mptcp_options_received {
-       u64     sndr_key;
-       u64     rcvr_key;
-       u64     data_ack;
-       u64     data_seq;
-       u32     subflow_seq;
-       u16     data_len;
-       u16     mp_capable : 1,
-               mp_join : 1,
-               dss : 1,
-               add_addr : 1,
-               rm_addr : 1,
-               family : 4,
-               echo : 1,
-               backup : 1;
-       u32     token;
-       u32     nonce;
-       u64     thmac;
-       u8      hmac[20];
-       u8      join_id;
-       u8      use_map:1,
-               dsn64:1,
-               data_fin:1,
-               use_ack:1,
-               ack64:1,
-               mpc_map:1,
-               __unused:2;
-       u8      addr_id;
-       u8      rm_id;
-       union {
-               struct in_addr  addr;
-#if IS_ENABLED(CONFIG_MPTCP_IPV6)
-               struct in6_addr addr6;
-#endif
-       };
-       u64     ahmac;
-       u16     port;
-};
-#endif
-
 struct tcp_options_received {
 /*     PAWS/RTTM data  */
        int     ts_recent_stamp;/* Time we stored ts_recent (for aging) */
@@ -136,9 +95,6 @@ struct tcp_options_received {
        u8      num_sacks;      /* Number of SACK blocks                */
        u16     user_mss;       /* mss requested by user in ioctl       */
        u16     mss_clamp;      /* Maximal mss, negotiated at connection setup */
-#if IS_ENABLED(CONFIG_MPTCP)
-       struct mptcp_options_received   mptcp;
-#endif
 };
 
 static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
@@ -148,13 +104,6 @@ static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
 #if IS_ENABLED(CONFIG_SMC)
        rx_opt->smc_ok = 0;
 #endif
-#if IS_ENABLED(CONFIG_MPTCP)
-       rx_opt->mptcp.mp_capable = 0;
-       rx_opt->mptcp.mp_join = 0;
-       rx_opt->mptcp.add_addr = 0;
-       rx_opt->mptcp.rm_addr = 0;
-       rx_opt->mptcp.dss = 0;
-#endif
 }
 
 /* This is the max number of SACKS that we'll generate and process. It's safe
index bd5fe0e..a99e9b8 100644 (file)
@@ -66,7 +66,7 @@ struct tty_buffer {
        int read;
        int flags;
        /* Data points here */
-       unsigned long data[0];
+       unsigned long data[];
 };
 
 /* Values for .flags field of tty_buffer */
index 0d1fe92..6f6ade6 100644 (file)
@@ -3,6 +3,8 @@
 #define _LINUX_VIRTIO_NET_H
 
 #include <linux/if_vlan.h>
+#include <uapi/linux/tcp.h>
+#include <uapi/linux/udp.h>
 #include <uapi/linux/virtio_net.h>
 
 static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
@@ -28,17 +30,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
                                        bool little_endian)
 {
        unsigned int gso_type = 0;
+       unsigned int thlen = 0;
+       unsigned int ip_proto;
 
        if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                        gso_type = SKB_GSO_TCPV4;
+                       ip_proto = IPPROTO_TCP;
+                       thlen = sizeof(struct tcphdr);
                        break;
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        gso_type = SKB_GSO_TCPV6;
+                       ip_proto = IPPROTO_TCP;
+                       thlen = sizeof(struct tcphdr);
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
                        gso_type = SKB_GSO_UDP;
+                       ip_proto = IPPROTO_UDP;
+                       thlen = sizeof(struct udphdr);
                        break;
                default:
                        return -EINVAL;
@@ -57,16 +67,22 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
 
                if (!skb_partial_csum_set(skb, start, off))
                        return -EINVAL;
+
+               if (skb_transport_offset(skb) + thlen > skb_headlen(skb))
+                       return -EINVAL;
        } else {
                /* gso packets without NEEDS_CSUM do not set transport_offset.
                 * probe and drop if it does not match one of the above types.
                 */
                if (gso_type && skb->network_header) {
+                       struct flow_keys_basic keys;
+
                        if (!skb->protocol)
                                virtio_net_hdr_set_proto(skb, hdr);
 retry:
-                       skb_probe_transport_header(skb);
-                       if (!skb_transport_header_was_set(skb)) {
+                       if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
+                                                             NULL, 0, 0, 0,
+                                                             0)) {
                                /* UFO does not specify ipv4 or 6: try both */
                                if (gso_type & SKB_GSO_UDP &&
                                    skb->protocol == htons(ETH_P_IP)) {
@@ -75,6 +91,12 @@ retry:
                                }
                                return -EINVAL;
                        }
+
+                       if (keys.control.thoff + thlen > skb_headlen(skb) ||
+                           keys.basic.ip_proto != ip_proto)
+                               return -EINVAL;
+
+                       skb_set_transport_header(skb, keys.control.thoff);
                }
        }
 
index 71c81e0..dc636b7 100644 (file)
@@ -48,6 +48,7 @@ struct virtio_vsock_pkt {
        u32 len;
        u32 off;
        bool reply;
+       bool tap_delivered;
 };
 
 struct virtio_vsock_pkt_info {
index 3619c6a..efc8350 100644 (file)
@@ -166,15 +166,18 @@ enum flow_action_mangle_base {
 enum flow_action_hw_stats_bit {
        FLOW_ACTION_HW_STATS_IMMEDIATE_BIT,
        FLOW_ACTION_HW_STATS_DELAYED_BIT,
+       FLOW_ACTION_HW_STATS_DISABLED_BIT,
 };
 
 enum flow_action_hw_stats {
-       FLOW_ACTION_HW_STATS_DISABLED = 0,
+       FLOW_ACTION_HW_STATS_DONT_CARE = 0,
        FLOW_ACTION_HW_STATS_IMMEDIATE =
                BIT(FLOW_ACTION_HW_STATS_IMMEDIATE_BIT),
        FLOW_ACTION_HW_STATS_DELAYED = BIT(FLOW_ACTION_HW_STATS_DELAYED_BIT),
        FLOW_ACTION_HW_STATS_ANY = FLOW_ACTION_HW_STATS_IMMEDIATE |
                                   FLOW_ACTION_HW_STATS_DELAYED,
+       FLOW_ACTION_HW_STATS_DISABLED =
+               BIT(FLOW_ACTION_HW_STATS_DISABLED_BIT),
 };
 
 typedef void (*action_destr)(void *priv);
@@ -325,7 +328,11 @@ __flow_action_hw_stats_check(const struct flow_action *action,
                return true;
        if (!flow_action_mixed_hw_stats_check(action, extack))
                return false;
+
        action_entry = flow_action_first_entry_get(action);
+       if (action_entry->hw_stats == FLOW_ACTION_HW_STATS_DONT_CARE)
+               return true;
+
        if (!check_allow_bit &&
            action_entry->hw_stats != FLOW_ACTION_HW_STATS_ANY) {
                NL_SET_ERR_MSG_MOD(extack, "Driver supports only default HW stats type \"any\"");
index c8e2beb..0f0d1ef 100644 (file)
@@ -99,6 +99,20 @@ static inline int IP_ECN_set_ce(struct iphdr *iph)
        return 1;
 }
 
+static inline int IP_ECN_set_ect1(struct iphdr *iph)
+{
+       u32 check = (__force u32)iph->check;
+
+       if ((iph->tos & INET_ECN_MASK) != INET_ECN_ECT_0)
+               return 0;
+
+       check += (__force u16)htons(0x100);
+
+       iph->check = (__force __sum16)(check + (check>=0xFFFF));
+       iph->tos ^= INET_ECN_MASK;
+       return 1;
+}
+
 static inline void IP_ECN_clear(struct iphdr *iph)
 {
        iph->tos &= ~INET_ECN_MASK;
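
The one-line checksum adjustment in IP_ECN_set_ect1() works because only a single 16-bit word of the header changes: version/IHL share a word with TOS, and flipping ECT(0) (0b10) to ECT(1) (0b01) decrements that word by one. The general form is RFC 1624 equation 3, shown here with that exact word change (the starting checksum value is illustrative):

#include <stdint.h>
#include <stdio.h>

/* RFC 1624, eqn. 3: HC' = ~(~HC + ~m + m') for a single changed word. */
static uint16_t csum_update(uint16_t hc, uint16_t m, uint16_t m_new)
{
        uint32_t sum = (uint16_t)~hc;

        sum += (uint16_t)~m;
        sum += m_new;
        while (sum >> 16)                 /* fold the carries back in */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

int main(void)
{
        /* First 16-bit word of an IPv4 header: version/IHL=0x45, TOS byte. */
        uint16_t old_word = 0x4502;       /* TOS low bits = ECT(0) */
        uint16_t new_word = 0x4501;       /* TOS low bits = ECT(1) */
        uint16_t check = 0xb1e6;          /* some existing checksum */

        printf("updated checksum: 0x%04x\n",
               csum_update(check, old_word, new_word));
        return 0;
}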
@@ -134,6 +148,22 @@ static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
        return 1;
 }
 
+static inline int IP6_ECN_set_ect1(struct sk_buff *skb, struct ipv6hdr *iph)
+{
+       __be32 from, to;
+
+       if ((ipv6_get_dsfield(iph) & INET_ECN_MASK) != INET_ECN_ECT_0)
+               return 0;
+
+       from = *(__be32 *)iph;
+       to = from ^ htonl(INET_ECN_MASK << 20);
+       *(__be32 *)iph = to;
+       if (skb->ip_summed == CHECKSUM_COMPLETE)
+               skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from),
+                                    (__force __wsum)to);
+       return 1;
+}
+
 static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner)
 {
        dscp &= ~INET_ECN_MASK;
@@ -159,6 +189,25 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
        return 0;
 }
 
+static inline int INET_ECN_set_ect1(struct sk_buff *skb)
+{
+       switch (skb->protocol) {
+       case cpu_to_be16(ETH_P_IP):
+               if (skb_network_header(skb) + sizeof(struct iphdr) <=
+                   skb_tail_pointer(skb))
+                       return IP_ECN_set_ect1(ip_hdr(skb));
+               break;
+
+       case cpu_to_be16(ETH_P_IPV6):
+               if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
+                   skb_tail_pointer(skb))
+                       return IP6_ECN_set_ect1(skb, ipv6_hdr(skb));
+               break;
+       }
+
+       return 0;
+}
+
 /*
  * RFC 6040 4.2
  *  To decapsulate the inner header at the tunnel egress, a compliant
@@ -208,8 +257,12 @@ static inline int INET_ECN_decapsulate(struct sk_buff *skb,
        int rc;
 
        rc = __INET_ECN_decapsulate(outer, inner, &set_ce);
-       if (!rc && set_ce)
-               INET_ECN_set_ce(skb);
+       if (!rc) {
+               if (set_ce)
+                       INET_ECN_set_ce(skb);
+               else if ((outer & INET_ECN_MASK) == INET_ECN_ECT_1)
+                       INET_ECN_set_ect1(skb);
+       }
 
        return rc;
 }
index 80262d2..1d98828 100644 (file)
@@ -203,6 +203,7 @@ struct fib6_info {
 struct rt6_info {
        struct dst_entry                dst;
        struct fib6_info __rcu          *from;
+       int                             sernum;
 
        struct rt6key                   rt6i_dst;
        struct rt6key                   rt6i_src;
@@ -291,6 +292,9 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt)
        struct fib6_info *from;
        u32 cookie = 0;
 
+       if (rt->sernum)
+               return rt->sernum;
+
        rcu_read_lock();
 
        from = rcu_dereference(rt->from);
index 0e7c547..3bce201 100644 (file)
@@ -68,11 +68,8 @@ static inline bool rsk_is_mptcp(const struct request_sock *req)
        return tcp_rsk(req)->is_mptcp;
 }
 
-void mptcp_parse_option(const struct sk_buff *skb, const unsigned char *ptr,
-                       int opsize, struct tcp_options_received *opt_rx);
 bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
                       unsigned int *size, struct mptcp_out_options *opts);
-void mptcp_rcv_synsent(struct sock *sk);
 bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
                          struct mptcp_out_options *opts);
 bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
index ab96fb5..8e001e0 100644 (file)
@@ -437,6 +437,13 @@ static inline int rt_genid_ipv4(const struct net *net)
        return atomic_read(&net->ipv4.rt_genid);
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
+static inline int rt_genid_ipv6(const struct net *net)
+{
+       return atomic_read(&net->ipv6.fib6_sernum);
+}
+#endif
+
 static inline void rt_genid_bump_ipv4(struct net *net)
 {
        atomic_inc(&net->ipv4.rt_genid);
index 25d2ec4..8428aa6 100644 (file)
@@ -407,6 +407,7 @@ struct tcf_block {
        struct mutex lock;
        struct list_head chain_list;
        u32 index; /* block index for shared blocks */
+       u32 classid; /* which class this block belongs to */
        refcount_t refcnt;
        struct net *net;
        struct Qdisc *q;
index 6d6a394..efc8b61 100644 (file)
@@ -502,6 +502,7 @@ struct ocelot {
        unsigned int                    num_stats;
 
        int                             shared_queue_sz;
+       int                             num_mact_rows;
 
        struct net_device               *hw_bridge_dev;
        u16                             bridge_mask;
index 1897822..26d871f 100644 (file)
@@ -24,7 +24,7 @@
  *
 * @pid: 0 for the global total, or a positive pid for a per-process total.
  *
- * @size: Virtual size of the allocation in bytes.
+ * @size: Size of the allocation in bytes.
  *
  */
 TRACE_EVENT(gpu_mem_total,
index 596e0a8..132c3c7 100644 (file)
@@ -692,11 +692,10 @@ TRACE_EVENT(xprtrdma_prepsend_failed,
 
 TRACE_EVENT(xprtrdma_post_send,
        TP_PROTO(
-               const struct rpcrdma_req *req,
-               int status
+               const struct rpcrdma_req *req
        ),
 
-       TP_ARGS(req, status),
+       TP_ARGS(req),
 
        TP_STRUCT__entry(
                __field(const void *, req)
@@ -705,7 +704,6 @@ TRACE_EVENT(xprtrdma_post_send,
                __field(unsigned int, client_id)
                __field(int, num_sge)
                __field(int, signaled)
-               __field(int, status)
        ),
 
        TP_fast_assign(
@@ -718,15 +716,13 @@ TRACE_EVENT(xprtrdma_post_send,
                __entry->sc = req->rl_sendctx;
                __entry->num_sge = req->rl_wr.num_sge;
                __entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
-               __entry->status = status;
        ),
 
-       TP_printk("task:%u@%u req=%p sc=%p (%d SGE%s) %sstatus=%d",
+       TP_printk("task:%u@%u req=%p sc=%p (%d SGE%s) %s",
                __entry->task_id, __entry->client_id,
                __entry->req, __entry->sc, __entry->num_sge,
                (__entry->num_sge == 1 ? "" : "s"),
-               (__entry->signaled ? "signaled " : ""),
-               __entry->status
+               (__entry->signaled ? "signaled" : "")
        )
 );
 
index 65f6972..d28b4ce 100644 (file)
@@ -346,6 +346,10 @@ struct drm_amdgpu_gem_userptr {
 #define AMDGPU_TILING_DCC_PITCH_MAX_MASK               0x3FFF
 #define AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT                43
 #define AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK         0x1
+#define AMDGPU_TILING_DCC_INDEPENDENT_128B_SHIFT       44
+#define AMDGPU_TILING_DCC_INDEPENDENT_128B_MASK                0x1
+#define AMDGPU_TILING_SCANOUT_SHIFT                    63
+#define AMDGPU_TILING_SCANOUT_MASK                     0x1
 
 /* Set/Get helpers for tiling flags. */
 #define AMDGPU_TILING_SET(field, value) \
index 7bbf1b6..f9b7fdd 100644 (file)
@@ -73,7 +73,7 @@ struct bpf_insn {
 /* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */
 struct bpf_lpm_trie_key {
        __u32   prefixlen;      /* up to 32 for AF_INET, 128 for AF_INET6 */
-       __u8    data[]; /* Arbitrary size */
+       __u8    data[0];        /* Arbitrary size */
 };
 
 struct bpf_cgroup_storage_key {
index e83954c..f880d28 100644 (file)
@@ -45,13 +45,13 @@ struct dlm_lock_params {
        void __user *bastaddr;
        struct dlm_lksb __user *lksb;
        char lvb[DLM_USER_LVB_LEN];
-       char name[];
+       char name[0];
 };
 
 struct dlm_lspace_params {
        __u32 flags;
        __u32 minor;
-       char name[];
+       char name[0];
 };
 
 struct dlm_purge_params {
index dbc7092..7f30393 100644 (file)
@@ -39,6 +39,12 @@ struct dma_buf_sync {
 
 #define DMA_BUF_BASE           'b'
 #define DMA_BUF_IOCTL_SYNC     _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync)
+
+/* The 32/64-bit layout of this uapi was botched in Android; there is no
+ * difference between the variants in the actual uapi, just different numbers.
+ */
 #define DMA_BUF_SET_NAME       _IOW(DMA_BUF_BASE, 1, const char *)
+#define DMA_BUF_SET_NAME_A     _IOW(DMA_BUF_BASE, 1, u32)
+#define DMA_BUF_SET_NAME_B     _IOW(DMA_BUF_BASE, 1, u64)
 
 #endif
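
_IOW() folds sizeof(type) into the ioctl number, which is how a `const char *` argument yields different numbers on 32- and 64-bit userlands while the fixed-width aliases pin them down. The effect can be printed directly; DMA_BUF_BASE is mirrored from the header above:

#include <linux/ioctl.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_BUF_BASE 'b'

int main(void)
{
        /* sizeof(const char *) varies by ABI; uint32_t/uint64_t do not. */
        printf("SET_NAME   (ptr): %#lx\n",
               (unsigned long)_IOW(DMA_BUF_BASE, 1, const char *));
        printf("SET_NAME_A (u32): %#lx\n",
               (unsigned long)_IOW(DMA_BUF_BASE, 1, uint32_t));
        printf("SET_NAME_B (u64): %#lx\n",
               (unsigned long)_IOW(DMA_BUF_BASE, 1, uint64_t));
        return 0;
}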
index 7a900b2..8c0bc24 100644 (file)
@@ -34,7 +34,7 @@ struct fiemap {
        __u32 fm_mapped_extents;/* number of extents that were mapped (out) */
        __u32 fm_extent_count;  /* size of fm_extents array (in) */
        __u32 fm_reserved;
-       struct fiemap_extent fm_extents[]; /* array of mapped extents (out) */
+       struct fiemap_extent fm_extents[0]; /* array of mapped extents (out) */
 };
 
 #define FIEMAP_MAX_OFFSET      (~0ULL)
index 991b2b7..8f24404 100644 (file)
@@ -119,8 +119,8 @@ enum hv_fcopy_op {
 
 struct hv_fcopy_hdr {
        __u32 operation;
-       uuid_le service_id0; /* currently unused */
-       uuid_le service_id1; /* currently unused */
+       __u8 service_id0[16]; /* currently unused */
+       __u8 service_id1[16]; /* currently unused */
 } __attribute__((packed));
 
 #define OVER_WRITE     0x1
index b122cfa..6838780 100644 (file)
@@ -60,7 +60,7 @@ struct arc_rfc1201 {
        __u8  proto;            /* protocol ID field - varies           */
        __u8  split_flag;       /* for use with split packets           */
        __be16   sequence;      /* sequence number                      */
-       __u8  payload[];        /* space remaining in packet (504 bytes)*/
+       __u8  payload[0];       /* space remaining in packet (504 bytes)*/
 };
 #define RFC1201_HDR_SIZE 4
 
@@ -69,7 +69,7 @@ struct arc_rfc1201 {
  */
 struct arc_rfc1051 {
        __u8 proto;             /* ARC_P_RFC1051_ARP/RFC1051_IP */
-       __u8 payload[]; /* 507 bytes                    */
+       __u8 payload[0];        /* 507 bytes                    */
 };
 #define RFC1051_HDR_SIZE 1
 
@@ -80,7 +80,7 @@ struct arc_rfc1051 {
 struct arc_eth_encap {
        __u8 proto;             /* Always ARC_P_ETHER                   */
        struct ethhdr eth;      /* standard ethernet header (yuck!)     */
-       __u8 payload[]; /* 493 bytes                            */
+       __u8 payload[0];        /* 493 bytes                            */
 };
 #define ETH_ENCAP_HDR_SIZE 14
 
index 98e29e7..00c0812 100644 (file)
@@ -57,7 +57,7 @@ struct mmc_ioc_cmd {
  */
 struct mmc_ioc_multi_cmd {
        __u64 num_of_cmds;
-       struct mmc_ioc_cmd cmds[];
+       struct mmc_ioc_cmd cmds[0];
 };
 
 #define MMC_IOC_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_cmd)
index 67e31f3..66048cc 100644 (file)
@@ -29,12 +29,12 @@ struct net_dm_config_entry {
 
 struct net_dm_config_msg {
        __u32 entries;
-       struct net_dm_config_entry options[];
+       struct net_dm_config_entry options[0];
 };
 
 struct net_dm_alert_msg {
        __u32 entries;
-       struct net_dm_drop_point points[];
+       struct net_dm_drop_point points[0];
 };
 
 struct net_dm_user_msg {
index 73b26a2..9acf757 100644 (file)
@@ -40,7 +40,7 @@ struct ebt_mac_wormhash_tuple {
 struct ebt_mac_wormhash {
        int table[257];
        int poolsize;
-       struct ebt_mac_wormhash_tuple pool[];
+       struct ebt_mac_wormhash_tuple pool[0];
 };
 
 #define ebt_mac_wormhash_size(x) ((x) ? sizeof(struct ebt_mac_wormhash) \
index 7f59308..3ae65e9 100644 (file)
@@ -209,7 +209,7 @@ struct fc_bsg_host_vendor {
        __u64 vendor_id;
 
        /* start of vendor command area */
-       __u32 vendor_cmd[];
+       __u32 vendor_cmd[0];
 };
 
 /* Response:
index a48617f..1a5da2c 100644 (file)
@@ -257,6 +257,47 @@ static int __init loglevel(char *str)
 
 early_param("loglevel", loglevel);
 
+#ifdef CONFIG_BLK_DEV_INITRD
+static void * __init get_boot_config_from_initrd(u32 *_size, u32 *_csum)
+{
+       u32 size, csum;
+       char *data;
+       u32 *hdr;
+
+       if (!initrd_end)
+               return NULL;
+
+       data = (char *)initrd_end - BOOTCONFIG_MAGIC_LEN;
+       if (memcmp(data, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN))
+               return NULL;
+
+       hdr = (u32 *)(data - 8);
+       size = hdr[0];
+       csum = hdr[1];
+
+       data = ((void *)hdr) - size;
+       if ((unsigned long)data < initrd_start) {
+               pr_err("bootconfig size %d is greater than initrd size %ld\n",
+                       size, initrd_end - initrd_start);
+               return NULL;
+       }
+
+       /* Remove bootconfig from initramfs/initrd */
+       initrd_end = (unsigned long)data;
+       if (_size)
+               *_size = size;
+       if (_csum)
+               *_csum = csum;
+
+       return data;
+}
+#else
+static void * __init get_boot_config_from_initrd(u32 *_size, u32 *_csum)
+{
+       return NULL;
+}
+#endif
+
 #ifdef CONFIG_BOOT_CONFIG
 
 char xbc_namebuf[XBC_KEYLEN_MAX] __initdata;
@@ -357,9 +398,12 @@ static void __init setup_boot_config(const char *cmdline)
        int pos;
        u32 size, csum;
        char *data, *copy;
-       u32 *hdr;
        int ret;
 
+       data = get_boot_config_from_initrd(&size, &csum);
+       if (!data)
+               goto not_found;
+
        strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
        parse_args("bootconfig", tmp_cmdline, NULL, 0, 0, 0, NULL,
                   bootconfig_params);
@@ -367,27 +411,12 @@ static void __init setup_boot_config(const char *cmdline)
        if (!bootconfig_found)
                return;
 
-       if (!initrd_end)
-               goto not_found;
-
-       data = (char *)initrd_end - BOOTCONFIG_MAGIC_LEN;
-       if (memcmp(data, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN))
-               goto not_found;
-
-       hdr = (u32 *)(data - 8);
-       size = hdr[0];
-       csum = hdr[1];
-
        if (size >= XBC_DATA_MAX) {
                pr_err("bootconfig size %d greater than max size %d\n",
                        size, XBC_DATA_MAX);
                return;
        }
 
-       data = ((void *)hdr) - size;
-       if ((unsigned long)data < initrd_start)
-               goto not_found;
-
        if (boot_config_checksum((unsigned char *)data, size) != csum) {
                pr_err("bootconfig checksum failed\n");
                return;
@@ -420,8 +449,14 @@ static void __init setup_boot_config(const char *cmdline)
 not_found:
        pr_err("'bootconfig' found on command line, but no bootconfig found\n");
 }
+
 #else
-#define setup_boot_config(cmdline)     do { } while (0)
+
+static void __init setup_boot_config(const char *cmdline)
+{
+       /* Remove bootconfig data from initrd */
+       get_boot_config_from_initrd(NULL, NULL);
+}
 
 static int __init warn_bootconfig(char *str)
 {
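
The new get_boot_config_from_initrd() is defined outside the CONFIG_BOOT_CONFIG guard, so a kernel without bootconfig support still trims the appended blob off the initrd (the stub setup_boot_config() in the #else branch calls it for exactly that). The footer it walks sits at the very end of the initrd: config data, then a size/checksum pair, then the 12-byte magic. A userspace-style sketch of the same walk, with constants assumed to mirror the kernel's:

/*
 * Locate a bootconfig blob appended to an initrd image as
 *   [config data (size bytes)][size: u32][csum: u32]["#BOOTCONFIG\n"]
 * Illustrative only; constants assumed to match the kernel's.
 */
#include <stdint.h>
#include <string.h>

#define BOOTCONFIG_MAGIC	"#BOOTCONFIG\n"
#define BOOTCONFIG_MAGIC_LEN	12

static const char *find_bootconfig(const char *initrd, size_t len,
				   uint32_t *size, uint32_t *csum)
{
	const char *magic = initrd + len - BOOTCONFIG_MAGIC_LEN;
	uint32_t hdr[2];

	if (len < BOOTCONFIG_MAGIC_LEN + sizeof(hdr))
		return NULL;
	if (memcmp(magic, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN))
		return NULL;

	memcpy(hdr, magic - sizeof(hdr), sizeof(hdr));	/* size, then csum */
	*size = hdr[0];
	*csum = hdr[1];
	if (*size > len - BOOTCONFIG_MAGIC_LEN - sizeof(hdr))
		return NULL;			/* larger than the initrd */
	return magic - sizeof(hdr) - *size;	/* start of config data */
}
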
index dc8307b..beff0cf 100644 (file)
@@ -142,6 +142,7 @@ struct mqueue_inode_info {
 
        struct sigevent notify;
        struct pid *notify_owner;
+       u32 notify_self_exec_id;
        struct user_namespace *notify_user_ns;
        struct user_struct *user;       /* user who created, for accounting */
        struct sock *notify_sock;
@@ -773,28 +774,44 @@ static void __do_notify(struct mqueue_inode_info *info)
         * synchronously. */
        if (info->notify_owner &&
            info->attr.mq_curmsgs == 1) {
-               struct kernel_siginfo sig_i;
                switch (info->notify.sigev_notify) {
                case SIGEV_NONE:
                        break;
-               case SIGEV_SIGNAL:
-                       /* sends signal */
+               case SIGEV_SIGNAL: {
+                       struct kernel_siginfo sig_i;
+                       struct task_struct *task;
+
+                       /* do_mq_notify() accepts sigev_signo == 0, why?? */
+                       if (!info->notify.sigev_signo)
+                               break;
 
                        clear_siginfo(&sig_i);
                        sig_i.si_signo = info->notify.sigev_signo;
                        sig_i.si_errno = 0;
                        sig_i.si_code = SI_MESGQ;
                        sig_i.si_value = info->notify.sigev_value;
-                       /* map current pid/uid into info->owner's namespaces */
                        rcu_read_lock();
+                       /* map current pid/uid into info->owner's namespaces */
                        sig_i.si_pid = task_tgid_nr_ns(current,
                                                ns_of_pid(info->notify_owner));
-                       sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
+                       sig_i.si_uid = from_kuid_munged(info->notify_user_ns,
+                                               current_uid());
+                       /*
+                        * We can't use kill_pid_info() because this signal
+                        * should bypass check_kill_permission(): it comes
+                        * from the kernel, but si_fromuser() can't know that.
+                        * We do check self_exec_id to avoid sending signals
+                        * to programs that don't expect them.
+                        */
+                       task = pid_task(info->notify_owner, PIDTYPE_TGID);
+                       if (task && task->self_exec_id ==
+                                               info->notify_self_exec_id) {
+                               do_send_sig_info(info->notify.sigev_signo,
+                                               &sig_i, task, PIDTYPE_TGID);
+                       }
                        rcu_read_unlock();
-
-                       kill_pid_info(info->notify.sigev_signo,
-                                     &sig_i, info->notify_owner);
                        break;
+               }
                case SIGEV_THREAD:
                        set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
                        netlink_sendskb(info->notify_sock, info->notify_cookie);
@@ -1383,6 +1400,7 @@ retry:
                        info->notify.sigev_signo = notification->sigev_signo;
                        info->notify.sigev_value = notification->sigev_value;
                        info->notify.sigev_notify = SIGEV_SIGNAL;
+                       info->notify_self_exec_id = current->self_exec_id;
                        break;
                }
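
Snapshotting current->self_exec_id at registration time ties the SIGEV_SIGNAL notification to the exact program image that armed it: if the registering process later execve()s, the ids no longer match and the queued signal is dropped instead of being delivered to code that never asked for it. For reference, a minimal userspace registration that exercises this path (standard POSIX mqueue API; the queue name is arbitrary, build with -lrt):

/*
 * Arm a SIGEV_SIGNAL notification on a POSIX message queue. The
 * kernel-side check above decides at delivery time whether the
 * registering image is still the one running.
 */
#include <fcntl.h>
#include <mqueue.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,
		.sigev_signo  = SIGUSR1,
	};
	mqd_t q = mq_open("/demo", O_CREAT | O_RDONLY, 0600, NULL);

	if (q == (mqd_t)-1 || mq_notify(q, &sev) == -1) {
		perror("mq");
		return 1;
	}
	/* After an execve() the new image would not expect SIGUSR1;
	 * the check above drops the signal in that case. */
	pause();
	return 0;
}
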
 
index f503542..8accc97 100644 (file)
@@ -740,8 +740,8 @@ static const struct file_operations kcov_fops = {
  * kcov_remote_handle() with KCOV_SUBSYSTEM_COMMON as the subsystem id and an
  * arbitrary 4-byte non-zero number as the instance id). This common handle
  * then gets saved into the task_struct of the process that issued the
- * KCOV_REMOTE_ENABLE ioctl. When this proccess issues system calls that spawn
- * kernel threads, the common handle must be retrived via kcov_common_handle()
+ * KCOV_REMOTE_ENABLE ioctl. When this process issues system calls that spawn
+ * kernel threads, the common handle must be retrieved via kcov_common_handle()
  * and passed to the spawned threads via custom annotations. Those kernel
  * threads must in turn be annotated with kcov_remote_start(common_handle) and
  * kcov_remote_stop(). All of the threads that are spawned by the same process
index 86aba87..30bd28d 100644 (file)
@@ -898,6 +898,13 @@ static int software_resume(void)
        error = freeze_processes();
        if (error)
                goto Close_Finish;
+
+       error = freeze_kernel_threads();
+       if (error) {
+               thaw_processes();
+               goto Close_Finish;
+       }
+
        error = load_image_and_restore();
        thaw_processes();
  Finish:
index 31c0fad..c4c86de 100644 (file)
@@ -113,22 +113,42 @@ static int preemptirq_delay_run(void *data)
 
        for (i = 0; i < s; i++)
                (testfuncs[i])(i);
+
+       set_current_state(TASK_INTERRUPTIBLE);
+       while (!kthread_should_stop()) {
+               schedule();
+               set_current_state(TASK_INTERRUPTIBLE);
+       }
+
+       __set_current_state(TASK_RUNNING);
+
        return 0;
 }
 
-static struct task_struct *preemptirq_start_test(void)
+static int preemptirq_run_test(void)
 {
+       struct task_struct *task;
+
        char task_name[50];
 
        snprintf(task_name, sizeof(task_name), "%s_test", test_mode);
-       return kthread_run(preemptirq_delay_run, NULL, task_name);
+       task = kthread_run(preemptirq_delay_run, NULL, task_name);
+       if (IS_ERR(task))
+               return PTR_ERR(task);
+       if (task)
+               kthread_stop(task);
+       return 0;
 }
 
 
 static ssize_t trigger_store(struct kobject *kobj, struct kobj_attribute *attr,
                         const char *buf, size_t count)
 {
-       preemptirq_start_test();
+       ssize_t ret;
+
+       ret = preemptirq_run_test();
+       if (ret)
+               return ret;
        return count;
 }
 
@@ -148,11 +168,9 @@ static struct kobject *preemptirq_delay_kobj;
 
 static int __init preemptirq_delay_init(void)
 {
-       struct task_struct *test_task;
        int retval;
 
-       test_task = preemptirq_start_test();
-       retval = PTR_ERR_OR_ZERO(test_task);
+       retval = preemptirq_run_test();
        if (retval != 0)
                return retval;
 
index 8d2b988..29615f1 100644 (file)
@@ -947,7 +947,8 @@ int __trace_bputs(unsigned long ip, const char *str)
 EXPORT_SYMBOL_GPL(__trace_bputs);
 
 #ifdef CONFIG_TRACER_SNAPSHOT
-void tracing_snapshot_instance_cond(struct trace_array *tr, void *cond_data)
+static void tracing_snapshot_instance_cond(struct trace_array *tr,
+                                          void *cond_data)
 {
        struct tracer *tracer = tr->current_trace;
        unsigned long flags;
@@ -8525,6 +8526,19 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
         */
        allocate_snapshot = false;
 #endif
+
+       /*
+        * Because of some magic with the way alloc_percpu() works on
+        * x86_64, we need to synchronize the pgd of all the tables,
+        * otherwise trace events that happen in x86_64 page fault
+        * handlers can't cope with the chance that alloc_percpu()'d
+        * memory is touched from within a page fault trace event. We
+        * also need to audit all other alloc_percpu() and vmalloc()
+        * calls in tracing, because something might get triggered
+        * within a page fault trace event!
+        */
+       vmalloc_sync_mappings();
+
        return 0;
 }
 
index 06d7feb..9de29bb 100644 (file)
@@ -95,24 +95,20 @@ trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
        struct xbc_node *anode;
        char buf[MAX_BUF_LEN];
        const char *val;
-       int ret;
+       int ret = 0;
 
-       kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN);
+       xbc_node_for_each_array_value(node, "probes", anode, val) {
+               kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN);
 
-       ret = kprobe_event_gen_cmd_start(&cmd, event, NULL);
-       if (ret)
-               return ret;
+               ret = kprobe_event_gen_cmd_start(&cmd, event, val);
+               if (ret)
+                       break;
 
-       xbc_node_for_each_array_value(node, "probes", anode, val) {
-               ret = kprobe_event_add_field(&cmd, val);
+               ret = kprobe_event_gen_cmd_end(&cmd);
                if (ret)
-                       return ret;
+                       pr_err("Failed to add probe: %s\n", buf);
        }
 
-       ret = kprobe_event_gen_cmd_end(&cmd);
-       if (ret)
-               pr_err("Failed to add probe: %s\n", buf);
-
        return ret;
 }
 #else
index d0568af..3598938 100644 (file)
@@ -453,7 +453,7 @@ static bool __within_notrace_func(unsigned long addr)
 
 static bool within_notrace_func(struct trace_kprobe *tk)
 {
-       unsigned long addr = addr = trace_kprobe_address(tk);
+       unsigned long addr = trace_kprobe_address(tk);
        char symname[KSYM_NAME_LEN], *p;
 
        if (!__within_notrace_func(addr))
@@ -940,6 +940,9 @@ EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
  * complete command or only the first part of it; in the latter case,
  * kprobe_event_add_fields() can be used to add more fields following this.
  *
+ * Unlike synth_event_gen_cmd_start(), @loc must be specified. This
+ * returns -EINVAL if @loc == NULL.
+ *
  * Return: 0 if successful, error otherwise.
  */
 int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
@@ -953,6 +956,9 @@ int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd, bool kretprobe,
        if (cmd->type != DYNEVENT_TYPE_KPROBE)
                return -EINVAL;
 
+       if (!loc)
+               return -EINVAL;
+
        if (kretprobe)
                snprintf(buf, MAX_EVENT_NAME_LEN, "r:kprobes/%s", name);
        else
index 7f255b5..11bf5ee 100644 (file)
@@ -544,6 +544,11 @@ EXPORT_SYMBOL_GPL(fork_usermode_blob);
  * Runs a user-space application.  The application is started
  * asynchronously if wait is not set, and runs as a child of system workqueues.
  * (ie. it runs with full root capabilities and optimized affinity).
+ *
+ * Note: a successful return value does not guarantee that the helper was
+ * called at all. You can't rely on sub_info->{init,cleanup} being called
+ * even for UMH_WAIT_* wait modes, as STATIC_USERMODEHELPER_PATH="" turns
+ * all helpers into a successful no-op.
  */
 int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
 {
index 48469c9..9292110 100644 (file)
@@ -60,18 +60,15 @@ config UBSAN_SANITIZE_ALL
          Enabling this option will get kernel image size increased
          significantly.
 
-config UBSAN_NO_ALIGNMENT
-       bool "Disable checking of pointers alignment"
-       default y if HAVE_EFFICIENT_UNALIGNED_ACCESS
+config UBSAN_ALIGNMENT
+       bool "Enable checks for pointers alignment"
+       default !HAVE_EFFICIENT_UNALIGNED_ACCESS
+       depends on !X86 || !COMPILE_TEST
        help
-         This option disables the check of unaligned memory accesses.
-         This option should be used when building allmodconfig.
-         Disabling this option on architectures that support unaligned
+         This option enables the check of unaligned memory accesses.
+         Enabling this option on architectures that support unaligned
          accesses may produce a lot of false positives.
 
-config UBSAN_ALIGNMENT
-       def_bool !UBSAN_NO_ALIGNMENT
-
 config TEST_UBSAN
        tristate "Module for testing for undefined behavior detection"
        depends on m
index 7a6430a..ccb2ffa 100644 (file)
@@ -93,7 +93,7 @@ static void kunit_print_ok_not_ok(void *test_or_suite,
         * representation.
         */
        if (suite)
-               pr_info("%s %zd - %s",
+               pr_info("%s %zd - %s\n",
                        kunit_status_to_string(is_ok),
                        test_number, description);
        else
index 5beea03..a3b97f1 100644 (file)
@@ -4990,19 +4990,22 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
        unsigned int size;
        int node;
        int __maybe_unused i;
+       long error = -ENOMEM;
 
        size = sizeof(struct mem_cgroup);
        size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
 
        memcg = kzalloc(size, GFP_KERNEL);
        if (!memcg)
-               return NULL;
+               return ERR_PTR(error);
 
        memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
                                 1, MEM_CGROUP_ID_MAX,
                                 GFP_KERNEL);
-       if (memcg->id.id < 0)
+       if (memcg->id.id < 0) {
+               error = memcg->id.id;
                goto fail;
+       }
 
        memcg->vmstats_local = alloc_percpu(struct memcg_vmstats_percpu);
        if (!memcg->vmstats_local)
@@ -5046,7 +5049,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
 fail:
        mem_cgroup_id_remove(memcg);
        __mem_cgroup_free(memcg);
-       return NULL;
+       return ERR_PTR(error);
 }
 
 static struct cgroup_subsys_state * __ref
@@ -5057,8 +5060,8 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
        long error = -ENOMEM;
 
        memcg = mem_cgroup_alloc();
-       if (!memcg)
-               return ERR_PTR(error);
+       if (IS_ERR(memcg))
+               return ERR_CAST(memcg);
 
        WRITE_ONCE(memcg->high, PAGE_COUNTER_MAX);
        memcg->soft_limit = PAGE_COUNTER_MAX;
@@ -5108,7 +5111,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 fail:
        mem_cgroup_id_remove(memcg);
        mem_cgroup_free(memcg);
-       return ERR_PTR(-ENOMEM);
+       return ERR_PTR(error);
 }
 
 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
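
mem_cgroup_alloc() now reports the precise errno (for instance an idr_alloc() failure) instead of collapsing every failure to NULL, and mem_cgroup_css_alloc() forwards it with ERR_CAST() rather than inventing -ENOMEM. A generic sketch of the idiom with a hypothetical object, not the memcg code:

/*
 * ERR_PTR idiom: encode the real errno in an otherwise-invalid
 * pointer so callers can propagate it verbatim. struct demo and
 * both functions are illustrative only.
 */
#include <linux/err.h>
#include <linux/slab.h>

struct demo {
	int id;
};

static struct demo *demo_alloc(int id)
{
	struct demo *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return ERR_PTR(-ENOMEM);
	if (id < 0) {
		kfree(d);
		return ERR_PTR(-EINVAL);	/* keep the real reason */
	}
	d->id = id;
	return d;
}

static int demo_create(void)
{
	struct demo *d = demo_alloc(1);

	if (IS_ERR(d))
		return PTR_ERR(d);	/* forward, don't invent -ENOMEM */
	kfree(d);
	return 0;
}
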
index 69827d4..13cc653 100644 (file)
@@ -1607,6 +1607,7 @@ void set_zone_contiguous(struct zone *zone)
                if (!__pageblock_pfn_to_page(block_start_pfn,
                                             block_end_pfn, zone))
                        return;
+               cond_resched();
        }
 
        /* We confirm that there is no hole */
@@ -2400,6 +2401,14 @@ static inline void boost_watermark(struct zone *zone)
 
        if (!watermark_boost_factor)
                return;
+       /*
+        * Don't bother in zones that are unlikely to produce results.
+        * On small machines, including kdump capture kernels running
+        * in a small area, boosting the watermark can cause an out of
+        * memory situation immediately.
+        */
+       if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
+               return;
 
        max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
                        watermark_boost_factor, 10000);
index d7e3bc6..7da7d77 100644 (file)
@@ -80,6 +80,7 @@
 #include <linux/workqueue.h>
 #include <linux/kmemleak.h>
 #include <linux/sched.h>
+#include <linux/sched/mm.h>
 
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
@@ -1557,10 +1558,9 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
                                 gfp_t gfp)
 {
-       /* whitelisted flags that can be passed to the backing allocators */
-       gfp_t pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
-       bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
-       bool do_warn = !(gfp & __GFP_NOWARN);
+       gfp_t pcpu_gfp;
+       bool is_atomic;
+       bool do_warn;
        static int warn_limit = 10;
        struct pcpu_chunk *chunk, *next;
        const char *err;
@@ -1569,6 +1569,12 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
        void __percpu *ptr;
        size_t bits, bit_align;
 
+       gfp = current_gfp_context(gfp);
+       /* whitelisted flags that can be passed to the backing allocators */
+       pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
+       is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
+       do_warn = !(gfp & __GFP_NOWARN);
+
        /*
         * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
         * therefore alignment must be a minimum of that many bytes.
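
Filtering the caller's flags through current_gfp_context() makes pcpu_alloc() honor memalloc_nofs/memalloc_noio scopes the way the page and slab allocators already do, so a filesystem inside such a scope cannot recurse into itself via a per-cpu allocation. A sketch of the scope API this change honors (memalloc_nofs_save/restore and alloc_percpu_gfp are the real interfaces; the surrounding function is illustrative):

/*
 * Inside the save/restore window, GFP_KERNEL allocations (now
 * including pcpu_alloc()) are implicitly narrowed to GFP_NOFS.
 * demo_counter and this function are illustrative only.
 */
#include <linux/percpu.h>
#include <linux/sched/mm.h>

static int __percpu *demo_counter;

static int demo_alloc_under_nofs(void)
{
	unsigned int nofs_flags = memalloc_nofs_save();

	/* behaves as if GFP_NOFS had been passed */
	demo_counter = alloc_percpu_gfp(int, GFP_KERNEL);

	memalloc_nofs_restore(nofs_flags);
	return demo_counter ? 0 : -ENOMEM;
}
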
index 9bf4495..b762450 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -551,15 +551,32 @@ static void print_section(char *level, char *text, u8 *addr,
        metadata_access_disable();
 }
 
+/*
+ * See comment in calculate_sizes().
+ */
+static inline bool freeptr_outside_object(struct kmem_cache *s)
+{
+       return s->offset >= s->inuse;
+}
+
+/*
+ * Return offset of the end of info block which is inuse + free pointer if
+ * not overlapping with object.
+ */
+static inline unsigned int get_info_end(struct kmem_cache *s)
+{
+       if (freeptr_outside_object(s))
+               return s->inuse + sizeof(void *);
+       else
+               return s->inuse;
+}
+
 static struct track *get_track(struct kmem_cache *s, void *object,
        enum track_item alloc)
 {
        struct track *p;
 
-       if (s->offset)
-               p = object + s->offset + sizeof(void *);
-       else
-               p = object + s->inuse;
+       p = object + get_info_end(s);
 
        return p + alloc;
 }
@@ -686,10 +703,7 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
                print_section(KERN_ERR, "Redzone ", p + s->object_size,
                        s->inuse - s->object_size);
 
-       if (s->offset)
-               off = s->offset + sizeof(void *);
-       else
-               off = s->inuse;
+       off = get_info_end(s);
 
        if (s->flags & SLAB_STORE_USER)
                off += 2 * sizeof(struct track);
@@ -782,7 +796,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
  * object address
  *     Bytes of the object to be managed.
  *     If the freepointer may overlay the object then the free
- *     pointer is the first word of the object.
+ *     pointer is in the middle of the object.
  *
  *     Poisoning uses 0x6b (POISON_FREE) and the last byte is
  *     0xa5 (POISON_END)
@@ -816,11 +830,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
 
 static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
 {
-       unsigned long off = s->inuse;   /* The end of info */
-
-       if (s->offset)
-               /* Freepointer is placed after the object. */
-               off += sizeof(void *);
+       unsigned long off = get_info_end(s);    /* The end of info */
 
        if (s->flags & SLAB_STORE_USER)
                /* We also have user information there */
@@ -907,7 +917,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
                check_pad_bytes(s, page, p);
        }
 
-       if (!s->offset && val == SLUB_RED_ACTIVE)
+       if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE)
                /*
                 * Object and freepointer overlap. Cannot check
                 * freepointer while object is allocated.
@@ -3587,6 +3597,11 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
                 *
                 * This is the case if we do RCU, have a constructor or
                 * destructor or are poisoning the objects.
+                *
+                * The assumption that s->offset >= s->inuse means free
+                * pointer is outside of the object is used in the
+                * freeptr_outside_object() function. If that is no
+                * longer true, the function needs to be modified.
                 */
                s->offset = size;
                size += sizeof(void *);
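
freeptr_outside_object() and get_info_end() centralize a layout fact that three call sites used to open-code: the tracking info starts right after the object, plus one word when the free pointer lives outside the object. A simplified picture of the layout these helpers encode (illustrative; real offsets depend on the cache's flags):

/*
 * Simplified SLUB object layout behind the helpers above:
 *
 *   object data                  [0 .. s->object_size)
 *   redzone / padding            [s->object_size .. s->inuse)
 *   free pointer at s->offset:
 *     - inside the object (offset < inuse) when it may overlay
 *       freed object data
 *     - outside (offset >= inuse) for RCU, constructor or
 *       poisoning caches, where the object must stay intact
 *   track[ALLOC], track[FREE]    only with SLAB_STORE_USER
 *
 * get_info_end() restated without struct kmem_cache:
 */
static inline unsigned int info_end(unsigned int inuse, unsigned int offset)
{
	return offset >= inuse ? inuse + sizeof(void *) : inuse;
}
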
index b06868f..a37c87b 100644 (file)
@@ -1625,7 +1625,6 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
  * @dst:       The temp list to put pages on to.
  * @nr_scanned:        The number of pages that were scanned.
  * @sc:                The scan_control struct for this reclaim session
- * @mode:      One of the LRU isolation modes
  * @lru:       LRU list id for isolating
  *
  * returns how many pages were moved onto *@dst.
index 0ce530a..8575f5d 100644 (file)
@@ -177,18 +177,18 @@ static void vcc_destroy_socket(struct sock *sk)
 
        set_bit(ATM_VF_CLOSE, &vcc->flags);
        clear_bit(ATM_VF_READY, &vcc->flags);
-       if (vcc->dev) {
-               if (vcc->dev->ops->close)
-                       vcc->dev->ops->close(vcc);
-               if (vcc->push)
-                       vcc->push(vcc, NULL); /* atmarpd has no push */
-               module_put(vcc->owner);
-
-               while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
-                       atm_return(vcc, skb->truesize);
-                       kfree_skb(skb);
-               }
+       if (vcc->dev && vcc->dev->ops->close)
+               vcc->dev->ops->close(vcc);
+       if (vcc->push)
+               vcc->push(vcc, NULL); /* atmarpd has no push */
+       module_put(vcc->owner);
+
+       while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
+               atm_return(vcc, skb->truesize);
+               kfree_skb(skb);
+       }
 
+       if (vcc->dev && vcc->dev->ops->owner) {
                module_put(vcc->dev->ops->owner);
                atm_dev_put(vcc->dev);
        }
index 25fa3a7..ca37f5a 100644 (file)
@@ -1264,6 +1264,12 @@ static void lec_arp_clear_vccs(struct lec_arp_table *entry)
                entry->vcc = NULL;
        }
        if (entry->recv_vcc) {
+               struct atm_vcc *vcc = entry->recv_vcc;
+               struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
+
+               kfree(vpriv);
+               vcc->user_back = NULL;
+
                entry->recv_vcc->push = entry->old_recv_push;
                vcc_release_async(entry->recv_vcc, -EPIPE);
                entry->recv_vcc = NULL;
index 9694662..80b87b1 100644 (file)
@@ -893,7 +893,7 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
 
        orig_node = batadv_v_ogm_orig_get(bat_priv, ogm_packet->orig);
        if (!orig_node)
-               return;
+               goto out;
 
        neigh_node = batadv_neigh_node_get_or_create(orig_node, if_incoming,
                                                     ethhdr->h_source);
index 8f0717c..b0469d1 100644 (file)
@@ -1009,15 +1009,8 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
  */
 static u8 batadv_nc_random_weight_tq(u8 tq)
 {
-       u8 rand_val, rand_tq;
-
-       get_random_bytes(&rand_val, sizeof(rand_val));
-
        /* randomize the estimated packet loss (max TQ - estimated TQ) */
-       rand_tq = rand_val * (BATADV_TQ_MAX_VALUE - tq);
-
-       /* normalize the randomized packet loss */
-       rand_tq /= BATADV_TQ_MAX_VALUE;
+       u8 rand_tq = prandom_u32_max(BATADV_TQ_MAX_VALUE + 1 - tq);
 
        /* convert to (randomized) estimated tq again */
        return BATADV_TQ_MAX_VALUE - rand_tq;
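
The replaced arithmetic stored rand_val * (BATADV_TQ_MAX_VALUE - tq) into a u8, so the product was truncated mod 256 before the normalizing divide ever ran, badly skewing the weight; prandom_u32_max() draws uniformly from [0, BATADV_TQ_MAX_VALUE + 1 - tq) in one step. A userspace illustration of the truncation (BATADV_TQ_MAX_VALUE is 255):

/* Demonstrates the u8 truncation removed above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t tq = 10, rand_val = 200;

	uint8_t rand_tq = rand_val * (255 - tq);	/* 49000 % 256 = 104 */
	rand_tq /= 255;					/* 104 / 255 = 0 */

	unsigned int correct = rand_val * (255u - tq) / 255u;

	printf("truncated=%u correct=%u\n", rand_tq, correct);	/* 0 vs 192 */
	return 0;
}
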
index c45962d..0f962dc 100644 (file)
@@ -1150,7 +1150,7 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
        ret = batadv_parse_throughput(net_dev, buff, "throughput_override",
                                      &tp_override);
        if (!ret)
-               return count;
+               goto out;
 
        old_tp_override = atomic_read(&hard_iface->bat_v.throughput_override);
        if (old_tp_override == tp_override)
@@ -1190,6 +1190,7 @@ static ssize_t batadv_show_throughput_override(struct kobject *kobj,
 
        tp_override = atomic_read(&hard_iface->bat_v.throughput_override);
 
+       batadv_hardif_put(hard_iface);
        return sprintf(buff, "%u.%u MBit\n", tp_override / 10,
                       tp_override % 10);
 }
index 43dab40..a0f5dbe 100644 (file)
@@ -612,6 +612,7 @@ int br_process_vlan_info(struct net_bridge *br,
                                               v - 1, rtm_cmd);
                                v_change_start = 0;
                        }
+                       cond_resched();
                }
                /* v_change_start is set only if the last/whole range changed */
                if (v_change_start)
index 80f9772..899edce 100644 (file)
@@ -4283,6 +4283,11 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
                end_offset = nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_ADDR]);
                end_offset += nla_get_u64(attrs[DEVLINK_ATTR_REGION_CHUNK_LEN]);
                dump = false;
+
+               if (start_offset == end_offset) {
+                       err = 0;
+                       goto nla_put_failure;
+               }
        }
 
        err = devlink_nl_region_read_snapshot_fill(skb, devlink,
@@ -5363,6 +5368,7 @@ int devlink_health_report(struct devlink_health_reporter *reporter,
 {
        enum devlink_health_reporter_state prev_health_state;
        struct devlink *devlink = reporter->devlink;
+       unsigned long recover_ts_threshold;
 
        /* write a log message of the current error */
        WARN_ON(!msg);
@@ -5373,10 +5379,12 @@ int devlink_health_report(struct devlink_health_reporter *reporter,
        devlink_recover_notify(reporter, DEVLINK_CMD_HEALTH_REPORTER_RECOVER);
 
        /* abort if the previous error wasn't recovered */
+       recover_ts_threshold = reporter->last_recovery_ts +
+                              msecs_to_jiffies(reporter->graceful_period);
        if (reporter->auto_recover &&
            (prev_health_state != DEVLINK_HEALTH_REPORTER_STATE_HEALTHY ||
-            jiffies - reporter->last_recovery_ts <
-            msecs_to_jiffies(reporter->graceful_period))) {
+            (reporter->last_recovery_ts && reporter->recovery_count &&
+             time_is_after_jiffies(recover_ts_threshold)))) {
                trace_devlink_health_recover_aborted(devlink,
                                                     reporter->ops->name,
                                                     reporter->health_state,
index 8e33cec..2ee7bc4 100644 (file)
@@ -213,6 +213,7 @@ static void sched_send_work(struct timer_list *t)
 static void trace_drop_common(struct sk_buff *skb, void *location)
 {
        struct net_dm_alert_msg *msg;
+       struct net_dm_drop_point *point;
        struct nlmsghdr *nlh;
        struct nlattr *nla;
        int i;
@@ -231,11 +232,13 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
        nlh = (struct nlmsghdr *)dskb->data;
        nla = genlmsg_data(nlmsg_data(nlh));
        msg = nla_data(nla);
+       point = msg->points;
        for (i = 0; i < msg->entries; i++) {
-               if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
-                       msg->points[i].count++;
+               if (!memcmp(&location, &point->pc, sizeof(void *))) {
+                       point->count++;
                        goto out;
                }
+               point++;
        }
        if (msg->entries == dm_hit_limit)
                goto out;
@@ -244,8 +247,8 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
         */
        __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point));
        nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point));
-       memcpy(msg->points[msg->entries].pc, &location, sizeof(void *));
-       msg->points[msg->entries].count = 1;
+       memcpy(point->pc, &location, sizeof(void *));
+       point->count = 1;
        msg->entries++;
 
        if (!timer_pending(&data->send_timer)) {
index 39d37d0..1161392 100644 (file)
@@ -1956,6 +1956,9 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
                                   NEIGH_UPDATE_F_OVERRIDE_ISROUTER);
        }
 
+       if (protocol)
+               neigh->protocol = protocol;
+
        if (ndm->ndm_flags & NTF_EXT_LEARNED)
                flags |= NEIGH_UPDATE_F_EXT_LEARNED;
 
@@ -1969,9 +1972,6 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
                err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
                                     NETLINK_CB(skb).portid, extack);
 
-       if (protocol)
-               neigh->protocol = protocol;
-
        neigh_release(neigh);
 
 out:
index 90509c3..b714162 100644 (file)
@@ -2364,7 +2364,6 @@ static void sk_leave_memory_pressure(struct sock *sk)
        }
 }
 
-/* On 32bit arches, an skb frag is limited to 2^15 */
 #define SKB_FRAG_PAGE_ORDER    get_order(32768)
 DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);
 
index 9a271a5..d90665b 100644 (file)
@@ -459,7 +459,7 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
        list_for_each_entry(dp, &dst->ports, list) {
                err = dsa_port_setup(dp);
                if (err)
-                       goto teardown;
+                       continue;
        }
 
        return 0;
index b5c535a..a621367 100644 (file)
@@ -289,7 +289,8 @@ static void dsa_master_ndo_teardown(struct net_device *dev)
 {
        struct dsa_port *cpu_dp = dev->dsa_ptr;
 
-       dev->netdev_ops = cpu_dp->orig_ndo_ops;
+       if (cpu_dp->orig_ndo_ops)
+               dev->netdev_ops = cpu_dp->orig_ndo_ops;
        cpu_dp->orig_ndo_ops = NULL;
 }
 
index d106880..62f4ee3 100644 (file)
@@ -856,20 +856,18 @@ dsa_slave_add_cls_matchall_mirred(struct net_device *dev,
        struct dsa_port *to_dp;
        int err;
 
-       act = &cls->rule->action.entries[0];
-
        if (!ds->ops->port_mirror_add)
                return -EOPNOTSUPP;
 
-       if (!act->dev)
-               return -EINVAL;
-
        if (!flow_action_basic_hw_stats_check(&cls->rule->action,
                                              cls->common.extack))
                return -EOPNOTSUPP;
 
        act = &cls->rule->action.entries[0];
 
+       if (!act->dev)
+               return -EINVAL;
+
        if (!dsa_slave_dev_check(act->dev))
                return -EOPNOTSUPP;
 
index f4b9f7a..25b6ffb 100644 (file)
@@ -18,7 +18,7 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
 {
        struct sk_buff *skb = *pskb;
        struct hsr_port *port;
-       u16 protocol;
+       __be16 protocol;
 
        if (!skb_mac_header_was_set(skb)) {
                WARN_ONCE(1, "%s: skb invalid", __func__);
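
Declaring the local as __be16 records that it holds a network-byte-order value, so sparse can flag any comparison against a host-order constant. The usual shape of such a check, as an illustrative fragment rather than the hsr code itself:

/* Endianness-annotated comparison; illustrative fragment. */
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static bool demo_is_prp(const struct sk_buff *skb)
{
	__be16 proto = eth_hdr(skb)->h_proto;	/* wire (big-endian) order */

	return proto == htons(ETH_P_PRP);	/* not: proto == ETH_P_PRP */
}
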
index bf4ced9..b996dc1 100644 (file)
@@ -3926,10 +3926,6 @@ void tcp_parse_options(const struct net *net,
                                 */
                                break;
 #endif
-                       case TCPOPT_MPTCP:
-                               mptcp_parse_option(skb, ptr, opsize, opt_rx);
-                               break;
-
                        case TCPOPT_FASTOPEN:
                                tcp_parse_fastopen_option(
                                        opsize - TCPOLEN_FASTOPEN_BASE,
@@ -5990,9 +5986,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
                tcp_initialize_rcv_mss(sk);
 
-               if (sk_is_mptcp(sk))
-                       mptcp_rcv_synsent(sk);
-
                /* Remember, tcp_poll() does not lock socket!
                 * Change state from SYN-SENT only after copied_seq
                 * is initialized. */
index 310cbdd..8d41803 100644 (file)
@@ -1385,9 +1385,18 @@ static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
        }
        ip6_rt_copy_init(pcpu_rt, res);
        pcpu_rt->rt6i_flags |= RTF_PCPU;
+
+       if (f6i->nh)
+               pcpu_rt->sernum = rt_genid_ipv6(dev_net(dev));
+
        return pcpu_rt;
 }
 
+static bool rt6_is_valid(const struct rt6_info *rt6)
+{
+       return rt6->sernum == rt_genid_ipv6(dev_net(rt6->dst.dev));
+}
+
 /* It should be called with rcu_read_lock() acquired */
 static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
 {
@@ -1395,6 +1404,19 @@ static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
 
        pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu);
 
+       if (pcpu_rt && pcpu_rt->sernum && !rt6_is_valid(pcpu_rt)) {
+               struct rt6_info *prev, **p;
+
+               p = this_cpu_ptr(res->nh->rt6i_pcpu);
+               prev = xchg(p, NULL);
+               if (prev) {
+                       dst_dev_put(&prev->dst);
+                       dst_release(&prev->dst);
+               }
+
+               pcpu_rt = NULL;
+       }
+
        return pcpu_rt;
 }
 
@@ -2593,6 +2615,9 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
 
        rt = container_of(dst, struct rt6_info, dst);
 
+       if (rt->sernum)
+               return rt6_is_valid(rt) ? dst : NULL;
+
        rcu_read_lock();
 
        /* All IPV6 dsts are created with ->obsolete set to the value
index 4c7e0a2..37b4342 100644 (file)
@@ -27,8 +27,9 @@
 
 bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len)
 {
-       int trailing;
        unsigned int tlv_offset;
+       int max_last_entry;
+       int trailing;
 
        if (srh->type != IPV6_SRCRT_TYPE_4)
                return false;
@@ -36,7 +37,12 @@ bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len)
        if (((srh->hdrlen + 1) << 3) != len)
                return false;
 
-       if (srh->segments_left > srh->first_segment)
+       max_last_entry = (srh->hdrlen / 2) - 1;
+
+       if (srh->first_segment > max_last_entry)
+               return false;
+
+       if (srh->segments_left > srh->first_segment + 1)
                return false;
 
        tlv_offset = sizeof(*srh) + ((srh->first_segment + 1) << 4);
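
The added bounds follow from the header geometry: hdrlen counts 8-byte units after the first 8 bytes, and each segment is a 16-byte IPv6 address, i.e. two units, so an SRH with hdrlen units holds at most hdrlen / 2 segments and first_segment (RFC 8754's zero-based Last Entry) can be at most hdrlen / 2 - 1, while Segments Left may exceed Last Entry by at most one. A userspace restatement with worked values:

/* Restates the two bounds added above. */
#include <stdbool.h>
#include <stdio.h>

static bool srh_bounds_ok(int hdrlen, int first_segment, int segments_left)
{
	int max_last_entry = (hdrlen / 2) - 1;

	if (first_segment > max_last_entry)
		return false;		/* segment list overruns the header */
	return segments_left <= first_segment + 1;
}

int main(void)
{
	/* hdrlen = 6 -> room for three segments -> Last Entry <= 2 */
	printf("%d\n", srh_bounds_ok(6, 2, 3));	/* 1: valid */
	printf("%d\n", srh_bounds_ok(6, 3, 1));	/* 0: list too long */
	return 0;
}
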
index 4a7c467..45497af 100644 (file)
@@ -16,10 +16,10 @@ static bool mptcp_cap_flag_sha256(u8 flags)
        return (flags & MPTCP_CAP_FLAG_MASK) == MPTCP_CAP_HMAC_SHA256;
 }
 
-void mptcp_parse_option(const struct sk_buff *skb, const unsigned char *ptr,
-                       int opsize, struct tcp_options_received *opt_rx)
+static void mptcp_parse_option(const struct sk_buff *skb,
+                              const unsigned char *ptr, int opsize,
+                              struct mptcp_options_received *mp_opt)
 {
-       struct mptcp_options_received *mp_opt = &opt_rx->mptcp;
        u8 subtype = *ptr >> 4;
        int expected_opsize;
        u8 version;
@@ -283,12 +283,20 @@ void mptcp_parse_option(const struct sk_buff *skb, const unsigned char *ptr,
 }
 
 void mptcp_get_options(const struct sk_buff *skb,
-                      struct tcp_options_received *opt_rx)
+                      struct mptcp_options_received *mp_opt)
 {
-       const unsigned char *ptr;
        const struct tcphdr *th = tcp_hdr(skb);
-       int length = (th->doff * 4) - sizeof(struct tcphdr);
+       const unsigned char *ptr;
+       int length;
+
+       /* initialize option status */
+       mp_opt->mp_capable = 0;
+       mp_opt->mp_join = 0;
+       mp_opt->add_addr = 0;
+       mp_opt->rm_addr = 0;
+       mp_opt->dss = 0;
 
+       length = (th->doff * 4) - sizeof(struct tcphdr);
        ptr = (const unsigned char *)(th + 1);
 
        while (length > 0) {
@@ -308,7 +316,7 @@ void mptcp_get_options(const struct sk_buff *skb,
                        if (opsize > length)
                                return; /* don't parse partial options */
                        if (opcode == TCPOPT_MPTCP)
-                               mptcp_parse_option(skb, ptr, opsize, opt_rx);
+                               mptcp_parse_option(skb, ptr, opsize, mp_opt);
                        ptr += opsize - 2;
                        length -= opsize;
                }
@@ -344,28 +352,6 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
        return false;
 }
 
-void mptcp_rcv_synsent(struct sock *sk)
-{
-       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
-       struct tcp_sock *tp = tcp_sk(sk);
-
-       if (subflow->request_mptcp && tp->rx_opt.mptcp.mp_capable) {
-               subflow->mp_capable = 1;
-               subflow->can_ack = 1;
-               subflow->remote_key = tp->rx_opt.mptcp.sndr_key;
-               pr_debug("subflow=%p, remote_key=%llu", subflow,
-                        subflow->remote_key);
-       } else if (subflow->request_join && tp->rx_opt.mptcp.mp_join) {
-               subflow->mp_join = 1;
-               subflow->thmac = tp->rx_opt.mptcp.thmac;
-               subflow->remote_nonce = tp->rx_opt.mptcp.nonce;
-               pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow,
-                        subflow->thmac, subflow->remote_nonce);
-       } else if (subflow->request_mptcp) {
-               tcp_sk(sk)->is_mptcp = 0;
-       }
-}
-
 /* MP_JOIN client subflow must wait for 4th ack before sending any data:
  * TCP can't schedule delack timer before the subflow is fully established.
  * MPTCP uses the delack timer to do 3rd ack retransmissions
@@ -709,7 +695,7 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
        if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1)
                return subflow->mp_capable;
 
-       if (mp_opt->use_ack) {
+       if (mp_opt->dss && mp_opt->use_ack) {
                /* subflows are fully established as soon as we get any
                 * additional ack.
                 */
@@ -717,8 +703,6 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
                goto fully_established;
        }
 
-       WARN_ON_ONCE(subflow->can_ack);
-
        /* If the first established packet does not contain MP_CAPABLE + data
         * then fallback to TCP
         */
@@ -728,6 +712,8 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
                return false;
        }
 
+       if (unlikely(!READ_ONCE(msk->pm.server_side)))
+               pr_warn_once("bogus mpc option on established client sk");
        subflow->fully_established = 1;
        subflow->remote_key = mp_opt->sndr_key;
        subflow->can_ack = 1;
@@ -819,41 +805,41 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
 {
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct mptcp_sock *msk = mptcp_sk(subflow->conn);
-       struct mptcp_options_received *mp_opt;
+       struct mptcp_options_received mp_opt;
        struct mptcp_ext *mpext;
 
-       mp_opt = &opt_rx->mptcp;
-       if (!check_fully_established(msk, sk, subflow, skb, mp_opt))
+       mptcp_get_options(skb, &mp_opt);
+       if (!check_fully_established(msk, sk, subflow, skb, &mp_opt))
                return;
 
-       if (mp_opt->add_addr && add_addr_hmac_valid(msk, mp_opt)) {
+       if (mp_opt.add_addr && add_addr_hmac_valid(msk, &mp_opt)) {
                struct mptcp_addr_info addr;
 
-               addr.port = htons(mp_opt->port);
-               addr.id = mp_opt->addr_id;
-               if (mp_opt->family == MPTCP_ADDR_IPVERSION_4) {
+               addr.port = htons(mp_opt.port);
+               addr.id = mp_opt.addr_id;
+               if (mp_opt.family == MPTCP_ADDR_IPVERSION_4) {
                        addr.family = AF_INET;
-                       addr.addr = mp_opt->addr;
+                       addr.addr = mp_opt.addr;
                }
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
-               else if (mp_opt->family == MPTCP_ADDR_IPVERSION_6) {
+               else if (mp_opt.family == MPTCP_ADDR_IPVERSION_6) {
                        addr.family = AF_INET6;
-                       addr.addr6 = mp_opt->addr6;
+                       addr.addr6 = mp_opt.addr6;
                }
 #endif
-               if (!mp_opt->echo)
+               if (!mp_opt.echo)
                        mptcp_pm_add_addr_received(msk, &addr);
-               mp_opt->add_addr = 0;
+               mp_opt.add_addr = 0;
        }
 
-       if (!mp_opt->dss)
+       if (!mp_opt.dss)
                return;
 
        /* we can't wait for recvmsg() to update the ack_seq, otherwise
         * monodirectional flows will get stuck
         */
-       if (mp_opt->use_ack)
-               update_una(msk, mp_opt);
+       if (mp_opt.use_ack)
+               update_una(msk, &mp_opt);
 
        mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
        if (!mpext)
@@ -861,8 +847,8 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
 
        memset(mpext, 0, sizeof(*mpext));
 
-       if (mp_opt->use_map) {
-               if (mp_opt->mpc_map) {
+       if (mp_opt.use_map) {
+               if (mp_opt.mpc_map) {
                        /* this is an MP_CAPABLE carrying MPTCP data
                         * we know this map the first chunk of data
                         */
@@ -872,13 +858,14 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
                        mpext->subflow_seq = 1;
                        mpext->dsn64 = 1;
                        mpext->mpc_map = 1;
+                       mpext->data_fin = 0;
                } else {
-                       mpext->data_seq = mp_opt->data_seq;
-                       mpext->subflow_seq = mp_opt->subflow_seq;
-                       mpext->dsn64 = mp_opt->dsn64;
-                       mpext->data_fin = mp_opt->data_fin;
+                       mpext->data_seq = mp_opt.data_seq;
+                       mpext->subflow_seq = mp_opt.subflow_seq;
+                       mpext->dsn64 = mp_opt.dsn64;
+                       mpext->data_fin = mp_opt.data_fin;
                }
-               mpext->data_len = mp_opt->data_len;
+               mpext->data_len = mp_opt.data_len;
                mpext->use_map = 1;
        }
 }
index b22a63b..e1f2301 100644 (file)
@@ -1316,11 +1316,12 @@ static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
 
 static int mptcp_disconnect(struct sock *sk, int flags)
 {
-       lock_sock(sk);
-       __mptcp_clear_xmit(sk);
-       release_sock(sk);
-       mptcp_cancel_work(sk);
-       return tcp_disconnect(sk, flags);
+       /* Should never be called.
+        * inet_stream_connect() calls ->disconnect, but that
+        * refers to the subflow socket, not the mptcp one.
+        */
+       WARN_ON_ONCE(1);
+       return 0;
 }
 
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
@@ -1333,7 +1334,7 @@ static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
 #endif
 
 struct sock *mptcp_sk_clone(const struct sock *sk,
-                           const struct tcp_options_received *opt_rx,
+                           const struct mptcp_options_received *mp_opt,
                            struct request_sock *req)
 {
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
@@ -1372,9 +1373,9 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
 
        msk->write_seq = subflow_req->idsn + 1;
        atomic64_set(&msk->snd_una, msk->write_seq);
-       if (opt_rx->mptcp.mp_capable) {
+       if (mp_opt->mp_capable) {
                msk->can_ack = true;
-               msk->remote_key = opt_rx->mptcp.sndr_key;
+               msk->remote_key = mp_opt->sndr_key;
                mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
                ack_seq++;
                msk->ack_seq = ack_seq;
index a2b3048..e4ca632 100644 (file)
 #define MPTCP_WORK_RTX         2
 #define MPTCP_WORK_EOF         3
 
+struct mptcp_options_received {
+       u64     sndr_key;
+       u64     rcvr_key;
+       u64     data_ack;
+       u64     data_seq;
+       u32     subflow_seq;
+       u16     data_len;
+       u16     mp_capable : 1,
+               mp_join : 1,
+               dss : 1,
+               add_addr : 1,
+               rm_addr : 1,
+               family : 4,
+               echo : 1,
+               backup : 1;
+       u32     token;
+       u32     nonce;
+       u64     thmac;
+       u8      hmac[20];
+       u8      join_id;
+       u8      use_map:1,
+               dsn64:1,
+               data_fin:1,
+               use_ack:1,
+               ack64:1,
+               mpc_map:1,
+               __unused:2;
+       u8      addr_id;
+       u8      rm_id;
+       union {
+               struct in_addr  addr;
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+               struct in6_addr addr6;
+#endif
+       };
+       u64     ahmac;
+       u16     port;
+};
+
 static inline __be32 mptcp_option(u8 subopt, u8 len, u8 nib, u8 field)
 {
        return htonl((TCPOPT_MPTCP << 24) | (len << 16) | (subopt << 12) |
@@ -331,10 +370,10 @@ int mptcp_proto_v6_init(void);
 #endif
 
 struct sock *mptcp_sk_clone(const struct sock *sk,
-                           const struct tcp_options_received *opt_rx,
+                           const struct mptcp_options_received *mp_opt,
                            struct request_sock *req);
 void mptcp_get_options(const struct sk_buff *skb,
-                      struct tcp_options_received *opt_rx);
+                      struct mptcp_options_received *mp_opt);
 
 void mptcp_finish_connect(struct sock *sk);
 void mptcp_data_ready(struct sock *sk, struct sock *ssk);
index fabd06f..67a4e35 100644 (file)
@@ -124,12 +124,11 @@ static void subflow_init_req(struct request_sock *req,
 {
        struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
-       struct tcp_options_received rx_opt;
+       struct mptcp_options_received mp_opt;
 
        pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);
 
-       memset(&rx_opt.mptcp, 0, sizeof(rx_opt.mptcp));
-       mptcp_get_options(skb, &rx_opt);
+       mptcp_get_options(skb, &mp_opt);
 
        subflow_req->mp_capable = 0;
        subflow_req->mp_join = 0;
@@ -142,16 +141,16 @@ static void subflow_init_req(struct request_sock *req,
                return;
 #endif
 
-       if (rx_opt.mptcp.mp_capable) {
+       if (mp_opt.mp_capable) {
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);
 
-               if (rx_opt.mptcp.mp_join)
+               if (mp_opt.mp_join)
                        return;
-       } else if (rx_opt.mptcp.mp_join) {
+       } else if (mp_opt.mp_join) {
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
        }
 
-       if (rx_opt.mptcp.mp_capable && listener->request_mptcp) {
+       if (mp_opt.mp_capable && listener->request_mptcp) {
                int err;
 
                err = mptcp_token_new_request(req);
@@ -159,13 +158,13 @@ static void subflow_init_req(struct request_sock *req,
                        subflow_req->mp_capable = 1;
 
                subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
-       } else if (rx_opt.mptcp.mp_join && listener->request_mptcp) {
+       } else if (mp_opt.mp_join && listener->request_mptcp) {
                subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
                subflow_req->mp_join = 1;
-               subflow_req->backup = rx_opt.mptcp.backup;
-               subflow_req->remote_id = rx_opt.mptcp.join_id;
-               subflow_req->token = rx_opt.mptcp.token;
-               subflow_req->remote_nonce = rx_opt.mptcp.nonce;
+               subflow_req->backup = mp_opt.backup;
+               subflow_req->remote_id = mp_opt.join_id;
+               subflow_req->token = mp_opt.token;
+               subflow_req->remote_nonce = mp_opt.nonce;
                pr_debug("token=%u, remote_nonce=%u", subflow_req->token,
                         subflow_req->remote_nonce);
                if (!subflow_token_join_request(req, skb)) {
@@ -221,23 +220,47 @@ static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
 static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
 {
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+       struct mptcp_options_received mp_opt;
        struct sock *parent = subflow->conn;
+       struct tcp_sock *tp = tcp_sk(sk);
 
        subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);
 
-       if (inet_sk_state_load(parent) != TCP_ESTABLISHED) {
+       if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
                inet_sk_state_store(parent, TCP_ESTABLISHED);
                parent->sk_state_change(parent);
        }
 
-       if (subflow->conn_finished || !tcp_sk(sk)->is_mptcp)
+       /* be sure no special action on any packet other than syn-ack */
+       if (subflow->conn_finished)
+               return;
+
+       subflow->conn_finished = 1;
+
+       mptcp_get_options(skb, &mp_opt);
+       if (subflow->request_mptcp && mp_opt.mp_capable) {
+               subflow->mp_capable = 1;
+               subflow->can_ack = 1;
+               subflow->remote_key = mp_opt.sndr_key;
+               pr_debug("subflow=%p, remote_key=%llu", subflow,
+                        subflow->remote_key);
+       } else if (subflow->request_join && mp_opt.mp_join) {
+               subflow->mp_join = 1;
+               subflow->thmac = mp_opt.thmac;
+               subflow->remote_nonce = mp_opt.nonce;
+               pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow,
+                        subflow->thmac, subflow->remote_nonce);
+       } else if (subflow->request_mptcp) {
+               tp->is_mptcp = 0;
+       }
+
+       if (!tp->is_mptcp)
                return;
 
        if (subflow->mp_capable) {
                pr_debug("subflow=%p, remote_key=%llu", mptcp_subflow_ctx(sk),
                         subflow->remote_key);
                mptcp_finish_connect(sk);
-               subflow->conn_finished = 1;
 
                if (skb) {
                        pr_debug("synack seq=%u", TCP_SKB_CB(skb)->seq);
@@ -264,7 +287,6 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
                if (!mptcp_finish_join(sk))
                        goto do_reset;
 
-               subflow->conn_finished = 1;
                MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
        } else {
 do_reset:
@@ -322,7 +344,7 @@ drop:
 
 /* validate hmac received in third ACK */
 static bool subflow_hmac_valid(const struct request_sock *req,
-                              const struct tcp_options_received *rx_opt)
+                              const struct mptcp_options_received *mp_opt)
 {
        const struct mptcp_subflow_request_sock *subflow_req;
        u8 hmac[MPTCPOPT_HMAC_LEN];
@@ -339,7 +361,7 @@ static bool subflow_hmac_valid(const struct request_sock *req,
                              subflow_req->local_nonce, hmac);
 
        ret = true;
-       if (crypto_memneq(hmac, rx_opt->mptcp.hmac, sizeof(hmac)))
+       if (crypto_memneq(hmac, mp_opt->hmac, sizeof(hmac)))
                ret = false;
 
        sock_put((struct sock *)msk);
@@ -395,7 +417,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
 {
        struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
        struct mptcp_subflow_request_sock *subflow_req;
-       struct tcp_options_received opt_rx;
+       struct mptcp_options_received mp_opt;
        bool fallback_is_fatal = false;
        struct sock *new_msk = NULL;
        bool fallback = false;
@@ -403,7 +425,10 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
 
        pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
 
-       opt_rx.mptcp.mp_capable = 0;
+       /* we will need a valid 'mp_capable' value later, even when the
+        * options are not parsed
+        */
+       mp_opt.mp_capable = 0;
        if (tcp_rsk(req)->is_mptcp == 0)
                goto create_child;
 
@@ -418,22 +443,21 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
                        goto create_msk;
                }
 
-               mptcp_get_options(skb, &opt_rx);
-               if (!opt_rx.mptcp.mp_capable) {
+               mptcp_get_options(skb, &mp_opt);
+               if (!mp_opt.mp_capable) {
                        fallback = true;
                        goto create_child;
                }
 
 create_msk:
-               new_msk = mptcp_sk_clone(listener->conn, &opt_rx, req);
+               new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req);
                if (!new_msk)
                        fallback = true;
        } else if (subflow_req->mp_join) {
                fallback_is_fatal = true;
-               opt_rx.mptcp.mp_join = 0;
-               mptcp_get_options(skb, &opt_rx);
-               if (!opt_rx.mptcp.mp_join ||
-                   !subflow_hmac_valid(req, &opt_rx)) {
+               mptcp_get_options(skb, &mp_opt);
+               if (!mp_opt.mp_join ||
+                   !subflow_hmac_valid(req, &mp_opt)) {
                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
                        return NULL;
                }
@@ -473,9 +497,9 @@ create_child:
                        /* with OoO packets we can reach here without ingress
                         * mpc option
                         */
-                       ctx->remote_key = opt_rx.mptcp.sndr_key;
-                       ctx->fully_established = opt_rx.mptcp.mp_capable;
-                       ctx->can_ack = opt_rx.mptcp.mp_capable;
+                       ctx->remote_key = mp_opt.sndr_key;
+                       ctx->fully_established = mp_opt.mp_capable;
+                       ctx->can_ack = mp_opt.mp_capable;
                } else if (ctx->mp_join) {
                        struct mptcp_sock *owner;
 
@@ -499,7 +523,7 @@ out:
        /* check for expected invariant - should never trigger, just help
         * catching earlier subtle bugs
         */
-       WARN_ON_ONCE(*own_req && child && tcp_sk(child)->is_mptcp &&
+       WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
                     (!mptcp_subflow_ctx(child) ||
                      !mptcp_subflow_ctx(child)->conn));
        return child;
index 3d816a1..59151dc 100644 (file)
@@ -68,15 +68,13 @@ static bool udp_manip_pkt(struct sk_buff *skb,
                          enum nf_nat_manip_type maniptype)
 {
        struct udphdr *hdr;
-       bool do_csum;
 
        if (skb_ensure_writable(skb, hdroff + sizeof(*hdr)))
                return false;
 
        hdr = (struct udphdr *)(skb->data + hdroff);
-       do_csum = hdr->check || skb->ip_summed == CHECKSUM_PARTIAL;
+       __udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, !!hdr->check);
 
-       __udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, do_csum);
        return true;
 }
 
index 9f5dea0..916a3c7 100644 (file)
@@ -165,12 +165,12 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
 static const struct tcphdr *nf_osf_hdr_ctx_init(struct nf_osf_hdr_ctx *ctx,
                                                const struct sk_buff *skb,
                                                const struct iphdr *ip,
-                                               unsigned char *opts)
+                                               unsigned char *opts,
+                                               struct tcphdr *_tcph)
 {
        const struct tcphdr *tcp;
-       struct tcphdr _tcph;
 
-       tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), &_tcph);
+       tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), _tcph);
        if (!tcp)
                return NULL;
 
@@ -205,10 +205,11 @@ nf_osf_match(const struct sk_buff *skb, u_int8_t family,
        int fmatch = FMATCH_WRONG;
        struct nf_osf_hdr_ctx ctx;
        const struct tcphdr *tcp;
+       struct tcphdr _tcph;
 
        memset(&ctx, 0, sizeof(ctx));
 
-       tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts);
+       tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph);
        if (!tcp)
                return false;
 
@@ -265,10 +266,11 @@ bool nf_osf_find(const struct sk_buff *skb,
        const struct nf_osf_finger *kf;
        struct nf_osf_hdr_ctx ctx;
        const struct tcphdr *tcp;
+       struct tcphdr _tcph;
 
        memset(&ctx, 0, sizeof(ctx));
 
-       tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts);
+       tcp = nf_osf_hdr_ctx_init(&ctx, skb, ip, opts, &_tcph);
        if (!tcp)
                return false;
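
The two hunks above exist because skb_header_pointer() may copy the requested header into the buffer the caller supplies and return a pointer to that buffer; with _tcph declared inside nf_osf_hdr_ctx_init(), the returned pointer could dangle once the helper returned. Moving the scratch storage into the callers makes it outlive every use. Below is a minimal userspace sketch of the same ownership rule, with illustrative names rather than the kernel API:

#include <stddef.h>
#include <string.h>

struct hdr { unsigned int type; };

/* The caller owns the scratch storage, exactly as the callers above
 * now own _tcph; returning a pointer to a local inside this helper
 * would dangle as soon as the helper returned. */
static const struct hdr *parse_hdr(const void *pkt, size_t len,
                                   struct hdr *scratch)
{
        if (len < sizeof(*scratch))
                return NULL;
        memcpy(scratch, pkt, sizeof(*scratch));
        return scratch;
}
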
 
index 55bd142..0a7ecc2 100644 (file)
@@ -2070,6 +2070,7 @@ replay:
                err = PTR_ERR(block);
                goto errout;
        }
+       block->classid = parent;
 
        chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
        if (chain_index > TC_ACT_EXT_VAL_MASK) {
@@ -2612,12 +2613,10 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
                        return skb->len;
 
                parent = tcm->tcm_parent;
-               if (!parent) {
+               if (!parent)
                        q = dev->qdisc;
-                       parent = q->handle;
-               } else {
+               else
                        q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
-               }
                if (!q)
                        goto out;
                cops = q->ops->cl_ops;
@@ -2633,6 +2632,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
                block = cops->tcf_block(q, cl, NULL);
                if (!block)
                        goto out;
+               parent = block->classid;
                if (tcf_block_shared(block))
                        q = NULL;
        }
@@ -3523,6 +3523,16 @@ static void tcf_sample_get_group(struct flow_action_entry *entry,
 #endif
 }
 
+static enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats)
+{
+       if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY))
+               return FLOW_ACTION_HW_STATS_DONT_CARE;
+       else if (!hw_stats)
+               return FLOW_ACTION_HW_STATS_DISABLED;
+
+       return hw_stats;
+}
+
 int tc_setup_flow_action(struct flow_action *flow_action,
                         const struct tcf_exts *exts)
 {
@@ -3546,7 +3556,7 @@ int tc_setup_flow_action(struct flow_action *flow_action,
                if (err)
                        goto err_out_locked;
 
-               entry->hw_stats = act->hw_stats;
+               entry->hw_stats = tc_act_hw_stats(act->hw_stats);
 
                if (is_tcf_gact_ok(act)) {
                        entry->id = FLOW_ACTION_ACCEPT;
@@ -3614,7 +3624,7 @@ int tc_setup_flow_action(struct flow_action *flow_action,
                                entry->mangle.mask = tcf_pedit_mask(act, k);
                                entry->mangle.val = tcf_pedit_val(act, k);
                                entry->mangle.offset = tcf_pedit_offset(act, k);
-                               entry->hw_stats = act->hw_stats;
+                               entry->hw_stats = tc_act_hw_stats(act->hw_stats);
                                entry = &flow_action->entries[++j];
                        }
                } else if (is_tcf_csum(act)) {
index a36974e..1bcf8fb 100644 (file)
@@ -323,7 +323,8 @@ static void choke_reset(struct Qdisc *sch)
 
        sch->q.qlen = 0;
        sch->qstats.backlog = 0;
-       memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
+       if (q->tab)
+               memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
        q->head = q->tail = 0;
        red_restart(&q->vars);
 }
index 968519f..436160b 100644 (file)
@@ -416,7 +416,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
                q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
 
        if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
-               q->drop_batch_size = min(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
+               q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
 
        if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
                q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));
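
The one-character fix above is a classic inversion: min(1U, x) clamps the value to at most 1, silently discarding the configured batch size, while the intended semantics are a floor of 1. A tiny standalone illustration:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
        unsigned int requested = 64;

        /* Wrong: collapses every request down to 1. */
        printf("min(1, %u) = %u\n", requested, MIN(1u, requested));
        /* Right: enforces a floor of 1, larger values pass through. */
        printf("max(1, %u) = %u\n", requested, MAX(1u, requested));
        return 0;
}
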
index c787d4d..5a6def5 100644 (file)
@@ -637,6 +637,15 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
        if (ctl->divisor &&
            (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
                return -EINVAL;
+
+       /* slot->allot is a short, make sure quantum is not too big. */
+       if (ctl->quantum) {
+               unsigned int scaled = SFQ_ALLOT_SIZE(ctl->quantum);
+
+               if (scaled <= 0 || scaled > SHRT_MAX)
+                       return -EINVAL;
+       }
+
        if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
                                        ctl_v1->Wlog))
                return -EINVAL;
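
The guard above validates the scaled value rather than the raw input, because SFQ stores the allotment in a short and a large quantum would silently truncate. A hedged sketch of that rule, with an illustrative scaling macro standing in for SFQ_ALLOT_SIZE():

#include <errno.h>
#include <limits.h>

#define SCALE(q) ((q) << 3)     /* illustrative stand-in for SFQ_ALLOT_SIZE() */

static int check_quantum(unsigned int quantum, short *out)
{
        unsigned int scaled = SCALE(quantum);

        if (scaled == 0 || scaled > SHRT_MAX)
                return -EINVAL; /* would truncate when stored in a short */
        *out = (short)scaled;
        return 0;
}
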
index 0fb10ab..7a5e4c4 100644 (file)
@@ -169,6 +169,9 @@ static int skbprio_change(struct Qdisc *sch, struct nlattr *opt,
 {
        struct tc_skbprio_qopt *ctl = nla_data(opt);
 
+       if (opt->nla_len != nla_attr_size(sizeof(*ctl)))
+               return -EINVAL;
+
        sch->limit = ctl->limit;
        return 0;
 }
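
The added check rejects netlink options whose payload is not exactly the size of the struct the handler is about to cast it to; without it, a short attribute would be read out of bounds. The same rule in a standalone sketch (illustrative names, not the kernel nla_* API):

#include <errno.h>
#include <stddef.h>
#include <string.h>

struct qopt { unsigned int limit; };    /* stand-in for tc_skbprio_qopt */

/* Refuse to interpret a buffer as struct qopt unless the size matches. */
static int parse_qopt(const void *payload, size_t payload_len,
                      struct qopt *out)
{
        if (payload_len != sizeof(*out))
                return -EINVAL;
        memcpy(out, payload, sizeof(*out));
        return 0;
}
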
index 325a085..8350d3a 100644 (file)
@@ -880,6 +880,20 @@ EXPORT_SYMBOL_GPL(rpc_shutdown_client);
 /*
  * Free an RPC client
  */
+static void rpc_free_client_work(struct work_struct *work)
+{
+       struct rpc_clnt *clnt = container_of(work, struct rpc_clnt, cl_work);
+
+       /* These might block on processes that are allocating memory,
+        * so they cannot be called in rpciod; handle them separately
+        * here instead.
+        */
+       rpc_clnt_debugfs_unregister(clnt);
+       rpc_clnt_remove_pipedir(clnt);
+
+       kfree(clnt);
+       rpciod_down();
+}
 static struct rpc_clnt *
 rpc_free_client(struct rpc_clnt *clnt)
 {
@@ -890,17 +904,16 @@ rpc_free_client(struct rpc_clnt *clnt)
                        rcu_dereference(clnt->cl_xprt)->servername);
        if (clnt->cl_parent != clnt)
                parent = clnt->cl_parent;
-       rpc_clnt_debugfs_unregister(clnt);
-       rpc_clnt_remove_pipedir(clnt);
        rpc_unregister_client(clnt);
        rpc_free_iostats(clnt->cl_metrics);
        clnt->cl_metrics = NULL;
        xprt_put(rcu_dereference_raw(clnt->cl_xprt));
        xprt_iter_destroy(&clnt->cl_xpi);
-       rpciod_down();
        put_cred(clnt->cl_cred);
        rpc_free_clid(clnt);
-       kfree(clnt);
+
+       INIT_WORK(&clnt->cl_work, rpc_free_client_work);
+       schedule_work(&clnt->cl_work);
        return parent;
 }
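
The hunk above moves the teardown steps that may block into a work item so they no longer run in rpciod context. The general shape of that deferral, as a minimal kernel-style sketch with illustrative names:

#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_obj {
        struct work_struct work;
        /* ... resources whose release may sleep or allocate ... */
};

static void my_obj_free_work(struct work_struct *work)
{
        struct my_obj *obj = container_of(work, struct my_obj, work);

        /* Process context: blocking teardown is safe here. */
        kfree(obj);
}

static void my_obj_release(struct my_obj *obj)
{
        /* Called from a context that must not block (e.g. rpciod). */
        INIT_WORK(&obj->work, my_obj_free_work);
        schedule_work(&obj->work);
}
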
 
@@ -2808,8 +2821,7 @@ int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
        task = rpc_call_null_helper(clnt, xprt, NULL,
                        RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC|RPC_TASK_NULLCREDS,
                        &rpc_cb_add_xprt_call_ops, data);
-       if (IS_ERR(task))
-               return PTR_ERR(task);
+
        rpc_put_task(task);
 success:
        return 1;
index 4a81e69..3c627dc 100644 (file)
@@ -388,7 +388,9 @@ static int rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
        } while (nsegs);
 
 done:
-       return xdr_stream_encode_item_absent(xdr);
+       if (xdr_stream_encode_item_absent(xdr) < 0)
+               return -EMSGSIZE;
+       return 0;
 }
 
 /* Register and XDR encode the Write list. Supports encoding a list
@@ -454,7 +456,9 @@ static int rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt,
        *segcount = cpu_to_be32(nchunks);
 
 done:
-       return xdr_stream_encode_item_absent(xdr);
+       if (xdr_stream_encode_item_absent(xdr) < 0)
+               return -EMSGSIZE;
+       return 0;
 }
 
 /* Register and XDR encode the Reply chunk. Supports encoding an array
@@ -480,8 +484,11 @@ static int rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
        int nsegs, nchunks;
        __be32 *segcount;
 
-       if (wtype != rpcrdma_replych)
-               return xdr_stream_encode_item_absent(xdr);
+       if (wtype != rpcrdma_replych) {
+               if (xdr_stream_encode_item_absent(xdr) < 0)
+                       return -EMSGSIZE;
+               return 0;
+       }
 
        seg = req->rl_segments;
        nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
index cdd84c0..05c4d3a 100644 (file)
@@ -289,6 +289,7 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
        case RDMA_CM_EVENT_DISCONNECTED:
                ep->re_connect_status = -ECONNABORTED;
 disconnected:
+               xprt_force_disconnect(xprt);
                return rpcrdma_ep_destroy(ep);
        default:
                break;
@@ -1355,8 +1356,8 @@ int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
                --ep->re_send_count;
        }
 
+       trace_xprtrdma_post_send(req);
        rc = frwr_send(r_xprt, req);
-       trace_xprtrdma_post_send(req, rc);
        if (rc)
                return -ENOTCONN;
        return 0;
index 3a12fc1..73dbed0 100644 (file)
@@ -402,10 +402,11 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
                read_lock_bh(&sk->sk_callback_lock);
                ret = tipc_conn_rcv_sub(srv, con, &s);
                read_unlock_bh(&sk->sk_callback_lock);
+               if (!ret)
+                       return 0;
        }
-       if (ret < 0)
-               tipc_conn_close(con);
 
+       tipc_conn_close(con);
        return ret;
 }
 
index c98e602..e23f94a 100644 (file)
@@ -800,6 +800,8 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
                        *copied -= sk_msg_free(sk, msg);
                        tls_free_open_rec(sk);
                }
+               if (psock)
+                       sk_psock_put(sk, psock);
                return err;
        }
 more_data:
@@ -2081,8 +2083,9 @@ static void tls_data_ready(struct sock *sk)
        strp_data_ready(&ctx->strp);
 
        psock = sk_psock_get(sk);
-       if (psock && !list_empty(&psock->ingress_msg)) {
-               ctx->saved_data_ready(sk);
+       if (psock) {
+               if (!list_empty(&psock->ingress_msg))
+                       ctx->saved_data_ready(sk);
                sk_psock_put(sk, psock);
        }
 }
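
Both tls hunks repair reference-count balance: once sk_psock_get() succeeds, sk_psock_put() must run on every subsequent path, not just the one that found work to do. A standalone illustration of the discipline, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

struct res { int refs; bool has_data; };

static struct res *res_get(struct res *r) { if (r) r->refs++; return r; }
static void res_put(struct res *r) { r->refs--; }

static void notify(struct res *r)
{
        struct res *held = res_get(r);

        if (held) {
                if (held->has_data)
                        puts("deliver");
                res_put(held);  /* unconditional once the reference is held */
        }
}

int main(void)
{
        struct res r = { .refs = 0, .has_data = false };

        notify(&r);
        printf("refs = %d\n", r.refs);  /* 0: balanced even with no data */
        return 0;
}
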
index 709038a..69efc89 100644 (file)
@@ -157,7 +157,11 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
 
 void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt)
 {
+       if (pkt->tap_delivered)
+               return;
+
        vsock_deliver_tap(virtio_transport_build_skb, pkt);
+       pkt->tap_delivered = true;
 }
 EXPORT_SYMBOL_GPL(virtio_transport_deliver_tap_pkt);
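
The flag turns packet-to-tap delivery into an idempotent operation, so a packet queued to both directions is only handed to the tap once. A minimal sketch, assuming the call sites cannot race on the same packet (the kernel code relies on the packet's lifecycle for that):

#include <stdbool.h>
#include <stdio.h>

struct pkt { bool tap_delivered; };

static void do_deliver(struct pkt *pkt) { (void)pkt; puts("delivered"); }

static void deliver_tap(struct pkt *pkt)
{
        if (pkt->tap_delivered)
                return;                 /* already handed to the tap */
        do_deliver(pkt);
        pkt->tap_delivered = true;
}

int main(void)
{
        struct pkt p = { .tap_delivered = false };

        deliver_tap(&p);        /* delivers */
        deliver_tap(&p);        /* no-op */
        return 0;
}
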
 
index 8aa415a..0285aaa 100644 (file)
@@ -357,6 +357,12 @@ void x25_disconnect(struct sock *sk, int reason, unsigned char cause,
                sk->sk_state_change(sk);
                sock_set_flag(sk, SOCK_DEAD);
        }
+       if (x25->neighbour) {
+               read_lock_bh(&x25_list_lock);
+               x25_neigh_put(x25->neighbour);
+               x25->neighbour = NULL;
+               read_unlock_bh(&x25_list_lock);
+       }
 }
 
 /*
index 80b4a70..13a35f7 100644 (file)
@@ -416,7 +416,7 @@ TRACE_EVENT_FN(foo_bar_with_fn,
  * Note, TRACE_EVENT() itself is simply defined as:
  *
  * #define TRACE_EVENT(name, proto, args, tstruct, assign, printk)  \
- *  DEFINE_EVENT_CLASS(name, proto, args, tstruct, assign, printk); \
+ *  DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, printk); \
  *  DEFINE_EVENT(name, name, proto, args)
  *
  * The DEFINE_EVENT() also can be declared with conditions and reg functions:
index ba8b8d5..fbdb325 100755 (executable)
@@ -126,7 +126,7 @@ faultlinenum=$(( $(wc -l $T.oo  | cut -d" " -f1) - \
 faultline=`cat $T.dis | head -1 | cut -d":" -f2-`
 faultline=`echo "$faultline" | sed -e 's/\[/\\\[/g; s/\]/\\\]/g'`
 
-cat $T.oo | sed -e "${faultlinenum}s/^\(.*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/"
+cat $T.oo | sed -e "${faultlinenum}s/^\([^:]*:\)\(.*\)/\1\*\2\t\t<-- trapping instruction/"
 echo
 cat $T.aa
 cleanup
index f22858b..80f3542 100644 (file)
@@ -4,6 +4,7 @@ GCC_PLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
 HOST_EXTRACXXFLAGS += -I$(GCC_PLUGINS_DIR)/include -I$(src) -std=gnu++98 -fno-rtti
 HOST_EXTRACXXFLAGS += -fno-exceptions -fasynchronous-unwind-tables -ggdb
 HOST_EXTRACXXFLAGS += -Wno-narrowing -Wno-unused-variable -Wno-c++11-compat
+HOST_EXTRACXXFLAGS += -Wno-format-diag
 
 $(obj)/randomize_layout_plugin.o: $(objtree)/$(obj)/randomize_layout_seed.h
 quiet_cmd_create_randomize_layout_seed = GENSEED $@
index 17f0607..9ad76b7 100644 (file)
@@ -35,7 +35,9 @@
 #include "ggc.h"
 #include "timevar.h"
 
+#if BUILDING_GCC_VERSION < 10000
 #include "params.h"
+#endif
 
 #if BUILDING_GCC_VERSION <= 4009
 #include "pointer-set.h"
@@ -847,6 +849,7 @@ static inline gimple gimple_build_assign_with_ops(enum tree_code subcode, tree l
        return gimple_build_assign(lhs, subcode, op1, op2 PASS_MEM_STAT);
 }
 
+#if BUILDING_GCC_VERSION < 10000
 template <>
 template <>
 inline bool is_a_helper<const ggoto *>::test(const_gimple gs)
@@ -860,6 +863,7 @@ inline bool is_a_helper<const greturn *>::test(const_gimple gs)
 {
        return gs->code == GIMPLE_RETURN;
 }
+#endif
 
 static inline gasm *as_a_gasm(gimple stmt)
 {
index dbd3746..cc75eeb 100644 (file)
@@ -51,7 +51,6 @@ static void stackleak_add_track_stack(gimple_stmt_iterator *gsi, bool after)
        gimple stmt;
        gcall *stackleak_track_stack;
        cgraph_node_ptr node;
-       int frequency;
        basic_block bb;
 
        /* Insert call to void stackleak_track_stack(void) */
@@ -68,9 +67,9 @@ static void stackleak_add_track_stack(gimple_stmt_iterator *gsi, bool after)
        bb = gimple_bb(stackleak_track_stack);
        node = cgraph_get_create_node(track_function_decl);
        gcc_assert(node);
-       frequency = compute_call_stmt_bb_frequency(current_function_decl, bb);
        cgraph_create_edge(cgraph_get_node(current_function_decl), node,
-                       stackleak_track_stack, bb->count, frequency);
+                       stackleak_track_stack, bb->count,
+                       compute_call_stmt_bb_frequency(current_function_decl, bb));
 }
 
 static bool is_alloca(gimple stmt)
index 39db889..c4b9916 100644 (file)
@@ -12,7 +12,7 @@ rb_node_type = utils.CachedType("struct rb_node")
 
 def rb_first(root):
     if root.type == rb_root_type.get_type():
-        node = node.address.cast(rb_root_type.get_type().pointer())
+        node = root.address.cast(rb_root_type.get_type().pointer())
     elif root.type != rb_root_type.get_type().pointer():
         raise gdb.GdbError("Must be struct rb_root not {}".format(root.type))
 
@@ -28,7 +28,7 @@ def rb_first(root):
 
 def rb_last(root):
     if root.type == rb_root_type.get_type():
-        node = node.address.cast(rb_root_type.get_type().pointer())
+        node = root.address.cast(rb_root_type.get_type().pointer())
     elif root.type != rb_root_type.get_type().pointer():
         raise gdb.GdbError("Must be struct rb_root not {}".format(root.type))
 
index 3e8dea6..6dc3078 100644 (file)
@@ -34,7 +34,7 @@ struct sym_entry {
        unsigned int len;
        unsigned int start_pos;
        unsigned int percpu_absolute;
-       unsigned char sym[0];
+       unsigned char sym[];
 };
 
 struct addr_range {
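
sym[0] is the old GNU zero-length-array idiom; sym[] is the C99 flexible array member, which means the same thing but lets the compiler and bounds checkers understand the layout. Allocation still reserves the header plus the payload in one block, sketched here with illustrative names:

#include <stdlib.h>
#include <string.h>

struct entry {
        unsigned int len;
        unsigned char sym[];            /* C99 flexible array member */
};

static struct entry *entry_new(const char *name)
{
        size_t n = strlen(name) + 1;
        struct entry *e = malloc(sizeof(*e) + n);       /* header + payload */

        if (e) {
                e->len = (unsigned int)n;
                memcpy(e->sym, name, n);
        }
        return e;
}
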
index 0b4e321..4c037c2 100644 (file)
@@ -5842,40 +5842,60 @@ static unsigned int selinux_ipv6_postroute(void *priv,
 
 static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb)
 {
-       int err = 0;
-       u32 perm;
+       int rc = 0;
+       unsigned int msg_len;
+       unsigned int data_len = skb->len;
+       unsigned char *data = skb->data;
        struct nlmsghdr *nlh;
        struct sk_security_struct *sksec = sk->sk_security;
+       u16 sclass = sksec->sclass;
+       u32 perm;
 
-       if (skb->len < NLMSG_HDRLEN) {
-               err = -EINVAL;
-               goto out;
-       }
-       nlh = nlmsg_hdr(skb);
+       while (data_len >= nlmsg_total_size(0)) {
+               nlh = (struct nlmsghdr *)data;
+
+               /* NOTE: the nlmsg_len field isn't reliably set by some netlink
+                *       users, which means we can't reject skbs with bogus
+                *       length fields; our solution is to follow what
+                *       netlink_rcv_skb() does and simply skip processing of
+                *       messages with length fields that are clearly junk
+                */
+               if (nlh->nlmsg_len < NLMSG_HDRLEN || nlh->nlmsg_len > data_len)
+                       return 0;
 
-       err = selinux_nlmsg_lookup(sksec->sclass, nlh->nlmsg_type, &perm);
-       if (err) {
-               if (err == -EINVAL) {
+               rc = selinux_nlmsg_lookup(sclass, nlh->nlmsg_type, &perm);
+               if (rc == 0) {
+                       rc = sock_has_perm(sk, perm);
+                       if (rc)
+                               return rc;
+               } else if (rc == -EINVAL) {
+                       /* -EINVAL is a missing msg/perm mapping */
                        pr_warn_ratelimited("SELinux: unrecognized netlink"
-                              " message: protocol=%hu nlmsg_type=%hu sclass=%s"
-                              " pid=%d comm=%s\n",
-                              sk->sk_protocol, nlh->nlmsg_type,
-                              secclass_map[sksec->sclass - 1].name,
-                              task_pid_nr(current), current->comm);
-                       if (!enforcing_enabled(&selinux_state) ||
-                           security_get_allow_unknown(&selinux_state))
-                               err = 0;
+                               " message: protocol=%hu nlmsg_type=%hu sclass=%s"
+                               " pid=%d comm=%s\n",
+                               sk->sk_protocol, nlh->nlmsg_type,
+                               secclass_map[sclass - 1].name,
+                               task_pid_nr(current), current->comm);
+                       if (enforcing_enabled(&selinux_state) &&
+                           !security_get_allow_unknown(&selinux_state))
+                               return rc;
+                       rc = 0;
+               } else if (rc == -ENOENT) {
+                       /* -ENOENT is a missing socket/class mapping, ignore */
+                       rc = 0;
+               } else {
+                       return rc;
                }
 
-               /* Ignore */
-               if (err == -ENOENT)
-                       err = 0;
-               goto out;
+               /* move to the next message after applying netlink padding */
+               msg_len = NLMSG_ALIGN(nlh->nlmsg_len);
+               if (msg_len >= data_len)
+                       return 0;
+               data_len -= msg_len;
+               data += msg_len;
        }
 
-       err = sock_has_perm(sk, perm);
-out:
-       return err;
+       return rc;
 }
 
 static void ipc_init_security(struct ipc_security_struct *isec, u16 sclass)
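
The rewrite replaces a single-message check with a loop over every message in the skb, mirroring netlink_rcv_skb(): skip the padding between messages and stop quietly when a length field is clearly junk. The walking logic in isolation, as a hedged userspace sketch (the real code uses struct nlmsghdr and NLMSG_ALIGN()):

#include <stdint.h>

struct nlhdr { uint32_t nlmsg_len; uint16_t nlmsg_type; uint16_t nlmsg_flags; };

#define NL_HDRLEN   ((uint32_t)sizeof(struct nlhdr))
#define NL_ALIGN(n) (((n) + 3u) & ~3u)

/* Walk each message, skipping inter-message padding; bail out quietly
 * on a length field that is clearly bogus, as the hunk above does. */
static int walk_msgs(const unsigned char *data, uint32_t data_len,
                     int (*check)(const struct nlhdr *nlh))
{
        while (data_len >= NL_HDRLEN) {
                const struct nlhdr *nlh = (const struct nlhdr *)data;
                uint32_t msg_len;
                int rc;

                if (nlh->nlmsg_len < NL_HDRLEN || nlh->nlmsg_len > data_len)
                        return 0;
                rc = check(nlh);
                if (rc)
                        return rc;
                msg_len = NL_ALIGN(nlh->nlmsg_len);
                if (msg_len >= data_len)
                        return 0;
                data_len -= msg_len;
                data += msg_len;
        }
        return 0;
}
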
index 939a74f..da94a1b 100644 (file)
@@ -429,7 +429,7 @@ int cond_read_list(struct policydb *p, void *fp)
 
        p->cond_list = kcalloc(len, sizeof(*p->cond_list), GFP_KERNEL);
        if (!p->cond_list)
-               return rc;
+               return -ENOMEM;
 
        rc = avtab_alloc(&(p->te_cond_avtab), p->te_avtab.nel);
        if (rc)
index 59d62f0..1545f8f 100644 (file)
@@ -205,13 +205,14 @@ static snd_pcm_sframes_t calc_dst_frames(struct snd_pcm_substream *plug,
        plugin = snd_pcm_plug_first(plug);
        while (plugin && frames > 0) {
                plugin_next = plugin->next;
+               if (check_size && plugin->buf_frames &&
+                   frames > plugin->buf_frames)
+                       frames = plugin->buf_frames;
                if (plugin->dst_frames) {
                        frames = plugin->dst_frames(plugin, frames);
                        if (frames < 0)
                                return frames;
                }
-               if (check_size && frames > plugin->buf_frames)
-                       frames = plugin->buf_frames;
                plugin = plugin_next;
        }
        return frames;
@@ -225,14 +226,15 @@ static snd_pcm_sframes_t calc_src_frames(struct snd_pcm_substream *plug,
 
        plugin = snd_pcm_plug_last(plug);
        while (plugin && frames > 0) {
-               if (check_size && frames > plugin->buf_frames)
-                       frames = plugin->buf_frames;
                plugin_prev = plugin->prev;
                if (plugin->src_frames) {
                        frames = plugin->src_frames(plugin, frames);
                        if (frames < 0)
                                return frames;
                }
+               if (check_size && plugin->buf_frames &&
+                   frames > plugin->buf_frames)
+                       frames = plugin->buf_frames;
                plugin = plugin_prev;
        }
        return frames;
index e764816..b039429 100644 (file)
@@ -867,10 +867,13 @@ static void snd_miro_write(struct snd_miro *chip, unsigned char reg,
        spin_unlock_irqrestore(&chip->lock, flags);
 }
 
+static inline void snd_miro_write_mask(struct snd_miro *chip,
+               unsigned char reg, unsigned char value, unsigned char mask)
+{
+       unsigned char oldval = snd_miro_read(chip, reg);
 
-#define snd_miro_write_mask(chip, reg, value, mask)    \
-       snd_miro_write(chip, reg,                       \
-               (snd_miro_read(chip, reg) & ~(mask)) | ((value) & (mask)))
+       snd_miro_write(chip, reg, (oldval & ~mask) | (value & mask));
+}
 
 /*
  *  Proc Interface
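
This hunk and the matching opti9xx one below convert a function-like macro into a static inline. The motivation is the usual one: a macro re-evaluates its arguments at each expansion (here 'reg' and 'mask' would each be evaluated twice, once for the read and once for the write), while an inline evaluates each argument exactly once and is type checked. A self-contained illustration with stand-in register helpers:

#include <stdio.h>

static unsigned char regs[256];

static unsigned char dev_read(unsigned char reg) { return regs[reg]; }
static void dev_write(unsigned char reg, unsigned char v) { regs[reg] = v; }

/* One evaluation per argument -- unlike a macro, which would expand
 * 'reg' twice (once for the read, once for the write). */
static inline void dev_write_mask(unsigned char reg, unsigned char value,
                                  unsigned char mask)
{
        unsigned char oldval = dev_read(reg);

        dev_write(reg, (oldval & ~mask) | (value & mask));
}

int main(void)
{
        unsigned char reg = 0;

        regs[0] = 0xf0;
        dev_write_mask(reg++, 0x0f, 0x0f);      /* reg++ happens exactly once */
        printf("%#x %d\n", (unsigned)regs[0], reg);     /* 0xff 1 */
        return 0;
}
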
index d06b296..0e6d20e 100644 (file)
@@ -317,10 +317,13 @@ static void snd_opti9xx_write(struct snd_opti9xx *chip, unsigned char reg,
 }
 
 
-#define snd_opti9xx_write_mask(chip, reg, value, mask) \
-       snd_opti9xx_write(chip, reg,                    \
-               (snd_opti9xx_read(chip, reg) & ~(mask)) | ((value) & (mask)))
+static inline void snd_opti9xx_write_mask(struct snd_opti9xx *chip,
+               unsigned char reg, unsigned char value, unsigned char mask)
+{
+       unsigned char oldval = snd_opti9xx_read(chip, reg);
 
+       snd_opti9xx_write(chip, reg, (oldval & ~mask) | (value & mask));
+}
 
 static int snd_opti9xx_configure(struct snd_opti9xx *chip,
                                           long port,
index 457a2c0..0310193 100644 (file)
@@ -2078,9 +2078,10 @@ static void pcm_mmap_prepare(struct snd_pcm_substream *substream,
  * some HD-audio PCI entries are exposed without any codecs, and such devices
  * should be ignored from the beginning.
  */
-static const struct snd_pci_quirk driver_blacklist[] = {
-       SND_PCI_QUIRK(0x1462, 0xcb59, "MSI TRX40 Creator", 0),
-       SND_PCI_QUIRK(0x1462, 0xcb60, "MSI TRX40", 0),
+static const struct pci_device_id driver_blacklist[] = {
+       { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1043, 0x874f) }, /* ASUS ROG Zenith II / Strix */
+       { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1462, 0xcb59) }, /* MSI TRX40 Creator */
+       { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1462, 0xcb60) }, /* MSI TRX40 */
        {}
 };
 
@@ -2100,7 +2101,7 @@ static int azx_probe(struct pci_dev *pci,
        bool schedule_probe;
        int err;
 
-       if (snd_pci_quirk_lookup(pci, driver_blacklist)) {
+       if (pci_match_id(driver_blacklist, pci)) {
                dev_info(&pci->dev, "Skipping the blacklisted device\n");
                return -ENODEV;
        }
index 4eff160..93760a3 100644 (file)
@@ -1848,8 +1848,10 @@ static bool check_non_pcm_per_cvt(struct hda_codec *codec, hda_nid_t cvt_nid)
        /* Add a sanity check to pass the Klocwork check.
         * This should never happen.
         */
-       if (WARN_ON(spdif == NULL))
+       if (WARN_ON(spdif == NULL)) {
+               mutex_unlock(&codec->spdif_mutex);
                return true;
+       }
        non_pcm = !!(spdif->status & IEC958_AES0_NONAUDIO);
        mutex_unlock(&codec->spdif_mutex);
        return non_pcm;
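
The fix restores lock balance: the early return inside the WARN_ON() branch previously left spdif_mutex held, deadlocking the next caller. The general rule, sketched with a pthread mutex standing in for the kernel mutex:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int check(const void *obj)
{
        pthread_mutex_lock(&lock);
        if (!obj) {
                pthread_mutex_unlock(&lock);    /* was missing: later deadlock */
                return 1;
        }
        /* ... inspect obj under the lock ... */
        pthread_mutex_unlock(&lock);
        return 0;
}
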
@@ -2198,7 +2200,9 @@ static int generic_hdmi_build_controls(struct hda_codec *codec)
 
        for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
                struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
+               struct hdmi_eld *pin_eld = &per_pin->sink_eld;
 
+               pin_eld->eld_valid = false;
                hdmi_present_sense(per_pin, 0);
        }
 
index c1a85c8..c16f639 100644 (file)
@@ -7420,6 +7420,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x8560, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x1558, 0x8561, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
+       SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
index d37db32..e39dc85 100644 (file)
@@ -21,8 +21,7 @@
 enum {
        LINE6_PODHD300,
        LINE6_PODHD400,
-       LINE6_PODHD500_0,
-       LINE6_PODHD500_1,
+       LINE6_PODHD500,
        LINE6_PODX3,
        LINE6_PODX3LIVE,
        LINE6_PODHD500X,
@@ -318,8 +317,7 @@ static const struct usb_device_id podhd_id_table[] = {
        /* TODO: no need to alloc data interfaces when only audio is used */
        { LINE6_DEVICE(0x5057),    .driver_info = LINE6_PODHD300 },
        { LINE6_DEVICE(0x5058),    .driver_info = LINE6_PODHD400 },
-       { LINE6_IF_NUM(0x414D, 0), .driver_info = LINE6_PODHD500_0 },
-       { LINE6_IF_NUM(0x414D, 1), .driver_info = LINE6_PODHD500_1 },
+       { LINE6_IF_NUM(0x414D, 0), .driver_info = LINE6_PODHD500 },
        { LINE6_IF_NUM(0x414A, 0), .driver_info = LINE6_PODX3 },
        { LINE6_IF_NUM(0x414B, 0), .driver_info = LINE6_PODX3LIVE },
        { LINE6_IF_NUM(0x4159, 0), .driver_info = LINE6_PODHD500X },
@@ -352,23 +350,13 @@ static const struct line6_properties podhd_properties_table[] = {
                .ep_audio_r = 0x82,
                .ep_audio_w = 0x01,
        },
-       [LINE6_PODHD500_0] = {
+       [LINE6_PODHD500] = {
                .id = "PODHD500",
                .name = "POD HD500",
-               .capabilities   = LINE6_CAP_PCM
+               .capabilities   = LINE6_CAP_PCM | LINE6_CAP_CONTROL
                                | LINE6_CAP_HWMON,
                .altsetting = 1,
-               .ep_ctrl_r = 0x81,
-               .ep_ctrl_w = 0x01,
-               .ep_audio_r = 0x86,
-               .ep_audio_w = 0x02,
-       },
-       [LINE6_PODHD500_1] = {
-               .id = "PODHD500",
-               .name = "POD HD500",
-               .capabilities   = LINE6_CAP_PCM
-                               | LINE6_CAP_HWMON,
-               .altsetting = 0,
+               .ctrl_if = 1,
                .ep_ctrl_r = 0x81,
                .ep_ctrl_w = 0x01,
                .ep_audio_r = 0x86,
index 351ba21..848a4cc 100644 (file)
@@ -1687,7 +1687,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
 
        case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
        case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */
-       case USB_ID(0x16b0, 0x06b2): /* NuPrime DAC-10 */
+       case USB_ID(0x16d0, 0x06b2): /* NuPrime DAC-10 */
        case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
        case USB_ID(0x16d0, 0x0733): /* Furutech ADL Stratos */
        case USB_ID(0x16d0, 0x09db): /* NuPrime Audio DAC-9 */
index 16b9a42..001076c 100644 (file)
@@ -314,6 +314,7 @@ int apply_xbc(const char *path, const char *xbc_path)
        ret = delete_xbc(path);
        if (ret < 0) {
                pr_err("Failed to delete previous boot config: %d\n", ret);
+               free(data);
                return ret;
        }
 
@@ -321,24 +322,26 @@ int apply_xbc(const char *path, const char *xbc_path)
        fd = open(path, O_RDWR | O_APPEND);
        if (fd < 0) {
                pr_err("Failed to open %s: %d\n", path, fd);
+               free(data);
                return fd;
        }
        /* TODO: Ensure the @path is initramfs/initrd image */
        ret = write(fd, data, size + 8);
        if (ret < 0) {
                pr_err("Failed to apply a boot config: %d\n", ret);
-               return ret;
+               goto out;
        }
        /* Write a magic word of the bootconfig */
        ret = write(fd, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN);
        if (ret < 0) {
                pr_err("Failed to apply a boot config magic: %d\n", ret);
-               return ret;
+               goto out;
        }
+out:
        close(fd);
        free(data);
 
-       return 0;
+       return ret;
 }
 
 int usage(void)
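
The hunks above converge the error paths on a single 'out:' label so the fd and the buffer are released no matter which write fails, instead of leaking on early returns. The resulting shape, as a compact standalone sketch:

#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

static int apply(const char *path, void *data, size_t size)
{
        ssize_t ret;
        int fd;

        fd = open(path, O_RDWR | O_APPEND);
        if (fd < 0) {
                free(data);
                return fd;
        }
        ret = write(fd, data, size);
        if (ret < 0)
                goto out;
        ret = write(fd, "#MAGIC#", 7);
out:
        close(fd);              /* runs on success and on both failures */
        free(data);
        return ret < 0 ? -1 : 0;
}
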
index 11eee0b..d979ff1 100644 (file)
@@ -3,6 +3,7 @@
 #define _GNU_SOURCE
 #include <poll.h>
 #include <unistd.h>
+#include <assert.h>
 #include <signal.h>
 #include <pthread.h>
 #include <sys/epoll.h>
@@ -3136,4 +3137,149 @@ TEST(epoll59)
        close(ctx.sfd[0]);
 }
 
+enum {
+       EPOLL60_EVENTS_NR = 10,
+};
+
+struct epoll60_ctx {
+       volatile int stopped;
+       int ready;
+       int waiters;
+       int epfd;
+       int evfd[EPOLL60_EVENTS_NR];
+};
+
+static void *epoll60_wait_thread(void *ctx_)
+{
+       struct epoll60_ctx *ctx = ctx_;
+       struct epoll_event e;
+       sigset_t sigmask;
+       uint64_t v;
+       int ret;
+
+       /* Block SIGUSR1 */
+       sigemptyset(&sigmask);
+       sigaddset(&sigmask, SIGUSR1);
+       sigprocmask(SIG_SETMASK, &sigmask, NULL);
+
+       /* Prepare empty mask for epoll_pwait() */
+       sigemptyset(&sigmask);
+
+       while (!ctx->stopped) {
+               /* Mark we are ready */
+               __atomic_fetch_add(&ctx->ready, 1, __ATOMIC_ACQUIRE);
+
+               /* Start when all are ready */
+               while (__atomic_load_n(&ctx->ready, __ATOMIC_ACQUIRE) &&
+                      !ctx->stopped);
+
+               /* Account this waiter */
+               __atomic_fetch_add(&ctx->waiters, 1, __ATOMIC_ACQUIRE);
+
+               ret = epoll_pwait(ctx->epfd, &e, 1, 2000, &sigmask);
+               if (ret != 1) {
+                       /* We expect only signal delivery on stop */
+                       assert(ret < 0 && errno == EINTR && "Lost wakeup!\n");
+                       assert(ctx->stopped);
+                       break;
+               }
+
+               ret = read(e.data.fd, &v, sizeof(v));
+       /* Since we are in ET mode, each thread gets its own fd. */
+               assert(ret == sizeof(v));
+
+               __atomic_fetch_sub(&ctx->waiters, 1, __ATOMIC_RELEASE);
+       }
+
+       return NULL;
+}
+
+static inline unsigned long long msecs(void)
+{
+       struct timespec ts;
+       unsigned long long msecs;
+
+       clock_gettime(CLOCK_REALTIME, &ts);
+       msecs = ts.tv_sec * 1000ull;
+       msecs += ts.tv_nsec / 1000000ull;
+
+       return msecs;
+}
+
+static inline int count_waiters(struct epoll60_ctx *ctx)
+{
+       return __atomic_load_n(&ctx->waiters, __ATOMIC_ACQUIRE);
+}
+
+TEST(epoll60)
+{
+       struct epoll60_ctx ctx = { 0 };
+       pthread_t waiters[ARRAY_SIZE(ctx.evfd)];
+       struct epoll_event e;
+       int i, n, ret;
+
+       signal(SIGUSR1, signal_handler);
+
+       ctx.epfd = epoll_create1(0);
+       ASSERT_GE(ctx.epfd, 0);
+
+       /* Create event fds */
+       for (i = 0; i < ARRAY_SIZE(ctx.evfd); i++) {
+               ctx.evfd[i] = eventfd(0, EFD_NONBLOCK);
+               ASSERT_GE(ctx.evfd[i], 0);
+
+               e.events = EPOLLIN | EPOLLET;
+               e.data.fd = ctx.evfd[i];
+               ASSERT_EQ(epoll_ctl(ctx.epfd, EPOLL_CTL_ADD, ctx.evfd[i], &e), 0);
+       }
+
+       /* Create waiter threads */
+       for (i = 0; i < ARRAY_SIZE(waiters); i++)
+               ASSERT_EQ(pthread_create(&waiters[i], NULL,
+                                        epoll60_wait_thread, &ctx), 0);
+
+       for (i = 0; i < 300; i++) {
+               uint64_t v = 1, ms;
+
+               /* Wait for all to be ready */
+               while (__atomic_load_n(&ctx.ready, __ATOMIC_ACQUIRE) !=
+                      ARRAY_SIZE(ctx.evfd))
+                       ;
+
+               /* Steady, go */
+               __atomic_fetch_sub(&ctx.ready, ARRAY_SIZE(ctx.evfd),
+                                  __ATOMIC_ACQUIRE);
+
+               /* Wait until all have gone to the kernel */
+               while (count_waiters(&ctx) != ARRAY_SIZE(ctx.evfd))
+                       ;
+
+               /* 1ms should be enough to schedule away */
+               usleep(1000);
+
+               /* Quickly signal all handles at once */
+               for (n = 0; n < ARRAY_SIZE(ctx.evfd); n++) {
+                       ret = write(ctx.evfd[n], &v, sizeof(v));
+                       ASSERT_EQ(ret, sizeof(v));
+               }
+
+               /* Busy loop for 1s and wait for all waiters to wake up */
+               ms = msecs();
+               while (count_waiters(&ctx) && msecs() < ms + 1000)
+                       ;
+
+               ASSERT_EQ(count_waiters(&ctx), 0);
+       }
+       ctx.stopped = 1;
+       /* Stop waiters */
+       for (i = 0; i < ARRAY_SIZE(waiters); i++)
+               ret = pthread_kill(waiters[i], SIGUSR1);
+       for (i = 0; i < ARRAY_SIZE(waiters); i++)
+               pthread_join(waiters[i], NULL);
+
+       for (i = 0; i < ARRAY_SIZE(waiters); i++)
+               close(ctx.evfd[i]);
+       close(ctx.epfd);
+}
+
 TEST_HARNESS_MAIN
index 063ecb2..a4605b5 100755 (executable)
@@ -17,6 +17,7 @@ echo "                -v|--verbose Increase verbosity of test messages"
 echo "         -vv        Alias of -v -v (Show all results in stdout)"
 echo "         -vvv       Alias of -v -v -v (Show all commands immediately)"
 echo "         --fail-unsupported Treat UNSUPPORTED as a failure"
+echo "         --fail-unresolved Treat UNRESOLVED as a failure"
 echo "         -d|--debug Debug mode (trace all shell commands)"
 echo "         -l|--logdir <dir> Save logs on the <dir>"
 echo "                     If <dir> is -, all logs output in console only"
@@ -29,8 +30,25 @@ err_ret=1
 # kselftest skip code is 4
 err_skip=4
 
+# cgroup RT scheduling prevents chrt commands from succeeding, which
+# induces failures in the wakeup tests.  Disable it for the duration of
+# the tests.
+
+readonly sched_rt_runtime=/proc/sys/kernel/sched_rt_runtime_us
+
+sched_rt_runtime_orig=$(cat $sched_rt_runtime)
+
+setup() {
+  echo -1 > $sched_rt_runtime
+}
+
+cleanup() {
+  echo $sched_rt_runtime_orig > $sched_rt_runtime
+}
+
 errexit() { # message
   echo "Error: $1" 1>&2
+  cleanup
   exit $err_ret
 }
 
@@ -39,6 +57,8 @@ if [ `id -u` -ne 0 ]; then
   errexit "this must be run by root user"
 fi
 
+setup
+
 # Utilities
 absdir() { # file_path
   (cd `dirname $1`; pwd)
@@ -93,6 +113,10 @@ parse_opts() { # opts
       UNSUPPORTED_RESULT=1
       shift 1
     ;;
+    --fail-unresolved)
+      UNRESOLVED_RESULT=1
+      shift 1
+    ;;
     --logdir|-l)
       LOG_DIR=$2
       shift 2
@@ -157,6 +181,7 @@ KEEP_LOG=0
 DEBUG=0
 VERBOSE=0
 UNSUPPORTED_RESULT=0
+UNRESOLVED_RESULT=0
 STOP_FAILURE=0
 # Parse command-line options
 parse_opts $*
@@ -235,6 +260,7 @@ TOTAL_RESULT=0
 
 INSTANCE=
 CASENO=0
+
 testcase() { # testfile
   CASENO=$((CASENO+1))
   desc=`grep "^#[ \t]*description:" $1 | cut -f2 -d:`
@@ -260,7 +286,7 @@ eval_result() { # sigval
     $UNRESOLVED)
       prlog "  [${color_blue}UNRESOLVED${color_reset}]"
       UNRESOLVED_CASES="$UNRESOLVED_CASES $CASENO"
-      return 1 # this is a kind of bug.. something happened.
+      return $UNRESOLVED_RESULT # depends on use case
     ;;
     $UNTESTED)
       prlog "  [${color_blue}UNTESTED${color_reset}]"
@@ -273,7 +299,7 @@ eval_result() { # sigval
       return $UNSUPPORTED_RESULT # depends on use case
     ;;
     $XFAIL)
-      prlog "  [${color_red}XFAIL${color_reset}]"
+      prlog "  [${color_green}XFAIL${color_reset}]"
       XFAILED_CASES="$XFAILED_CASES $CASENO"
       return 0
     ;;
@@ -406,5 +432,7 @@ prlog "# of unsupported: " `echo $UNSUPPORTED_CASES | wc -w`
 prlog "# of xfailed: " `echo $XFAILED_CASES | wc -w`
 prlog "# of undefined(test bug): " `echo $UNDEFINED_CASES | wc -w`
 
+cleanup
+
 # if no error, return 0
 exit $TOTAL_RESULT
index aefab0c..f598538 100644 (file)
@@ -10,10 +10,7 @@ if ! grep -q function_graph available_tracers; then
     exit_unsupported
 fi
 
-if [ ! -f set_ftrace_filter ]; then
-    echo "set_ftrace_filter not found? Is dynamic ftrace not set?"
-    exit_unsupported
-fi
+check_filter_file set_ftrace_filter
 
 do_reset() {
     if [ -e /proc/sys/kernel/stack_tracer_enabled ]; then
index c8a5209..d610f47 100644 (file)
@@ -9,6 +9,8 @@ if ! grep -q function_graph available_tracers; then
     exit_unsupported
 fi
 
+check_filter_file set_ftrace_filter
+
 fail() { # msg
     echo $1
     exit_fail
index f4e92af..28936f4 100644 (file)
@@ -9,6 +9,8 @@ if ! grep -q function available_tracers; then
     exit_unsupported
 fi
 
+check_filter_file set_ftrace_filter
+
 disable_tracing
 clear_trace
 
index 8aa46a2..71db68a 100644 (file)
@@ -15,10 +15,7 @@ if [ ! -f set_ftrace_notrace_pid ]; then
     exit_unsupported
 fi
 
-if [ ! -f set_ftrace_filter ]; then
-    echo "set_ftrace_filter not found? Is function tracer not set?"
-    exit_unsupported
-fi
+check_filter_file set_ftrace_filter
 
 do_function_fork=1
 
index f2ee1e8..d58403c 100644 (file)
@@ -16,10 +16,7 @@ if [ ! -f set_ftrace_pid ]; then
     exit_unsupported
 fi
 
-if [ ! -f set_ftrace_filter ]; then
-    echo "set_ftrace_filter not found? Is function tracer not set?"
-    exit_unsupported
-fi
+check_filter_file set_ftrace_filter
 
 do_function_fork=1
 
index 1a52f28..b2aff78 100644 (file)
@@ -3,7 +3,7 @@
 # description: ftrace - stacktrace filter command
 # flags: instance
 
-[ ! -f set_ftrace_filter ] && exit_unsupported
+check_filter_file set_ftrace_filter
 
 echo _do_fork:stacktrace >> set_ftrace_filter
 
index ca2ffd7..e9b1fd5 100644 (file)
 #
 
 # The triggers are set within the set_ftrace_filter file
-if [ ! -f set_ftrace_filter ]; then
-    echo "set_ftrace_filter not found? Is dynamic ftrace not set?"
-    exit_unsupported
-fi
+check_filter_file set_ftrace_filter
 
 do_reset() {
     reset_ftrace_filter
index 9330c87..1a4b4a4 100644 (file)
@@ -2,7 +2,7 @@
 # SPDX-License-Identifier: GPL-2.0
 # description: ftrace - function trace on module
 
-[ ! -f set_ftrace_filter ] && exit_unsupported
+check_filter_file set_ftrace_filter
 
 : "mod: allows to filter a non exist function"
 echo 'non_exist_func:mod:non_exist_module' > set_ftrace_filter
index dfbae63..a3dadb6 100644 (file)
@@ -18,10 +18,7 @@ if ! grep -q function_graph available_tracers; then
     exit_unsupported;
 fi
 
-if [ ! -f set_ftrace_filter ]; then
-    echo "set_ftrace_filter not found? Is dynamic ftrace not set?"
-    exit_unsupported
-fi
+check_filter_file set_ftrace_filter
 
 if [ ! -f function_profile_enabled ]; then
     echo "function_profile_enabled not found, function profiling enabled?"
index 51f6e61..70bad44 100644 (file)
 #
 
 # The triggers are set within the set_ftrace_filter file
-if [ ! -f set_ftrace_filter ]; then
-    echo "set_ftrace_filter not found? Is dynamic ftrace not set?"
-    exit_unsupported
-fi
+check_filter_file set_ftrace_filter
 
 fail() { # mesg
     echo $1
index b414f0e..51e9e80 100644 (file)
@@ -8,6 +8,8 @@ if [ ! -f stack_trace ]; then
   exit_unsupported
 fi
 
+check_filter_file stack_trace_filter
+
 echo > stack_trace_filter
 echo 0 > stack_max_size
 echo 1 > /proc/sys/kernel/stack_tracer_enabled
index 1947387..3ed173f 100644 (file)
 #
 
 # The triggers are set within the set_ftrace_filter file
-if [ ! -f set_ftrace_filter ]; then
-    echo "set_ftrace_filter not found? Is dynamic ftrace not set?"
-    exit_unsupported
-fi
+check_filter_file set_ftrace_filter
 
 fail() { # mesg
     echo $1
index 5d45505..61a3c7e 100644 (file)
@@ -1,3 +1,9 @@
+check_filter_file() { # check filter file introduced by dynamic ftrace
+    if [ ! -f "$1" ]; then
+        echo "$1 not found? Is dynamic ftrace not set?"
+        exit_unsupported
+    fi
+}
 
 clear_trace() { # reset trace output
     echo > trace
index 1bcb67d..81490ec 100644 (file)
@@ -38,7 +38,7 @@ for width in 64 32 16 8; do
   echo 0 > events/kprobes/testprobe/enable
 
   : "Confirm the arguments is recorded in given types correctly"
-  ARGS=`grep "testprobe" trace | sed -e 's/.* arg1=\(.*\) arg2=\(.*\) arg3=\(.*\) arg4=\(.*\)/\1 \2 \3 \4/'`
+  ARGS=`grep "testprobe" trace | head -n 1 | sed -e 's/.* arg1=\(.*\) arg2=\(.*\) arg3=\(.*\) arg4=\(.*\)/\1 \2 \3 \4/'`
   check_types $ARGS $width
 
   : "Clear event for next loop"
index 7650a82..df50728 100644 (file)
@@ -5,6 +5,8 @@
 [ -f kprobe_events ] || exit_unsupported # this is configurable
 grep "function" available_tracers || exit_unsupported # this is configurable
 
+check_filter_file set_ftrace_filter
+
 # prepare
 echo nop > current_tracer
 echo _do_fork > set_ftrace_filter
index 0bb8061..32bdc97 100644 (file)
@@ -1,13 +1,13 @@
 # SPDX-License-Identifier: GPL-2.0
 
-MOUNT_CFLAGS := $(shell pkg-config --cflags mount 2>/dev/null)
-MOUNT_LDLIBS := $(shell pkg-config --libs mount 2>/dev/null)
-ifeq ($(MOUNT_LDLIBS),)
-MOUNT_LDLIBS := -lmount -I/usr/include/libmount
+VAR_CFLAGS := $(shell pkg-config --cflags mount 2>/dev/null)
+VAR_LDLIBS := $(shell pkg-config --libs mount 2>/dev/null)
+ifeq ($(VAR_LDLIBS),)
+VAR_LDLIBS := -lmount -I/usr/include/libmount
 endif
 
-CFLAGS += -O2 -g -std=gnu99 -Wall -I../../../../usr/include/ $(MOUNT_CFLAGS)
-LDLIBS += $(MOUNT_LDLIBS)
+CFLAGS += -O2 -g -std=gnu99 -Wall -I../../../../usr/include/ $(VAR_CFLAGS)
+LDLIBS += $(VAR_LDLIBS)
 
 TEST_PROGS := gpio-mockup.sh
 TEST_FILES := gpio-mockup-sysfs.sh
index 7340fd6..39f0fa2 100644 (file)
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 CFLAGS := $(CFLAGS) -Wall -D_GNU_SOURCE
-LDLIBS := $(LDLIBS) -lm
+LDLIBS += -lm
 
 uname_M := $(shell uname -m 2>/dev/null || echo not)
 ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
diff --git a/tools/testing/selftests/kselftest_deps.sh b/tools/testing/selftests/kselftest_deps.sh
new file mode 100755 (executable)
index 0000000..bbc0464
--- /dev/null
@@ -0,0 +1,272 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# kselftest_deps.sh
+#
+# Checks for kselftest build dependencies on the build system.
+# Copyright (c) 2020 Shuah Khan <skhan@linuxfoundation.org>
+#
+#
+
+usage()
+{
+
+echo -e "Usage: $0 -[p] <compiler> [test_name]\n"
+echo -e "\tkselftest_deps.sh [-p] gcc"
+echo -e "\tkselftest_deps.sh [-p] gcc vm"
+echo -e "\tkselftest_deps.sh [-p] aarch64-linux-gnu-gcc"
+echo -e "\tkselftest_deps.sh [-p] aarch64-linux-gnu-gcc vm\n"
+echo "- Should be run in selftests directory in the kernel repo."
+echo "- Checks if Kselftests can be built/cross-built on a system."
+echo "- Parses all test/sub-test Makefile to find library dependencies."
+echo "- Runs compile test on a trivial C file with LDLIBS specified"
+echo "  in the test Makefiles to identify missing library dependencies."
+echo "- Prints suggested target list for a system filtering out tests"
+echo "  failed the build dependency check from the TARGETS in Selftests"
+echo "  main Makefile when optional -p is specified."
+echo "- Prints pass/fail dependency check for each tests/sub-test."
+echo "- Prints pass/fail targets and libraries."
+echo "- Default: runs dependency checks on all tests."
+echo "- Optional test name can be specified to check dependencies for it."
+exit 1
+
+}
+
+# Start main()
+main()
+{
+
+base_dir=`pwd`
+# Make sure we're in the selftests top-level directory.
+if [ $(basename "$base_dir") !=  "selftests" ]; then
+       echo -e "\tPlease run $0 in"
+       echo -e "\ttools/testing/selftests directory ..."
+       exit 1
+fi
+
+print_targets=0
+
+while getopts "p" arg; do
+    case $arg in
+        p)
+               print_targets=1
+       shift;;
+    esac
+done
+
+if [ $# -eq 0 ]
+then
+       usage
+fi
+
+# Compiler
+CC=$1
+
+tmp_file=$(mktemp).c
+trap "rm -f $tmp_file.o $tmp_file $tmp_file.bin" EXIT
+#echo $tmp_file
+
+pass=$(mktemp).out
+trap "rm -f $pass" EXIT
+#echo $pass
+
+fail=$(mktemp).out
+trap "rm -f $fail" EXIT
+#echo $fail
+
+# Generate a tmp source file for the compile test
+cat << "EOF" > $tmp_file
+int main()
+{
+}
+EOF
+
+# Save results
+total_cnt=0
+fail_trgts=()
+fail_libs=()
+fail_cnt=0
+pass_trgts=()
+pass_libs=()
+pass_cnt=0
+
+# Get all TARGETS from selftests Makefile
+targets=$(egrep "^TARGETS +|^TARGETS =" Makefile | cut -d "=" -f2)
+
+# Single test case
+if [ $# -eq 2 ]
+then
+       test=$2/Makefile
+
+       l1_test $test
+       l2_test $test
+       l3_test $test
+
+       print_results $1 $2
+       exit $?
+fi
+
+# Level 1: LDLIBS set static.
+#
+# Find all LDLIBS set statically for all executables built by a Makefile
+# and filter out VAR_LDLIBS to discard the following:
+#      gpio/Makefile:LDLIBS += $(VAR_LDLIBS)
+# Append space at the end of the list to append more tests.
+
+l1_tests=$(grep -r --include=Makefile "^LDLIBS" | \
+               grep -v "VAR_LDLIBS" | awk -F: '{print $1}')
+
+# Level 2: LDLIBS set dynamically.
+#
+# Level 2
+# Some tests have multiple valid LDLIBS lines for individual sub-tests
+# that need dependency checks. Find them and append them to the tests
+# e.g: vm/Makefile:$(OUTPUT)/userfaultfd: LDLIBS += -lpthread
+# Filter out VAR_LDLIBS to discard the following:
+#      memfd/Makefile:$(OUTPUT)/fuse_mnt: LDLIBS += $(VAR_LDLIBS)
+# Append space at the end of the list to append more tests.
+
+l2_tests=$(grep -r --include=Makefile ": LDLIBS" | \
+               grep -v "VAR_LDLIBS" | awk -F: '{print $1}')
+
+# Level 3
+# gpio, memfd and others use pkg-config to find mount and fuse libs
+# respectively and save them in VAR_LDLIBS. If pkg-config doesn't find
+# any, VAR_LDLIBS is set to a default value.
+# Use the default value and filter out pkg-config for the dependency check.
+# e.g:
+# gpio/Makefile
+#      VAR_LDLIBS := $(shell pkg-config --libs mount 2>/dev/null)
+# memfd/Makefile
+#      VAR_LDLIBS := $(shell pkg-config fuse --libs 2>/dev/null)
+
+l3_tests=$(grep -r --include=Makefile "^VAR_LDLIBS" | \
+               grep -v "pkg-config" | awk -F: '{print $1}')
+
+#echo $l1_tests
+#echo $l2_tests
+#echo $l3_tests
+
+all_tests
+print_results $1 $2
+
+exit $?
+}
+# end main()
+
+all_tests()
+{
+       for test in $l1_tests; do
+               l1_test $test
+       done
+
+       for test in $l2_tests; do
+               l2_test $test
+       done
+
+       for test in $l3_tests; do
+               l3_test $test
+       done
+}
+
+# Use the same parsing as for l1_tests, and pick the libraries this time.
+l1_test()
+{
+       test_libs=$(grep --include=Makefile "^LDLIBS" $test | \
+                       grep -v "VAR_LDLIBS" | \
+                       sed -e 's/\:/ /' | \
+                       sed -e 's/+/ /' | cut -d "=" -f 2)
+
+       check_libs $test $test_libs
+}
+
+# Use the same parsing as for l2_tests, and pick the libraries this time.
+l2_test()
+{
+       test_libs=$(grep --include=Makefile ": LDLIBS" $test | \
+                       grep -v "VAR_LDLIBS" | \
+                       sed -e 's/\:/ /' | sed -e 's/+/ /' | \
+                       cut -d "=" -f 2)
+
+       check_libs $test $test_libs
+}
+
+l3_test()
+{
+       test_libs=$(grep --include=Makefile "^VAR_LDLIBS" $test | \
+                       grep -v "pkg-config" | sed -e 's/\:/ /' |
+                       sed -e 's/+/ /' | cut -d "=" -f 2)
+
+       check_libs $test $test_libs
+}
+
+check_libs()
+{
+
+if [[ ! -z "${test_libs// }" ]]
+then
+
+       #echo $test_libs
+
+       for lib in $test_libs; do
+
+       let total_cnt+=1
+       $CC -o $tmp_file.bin $lib $tmp_file > /dev/null 2>&1
+       if [ $? -ne 0 ]; then
+               echo "FAIL: $test dependency check: $lib" >> $fail
+               let fail_cnt+=1
+               fail_libs+="$lib "
+               fail_target=$(echo "$test" | cut -d "/" -f1)
+               fail_trgts+="$fail_target "
+               targets=$(echo "$targets" | grep -v "$fail_target")
+       else
+               echo "PASS: $test dependency check passed $lib" >> $pass
+               let pass_cnt+=1
+               pass_libs+="$lib "
+               pass_trgts+="$(echo "$test" | cut -d "/" -f1) "
+       fi
+
+       done
+fi
+}
+
+print_results()
+{
+       echo -e "========================================================";
+       echo -e "Kselftest Dependency Check for [$0 $1 $2] results..."
+
+       if [ $print_targets -ne 0 ]
+       then
+       echo -e "Suggested Selftest Targets for your configuration:"
+       echo -e "$targets";
+       fi
+
+       echo -e "========================================================";
+       echo -e "Checked tests defining LDLIBS dependencies"
+       echo -e "--------------------------------------------------------";
+       echo -e "Total tests with Dependencies:"
+       echo -e "$total_cnt Pass: $pass_cnt Fail: $fail_cnt";
+
+       if [ $pass_cnt -ne 0 ]; then
+       echo -e "--------------------------------------------------------";
+       cat $pass
+       echo -e "--------------------------------------------------------";
+       echo -e "Targets passed build dependency check on system:"
+       echo -e "$(echo "$pass_trgts" | xargs -n1 | sort -u | xargs)"
+       fi
+
+       if [ $fail_cnt -ne 0 ]; then
+       echo -e "--------------------------------------------------------";
+       cat $fail
+       echo -e "--------------------------------------------------------";
+       echo -e "Targets failed build dependency check on system:"
+       echo -e "$(echo "$fail_trgts" | xargs -n1 | sort -u | xargs)"
+       echo -e "--------------------------------------------------------";
+       echo -e "Missing libraries system"
+       echo -e "$(echo "$fail_libs" | xargs -n1 | sort -u | xargs)"
+       fi
+
+       echo -e "--------------------------------------------------------";
+       echo -e "========================================================";
+}
+
+main "$@"
index 712a2dd..b728c0a 100644 (file)
@@ -5,8 +5,34 @@ all:
 
 top_srcdir = ../../../..
 KSFT_KHDR_INSTALL := 1
+
+# For cross-builds to work, UNAME_M has to map to ARCH and arch specific
+# directories and targets in this Makefile. "uname -m" doesn't map to
+# arch specific sub-directory names.
+#
+# The UNAME_M variable is used to run the compiles pointing to the right arch
+# directories and build the right targets for these supported architectures.
+#
+# TEST_GEN_PROGS and LIBKVM are set using UNAME_M variable.
+# LINUX_TOOL_ARCH_INCLUDE is set using ARCH variable.
+#
+# x86_64 targets are named to include x86_64 as a suffix and directories
+# for includes are in x86_64 sub-directory. s390x and aarch64 follow the
+# same convention. "uname -m" doesn't result in the correct mapping for
+# s390x and aarch64.
+#
+# No change necessary for x86_64
 UNAME_M := $(shell uname -m)
 
+# Set UNAME_M for arm64 compile/install to work
+ifeq ($(ARCH),arm64)
+       UNAME_M := aarch64
+endif
+# Set UNAME_M for s390x compile/install to work
+ifeq ($(ARCH),s390)
+       UNAME_M := s390x
+endif
+
 LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c lib/test_util.c
 LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c
 LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c
@@ -53,7 +79,7 @@ LIBKVM += $(LIBKVM_$(UNAME_M))
 INSTALL_HDR_PATH = $(top_srcdir)/usr
 LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
 LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
-LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/x86/include
+LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include
 CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
        -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
        -I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \
@@ -84,6 +110,7 @@ $(LIBKVM_OBJ): $(OUTPUT)/%.o: %.c
 $(OUTPUT)/libkvm.a: $(LIBKVM_OBJ)
        $(AR) crs $@ $^
 
+x := $(shell mkdir -p $(sort $(dir $(TEST_GEN_PROGS))))
 all: $(STATIC_LIBS)
 $(TEST_GEN_PROGS): $(STATIC_LIBS)
 
index d8f4d6b..a034438 100644 (file)
@@ -219,8 +219,8 @@ struct hv_enlightened_vmcs {
 #define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK \
                (~((1ull << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT) - 1))
 
-struct hv_enlightened_vmcs *current_evmcs;
-struct hv_vp_assist_page *current_vp_assist;
+extern struct hv_enlightened_vmcs *current_evmcs;
+extern struct hv_vp_assist_page *current_vp_assist;
 
 int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id);
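
Defining (rather than declaring) a variable in a header creates one definition per translation unit that includes it; newer toolchains (GCC 10 defaults to -fno-common) turn that into a multiple-definition link error. The fix is the standard split, shown here in miniature with an illustrative name:

/* state.h */
extern int current_state;       /* declaration only; safe in every includer */

/* state.c */
int current_state;              /* the single definition */
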
 
index 6f17f69..4ae104f 100644 (file)
@@ -17,6 +17,9 @@
 
 bool enable_evmcs;
 
+struct hv_enlightened_vmcs *current_evmcs;
+struct hv_vp_assist_page *current_vp_assist;
+
 struct eptPageTableEntry {
        uint64_t readable:1;
        uint64_t writable:1;
index 187b14c..4da8b56 100644 (file)
@@ -8,11 +8,21 @@ TEST_GEN_PROGS := memfd_test
 TEST_PROGS := run_fuse_test.sh run_hugetlbfs_test.sh
 TEST_GEN_FILES := fuse_test fuse_mnt
 
-fuse_mnt.o: CFLAGS += $(shell pkg-config fuse --cflags)
+VAR_CFLAGS := $(shell pkg-config fuse --cflags 2>/dev/null)
+ifeq ($(VAR_CFLAGS),)
+VAR_CFLAGS := -D_FILE_OFFSET_BITS=64 -I/usr/include/fuse
+endif
+
+VAR_LDLIBS := $(shell pkg-config fuse --libs 2>/dev/null)
+ifeq ($(VAR_LDLIBS),)
+VAR_LDLIBS := -lfuse -pthread
+endif
+
+fuse_mnt.o: CFLAGS += $(VAR_CFLAGS)
 
 include ../lib.mk
 
-$(OUTPUT)/fuse_mnt: LDLIBS += $(shell pkg-config fuse --libs)
+$(OUTPUT)/fuse_mnt: LDLIBS += $(VAR_LDLIBS)
 
 $(OUTPUT)/memfd_test: memfd_test.c common.c
 $(OUTPUT)/fuse_test: fuse_test.c common.c
index 35505b3..4555f88 100644 (file)
@@ -165,9 +165,10 @@ void *child_thread(void *arg)
                        socklen_t zc_len = sizeof(zc);
                        int res;
 
+                       memset(&zc, 0, sizeof(zc));
                        zc.address = (__u64)((unsigned long)addr);
                        zc.length = chunk_size;
-                       zc.recv_skip_hint = 0;
+
                        res = getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE,
                                         &zc, &zc_len);
                        if (res == -1)
@@ -281,12 +282,14 @@ static void setup_sockaddr(int domain, const char *str_addr,
 static void do_accept(int fdlisten)
 {
        pthread_attr_t attr;
+       int rcvlowat;
 
        pthread_attr_init(&attr);
        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
 
+       rcvlowat = chunk_size;
        if (setsockopt(fdlisten, SOL_SOCKET, SO_RCVLOWAT,
-                      &chunk_size, sizeof(chunk_size)) == -1) {
+                      &rcvlowat, sizeof(rcvlowat)) == -1) {
                perror("setsockopt SO_RCVLOWAT");
        }
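
memset()ing the whole struct before the getsockopt() call keeps the request well defined: padding and any fields added to the ABI later read as zero instead of stack garbage, which also makes dropping the explicit recv_skip_hint assignment safe. A sketch of the preparation step with illustrative names:

#include <string.h>

struct zerocopy_args {
        unsigned long long address;
        unsigned int length;
        unsigned int recv_skip_hint;    /* and any fields added later */
};

static void prep_args(struct zerocopy_args *zc, void *addr,
                      unsigned int chunk_size)
{
        memset(zc, 0, sizeof(*zc));     /* every field + padding defined */
        zc->address = (unsigned long long)(unsigned long)addr;
        zc->length = chunk_size;
}
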
 
index 936e1ca..17a1f53 100755 (executable)
@@ -48,8 +48,11 @@ cleanup() {
        exec 2>/dev/null
        printf "$orig_message_cost" > /proc/sys/net/core/message_cost
        ip0 link del dev wg0
+       ip0 link del dev wg1
        ip1 link del dev wg0
+       ip1 link del dev wg1
        ip2 link del dev wg0
+       ip2 link del dev wg1
        local to_kill="$(ip netns pids $netns0) $(ip netns pids $netns1) $(ip netns pids $netns2)"
        [[ -n $to_kill ]] && kill $to_kill
        pp ip netns del $netns1
@@ -77,18 +80,20 @@ ip0 link set wg0 netns $netns2
 key1="$(pp wg genkey)"
 key2="$(pp wg genkey)"
 key3="$(pp wg genkey)"
+key4="$(pp wg genkey)"
 pub1="$(pp wg pubkey <<<"$key1")"
 pub2="$(pp wg pubkey <<<"$key2")"
 pub3="$(pp wg pubkey <<<"$key3")"
+pub4="$(pp wg pubkey <<<"$key4")"
 psk="$(pp wg genpsk)"
 [[ -n $key1 && -n $key2 && -n $psk ]]
 
 configure_peers() {
        ip1 addr add 192.168.241.1/24 dev wg0
-       ip1 addr add fd00::1/24 dev wg0
+       ip1 addr add fd00::1/112 dev wg0
 
        ip2 addr add 192.168.241.2/24 dev wg0
-       ip2 addr add fd00::2/24 dev wg0
+       ip2 addr add fd00::2/112 dev wg0
 
        n1 wg set wg0 \
                private-key <(echo "$key1") \
@@ -230,9 +235,38 @@ n1 ping -W 1 -c 1 192.168.241.2
 n1 wg set wg0 private-key <(echo "$key3")
 n2 wg set wg0 peer "$pub3" preshared-key <(echo "$psk") allowed-ips 192.168.241.1/32 peer "$pub1" remove
 n1 ping -W 1 -c 1 192.168.241.2
+n2 wg set wg0 peer "$pub3" remove
+
+# Test that we can route wg through wg
+ip1 addr flush dev wg0
+ip2 addr flush dev wg0
+ip1 addr add fd00::5:1/112 dev wg0
+ip2 addr add fd00::5:2/112 dev wg0
+n1 wg set wg0 private-key <(echo "$key1") peer "$pub2" preshared-key <(echo "$psk") allowed-ips fd00::5:2/128 endpoint 127.0.0.1:2
+n2 wg set wg0 private-key <(echo "$key2") listen-port 2 peer "$pub1" preshared-key <(echo "$psk") allowed-ips fd00::5:1/128 endpoint 127.212.121.99:9998
+ip1 link add wg1 type wireguard
+ip2 link add wg1 type wireguard
+ip1 addr add 192.168.241.1/24 dev wg1
+ip1 addr add fd00::1/112 dev wg1
+ip2 addr add 192.168.241.2/24 dev wg1
+ip2 addr add fd00::2/112 dev wg1
+ip1 link set mtu 1340 up dev wg1
+ip2 link set mtu 1340 up dev wg1
+n1 wg set wg1 listen-port 5 private-key <(echo "$key3") peer "$pub4" allowed-ips 192.168.241.2/32,fd00::2/128 endpoint [fd00::5:2]:5
+n2 wg set wg1 listen-port 5 private-key <(echo "$key4") peer "$pub3" allowed-ips 192.168.241.1/32,fd00::1/128 endpoint [fd00::5:1]:5
+tests
+# Try to set up a routing loop between the two namespaces
+ip1 link set netns $netns0 dev wg1
+ip0 addr add 192.168.241.1/24 dev wg1
+ip0 link set up dev wg1
+n0 ping -W 1 -c 1 192.168.241.2
+n1 wg set wg0 peer "$pub2" endpoint 192.168.241.2:7
+ip2 link del wg0
+ip2 link del wg1
+! n0 ping -W 1 -c 10 -f 192.168.241.2 || false # Should not crash kernel
 
+ip0 link del wg1
 ip1 link del wg0
-ip2 link del wg0
 
 # Test using NAT. We now change the topology to this:
 # ┌────────────────────────────────────────┐    ┌────────────────────────────────────────────────┐     ┌────────────────────────────────────────┐
@@ -282,6 +316,20 @@ pp sleep 3
 n2 ping -W 1 -c 1 192.168.241.1
 n1 wg set wg0 peer "$pub2" persistent-keepalive 0
 
+# Test that onion routing works, even when it loops
+n1 wg set wg0 peer "$pub3" allowed-ips 192.168.242.2/32 endpoint 192.168.241.2:5
+ip1 addr add 192.168.242.1/24 dev wg0
+ip2 link add wg1 type wireguard
+ip2 addr add 192.168.242.2/24 dev wg1
+n2 wg set wg1 private-key <(echo "$key3") listen-port 5 peer "$pub1" allowed-ips 192.168.242.1/32
+ip2 link set wg1 up
+n1 ping -W 1 -c 1 192.168.242.2
+ip2 link del wg1
+n1 wg set wg0 peer "$pub3" endpoint 192.168.242.2:5
+! n1 ping -W 1 -c 1 192.168.242.2 || false # Should not crash kernel
+n1 wg set wg0 peer "$pub3" remove
+ip1 addr del 192.168.242.1/24 dev wg0
+
 # Do a wg-quick(8)-style policy routing for the default route, making sure vethc has a v6 address to tease out bugs.
 ip1 -6 addr add fc00::9/96 dev vethc
 ip1 -6 route add default via fc00::1
index 990c510..f52f1e2 100644 (file)
@@ -10,3 +10,4 @@ CONFIG_CMDLINE_BOOL=y
 CONFIG_CMDLINE="console=hvc0 wg.success=hvc1"
 CONFIG_SECTION_MISMATCH_WARN_ONLY=y
 CONFIG_FRAME_WARN=1280
+CONFIG_THREAD_SHIFT=14
index 5909e7e..9803dbb 100644 (file)
@@ -25,7 +25,6 @@ CONFIG_KASAN=y
 CONFIG_KASAN_INLINE=y
 CONFIG_UBSAN=y
 CONFIG_UBSAN_SANITIZE_ALL=y
-CONFIG_UBSAN_NO_ALIGNMENT=y
 CONFIG_UBSAN_NULL=y
 CONFIG_DEBUG_KMEMLEAK=y
 CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=8192
index d31f267..25c0e47 100644 (file)
@@ -125,12 +125,16 @@ static void __hyp_text kvm_adjust_itstate(struct kvm_vcpu *vcpu)
  */
 void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
 {
+       u32 pc = *vcpu_pc(vcpu);
        bool is_thumb;
 
        is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_AA32_T_BIT);
        if (is_thumb && !is_wide_instr)
-               *vcpu_pc(vcpu) += 2;
+               pc += 2;
        else
-               *vcpu_pc(vcpu) += 4;
+               pc += 4;
+
+       *vcpu_pc(vcpu) = pc;
+
        kvm_adjust_itstate(vcpu);
 }
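
Bumping the PC through a local u32 is the entire fix: an AArch32 guest's program counter must wrap at the 4GiB boundary, whereas the old in-place increment of the 64-bit PC register could carry into bit 32 and leave the guest an impossible address. The wrap behaviour, as a stand-alone sketch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t pc = 0xfffffffc;  /* last 4-byte slot of a 32-bit space */

            pc += 4;  /* unsigned arithmetic wraps, as the guest expects */
            printf("next pc = %#x\n", pc);  /* 0, not 0x100000000 */
            return 0;
    }
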
index 14a162e..ae36471 100644 (file)
@@ -186,6 +186,33 @@ static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
        kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
 }
 
+static void kvm_psci_narrow_to_32bit(struct kvm_vcpu *vcpu)
+{
+       int i;
+
+       /*
+        * Zero the input registers' upper 32 bits. They will be fully
+        * zeroed on exit, so we're fine changing them in place.
+        */
+       for (i = 1; i < 4; i++)
+               vcpu_set_reg(vcpu, i, lower_32_bits(vcpu_get_reg(vcpu, i)));
+}
+
+static unsigned long kvm_psci_check_allowed_function(struct kvm_vcpu *vcpu, u32 fn)
+{
+       switch (fn) {
+       case PSCI_0_2_FN64_CPU_SUSPEND:
+       case PSCI_0_2_FN64_CPU_ON:
+       case PSCI_0_2_FN64_AFFINITY_INFO:
+               /* Disallow these functions for 32bit guests */
+               if (vcpu_mode_is_32bit(vcpu))
+                       return PSCI_RET_NOT_SUPPORTED;
+               break;
+       }
+
+       return 0;
+}
+
 static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 {
        struct kvm *kvm = vcpu->kvm;
@@ -193,6 +220,10 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
        unsigned long val;
        int ret = 1;
 
+       val = kvm_psci_check_allowed_function(vcpu, psci_fn);
+       if (val)
+               goto out;
+
        switch (psci_fn) {
        case PSCI_0_2_FN_PSCI_VERSION:
                /*
@@ -210,12 +241,16 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
                val = PSCI_RET_SUCCESS;
                break;
        case PSCI_0_2_FN_CPU_ON:
+               kvm_psci_narrow_to_32bit(vcpu);
+               fallthrough;
        case PSCI_0_2_FN64_CPU_ON:
                mutex_lock(&kvm->lock);
                val = kvm_psci_vcpu_on(vcpu);
                mutex_unlock(&kvm->lock);
                break;
        case PSCI_0_2_FN_AFFINITY_INFO:
+               kvm_psci_narrow_to_32bit(vcpu);
+               fallthrough;
        case PSCI_0_2_FN64_AFFINITY_INFO:
                val = kvm_psci_vcpu_affinity_info(vcpu);
                break;
@@ -256,6 +291,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
                break;
        }
 
+out:
        smccc_set_retval(vcpu, val, 0, 0, 0);
        return ret;
 }
@@ -273,6 +309,10 @@ static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu)
                break;
        case PSCI_1_0_FN_PSCI_FEATURES:
                feature = smccc_get_arg1(vcpu);
+               val = kvm_psci_check_allowed_function(vcpu, feature);
+               if (val)
+                       break;
+
                switch (feature) {
                case PSCI_0_2_FN_PSCI_VERSION:
                case PSCI_0_2_FN_CPU_SUSPEND:
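
Both additions enforce the SMCCC 32-bit calling convention: an AArch32 caller may leave anything in the upper halves of its argument registers, so they must be zero-extended before use, and the SMC64 function IDs are simply not available to 32-bit guests, whether invoked directly or probed via PSCI_FEATURES. The narrowing itself is plain masking; a stand-alone equivalent of the lower_32_bits() helper used above:

    #include <stdint.h>

    /* Keep only the bits an AArch32 guest can actually set. */
    static inline uint64_t lower_32_bits(uint64_t n)
    {
            return n & 0xffffffffULL;
    }
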
index a963b9d..32e32d6 100644 (file)
@@ -294,8 +294,15 @@ int vgic_init(struct kvm *kvm)
                }
        }
 
-       if (vgic_has_its(kvm)) {
+       if (vgic_has_its(kvm))
                vgic_lpi_translation_cache_init(kvm);
+
+       /*
+        * If we have GICv4.1 enabled, unconditionally enable the
+        * v4 support so that we get HW-accelerated vSGIs. Otherwise, only
+        * enable it if we present a virtual ITS to the guest.
+        */
+       if (vgic_supports_direct_msis(kvm)) {
                ret = vgic_v4_init(kvm);
                if (ret)
                        goto out;
@@ -348,6 +355,12 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 
+       /*
+        * Retire all pending LPIs on this vcpu anyway as we're
+        * going to destroy it.
+        */
+       vgic_flush_pending_lpis(vcpu);
+
        INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
 }
 
@@ -359,10 +372,10 @@ static void __kvm_vgic_destroy(struct kvm *kvm)
 
        vgic_debug_destroy(kvm);
 
-       kvm_vgic_dist_destroy(kvm);
-
        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_vgic_vcpu_destroy(vcpu);
+
+       kvm_vgic_dist_destroy(kvm);
 }
 
 void kvm_vgic_destroy(struct kvm *kvm)
index d53d34a..c012a52 100644 (file)
@@ -96,14 +96,21 @@ out_unlock:
         * We "cache" the configuration table entries in our struct vgic_irq's.
         * However we only have those structs for mapped IRQs, so we read in
         * the respective config data from memory here upon mapping the LPI.
+        *
+        * Should any of these fail, behave as if we couldn't create the LPI
+        * by dropping the refcount and returning the error.
         */
        ret = update_lpi_config(kvm, irq, NULL, false);
-       if (ret)
+       if (ret) {
+               vgic_put_irq(kvm, irq);
                return ERR_PTR(ret);
+       }
 
        ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
-       if (ret)
+       if (ret) {
+               vgic_put_irq(kvm, irq);
                return ERR_PTR(ret);
+       }
 
        return irq;
 }
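
The rule restored here is a general one: once lookup code has taken a reference, every failure exit must drop it before returning. Reduced to a self-contained sketch with illustrative names (obj stands in for struct vgic_irq, obj_put() for vgic_put_irq(), configure() for the two fallible calls above):

    #include <stdlib.h>

    struct obj { int refs; };

    static void obj_put(struct obj *o)
    {
            if (--o->refs == 0)
                    free(o);
    }

    static int configure(struct obj *o)
    {
            return o ? 0 : -1;  /* stand-in for a fallible init step */
    }

    static struct obj *obj_create(void)
    {
            struct obj *o = malloc(sizeof(*o));

            if (!o)
                    return NULL;
            o->refs = 1;  /* creation takes the first reference */

            if (configure(o)) {
                    obj_put(o);  /* the fix: error paths drop it too */
                    return NULL;
            }
            return o;
    }
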
index 5945f06..a016f07 100644 (file)
@@ -409,24 +409,28 @@ static const struct vgic_register_region vgic_v2_dist_registers[] = {
                NULL, vgic_mmio_uaccess_write_v2_group, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
-               vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
+               vgic_mmio_read_enable, vgic_mmio_write_senable,
+               NULL, vgic_uaccess_write_senable, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
-               vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
+               vgic_mmio_read_enable, vgic_mmio_write_cenable,
+               NULL, vgic_uaccess_write_cenable, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
-               vgic_mmio_read_pending, vgic_mmio_write_spending, NULL, NULL, 1,
+               vgic_mmio_read_pending, vgic_mmio_write_spending,
+               NULL, vgic_uaccess_write_spending, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
-               vgic_mmio_read_pending, vgic_mmio_write_cpending, NULL, NULL, 1,
+               vgic_mmio_read_pending, vgic_mmio_write_cpending,
+               NULL, vgic_uaccess_write_cpending, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
                vgic_mmio_read_active, vgic_mmio_write_sactive,
-               NULL, vgic_mmio_uaccess_write_sactive, 1,
+               vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
                vgic_mmio_read_active, vgic_mmio_write_cactive,
-               NULL, vgic_mmio_uaccess_write_cactive, 1,
+               vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
                vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
index e72dcc4..89a14ec 100644 (file)
@@ -50,7 +50,8 @@ bool vgic_has_its(struct kvm *kvm)
 
 bool vgic_supports_direct_msis(struct kvm *kvm)
 {
-       return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm);
+       return (kvm_vgic_global_state.has_gicv4_1 ||
+               (kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm)));
 }
 
 /*
@@ -538,10 +539,12 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = {
                vgic_mmio_read_group, vgic_mmio_write_group, NULL, NULL, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISENABLER,
-               vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
+               vgic_mmio_read_enable, vgic_mmio_write_senable,
+               NULL, vgic_uaccess_write_senable, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICENABLER,
-               vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
+               vgic_mmio_read_enable, vgic_mmio_write_cenable,
+               NULL, vgic_uaccess_write_cenable, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR,
                vgic_mmio_read_pending, vgic_mmio_write_spending,
@@ -553,11 +556,11 @@ static const struct vgic_register_region vgic_v3_dist_registers[] = {
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
                vgic_mmio_read_active, vgic_mmio_write_sactive,
-               NULL, vgic_mmio_uaccess_write_sactive, 1,
+               vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
                vgic_mmio_read_active, vgic_mmio_write_cactive,
-               NULL, vgic_mmio_uaccess_write_cactive,
+               vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive,
                1, VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
                vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
@@ -609,11 +612,13 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
        REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IGROUPR0,
                vgic_mmio_read_group, vgic_mmio_write_group, 4,
                VGIC_ACCESS_32bit),
-       REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ISENABLER0,
-               vgic_mmio_read_enable, vgic_mmio_write_senable, 4,
+       REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISENABLER0,
+               vgic_mmio_read_enable, vgic_mmio_write_senable,
+               NULL, vgic_uaccess_write_senable, 4,
                VGIC_ACCESS_32bit),
-       REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ICENABLER0,
-               vgic_mmio_read_enable, vgic_mmio_write_cenable, 4,
+       REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICENABLER0,
+               vgic_mmio_read_enable, vgic_mmio_write_cenable,
+               NULL, vgic_uaccess_write_cenable, 4,
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISPENDR0,
                vgic_mmio_read_pending, vgic_mmio_write_spending,
@@ -625,12 +630,12 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
                VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISACTIVER0,
                vgic_mmio_read_active, vgic_mmio_write_sactive,
-               NULL, vgic_mmio_uaccess_write_sactive,
-               4, VGIC_ACCESS_32bit),
+               vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 4,
+               VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICACTIVER0,
                vgic_mmio_read_active, vgic_mmio_write_cactive,
-               NULL, vgic_mmio_uaccess_write_cactive,
-               4, VGIC_ACCESS_32bit),
+               vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 4,
+               VGIC_ACCESS_32bit),
        REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IPRIORITYR0,
                vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
                VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
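
The new uaccess columns exist because a userspace save/restore access runs without a running vcpu: unlike a trapped guest write, it must only manipulate the virtual interrupt state, never the physical one, and (as the vgic-mmio.c hunks below show) it has to make up for GICv2 SGI source information that cannot be recovered. All these handlers share one shape, though: walk the set bits of the written word and act on one interrupt per bit. A stand-alone sketch of that walk:

    #include <stdio.h>

    int main(void)
    {
            unsigned long val = 0x41;  /* userspace wrote bits 0 and 6 */
            unsigned int intid = 32;   /* first INTID the register covers */

            for (unsigned int i = 0; i < sizeof(val) * 8; i++) {
                    if (!(val & (1UL << i)))
                            continue;
                    printf("acting on INTID %u\n", intid + i);
            }
            return 0;
    }
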
index 2199302..b2d73fc 100644 (file)
@@ -184,6 +184,48 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
        }
 }
 
+int vgic_uaccess_write_senable(struct kvm_vcpu *vcpu,
+                              gpa_t addr, unsigned int len,
+                              unsigned long val)
+{
+       u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+       int i;
+       unsigned long flags;
+
+       for_each_set_bit(i, &val, len * 8) {
+               struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
+               irq->enabled = true;
+               vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+
+               vgic_put_irq(vcpu->kvm, irq);
+       }
+
+       return 0;
+}
+
+int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
+                              gpa_t addr, unsigned int len,
+                              unsigned long val)
+{
+       u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+       int i;
+       unsigned long flags;
+
+       for_each_set_bit(i, &val, len * 8) {
+               struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
+               irq->enabled = false;
+               raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+
+               vgic_put_irq(vcpu->kvm, irq);
+       }
+
+       return 0;
+}
+
 unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len)
 {
@@ -219,17 +261,6 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
        return value;
 }
 
-/* Must be called with irq->irq_lock held */
-static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
-                                bool is_uaccess)
-{
-       if (is_uaccess)
-               return;
-
-       irq->pending_latch = true;
-       vgic_irq_set_phys_active(irq, true);
-}
-
 static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
 {
        return (vgic_irq_is_sgi(irq->intid) &&
@@ -240,7 +271,6 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
 {
-       bool is_uaccess = !kvm_get_running_vcpu();
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;
@@ -270,22 +300,48 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
                        continue;
                }
 
+               irq->pending_latch = true;
                if (irq->hw)
-                       vgic_hw_irq_spending(vcpu, irq, is_uaccess);
-               else
-                       irq->pending_latch = true;
+                       vgic_irq_set_phys_active(irq, true);
+
                vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
 }
 
-/* Must be called with irq->irq_lock held */
-static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
-                                bool is_uaccess)
+int vgic_uaccess_write_spending(struct kvm_vcpu *vcpu,
+                               gpa_t addr, unsigned int len,
+                               unsigned long val)
 {
-       if (is_uaccess)
-               return;
+       u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+       int i;
+       unsigned long flags;
+
+       for_each_set_bit(i, &val, len * 8) {
+               struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
+               irq->pending_latch = true;
 
+               /*
+                * GICv2 SGIs are terribly broken. We can't restore
+                * the source of the interrupt, so just pick the vcpu
+                * itself as the source...
+                */
+               if (is_vgic_v2_sgi(vcpu, irq))
+                       irq->source |= BIT(vcpu->vcpu_id);
+
+               vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+
+               vgic_put_irq(vcpu->kvm, irq);
+       }
+
+       return 0;
+}
+
+/* Must be called with irq->irq_lock held */
+static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
+{
        irq->pending_latch = false;
 
        /*
@@ -308,7 +364,6 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val)
 {
-       bool is_uaccess = !kvm_get_running_vcpu();
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        int i;
        unsigned long flags;
@@ -339,7 +394,7 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
                }
 
                if (irq->hw)
-                       vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
+                       vgic_hw_irq_cpending(vcpu, irq);
                else
                        irq->pending_latch = false;
 
@@ -348,8 +403,68 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
        }
 }
 
-unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
-                                   gpa_t addr, unsigned int len)
+int vgic_uaccess_write_cpending(struct kvm_vcpu *vcpu,
+                               gpa_t addr, unsigned int len,
+                               unsigned long val)
+{
+       u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+       int i;
+       unsigned long flags;
+
+       for_each_set_bit(i, &val, len * 8) {
+               struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
+               /*
+                * More fun with GICv2 SGIs! If we're clearing one of them
+                * from userspace, which source vcpu to clear? Let's not
+                * even think of it, and blow the whole set.
+                */
+               if (is_vgic_v2_sgi(vcpu, irq))
+                       irq->source = 0;
+
+               irq->pending_latch = false;
+
+               raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+
+               vgic_put_irq(vcpu->kvm, irq);
+       }
+
+       return 0;
+}
+
+/*
+ * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
+ * is not queued on some running VCPU's LRs, because then the change to the
+ * active state can be overwritten when the VCPU's state is synced coming back
+ * from the guest.
+ *
+ * For shared interrupts as well as GICv3 private interrupts, we have to
+ * stop all the VCPUs because interrupts can be migrated while we don't hold
+ * the IRQ locks and we don't want to be chasing moving targets.
+ *
+ * For GICv2 private interrupts we don't have to do anything because
+ * userspace accesses to the VGIC state already require all VCPUs to be
+ * stopped, and only the VCPU itself can modify its private interrupts'
+ * active state, which guarantees that the VCPU is not running.
+ */
+static void vgic_access_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
+{
+       if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
+           intid >= VGIC_NR_PRIVATE_IRQS)
+               kvm_arm_halt_guest(vcpu->kvm);
+}
+
+/* See vgic_access_active_prepare */
+static void vgic_access_active_finish(struct kvm_vcpu *vcpu, u32 intid)
+{
+       if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
+           intid >= VGIC_NR_PRIVATE_IRQS)
+               kvm_arm_resume_guest(vcpu->kvm);
+}
+
+static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+                                            gpa_t addr, unsigned int len)
 {
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
        u32 value = 0;
@@ -359,6 +474,10 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
        for (i = 0; i < len * 8; i++) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
+               /*
+                * Even for HW interrupts, don't evaluate the HW state, as
+                * the guest is only interested in the virtual state.
+                */
                if (irq->active)
                        value |= (1U << i);
 
@@ -368,6 +487,29 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
        return value;
 }
 
+unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+                                   gpa_t addr, unsigned int len)
+{
+       u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+       u32 val;
+
+       mutex_lock(&vcpu->kvm->lock);
+       vgic_access_active_prepare(vcpu, intid);
+
+       val = __vgic_mmio_read_active(vcpu, addr, len);
+
+       vgic_access_active_finish(vcpu, intid);
+       mutex_unlock(&vcpu->kvm->lock);
+
+       return val;
+}
+
+unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
+                                       gpa_t addr, unsigned int len)
+{
+       return __vgic_mmio_read_active(vcpu, addr, len);
+}
+
 /* Must be called with irq->irq_lock held */
 static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                                      bool active, bool is_uaccess)
@@ -426,36 +568,6 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 }
 
-/*
- * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
- * is not queued on some running VCPU's LRs, because then the change to the
- * active state can be overwritten when the VCPU's state is synced coming back
- * from the guest.
- *
- * For shared interrupts, we have to stop all the VCPUs because interrupts can
- * be migrated while we don't hold the IRQ locks and we don't want to be
- * chasing moving targets.
- *
- * For private interrupts we don't have to do anything because userspace
- * accesses to the VGIC state already require all VCPUs to be stopped, and
- * only the VCPU itself can modify its private interrupts active state, which
- * guarantees that the VCPU is not running.
- */
-static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
-{
-       if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
-           intid > VGIC_NR_PRIVATE_IRQS)
-               kvm_arm_halt_guest(vcpu->kvm);
-}
-
-/* See vgic_change_active_prepare */
-static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
-{
-       if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
-           intid > VGIC_NR_PRIVATE_IRQS)
-               kvm_arm_resume_guest(vcpu->kvm);
-}
-
 static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
                                      gpa_t addr, unsigned int len,
                                      unsigned long val)
@@ -477,11 +589,11 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 
        mutex_lock(&vcpu->kvm->lock);
-       vgic_change_active_prepare(vcpu, intid);
+       vgic_access_active_prepare(vcpu, intid);
 
        __vgic_mmio_write_cactive(vcpu, addr, len, val);
 
-       vgic_change_active_finish(vcpu, intid);
+       vgic_access_active_finish(vcpu, intid);
        mutex_unlock(&vcpu->kvm->lock);
 }
 
@@ -514,11 +626,11 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
        u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 
        mutex_lock(&vcpu->kvm->lock);
-       vgic_change_active_prepare(vcpu, intid);
+       vgic_access_active_prepare(vcpu, intid);
 
        __vgic_mmio_write_sactive(vcpu, addr, len, val);
 
-       vgic_change_active_finish(vcpu, intid);
+       vgic_access_active_finish(vcpu, intid);
        mutex_unlock(&vcpu->kvm->lock);
 }
 
index 5af2aef..fefcca2 100644 (file)
@@ -138,6 +138,14 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val);
 
+int vgic_uaccess_write_senable(struct kvm_vcpu *vcpu,
+                              gpa_t addr, unsigned int len,
+                              unsigned long val);
+
+int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
+                              gpa_t addr, unsigned int len,
+                              unsigned long val);
+
 unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
                                     gpa_t addr, unsigned int len);
 
@@ -149,9 +157,20 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
                              gpa_t addr, unsigned int len,
                              unsigned long val);
 
+int vgic_uaccess_write_spending(struct kvm_vcpu *vcpu,
+                               gpa_t addr, unsigned int len,
+                               unsigned long val);
+
+int vgic_uaccess_write_cpending(struct kvm_vcpu *vcpu,
+                               gpa_t addr, unsigned int len,
+                               unsigned long val);
+
 unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
                                    gpa_t addr, unsigned int len);
 
+unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
+                                       gpa_t addr, unsigned int len);
+
 void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
                             gpa_t addr, unsigned int len,
                             unsigned long val);